--- base.cc (10466:73b7549d979e)
+++ base.cc (10623:b9646f4546ad)
 /*
- * Copyright (c) 2013 ARM Limited
+ * Copyright (c) 2013-2014 ARM Limited
  * All rights reserved.
  *
  * The license below extends only to copyright in the software and shall
  * not be construed as granting a license to any other intellectual
  * property including but not limited to intellectual property relating
  * to a hardware implementation of the functionality of the software
  * licensed hereunder. You may use the software subject to the license
  * terms below provided that you ensure that this notice is replicated

--- 22 unchanged lines hidden ---

  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Authors: Ron Dreslinski
+ *          Mitch Hayenga
  */

 /**
  * @file
  * Hardware Prefetcher Definition.
  */

 #include <list>

-#include "base/trace.hh"
-#include "debug/HWPrefetch.hh"
 #include "mem/cache/prefetch/base.hh"
 #include "mem/cache/base.hh"
-#include "mem/request.hh"
 #include "sim/system.hh"

-BasePrefetcher::BasePrefetcher(const Params *p)
-    : ClockedObject(p), size(p->size), cache(nullptr), blkSize(0),
-      latency(p->latency), degree(p->degree),
-      useMasterId(p->use_master_id), pageStop(!p->cross_pages),
-      serialSquash(p->serial_squash), onlyData(p->data_accesses_only),
-      onMissOnly(p->on_miss_only), onReadOnly(p->on_read_only),
-      onPrefetch(p->on_prefetch), system(p->sys),
+BasePrefetcher::BasePrefetcher(const BasePrefetcherParams *p)
+    : ClockedObject(p), cache(nullptr), blkSize(0), system(p->sys),
+      onMiss(p->on_miss), onRead(p->on_read),
+      onWrite(p->on_write), onData(p->on_data), onInst(p->on_inst),
       masterId(system->getMasterId(name())),
       pageBytes(system->getPageBytes())
 {
 }

 void
 BasePrefetcher::setCache(BaseCache *_cache)
 {
     assert(!cache);
     cache = _cache;
     blkSize = cache->getBlockSize();
 }

 void
 BasePrefetcher::regStats()
 {
-    pfIdentified
-        .name(name() + ".prefetcher.num_hwpf_identified")
-        .desc("number of hwpf identified")
-        ;
-
-    pfMSHRHit
-        .name(name() + ".prefetcher.num_hwpf_already_in_mshr")
-        .desc("number of hwpf that were already in mshr")
-        ;
-
-    pfCacheHit
-        .name(name() + ".prefetcher.num_hwpf_already_in_cache")
-        .desc("number of hwpf that were already in the cache")
-        ;
-
-    pfBufferHit
-        .name(name() + ".prefetcher.num_hwpf_already_in_prefetcher")
-        .desc("number of hwpf that were already in the prefetch queue")
-        ;
-
-    pfRemovedFull
-        .name(name() + ".prefetcher.num_hwpf_evicted")
-        .desc("number of hwpf removed due to no buffer left")
-        ;
-
-    pfRemovedMSHR
-        .name(name() + ".prefetcher.num_hwpf_removed_MSHR_hit")
-        .desc("number of hwpf removed because MSHR allocated")
-        ;
-
     pfIssued
-        .name(name() + ".prefetcher.num_hwpf_issued")
+        .name(name() + ".num_hwpf_issued")
         .desc("number of hwpf issued")
         ;
-
-    pfSpanPage
-        .name(name() + ".prefetcher.num_hwpf_span_page")
-        .desc("number of hwpf spanning a virtual page")
-        ;
-
-    pfSquashed
-        .name(name() + ".prefetcher.num_hwpf_squashed_from_miss")
-        .desc("number of hwpf that got squashed due to a miss "
-              "aborting calculation time")
-        ;
 }

+bool
+BasePrefetcher::observeAccess(const PacketPtr &pkt) const
+{
+    Addr addr = pkt->getAddr();
+    bool fetch = pkt->req->isInstFetch();
+    bool read = pkt->isRead();
+    bool is_secure = pkt->isSecure();
+
+    if (pkt->req->isUncacheable()) return false;
+    if (fetch && !onInst) return false;
+    if (!fetch && !onData) return false;
+    if (!fetch && read && !onRead) return false;
+    if (!fetch && !read && !onWrite) return false;
+
+    if (onMiss) {
+        return !inCache(addr, is_secure) &&
+               !inMissQueue(addr, is_secure);
+    }
+
+    return true;
+}
+
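The observeAccess() predicate added above gates which demand accesses are allowed to train the prefetcher. The following is a hypothetical, self-contained sketch of the same flag logic outside gem5 (Access and TrainFilter are illustrative names, not types from the patch):

// Stand-alone illustration of the observeAccess() gating shown above.
#include <cstdio>

struct Access { bool uncacheable, fetch, read; };

struct TrainFilter {
    bool onInst, onData, onRead, onWrite;

    bool observe(const Access &a) const
    {
        if (a.uncacheable) return false;             // never train on uncacheable
        if (a.fetch && !onInst) return false;        // instruction fetches
        if (!a.fetch && !onData) return false;       // data accesses
        if (!a.fetch && a.read && !onRead) return false;
        if (!a.fetch && !a.read && !onWrite) return false;
        return true;  // onMiss additionally requires a cache/MSHR miss
    }
};

int main()
{
    // Train only on data reads, as one possible configuration.
    TrainFilter f{false /*onInst*/, true /*onData*/, true /*onRead*/, false /*onWrite*/};
    std::printf("%d\n", f.observe({false, false, true}));   // data read  -> 1
    std::printf("%d\n", f.observe({false, true,  true}));   // inst fetch -> 0
    std::printf("%d\n", f.observe({false, false, false}));  // data write -> 0
    return 0;
}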
-inline bool
-BasePrefetcher::inCache(Addr addr, bool is_secure)
+bool
+BasePrefetcher::inCache(Addr addr, bool is_secure) const
 {
     if (cache->inCache(addr, is_secure)) {
-        pfCacheHit++;
         return true;
     }
     return false;
 }

-inline bool
-BasePrefetcher::inMissQueue(Addr addr, bool is_secure)
+bool
+BasePrefetcher::inMissQueue(Addr addr, bool is_secure) const
 {
     if (cache->inMissQueue(addr, is_secure)) {
-        pfMSHRHit++;
         return true;
     }
     return false;
 }

-PacketPtr
-BasePrefetcher::getPacket()
-{
-    DPRINTF(HWPrefetch, "Requesting a hw_pf to issue\n");
-
-    if (pf.empty()) {
-        DPRINTF(HWPrefetch, "No HW_PF found\n");
-        return NULL;
-    }
-
-    PacketPtr pkt = pf.begin()->pkt;
-    while (!pf.empty()) {
-        pkt = pf.begin()->pkt;
-        pf.pop_front();
-
-        Addr blk_addr = pkt->getAddr() & ~(Addr)(blkSize-1);
-        bool is_secure = pkt->isSecure();
-
-        if (!inCache(blk_addr, is_secure) && !inMissQueue(blk_addr, is_secure))
-            // we found a prefetch, return it
-            break;
-
-        DPRINTF(HWPrefetch, "addr 0x%x (%s) in cache, skipping\n",
-                pkt->getAddr(), is_secure ? "s" : "ns");
-        delete pkt->req;
-        delete pkt;
-
-        if (pf.empty()) {
-            cache->deassertMemSideBusRequest(BaseCache::Request_PF);
-            return NULL; // None left, all were in cache
-        }
-    }
-
-    pfIssued++;
-    assert(pkt != NULL);
-    DPRINTF(HWPrefetch, "returning 0x%x (%s)\n", pkt->getAddr(),
-            pkt->isSecure() ? "s" : "ns");
-    return pkt;
-}
-
-
-Tick
-BasePrefetcher::notify(PacketPtr &pkt, Tick tick)
-{
-    // Don't consult the prefetcher if any of the following conditions are true
-    // 1) The request is uncacheable
-    // 2) The request is a fetch, but we are only prefetching data
-    // 3) The request is a cache hit, but we are only training on misses
-    // 4) The request is a write, but we are only training on reads
-    if (!pkt->req->isUncacheable() && !(pkt->req->isInstFetch() && onlyData) &&
-        !(onMissOnly && inCache(pkt->getAddr(), true)) &&
-        !(onReadOnly && !pkt->isRead())) {
-        // Calculate the blk address
-        Addr blk_addr = pkt->getAddr() & ~(Addr)(blkSize-1);
-        bool is_secure = pkt->isSecure();
-
-        // Check if miss is in pfq, if so remove it
-        std::list<DeferredPacket>::iterator iter = inPrefetch(blk_addr,
-                                                              is_secure);
-        if (iter != pf.end()) {
-            DPRINTF(HWPrefetch, "Saw a miss to a queued prefetch addr: "
-                    "0x%x (%s), removing it\n", blk_addr,
-                    is_secure ? "s" : "ns");
-            pfRemovedMSHR++;
-            delete iter->pkt->req;
-            delete iter->pkt;
-            iter = pf.erase(iter);
-            if (pf.empty())
-                cache->deassertMemSideBusRequest(BaseCache::Request_PF);
-        }
-
-        // Remove anything in queue with delay older than time
-        // since everything is inserted in time order, start from end
-        // and work until pf.empty() or time is earlier
-        // This is done to emulate Aborting the previous work on a new miss
-        // Needed for serial calculators like GHB
-        if (serialSquash) {
-            iter = pf.end();
-            if (iter != pf.begin())
-                iter--;
-            while (!pf.empty() && iter->tick >= tick) {
-                pfSquashed++;
-                DPRINTF(HWPrefetch, "Squashing old prefetch addr: 0x%x\n",
-                        iter->pkt->getAddr());
-                delete iter->pkt->req;
-                delete iter->pkt;
-                iter = pf.erase(iter);
-                if (iter != pf.begin())
-                    iter--;
-            }
-            if (pf.empty())
-                cache->deassertMemSideBusRequest(BaseCache::Request_PF);
-        }
-
-
-        std::list<Addr> addresses;
-        std::list<Cycles> delays;
-        calculatePrefetch(pkt, addresses, delays);
-
-        std::list<Addr>::iterator addrIter = addresses.begin();
-        std::list<Cycles>::iterator delayIter = delays.begin();
-        for (; addrIter != addresses.end(); ++addrIter, ++delayIter) {
-            Addr addr = *addrIter;
-
-            pfIdentified++;
-
-            DPRINTF(HWPrefetch, "Found a pf candidate addr: 0x%x, "
-                    "inserting into prefetch queue with delay %d time %d\n",
-                    addr, *delayIter, time);
-
-            // Check if it is already in the pf buffer
-            if (inPrefetch(addr, is_secure) != pf.end()) {
-                pfBufferHit++;
-                DPRINTF(HWPrefetch, "Prefetch addr already in pf buffer\n");
-                continue;
-            }
-
-            // create a prefetch memreq
-            Request *prefetchReq = new Request(*addrIter, blkSize, 0, masterId);
-            if (is_secure)
-                prefetchReq->setFlags(Request::SECURE);
-            prefetchReq->taskId(ContextSwitchTaskId::Prefetcher);
-            PacketPtr prefetch =
-                new Packet(prefetchReq, MemCmd::HardPFReq);
-            prefetch->allocate();
-            prefetch->req->setThreadContext(pkt->req->contextId(),
-                                            pkt->req->threadId());
-
-            // Tag prefetch requests with corresponding PC to train lower
-            // cache-level prefetchers
-            if (onPrefetch && pkt->req->hasPC())
-                prefetch->req->setPC(pkt->req->getPC());
-
-            // We just remove the head if we are full
-            if (pf.size() == size) {
-                pfRemovedFull++;
-                PacketPtr old_pkt = pf.begin()->pkt;
-                DPRINTF(HWPrefetch, "Prefetch queue full, "
-                        "removing oldest 0x%x\n", old_pkt->getAddr());
-                delete old_pkt->req;
-                delete old_pkt;
-                pf.pop_front();
-            }
-
-            pf.push_back(DeferredPacket(tick + clockPeriod() * *delayIter,
-                                        prefetch));
-        }
-    }
-
-    return pf.empty() ? 0 : pf.front().tick;
-}
-
-std::list<BasePrefetcher::DeferredPacket>::iterator
-BasePrefetcher::inPrefetch(Addr address, bool is_secure)
-{
-    // Guaranteed to only be one match, we always check before inserting
-    std::list<DeferredPacket>::iterator iter;
-    for (iter = pf.begin(); iter != pf.end(); iter++) {
-        if (((*iter).pkt->getAddr() & ~(Addr)(blkSize-1)) == address &&
-            (*iter).pkt->isSecure() == is_secure) {
-            return iter;
-        }
-    }
-    return pf.end();
-}
-
 bool
 BasePrefetcher::samePage(Addr a, Addr b) const
 {
     return roundDown(a, pageBytes) == roundDown(b, pageBytes);
 }
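Both revisions keep the samePage() helper, which derived prefetchers use to decide whether a candidate address would cross a virtual page. A minimal, self-contained sketch of that arithmetic follows; the block and page sizes are assumed example values, not taken from the patch:

// Stand-alone illustration of the block-align and samePage() arithmetic above.
#include <cassert>
#include <cstdint>

using Addr = std::uint64_t;

static const Addr blkSize   = 64;    // assumed cache block size in bytes
static const Addr pageBytes = 4096;  // assumed virtual page size in bytes

// Mask a byte address down to its cache-block address (the blkSize-1 mask above).
static Addr blockAlign(Addr addr) { return addr & ~(Addr)(blkSize - 1); }
static Addr roundDown(Addr a, Addr align) { return a - (a % align); }

static bool samePage(Addr a, Addr b)
{
    return roundDown(a, pageBytes) == roundDown(b, pageBytes);
}

int main()
{
    Addr demand = 0x1FC0;                        // last block of a 4 KiB page
    Addr next   = blockAlign(demand) + blkSize;  // next-line candidate: 0x2000

    assert(blockAlign(demand) == 0x1FC0);
    // The candidate starts a new page, so a prefetcher configured not to
    // cross pages would drop it rather than issue the prefetch.
    assert(!samePage(demand, next));
    assert(samePage(demand, demand + 8));
    return 0;
}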