/*
 * Copyright (c) 2012-2013, 2015-2016 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Ron Dreslinski
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __MEM_CACHE_BASE_HH__
#define __MEM_CACHE_BASE_HH__

#include <algorithm>
#include <list>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/cache/write_queue.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "params/BaseCache.hh"
#include "sim/eventq.hh"
#include "sim/full_system.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

class MSHR;

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
  protected:
    /**
     * Indexes to enumerate the MSHR queues.
     */
    enum MSHRQueueIndex {
        MSHRQueue_MSHRs,
        MSHRQueue_WriteBuffer
    };

  public:
    /**
     * Reasons for caches to be blocked.
     */
    enum BlockedCause {
        Blocked_NoMSHRs = MSHRQueue_MSHRs,
        Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
        Blocked_NoTargets,
        NUM_BLOCKED_CAUSES
    };

  protected:

    /**
     * A cache master port is used for the memory-side port of the
     * cache, and in addition to the basic timing port that only sends
     * response packets through a transmit list, it also offers the
     * ability to schedule and send request packets (requests &
     * writebacks). The send event is scheduled through schedSendEvent,
     * and the sendDeferredPacket of the timing port is modified to
     * consider both the transmit list and the requests from the MSHR.
     */
    class CacheMasterPort : public QueuedMasterPort
    {

      public:

        /**
         * Schedule a send of a request packet (from the MSHR). Note
         * that we could already have a retry outstanding.
         */
        void schedSendEvent(Tick time)
        {
            DPRINTF(CachePort, "Scheduling send event at %llu\n", time);
            reqQueue.schedSendEvent(time);
        }

      protected:

        CacheMasterPort(const std::string &_name, BaseCache *_cache,
                        ReqPacketQueue &_reqQueue,
                        SnoopRespPacketQueue &_snoopRespQueue) :
            QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
        { }

        /**
         * Memory-side port always snoops.
         *
         * @return always true
         */
        virtual bool isSnooping() const { return true; }
    };
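
    // Illustrative note (an assumption, not something declared in this
    // file): the derived memory-side port's sendDeferredPacket() is
    // expected to consult the MSHR and write queues in addition to its own
    // transmit list, roughly:
    //
    //     QueueEntry *entry = nextReadyEntry();  // hypothetical helper
    //     if (entry)
    //         entry->sendPacket(*owningCache);   // request or writeback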

    /**
     * A cache slave port is used for the CPU-side port of the cache,
     * and it is basically a simple timing port that uses a transmit
     * list for responses to the CPU (or connected master). In
     * addition, it has the functionality to block the port for
     * incoming requests. If blocked, the port will issue a retry once
     * unblocked.
     */
    class CacheSlavePort : public QueuedSlavePort
    {

      public:

        /** Do not accept any new requests. */
        void setBlocked();

        /** Return to normal operation and accept new requests. */
        void clearBlocked();

        bool isBlocked() const { return blocked; }

      protected:

        CacheSlavePort(const std::string &_name, BaseCache *_cache,
                       const std::string &_label);

        /** A normal packet queue used to store responses. */
        RespPacketQueue queue;

        bool blocked;

        bool mustSendRetry;

      private:

        void processSendRetry();

        EventWrapper<CacheSlavePort,
                     &CacheSlavePort::processSendRetry> sendRetryEvent;

    };
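
    // Illustrative sketch (assumed usage in a derived port, not code from
    // this file): while blocked, a CPU-side port typically rejects timing
    // requests and relies on clearBlocked() to retry the master later via
    // processSendRetry(), roughly:
    //
    //     if (blocked || mustSendRetry) {
    //         mustSendRetry = true;  // remember to retry once unblocked
    //         return false;          // reject the request for now
    //     }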

    CacheSlavePort *cpuSidePort;
    CacheMasterPort *memSidePort;

  protected:

    /** Miss status registers */
    MSHRQueue mshrQueue;

    /** Write/writeback buffer */
    WriteQueue writeBuffer;

    /**
     * Mark a request as in service (sent downstream in the memory
     * system), effectively making this MSHR the ordering point.
     */
    void markInService(MSHR *mshr, bool pending_modified_resp)
    {
        bool wasFull = mshrQueue.isFull();
        mshrQueue.markInService(mshr, pending_modified_resp);

        if (wasFull && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }
    }

    void markInService(WriteQueueEntry *entry)
    {
        bool wasFull = writeBuffer.isFull();
        writeBuffer.markInService(entry);

        if (wasFull && !writeBuffer.isFull()) {
            clearBlocked(Blocked_NoWBBuffers);
        }
    }

    /**
     * Determine if we should allocate on a fill or not.
     *
     * @param cmd Packet command being added as an MSHR target
     *
     * @return Whether we should allocate on a fill or not
     */
    virtual bool allocOnFill(MemCmd cmd) const = 0;

    /**
     * Write back dirty blocks in the cache using functional accesses.
     */
    virtual void memWriteback() = 0;

    /**
     * Invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to
     * memory. Make sure to call memWriteback() first if you want
     * them written to memory.
     */
    virtual void memInvalidate() = 0;

    /**
     * Determine if there are any dirty blocks in the cache.
     *
     * @return true if at least one block is dirty, false otherwise.
     */
    virtual bool isDirty() const = 0;

    /**
     * Determine if an address is in the ranges covered by this
     * cache. This is useful to filter snoops.
     *
     * @param addr Address to check against
     *
     * @return If the address in question is in range
     */
    bool inRange(Addr addr) const;
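
    // Illustrative sketch (assumed usage, not code from this file): a
    // derived cache can use inRange() to ignore snoops for addresses it
    // does not cover, e.g.
    //
    //     if (!inRange(pkt->getAddr()))
    //         return;  // snoop is not for us, nothing to do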

    /** Block size of this cache */
    const unsigned blkSize;

    /**
     * The latency of a tag lookup in this cache. It is incurred on
     * every access to the cache.
     */
    const Cycles lookupLatency;

    /**
     * This is the forward latency of the cache. It occurs when there
     * is a cache miss and a request is forwarded downstream, in
     * particular an outbound miss.
     */
    const Cycles forwardLatency;

    /** The latency to fill a cache block */
    const Cycles fillLatency;

    /**
     * The latency of sending a response to the upper-level cache/core
     * on a line fill. The responseLatency parameter captures this
     * latency.
     */
    const Cycles responseLatency;

    /** The number of targets for each MSHR. */
    const int numTarget;

    /** Do we forward snoops from mem side port through to cpu side port? */
    bool forwardSnoops;

    /**
     * Is this cache read only, for example the instruction cache or
     * table-walker cache. A cache that is read only should never see
     * any writes, and should never get any dirty data (and hence
     * never have to do any writebacks).
     */
    const bool isReadOnly;

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /** Increasing order number assigned to each incoming request. */
    uint64_t order;

    /** Stores the time when the cache was blocked, for statistics. */
    Cycles blockedCycle;

    /** Pointer to the MSHR that has no targets. */
    MSHR *noTargetMSHR;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

    /**
     * The address range to which the cache responds on the CPU side.
     * Normally this is all possible memory addresses.
     */
    const AddrRangeList addrRanges;

  public:
    /** System we are currently operating in. */
    System *system;

    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector hits[MemCmd::NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector misses[MemCmd::NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector missLatency[MemCmd::NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar cacheCopies;

    /** Number of blocks written back per thread. */
    Stats::Vector writebacks;

    /** Number of misses that hit in the MSHRs per command and thread. */
    Stats::Vector mshr_hits[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that hit in the MSHRs. */
    Stats::Formula demandMshrHits;
    /** Total number of misses that hit in the MSHRs. */
    Stats::Formula overallMshrHits;

    /** Number of misses that miss in the MSHRs, per command and thread. */
    Stats::Vector mshr_misses[MemCmd::NUM_MEM_CMDS];
    /** Demand misses that miss in the MSHRs. */
    Stats::Formula demandMshrMisses;
    /** Total number of misses that miss in the MSHRs. */
    Stats::Formula overallMshrMisses;

    /** Number of uncacheable accesses, per command and thread. */
    Stats::Vector mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
    /** Total number of uncacheable accesses. */
    Stats::Formula overallMshrUncacheable;

    /** Total cycle latency of each MSHR miss, per command and thread. */
    Stats::Vector mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of demand MSHR misses. */
    Stats::Formula demandMshrMissLatency;
    /** Total cycle latency of overall MSHR misses. */
    Stats::Formula overallMshrMissLatency;

    /** Total cycle latency of each uncacheable access, per command and thread. */
    Stats::Vector mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
    /** Total cycle latency of overall uncacheable accesses. */
    Stats::Formula overallMshrUncacheableLatency;

#if 0
    /** The total number of MSHR accesses per command and thread. */
    Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
    /** The total number of demand MSHR accesses. */
    Stats::Formula demandMshrAccesses;
    /** The total number of MSHR accesses. */
    Stats::Formula overallMshrAccesses;
#endif

    /** The miss rate in the MSHRs per command and thread. */
    Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
    /** The demand miss rate in the MSHRs. */
    Stats::Formula demandMshrMissRate;
    /** The overall miss rate in the MSHRs. */
    Stats::Formula overallMshrMissRate;

    /** The average latency of an MSHR miss, per command and thread. */
    Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
    /** The average latency of a demand MSHR miss. */
    Stats::Formula demandAvgMshrMissLatency;
    /** The average overall latency of an MSHR miss. */
    Stats::Formula overallAvgMshrMissLatency;

    /** The average latency of an uncacheable access, per command and thread. */
    Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
    /** The average overall latency of an uncacheable access. */
    Stats::Formula overallAvgMshrUncacheableLatency;

    /** The number of times a thread hit its MSHR cap. */
    Stats::Vector mshr_cap_events;
    /** The number of times software prefetches caused the MSHR to block. */
    Stats::Vector soft_prefetch_mshr_full;

    Stats::Scalar mshr_no_allocate_misses;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:
    BaseCache(const BaseCacheParams *p, unsigned blk_size);
    ~BaseCache() {}

    virtual void init();

    virtual BaseMasterPort &getMasterPort(const std::string &if_name,
                                          PortID idx = InvalidPortID);
    virtual BaseSlavePort &getSlavePort(const std::string &if_name,
                                        PortID idx = InvalidPortID);

    /**
     * Query block size of a cache.
     * @return The block size
     */
    unsigned
    getBlockSize() const
    {
        return blkSize;
    }

    Addr blockAlign(Addr addr) const { return (addr & ~(Addr(blkSize - 1))); }
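
    // Worked example (illustrative, assuming blkSize == 64): blockAlign(0x1234)
    // clears the low log2(64) = 6 offset bits and returns 0x1200, the address
    // of the block containing 0x1234.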

    const AddrRangeList &getAddrRanges() const { return addrRanges; }

    MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
    {
        MSHR *mshr = mshrQueue.allocate(blockAlign(pkt->getAddr()), blkSize,
                                        pkt, time, order++,
                                        allocOnFill(pkt->cmd));

        if (mshrQueue.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_MSHRs);
        }

        if (sched_send) {
            // schedule the send
            schedMemSideSendEvent(time);
        }

        return mshr;
    }
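
    // Illustrative usage sketch (assumed caller, not declared here): on a
    // timing-mode miss a derived cache typically allocates an MSHR for the
    // block and lets the memory-side port pick it up, e.g.
    //
    //     Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    //     allocateMissBuffer(pkt, forward_time);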

    void allocateWriteBuffer(PacketPtr pkt, Tick time)
    {
        // should only see writes or clean evicts here
        assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);

        Addr blk_addr = blockAlign(pkt->getAddr());

        WriteQueueEntry *wq_entry =
            writeBuffer.findMatch(blk_addr, pkt->isSecure());
        if (wq_entry && !wq_entry->inService) {
            DPRINTF(Cache, "Potential to merge writeback %s to %#llx\n",
                    pkt->cmdString(), pkt->getAddr());
        }

        writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);

        if (writeBuffer.isFull()) {
            setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
        }

        // schedule the send
        schedMemSideSendEvent(time);
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked() const
    {
        return blocked != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curCycle();
            cpuSidePort->setBlocked();
        }
        blocked |= flag;
        DPRINTF(Cache, "Blocking for cause %d, mask=%d\n", cause, blocked);
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blocked &= ~flag;
        DPRINTF(Cache, "Unblocking for cause %d, mask=%d\n", cause, blocked);
        if (blocked == 0) {
            blocked_cycles[cause] += curCycle() - blockedCycle;
            cpuSidePort->clearBlocked();
        }
    }
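
    // Worked example (illustrative): blocked is a bit vector, so running out
    // of MSHRs and write-buffer entries at the same time yields
    // blocked == (1 << Blocked_NoMSHRs) | (1 << Blocked_NoWBBuffers) == 0x3;
    // the CPU-side port is only unblocked once clearBlocked() has been called
    // for both causes and blocked drops back to 0.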

    /**
     * Schedule a send event for the memory-side port. If already
     * scheduled, this may reschedule the event at an earlier
     * time. When the specified time is reached, the port is free to
     * send either a response, a request, or a prefetch request.
     *
     * @param time The time when to attempt sending a packet.
     */
    void schedMemSideSendEvent(Tick time)
    {
        memSidePort->schedSendEvent(time);
    }

    virtual bool inCache(Addr addr, bool is_secure) const = 0;

    virtual bool inMissQueue(Addr addr, bool is_secure) const = 0;

    void incMissCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
        pkt->req->incAccessDepth();
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }

    void incHitCount(PacketPtr pkt)
    {
        assert(pkt->req->masterId() < system->maxMasters());
        hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
    }

};

#endif //__MEM_CACHE_BASE_HH__