1/* 2 * Copyright (c) 2012-2014 ARM Limited 3 * All rights reserved. 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software --- 42 unchanged lines hidden (view full) --- 51 52#ifndef __CACHE_HH__ 53#define __CACHE_HH__ 54 55#include "base/misc.hh" // fatal, panic, and warn 56#include "mem/cache/base.hh" 57#include "mem/cache/blk.hh" 58#include "mem/cache/mshr.hh" |
59#include "mem/cache/tags/base.hh" |
#include "sim/eventq.hh"

// Forward declaration
class BasePrefetcher;

/**
 * A coherent, non-templated cache. Tag and data storage is delegated
 * to a BaseTags object (see the tags member below).
 * @sa BaseTags, \ref gem5MemorySystem "gem5 Memory System"
 */
70class Cache : public BaseCache 71{ 72 public: |
73 |
74 /** A typedef for a list of CacheBlk pointers. */ 75 typedef std::list<CacheBlk*> BlkList; 76 |
77 protected: |
78 79 /** 80 * The CPU-side port extends the base cache slave port with access 81 * functions for functional, atomic and timing requests. 82 */ 83 class CpuSidePort : public CacheSlavePort 84 { 85 private: 86 87 // a pointer to our specific cache implementation |
88 Cache *cache; |
89 90 protected: 91 92 virtual bool recvTimingSnoopResp(PacketPtr pkt); 93 94 virtual bool recvTimingReq(PacketPtr pkt); 95 96 virtual Tick recvAtomic(PacketPtr pkt); 97 98 virtual void recvFunctional(PacketPtr pkt); 99 100 virtual AddrRangeList getAddrRanges() const; 101 102 public: 103 |
104 CpuSidePort(const std::string &_name, Cache *_cache, |
105 const std::string &_label); 106 107 }; 108 109 /** 110 * Override the default behaviour of sendDeferredPacket to enable 111 * the memory-side cache port to also send requests based on the 112 * current MSHR status. This queue has a pointer to our specific 113 * cache implementation and is used by the MemSidePort. 114 */ 115 class CacheReqPacketQueue : public ReqPacketQueue 116 { 117 118 protected: 119 |
120 Cache &cache; |
121 SnoopRespPacketQueue &snoopRespQueue; 122 123 public: 124 |
125 CacheReqPacketQueue(Cache &cache, MasterPort &port, |
126 SnoopRespPacketQueue &snoop_resp_queue, 127 const std::string &label) : 128 ReqPacketQueue(cache, port, label), cache(cache), 129 snoopRespQueue(snoop_resp_queue) { } 130 131 /** 132 * Override the normal sendDeferredPacket and do not only 133 * consider the transmit list (used for responses), but also --- 12 unchanged lines hidden (view full) --- 146 private: 147 148 /** The cache-specific queue. */ 149 CacheReqPacketQueue _reqQueue; 150 151 SnoopRespPacketQueue _snoopRespQueue; 152 153 // a pointer to our specific cache implementation |
154 Cache *cache; |
155 156 protected: 157 158 virtual void recvTimingSnoopReq(PacketPtr pkt); 159 160 virtual bool recvTimingResp(PacketPtr pkt); 161 162 virtual Tick recvAtomicSnoop(PacketPtr pkt); 163 164 virtual void recvFunctionalSnoop(PacketPtr pkt); 165 166 public: 167 |
168 MemSidePort(const std::string &_name, Cache *_cache, |
169 const std::string &_label); 170 }; 171 172 /** Tag and data Storage */ |
173 BaseTags *tags; |
174 175 /** Prefetcher */ 176 BasePrefetcher *prefetcher; 177 178 /** Temporary cache block for occasional transitory use */ |
179 CacheBlk *tempBlock; |
180 181 /** 182 * This cache should allocate a block on a line-sized write miss. 183 */ 184 const bool doFastWrites; 185 186 /** 187 * Turn line-sized writes into WriteInvalidate transactions. --- 15 unchanged lines hidden (view full) --- 203 /** 204 * Does all the processing necessary to perform the provided request. 205 * @param pkt The memory request to perform. 206 * @param blk The cache block to be updated. 207 * @param lat The latency of the access. 208 * @param writebacks List for any writebacks that need to be performed. 209 * @return Boolean indicating whether the request was satisfied. 210 */ |
211 bool access(PacketPtr pkt, CacheBlk *&blk, |
212 Cycles &lat, PacketList &writebacks); 213 214 /** 215 *Handle doing the Compare and Swap function for SPARC. 216 */ |
217 void cmpAndSwap(CacheBlk *blk, PacketPtr pkt); |
218 219 /** 220 * Find a block frame for new block at address addr targeting the 221 * given security space, assuming that the block is not currently 222 * in the cache. Append writebacks if any to provided packet 223 * list. Return free block frame. May return NULL if there are 224 * no replaceable blocks at the moment. 225 */ |
226 CacheBlk *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks); |
227 228 /** 229 * Populates a cache block and handles all outstanding requests for the 230 * satisfied fill request. This version takes two memory requests. One 231 * contains the fill data, the other is an optional target to satisfy. 232 * @param pkt The memory request with the fill data. 233 * @param blk The cache block if it already exists. 234 * @param writebacks List for any writebacks that need to be performed. 235 * @return Pointer to the new cache block. 236 */ |
237 CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk, |
238 PacketList &writebacks); 239 240 241 /** 242 * Performs the access specified by the request. 243 * @param pkt The request to perform. 244 * @return The result of the access. 245 */ --- 34 unchanged lines hidden (view full) --- 280 281 /** 282 * Performs the access specified by the request. 283 * @param pkt The request to perform. 284 * @param fromCpuSide from the CPU side port or the memory side port 285 */ 286 void functionalAccess(PacketPtr pkt, bool fromCpuSide); 287 |
288 void satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk, |
289 bool deferred_response = false, 290 bool pending_downgrade = false); |
291 bool satisfyMSHR(MSHR *mshr, PacketPtr pkt, CacheBlk *blk); |
292 293 void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data, 294 bool already_copied, bool pending_inval); 295 296 /** 297 * Sets the blk to the new state. 298 * @param blk The cache block being snooped. 299 * @param new_state The new coherence state for the block. 300 */ |
301 void handleSnoop(PacketPtr ptk, CacheBlk *blk, |
302 bool is_timing, bool is_deferred, bool pending_inval); 303 304 /** 305 * Create a writeback request for the given block. 306 * @param blk The block to writeback. 307 * @return The writeback request for the block. 308 */ |
309 PacketPtr writebackBlk(CacheBlk *blk); |
310 311 312 void memWriteback(); 313 void memInvalidate(); 314 bool isDirty() const; 315 316 /** 317 * Cache block visitor that writes back dirty cache blocks using 318 * functional writes. 319 * 320 * \return Always returns true. 321 */ |
322 bool writebackVisitor(CacheBlk &blk); |
323 /** 324 * Cache block visitor that invalidates all blocks in the cache. 325 * 326 * @warn Dirty cache lines will not be written back to memory. 327 * 328 * \return Always returns true. 329 */ |
330 bool invalidateVisitor(CacheBlk &blk); |
331 332 /** 333 * Squash all requests associated with specified thread. 334 * intended for use by I-cache. 335 * @param threadNum The thread to squash. 336 */ 337 void squash(int threadNum); 338 339 /** 340 * Generate an appropriate downstream bus request packet for the 341 * given parameters. 342 * @param cpu_pkt The upstream request that needs to be satisfied. 343 * @param blk The block currently in the cache corresponding to 344 * cpu_pkt (NULL if none). 345 * @param needsExclusive Indicates that an exclusive copy is required 346 * even if the request in cpu_pkt doesn't indicate that. 347 * @return A new Packet containing the request, or NULL if the 348 * current request in cpu_pkt should just be forwarded on. 349 */ |
350 PacketPtr getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk, |
351 bool needsExclusive) const; 352 353 /** 354 * Return the next MSHR to service, either a pending miss from the 355 * mshrQueue, a buffered write from the write buffer, or something 356 * from the prefetcher. This function is responsible for 357 * prioritizing among those sources on the fly. 358 */ --- 51 unchanged lines hidden (view full) --- 410 411 /** serialize the state of the caches 412 * We currently don't support checkpointing cache state, so this panics. 413 */ 414 virtual void serialize(std::ostream &os); 415 void unserialize(Checkpoint *cp, const std::string §ion); 416}; 417 |
418/** 419 * Wrap a method and present it as a cache block visitor. 420 * 421 * For example the forEachBlk method in the tag arrays expects a 422 * callable object/function as their parameter. This class wraps a 423 * method in an object and presents callable object that adheres to 424 * the cache block visitor protocol. 425 */ 426class CacheBlkVisitorWrapper : public CacheBlkVisitor 427{ 428 public: 429 typedef bool (Cache::*VisitorPtr)(CacheBlk &blk); 430 431 CacheBlkVisitorWrapper(Cache &_cache, VisitorPtr _visitor) 432 : cache(_cache), visitor(_visitor) {} 433 434 bool operator()(CacheBlk &blk) M5_ATTR_OVERRIDE { 435 return (cache.*visitor)(blk); 436 } 437 438 private: 439 Cache &cache; 440 VisitorPtr visitor; 441}; 442 443/** 444 * Cache block visitor that determines if there are dirty blocks in a 445 * cache. 446 * 447 * Use with the forEachBlk method in the tag array to determine if the 448 * array contains dirty blocks. 449 */ 450class CacheBlkIsDirtyVisitor : public CacheBlkVisitor 451{ 452 public: 453 CacheBlkIsDirtyVisitor() 454 : _isDirty(false) {} 455 456 bool operator()(CacheBlk &blk) M5_ATTR_OVERRIDE { 457 if (blk.isDirty()) { 458 _isDirty = true; 459 return false; 460 } else { 461 return true; 462 } 463 } 464 465 /** 466 * Does the array contain a dirty line? 467 * 468 * \return true if yes, false otherwise. 469 */ 470 bool isDirty() const { return _isDirty; }; 471 472 private: 473 bool _isDirty; 474}; 475 |
476#endif // __CACHE_HH__ |