/*
 * Copyright (c) 2012-2017 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Hansson
 */

/**
 * @file
 * Describes a cache based on template policies.
 */

#ifndef __MEM_CACHE_CACHE_HH__
#define __MEM_CACHE_CACHE_HH__

#include <unordered_set>

#include "base/logging.hh" // fatal, panic, and warn
#include "enums/Clusivity.hh"
#include "mem/cache/base.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "params/Cache.hh"
#include "sim/eventq.hh"

// Forward declaration
class BasePrefetcher;

/**
 * A template-policy based cache. The behavior of the cache can be altered by
 * supplying different template policies. TagStore handles all tag and data
 * storage @sa TagStore, \ref gem5MemorySystem "gem5 Memory System"
 */
class Cache : public BaseCache
{
  protected:

    /**
     * The CPU-side port extends the base cache slave port with access
     * functions for functional, atomic and timing requests.
     */
    class CpuSidePort : public CacheSlavePort
    {
      private:

        // a pointer to our specific cache implementation
        Cache *cache;

      protected:

        virtual bool recvTimingSnoopResp(PacketPtr pkt);

        virtual bool tryTiming(PacketPtr pkt);

        virtual bool recvTimingReq(PacketPtr pkt);

        virtual Tick recvAtomic(PacketPtr pkt);

        virtual void recvFunctional(PacketPtr pkt);

        virtual AddrRangeList getAddrRanges() const;

      public:

        CpuSidePort(const std::string &_name, Cache *_cache,
                    const std::string &_label);

    };

    /**
     * Override the default behaviour of sendDeferredPacket to enable
     * the memory-side cache port to also send requests based on the
     * current MSHR status. This queue has a pointer to our specific
     * cache implementation and is used by the MemSidePort.
     */
    class CacheReqPacketQueue : public ReqPacketQueue
    {

      protected:

        Cache &cache;
        SnoopRespPacketQueue &snoopRespQueue;

      public:

        CacheReqPacketQueue(Cache &cache, MasterPort &port,
                            SnoopRespPacketQueue &snoop_resp_queue,
                            const std::string &label) :
            ReqPacketQueue(cache, port, label), cache(cache),
            snoopRespQueue(snoop_resp_queue) { }

        /**
         * Override the normal sendDeferredPacket and do not only
         * consider the transmit list (used for responses), but also
         * requests.
         */
        virtual void sendDeferredPacket();

        /**
         * Check if there is a conflicting snoop response about to be
         * sent out, and if so simply stall any requests, and schedule
         * a send event at the same time as the next snoop response is
         * being sent out.
         */
        bool checkConflictingSnoop(Addr addr)
        {
            if (snoopRespQueue.hasAddr(addr)) {
                DPRINTF(CachePort, "Waiting for snoop response to be "
                        "sent\n");
                Tick when = snoopRespQueue.deferredPacketReadyTime();
                schedSendEvent(when);
                return true;
            }
            return false;
        }
    };
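
    // Illustrative only: the overridden sendDeferredPacket() above is
    // defined in the accompanying .cc file and is expected to ask the cache
    // for the next ready MSHR or write-buffer entry instead of only draining
    // the transmit list, roughly along these lines (assumed sketch, the
    // actual implementation may differ):
    //
    //     QueueEntry* entry = cache.getNextQueueEntry();
    //     if (entry && !checkConflictingSnoop(entry->blkAddr))
    //         waitingOnRetry = entry->sendPacket(cache);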

    /**
     * The memory-side port extends the base cache master port with
     * access functions for functional, atomic and timing snoops.
     */
    class MemSidePort : public CacheMasterPort
    {
      private:

        /** The cache-specific queue. */
        CacheReqPacketQueue _reqQueue;

        SnoopRespPacketQueue _snoopRespQueue;

        // a pointer to our specific cache implementation
        Cache *cache;

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt);

        virtual bool recvTimingResp(PacketPtr pkt);

        virtual Tick recvAtomicSnoop(PacketPtr pkt);

        virtual void recvFunctionalSnoop(PacketPtr pkt);

      public:

        MemSidePort(const std::string &_name, Cache *_cache,
                    const std::string &_label);
    };

    /** Tag and data Storage */
    BaseTags *tags;

    /** Prefetcher */
    BasePrefetcher *prefetcher;

    /** Temporary cache block for occasional transitory use */
    CacheBlk *tempBlock;

    /**
     * This cache should allocate a block on a line-sized write miss.
     */
    const bool doFastWrites;

    /**
     * Turn line-sized writes into WriteInvalidate transactions.
     */
    void promoteWholeLineWrites(PacketPtr pkt);

    /**
     * Notify the prefetcher on every access, not just misses.
     */
    const bool prefetchOnAccess;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;

    /**
     * Determine if clean lines should be written back or not. In
     * cases where a downstream cache is mostly inclusive we likely
     * want it to act as a victim cache also for lines that have not
     * been modified. Hence, we cannot simply drop the line (or send a
     * clean evict), but rather need to send the actual data.
     */
    const bool writebackClean;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * Writebacks from the tempBlock, resulting on the response path
     * in atomic mode, must happen after the call to recvAtomic has
     * finished (for the right ordering of the packets). We therefore
     * need to hold on to the packets, and have a method and an event
     * to send them.
     */
    PacketPtr tempBlockWriteback;

    /**
     * Send the outstanding tempBlock writeback. To be called after
     * recvAtomic finishes in cases where the block we filled is in
     * fact the tempBlock, and now needs to be written back.
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        PacketList writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        tempBlockWriteback = nullptr;
    }

    /**
     * An event to writeback the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventFunctionWrapper writebackTempBlockAtomicEvent;
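
    // Illustrative only: the event is initialised in the constructor (in the
    // accompanying .cc file). Assuming the standard EventFunctionWrapper
    // interface, a plausible construction wraps the method above and raises
    // the priority so the writeback runs before further same-tick activity:
    //
    //     writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
    //                                   name(), false,
    //                                   EventBase::Delayed_Writeback_Pri)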

    /**
     * Store the outstanding requests that we are expecting snoop
     * responses from so we can determine which snoop responses we
     * generated and which ones were merely forwarded.
     */
    std::unordered_set<RequestPtr> outstandingSnoop;

    /**
     * Does all the processing necessary to perform the provided request.
     * @param pkt The memory request to perform.
     * @param blk The cache block to be updated.
     * @param lat The latency of the access.
     * @param writebacks List for any writebacks that need to be performed.
     * @return Boolean indicating whether the request was satisfied.
     */
    bool access(PacketPtr pkt, CacheBlk *&blk,
                Cycles &lat, PacketList &writebacks);

    /**
     * Handle doing the Compare and Swap function for SPARC.
     */
    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);

    /**
     * Find a block frame for new block at address addr targeting the
     * given security space, assuming that the block is not currently
     * in the cache. Append writebacks if any to provided packet
     * list. Return free block frame. May return nullptr if there are
     * no replaceable blocks at the moment.
     */
    CacheBlk *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks);

    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);

    /**
     * Maintain the clusivity of this cache by potentially
     * invalidating a block. This method works in conjunction with
     * satisfyRequest, but is separate to allow us to handle all MSHR
     * targets before potentially dropping a block.
     *
     * @param from_cache Whether we have dealt with a packet from a cache
     * @param blk The block that should potentially be dropped
     */
    void maintainClusivity(bool from_cache, CacheBlk *blk);

    /**
     * Populates a cache block and handles all outstanding requests for the
     * satisfied fill request. This version takes two memory requests. One
     * contains the fill data, the other is an optional target to satisfy.
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param writebacks List for any writebacks that need to be performed.
     * @param allocate Whether to allocate a block or use the temp block
     * @return Pointer to the new cache block.
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
                         PacketList &writebacks, bool allocate);

    /**
     * Determine whether we should allocate on a fill or not. If this
     * cache is mostly inclusive with regards to the upstream cache(s)
     * we always allocate (for any non-forwarded and cacheable
     * requests). In the case of a mostly exclusive cache, we allocate
     * on fill only if the packet did not come from a cache, that is,
     * if we are dealing with a whole-line write (which behaves much
     * like a writeback), the original target packet came from a
     * non-caching source, or we are performing a prefetch or LLSC.
     *
     * @param cmd Command of the incoming requesting packet
     * @return Whether we should allocate on the fill
     */
    inline bool allocOnFill(MemCmd cmd) const override
    {
        return clusivity == Enums::mostly_incl ||
            cmd == MemCmd::WriteLineReq ||
            cmd == MemCmd::ReadReq ||
            cmd == MemCmd::WriteReq ||
            cmd.isPrefetch() ||
            cmd.isLLSC();
    }

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The result of the access.
     */
    bool recvTimingReq(PacketPtr pkt);

    /**
     * Insert writebacks into the write buffer
     */
    void doWritebacks(PacketList& writebacks, Tick forward_time);

    /**
     * Send writebacks down the memory hierarchy in atomic mode
     */
    void doWritebacksAtomic(PacketList& writebacks);

    /**
     * Handling the special case of uncacheable write responses to
     * make recvTimingResp less cluttered.
     */
    void handleUncacheableWriteResp(PacketPtr pkt);

    /**
     * Handles a response (cache line fill/write ack) from the bus.
     * @param pkt The response packet
     */
    void recvTimingResp(PacketPtr pkt);

    /**
     * Snoops bus transactions to maintain coherence.
     * @param pkt The current bus transaction.
     */
    void recvTimingSnoopReq(PacketPtr pkt);

    /**
     * Handle a snoop response.
     * @param pkt Snoop response packet
     */
    void recvTimingSnoopResp(PacketPtr pkt);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The number of ticks required for the access.
     */
    Tick recvAtomic(PacketPtr pkt);

    /**
     * Snoop for the provided request in the cache and return the estimated
     * time taken.
     * @param pkt The memory request to snoop
     * @return The number of ticks required for the snoop.
     */
    Tick recvAtomicSnoop(PacketPtr pkt);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @param fromCpuSide from the CPU side port or the memory side port
     */
    void functionalAccess(PacketPtr pkt, bool fromCpuSide);

    /**
     * Perform any necessary updates to the block and perform any data
     * exchange between the packet and the block. The flags of the
     * packet are also set accordingly.
     *
     * @param pkt Request packet from upstream that hit a block
     * @param blk Cache block that the packet hit
     * @param deferred_response Whether this hit is to a block that
     *                          originally missed
     * @param pending_downgrade Whether the writable flag is to be removed
     *
     * @return True if the block is to be invalidated
     */
    void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                        bool deferred_response = false,
                        bool pending_downgrade = false);

    void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                                bool already_copied, bool pending_inval);

    /**
     * Perform an upward snoop if needed, and update the block state
     * (possibly invalidating the block). Also create a response if required.
     *
     * @param pkt Snoop packet
     * @param blk Cache block being snooped
     * @param is_timing Timing or atomic for the response
     * @param is_deferred Is this a deferred snoop or not?
     * @param pending_inval Do we have a pending invalidation?
     *
     * @return The snoop delay incurred by the upwards snoop
     */
    uint32_t handleSnoop(PacketPtr pkt, CacheBlk *blk,
                         bool is_timing, bool is_deferred, bool pending_inval);

    /**
     * Create a writeback request for the given block.
     * @param blk The block to writeback.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a writeclean request for the given block.
     * @param blk The block to write clean
     * @param dest The destination of this clean operation
     * @return The write clean packet for the block.
     */
    PacketPtr writecleanBlk(CacheBlk *blk, Request::Flags dest = 0);

    /**
     * Create a CleanEvict request for the given block.
     * @param blk The block to evict.
     * @return The CleanEvict request for the block.
     */
    PacketPtr cleanEvictBlk(CacheBlk *blk);


    void memWriteback() override;
    void memInvalidate() override;
    bool isDirty() const override;

    /**
     * Cache block visitor that writes back dirty cache blocks using
     * functional writes.
     *
     * \return Always returns true.
     */
    bool writebackVisitor(CacheBlk &blk);
    /**
     * Cache block visitor that invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to memory.
     *
     * \return Always returns true.
     */
    bool invalidateVisitor(CacheBlk &blk);

    /**
     * Create an appropriate downstream bus request packet for the
     * given parameters.
     * @param cpu_pkt The miss that needs to be satisfied.
     * @param blk The block currently in the cache corresponding to
     *        cpu_pkt (nullptr if none).
     * @param needsWritable Indicates that the block must be writable
     * even if the request in cpu_pkt doesn't indicate that.
     * @return A new Packet containing the request, or nullptr if the
     * current request in cpu_pkt should just be forwarded on.
     */
    PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                               bool needsWritable) const;

    /**
     * Return the next queue entry to service, either a pending miss
     * from the MSHR queue, a buffered write from the write buffer, or
     * something from the prefetcher. This function is responsible
     * for prioritizing among those sources on the fly.
     */
    QueueEntry* getNextQueueEntry();

    /**
     * Send up a snoop request and find cached copies. If cached copies are
     * found, set the BLOCK_CACHED flag in pkt.
     */
    bool isCachedAbove(PacketPtr pkt, bool is_timing = true) const;

    /**
     * Return whether there are any outstanding misses.
     */
    bool outstandingMisses() const
    {
        return !mshrQueue.isEmpty();
    }

    CacheBlk *findBlock(Addr addr, bool is_secure) const {
        return tags->findBlock(addr, is_secure);
    }

    bool inCache(Addr addr, bool is_secure) const override {
        return (tags->findBlock(addr, is_secure) != 0);
    }

    bool inMissQueue(Addr addr, bool is_secure) const override {
        return (mshrQueue.findMatch(addr, is_secure) != 0);
    }

    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

  public:
    /** Instantiates a basic cache object. */
    Cache(const CacheParams *p);

    /** Non-default destructor is needed to deallocate memory. */
    virtual ~Cache();

    void regStats() override;

    /**
     * Take an MSHR, turn it into a suitable downstream packet, and
     * send it out. This construct allows a queue entry to choose a suitable
     * approach based on its type.
     *
     * @param mshr The MSHR to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendMSHRQueuePacket(MSHR* mshr);

    /**
     * Similar to sendMSHRQueuePacket, but for a write-queue entry
     * instead. Create the packet, and send it, and if successful also
     * mark the entry in service.
     *
     * @param wq_entry The write-queue entry to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);

    /** serialize the state of the caches
     * We currently don't support checkpointing cache state, so this panics.
     */
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

/**
 * Wrap a method and present it as a cache block visitor.
 *
 * For example the forEachBlk method in the tag arrays expects a
 * callable object/function as its parameter. This class wraps a
 * method in an object and presents a callable object that adheres to
 * the cache block visitor protocol.
 */
class CacheBlkVisitorWrapper : public CacheBlkVisitor
{
  public:
    typedef bool (Cache::*VisitorPtr)(CacheBlk &blk);

    CacheBlkVisitorWrapper(Cache &_cache, VisitorPtr _visitor)
        : cache(_cache), visitor(_visitor) {}

    bool operator()(CacheBlk &blk) override {
        return (cache.*visitor)(blk);
    }

  private:
    Cache &cache;
    VisitorPtr visitor;
};
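
/*
 * Illustrative (assumed) usage of CacheBlkVisitorWrapper: memWriteback() in
 * the accompanying .cc file is expected to pair the wrapper with
 * BaseTags::forEachBlk roughly as follows; the actual implementation may
 * differ:
 *
 *     void
 *     Cache::memWriteback()
 *     {
 *         CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
 *         tags->forEachBlk(visitor);
 *     }
 */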

/**
 * Cache block visitor that determines if there are dirty blocks in a
 * cache.
 *
 * Use with the forEachBlk method in the tag array to determine if the
 * array contains dirty blocks.
 */
class CacheBlkIsDirtyVisitor : public CacheBlkVisitor
{
  public:
    CacheBlkIsDirtyVisitor()
        : _isDirty(false) {}

    bool operator()(CacheBlk &blk) override {
        if (blk.isDirty()) {
            _isDirty = true;
            return false;
        } else {
            return true;
        }
    }

    /**
     * Does the array contain a dirty line?
     *
     * \return true if yes, false otherwise.
     */
    bool isDirty() const { return _isDirty; }

  private:
    bool _isDirty;
};

#endif // __MEM_CACHE_CACHE_HH__