cache.cc (11742:3dcf0b891749) vs cache.cc (11744:5d33c6972dda)
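The whole change applies one pattern: DPRINTF call sites that formatted a packet's command string, address, and size by hand are collapsed into a single pkt->print() argument, so every trace line renders packets the same way. The sketch below mirrors that before/after call shape outside of gem5; MiniPacket and the TRACE macro are hypothetical stand-ins invented for illustration, and the output format of print() is an assumption, not gem5's exact Packet::print() behaviour.

#include <cstdio>
#include <cstdint>
#include <sstream>
#include <string>

// Hypothetical stand-in for gem5's Packet, only to show the call-site change.
struct MiniPacket {
    std::string cmd;      // e.g. "ReadReq"
    std::uint64_t addr;
    unsigned size;
    bool secure;

    // accessors used by the old-style trace lines
    const std::string &cmdString() const { return cmd; }
    std::uint64_t getAddr() const { return addr; }
    unsigned getSize() const { return size; }

    // single formatter playing the role of Packet::print(): one string
    // carrying command, address range and security state (format assumed)
    std::string print() const {
        std::ostringstream os;
        os << cmd << " [" << std::hex << addr << ':' << (addr + size - 1)
           << ']' << (secure ? " (s)" : " (ns)");
        return os.str();
    }
};

// stand-in for gem5's DPRINTF tracing macro
#define TRACE(...) std::printf(__VA_ARGS__)

int main() {
    MiniPacket pkt{"ReadReq", 0x1000, 64, false};

    // Before: every call site repeats the cmd/addr/size formatting.
    TRACE("%s for %s addr %#llx size %u\n", __func__,
          pkt.cmdString().c_str(),
          static_cast<unsigned long long>(pkt.getAddr()), pkt.getSize());

    // After: the packet formats itself; the call site shrinks to one argument.
    TRACE("%s for %s\n", __func__, pkt.print().c_str());
    return 0;
}

In the hunks below, the longer DPRINTF that passes cmdString()/getAddr()/getSize() is the 11742 code, the shorter line passing pkt->print() is its 11744 replacement, and the surrounding numbered lines are unchanged context.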
1/*
2 * Copyright (c) 2010-2016 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software

--- 166 unchanged lines hidden (view full) ---

175 if (blk->checkWrite(pkt)) {
176 pkt->writeDataToBlock(blk->data, blkSize);
177 }
178 // Always mark the line as dirty (and thus transition to the
179 // Modified state) even if we are a failed StoreCond so we
180 // supply data to any snoops that have appended themselves to
181 // this cache before knowing the store will fail.
182 blk->status |= BlkDirty;
183 DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d (write)\n",
184 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
183 DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
185 } else if (pkt->isRead()) {
186 if (pkt->isLLSC()) {
187 blk->trackLoadLocked(pkt);
188 }
189
190 // all read responses have a data payload
191 assert(pkt->hasRespData());
192 pkt->setDataFromBlock(blk->data, blkSize);

--- 74 unchanged lines hidden (view full) ---

267 // has the line in Shared state needs to be made aware
268 // that the data it already has is in fact dirty
269 pkt->setCacheResponding();
270 blk->status &= ~BlkDirty;
271 }
272 } else {
273 assert(pkt->isInvalidate());
274 invalidateBlock(blk);
275 DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d (invalidation)\n",
276 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
274 DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
275 pkt->print());
277 }
278}
279
280/////////////////////////////////////////////////////
281//
282// Access path: requests coming in from the CPU side
283//
284/////////////////////////////////////////////////////

--- 4 unchanged lines hidden (view full) ---

289{
290 // sanity check
291 assert(pkt->isRequest());
292
293 chatty_assert(!(isReadOnly && pkt->isWrite()),
294 "Should never see a write in a read-only cache %s\n",
295 name());
296
297 DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d\n", __func__,
298 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
296 DPRINTF(CacheVerbose, "%s for %s\n", __func__, pkt->print());
299
300 if (pkt->req->isUncacheable()) {
301 DPRINTF(Cache, "%s%s addr %#llx uncacheable\n", pkt->cmdString(),
302 pkt->req->isInstFetch() ? " (ifetch)" : "",
303 pkt->getAddr());
299 DPRINTF(Cache, "uncacheable: %s\n", pkt->print());
304
305 // flush and invalidate any existing block
306 CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
307 if (old_blk && old_blk->isValid()) {
308 if (old_blk->isDirty() || writebackClean)
309 writebacks.push_back(writebackBlk(old_blk));
310 else
311 writebacks.push_back(cleanEvictBlk(old_blk));

--- 8 unchanged lines hidden (view full) ---

320 }
321
322 ContextID id = pkt->req->hasContextId() ?
323 pkt->req->contextId() : InvalidContextID;
 324 // Here lat is the value passed as a parameter to the accessBlock()
 325 // function, which can modify it.
326 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat, id);
327
328 DPRINTF(Cache, "%s%s addr %#llx size %d (%s) %s\n", pkt->cmdString(),
329 pkt->req->isInstFetch() ? " (ifetch)" : "",
330 pkt->getAddr(), pkt->getSize(), pkt->isSecure() ? "s" : "ns",
324 DPRINTF(Cache, "%s %s\n", pkt->print(),
331 blk ? "hit " + blk->print() : "miss");
332
333
334 if (pkt->isEviction()) {
335 // We check for presence of block in above caches before issuing
336 // Writeback or CleanEvict to write buffer. Therefore the only
337 // possible cases can be of a CleanEvict packet coming from above
338 // encountering a Writeback generated in this cache peer cache and

--- 199 unchanged lines hidden (view full) ---

538 delete wbPkt;
539 }
540}
541
542
543void
544Cache::recvTimingSnoopResp(PacketPtr pkt)
545{
546 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
547 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
540 DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());
548
549 assert(pkt->isResponse());
550 assert(!system->bypassCaches());
551
552 // determine if the response is from a snoop request we created
553 // (in which case it should be in the outstandingSnoop), or if we
554 // merely forwarded someone else's snoop request
555 const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==

--- 49 unchanged lines hidden (view full) ---

605 }
606
607 promoteWholeLineWrites(pkt);
608
609 if (pkt->cacheResponding()) {
610 // a cache above us (but not where the packet came from) is
611 // responding to the request, in other words it has the line
612 // in Modified or Owned state
613 DPRINTF(Cache, "Cache above responding to %#llx (%s): "
614 "not responding\n",
615 pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
606 DPRINTF(Cache, "Cache above responding to %s: not responding\n",
607 pkt->print());
616
617 // if the packet needs the block to be writable, and the cache
618 // that has promised to respond (setting the cache responding
619 // flag) is not providing writable (it is in Owned rather than
620 // the Modified state), we know that there may be other Shared
621 // copies in the system; go out and invalidate them all
622 assert(pkt->needsWritable() && !pkt->responderHadWritable());
623

--- 107 unchanged lines hidden (view full) ---

731
732 // In this case we are considering request_time that takes
733 // into account the delay of the xbar, if any, and just
734 // lat, neglecting responseLatency, modelling hit latency
 735 // just as lookupLatency or the value of lat overridden
 736 // by access(), which calls the accessBlock() function.
737 cpuSidePort->schedTimingResp(pkt, request_time, true);
738 } else {
739 DPRINTF(Cache, "%s satisfied %s addr %#llx, no response needed\n",
740 __func__, pkt->cmdString(), pkt->getAddr());
731 DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
732 pkt->print());
741
742 // queue the packet for deletion, as the sending cache is
743 // still relying on it; if the block is found in access(),
744 // CleanEvict and Writeback messages will be deleted
745 // here as well
746 pendingDelete.reset(pkt);
747 }
748 } else {

--- 59 unchanged lines hidden (view full) ---

808 // Coalesce unless it was a software prefetch (see above).
809 if (pkt) {
810 assert(!pkt->isWriteback());
811 // CleanEvicts corresponding to blocks which have
812 // outstanding requests in MSHRs are simply sunk here
813 if (pkt->cmd == MemCmd::CleanEvict) {
814 pendingDelete.reset(pkt);
815 } else {
816 DPRINTF(Cache, "%s coalescing MSHR for %s addr %#llx "
817 "size %d\n", __func__, pkt->cmdString(),
818 pkt->getAddr(), pkt->getSize());
808 DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
809 pkt->print());
819
820 assert(pkt->req->masterId() < system->maxMasters());
821 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
822 // We use forward_time here because it is the same
823 // considering new targets. We have multiple
824 // requests for the same address here. It
825 // specifies the latency to allocate an internal
826 // buffer and to schedule an event to the queued

--- 135 unchanged lines hidden (view full) ---

962 // packet as having sharers (not passing writable), pass that info
963 // downstream
964 if (cpu_pkt->hasSharers() && !needsWritable) {
965 // note that cpu_pkt may have spent a considerable time in the
966 // MSHR queue and that the information could possibly be out
967 // of date, however, there is no harm in conservatively
968 // assuming the block has sharers
969 pkt->setHasSharers();
970 DPRINTF(Cache, "%s passing hasSharers from %s to %s addr %#llx "
971 "size %d\n",
972 __func__, cpu_pkt->cmdString(), pkt->cmdString(),
973 pkt->getAddr(), pkt->getSize());
961 DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
962 __func__, cpu_pkt->print(), pkt->print());
974 }
975
976 // the packet should be block aligned
977 assert(pkt->getAddr() == blockAlign(pkt->getAddr()));
978
979 pkt->allocate();
980 DPRINTF(Cache, "%s created %s from %s for addr %#llx size %d\n",
981 __func__, pkt->cmdString(), cpu_pkt->cmdString(), pkt->getAddr(),
982 pkt->getSize());
969 DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
970 cpu_pkt->print());
983 return pkt;
984}
985
986
987Tick
988Cache::recvAtomic(PacketPtr pkt)
989{
990 // We are in atomic mode so we pay just for lookupLatency here.
991 Cycles lat = lookupLatency;
992
993 // Forward the request if the system is in cache bypass mode.
994 if (system->bypassCaches())
995 return ticksToCycles(memSidePort->sendAtomic(pkt));
996
997 promoteWholeLineWrites(pkt);
998
999 // follow the same flow as in recvTimingReq, and check if a cache
1000 // above us is responding
1001 if (pkt->cacheResponding()) {
1002 DPRINTF(Cache, "Cache above responding to %#llx (%s): "
1003 "not responding\n",
1004 pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
990 DPRINTF(Cache, "Cache above responding to %s: not responding\n",
991 pkt->print());
1005
1006 // if a cache is responding, and it had the line in Owned
1007 // rather than Modified state, we need to invalidate any
1008 // copies that are not on the same path to memory
1009 assert(pkt->needsWritable() && !pkt->responderHadWritable());
1010 lat += ticksToCycles(memSidePort->sendAtomic(pkt));
1011
1012 return lat * clockPeriod();

--- 28 unchanged lines hidden (view full) ---

1041 bool is_forward = (bus_pkt == nullptr);
1042
1043 if (is_forward) {
1044 // just forwarding the same request to the next level
1045 // no local cache operation involved
1046 bus_pkt = pkt;
1047 }
1048
1049 DPRINTF(Cache, "Sending an atomic %s for %#llx (%s)\n",
1050 bus_pkt->cmdString(), bus_pkt->getAddr(),
1051 bus_pkt->isSecure() ? "s" : "ns");
1036 DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
1037 bus_pkt->print());
1052
1053#if TRACING_ON
1054 CacheBlk::State old_state = blk ? blk->status : 0;
1055#endif
1056
1057 lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));
1058
1059 bool is_invalidate = bus_pkt->isInvalidate();
1060
1061 // We are now dealing with the response handling
1062 DPRINTF(Cache, "Receive response: %s for addr %#llx (%s) in "
1063 "state %i\n", bus_pkt->cmdString(), bus_pkt->getAddr(),
1064 bus_pkt->isSecure() ? "s" : "ns",
1065 old_state);
1048 DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
1049 bus_pkt->print(), old_state);
1066
1067 // If packet was a forward, the response (if any) is already
1068 // in place in the bus_pkt == pkt structure, so we don't need
1069 // to do anything. Otherwise, use the separate bus_pkt to
1070 // generate response to pkt and then delete it.
1071 if (!is_forward) {
1072 if (pkt->needsResponse()) {
1073 assert(bus_pkt->isResponse());

--- 119 unchanged lines hidden (view full) ---

1193 (mshr && mshr->inService && mshr->isPendingModified()));
1194
1195 bool done = have_dirty
1196 || cpuSidePort->checkFunctional(pkt)
1197 || mshrQueue.checkFunctional(pkt, blk_addr)
1198 || writeBuffer.checkFunctional(pkt, blk_addr)
1199 || memSidePort->checkFunctional(pkt);
1200
1201 DPRINTF(CacheVerbose, "functional %s %#llx (%s) %s%s%s\n",
1202 pkt->cmdString(), pkt->getAddr(), is_secure ? "s" : "ns",
1185 DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
1203 (blk && blk->isValid()) ? "valid " : "",
1204 have_data ? "data " : "", done ? "done " : "");
1205
1206 // We're leaving the cache, so pop cache->name() label
1207 pkt->popLabel();
1208
1209 if (done) {
1210 pkt->makeResponse();

--- 38 unchanged lines hidden (view full) ---

1249 // all header delay should be paid for by the crossbar, unless
1250 // this is a prefetch response from above
1251 panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
1252 "%s saw a non-zero packet delay\n", name());
1253
1254 bool is_error = pkt->isError();
1255
1256 if (is_error) {
1257 DPRINTF(Cache, "Cache received packet with error for addr %#llx (%s), "
1258 "cmd: %s\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns",
1259 pkt->cmdString());
1240 DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
1241 pkt->print());
1260 }
1261
1262 DPRINTF(Cache, "Handling response %s for addr %#llx size %d (%s)\n",
1263 pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
1264 pkt->isSecure() ? "s" : "ns");
1244 DPRINTF(Cache, "%s: Handling response %s\n", __func__,
1245 pkt->print());
1265
1266 // if this is a write, we should be looking at an uncacheable
1267 // write
1268 if (pkt->isWrite()) {
1269 assert(pkt->req->isUncacheable());
1270 handleUncacheableWriteResp(pkt);
1271 return;
1272 }

--- 156 unchanged lines hidden (view full) ---

1429 if (is_error)
1430 tgt_pkt->copyError(pkt);
1431 if (tgt_pkt->cmd == MemCmd::ReadResp &&
1432 (is_invalidate || mshr->hasPostInvalidate())) {
1433 // If intermediate cache got ReadRespWithInvalidate,
1434 // propagate that. Response should not have
1435 // isInvalidate() set otherwise.
1436 tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
1437 DPRINTF(Cache, "%s updated cmd to %s for addr %#llx\n",
1438 __func__, tgt_pkt->cmdString(), tgt_pkt->getAddr());
1418 DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
1419 tgt_pkt->print());
1439 }
1440 // Reset the bus additional time as it is now accounted for
1441 tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
1442 cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true);
1443 break;
1444
1445 case MSHR::Target::FromPrefetcher:
1446 assert(tgt_pkt->cmd == MemCmd::HardPFReq);

--- 78 unchanged lines hidden (view full) ---

1525 if (isCachedAbove(wcPkt))
1526 delete wcPkt;
1527 else
1528 allocateWriteBuffer(wcPkt, forward_time);
1529 }
1530 blk->invalidate();
1531 }
1532
1533 DPRINTF(CacheVerbose, "Leaving %s with %s for addr %#llx\n", __func__,
1534 pkt->cmdString(), pkt->getAddr());
1514 DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
1535 delete pkt;
1536}
1537
1538PacketPtr
1539Cache::writebackBlk(CacheBlk *blk)
1540{
1541 chatty_assert(!isReadOnly || writebackClean,
1542 "Writeback from read-only cache");

--- 9 unchanged lines hidden (view full) ---

1552 req->taskId(blk->task_id);
1553 blk->task_id= ContextSwitchTaskId::Unknown;
1554 blk->tickInserted = curTick();
1555
1556 PacketPtr pkt =
1557 new Packet(req, blk->isDirty() ?
1558 MemCmd::WritebackDirty : MemCmd::WritebackClean);
1559
1560 DPRINTF(Cache, "Create Writeback %#llx writable: %d, dirty: %d\n",
1561 pkt->getAddr(), blk->isWritable(), blk->isDirty());
1540 DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
1541 pkt->print(), blk->isWritable(), blk->isDirty());
1562
1563 if (blk->isWritable()) {
1564 // not asserting shared means we pass the block in modified
1565 // state, mark our own block non-writeable
1566 blk->status &= ~BlkWritable;
1567 } else {
1568 // we are in the Owned state, tell the receiver
1569 pkt->setHasSharers();

--- 21 unchanged lines hidden (view full) ---

1591 req->setFlags(Request::SECURE);
1592
1593 req->taskId(blk->task_id);
1594 blk->task_id = ContextSwitchTaskId::Unknown;
1595 blk->tickInserted = curTick();
1596
1597 PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
1598 pkt->allocate();
1599 DPRINTF(Cache, "%s%s %x Create CleanEvict\n", pkt->cmdString(),
1600 pkt->req->isInstFetch() ? " (ifetch)" : "",
1601 pkt->getAddr());
1579 DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());
1602
1603 return pkt;
1604}
1605
1606void
1607Cache::memWriteback()
1608{
1609 CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);

--- 231 unchanged lines hidden (view full) ---

1841void
1842Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
1843 bool already_copied, bool pending_inval)
1844{
1845 // sanity check
1846 assert(req_pkt->isRequest());
1847 assert(req_pkt->needsResponse());
1848
1849 DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
1850 req_pkt->cmdString(), req_pkt->getAddr(), req_pkt->getSize());
1827 DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
1851 // timing-mode snoop responses require a new packet, unless we
1852 // already made a copy...
1853 PacketPtr pkt = req_pkt;
1854 if (!already_copied)
1855 // do not clear flags, and allocate space for data if the
1856 // packet needs it (the only packets that carry data are read
1857 // responses)
1858 pkt = new Packet(req_pkt, false, req_pkt->isRead());

--- 15 unchanged lines hidden (view full) ---

1874 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1875 }
1876 // Here we consider forward_time, paying for just forward latency and
1877 // also charging the delay provided by the xbar.
1878 // forward_time is used as send_time in next allocateWriteBuffer().
1879 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1880 // Here we reset the timing of the packet.
1881 pkt->headerDelay = pkt->payloadDelay = 0;
1882 DPRINTF(CacheVerbose,
1883 "%s created response: %s addr %#llx size %d tick: %lu\n",
1884 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
1885 forward_time);
1859 DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
1860 pkt->print(), forward_time);
1886 memSidePort->schedTimingSnoopResp(pkt, forward_time, true);
1887}
1888
1889uint32_t
1890Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
1891 bool is_deferred, bool pending_inval)
1892{
1893 DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d\n", __func__,
1894 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1868 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
1895 // deferred snoops can only happen in timing mode
1896 assert(!(is_deferred && !is_timing));
1897 // pending_inval only makes sense on deferred snoops
1898 assert(!(pending_inval && !is_deferred));
1899 assert(pkt->isRequest());
1900
1901 // the packet may get modified if we or a forwarded snooper
1902 // responds in atomic mode, so remember a few things about the
1903 // original packet up front
1904 bool invalidate = pkt->isInvalidate();
1905 bool M5_VAR_USED needs_writable = pkt->needsWritable();
1906
1907 // at the moment we could get an uncacheable write which does not
1908 // have the invalidate flag, and we need a suitable way of dealing
1909 // with this case
1910 panic_if(invalidate && pkt->req->isUncacheable(),
1911 "%s got an invalidating uncacheable snoop request %s to %#llx",
1912 name(), pkt->cmdString(), pkt->getAddr());
1885 "%s got an invalidating uncacheable snoop request %s",
1886 name(), pkt->print());
1913
1914 uint32_t snoop_delay = 0;
1915
1916 if (forwardSnoops) {
1917 // first propagate snoop upward to see if anyone above us wants to
1918 // handle it. save & restore packet src since it will get
1919 // rewritten to be relative to cpu-side bus (if any)
1920 bool alreadyResponded = pkt->cacheResponding();

--- 36 unchanged lines hidden (view full) ---

1957 // cache-to-cache response from some upper cache:
1958 // forward response to original requester
1959 assert(pkt->isResponse());
1960 }
1961 }
1962 }
1963
1964 if (!blk || !blk->isValid()) {
1939 DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
1940 pkt->print());
1965 if (is_deferred) {
1966 // we no longer have the block, and will not respond, but a
1967 // packet was allocated in MSHR::handleSnoop and we have
1968 // to delete it
1969 assert(pkt->needsResponse());
1970
1971 // we have passed the block to a cache upstream, that
1972 // cache should be responding
1973 assert(pkt->cacheResponding());
1974
1975 delete pkt;
1976 }
1977
1978 DPRINTF(CacheVerbose, "%s snoop miss for %s addr %#llx size %d\n",
1979 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1980 return snoop_delay;
1981 } else {
1953 return snoop_delay;
1954 } else {
1982 DPRINTF(Cache, "%s snoop hit for %s addr %#llx size %d, "
1983 "old state is %s\n", __func__, pkt->cmdString(),
1984 pkt->getAddr(), pkt->getSize(), blk->print());
1955 DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
1956 pkt->print(), blk->print());
1985 }
1986
1987 chatty_assert(!(isReadOnly && blk->isDirty()),
1988 "Should never have a dirty block in a read-only cache %s\n",
1989 name());
1990
1991 // We may end up modifying both the block state and the packet (if
1992 // we respond in atomic mode), so just figure out what to do now

--- 5 unchanged lines hidden (view full) ---

1998 bool have_writable = blk->isWritable();
1999
 2000 // Invalidate any prefetches from below that would strip write permissions
 2001 // MemCmd::HardPFReq is only observed by upstream caches. After missing
 2002 // above and in its own cache, a new MemCmd::ReadReq is created that
2003 // downstream caches observe.
2004 if (pkt->mustCheckAbove()) {
2005 DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
2006 "from lower cache\n", pkt->getAddr(), pkt->cmdString());
1978 "from lower cache\n", pkt->getAddr(), pkt->print());
2007 pkt->setBlockCached();
2008 return snoop_delay;
2009 }
2010
2011 if (pkt->isRead() && !invalidate) {
2012 // reading without requiring the line in a writable state
2013 assert(!needs_writable);
2014 pkt->setHasSharers();

--- 26 unchanged lines hidden (view full) ---

 2041 // copy below and all other copies will be invalidated
2042 // through express snoops, and if needsWritable is not set
2043 // we already called setHasSharers above
2044 }
2045
2046 // if we are returning a writable and dirty (Modified) line,
2047 // we should be invalidating the line
2048 panic_if(!invalidate && !pkt->hasSharers(),
2049 "%s is passing a Modified line through %s to %#llx, "
2050 "but keeping the block",
2051 name(), pkt->cmdString(), pkt->getAddr());
2021 "%s is passing a Modified line through %s, "
2022 "but keeping the block", name(), pkt->print());
2052
2053 if (is_timing) {
2054 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
2055 } else {
2056 pkt->makeAtomicResponse();
2057 // packets such as upgrades do not actually have any data
2058 // payload
2059 if (pkt->hasData())

--- 25 unchanged lines hidden (view full) ---

2085
2086 return snoop_delay;
2087}
2088
2089
2090void
2091Cache::recvTimingSnoopReq(PacketPtr pkt)
2092{
2093 DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d\n", __func__,
2094 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
2064 DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
2095
2096 // Snoops shouldn't happen when bypassing caches
2097 assert(!system->bypassCaches());
2098
2099 // no need to snoop requests that are not in range
2100 if (!inRange(pkt->getAddr())) {
2101 return;
2102 }

--- 10 unchanged lines hidden (view full) ---

2113 // tentative, for cases where we return before an upward snoop
2114 // happens below.
2115 pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
2116 lookupLatency * clockPeriod());
2117
 2118 // Inform a request (Prefetch, CleanEvict or Writeback) from below of
 2119 // an MSHR hit by calling setBlockCached.
2120 if (mshr && pkt->mustCheckAbove()) {
2121 DPRINTF(Cache, "Setting block cached for %s from"
2122 "lower cache on mshr hit %#x\n",
2123 pkt->cmdString(), pkt->getAddr());
2091 DPRINTF(Cache, "Setting block cached for %s from lower cache on "
2092 "mshr hit\n", pkt->print());
2124 pkt->setBlockCached();
2125 return;
2126 }
2127
2128 // Let the MSHR itself track the snoop and decide whether we want
2129 // to go ahead and do the regular cache snoop
2130 if (mshr && mshr->handleSnoop(pkt, order++)) {
2131 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."

--- 20 unchanged lines hidden (view full) ---

2152 assert(wb_pkt->isEviction());
2153
2154 if (pkt->isEviction()) {
2155 // if the block is found in the write queue, set the BLOCK_CACHED
2156 // flag for Writeback/CleanEvict snoop. On return the snoop will
2157 // propagate the BLOCK_CACHED flag in Writeback packets and prevent
2158 // any CleanEvicts from travelling down the memory hierarchy.
2159 pkt->setBlockCached();
2160 DPRINTF(Cache, "Squashing %s from lower cache on writequeue hit"
2161 " %#x\n", pkt->cmdString(), pkt->getAddr());
2129 DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
2130 "hit\n", __func__, pkt->print());
2162 return;
2163 }
2164
2165 // conceptually writebacks are no different to other blocks in
2166 // this cache, so the behaviour is modelled after handleSnoop,
2167 // the difference being that instead of querying the block
2168 // state to determine if it is dirty and writable, we use the
2169 // command and fields of the writeback packet

--- 195 unchanged lines hidden (view full) ---

2365bool
2366Cache::sendMSHRQueuePacket(MSHR* mshr)
2367{
2368 assert(mshr);
2369
2370 // use request from 1st target
2371 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
2372
2373 DPRINTF(Cache, "%s MSHR %s for addr %#llx size %d\n", __func__,
2374 tgt_pkt->cmdString(), tgt_pkt->getAddr(),
2375 tgt_pkt->getSize());
2342 DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());
2376
2377 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
2378
2379 if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
2380 // we should never have hardware prefetches to allocated
2381 // blocks
2382 assert(blk == nullptr);
2383

--- 101 unchanged lines hidden (view full) ---

2485bool
2486Cache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
2487{
2488 assert(wq_entry);
2489
2490 // always a single target for write queue entries
2491 PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
2492
2493 DPRINTF(Cache, "%s write %s for addr %#llx size %d\n", __func__,
2494 tgt_pkt->cmdString(), tgt_pkt->getAddr(),
2495 tgt_pkt->getSize());
2460 DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());
2496
2497 // forward as is, both for evictions and uncacheable writes
2498 if (!memSidePort->sendTimingReq(tgt_pkt)) {
2499 // note that we have now masked any requestBus and
2500 // schedSendEvent (we will wait for a retry before
2501 // doing anything), and this is so even if we do not
2502 // care about this packet and might override it before
2503 // it gets retried

--- 185 unchanged lines hidden ---