old ( 11278:18411ccc4f3c ) new ( 11284:b3926db25371 )
1/*
2 * Copyright (c) 2010-2015 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software

--- 143 unchanged lines hidden (view full) ---

152
153 assert(blk && blk->isValid());
154 // Occasionally this is not true... if we are a lower-level cache
155 // satisfying a string of Read and ReadEx requests from
156 // upper-level caches, a Read will mark the block as shared but we
157 // can satisfy a following ReadEx anyway since we can rely on the
158 // Read requester(s) to have buffered the ReadEx snoop and to
159 // invalidate their blocks after receiving them.
160 // assert(!pkt->needsExclusive() || blk->isWritable());
161 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
162
163 // Check RMW operations first since both isRead() and
164 // isWrite() will be true for them
165 if (pkt->cmd == MemCmd::SwapReq) {
166 cmpAndSwap(blk, pkt);
167 } else if (pkt->isWrite()) {
168 assert(blk->isWritable());
169 // Write or WriteLine at the first cache with block in Exclusive
170 if (blk->checkWrite(pkt)) {
171 pkt->writeDataToBlock(blk->data, blkSize);
172 }
173 // Always mark the line as dirty even if we are a failed
174 // StoreCond so we supply data to any snoops that have
175 // appended themselves to this cache before knowing the store
176 // will fail.
177 blk->status |= BlkDirty;
178 DPRINTF(Cache, "%s for %s addr %#llx size %d (write)\n", __func__,
179 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
180 } else if (pkt->isRead()) {
181 if (pkt->isLLSC()) {
182 blk->trackLoadLocked(pkt);
183 }
184 pkt->setDataFromBlock(blk->data, blkSize);
185 // determine if this read is from a (coherent) cache, or not
186 // by looking at the command type; we could potentially add a
187 // packet attribute such as 'FromCache' to make this check a
188 // bit cleaner
189 if (pkt->cmd == MemCmd::ReadExReq ||
190 pkt->cmd == MemCmd::ReadSharedReq ||
191 pkt->cmd == MemCmd::ReadCleanReq ||
192 pkt->cmd == MemCmd::SCUpgradeFailReq) {
193 assert(pkt->getSize() == blkSize);
194 // special handling for coherent block requests from
195 // upper-level caches
196 if (pkt->needsExclusive()) {
197 // sanity check
198 assert(pkt->cmd == MemCmd::ReadExReq ||
199 pkt->cmd == MemCmd::SCUpgradeFailReq);
200
201 // if we have a dirty copy, make sure the recipient
202 // keeps it marked dirty
203 if (blk->isDirty()) {
204 pkt->assertMemInhibit();
205 }
206 // on ReadExReq we give up our copy unconditionally,
207 // even if this cache is mostly inclusive, we may want
208 // to revisit this
209 invalidateBlock(blk);
210 } else if (blk->isWritable() && !pending_downgrade &&
211 !pkt->sharedAsserted() &&
212 pkt->cmd != MemCmd::ReadCleanReq) {
213 // we can give the requester an exclusive copy (by not
214 // asserting shared line) on a read request if:
215 // - we have an exclusive copy at this level (& below)
216 // - we don't have a pending snoop from below
217 // signaling another read request
218 // - no other cache above has a copy (otherwise it
219 // would have asserted shared line on request)
220 // - we are not satisfying an instruction fetch (this
221 // prevents dirty data in the i-cache)
222
223 if (blk->isDirty()) {
224 // special considerations if we're owner:
225 if (!deferred_response) {
226 // if we are responding immediately and can
227 // signal that we're transferring ownership
228 // (inhibit set) along with exclusivity
229 // (shared not set), do so
230 pkt->assertMemInhibit();
231
232 // if this cache is mostly inclusive, we keep
233 // the block as writable (exclusive), and pass
234 // it upwards as writable and dirty
235 // (modified), hence we have multiple caches
236 // considering the same block writable,
237 // something that we get away with due to the
238 // fact that: 1) this cache has been
239 // considered the ordering point and has
240 // responded to all snoops up till now, and 2)
241 // we always snoop upwards before consulting
242 // the local cache, both on a normal request
243 // (snooping done by the crossbar), and on a
244 // snoop
245 blk->status &= ~BlkDirty;
246
247 // if this cache is mostly exclusive with
248 // respect to the cache above, drop the block
249 if (clusivity == Enums::mostly_excl) {
250 invalidateBlock(blk);
251 }
252 } else {
253 // if we're responding after our own miss,
254 // there's a window where the recipient didn't
255 // know it was getting ownership and may not
256 // have responded to snoops correctly, so we
257 // can't pass off ownership *or* exclusivity
258 pkt->assertShared();
259 }
260 }
261 } else {
262 // otherwise only respond with a shared copy
263 pkt->assertShared();
264 }
265 }
266 } else {
267 // Upgrade or Invalidate, since we have it Exclusively (E or
268 // M), we ack then invalidate.
269 assert(pkt->isUpgrade() || pkt->isInvalidate());
270
271 // for invalidations we could be looking at the temp block
272 // (for upgrades we always allocate)
273 invalidateBlock(blk);
274 DPRINTF(Cache, "%s for %s addr %#llx size %d (invalidation)\n",
275 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
276 }
277}
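The read path above can be summarised as follows. This is a minimal standalone sketch with made-up types (BlockState, ReadOutcome, Clusivity); it is not gem5's Packet/CacheBlk API, just a restatement of the branch structure in satisfyCpuSideRequest.

    // Hypothetical restatement of satisfyCpuSideRequest's read handling.
    // BlockState/ReadOutcome/Clusivity are illustrative, not gem5 types.
    struct BlockState { bool writable; bool dirty; };

    enum class Clusivity { MostlyIncl, MostlyExcl };

    struct ReadOutcome {
        bool assert_shared = false;      // grant only a shared copy
        bool assert_mem_inhibit = false; // pass dirty data / ownership
        bool invalidate_local = false;   // drop our own copy
        bool clear_local_dirty = false;  // dirty responsibility moves upstream
    };

    ReadOutcome satisfyCacheRead(const BlockState &blk, bool needs_exclusive,
                                 bool pending_downgrade, bool shared_asserted,
                                 bool read_clean, bool deferred_response,
                                 Clusivity clusivity)
    {
        ReadOutcome out;
        if (needs_exclusive) {
            // ReadExReq / SCUpgradeFailReq: hand over everything
            out.assert_mem_inhibit = blk.dirty;
            out.invalidate_local = true;
        } else if (blk.writable && !pending_downgrade && !shared_asserted &&
                   !read_clean) {
            // we may grant exclusivity simply by not asserting shared
            if (blk.dirty) {
                if (!deferred_response) {
                    out.assert_mem_inhibit = true;   // transfer ownership now
                    out.clear_local_dirty = true;
                    out.invalidate_local =
                        (clusivity == Clusivity::MostlyExcl);
                } else {
                    // responding after our own miss: too late to pass
                    // ownership or exclusivity safely
                    out.assert_shared = true;
                }
            }
        } else {
            out.assert_shared = true;    // only a shared copy
        }
        return out;
    }

In short: exclusivity is granted by not asserting the shared line, and dirty ownership is only passed (mem-inhibit asserted) when the response is immediate.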
278
279
280/////////////////////////////////////////////////////
281//
282// MSHR helper functions
283//
284/////////////////////////////////////////////////////
285
286
287void
288Cache::markInService(MSHR *mshr, bool pending_dirty_resp)
289{
290 markInServiceInternal(mshr, pending_dirty_resp);
291}
292
293/////////////////////////////////////////////////////
294//
295// Access path: requests coming in from the CPU side
296//
297/////////////////////////////////////////////////////
298

--- 116 unchanged lines hidden (view full) ---

415 blk->status |= BlkSecure;
416 }
417 }
418 // only mark the block dirty if we got a writeback command,
419 // and leave it as is for a clean writeback
420 if (pkt->cmd == MemCmd::WritebackDirty) {
421 blk->status |= BlkDirty;
422 }
423 // if shared is not asserted we got the writeback in modified
424 // state, if it is asserted we are in the owned state
425 if (!pkt->sharedAsserted()) {
426 blk->status |= BlkWritable;
427 }
428 // nothing else to do; writeback doesn't expect response
429 assert(!pkt->needsResponse());
430 std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
431 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
432 incHitCount(pkt);
433 return true;
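For reference, the state mapping applied when a writeback is accepted boils down to the following sketch; Blk and the status flags are illustrative, assuming the same BlkDirty/BlkWritable semantics as above, and this is not the actual gem5 code.

    // Illustrative Blk/status flags; same BlkDirty/BlkWritable meaning as above.
    struct Blk { unsigned status = 0; };
    enum : unsigned { BlkWritable = 0x2, BlkDirty = 0x4 };

    void acceptWriteback(Blk &blk, bool writeback_dirty, bool shared_asserted)
    {
        if (writeback_dirty)
            blk.status |= BlkDirty;     // WritebackDirty: line is dirty here now
        if (!shared_asserted)
            blk.status |= BlkWritable;  // sender was Modified: we hold M/E
        // shared asserted -> sender was Owned: we keep the line non-writable
    }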

--- 6 unchanged lines hidden (view full) ---

440 return true;
441 }
442 // We didn't find the block here, propagate the CleanEvict further
443 // down the memory hierarchy. Returning false will treat the CleanEvict
444 // like a Writeback which could not find a replaceable block so has to
445 // go to next level.
446 return false;
447 } else if ((blk != NULL) &&
448 (pkt->needsExclusive() ? blk->isWritable()
449 : blk->isReadable())) {
450 // OK to satisfy access
451 incHitCount(pkt);
452 satisfyCpuSideRequest(pkt, blk);
453 return true;
454 }
455
456 // Can't satisfy access normally... either no block (blk == NULL)
457 // or have block but need exclusive & only have shared.
458
459 incMissCount(pkt);
460
461 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
462 // complete miss on store conditional... just give up now
463 pkt->req->setExtraData(0);
464 return true;
465 }

--- 136 unchanged lines hidden (view full) ---

602 // @todo This should really enqueue the packet rather
603 bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
604 assert(success);
605 return true;
606 }
607
608 promoteWholeLineWrites(pkt);
609
610 if (pkt->memInhibitAsserted()) {
611 // a cache above us (but not where the packet came from) is
612 // responding to the request
613 DPRINTF(Cache, "mem inhibited on addr %#llx (%s): not responding\n",
614 pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
615
616 // if the packet needs exclusive, and the cache that has
617 // promised to respond (setting the inhibit flag) is not
618 // providing exclusive (it is in O vs M state), we know that
619 // there may be other shared copies in the system; go out and
620 // invalidate them all
621 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
622 // create a downstream express snoop with cleared packet
623 // flags, there is no need to allocate any data as the
624 // packet is merely used to co-ordinate state transitions
625 Packet *snoop_pkt = new Packet(pkt, true, false);
626
627 // also reset the bus time that the original packet has
628 // not yet paid for
629 snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;
630
631 // make this an instantaneous express snoop, and let the
632 // other caches in the system know that the packet is
633 // inhibited, because we have found the authoritative copy
634 // (O) that will supply the right data
635 snoop_pkt->setExpressSnoop();
636 snoop_pkt->assertMemInhibit();
637
638 // this express snoop travels towards the memory, and at
639 // every crossbar it is snooped upwards thus reaching
640 // every cache in the system
641 bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt);
642 // express snoops always succeed
643 assert(success);
644
645 // main memory will delete the packet
646 }
647
648 // queue for deletion, as the sending cache is still relying
649 // on the packet
650 pendingDelete.reset(pkt);
651
652 // no need to take any action in this particular cache as the
653 // caches along the path to memory are allowed to keep lines
654 // in a shared state, and a cache above us already committed
655 // to responding
656 return true;
657 }
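The rule applied above, stated on its own (a hypothetical helper, not part of the cache interface): a responder in Owned state cannot guarantee it is the only sharer, so a request that needs exclusivity still has to invalidate everyone else via an express snoop.

    // Hypothetical helper, not part of the cache interface.
    bool mustBroadcastInvalidate(bool needs_exclusive, bool supply_exclusive)
    {
        // supply_exclusive: the responder promised a sole (Modified) copy.
        // An Owned responder cannot rule out other sharers, so an exclusive
        // request still needs an express snoop to invalidate them.
        return needs_exclusive && !supply_exclusive;
    }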
658
659 // anything that is merely forwarded pays for the forward latency and
660 // the delay provided by the crossbar
661 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
662
663 // We use lookupLatency here because it is used to specify the latency

--- 203 unchanged lines hidden (view full) ---

867 // that yet. Note that we do need to leave the
868 // block valid so that it stays in the cache, in
869 // case we get an upgrade response (and hence no
870 // new data) when the write miss completes.
871 // As long as CPUs do proper store/load forwarding
872 // internally, and have a sufficiently weak memory
873 // model, this is probably unnecessary, but at some
874 // point it must have seemed like we needed it...
875 assert(pkt->needsExclusive());
876 assert(!blk->isWritable());
877 blk->status &= ~BlkReadable;
878 }
879 // Here we are using forward_time, modelling the latency of
880 // a miss (outbound) just as forwardLatency, neglecting the
881 // lookupLatency component.
882 allocateMissBuffer(pkt, forward_time);
883 }

--- 11 unchanged lines hidden (view full) ---

895
896 return true;
897}
898
899
900// See comment in cache.hh.
901PacketPtr
902Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
903 bool needsExclusive) const
904{
905 bool blkValid = blk && blk->isValid();
906
907 if (cpu_pkt->req->isUncacheable()) {
908 // note that at the point we see the uncacheable request we
909 // flush any block, but there could be an outstanding MSHR,
910 // and the cache could have filled again before we actually
911 // send out the forwarded uncacheable request (blk could thus

--- 14 unchanged lines hidden (view full) ---

926
927 MemCmd cmd;
928 // @TODO make useUpgrades a parameter.
929 // Note that ownership protocols require upgrade, otherwise a
930 // write miss on a shared owned block will generate a ReadExcl,
931 // which will clobber the owned copy.
932 const bool useUpgrades = true;
933 if (blkValid && useUpgrades) {
934 // only reason to be here is that blk is shared
935 // (read-only) and we need exclusive
936 assert(needsExclusive);
937 assert(!blk->isWritable());
938 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
939 } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
940 cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
941 // Even though this SC will fail, we still need to send out the
942 // request and get the data to supply it to other snoopers in the case
943 // where the determination the StoreCond fails is delayed due to
944 // all caches not being on the same local bus.
945 cmd = MemCmd::SCUpgradeFailReq;
946 } else if (cpu_pkt->cmd == MemCmd::WriteLineReq) {
947 // forward as invalidate to all other caches, this gives us
948 // the line in exclusive state, and invalidates all other
949 // copies
950 cmd = MemCmd::InvalidateReq;
951 } else {
952 // block is invalid
953 cmd = needsExclusive ? MemCmd::ReadExReq :
954 (isReadOnly ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
955 }
956 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
957
958 // if there are sharers in the upper levels, pass that info downstream
959 if (cpu_pkt->sharedAsserted()) {
960 // note that cpu_pkt may have spent a considerable time in the
961 // MSHR queue and that the information could possibly be out
962 // of date, however, there is no harm in conservatively
963 // assuming the block is shared
964 pkt->assertShared();
965 DPRINTF(Cache, "%s passing shared from %s to %s addr %#llx size %d\n",
966 __func__, cpu_pkt->cmdString(), pkt->cmdString(),
967 pkt->getAddr(), pkt->getSize());
968 }
969
970 // the packet should be block aligned
971 assert(pkt->getAddr() == blockAlign(pkt->getAddr()));
972
973 pkt->allocate();
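The command selection in getBusPacket reduces to the following standalone restatement, using an illustrative BusCmd enum rather than gem5's MemCmd values.

    // Illustrative BusCmd enum; the real code uses gem5's MemCmd values.
    enum class BusCmd { UpgradeReq, SCUpgradeReq, SCUpgradeFailReq,
                        InvalidateReq, ReadExReq, ReadCleanReq, ReadSharedReq };

    BusCmd pickBusCmd(bool blk_valid, bool needs_exclusive, bool is_llsc,
                      bool sc_failed, bool write_line_req, bool read_only_cache)
    {
        if (blk_valid)                       // block held shared, need exclusive
            return is_llsc ? BusCmd::SCUpgradeReq : BusCmd::UpgradeReq;
        if (sc_failed)                       // failed SC still fetches data
            return BusCmd::SCUpgradeFailReq; // for other snoopers
        if (write_line_req)                  // whole-line write: invalidate only
            return BusCmd::InvalidateReq;
        return needs_exclusive
            ? BusCmd::ReadExReq
            : (read_only_cache ? BusCmd::ReadCleanReq : BusCmd::ReadSharedReq);
    }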

--- 13 unchanged lines hidden (view full) ---

987 bool last_level_cache = false;
988
989 // Forward the request if the system is in cache bypass mode.
990 if (system->bypassCaches())
991 return ticksToCycles(memSidePort->sendAtomic(pkt));
992
993 promoteWholeLineWrites(pkt);
994
995 if (pkt->memInhibitAsserted()) {
996 // have to invalidate ourselves and any lower caches even if
997 // upper cache will be responding
998 if (pkt->isInvalidate()) {
999 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1000 if (blk && blk->isValid()) {
1001 tags->invalidate(blk);
1002 blk->invalidate();
1003 DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx (%s):"
1004 " invalidating\n",
1005 pkt->cmdString(), pkt->getAddr(),
1006 pkt->isSecure() ? "s" : "ns");
1007 }
1008 if (!last_level_cache) {
1009 DPRINTF(Cache, "forwarding mem-inhibited %s on %#llx (%s)\n",
1010 pkt->cmdString(), pkt->getAddr(),
1011 pkt->isSecure() ? "s" : "ns");
1012 lat += ticksToCycles(memSidePort->sendAtomic(pkt));
1013 }
1014 } else {
1015 DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx: not responding\n",
1016 pkt->cmdString(), pkt->getAddr());
1017 }
1018
1019 return lat * clockPeriod();
1020 }
1021
1022 // should assert here that there are no outstanding MSHRs or
1023 // writebacks... that would mean that someone used an atomic

--- 5 unchanged lines hidden (view full) ---

1029
1030 // handle writebacks resulting from the access here to ensure they
1031 // logically precede anything happening below
1032 doWritebacksAtomic(writebacks);
1033
1034 if (!satisfied) {
1035 // MISS
1036
1037 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
1038
1039 bool is_forward = (bus_pkt == NULL);
1040
1041 if (is_forward) {
1042 // just forwarding the same request to the next level
1043 // no local cache operation involved
1044 bus_pkt = pkt;
1045 }

--- 130 unchanged lines hidden (view full) ---

1176 // needs to be found. As a result we always update the request if
1177 // we have it, but only declare it satisfied if we are the owner.
1178
1179 // see if we have data at all (owned or otherwise)
1180 bool have_data = blk && blk->isValid()
1181 && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
1182 blk->data);
1183
1184 // data we have is dirty if marked as such or if valid & ownership
1185 // pending due to outstanding UpgradeReq
1186 bool have_dirty =
1187 have_data && (blk->isDirty() ||
1188 (mshr && mshr->inService && mshr->isPendingDirty()));
1189
1190 bool done = have_dirty
1191 || cpuSidePort->checkFunctional(pkt)
1192 || mshrQueue.checkFunctional(pkt, blk_addr)
1193 || writeBuffer.checkFunctional(pkt, blk_addr)
1194 || memSidePort->checkFunctional(pkt);
1195
1196 DPRINTF(Cache, "functional %s %#llx (%s) %s%s%s\n",

--- 79 unchanged lines hidden (view full) ---

1276 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
1277 miss_latency;
1278 } else {
1279 assert(pkt->req->masterId() < system->maxMasters());
1280 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
1281 miss_latency;
1282 }
1283
1284 // upgrade deferred targets if we got exclusive
1285 if (!pkt->sharedAsserted()) {
1286 mshr->promoteExclusive();
1287 }
1288
1289 bool is_fill = !mshr->isForward &&
1290 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
1291
1292 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1293
1294 if (is_fill && !is_error) {

--- 35 unchanged lines hidden (view full) ---

1330 // unlike the other packet flows, where data is found in other
1331 // caches or memory and brought back, write-line requests always
1332 // have the data right away, so the above check for "is fill?"
1333 // cannot actually be determined until examining the stored MSHR
1334 // state. We "catch up" with that logic here, which is duplicated
1335 // from above.
1336 if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
1337 assert(!is_error);
1338 // we got the block in exclusive state, so promote any
1339 // deferred targets if possible
1340 mshr->promoteExclusive();
1341 // NB: we use the original packet here and not the response!
1342 blk = handleFill(tgt_pkt, blk, writebacks, mshr->allocOnFill);
1343 assert(blk != NULL);
1344
1345 // treat as a fill, and discard the invalidation
1346 // response
1347 is_fill = true;
1348 is_invalidate = false;

--- 184 unchanged lines hidden (view full) ---

1533 DPRINTF(Cache, "Create Writeback %#llx writable: %d, dirty: %d\n",
1534 pkt->getAddr(), blk->isWritable(), blk->isDirty());
1535
1536 if (blk->isWritable()) {
1537 // not asserting shared means we pass the block in modified
1538 // state, mark our own block non-writeable
1539 blk->status &= ~BlkWritable;
1540 } else {
1541 // we are in the owned state, tell the receiver
1542 pkt->assertShared();
1543 }
1544
1545 // make sure the block is not marked dirty
1546 blk->status &= ~BlkDirty;
1547
1548 pkt->allocate();
1549 std::memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);
1550
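The sending side of the writeback mapping, the mirror image of the sketch shown earlier for accepting a writeback, can be summarised as follows; WbBlk/WbPkt are made-up types, not gem5's CacheBlk/Packet.

    // Illustrative WbBlk/WbPkt types, not gem5's CacheBlk/Packet.
    struct WbBlk { bool writable; bool dirty; };
    struct WbPkt { bool shared_asserted = false; };

    void encodeWriteback(WbBlk &blk, WbPkt &pkt)
    {
        if (blk.writable) {
            blk.writable = false;       // line leaves here as Modified
        } else {
            pkt.shared_asserted = true; // we are Owned: tell the receiver
        }
        blk.dirty = false;              // dirty responsibility rides with pkt
    }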

--- 96 unchanged lines hidden (view full) ---

1647
1648 if (blk->isValid()) {
1649 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1650 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
1651 if (repl_mshr) {
1652 // must be an outstanding upgrade request
1653 // on a block we're about to replace...
1654 assert(!blk->isWritable() || blk->isDirty());
1655 assert(repl_mshr->needsExclusive());
1656 // too hard to replace block with transient state
1657 // allocation failed, block not inserted
1658 return NULL;
1659 } else {
1660 DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx (%s): %s\n",
1661 repl_addr, blk->isSecure() ? "s" : "ns",
1662 addr, is_secure ? "s" : "ns",
1663 blk->isDirty() ? "writeback" : "clean");

--- 84 unchanged lines hidden (view full) ---

1748 if (is_secure)
1749 blk->status |= BlkSecure;
1750 blk->status |= BlkValid | BlkReadable;
1751
1752 // sanity check for whole-line writes, which should always be
1753 // marked as writable as part of the fill, and then later marked
1754 // dirty as part of satisfyCpuSideRequest
1755 if (pkt->cmd == MemCmd::WriteLineReq) {
1756 assert(!pkt->sharedAsserted());
1757 // at the moment other caches do not respond to the
1758 // invalidation requests corresponding to a whole-line write
1759 assert(!pkt->memInhibitAsserted());
1760 }
1761
1762 if (!pkt->sharedAsserted()) {
1763 // we could get non-shared responses from memory (rather than
1764 // a cache) even in a read-only cache, note that we set this
1765 // bit even for a read-only cache as we use it to represent
1766 // the exclusive state
1767 blk->status |= BlkWritable;
1768
1769 // If we got this via cache-to-cache transfer (i.e., from a
1770 // cache that was an owner) and took away that owner's copy,
1771 // then we need to write it back. Normally this happens
1772 // anyway as a side effect of getting a copy to write it, but
1773 // there are cases (such as failed store conditionals or
1774 // compare-and-swaps) where we'll demand an exclusive copy but
1775 // end up not writing it.
1776 if (pkt->memInhibitAsserted()) {
1777 blk->status |= BlkDirty;
1778
1779 chatty_assert(!isReadOnly, "Should never see dirty snoop response "
1780 "in read-only cache %s\n", name());
1781 }
1782 }
1783
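A compact restatement of how the fill above derives the new block state from the response flags; FillState is illustrative, not gem5's BlkStatus bits.

    // Illustrative FillState, not gem5's BlkStatus bits.
    struct FillState { bool readable = true; bool writable = false; bool dirty = false; };

    FillState stateFromResponse(bool shared_asserted, bool mem_inhibit_asserted)
    {
        FillState s;
        if (!shared_asserted) {
            s.writable = true;              // Exclusive; set even in a
                                            // read-only cache to track E
            if (mem_inhibit_asserted)
                s.dirty = true;             // cache-to-cache from an owner:
                                            // we inherit the dirty data
        }
        return s;
    }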
1784 DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",

--- 37 unchanged lines hidden (view full) ---

1822 PacketPtr pkt = req_pkt;
1823 if (!already_copied)
1824 // do not clear flags, and allocate space for data if the
1825 // packet needs it (the only packets that carry data are read
1826 // responses)
1827 pkt = new Packet(req_pkt, false, req_pkt->isRead());
1828
1829 assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
1830 pkt->sharedAsserted());
1831 pkt->makeTimingResponse();
1832 if (pkt->isRead()) {
1833 pkt->setDataFromBlock(blk_data, blkSize);
1834 }
1835 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1836 // Assume we defer a response to a read from a far-away cache
1837 // A, then later defer a ReadExcl from a cache B on the same
1838 // bus as us. We'll assert MemInhibit in both cases, but in
1839 // the latter case MemInhibit will keep the invalidation from
1840 // reaching cache A. This special response tells cache A that
1841 // it gets the block to satisfy its read, but must immediately
1842 // invalidate it.
1843 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1844 }
1845 // Here we consider forward_time, paying for just forward latency and
1846 // also charging the delay provided by the xbar.
1847 // forward_time is used as send_time in next allocateWriteBuffer().
1848 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1849 // Here we reset the timing of the packet.
1850 pkt->headerDelay = pkt->payloadDelay = 0;

--- 14 unchanged lines hidden (view full) ---

1865 // pending_inval only makes sense on deferred snoops
1866 assert(!(pending_inval && !is_deferred));
1867 assert(pkt->isRequest());
1868
1869 // the packet may get modified if we or a forwarded snooper
1870 // responds in atomic mode, so remember a few things about the
1871 // original packet up front
1872 bool invalidate = pkt->isInvalidate();
1873 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1874
1875 uint32_t snoop_delay = 0;
1876
1877 if (forwardSnoops) {
1878 // first propagate snoop upward to see if anyone above us wants to
1879 // handle it. save & restore packet src since it will get
1880 // rewritten to be relative to cpu-side bus (if any)
1881 bool alreadyResponded = pkt->memInhibitAsserted();
1882 if (is_timing) {
1883 // copy the packet so that we can clear any flags before
1884 // forwarding it upwards, we also allocate data (passing
1885 // the pointer along in case of static data), in case
1886 // there is a snoop hit in upper levels
1887 Packet snoopPkt(pkt, true, true);
1888 snoopPkt.setExpressSnoop();
1889 // the snoop packet does not need to wait any additional
1890 // time
1891 snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
1892 cpuSidePort->sendTimingSnoopReq(&snoopPkt);
1893
1894 // add the header delay (including crossbar and snoop
1895 // delays) of the upward snoop to the snoop delay for this
1896 // cache
1897 snoop_delay += snoopPkt.headerDelay;
1898
1899 if (snoopPkt.memInhibitAsserted()) {
1900 // cache-to-cache response from some upper cache
1901 assert(!alreadyResponded);
1902 pkt->assertMemInhibit();
1903 }
1904 if (snoopPkt.sharedAsserted()) {
1905 pkt->assertShared();
1906 }
1907 // If this request is a prefetch or clean evict and an upper level
1908 // signals block present, make sure to propagate the block
1909 // presence to the requester.
1910 if (snoopPkt.isBlockCached()) {
1911 pkt->setBlockCached();
1912 }
1913 } else {
1914 cpuSidePort->sendAtomicSnoop(pkt);
1915 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1916 // cache-to-cache response from some upper cache:
1917 // forward response to original requester
1918 assert(pkt->isResponse());
1919 }
1920 }
1921 }
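The flag merging performed on the upward snoop can be condensed to the following sketch; SnoopFlags is a made-up aggregate, whereas the real code sets the flags directly on the Packet.

    // SnoopFlags is a made-up aggregate; the real code sets Packet flags.
    struct SnoopFlags { bool mem_inhibit = false; bool shared = false;
                        bool block_cached = false; };

    void mergeUpstreamSnoop(SnoopFlags &pkt, const SnoopFlags &upstream)
    {
        if (upstream.mem_inhibit)
            pkt.mem_inhibit = true;    // some upper cache will respond
        if (upstream.shared)
            pkt.shared = true;         // an upper cache retains a copy
        if (upstream.block_cached)
            pkt.block_cached = true;   // matters for prefetch/evict squashing
    }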
1922
1923 if (!blk || !blk->isValid()) {

--- 12 unchanged lines hidden (view full) ---

1936
1937 // We may end up modifying both the block state and the packet (if
1938 // we respond in atomic mode), so just figure out what to do now
1939 // and then do it later. If we find dirty data while snooping for
1940 // an invalidate, we don't need to send a response. The
1941 // invalidation itself is taken care of below.
1942 bool respond = blk->isDirty() && pkt->needsResponse() &&
1943 pkt->cmd != MemCmd::InvalidateReq;
1944 bool have_exclusive = blk->isWritable();
1945
1946 // Invalidate any prefetch's from below that would strip write permissions
1947 // MemCmd::HardPFReq is only observed by upstream caches. After missing
1948 // above and in its own cache, a new MemCmd::ReadReq is created that
1949 // downstream caches observe.
1950 if (pkt->mustCheckAbove()) {
1951 DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s from"
1952 " lower cache\n", pkt->getAddr(), pkt->cmdString());
1953 pkt->setBlockCached();
1954 return snoop_delay;
1955 }
1956
1957 if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
1958 // reading non-exclusive shared data, note that we retain
1959 // the block in owned state if it is dirty, with the response
1960 // taken care of below, and otherwise simply downgrade to
1961 // shared
1962 assert(!needs_exclusive);
1963 pkt->assertShared();
1964 blk->status &= ~BlkWritable;
1965 }
1966
1967 if (respond) {
1968 // prevent anyone else from responding, cache as well as
1969 // memory, and also prevent any memory from even seeing the
1970 // request (with current inhibited semantics), note that this
1971 // applies both to reads and writes and that for writes it
1972 // works thanks to the fact that we still have dirty data and
1973 // will write it back at a later point
1974 assert(!pkt->memInhibitAsserted());
1975 pkt->assertMemInhibit();
1976 if (have_exclusive) {
1977 // in the case of an uncacheable request there is no point
1978 // in setting the exclusive flag, but since the recipient
1979 // does not care there is no harm in doing so, in any case
1980 // it is just a hint
1981 pkt->setSupplyExclusive();
1982 }
1983 if (is_timing) {
1984 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1985 } else {
1986 pkt->makeAtomicResponse();
1987 pkt->setDataFromBlock(blk->data, blkSize);
1988 }
1989 }
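The local snoop decision above, restated as a pure function over made-up types (SnoopBlk/SnoopAction, not the cache's real interface): respond only when we hold dirty data and a response is wanted, and downgrade to shared on a plain read.

    // SnoopBlk/SnoopAction are illustrative, not the cache's real interface.
    struct SnoopBlk { bool writable; bool dirty; };

    struct SnoopAction {
        bool respond = false;          // supply data, inhibit memory
        bool supply_exclusive = false; // hint: we held Modified
        bool downgrade_shared = false; // assert shared, clear our writable bit
    };

    SnoopAction decideSnoop(const SnoopBlk &blk, bool needs_response,
                            bool is_invalidate_req, bool invalidates,
                            bool is_read, bool uncacheable)
    {
        SnoopAction a;
        a.respond = blk.dirty && needs_response && !is_invalidate_req;
        a.supply_exclusive = a.respond && blk.writable;
        a.downgrade_shared = !uncacheable && is_read && !invalidates;
        return a;
    }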
1990

--- 94 unchanged lines hidden (view full) ---

2085 // any CleanEvicts from travelling down the memory hierarchy.
2086 pkt->setBlockCached();
2087 DPRINTF(Cache, "Squashing %s from lower cache on writequeue hit"
2088 " %#x\n", pkt->cmdString(), pkt->getAddr());
2089 return;
2090 }
2091
2092 if (wb_pkt->cmd == MemCmd::WritebackDirty) {
2093 assert(!pkt->memInhibitAsserted());
2094 pkt->assertMemInhibit();
2095 if (!pkt->needsExclusive()) {
2096 pkt->assertShared();
2097 // the writeback is no longer passing exclusivity (the
2098 // receiving cache should consider the block owned
2099 // rather than modified)
2100 wb_pkt->assertShared();
2101 } else {
2102 // if we're not asserting the shared line, we need to
2103 // invalidate our copy. we'll do that below as long as
2104 // the packet's invalidate flag is set...
2105 assert(pkt->isInvalidate());
2106 }
2107 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
2108 false, false);
2109 } else {
2110 // on hitting a clean writeback we play it safe and do not
2111 // provide a response, the block may be dirty somewhere
2112 // else
2113 assert(wb_pkt->isCleanEviction());
2114 // The cache technically holds the block until the
2115 // corresponding message reaches the crossbar
2116 // below. Therefore when a snoop encounters a CleanEvict
2117 // or WritebackClean message we must assert the shared line
2118 // (just like when it encounters a Writeback) to avoid the
2119 // snoop filter prematurely clearing the holder bit in the
2120 // crossbar below
2121 if (!pkt->needsExclusive()) {
2122 pkt->assertShared();
2123 // the writeback is no longer passing exclusivity (the
2124 // receiving cache should consider the block owned
2125 // rather than modified)
2126 wb_pkt->assertShared();
2127 } else {
2128 assert(pkt->isInvalidate());
2129 }
2130 }
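The write-buffer snoop rules above can be summarised as below; WbufEntry/WbufSnoopResult are illustrative types, not gem5's. A dirty writeback can answer the snoop itself, while a clean eviction only keeps the shared line asserted so the snoop filter below is not cleared too early.

    // WbufEntry/WbufSnoopResult are illustrative types, not gem5's.
    struct WbufEntry { bool dirty; bool shared_asserted = false; };
    struct WbufSnoopResult { bool respond = false; bool assert_shared = false; };

    WbufSnoopResult snoopWriteBuffer(WbufEntry &wb, bool snoop_needs_exclusive)
    {
        WbufSnoopResult r;
        if (wb.dirty)
            r.respond = true;            // dirty writeback can supply the data
        if (!snoop_needs_exclusive) {
            r.assert_shared = true;      // snooper only gets a shared copy...
            wb.shared_asserted = true;   // ...and the writeback drops M -> O
        }
        return r;
    }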
2131
2132 if (pkt->isInvalidate()) {
2133 // Invalidation trumps our writeback... discard here
2134 // Note: markInService will remove entry from writeback buffer.

--- 140 unchanged lines hidden (view full) ---

2275 snoop_pkt.setExpressSnoop();
2276 // Assert that packet is either Writeback or CleanEvict and not a
2277 // prefetch request because prefetch requests need an MSHR and may
2278 // generate a snoop response.
2279 assert(pkt->isEviction());
2280 snoop_pkt.senderState = NULL;
2281 cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2282 // Writeback/CleanEvict snoops do not generate a snoop response.
2283 assert(!(snoop_pkt.memInhibitAsserted()));
2284 return snoop_pkt.isBlockCached();
2285 } else {
2286 cpuSidePort->sendAtomicSnoop(pkt);
2287 return pkt->isBlockCached();
2288 }
2289}
2290
2291PacketPtr

--- 30 unchanged lines hidden (view full) ---

2322 cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2323
2324 // Check to see if the prefetch was squashed by an upper cache
2325 // (to prevent us from grabbing the line), or if a writeback
2326 // arrived between the time the prefetch was placed in the
2327 // MSHRs and when it was selected to be sent; either way the
2328 // prefetch is dropped.
2329
2330 // It is important to check memInhibitAsserted before
2331 // prefetchSquashed. If another cache has asserted MEM_INHIBIT, it
2332 // will be sending a response which will arrive at the MSHR
2333 // allocated for this request. Checking the prefetchSquash first
2334 // may result in the MSHR being prematurely deallocated.
2335
2336 if (snoop_pkt.memInhibitAsserted()) {
2337 auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
2338 assert(r.second);
2339 // If we are getting a non-shared response it is dirty
2340 bool pending_dirty_resp = !snoop_pkt.sharedAsserted();
2341 markInService(mshr, pending_dirty_resp);
2342 DPRINTF(Cache, "Upward snoop of prefetch for addr"
2343 " %#x (%s) hit\n",
2344 tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
2345 return NULL;
2346 }
2347
2348 if (snoop_pkt.isBlockCached() || blk != NULL) {
2349 DPRINTF(Cache, "Block present, prefetch squashed by cache. "

--- 9 unchanged lines hidden (view full) ---

2359 }
2360 }
2361
2362 if (mshr->isForwardNoResponse()) {
2363 // no response expected, just forward packet as it is
2364 assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL);
2365 pkt = tgt_pkt;
2366 } else {
2367 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
2368
2369 mshr->isForward = (pkt == NULL);
2370
2371 if (mshr->isForward) {
2372 // not a cache block request, but a response is expected
2373 // make copy of current packet to forward, keep current
2374 // copy for response handling
2375 pkt = new Packet(tgt_pkt, false, true);

--- 73 unchanged lines hidden (view full) ---

2449
2450bool
2451Cache::CpuSidePort::recvTimingReq(PacketPtr pkt)
2452{
2453 assert(!cache->system->bypassCaches());
2454
2455 bool success = false;
2456
2457 // always let inhibited requests through, even if blocked,
2458 // ultimately we should check if this is an express snoop, but at
2459 // the moment that flag is only set in the cache itself
2460 if (pkt->memInhibitAsserted()) {
2461 // do not change the current retry state
2462 bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt);
2463 assert(bypass_success);
2464 return true;
2465 } else if (blocked || mustSendRetry) {
2466 // either already committed to send a retry, or blocked
2467 success = false;
2468 } else {

--- 123 unchanged lines hidden (view full) ---

2592 }
2593 // note that we have now masked any requestBus and
2594 // schedSendEvent (we will wait for a retry before
2595 // doing anything), and this is so even if we do not
2596 // care about this packet and might override it before
2597 // it gets retried
2598 } else {
2599 // As part of the call to sendTimingReq the packet is
2600 // forwarded to all neighbouring caches (and any
2601 // caches above them) as a snoop. The packet is also
2602 // sent to any potential cache below as the
2603 // interconnect is not allowed to buffer the
2604 // packet. Thus at this point we know if any of the
2605 // neighbouring, or the downstream cache is
2606 // responding, and if so, if it is with a dirty line
2607 // or not.
2608 bool pending_dirty_resp = !pkt->sharedAsserted() &&
2609 pkt->memInhibitAsserted();
2610
2611 cache.markInService(mshr, pending_dirty_resp);
2612 }
2613 }
2614
2615 // if we succeeded and are not waiting for a retry, schedule the
2616 // next send considering when the next MSHR is ready, note that
2617 // snoop responses have their own packet queue and thus schedule
2618 // their own events
2619 if (!waitingOnRetry) {

--- 12 unchanged lines hidden ---