160c160
< // assert(!pkt->needsExclusive() || blk->isWritable());
---
> // assert(!pkt->needsWritable() || blk->isWritable());
167a168,171
> // we have the block in a writable state and can go ahead,
> // note that the line may also be considered writable in
> // downstream caches along the path to memory, but always
> // Exclusive, and never Modified
169c173
< // Write or WriteLine at the first cache with block in Exclusive
---
> // Write or WriteLine at the first cache with block in writable state
173,176c177,180
< // Always mark the line as dirty even if we are a failed
< // StoreCond so we supply data to any snoops that have
< // appended themselves to this cache before knowing the store
< // will fail.
---
> // Always mark the line as dirty (and thus transition to the
> // Modified state) even if we are a failed StoreCond so we
> // supply data to any snoops that have appended themselves to
> // this cache before knowing the store will fail.
196c200
< if (pkt->needsExclusive()) {
---
> if (pkt->needsWritable()) {
202c206
< // keeps it marked dirty
---
> // keeps it marked dirty (in the modified state)
204c208
< pkt->assertMemInhibit();
---
> pkt->setCacheResponding();
211c215
< !pkt->sharedAsserted() &&
---
> !pkt->hasSharers() &&
213,215c217,219
< // we can give the requester an exclusive copy (by not
< // asserting shared line) on a read request if:
< // - we have an exclusive copy at this level (& below)
---
> // we can give the requester a writable copy on a read
> // request if:
> // - we have a writable copy at this level (& below)
219,222c223,226
< // would have asseretd shared line on request)
< // - we are not satisfying an instruction fetch (this
< // prevents dirty data in the i-cache)
<
---
> // would have set hasSharers flag when
> // snooping the packet)
> // - the read has explicitly asked for a clean
> // copy of the line
226,230c230,232
< // if we are responding immediately and can
< // signal that we're transferring ownership
< // (inhibit set) along with exclusivity
< // (shared not set), do so
< pkt->assertMemInhibit();
---
> // respond with the line in Modified state
> // (cacheResponding set, hasSharers not set)
> pkt->setCacheResponding();
232,248d233
< // if this cache is mostly inclusive, we keep
< // the block as writable (exclusive), and pass
< // it upwards as writable and dirty
< // (modified), hence we have multiple caches
< // considering the same block writable,
< // something that we get away with due to the
< // fact that: 1) this cache has been
< // considered the ordering points and
< // responded to all snoops up till now, and 2)
< // we always snoop upwards before consulting
< // the local cache, both on a normal request
< // (snooping done by the crossbar), and on a
< // snoop
< blk->status &= ~BlkDirty;
<
< // if this cache is mostly exclusive with
< // respect to the cache above, drop the block
249a235,238
> // if this cache is mostly exclusive with
> // respect to the cache above, drop the
> // block, no need to first unset the dirty
> // bit
250a240,256
> } else {
> // if this cache is mostly inclusive, we
> // keep the block in the Exclusive state,
> // and pass it upwards as Modified
> // (writable and dirty), hence we have
> // multiple caches, all on the same path
> // towards memory, all considering the
> // same block writable, but only one
> // considering it Modified
>
> // we get away with multiple caches (on
> // the same path to memory) considering
> // the block writable as we always enter
> // the cache hierarchy through a cache,
> // and first snoop upwards in all other
> // branches
> blk->status &= ~BlkDirty;
257,258c263,264
< // can't pass off ownership *or* exclusivity
< pkt->assertShared();
---
> // have to respond with a shared line
> pkt->setHasSharers();
263c269
< pkt->assertShared();
---
> pkt->setHasSharers();
267,268c273
< // Upgrade or Invalidate, since we have it Exclusively (E or
< // M), we ack then invalidate.
---
> // Upgrade or Invalidate
288c293
< Cache::markInService(MSHR *mshr, bool pending_dirty_resp)
---
> Cache::markInService(MSHR *mshr, bool pending_modified_resp)
290c295
< markInServiceInternal(mshr, pending_dirty_resp);
---
> markInServiceInternal(mshr, pending_modified_resp);
423,425c428,431
< // if shared is not asserted we got the writeback in modified
< // state, if it is asserted we are in the owned state
< if (!pkt->sharedAsserted()) {
---
> // if the packet does not have sharers, it is passing
> // writable, and we got the writeback in Modified or Exclusive
> // state, if not we are in the Owned or Shared state
> if (!pkt->hasSharers()) {
448,449c454
< (pkt->needsExclusive() ? blk->isWritable()
< : blk->isReadable())) {
---
> (pkt->needsWritable() ? blk->isWritable() : blk->isReadable())) {
457c462
< // or have block but need exclusive & only have shared.
---
> // or have block but need writable
610c615
< if (pkt->memInhibitAsserted()) {
---
> if (pkt->cacheResponding()) {
612,613c617,620
< // responding to the request
< DPRINTF(Cache, "mem inhibited on addr %#llx (%s): not responding\n",
---
> // responding to the request, in other words it has the line
> // in Modified or Owned state
> DPRINTF(Cache, "Cache above responding to %#llx (%s): "
> "not responding\n",
616,621c623,640
< // if the packet needs exclusive, and the cache that has
< // promised to respond (setting the inhibit flag) is not
< // providing exclusive (it is in O vs M state), we know that
< // there may be other shared copies in the system; go out and
< // invalidate them all
< if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
---
> // if the packet needs the block to be writable, and the cache
> // that has promised to respond (setting the cache responding
> // flag) is not providing writable (it is in Owned rather than
> // the Modified state), we know that there may be other Shared
> // copies in the system; go out and invalidate them all
> if (pkt->needsWritable() && !pkt->responderHadWritable()) {
> // an upstream cache that had the line in Owned state
> // (dirty, but not writable), is responding and thus
> // transferring the dirty line from one branch of the
> // cache hierarchy to another
>
> // send out an express snoop and invalidate all other
> // copies (snooping a packet that needs writable is the
> // same as an invalidation), thus turning the Owned line
> // into a Modified line, note that we don't invalidate the
> // block in the current cache or any other cache on the
> // path to memory
>
632,634c651,654
< // other caches in the system know that the packet is
< // inhibited, because we have found the authorative copy
< // (O) that will supply the right data
---
> // other caches in the system know that another cache
> // is responding, because we have found the authoritative
> // copy (Modified or Owned) that will supply the right
> // data
636c656
< snoop_pkt->assertMemInhibit();
---
> snoop_pkt->setCacheResponding();
645c665
< // main memory will delete the packet
---
> // main memory will delete the snoop packet
648,649c668,669
< // queue for deletion, as the sending cache is still relying
< // on the packet
---
> // queue for deletion, as opposed to immediate deletion, as
> // the sending cache is still relying on the packet
652,655c672,678
< // no need to take any action in this particular cache as the
< // caches along the path to memory are allowed to keep lines
< // in a shared state, and a cache above us already committed
< // to responding
---
> // no need to take any action in this particular cache as an
> // upstream cache has already committed to responding, and
> // either the packet does not need writable (and we can let
> // the cache that set the cache responding flag pass on the
> // line without any need for intervention), or if the packet
> // needs writable it is provided, or we have already sent out
> // any express snoops in the section above
875c898
< assert(pkt->needsExclusive());
---
> assert(pkt->needsWritable());
903c926
< bool needsExclusive) const
---
> bool needsWritable) const
934,936c957,959
< // only reason to be here is that blk is shared
< // (read-only) and we need exclusive
< assert(needsExclusive);
---
> // only reason to be here is that blk is read only and we need
> // it to be writable
> assert(needsWritable);
948c971
< // the line in exclusive state, and invalidates all other
---
> // the line in Exclusive state, and invalidates all other
953c976
< cmd = needsExclusive ? MemCmd::ReadExReq :
---
> cmd = needsWritable ? MemCmd::ReadExReq :
958,959c981,984
< // if there are sharers in the upper levels, pass that info downstream
< if (cpu_pkt->sharedAsserted()) {
---
> // if there are upstream caches that have already marked the
> // packet as having sharers (not passing writable), pass that info
> // downstream
> if (cpu_pkt->hasSharers()) {
963,965c988,991
< // assuming the block is shared
< pkt->assertShared();
< DPRINTF(Cache, "%s passing shared from %s to %s addr %#llx size %d\n",
---
> // assuming the block has sharers
> pkt->setHasSharers();
> DPRINTF(Cache, "%s passing hasSharers from %s to %s addr %#llx "
> "size %d\n",
995c1021
< if (pkt->memInhibitAsserted()) {
---
> if (pkt->cacheResponding()) {
1003c1029
< DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx (%s):"
---
> DPRINTF(Cache, "Other cache responding to %s on %#llx (%s):"
1009c1035,1036
< DPRINTF(Cache, "forwarding mem-inhibited %s on %#llx (%s)\n",
---
> DPRINTF(Cache, "Other cache responding to %s on %#llx (%s):"
> " forwarding\n",
1015c1042,1043
< DPRINTF(Cache, "rcvd mem-inhibited %s on %#llx: not responding\n",
---
> DPRINTF(Cache, "Other cache responding to %s on %#llx: "
> "not responding\n",
1037c1065
< PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
---
> PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsWritable());
1184,1185c1212,1213
< // data we have is dirty if marked as such or if valid & ownership
< // pending due to outstanding UpgradeReq
---
> // data we have is dirty if marked as such or if we have an
> // in-service MSHR that is pending a modified line
1188c1216
< (mshr && mshr->inService && mshr->isPendingDirty()));
---
> (mshr && mshr->inService && mshr->isPendingModified()));
1284,1286c1312,1315
< // upgrade deferred targets if we got exclusive
< if (!pkt->sharedAsserted()) {
< mshr->promoteExclusive();
---
> // upgrade deferred targets if the response has no sharers, and is
> // thus passing writable
> if (!pkt->hasSharers()) {
> mshr->promoteWritable();
1338,1340c1367,1369
< // we got the block in exclusive state, so promote any
< // deferred targets if possible
< mshr->promoteExclusive();
---
> // we got the block in a writable state, so promote
> // any deferred targets if possible
> mshr->promoteWritable();
1541,1542c1570,1571
< // we are in the owned state, tell the receiver
< pkt->assertShared();
---
> // we are in the Owned state, tell the receiver
> pkt->setHasSharers();
1655c1684
< assert(repl_mshr->needsExclusive());
---
> assert(repl_mshr->needsWritable());
1756c1785
< assert(!pkt->sharedAsserted());
---
> assert(!pkt->hasSharers());
1759c1788
< assert(!pkt->memInhibitAsserted());
---
> assert(!pkt->cacheResponding());
1762,1766c1791,1801
< if (!pkt->sharedAsserted()) {
< // we could get non-shared responses from memory (rather than
< // a cache) even in a read-only cache, note that we set this
< // bit even for a read-only cache as we use it to represent
< // the exclusive state
---
> // here we deal with setting the appropriate state of the line,
> // and we start by looking at the hasSharers flag, and ignore the
> // cacheResponding flag (normally signalling dirty data) if the
> // packet has sharers, thus the line is never allocated as Owned
> // (dirty but not writable), and always ends up being either
> // Shared, Exclusive or Modified, see Packet::setCacheResponding
> // for more details
> if (!pkt->hasSharers()) {
> // we could get a writable line from memory (rather than a
> // cache) even in a read-only cache, note that we set this bit
> // even for a read-only cache, possibly revisit this decision
1769,1776c1804,1808
< // If we got this via cache-to-cache transfer (i.e., from a
< // cache that was an owner) and took away that owner's copy,
< // then we need to write it back. Normally this happens
< // anyway as a side effect of getting a copy to write it, but
< // there are cases (such as failed store conditionals or
< // compare-and-swaps) where we'll demand an exclusive copy but
< // end up not writing it.
< if (pkt->memInhibitAsserted()) {
---
> // check if we got this via cache-to-cache transfer (i.e., from a
> // cache that had the block in Modified or Owned state)
> if (pkt->cacheResponding()) {
> // we got the block in Modified state, and invalidated the
> // owners copy
1830c1862
< pkt->sharedAsserted());
---
> pkt->hasSharers());
1838,1842c1870,1874
< // bus as us. We'll assert MemInhibit in both cases, but in
< // the latter case MemInhibit will keep the invalidation from
< // reaching cache A. This special response tells cache A that
< // it gets the block to satisfy its read, but must immediately
< // invalidate it.
---
> // bus as us. We'll assert cacheResponding in both cases, but
> // in the latter case cacheResponding will keep the
> // invalidation from reaching cache A. This special response
> // tells cache A that it gets the block to satisfy its read,
> // but must immediately invalidate it.
1873c1905
< bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
---
> bool M5_VAR_USED needs_writable = pkt->needsWritable();
1881c1913
< bool alreadyResponded = pkt->memInhibitAsserted();
---
> bool alreadyResponded = pkt->cacheResponding();
1899c1931
< if (snoopPkt.memInhibitAsserted()) {
---
> if (snoopPkt.cacheResponding()) {
1902c1934
< pkt->assertMemInhibit();
---
> pkt->setCacheResponding();
1904,1905c1936,1939
< if (snoopPkt.sharedAsserted()) {
< pkt->assertShared();
---
> // upstream cache has the block, or has an outstanding
> // MSHR, pass the flag on
> if (snoopPkt.hasSharers()) {
> pkt->setHasSharers();
1915c1949
< if (!alreadyResponded && pkt->memInhibitAsserted()) {
---
> if (!alreadyResponded && pkt->cacheResponding()) {
1944c1978
< bool have_exclusive = blk->isWritable();
---
> bool have_writable = blk->isWritable();
1958,1963c1992,1998
< // reading non-exclusive shared data, note that we retain
< // the block in owned state if it is dirty, with the response
< // taken care of below, and otherwhise simply downgrade to
< // shared
< assert(!needs_exclusive);
< pkt->assertShared();
---
> // reading without requiring the line in a writable state,
> // note that we retain the block as Owned if it is Modified
> // (dirty data), with the response taken care of below, and
> // otherwise simply downgrade from Exclusive to Shared (or
> // remain in Shared)
> assert(!needs_writable);
> pkt->setHasSharers();
1970,1976c2005,2012
< // request (with current inhibited semantics), note that this
< // applies both to reads and writes and that for writes it
< // works thanks to the fact that we still have dirty data and
< // will write it back at a later point
< assert(!pkt->memInhibitAsserted());
< pkt->assertMemInhibit();
< if (have_exclusive) {
---
> // request
> pkt->setCacheResponding();
> if (have_writable) {
> // inform the cache hierarchy that this cache had the line
> // in the Modified state so that we avoid unnecessary
> // invalidations (see Packet::setResponderHadWritable)
> pkt->setResponderHadWritable();
>
1978,1981c2014,2020
< // in setting the exclusive flag, but since the recipient
< // does not care there is no harm in doing so, in any case
< // it is just a hint
< pkt->setSupplyExclusive();
---
> // in setting the responderHadWritable flag, but since the
> // recipient does not care there is no harm in doing so
> } else {
> // if the packet has needsWritable set we invalidate our
> // copy below and all other copies will be invalidated
> // through express snoops, and if needsWritable is not set
> // we already called setHasSharers above
1982a2022
>
2093,2100c2133,2142
< assert(!pkt->memInhibitAsserted());
< pkt->assertMemInhibit();
< if (!pkt->needsExclusive()) {
< pkt->assertShared();
< // the writeback is no longer passing exclusivity (the
< // receiving cache should consider the block owned
< // rather than modified)
< wb_pkt->assertShared();
---
> // we have dirty data, and so will proceed to respond
> pkt->setCacheResponding();
> if (!pkt->needsWritable()) {
> // the packet should end up in the Shared state (non
> // writable) on the completion of the fill
> pkt->setHasSharers();
> // similarly, the writeback is no longer passing
> // writable (the receiving cache should consider the
> // block Owned rather than Modified)
> wb_pkt->setHasSharers();
2102,2104c2144,2145
< // if we're not asserting the shared line, we need to
< // invalidate our copy. we'll do that below as long as
< // the packet's invalidate flag is set...
---
> // we need to invalidate our copy. we do that
> // below.
2117,2126c2158,2167
< // or WritebackClean message we must set assertShared
< // (just like when it encounters a Writeback) to avoid the
< // snoop filter prematurely clearing the holder bit in the
< // crossbar below
< if (!pkt->needsExclusive()) {
< pkt->assertShared();
< // the writeback is no longer passing exclusivity (the
< // receiving cache should consider the block owned
< // rather than modified)
< wb_pkt->assertShared();
---
> // or WritebackClean message we must call
> // setHasSharers (just like when it encounters a
> // Writeback) to avoid the snoop filter prematurely
> // clearing the holder bit in the crossbar below
> if (!pkt->needsWritable()) {
> pkt->setHasSharers();
> // the writeback is no longer passing writable (the
> // receiving cache should consider the block Owned
> // rather than Modified)
> wb_pkt->setHasSharers();
2283c2324
< assert(!(snoop_pkt.memInhibitAsserted()));
---
> assert(!(snoop_pkt.cacheResponding()));
2330,2336c2371,2377
< // It is important to check memInhibitAsserted before
< // prefetchSquashed. If another cache has asserted MEM_INGIBIT, it
< // will be sending a response which will arrive at the MSHR
< // allocated ofr this request. Checking the prefetchSquash first
< // may result in the MSHR being prematurely deallocated.
<
< if (snoop_pkt.memInhibitAsserted()) {
---
> // It is important to check cacheResponding before
> // prefetchSquashed. If another cache has committed to
> // responding, it will be sending a dirty response which will
> // arrive at the MSHR allocated for this request. Checking the
> // prefetchSquash first may result in the MSHR being
> // prematurely deallocated.
> if (snoop_pkt.cacheResponding()) {
2339,2341c2380,2385
< // If we are getting a non-shared response it is dirty
< bool pending_dirty_resp = !snoop_pkt.sharedAsserted();
< markInService(mshr, pending_dirty_resp);
---
>
> // if we are getting a snoop response with no sharers it
> // will be allocated as Modified
> bool pending_modified_resp = !snoop_pkt.hasSharers();
> markInService(mshr, pending_modified_resp);
>
2367c2411
< pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
---
> pkt = getBusPacket(tgt_pkt, blk, mshr->needsWritable());
2457,2460c2501,2505
< // always let inhibited requests through, even if blocked,
< // ultimately we should check if this is an express snoop, but at
< // the moment that flag is only set in the cache itself
< if (pkt->memInhibitAsserted()) {
---
> // always let packets through if an upstream cache has committed
> // to responding, even if blocked (we should technically look at
> // the isExpressSnoop flag, but it is set by the cache itself, and
> // consequently we have to rely on the cacheResponding flag)
> if (pkt->cacheResponding()) {
2600,2609c2645,2652
< // forwarded to all neighbouring caches (and any
< // caches above them) as a snoop. The packet is also
< // sent to any potential cache below as the
< // interconnect is not allowed to buffer the
< // packet. Thus at this point we know if any of the
< // neighbouring, or the downstream cache is
< // responding, and if so, if it is with a dirty line
< // or not.
< bool pending_dirty_resp = !pkt->sharedAsserted() &&
< pkt->memInhibitAsserted();
---
> // forwarded to all neighbouring caches (and any caches
> // above them) as a snoop. Thus at this point we know if
> // any of the neighbouring caches are responding, and if
> // so, we know it is dirty, and we can determine if it is
> // being passed as Modified, making our MSHR the ordering
> // point
> bool pending_modified_resp = !pkt->hasSharers() &&
> pkt->cacheResponding();
2611c2654
< cache.markInService(mshr, pending_dirty_resp);
---
> cache.markInService(mshr, pending_modified_resp);