cache.cc: revision 13954:2f400a5f2627 vs. revision 14035:60068a2d56e0
(lines beginning with "-" are only in 13954, lines beginning with "+" are only in 14035; everything else is shared context)
/*
 * Copyright (c) 2010-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software

--- 147 unchanged lines hidden ---

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
1/*
2 * Copyright (c) 2010-2019 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software

--- 147 unchanged lines hidden (view full) ---

156
157/////////////////////////////////////////////////////
158//
159// Access path: requests coming in from the CPU side
160//
161/////////////////////////////////////////////////////
162
163bool
-Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat)
+Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
+              PacketList &writebacks)
{

    if (pkt->req->isUncacheable()) {
        assert(pkt->isRequest());

        chatty_assert(!(isReadOnly && pkt->isWrite()),
                      "Should never see a write in a read-only cache %s\n",
                      name());

        DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

-        // lookupLatency is the latency in case the request is uncacheable.
-        lat = lookupLatency;
-
        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
-            BaseCache::evictBlock(old_blk, clockEdge(lat + forwardLatency));
+            BaseCache::evictBlock(old_blk, writebacks);
        }

        blk = nullptr;
+        // lookupLatency is the latency in case the request is uncacheable.
+        lat = lookupLatency;
        return false;
    }

-    return BaseCache::access(pkt, blk, lat);
+    return BaseCache::access(pkt, blk, lat, writebacks);
}

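Aside (not part of the diff): on the 14035 side, access() no longer evicts inline; it appends the eviction's writeback packet to a caller-owned PacketList, and the caller drains that list afterwards. A minimal, self-contained sketch of that calling convention, using hypothetical MiniCache/Pkt/PktList stand-ins rather than gem5's Packet, PacketList and BaseCache types:

#include <list>

// Hypothetical stand-in types; only the calling pattern mirrors the diff.
struct Pkt { int addr; };
using PktList = std::list<Pkt*>;

struct MiniCache {
    bool access(int addr, PktList &writebacks) {
        // An eviction found during the lookup is buffered in the caller's
        // list instead of being sent from inside access().
        writebacks.push_back(new Pkt{addr});
        return false;                       // pretend every access misses
    }
    void doWritebacks(PktList &writebacks) {
        while (!writebacks.empty()) {       // drained once, in one place
            delete writebacks.front();
            writebacks.pop_front();
        }
    }
};

int main() {
    MiniCache cache;
    PktList writebacks;
    cache.access(0x100, writebacks);        // may queue eviction packets
    cache.doWritebacks(writebacks);         // caller sends them afterwards
    return 0;
}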
void
-Cache::doWritebacks(PacketPtr pkt, Tick forward_time)
+Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
-    // We use forwardLatency here because we are copying writebacks to
-    // write buffer.
-
-    // Call isCachedAbove for Writebacks, CleanEvicts and
-    // WriteCleans to discover if the block is cached above.
-    if (isCachedAbove(pkt)) {
-        if (pkt->cmd == MemCmd::CleanEvict) {
-            // Delete CleanEvict because cached copies exist above. The
-            // packet destructor will delete the request object because
-            // this is a non-snoop request packet which does not require a
-            // response.
-            delete pkt;
-        } else if (pkt->cmd == MemCmd::WritebackClean) {
-            // clean writeback, do not send since the block is
-            // still cached above
-            assert(writebackClean);
-            delete pkt;
-        } else {
-            assert(pkt->cmd == MemCmd::WritebackDirty ||
-                   pkt->cmd == MemCmd::WriteClean);
-            // Set BLOCK_CACHED flag in Writeback and send below, so that
-            // the Writeback does not reset the bit corresponding to this
-            // address in the snoop filter below.
-            pkt->setBlockCached();
-            allocateWriteBuffer(pkt, forward_time);
-        }
-    } else {
-        // If the block is not cached above, send packet below. Both
-        // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
-        // reset the bit corresponding to this address in the snoop filter
-        // below.
-        allocateWriteBuffer(pkt, forward_time);
-    }
+    while (!writebacks.empty()) {
+        PacketPtr wbPkt = writebacks.front();
+        // We use forwardLatency here because we are copying writebacks to
+        // write buffer.
+
+        // Call isCachedAbove for Writebacks, CleanEvicts and
+        // WriteCleans to discover if the block is cached above.
+        if (isCachedAbove(wbPkt)) {
+            if (wbPkt->cmd == MemCmd::CleanEvict) {
+                // Delete CleanEvict because cached copies exist above. The
+                // packet destructor will delete the request object because
+                // this is a non-snoop request packet which does not require a
+                // response.
+                delete wbPkt;
+            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
+                // clean writeback, do not send since the block is
+                // still cached above
+                assert(writebackClean);
+                delete wbPkt;
+            } else {
+                assert(wbPkt->cmd == MemCmd::WritebackDirty ||
+                       wbPkt->cmd == MemCmd::WriteClean);
+                // Set BLOCK_CACHED flag in Writeback and send below, so that
+                // the Writeback does not reset the bit corresponding to this
+                // address in the snoop filter below.
+                wbPkt->setBlockCached();
+                allocateWriteBuffer(wbPkt, forward_time);
+            }
+        } else {
+            // If the block is not cached above, send packet below. Both
+            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
+            // reset the bit corresponding to this address in the snoop filter
+            // below.
+            allocateWriteBuffer(wbPkt, forward_time);
+        }
+        writebacks.pop_front();
+    }
}

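Aside (not part of the diff): the list-based doWritebacks() folds the old per-packet policy into a drain loop: each queued packet is either dropped (a copy exists above and nothing dirty needs to go down) or marked BLOCK_CACHED and forwarded. A self-contained sketch of that loop's shape, with hypothetical Cmd/Pkt stand-ins instead of gem5's MemCmd and Packet:

#include <cstdio>
#include <list>

// Hypothetical stand-ins; gem5's real types are MemCmd, Packet, PacketList.
enum class Cmd { CleanEvict, WritebackClean, WritebackDirty, WriteClean };
struct Pkt { Cmd cmd; bool blockCached = false; };
using PktList = std::list<Pkt*>;

bool isCachedAbove(const Pkt*) { return true; }   // pretend an upper cache holds the line

void allocateWriteBuffer(Pkt *p) {
    // Stand-in: the real write buffer takes ownership of the packet;
    // here we just report it and leak it for brevity.
    std::printf("queued packet with cmd %d\n", static_cast<int>(p->cmd));
}

void doWritebacks(PktList &writebacks)
{
    while (!writebacks.empty()) {
        Pkt *wbPkt = writebacks.front();
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == Cmd::CleanEvict || wbPkt->cmd == Cmd::WritebackClean) {
                delete wbPkt;              // a copy exists above; nothing to send
            } else {
                wbPkt->blockCached = true; // keep the snoop-filter bit set below
                allocateWriteBuffer(wbPkt);
            }
        } else {
            allocateWriteBuffer(wbPkt);    // nothing above; let it clear the bit below
        }
        writebacks.pop_front();
    }
}

int main() {
    PktList writebacks{new Pkt{Cmd::CleanEvict}, new Pkt{Cmd::WritebackDirty}};
    doWritebacks(writebacks);
    return 0;
}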
void
-Cache::doWritebacksAtomic(PacketPtr pkt)
+Cache::doWritebacksAtomic(PacketList& writebacks)
{
-    // Call isCachedAbove for both Writebacks and CleanEvicts. If
-    // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
-    // and discard CleanEvicts.
-    if (isCachedAbove(pkt, false)) {
-        if (pkt->cmd == MemCmd::WritebackDirty ||
-            pkt->cmd == MemCmd::WriteClean) {
-            // Set BLOCK_CACHED flag in Writeback and send below,
-            // so that the Writeback does not reset the bit
-            // corresponding to this address in the snoop filter
-            // below. We can discard CleanEvicts because cached
-            // copies exist above. Atomic mode isCachedAbove
-            // modifies packet to set BLOCK_CACHED flag
-            memSidePort.sendAtomic(pkt);
-        }
-    } else {
-        // If the block is not cached above, send packet below. Both
-        // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
-        // reset the bit corresponding to this address in the snoop filter
-        // below.
-        memSidePort.sendAtomic(pkt);
-    }
-
-    // In case of CleanEvicts, the packet destructor will delete the
-    // request object because this is a non-snoop request packet which
-    // does not require a response.
-    delete pkt;
+    while (!writebacks.empty()) {
+        PacketPtr wbPkt = writebacks.front();
+        // Call isCachedAbove for both Writebacks and CleanEvicts. If
+        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
+        // and discard CleanEvicts.
+        if (isCachedAbove(wbPkt, false)) {
+            if (wbPkt->cmd == MemCmd::WritebackDirty ||
+                wbPkt->cmd == MemCmd::WriteClean) {
+                // Set BLOCK_CACHED flag in Writeback and send below,
+                // so that the Writeback does not reset the bit
+                // corresponding to this address in the snoop filter
+                // below. We can discard CleanEvicts because cached
+                // copies exist above. Atomic mode isCachedAbove
+                // modifies packet to set BLOCK_CACHED flag
+                memSidePort.sendAtomic(wbPkt);
+            }
+        } else {
+            // If the block is not cached above, send packet below. Both
+            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
+            // reset the bit corresponding to this address in the snoop filter
+            // below.
+            memSidePort.sendAtomic(wbPkt);
+        }
+        writebacks.pop_front();
+        // In case of CleanEvicts, the packet destructor will delete the
+        // request object because this is a non-snoop request packet which
+        // does not require a response.
+        delete wbPkt;
+    }
}

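Aside (not part of the diff): doWritebacksAtomic() drains the same list but never hands packets to a write buffer; each packet is sent (or dropped, for a clean eviction of a line still cached above) and then deleted immediately, since atomic-mode writebacks carry no response. A compressed sketch of that behaviour with hypothetical stand-in types:

#include <cstdio>
#include <list>

// Hypothetical stand-ins for the atomic-mode drain; not gem5's API.
struct Pkt { bool cachedAbove; bool dirty; };
using PktList = std::list<Pkt*>;

void sendAtomic(Pkt*) { std::printf("sent one writeback downstream\n"); }

void doWritebacksAtomic(PktList &writebacks)
{
    while (!writebacks.empty()) {
        Pkt *wbPkt = writebacks.front();
        // Dirty data must go below even if an upper cache has a copy;
        // a clean eviction is simply dropped in that case.
        if (!wbPkt->cachedAbove || wbPkt->dirty)
            sendAtomic(wbPkt);
        writebacks.pop_front();
        // No response is expected in atomic mode, so the packet is
        // deleted here instead of being handed to a write buffer.
        delete wbPkt;
    }
}

int main() {
    PktList writebacks{new Pkt{true, false}, new Pkt{false, true}};
    doWritebacksAtomic(writebacks);
    return 0;
}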
void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request

--- 281 unchanged lines hidden ---

    pkt->allocate();
    DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
            cpu_pkt->print());
    return pkt;
}


Cycles
-Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk)
+Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
+                           PacketList &writebacks)
{
    // deal with the packets that go through the write path of
    // the cache, i.e. any evictions and writes
    if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
        (pkt->req->isUncacheable() && pkt->isWrite())) {
        Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));

        // at this point, if the request was an uncacheable write

--- 45 unchanged lines hidden ---

            pkt->copyError(bus_pkt);
        } else if (pkt->isWholeLineWrite(blkSize)) {
            // note the use of pkt, not bus_pkt here.

            // write-line request to the cache that promoted
            // the write to a whole line
            const bool allocate = allocOnFill(pkt->cmd) &&
                (!writeAllocator || writeAllocator->allocate());
-            blk = handleFill(bus_pkt, blk, allocate);
+            blk = handleFill(bus_pkt, blk, writebacks, allocate);
            assert(blk != NULL);
            is_invalidate = false;
            satisfyRequest(pkt, blk);
        } else if (bus_pkt->isRead() ||
                   bus_pkt->cmd == MemCmd::UpgradeResp) {
            // we're updating cache state to allow us to
            // satisfy the upstream request from the cache
-            blk = handleFill(bus_pkt, blk, allocOnFill(pkt->cmd));
+            blk = handleFill(bus_pkt, blk, writebacks,
+                             allocOnFill(pkt->cmd));
            satisfyRequest(pkt, blk);
            maintainClusivity(pkt->fromCache(), blk);
        } else {
            // we're satisfying the upstream request without
            // modifying cache state, e.g., a write-through
            pkt->makeAtomicResponse();
        }
    }
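Aside (not part of the diff): with the 14035 signature, handleFill() also takes the writebacks list, so a victim displaced by the fill is queued rather than written back from inside the fill, and handleAtomicReqMiss() leaves draining the list to its caller. A small self-contained sketch of that idea, with a hypothetical one-entry MiniCache rather than gem5's tags/handleFill machinery:

#include <cstdio>
#include <list>
#include <optional>

// Hypothetical miniature of the fill path; the real handleFill() also
// honours allocation policy, clusivity and the tag store.
struct Pkt { int addr; };
using PktList = std::list<Pkt*>;

struct Blk { int addr; bool dirty; };

struct MiniCache {
    std::optional<Blk> only_way;   // one-entry "cache" for illustration

    // Filling may displace a dirty victim; its writeback packet is
    // appended to the caller's list instead of being sent here.
    Blk *handleFill(int addr, PktList &writebacks) {
        if (only_way && only_way->dirty)
            writebacks.push_back(new Pkt{only_way->addr});
        only_way = Blk{addr, false};
        return &*only_way;
    }
};

int main() {
    MiniCache c;
    c.only_way = Blk{0x40, true};   // pretend a dirty block is resident
    PktList writebacks;
    c.handleFill(0x80, writebacks); // the fill displaces the dirty victim
    for (Pkt *p : writebacks) {
        std::printf("victim writeback for addr %d\n", p->addr);
        delete p;
    }
    return 0;
}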

--- 369 unchanged lines hidden ---

    bool respond = false;
    bool blk_valid = blk && blk->isValid();
    if (pkt->isClean()) {
        if (blk_valid && blk->isDirty()) {
            DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
+            PacketList writebacks;
+            writebacks.push_back(wb_pkt);

            if (is_timing) {
                // anything that is merely forwarded pays for the forward
                // latency and the delay provided by the crossbar
                Tick forward_time = clockEdge(forwardLatency) +
                    pkt->headerDelay;
-                doWritebacks(wb_pkt, forward_time);
+                doWritebacks(writebacks, forward_time);
            } else {
-                doWritebacksAtomic(wb_pkt);
+                doWritebacksAtomic(writebacks);
            }
            pkt->setSatisfied();
        }
    } else if (!blk_valid) {
        DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
                pkt->print());
        if (is_deferred) {
            // we no longer have the block, and will not respond, but a

--- 359 unchanged lines hidden ---
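Aside (not part of the diff): in the snoop clean path above, the 14035 side wraps the single packet produced by writecleanBlk() in a local list so that the shared list-based doWritebacks()/doWritebacksAtomic() entry points can be reused for both timing and atomic mode. A self-contained sketch of that wrap-and-dispatch pattern, again with hypothetical stand-in types:

#include <cstdio>
#include <list>

// Hypothetical stand-ins; only the single-packet wrapping mirrors the diff.
struct Pkt { int addr; };
using PktList = std::list<Pkt*>;

void doWritebacks(PktList &writebacks) {        // timing-path stand-in
    while (!writebacks.empty()) {
        std::printf("timing writeback for addr %d\n", writebacks.front()->addr);
        delete writebacks.front();
        writebacks.pop_front();
    }
}

void doWritebacksAtomic(PktList &writebacks) {  // atomic-path stand-in
    while (!writebacks.empty()) {
        std::printf("atomic writeback for addr %d\n", writebacks.front()->addr);
        delete writebacks.front();
        writebacks.pop_front();
    }
}

int main() {
    const bool is_timing = true;
    Pkt *wb_pkt = new Pkt{0xc0};    // cf. the packet returned by writecleanBlk()
    PktList writebacks;
    writebacks.push_back(wb_pkt);   // wrap the single packet in a list
    if (is_timing)
        doWritebacks(writebacks);
    else
        doWritebacksAtomic(writebacks);
    return 0;
}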