--- cache.cc (13945:a573bed35a8b)
+++ cache.cc (13948:f8666d4d5855)

 /*
  * Copyright (c) 2010-2019 ARM Limited
  * All rights reserved.
  *
  * The license below extends only to copyright in the software and shall
  * not be construed as granting a license to any other intellectual
  * property including but not limited to intellectual property relating
  * to a hardware implementation of the functionality of the software

--- 147 unchanged lines hidden ---

 
 /////////////////////////////////////////////////////
 //
 // Access path: requests coming in from the CPU side
 //
 /////////////////////////////////////////////////////
 
 bool
-Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
-              PacketList &writebacks)
+Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat)
 {
 
     if (pkt->req->isUncacheable()) {
         assert(pkt->isRequest());
 
         chatty_assert(!(isReadOnly && pkt->isWrite()),
                       "Should never see a write in a read-only cache %s\n",
                       name());
 
         DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());
 
+        // lookupLatency is the latency in case the request is uncacheable.
+        lat = lookupLatency;
+
         // flush and invalidate any existing block
         CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
         if (old_blk && old_blk->isValid()) {
-            BaseCache::evictBlock(old_blk, writebacks);
+            BaseCache::evictBlock(old_blk, clockEdge(lat + forwardLatency));
         }
 
         blk = nullptr;
-        // lookupLatency is the latency in case the request is uncacheable.
-        lat = lookupLatency;
         return false;
     }
 
-    return BaseCache::access(pkt, blk, lat, writebacks);
+    return BaseCache::access(pkt, blk, lat);
 }
 
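In the new version of Cache::access() above, the eviction is handed an absolute time, clockEdge(lat + forwardLatency), instead of a writeback list. As a rough illustration of what such a value represents (a toy stand-in, not gem5's ClockedObject::clockEdge()), the sketch below converts a cycle offset into the tick of a future clock edge; all names and numbers in it are made up for the example.

    #include <cstdint>
    #include <iostream>

    using Tick = std::uint64_t;
    using Cycles = std::uint64_t;

    // Toy clocked object, for illustration only: clockEdge(n) returns the
    // absolute tick of the n-th clock edge at or after the current time.
    struct ToyClocked {
        Tick curTick = 1000;   // current simulated time, in ticks
        Tick period = 500;     // clock period, in ticks

        Tick clockEdge(Cycles n) const {
            // align the current tick to the next clock edge ...
            Tick aligned = ((curTick + period - 1) / period) * period;
            // ... then advance n whole cycles from that edge
            return aligned + n * period;
        }
    };

    int main() {
        ToyClocked cache;
        Cycles lookupLatency = 2, forwardLatency = 1;   // example values
        // Analogous to tagging the evicted block's writeback with
        // clockEdge(lat + forwardLatency) in the hunk above.
        std::cout << cache.clockEdge(lookupLatency + forwardLatency) << '\n';
    }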
 void
-Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
+Cache::doWritebacks(PacketPtr pkt, Tick forward_time)
 {
-    while (!writebacks.empty()) {
-        PacketPtr wbPkt = writebacks.front();
-        // We use forwardLatency here because we are copying writebacks to
-        // write buffer.
-
-        // Call isCachedAbove for Writebacks, CleanEvicts and
-        // WriteCleans to discover if the block is cached above.
-        if (isCachedAbove(wbPkt)) {
-            if (wbPkt->cmd == MemCmd::CleanEvict) {
-                // Delete CleanEvict because cached copies exist above. The
-                // packet destructor will delete the request object because
-                // this is a non-snoop request packet which does not require a
-                // response.
-                delete wbPkt;
-            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
-                // clean writeback, do not send since the block is
-                // still cached above
-                assert(writebackClean);
-                delete wbPkt;
-            } else {
-                assert(wbPkt->cmd == MemCmd::WritebackDirty ||
-                       wbPkt->cmd == MemCmd::WriteClean);
-                // Set BLOCK_CACHED flag in Writeback and send below, so that
-                // the Writeback does not reset the bit corresponding to this
-                // address in the snoop filter below.
-                wbPkt->setBlockCached();
-                allocateWriteBuffer(wbPkt, forward_time);
-            }
-        } else {
-            // If the block is not cached above, send packet below. Both
-            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
-            // reset the bit corresponding to this address in the snoop filter
-            // below.
-            allocateWriteBuffer(wbPkt, forward_time);
-        }
-        writebacks.pop_front();
-    }
+    // We use forwardLatency here because we are copying writebacks to
+    // write buffer.
+
+    // Call isCachedAbove for Writebacks, CleanEvicts and
+    // WriteCleans to discover if the block is cached above.
+    if (isCachedAbove(pkt)) {
+        if (pkt->cmd == MemCmd::CleanEvict) {
+            // Delete CleanEvict because cached copies exist above. The
+            // packet destructor will delete the request object because
+            // this is a non-snoop request packet which does not require a
+            // response.
+            delete pkt;
+        } else if (pkt->cmd == MemCmd::WritebackClean) {
+            // clean writeback, do not send since the block is
+            // still cached above
+            assert(writebackClean);
+            delete pkt;
+        } else {
+            assert(pkt->cmd == MemCmd::WritebackDirty ||
+                   pkt->cmd == MemCmd::WriteClean);
+            // Set BLOCK_CACHED flag in Writeback and send below, so that
+            // the Writeback does not reset the bit corresponding to this
+            // address in the snoop filter below.
+            pkt->setBlockCached();
+            allocateWriteBuffer(pkt, forward_time);
+        }
+    } else {
+        // If the block is not cached above, send packet below. Both
+        // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
+        // reset the bit corresponding to this address in the snoop filter
+        // below.
+        allocateWriteBuffer(pkt, forward_time);
+    }
 }
 
 void
-Cache::doWritebacksAtomic(PacketList& writebacks)
+Cache::doWritebacksAtomic(PacketPtr pkt)
 {
-    while (!writebacks.empty()) {
-        PacketPtr wbPkt = writebacks.front();
-        // Call isCachedAbove for both Writebacks and CleanEvicts. If
-        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
-        // and discard CleanEvicts.
-        if (isCachedAbove(wbPkt, false)) {
-            if (wbPkt->cmd == MemCmd::WritebackDirty ||
-                wbPkt->cmd == MemCmd::WriteClean) {
-                // Set BLOCK_CACHED flag in Writeback and send below,
-                // so that the Writeback does not reset the bit
-                // corresponding to this address in the snoop filter
-                // below. We can discard CleanEvicts because cached
-                // copies exist above. Atomic mode isCachedAbove
-                // modifies packet to set BLOCK_CACHED flag
-                memSidePort.sendAtomic(wbPkt);
-            }
-        } else {
-            // If the block is not cached above, send packet below. Both
-            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
-            // reset the bit corresponding to this address in the snoop filter
-            // below.
-            memSidePort.sendAtomic(wbPkt);
-        }
-        writebacks.pop_front();
-        // In case of CleanEvicts, the packet destructor will delete the
-        // request object because this is a non-snoop request packet which
-        // does not require a response.
-        delete wbPkt;
-    }
+    // Call isCachedAbove for both Writebacks and CleanEvicts. If
+    // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
+    // and discard CleanEvicts.
+    if (isCachedAbove(pkt, false)) {
+        if (pkt->cmd == MemCmd::WritebackDirty ||
+            pkt->cmd == MemCmd::WriteClean) {
+            // Set BLOCK_CACHED flag in Writeback and send below,
+            // so that the Writeback does not reset the bit
+            // corresponding to this address in the snoop filter
+            // below. We can discard CleanEvicts because cached
+            // copies exist above. Atomic mode isCachedAbove
+            // modifies packet to set BLOCK_CACHED flag
+            memSidePort.sendAtomic(pkt);
+        }
+    } else {
+        // If the block is not cached above, send packet below. Both
+        // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
+        // reset the bit corresponding to this address in the snoop filter
+        // below.
+        memSidePort.sendAtomic(pkt);
+    }
+
+    // In case of CleanEvicts, the packet destructor will delete the
+    // request object because this is a non-snoop request packet which
+    // does not require a response.
+    delete pkt;
 }
 
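Both helpers above keep the same per-block policy (drop redundant clean evictions when a copy exists above, send dirty writebacks below with BLOCK_CACHED set, otherwise send the packet below as-is); only the interface changes from a drained PacketList to a single packet whose ownership is handed over. The following is a minimal standalone sketch of that per-packet decision logic, using made-up Packet/Cmd types and a print in place of the downstream send, not gem5's classes.

    #include <cassert>
    #include <iostream>

    // Made-up stand-ins for gem5's MemCmd/Packet, for illustration only.
    enum class Cmd { WritebackDirty, WritebackClean, WriteClean, CleanEvict };

    struct Packet {
        Cmd cmd;
        bool blockCached = false;   // stand-in for the BLOCK_CACHED flag
    };

    // Per-packet writeback handling in the shape of the new doWritebacks():
    // the caller hands over exactly one packet, and ownership moves with it.
    void handleWriteback(Packet *pkt, bool cachedAbove)
    {
        if (cachedAbove) {
            if (pkt->cmd == Cmd::CleanEvict || pkt->cmd == Cmd::WritebackClean) {
                // A copy exists above, so the clean eviction is redundant.
                delete pkt;
                return;
            }
            // WritebackDirty/WriteClean still have to go below, but flagged so
            // the snoop filter underneath keeps its "cached above" bit.
            assert(pkt->cmd == Cmd::WritebackDirty || pkt->cmd == Cmd::WriteClean);
            pkt->blockCached = true;
        }
        // Send downstream (modelled as a print here) and release the packet.
        std::cout << "send below, blockCached=" << pkt->blockCached << '\n';
        delete pkt;
    }

    int main()
    {
        handleWriteback(new Packet{Cmd::CleanEvict}, true);       // dropped
        handleWriteback(new Packet{Cmd::WritebackDirty}, true);   // sent, flagged
        handleWriteback(new Packet{Cmd::WritebackDirty}, false);  // sent, unflagged
    }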
 void
 Cache::recvTimingSnoopResp(PacketPtr pkt)
 {
     DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());
 
     // determine if the response is from a snoop request we created
     // (in which case it should be in the outstandingSnoop), or if we
     // merely forwarded someone else's snoop request

--- 280 unchanged lines hidden ---

     pkt->allocate();
     DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
             cpu_pkt->print());
     return pkt;
 }
 
 
 Cycles
-Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
-                           PacketList &writebacks)
+Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk)
 {
     // deal with the packets that go through the write path of
     // the cache, i.e. any evictions and writes
     if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
         (pkt->req->isUncacheable() && pkt->isWrite())) {
         Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));
 
         // at this point, if the request was an uncacheable write

--- 45 unchanged lines hidden ---

                 pkt->copyError(bus_pkt);
             } else if (pkt->isWholeLineWrite(blkSize)) {
                 // note the use of pkt, not bus_pkt here.
 
                 // write-line request to the cache that promoted
                 // the write to a whole line
                 const bool allocate = allocOnFill(pkt->cmd) &&
                     (!writeAllocator || writeAllocator->allocate());
-                blk = handleFill(bus_pkt, blk, writebacks, allocate);
+                blk = handleFill(bus_pkt, blk, allocate);
                 assert(blk != NULL);
                 is_invalidate = false;
                 satisfyRequest(pkt, blk);
             } else if (bus_pkt->isRead() ||
                        bus_pkt->cmd == MemCmd::UpgradeResp) {
                 // we're updating cache state to allow us to
                 // satisfy the upstream request from the cache
-                blk = handleFill(bus_pkt, blk, writebacks,
-                                 allocOnFill(pkt->cmd));
+                blk = handleFill(bus_pkt, blk, allocOnFill(pkt->cmd));
                 satisfyRequest(pkt, blk);
                 maintainClusivity(pkt->fromCache(), blk);
             } else {
                 // we're satisfying the upstream request without
                 // modifying cache state, e.g., a write-through
                 pkt->makeAtomicResponse();
             }
         }

--- 369 unchanged lines hidden ---

 
     bool respond = false;
     bool blk_valid = blk && blk->isValid();
     if (pkt->isClean()) {
         if (blk_valid && blk->isDirty()) {
             DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
                     __func__, pkt->print(), blk->print());
             PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
-            PacketList writebacks;
-            writebacks.push_back(wb_pkt);
 
             if (is_timing) {
                 // anything that is merely forwarded pays for the forward
                 // latency and the delay provided by the crossbar
                 Tick forward_time = clockEdge(forwardLatency) +
                     pkt->headerDelay;
-                doWritebacks(writebacks, forward_time);
+                doWritebacks(wb_pkt, forward_time);
             } else {
-                doWritebacksAtomic(writebacks);
+                doWritebacksAtomic(wb_pkt);
             }
             pkt->setSatisfied();
         }
     } else if (!blk_valid) {
         DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
                 pkt->print());
         if (is_deferred) {
             // we no longer have the block, and will not respond, but a

--- 359 unchanged lines hidden ---
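The last hunk shows the caller-side effect of the refactoring: the snoop path no longer wraps wb_pkt in a one-element PacketList, it passes the packet straight to doWritebacks()/doWritebacksAtomic(). Below is a minimal standalone before/after sketch of that calling pattern; issueWriteback/issueWritebacks and the Packet type are hypothetical names for the illustration, not gem5 APIs.

    #include <iostream>
    #include <list>
    #include <string>

    struct Packet { std::string label; };
    using PacketPtr = Packet *;
    using PacketList = std::list<PacketPtr>;

    // Stand-in for the downstream work done per writeback packet.
    void issueWriteback(PacketPtr pkt)
    {
        std::cout << "issuing " << pkt->label << '\n';
        delete pkt;   // ownership transferred to the callee
    }

    // Old shape: the helper drains a list the caller had to build first.
    void issueWritebacks(PacketList &writebacks)
    {
        while (!writebacks.empty()) {
            issueWriteback(writebacks.front());
            writebacks.pop_front();
        }
    }

    int main()
    {
        // Before: wrap the single packet in a list just to pass it along.
        PacketList writebacks;
        writebacks.push_back(new Packet{"wb_pkt (list interface)"});
        issueWritebacks(writebacks);

        // After: hand the packet over directly, as the new code does.
        issueWriteback(new Packet{"wb_pkt (per-packet interface)"});
    }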