Deleted Added
sdiff udiff text old ( 11352:4e195fb9ec4f ) new ( 11375:f98df9231cdd )
full compact
1/*
2 * Copyright (c) 2010-2015 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated

--- 270 unchanged lines hidden (view full) ---

281 // for invalidations we could be looking at the temp block
282 // (for upgrades we always allocate)
283 invalidateBlock(blk);
284 DPRINTF(CacheVerbose, "%s for %s addr %#llx size %d (invalidation)\n",
285 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
286 }
287}
288
289
290/////////////////////////////////////////////////////
291//
292// MSHR helper functions
293//
294/////////////////////////////////////////////////////
295
296
297void
298Cache::markInService(MSHR *mshr, bool pending_modified_resp)
299{
300 markInServiceInternal(mshr, pending_modified_resp);
301}
302
303/////////////////////////////////////////////////////
304//
305// Access path: requests coming in from the CPU side
306//
307/////////////////////////////////////////////////////
308
309bool
310Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
311 PacketList &writebacks)
312{

--- 45 unchanged lines hidden (view full) ---

358 // We check for presence of block in above caches before issuing
359 // Writeback or CleanEvict to write buffer. Therefore the only
360 // possible cases can be of a CleanEvict packet coming from above
361 // encountering a Writeback generated in this cache peer cache and
362 // waiting in the write buffer. Cases of upper level peer caches
363 // generating CleanEvict and Writeback or simply CleanEvict and
364 // CleanEvict almost simultaneously will be caught by snoops sent out
365 // by crossbar.
366 std::vector<MSHR *> outgoing;
367 if (writeBuffer.findMatches(pkt->getAddr(), pkt->isSecure(),
368 outgoing)) {
369 assert(outgoing.size() == 1);
370 MSHR *wb_entry = outgoing[0];
371 assert(wb_entry->getNumTargets() == 1);
372 PacketPtr wbPkt = wb_entry->getTarget()->pkt;
373 assert(wbPkt->isWriteback());
374
375 if (pkt->isCleanEviction()) {
376 // The CleanEvict and WritebackClean snoops into other
377 // peer caches of the same level while traversing the
378 // crossbar. If a copy of the block is found, the

--- 4 unchanged lines hidden (view full) ---

383 // discard the CleanEvict by returning true.
384 wbPkt->clearBlockCached();
385 return true;
386 } else {
387 assert(pkt->cmd == MemCmd::WritebackDirty);
388 // Dirty writeback from above trumps our clean
389 // writeback... discard here
390 // Note: markInService will remove entry from writeback buffer.
391 markInService(wb_entry, false);
392 delete wbPkt;
393 }
394 }
395 }
396
397 // Writeback handling is special case. We can write the block into
398 // the cache without having a writeable copy (or any copy at all).
399 if (pkt->isWriteback()) {

--- 834 unchanged lines hidden (view full) ---

1234/////////////////////////////////////////////////////
1235//
1236// Response handling: responses from the memory side
1237//
1238/////////////////////////////////////////////////////
1239
1240
1241void
1242Cache::recvTimingResp(PacketPtr pkt)
1243{
1244 assert(pkt->isResponse());
1245
1246 // all header delay should be paid for by the crossbar, unless
1247 // this is a prefetch response from above
1248 panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
1249 "%s saw a non-zero packet delay\n", name());
1250
1251 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1252 bool is_error = pkt->isError();
1253
1254 assert(mshr);
1255
1256 if (is_error) {
1257 DPRINTF(Cache, "Cache received packet with error for addr %#llx (%s), "
1258 "cmd: %s\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns",
1259 pkt->cmdString());
1260 }
1261
1262 DPRINTF(Cache, "Handling response %s for addr %#llx size %d (%s)\n",
1263 pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
1264 pkt->isSecure() ? "s" : "ns");
1265
1266 MSHRQueue *mq = mshr->queue;
1267 bool wasFull = mq->isFull();
1268
1269 if (mshr == noTargetMSHR) {
1270 // we always clear at least one target
1271 clearBlocked(Blocked_NoTargets);
1272 noTargetMSHR = NULL;
1273 }
1274
1275 // Initial target is used just for stats
1276 MSHR::Target *initial_tgt = mshr->getTarget();
1277 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
1278 Tick miss_latency = curTick() - initial_tgt->recvTime;
1279 PacketList writebacks;
1280 // We need forward_time here because we have a call of
1281 // allocateWriteBuffer() that need this parameter to specify the
1282 // time to request the bus. In this case we use forward latency
1283 // because there is a writeback. We pay also here for headerDelay
1284 // that is charged of bus latencies if the packet comes from the
1285 // bus.
1286 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1287
1288 if (pkt->req->isUncacheable()) {
1289 assert(pkt->req->masterId() < system->maxMasters());
1290 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
1291 miss_latency;
1292 } else {
1293 assert(pkt->req->masterId() < system->maxMasters());
1294 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
1295 miss_latency;
1296 }
1297
1298 // upgrade deferred targets if the response has no sharers, and is
1299 // thus passing writable
1300 if (!pkt->hasSharers()) {
1301 mshr->promoteWritable();
1302 }
1303
1304 bool is_fill = !mshr->isForward &&
1305 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);

--- 159 unchanged lines hidden (view full) ---

1465 }
1466
1467 if (mshr->promoteDeferredTargets()) {
1468 // avoid later read getting stale data while write miss is
1469 // outstanding.. see comment in timingAccess()
1470 if (blk) {
1471 blk->status &= ~BlkReadable;
1472 }
1473 mq = mshr->queue;
1474 mq->markPending(mshr);
1475 schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
1476 } else {
1477 mq->deallocate(mshr);
1478 if (wasFull && !mq->isFull()) {
1479 clearBlocked((BlockedCause)mq->index);
1480 }
1481
1482 // Request the bus for a prefetch if this deallocation freed enough
1483 // MSHRs for a prefetch to take place
1484 if (prefetcher && mq == &mshrQueue && mshrQueue.canPrefetch()) {
1485 Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
1486 clockEdge());
1487 if (next_pf_time != MaxTick)
1488 schedMemSideSendEvent(next_pf_time);
1489 }
1490 }
1491 // reset the xbar additional timinig as it is now accounted for
1492 pkt->headerDelay = pkt->payloadDelay = 0;

--- 217 unchanged lines hidden (view full) ---

1710{
1711 assert(pkt->isResponse() || pkt->cmd == MemCmd::WriteLineReq);
1712 Addr addr = pkt->getAddr();
1713 bool is_secure = pkt->isSecure();
1714#if TRACING_ON
1715 CacheBlk::State old_state = blk ? blk->status : 0;
1716#endif
1717
1718 // When handling a fill, discard any CleanEvicts for the
1719 // same address in write buffer.
1720 Addr M5_VAR_USED blk_addr = blockAlign(pkt->getAddr());
1721 std::vector<MSHR *> M5_VAR_USED wbs;
1722 assert (!writeBuffer.findMatches(blk_addr, is_secure, wbs));
1723
1724 if (blk == NULL) {
1725 // better have read new data...
1726 assert(pkt->hasData());
1727
1728 // only read responses and write-line requests have data;
1729 // note that we don't write the data here for write-line - that
1730 // happens in the subsequent satisfyCpuSideRequest.

--- 371 unchanged lines hidden (view full) ---

2102 mshr->print());
2103
2104 if (mshr->getNumTargets() > numTarget)
2105 warn("allocating bonus target for snoop"); //handle later
2106 return;
2107 }
2108
2109 //We also need to check the writeback buffers and handle those
2110 std::vector<MSHR *> writebacks;
2111 if (writeBuffer.findMatches(blk_addr, is_secure, writebacks)) {
2112 DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
2113 pkt->getAddr(), is_secure ? "s" : "ns");
2114
2115 // Look through writebacks for any cachable writes.
2116 // We should only ever find a single match
2117 assert(writebacks.size() == 1);
2118 MSHR *wb_entry = writebacks[0];
2119 // Expect to see only Writebacks and/or CleanEvicts here, both of
2120 // which should not be generated for uncacheable data.
2121 assert(!wb_entry->isUncacheable());
2122 // There should only be a single request responsible for generating
2123 // Writebacks/CleanEvicts.
2124 assert(wb_entry->getNumTargets() == 1);
2125 PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
2126 assert(wb_pkt->isEviction());

--- 34 unchanged lines hidden (view full) ---

2161
2162 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
2163 false, false);
2164 }
2165
2166 if (invalidate) {
2167 // Invalidation trumps our writeback... discard here
2168 // Note: markInService will remove entry from writeback buffer.
2169 markInService(wb_entry, false);
2170 delete wb_pkt;
2171 }
2172 }
2173
2174 // If this was a shared writeback, there may still be
2175 // other shared copies above that require invalidation.
2176 // We could be more selective and return here if the
2177 // request is non-exclusive or if the writeback is

--- 26 unchanged lines hidden (view full) ---

2204 }
2205
2206 CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
2207 uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
2208 return snoop_delay + lookupLatency * clockPeriod();
2209}
2210
2211
2212MSHR *
2213Cache::getNextMSHR()
2214{
2215 // Check both MSHR queue and write buffer for potential requests,
2216 // note that null does not mean there is no request, it could
2217 // simply be that it is not ready
2218 MSHR *miss_mshr = mshrQueue.getNextMSHR();
2219 MSHR *write_mshr = writeBuffer.getNextMSHR();
2220
2221 // If we got a write buffer request ready, first priority is a
2222 // full write buffer, otherwhise we favour the miss requests
2223 if (write_mshr &&
2224 ((writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) ||
2225 !miss_mshr)) {
2226 // need to search MSHR queue for conflicting earlier miss.
2227 MSHR *conflict_mshr =
2228 mshrQueue.findPending(write_mshr->blkAddr,
2229 write_mshr->isSecure);
2230
2231 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
2232 // Service misses in order until conflict is cleared.
2233 return conflict_mshr;
2234
2235 // @todo Note that we ignore the ready time of the conflict here
2236 }
2237
2238 // No conflicts; issue write
2239 return write_mshr;
2240 } else if (miss_mshr) {
2241 // need to check for conflicting earlier writeback
2242 MSHR *conflict_mshr =
2243 writeBuffer.findPending(miss_mshr->blkAddr,
2244 miss_mshr->isSecure);
2245 if (conflict_mshr) {
2246 // not sure why we don't check order here... it was in the
2247 // original code but commented out.
2248
2249 // The only way this happens is if we are
2250 // doing a write and we didn't have permissions
2251 // then subsequently saw a writeback (owned got evicted)
2252 // We need to make sure to perform the writeback first
2253 // To preserve the dirty data, then we can issue the write
2254
2255 // should we return write_mshr here instead? I.e. do we
2256 // have to flush writes in order? I don't think so... not
2257 // for Alpha anyway. Maybe for x86?
2258 return conflict_mshr;
2259
2260 // @todo Note that we ignore the ready time of the conflict here
2261 }
2262
2263 // No conflicts; issue read
2264 return miss_mshr;
2265 }
2266
2267 // fall through... no pending requests. Try a prefetch.
2268 assert(!miss_mshr && !write_mshr);
2269 if (prefetcher && mshrQueue.canPrefetch()) {
2270 // If we have a miss queue slot, we can try a prefetch
2271 PacketPtr pkt = prefetcher->getPacket();
2272 if (pkt) {
2273 Addr pf_addr = blockAlign(pkt->getAddr());
2274 if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
2275 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
2276 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {

--- 9 unchanged lines hidden (view full) ---

2286 } else {
2287 // free the request and packet
2288 delete pkt->req;
2289 delete pkt;
2290 }
2291 }
2292 }
2293
2294 return NULL;
2295}
2296
2297bool
2298Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
2299{
2300 if (!forwardSnoops)
2301 return false;
2302 // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and

--- 14 unchanged lines hidden (view full) ---

2317 assert(!(snoop_pkt.cacheResponding()));
2318 return snoop_pkt.isBlockCached();
2319 } else {
2320 cpuSidePort->sendAtomicSnoop(pkt);
2321 return pkt->isBlockCached();
2322 }
2323}
2324
2325PacketPtr
2326Cache::getTimingPacket()
2327{
2328 MSHR *mshr = getNextMSHR();
2329
2330 if (mshr == NULL) {
2331 return NULL;
2332 }
2333
2334 // use request from 1st target
2335 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
2336 PacketPtr pkt = NULL;
2337
2338 DPRINTF(CachePort, "%s %s for addr %#llx size %d\n", __func__,
2339 tgt_pkt->cmdString(), tgt_pkt->getAddr(), tgt_pkt->getSize());
2340
2341 CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
2342
2343 if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
2344 // We need to check the caches above us to verify that
2345 // they don't have a copy of this block in the dirty state
2346 // at the moment. Without this check we could get a stale
2347 // copy from memory that might get used in place of the
2348 // dirty one.
2349 Packet snoop_pkt(tgt_pkt, true, false);
2350 snoop_pkt.setExpressSnoop();
2351 // We are sending this packet upwards, but if it hits we will

--- 22 unchanged lines hidden (view full) ---

2374 // if we are getting a snoop response with no sharers it
2375 // will be allocated as Modified
2376 bool pending_modified_resp = !snoop_pkt.hasSharers();
2377 markInService(mshr, pending_modified_resp);
2378
2379 DPRINTF(Cache, "Upward snoop of prefetch for addr"
2380 " %#x (%s) hit\n",
2381 tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
2382 return NULL;
2383 }
2384
2385 if (snoop_pkt.isBlockCached() || blk != NULL) {
2386 DPRINTF(Cache, "Block present, prefetch squashed by cache. "
2387 "Deallocating mshr target %#x.\n",
2388 mshr->blkAddr);
2389 // Deallocate the mshr target
2390 if (mshr->queue->forceDeallocateTarget(mshr)) {
2391 // Clear block if this deallocation resulted freed an
2392 // mshr when all had previously been utilized
2393 clearBlocked((BlockedCause)(mshr->queue->index));
2394 }
2395 return NULL;
2396 }
2397 }
2398
2399 if (mshr->isForwardNoResponse()) {
2400 // no response expected, just forward packet as it is
2401 assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL);
2402 pkt = tgt_pkt;
2403 } else {
2404 pkt = getBusPacket(tgt_pkt, blk, mshr->needsWritable());
2405
2406 mshr->isForward = (pkt == NULL);
2407
2408 if (mshr->isForward) {
2409 // not a cache block request, but a response is expected
2410 // make copy of current packet to forward, keep current
2411 // copy for response handling
2412 pkt = new Packet(tgt_pkt, false, true);
2413 if (pkt->isWrite()) {
2414 pkt->setData(tgt_pkt->getConstPtr<uint8_t>());
2415 }
2416 }
2417 }
2418
2419 assert(pkt != NULL);
2420 // play it safe and append (rather than set) the sender state, as
2421 // forwarded packets may already have existing state
2422 pkt->pushSenderState(mshr);
2423 return pkt;
2424}
2425
2426
2427Tick
2428Cache::nextMSHRReadyTime() const
2429{
2430 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
2431 writeBuffer.nextMSHRReadyTime());
2432
2433 // Don't signal prefetch ready time if no MSHRs available
2434 // Will signal once enoguh MSHRs are deallocated
2435 if (prefetcher && mshrQueue.canPrefetch()) {
2436 nextReady = std::min(nextReady,
2437 prefetcher->nextPrefetchReadyTime());
2438 }
2439
2440 return nextReady;
2441}
2442
2443void
2444Cache::serialize(CheckpointOut &cp) const
2445{
2446 bool dirty(isDirty());
2447
2448 if (dirty) {

--- 132 unchanged lines hidden (view full) ---

2581 assert(!waitingOnRetry);
2582
2583 // there should never be any deferred request packets in the
2584 // queue, instead we rely on the cache to provide the packets
2585 // from the MSHR queue or write queue
2586 assert(deferredPacketReadyTime() == MaxTick);
2587
2588 // check for request packets (requests & writebacks)
2589 PacketPtr pkt = cache.getTimingPacket();
2590 if (pkt == NULL) {
2591 // can happen if e.g. we attempt a writeback and fail, but
2592 // before the retry, the writeback is eliminated because
2593 // we snoop another cache's ReadEx.
2594 } else {
2595 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
2596 // in most cases getTimingPacket allocates a new packet, and
2597 // we must delete it unless it is successfully sent
2598 bool delete_pkt = !mshr->isForwardNoResponse();
2599
2600 // let our snoop responses go first if there are responses to
2601 // the same addresses we are about to writeback, note that
2602 // this creates a dependency between requests and snoop
2603 // responses, but that should not be a problem since there is
2604 // a chain already and the key is that the snoop responses can
2605 // sink unconditionally
2606 if (snoopRespQueue.hasAddr(pkt->getAddr())) {
2607 DPRINTF(CachePort, "Waiting for snoop response to be sent\n");
2608 Tick when = snoopRespQueue.deferredPacketReadyTime();
2609 schedSendEvent(when);
2610
2611 if (delete_pkt)
2612 delete pkt;
2613
2614 return;
2615 }
2616
2617
2618 waitingOnRetry = !masterPort.sendTimingReq(pkt);
2619
2620 if (waitingOnRetry) {
2621 DPRINTF(CachePort, "now waiting on a retry\n");
2622 if (delete_pkt) {
2623 // we are awaiting a retry, but we
2624 // delete the packet and will be creating a new packet
2625 // when we get the opportunity
2626 delete pkt;
2627 }
2628 // note that we have now masked any requestBus and
2629 // schedSendEvent (we will wait for a retry before
2630 // doing anything), and this is so even if we do not
2631 // care about this packet and might override it before
2632 // it gets retried
2633 } else {
2634 // As part of the call to sendTimingReq the packet is
2635 // forwarded to all neighbouring caches (and any caches
2636 // above them) as a snoop. Thus at this point we know if
2637 // any of the neighbouring caches are responding, and if
2638 // so, we know it is dirty, and we can determine if it is
2639 // being passed as Modified, making our MSHR the ordering
2640 // point
2641 bool pending_modified_resp = !pkt->hasSharers() &&
2642 pkt->cacheResponding();
2643
2644 cache.markInService(mshr, pending_modified_resp);
2645 }
2646 }
2647
2648 // if we succeeded and are not waiting for a retry, schedule the
2649 // next send considering when the next MSHR is ready, note that
2650 // snoop responses have their own packet queue and thus schedule
2651 // their own events
2652 if (!waitingOnRetry) {
2653 schedSendEvent(cache.nextMSHRReadyTime());
2654 }
2655}
2656
2657Cache::
2658MemSidePort::MemSidePort(const std::string &_name, Cache *_cache,
2659 const std::string &_label)
2660 : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
2661 _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2662 _snoopRespQueue(*_cache, *this, _label), cache(_cache)
2663{
2664}