2c2
< * Copyright (c) 2010-2015 ARM Limited
---
> * Copyright (c) 2010-2016 ARM Limited
289d288
<
292,304d290
< // MSHR helper functions
< //
< /////////////////////////////////////////////////////
<
<
< void
< Cache::markInService(MSHR *mshr, bool pending_modified_resp)
< {
< markInServiceInternal(mshr, pending_modified_resp);
< }
<
< /////////////////////////////////////////////////////
< //
366,370c352,354
< std::vector<MSHR *> outgoing;
< if (writeBuffer.findMatches(pkt->getAddr(), pkt->isSecure(),
< outgoing)) {
< assert(outgoing.size() == 1);
< MSHR *wb_entry = outgoing[0];
---
> WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
> pkt->isSecure());
> if (wb_entry) {
391c375
< markInService(wb_entry, false);
---
> markInService(wb_entry);
1241a1226,1269
> Cache::handleUncacheableWriteResp(PacketPtr pkt)
> {
> WriteQueueEntry *wq_entry =
> dynamic_cast<WriteQueueEntry*>(pkt->senderState);
> assert(wq_entry);
>
> WriteQueueEntry::Target *target = wq_entry->getTarget();
> Packet *tgt_pkt = target->pkt;
>
> // we send out invalidation reqs and get invalidation
> // responses for write-line requests
> assert(tgt_pkt->cmd != MemCmd::WriteLineReq);
>
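> // account the latency of this uncacheable write against the
> // original command and requesting master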
> int stats_cmd_idx = tgt_pkt->cmdToIndex();
> Tick miss_latency = curTick() - target->recvTime;
> assert(pkt->req->masterId() < system->maxMasters());
> mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
> miss_latency;
>
> tgt_pkt->makeTimingResponse();
> // if this packet is an error, copy that to the new packet
> if (pkt->isError())
> tgt_pkt->copyError(pkt);
> // Reset the bus additional time as it is now accounted for
> tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
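> // the response is scheduled after this cache's response latency plus
> // any header and payload delay carried by the incoming packet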
> Tick completion_time = clockEdge(responseLatency) +
> pkt->headerDelay + pkt->payloadDelay;
>
> cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true);
>
> wq_entry->popTarget();
> assert(!wq_entry->hasTargets());
>
> bool wasFull = writeBuffer.isFull();
> writeBuffer.deallocate(wq_entry);
>
> if (wasFull && !writeBuffer.isFull()) {
> clearBlocked(Blocked_NoWBBuffers);
> }
>
> delete pkt;
> }
>
> void
1251d1278
< MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1254,1255d1280
< assert(mshr);
<
1266,1267c1291,1297
< MSHRQueue *mq = mshr->queue;
< bool wasFull = mq->isFull();
---
> // if this is a write, we should be looking at an uncacheable
> // write
> if (pkt->isWrite()) {
> assert(pkt->req->isUncacheable());
> handleUncacheableWriteResp(pkt);
> return;
> }
1268a1299,1303
> // we have dealt with any (uncacheable) writes above; from here on
> // we know we are dealing with an MSHR due to a miss or a prefetch
> MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
> assert(mshr);
>
1279,1286d1313
< PacketList writebacks;
< // We need forward_time here because we have a call of
< // allocateWriteBuffer() that need this parameter to specify the
< // time to request the bus. In this case we use forward latency
< // because there is a writeback. We pay also here for headerDelay
< // that is charged of bus latencies if the packet comes from the
< // bus.
< Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1297a1325,1330
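> // remember if the MSHR queue was full so we know whether deallocating
> // this entry should unblock the cache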
> bool wasFull = mshrQueue.isFull();
>
> PacketList writebacks;
>
> Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
>
1473,1474c1506
< mq = mshr->queue;
< mq->markPending(mshr);
---
> mshrQueue.markPending(mshr);
1477,1479c1509,1511
< mq->deallocate(mshr);
< if (wasFull && !mq->isFull()) {
< clearBlocked((BlockedCause)mq->index);
---
> mshrQueue.deallocate(mshr);
> if (wasFull && !mshrQueue.isFull()) {
> clearBlocked(Blocked_NoMSHRs);
1484c1516
< if (prefetcher && mq == &mshrQueue && mshrQueue.canPrefetch()) {
---
> if (prefetcher && mshrQueue.canPrefetch()) {
1718,1722c1750,1752
< // When handling a fill, discard any CleanEvicts for the
< // same address in write buffer.
< Addr M5_VAR_USED blk_addr = blockAlign(pkt->getAddr());
< std::vector<MSHR *> M5_VAR_USED wbs;
< assert (!writeBuffer.findMatches(blk_addr, is_secure, wbs));
---
> // When handling a fill, we should have no writes to this line.
> assert(addr == blockAlign(addr));
> assert(!writeBuffer.findMatch(addr, is_secure));
2110,2111c2140,2141
< std::vector<MSHR *> writebacks;
< if (writeBuffer.findMatches(blk_addr, is_secure, writebacks)) {
---
> WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
> if (wb_entry) {
2114,2118d2143
<
< // Look through writebacks for any cachable writes.
< // We should only ever find a single match
< assert(writebacks.size() == 1);
< MSHR *wb_entry = writebacks[0];
2169c2194
< markInService(wb_entry, false);
---
> markInService(wb_entry);
2212,2213c2237,2238
< MSHR *
< Cache::getNextMSHR()
---
> QueueEntry*
> Cache::getNextQueueEntry()
2218,2219c2243,2244
< MSHR *miss_mshr = mshrQueue.getNextMSHR();
< MSHR *write_mshr = writeBuffer.getNextMSHR();
---
> MSHR *miss_mshr = mshrQueue.getNext();
> WriteQueueEntry *wq_entry = writeBuffer.getNext();
2222,2224c2247,2251
< // full write buffer, otherwhise we favour the miss requests
< if (write_mshr &&
< ((writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) ||
---
> // full write buffer (but only if we have no uncacheable write
> // responses outstanding, possibly revisit this last part),
> // otherwise we favour the miss requests
> if (wq_entry &&
> ((writeBuffer.isFull() && writeBuffer.numInService() == 0) ||
2228,2229c2255,2256
< mshrQueue.findPending(write_mshr->blkAddr,
< write_mshr->isSecure);
---
> mshrQueue.findPending(wq_entry->blkAddr,
> wq_entry->isSecure);
2231c2258
< if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
---
> if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
2239c2266
< return write_mshr;
---
> return wq_entry;
2242c2269
< MSHR *conflict_mshr =
---
> WriteQueueEntry *conflict_mshr =
2255c2282
< // should we return write_mshr here instead? I.e. do we
---
> // should we return wq_entry here instead? I.e. do we
2268c2295
< assert(!miss_mshr && !write_mshr);
---
> assert(!miss_mshr && !wq_entry);
2294c2321
< return NULL;
---
> return nullptr;
2325,2326c2352,2353
< PacketPtr
< Cache::getTimingPacket()
---
> Tick
> Cache::nextQueueReadyTime() const
2328c2355,2356
< MSHR *mshr = getNextMSHR();
---
> Tick nextReady = std::min(mshrQueue.nextReadyTime(),
> writeBuffer.nextReadyTime());
2330,2331c2358,2362
< if (mshr == NULL) {
< return NULL;
---
> // Don't signal prefetch ready time if no MSHRs available
> // Will signal once enough MSHRs are deallocated
> if (prefetcher && mshrQueue.canPrefetch()) {
> nextReady = std::min(nextReady,
> prefetcher->nextPrefetchReadyTime());
2333a2365,2372
> return nextReady;
> }
>
> bool
> Cache::sendMSHRQueuePacket(MSHR* mshr)
> {
> assert(mshr);
>
2336d2374
< PacketPtr pkt = NULL;
2338,2339c2376,2378
< DPRINTF(CachePort, "%s %s for addr %#llx size %d\n", __func__,
< tgt_pkt->cmdString(), tgt_pkt->getAddr(), tgt_pkt->getSize());
---
> DPRINTF(Cache, "%s MSHR %s for addr %#llx size %d\n", __func__,
> tgt_pkt->cmdString(), tgt_pkt->getAddr(),
> tgt_pkt->getSize());
2343a2383,2386
> // we should never have hardware prefetches to allocated
> // blocks
> assert(blk == NULL);
>
2382c2425
< return NULL;
---
> return false;
2385c2428
< if (snoop_pkt.isBlockCached() || blk != NULL) {
---
> if (snoop_pkt.isBlockCached()) {
2388a2432
>
2390c2434
< if (mshr->queue->forceDeallocateTarget(mshr)) {
---
> if (mshrQueue.forceDeallocateTarget(mshr)) {
2393c2437
< clearBlocked((BlockedCause)(mshr->queue->index));
---
> clearBlocked(Blocked_NoMSHRs);
2395c2439
< return NULL;
---
> return false;
2399,2404c2443,2445
< if (mshr->isForwardNoResponse()) {
< // no response expected, just forward packet as it is
< assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL);
< pkt = tgt_pkt;
< } else {
< pkt = getBusPacket(tgt_pkt, blk, mshr->needsWritable());
---
> // either a prefetch that is not present upstream, or a normal
> // MSHR request, proceed to get the packet to send downstream
> PacketPtr pkt = getBusPacket(tgt_pkt, blk, mshr->needsWritable());
2406c2447
< mshr->isForward = (pkt == NULL);
---
> mshr->isForward = (pkt == NULL);
2408,2416c2449,2454
< if (mshr->isForward) {
< // not a cache block request, but a response is expected
< // make copy of current packet to forward, keep current
< // copy for response handling
< pkt = new Packet(tgt_pkt, false, true);
< if (pkt->isWrite()) {
< pkt->setData(tgt_pkt->getConstPtr<uint8_t>());
< }
< }
---
> if (mshr->isForward) {
> // not a cache block request, but a response is expected
> // make copy of current packet to forward, keep current
> // copy for response handling
> pkt = new Packet(tgt_pkt, false, true);
> assert(!pkt->isWrite());
2419,2421c2457,2458
< assert(pkt != NULL);
< // play it safe and append (rather than set) the sender state, as
< // forwarded packets may already have existing state
---
> // play it safe and append (rather than set) the sender state,
> // as forwarded packets may already have existing state
2423,2424d2459
< return pkt;
< }
2425a2461,2465
> if (!memSidePort->sendTimingReq(pkt)) {
> // we are awaiting a retry, but we
> // delete the packet and will be creating a new packet
> // when we get the opportunity
> delete pkt;
2427,2428c2467,2489
< Tick
< Cache::nextMSHRReadyTime() const
---
> // note that we have now masked any requestBus and
> // schedSendEvent (we will wait for a retry before
> // doing anything), and this is so even if we do not
> // care about this packet and might override it before
> // it gets retried
> return true;
> } else {
> // As part of the call to sendTimingReq the packet is
> // forwarded to all neighbouring caches (and any caches
> // above them) as a snoop. Thus at this point we know if
> // any of the neighbouring caches are responding, and if
> // so, we know it is dirty, and we can determine if it is
> // being passed as Modified, making our MSHR the ordering
> // point
> bool pending_modified_resp = !pkt->hasSharers() &&
> pkt->cacheResponding();
> markInService(mshr, pending_modified_resp);
> return false;
> }
> }
>
> bool
> Cache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
2430,2431c2491
< Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
< writeBuffer.nextMSHRReadyTime());
---
> assert(wq_entry);
2433,2437c2493,2516
< // Don't signal prefetch ready time if no MSHRs available
< // Will signal once enoguh MSHRs are deallocated
< if (prefetcher && mshrQueue.canPrefetch()) {
< nextReady = std::min(nextReady,
< prefetcher->nextPrefetchReadyTime());
---
> // always a single target for write queue entries
> PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
>
> DPRINTF(Cache, "%s write %s for addr %#llx size %d\n", __func__,
> tgt_pkt->cmdString(), tgt_pkt->getAddr(),
> tgt_pkt->getSize());
>
> PacketPtr pkt = nullptr;
> bool delete_pkt = false;
>
> if (tgt_pkt->isEviction()) {
> assert(!wq_entry->isUncacheable());
> // no response expected, just forward packet as it is
> pkt = tgt_pkt;
> } else {
> // the only things we deal with besides eviction commands
> // are uncacheable writes
> assert(tgt_pkt->req->isUncacheable() && tgt_pkt->isWrite());
> // not a cache block request, but a response is expected
> // make copy of current packet to forward, keep current
> // copy for response handling
> pkt = new Packet(tgt_pkt, false, true);
> pkt->setData(tgt_pkt->getConstPtr<uint8_t>());
> delete_pkt = true;
2440c2519,2537
< return nextReady;
---
> pkt->pushSenderState(wq_entry);
>
> if (!memSidePort->sendTimingReq(pkt)) {
> if (delete_pkt) {
> // we are awaiting a retry, but we
> // delete the packet and will be creating a new packet
> // when we get the opportunity
> delete pkt;
> }
> // note that we have now masked any requestBus and
> // schedSendEvent (we will wait for a retry before
> // doing anything), and this is so even if we do not
> // care about this packet and might override it before
> // it gets retried
> return true;
> } else {
> markInService(wq_entry);
> return false;
> }
2589,2590c2686,2688
< PacketPtr pkt = cache.getTimingPacket();
< if (pkt == NULL) {
---
> QueueEntry* entry = cache.getNextQueueEntry();
>
> if (!entry) {
2595,2599d2692
< MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
< // in most cases getTimingPacket allocates a new packet, and
< // we must delete it unless it is successfully sent
< bool delete_pkt = !mshr->isForwardNoResponse();
<
2601,2613c2694,2695
< // the same addresses we are about to writeback, note that
< // this creates a dependency between requests and snoop
< // responses, but that should not be a problem since there is
< // a chain already and the key is that the snoop responses can
< // sink unconditionally
< if (snoopRespQueue.hasAddr(pkt->getAddr())) {
< DPRINTF(CachePort, "Waiting for snoop response to be sent\n");
< Tick when = snoopRespQueue.deferredPacketReadyTime();
< schedSendEvent(when);
<
< if (delete_pkt)
< delete pkt;
<
---
> // the same addresses
> if (checkConflictingSnoop(entry->blkAddr)) {
2616,2645c2698
<
<
< waitingOnRetry = !masterPort.sendTimingReq(pkt);
<
< if (waitingOnRetry) {
< DPRINTF(CachePort, "now waiting on a retry\n");
< if (delete_pkt) {
< // we are awaiting a retry, but we
< // delete the packet and will be creating a new packet
< // when we get the opportunity
< delete pkt;
< }
< // note that we have now masked any requestBus and
< // schedSendEvent (we will wait for a retry before
< // doing anything), and this is so even if we do not
< // care about this packet and might override it before
< // it gets retried
< } else {
< // As part of the call to sendTimingReq the packet is
< // forwarded to all neighbouring caches (and any caches
< // above them) as a snoop. Thus at this point we know if
< // any of the neighbouring caches are responding, and if
< // so, we know it is dirty, and we can determine if it is
< // being passed as Modified, making our MSHR the ordering
< // point
< bool pending_modified_resp = !pkt->hasSharers() &&
< pkt->cacheResponding();
<
< cache.markInService(mshr, pending_modified_resp);
< }
---
> waitingOnRetry = entry->sendPacket(cache);
2649c2702
< // next send considering when the next MSHR is ready, note that
---
> // next send considering when the next queue is ready, note that
2653c2706
< schedSendEvent(cache.nextMSHRReadyTime());
---
> schedSendEvent(cache.nextQueueReadyTime());