base.cc revision 4022
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "cpu/base.hh"
#include "cpu/smt.hh"
#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/mshr.hh"

using namespace std;

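// Each BaseCache owns two of these CachePort objects, one facing the CPU
// side and one facing the memory side; the two share this implementation
// and are distinguished only by the isCpuSide flag.  init() below panics
// if either port was left unconnected.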
BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
                                bool _isCpuSide)
    : Port(_name, _cache), cache(_cache), isCpuSide(_isCpuSide)
{
    blocked = false;
    waitingOnRetry = false;
    //Start ports at NULL; if more than one is created we should panic
    //cpuSidePort = NULL;
    //memSidePort = NULL;
}


void
BaseCache::CachePort::recvStatusChange(Port::Status status)
{
    cache->recvStatusChange(status, isCpuSide);
}

void
BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList &resp,
                                       AddrRangeList &snoop)
{
    cache->getAddressRanges(resp, snoop, isCpuSide);
}

int
BaseCache::CachePort::deviceBlockSize()
{
    return cache->getBlockSize();
}

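// Functional accesses have to see data that is still sitting in this
// port's queues.  checkFunctional() walks the drainList (responses waiting
// to be resent) and the transmitList (responses whose delivery tick has
// not arrived yet) and patches the probing packet against any overlapping
// packet via fixPacket() / fixDelayedResponsePacket().  Once those helpers
// report that the probe is fully satisfied (notDone == false), the walk
// stops and checkAndSendFunctional() below skips the downstream
// sendFunctional().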
bool
BaseCache::CachePort::checkFunctional(PacketPtr pkt)
{
    //Check packets buffered at this port first
    list<PacketPtr>::iterator i = drainList.begin();
    list<PacketPtr>::iterator iend = drainList.end();
    bool notDone = true;
    while (i != iend && notDone) {
        PacketPtr target = *i;
        // If the target contains data, and it overlaps the
        // probed request, we need to update the data
        if (target->intersect(pkt)) {
            DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a drain\n",
                    pkt->cmdString(), pkt->getAddr() & ~(cache->getBlockSize() - 1));
            notDone = fixPacket(pkt, target);
        }
        i++;
    }
    //Also check responses that are not yet ready to go on the drain list
    std::list<std::pair<Tick,PacketPtr> >::iterator j = transmitList.begin();
    std::list<std::pair<Tick,PacketPtr> >::iterator jend = transmitList.end();

    while (j != jend && notDone) {
        PacketPtr target = j->second;
        // If the target contains data, and it overlaps the
        // probed request, we need to update the data
        if (target->intersect(pkt)) {
            DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a response\n",
                    pkt->cmdString(), pkt->getAddr() & ~(cache->getBlockSize() - 1));
            notDone = fixDelayedResponsePacket(pkt, target);
        }
        j++;
    }
    return notDone;
}

void
BaseCache::CachePort::checkAndSendFunctional(PacketPtr pkt)
{
    bool notDone = checkFunctional(pkt);
    if (notDone)
        sendFunctional(pkt);
}

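// Called by the peer when it can accept another packet after a failed
// sendTiming().  The resend priority is: (1) any responses queued on the
// drainList, then (2) the next MSHR request on the memory side, or (3) the
// next coherence (CSHR) request on the CPU side.  If the resend fails
// again, waitingOnRetry stays set and we simply wait for the next retry.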
void
BaseCache::CachePort::recvRetry()
{
    PacketPtr pkt;
    assert(waitingOnRetry);
    if (!drainList.empty()) {
        DPRINTF(CachePort, "%s attempting to send a retry for response (%i waiting)\n",
                name(), drainList.size());
        //We have some responses to drain first
        pkt = drainList.front();
        drainList.pop_front();
        if (sendTiming(pkt)) {
            DPRINTF(CachePort, "%s successful in sending a retry for "
                    "response (%i still waiting)\n", name(), drainList.size());
            if (!drainList.empty() ||
                (!isCpuSide && cache->doMasterRequest()) ||
                (isCpuSide && cache->doSlaveRequest())) {

                DPRINTF(CachePort, "%s has more responses/requests\n", name());
                BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
                reqCpu->schedule(curTick + 1);
            }
            waitingOnRetry = false;
        }
        else {
            drainList.push_front(pkt);
        }
        // Check if we're done draining once this list is empty
        if (drainList.empty())
            cache->checkDrain();
    }
    else if (!isCpuSide)
    {
        DPRINTF(CachePort, "%s attempting to send a retry for MSHR\n", name());
        if (!cache->doMasterRequest()) {
            //This can happen if I am the owner of a block and see an upgrade
            //while the block was in my WB Buffers.  I just remove the
            //writeback and de-assert the masterRequest
            waitingOnRetry = false;
            return;
        }
        pkt = cache->getPacket();
        MSHR* mshr = (MSHR*) pkt->senderState;
        //Copy the packet, it may be modified/destroyed elsewhere
        PacketPtr copyPkt = new Packet(*pkt);
        copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
        mshr->pkt = copyPkt;

        bool success = sendTiming(pkt);
        DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                pkt->getAddr(), success ? "successful" : "unsuccessful");

        waitingOnRetry = !success;
        if (waitingOnRetry) {
            DPRINTF(CachePort, "%s now waiting on a retry\n", name());
        }

        cache->sendResult(pkt, mshr, success);

        if (success && cache->doMasterRequest())
        {
            DPRINTF(CachePort, "%s has more requests\n", name());
            //Still more to issue, rerequest in 1 cycle
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
            reqCpu->schedule(curTick + 1);
        }
    }
    else
    {
        assert(cache->doSlaveRequest());
        //pkt = cache->getCoherencePacket();
        //We save the packet, no reordering on CSHRs
        pkt = cache->getCoherencePacket();
        MSHR* cshr = (MSHR*)pkt->senderState;
        bool success = sendTiming(pkt);
        cache->sendCoherenceResult(pkt, cshr, success);
        waitingOnRetry = !success;
        if (success && cache->doSlaveRequest())
        {
            DPRINTF(CachePort, "%s has more requests\n", name());
            //Still more to issue, rerequest in 1 cycle
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
            reqCpu->schedule(curTick + 1);
        }
    }
    if (waitingOnRetry) DPRINTF(CachePort, "%s STILL Waiting on retry\n", name());
    else DPRINTF(CachePort, "%s no longer waiting on retry\n", name());
    return;
}
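
// Flow control toward the requester: setBlocked() is called when the cache
// cannot accept more requests (e.g. it has run out of MSHRs or targets),
// and clearBlocked() re-opens the port.  mustSendRetry is presumably set by
// whatever code had to reject a request while the port was blocked; if so,
// clearBlocked() issues the deferred sendRetry().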
void
BaseCache::CachePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(Cache, "Cache Blocking\n");
    blocked = true;
    //Clear the retry flag
    mustSendRetry = false;
}

void
BaseCache::CachePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(Cache, "Cache Unblocking\n");
    blocked = false;
    if (mustSendRetry)
    {
        DPRINTF(Cache, "Cache Sending Retry\n");
        mustSendRetry = false;
        sendRetry();
    }
}

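// CacheEvent serves two roles, selected by newResponse:
//  * newResponse == false: a one-shot, auto-deleted "kick" event used to
//    retry draining queued responses or to issue the next MSHR/CSHR
//    request, typically scheduled one cycle out, e.g.:
//        BaseCache::CacheEvent *ev = new BaseCache::CacheEvent(port, false);
//        ev->schedule(curTick + 1);
//  * newResponse == true: an event that pops the head of transmitList,
//    turns it into a timing response, and reschedules itself while more
//    responses are pending.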
BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort, bool _newResponse)
    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort),
      newResponse(_newResponse)
{
    if (!newResponse)
        this->setFlags(AutoDelete);
    pkt = NULL;
}

void
BaseCache::CacheEvent::process()
{
    if (!newResponse)
    {
        if (cachePort->waitingOnRetry) return;
        //We have some responses to drain first
        if (!cachePort->drainList.empty()) {
            DPRINTF(CachePort, "%s trying to drain a response\n", cachePort->name());
            if (cachePort->sendTiming(cachePort->drainList.front())) {
                DPRINTF(CachePort, "%s drains a response successfully\n", cachePort->name());
                cachePort->drainList.pop_front();
                if (!cachePort->drainList.empty() ||
                    (!cachePort->isCpuSide && cachePort->cache->doMasterRequest()) ||
                    (cachePort->isCpuSide && cachePort->cache->doSlaveRequest())) {

                    DPRINTF(CachePort, "%s still has outstanding bus reqs\n", cachePort->name());
                    this->schedule(curTick + 1);
                }
            }
            else {
                cachePort->waitingOnRetry = true;
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            }
        }
        else if (!cachePort->isCpuSide)
        {
            //MSHR
            DPRINTF(CachePort, "%s trying to send a MSHR request\n", cachePort->name());
            if (!cachePort->cache->doMasterRequest()) {
                //This can happen if I am the owner of a block and see an upgrade
                //while the block was in my WB Buffers.  I just remove the
                //writeback and de-assert the masterRequest
                return;
            }

            pkt = cachePort->cache->getPacket();
            MSHR* mshr = (MSHR*) pkt->senderState;
            //Copy the packet, it may be modified/destroyed elsewhere
            PacketPtr copyPkt = new Packet(*pkt);
            copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
            mshr->pkt = copyPkt;

            bool success = cachePort->sendTiming(pkt);
            DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                    pkt->getAddr(), success ? "successful" : "unsuccessful");

            cachePort->waitingOnRetry = !success;
            if (cachePort->waitingOnRetry) {
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            }

            cachePort->cache->sendResult(pkt, mshr, success);
            if (success && cachePort->cache->doMasterRequest())
            {
                DPRINTF(CachePort, "%s still more MSHR requests to send\n",
                        cachePort->name());
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        else
        {
            //CSHR
            assert(cachePort->cache->doSlaveRequest());
            pkt = cachePort->cache->getCoherencePacket();
            MSHR* cshr = (MSHR*) pkt->senderState;
            bool success = cachePort->sendTiming(pkt);
            cachePort->cache->sendCoherenceResult(pkt, cshr, success);
            cachePort->waitingOnRetry = !success;
            if (cachePort->waitingOnRetry)
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            if (success && cachePort->cache->doSlaveRequest())
            {
                DPRINTF(CachePort, "%s still more CSHR requests to send\n",
                        cachePort->name());
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        return;
    }
    //Else it's a response
    assert(cachePort->transmitList.size());
    assert(cachePort->transmitList.front().first <= curTick);
    pkt = cachePort->transmitList.front().second;
    cachePort->transmitList.pop_front();
    if (!cachePort->transmitList.empty()) {
        Tick time = cachePort->transmitList.front().first;
        schedule(time <= curTick ? curTick+1 : time);
    }

    if (pkt->flags & NACKED_LINE)
        pkt->result = Packet::Nacked;
    else
        pkt->result = Packet::Success;
    pkt->makeTimingResponse();
    DPRINTF(CachePort, "%s attempting to send a response\n", cachePort->name());
    if (!cachePort->drainList.empty() || cachePort->waitingOnRetry) {
        //Already have a list, just append
        cachePort->drainList.push_back(pkt);
        DPRINTF(CachePort, "%s appending response onto drain list\n", cachePort->name());
    }
    else if (!cachePort->sendTiming(pkt)) {
        //It failed, save it to the drain list
        DPRINTF(CachePort, "%s now waiting for a retry\n", cachePort->name());
        cachePort->drainList.push_back(pkt);
        cachePort->waitingOnRetry = true;
    }

    // Check if we're done draining once this list is empty
    if (cachePort->drainList.empty() && cachePort->transmitList.empty())
        cachePort->cache->checkDrain();
}

const char *
BaseCache::CacheEvent::description()
{
    return "BaseCache timing event";
}

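// init() runs once the system has been wired up: it checks that both ports
// were actually connected and then announces a range change on the CPU
// side so that the connected peer re-queries this cache's address ranges
// (served by getDeviceAddressRanges() above).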
void
BaseCache::init()
{
    if (!cpuSidePort || !memSidePort)
        panic("Cache not hooked up on both sides\n");
    cpuSidePort->sendStatusChange(Port::RangeChange);
}

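// Statistics are kept per memory command: hits, misses and missLatency are
// vectors indexed by MemCmd, and the demand/overall figures are formula
// stats derived from those vectors, e.g. (as registered below):
//     demandMisses  = misses[ReadReq] + misses[WriteReq]
//     missRate[cmd] = misses[cmd] / accesses[cmd]
// so the derived numbers follow the base counters automatically instead of
// being counted separately.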
void
BaseCache::regStats()
{
    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        hits[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
    }

    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total)
        ;
    demandHits = hits[MemCmd::ReadReq] + hits[MemCmd::WriteReq];

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total)
        ;
    overallHits = demandHits + hits[MemCmd::SoftPFReq] + hits[MemCmd::HardPFReq]
        + hits[MemCmd::Writeback];

    // Miss statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        misses[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total)
        ;
    demandMisses = misses[MemCmd::ReadReq] + misses[MemCmd::WriteReq];

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total)
        ;
    overallMisses = demandMisses + misses[MemCmd::SoftPFReq] +
        misses[MemCmd::HardPFReq] + misses[MemCmd::Writeback];

    // Miss latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missLatency[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_miss_latency")
            .desc("number of " + cstr + " miss cycles")
            .flags(total | nozero | nonan)
            ;
    }

    demandMissLatency
        .name(name() + ".demand_miss_latency")
        .desc("number of demand (read+write) miss cycles")
        .flags(total)
        ;
    demandMissLatency = missLatency[MemCmd::ReadReq] + missLatency[MemCmd::WriteReq];

    overallMissLatency
        .name(name() + ".overall_miss_latency")
        .desc("number of overall miss cycles")
        .flags(total)
        ;
    overallMissLatency = demandMissLatency + missLatency[MemCmd::SoftPFReq] +
        missLatency[MemCmd::HardPFReq];

    // access formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        accesses[access_idx]
            .name(name() + "." + cstr + "_accesses")
            .desc("number of " + cstr + " accesses (hits+misses)")
            .flags(total | nozero | nonan)
            ;

        accesses[access_idx] = hits[access_idx] + misses[access_idx];
    }

    demandAccesses
        .name(name() + ".demand_accesses")
        .desc("number of demand (read+write) accesses")
        .flags(total)
        ;
    demandAccesses = demandHits + demandMisses;

    overallAccesses
        .name(name() + ".overall_accesses")
        .desc("number of overall accesses")
        .flags(total)
        ;
    overallAccesses = overallHits + overallMisses;

    // miss rate formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missRate[access_idx]
            .name(name() + "." + cstr + "_miss_rate")
            .desc("miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;

        missRate[access_idx] = misses[access_idx] / accesses[access_idx];
    }

    demandMissRate
        .name(name() + ".demand_miss_rate")
        .desc("miss rate for demand accesses")
        .flags(total)
        ;
    demandMissRate = demandMisses / demandAccesses;

    overallMissRate
        .name(name() + ".overall_miss_rate")
        .desc("miss rate for overall accesses")
        .flags(total)
        ;
    overallMissRate = overallMisses / overallAccesses;

    // miss latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_miss_latency")
            .desc("average " + cstr + " miss latency")
            .flags(total | nozero | nonan)
            ;

        avgMissLatency[access_idx] =
            missLatency[access_idx] / misses[access_idx];
    }

    demandAvgMissLatency
        .name(name() + ".demand_avg_miss_latency")
        .desc("average demand miss latency")
        .flags(total)
        ;
    demandAvgMissLatency = demandMissLatency / demandMisses;

    overallAvgMissLatency
        .name(name() + ".overall_avg_miss_latency")
        .desc("average overall miss latency")
        .flags(total)
        ;
    overallAvgMissLatency = overallMissLatency / overallMisses;

    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .name(name() + ".blocked_cycles")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .name(name() + ".blocked")
        .desc("number of times access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .name(name() + ".avg_blocked_cycles")
        .desc("average number of cycles each access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked = blocked_cycles / blocked_causes;

    fastWrites
        .name(name() + ".fast_writes")
        .desc("number of fast writes performed")
        ;

    cacheCopies
        .name(name() + ".cache_copies")
        .desc("number of cache copies performed")
        ;

}

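// Draining: if the cache still has buffered packets it cannot drain
// immediately, so it records the event to signal, enters the Draining
// state, and reports one object still outstanding.  The per-port code
// above calls cache->checkDrain() whenever the drain and transmit lists
// empty out; that path is presumably what eventually fires drainEvent.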
unsigned int
BaseCache::drain(Event *de)
{
    // Set status
    if (!canDrain()) {
        drainEvent = de;

        changeState(SimObject::Draining);
        return 1;
    }

    changeState(SimObject::Drained);
    return 0;
}