base.cc revision 4475
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "cpu/base.hh"
#include "cpu/smt.hh"
#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/mshr.hh"

using namespace std;

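// A CachePort serves as either the CPU-side or the memory-side interface of
// the cache; the isCpuSide flag selects which role an instance plays (and,
// as seen in recvRetry() below, whether it reissues MSHR requests or
// coherence requests when the bus grants a retry).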
BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
                                bool _isCpuSide)
    : Port(_name, _cache), cache(_cache), isCpuSide(_isCpuSide)
{
    blocked = false;
    waitingOnRetry = false;
    //Start ports at null; if more than one is created we should panic
    //cpuSidePort = NULL;
    //memSidePort = NULL;
}

void
BaseCache::CachePort::recvStatusChange(Port::Status status)
{
    cache->recvStatusChange(status, isCpuSide);
}

void
BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
{
    cache->getAddressRanges(resp, snoop, isCpuSide);
}

int
BaseCache::CachePort::deviceBlockSize()
{
    return cache->getBlockSize();
}

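// Compare a functional access against every packet queued on this port
// (responses waiting on the drain list as well as responses scheduled on
// the transmit list) and patch up overlapping data.  Returns true if the
// access is still not fully satisfied and must be forwarded onward.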
bool
BaseCache::CachePort::checkFunctional(PacketPtr pkt)
{
    //Check the queued responses (drain list) first
    list<PacketPtr>::iterator i = drainList.begin();
    list<PacketPtr>::iterator iend = drainList.end();
    bool notDone = true;
    while (i != iend && notDone) {
        PacketPtr target = *i;
        // If the target contains data and it overlaps the
        // probed request, we need to update the data
        if (target->intersect(pkt)) {
            DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a drain\n",
                    pkt->cmdString(), pkt->getAddr() & ~(cache->getBlockSize() - 1));
            notDone = fixPacket(pkt, target);
        }
        i++;
    }
    //Also check responses that are not yet ready to be on the drain list
    std::list<std::pair<Tick,PacketPtr> >::iterator j = transmitList.begin();
    std::list<std::pair<Tick,PacketPtr> >::iterator jend = transmitList.end();

    while (j != jend && notDone) {
        PacketPtr target = j->second;
        // If the target contains data and it overlaps the
        // probed request, we need to update the data
        if (target->intersect(pkt)) {
            DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a response\n",
                    pkt->cmdString(), pkt->getAddr() & ~(cache->getBlockSize() - 1));
            notDone = fixDelayedResponsePacket(pkt, target);
        }
        j++;
    }
    return notDone;
}

void
BaseCache::CachePort::checkAndSendFunctional(PacketPtr pkt)
{
    bool notDone = checkFunctional(pkt);
    if (notDone)
        sendFunctional(pkt);
}

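// Handle a retry from the peer port after a failed sendTiming().  Queued
// responses are drained first; otherwise the memory side reissues the next
// MSHR request and the CPU side reissues the next coherence request.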
void
BaseCache::CachePort::recvRetry()
{
    PacketPtr pkt;
    assert(waitingOnRetry);
    if (!drainList.empty()) {
        DPRINTF(CachePort, "%s attempting to send a retry for response (%i waiting)\n",
                name(), drainList.size());
        //We have some responses to drain first
        pkt = drainList.front();
        drainList.pop_front();
        if (sendTiming(pkt)) {
            DPRINTF(CachePort, "%s successful in sending a retry for "
                    "response (%i still waiting)\n", name(), drainList.size());
            if (!drainList.empty() ||
                (!isCpuSide && cache->doMasterRequest()) ||
                (isCpuSide && cache->doSlaveRequest())) {

                DPRINTF(CachePort, "%s has more responses/requests\n", name());
                new BaseCache::RequestEvent(this, curTick + 1);
            }
            waitingOnRetry = false;
        }
        else {
            drainList.push_front(pkt);
        }
        // Check if we're done draining once this list is empty
        if (drainList.empty())
            cache->checkDrain();
    }
    else if (!isCpuSide)
    {
        DPRINTF(CachePort, "%s attempting to send a retry for MSHR\n", name());
        if (!cache->doMasterRequest()) {
            //This can happen if I am the owner of a block and see an upgrade
            //while the block was in my WB buffers; I just remove the
            //wb and de-assert the masterRequest
            waitingOnRetry = false;
            return;
        }
        pkt = cache->getPacket();
        MSHR* mshr = (MSHR*) pkt->senderState;
        //Copy the packet; it may be modified/destroyed elsewhere
        PacketPtr copyPkt = new Packet(*pkt);
        copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
        mshr->pkt = copyPkt;

        bool success = sendTiming(pkt);
        DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                pkt->getAddr(), success ? "successful" : "unsuccessful");

        waitingOnRetry = !success;
        if (waitingOnRetry) {
            DPRINTF(CachePort, "%s now waiting on a retry\n", name());
        }

        cache->sendResult(pkt, mshr, success);

        if (success && cache->doMasterRequest())
        {
            DPRINTF(CachePort, "%s has more requests\n", name());
            //Still more to issue, rerequest in 1 cycle
            new BaseCache::RequestEvent(this, curTick + 1);
        }
    }
    else
    {
        assert(cache->doSlaveRequest());
        //We save the packet, no reordering on CSHRs
        pkt = cache->getCoherencePacket();
        MSHR* cshr = (MSHR*)pkt->senderState;
        bool success = sendTiming(pkt);
        cache->sendCoherenceResult(pkt, cshr, success);
        waitingOnRetry = !success;
        if (success && cache->doSlaveRequest())
        {
            DPRINTF(CachePort, "%s has more requests\n", name());
            //Still more to issue, rerequest in 1 cycle
            new BaseCache::RequestEvent(this, curTick + 1);
        }
    }
    if (waitingOnRetry) DPRINTF(CachePort, "%s STILL waiting on retry\n", name());
    else DPRINTF(CachePort, "%s no longer waiting on retry\n", name());
    return;
}

void
BaseCache::CachePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(Cache, "Cache Blocking\n");
    blocked = true;
    //Clear the retry flag
    mustSendRetry = false;
}

void
BaseCache::CachePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(Cache, "Cache Unblocking\n");
    blocked = false;
    if (mustSendRetry)
    {
        DPRINTF(Cache, "Cache Sending Retry\n");
        mustSendRetry = false;
        sendRetry();
    }
}

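// RequestEvent schedules itself at construction and is marked AutoDelete,
// so callers simply allocate one and forget about it, e.g. (as done in
// recvRetry() above):
//
//     new BaseCache::RequestEvent(this, curTick + 1);
//
// to retry issuing a bus request one cycle later.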
BaseCache::RequestEvent::RequestEvent(CachePort *_cachePort, Tick when)
    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
{
    this->setFlags(AutoDelete);
    schedule(when);
}

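// Each firing tries to make forward progress on one packet: drain a queued
// response if any are waiting, otherwise send the next MSHR request (memory
// side) or coherence request (CPU side), rescheduling one cycle later while
// more work remains and no retry is pending.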
void
BaseCache::RequestEvent::process()
{
    if (cachePort->waitingOnRetry) return;
    //We have some responses to drain first
    if (!cachePort->drainList.empty()) {
        DPRINTF(CachePort, "%s trying to drain a response\n", cachePort->name());
        if (cachePort->sendTiming(cachePort->drainList.front())) {
            DPRINTF(CachePort, "%s drains a response successfully\n", cachePort->name());
            cachePort->drainList.pop_front();
            if (!cachePort->drainList.empty() ||
                (!cachePort->isCpuSide && cachePort->cache->doMasterRequest()) ||
                (cachePort->isCpuSide && cachePort->cache->doSlaveRequest())) {

                DPRINTF(CachePort, "%s still has outstanding bus reqs\n", cachePort->name());
                this->schedule(curTick + 1);
            }
        }
        else {
            cachePort->waitingOnRetry = true;
            DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
        }
    }
    else if (!cachePort->isCpuSide)
    {
        //MSHR
        DPRINTF(CachePort, "%s trying to send a MSHR request\n", cachePort->name());
        if (!cachePort->cache->doMasterRequest()) {
            //This can happen if I am the owner of a block and see an upgrade
            //while the block was in my WB buffers; I just remove the
            //wb and de-assert the masterRequest
            return;
        }

        PacketPtr pkt = cachePort->cache->getPacket();
        MSHR* mshr = (MSHR*) pkt->senderState;
        //Copy the packet; it may be modified/destroyed elsewhere
        PacketPtr copyPkt = new Packet(*pkt);
        copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
        mshr->pkt = copyPkt;

        bool success = cachePort->sendTiming(pkt);
        DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                pkt->getAddr(), success ? "successful" : "unsuccessful");

        cachePort->waitingOnRetry = !success;
        if (cachePort->waitingOnRetry) {
            DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
        }

        cachePort->cache->sendResult(pkt, mshr, success);
        if (success && cachePort->cache->doMasterRequest())
        {
            DPRINTF(CachePort, "%s still more MSHR requests to send\n",
                    cachePort->name());
            //Still more to issue, rerequest in 1 cycle
            this->schedule(curTick+1);
        }
    }
    else
    {
        //CSHR
        assert(cachePort->cache->doSlaveRequest());
        PacketPtr pkt = cachePort->cache->getCoherencePacket();
        MSHR* cshr = (MSHR*) pkt->senderState;
        bool success = cachePort->sendTiming(pkt);
        cachePort->cache->sendCoherenceResult(pkt, cshr, success);
        cachePort->waitingOnRetry = !success;
        if (cachePort->waitingOnRetry)
            DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
        if (success && cachePort->cache->doSlaveRequest())
        {
            DPRINTF(CachePort, "%s still more CSHR requests to send\n",
                    cachePort->name());
            //Still more to issue, rerequest in 1 cycle
            this->schedule(curTick+1);
        }
    }
}

const char *
BaseCache::RequestEvent::description()
{
    return "Cache request event";
}

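// ResponseEvent services the time-ordered transmitList.  Unlike
// RequestEvent it is neither auto-deleting nor self-scheduling; it is
// rescheduled from process() for the next queued response (and presumably
// first scheduled by the cache when a response is queued).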
BaseCache::ResponseEvent::ResponseEvent(CachePort *_cachePort)
    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
{
}

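// Pop the response at the head of transmitList (which must be due by now),
// reschedule for the next queued response if any remain, convert the packet
// into a timing response (Nacked if the NACKED_LINE flag is set), and either
// send it or append it to drainList when the port is busy or backed up.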
void
BaseCache::ResponseEvent::process()
{
    assert(cachePort->transmitList.size());
    assert(cachePort->transmitList.front().first <= curTick);
    PacketPtr pkt = cachePort->transmitList.front().second;
    cachePort->transmitList.pop_front();
    if (!cachePort->transmitList.empty()) {
        Tick time = cachePort->transmitList.front().first;
        schedule(time <= curTick ? curTick+1 : time);
    }

    if (pkt->flags & NACKED_LINE)
        pkt->result = Packet::Nacked;
    else
        pkt->result = Packet::Success;
    pkt->makeTimingResponse();
    DPRINTF(CachePort, "%s attempting to send a response\n", cachePort->name());
    if (!cachePort->drainList.empty() || cachePort->waitingOnRetry) {
        //Already have responses queued to drain; just append
        cachePort->drainList.push_back(pkt);
        DPRINTF(CachePort, "%s appending response onto drain list\n", cachePort->name());
    }
    else if (!cachePort->sendTiming(pkt)) {
        //It failed, save it to the list of drain events
        DPRINTF(CachePort, "%s now waiting for a retry\n", cachePort->name());
        cachePort->drainList.push_back(pkt);
        cachePort->waitingOnRetry = true;
    }

    // Check if we're done draining once this list is empty
    if (cachePort->drainList.empty() && cachePort->transmitList.empty())
        cachePort->cache->checkDrain();
}

const char *
BaseCache::ResponseEvent::description()
{
    return "Cache response event";
}


void
BaseCache::init()
{
    if (!cpuSidePort || !memSidePort)
        panic("Cache not hooked up on both sides\n");
    cpuSidePort->sendStatusChange(Port::RangeChange);
}

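// Register statistics: raw per-command hit/miss/latency counters, the
// demand and overall rollups, the derived formulas (accesses, miss rates,
// average miss latencies), and the blocking statistics.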
void
BaseCache::regStats()
{
    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        hits[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
    }

    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total)
        ;
    demandHits = hits[MemCmd::ReadReq] + hits[MemCmd::WriteReq];

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total)
        ;
    overallHits = demandHits + hits[MemCmd::SoftPFReq] + hits[MemCmd::HardPFReq]
        + hits[MemCmd::Writeback];

    // Miss statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        misses[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total)
        ;
    demandMisses = misses[MemCmd::ReadReq] + misses[MemCmd::WriteReq];

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total)
        ;
    overallMisses = demandMisses + misses[MemCmd::SoftPFReq] +
        misses[MemCmd::HardPFReq] + misses[MemCmd::Writeback];

    // Miss latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missLatency[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_miss_latency")
            .desc("number of " + cstr + " miss cycles")
            .flags(total | nozero | nonan)
            ;
    }

    demandMissLatency
        .name(name() + ".demand_miss_latency")
        .desc("number of demand (read+write) miss cycles")
        .flags(total)
        ;
    demandMissLatency = missLatency[MemCmd::ReadReq] + missLatency[MemCmd::WriteReq];

    overallMissLatency
        .name(name() + ".overall_miss_latency")
        .desc("number of overall miss cycles")
        .flags(total)
        ;
    overallMissLatency = demandMissLatency + missLatency[MemCmd::SoftPFReq] +
        missLatency[MemCmd::HardPFReq];
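    // Note: overallMisses above includes Writeback misses, but no Writeback
    // term is added to overallMissLatency, so the overall average below
    // divides demand+prefetch latency by a miss count that includes
    // writebacks (presumably because writebacks are not waited on).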

    // Access formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        accesses[access_idx]
            .name(name() + "." + cstr + "_accesses")
            .desc("number of " + cstr + " accesses (hits+misses)")
            .flags(total | nozero | nonan)
            ;

        accesses[access_idx] = hits[access_idx] + misses[access_idx];
    }

    demandAccesses
        .name(name() + ".demand_accesses")
        .desc("number of demand (read+write) accesses")
        .flags(total)
        ;
    demandAccesses = demandHits + demandMisses;

    overallAccesses
        .name(name() + ".overall_accesses")
        .desc("number of overall (read+write) accesses")
        .flags(total)
        ;
    overallAccesses = overallHits + overallMisses;

    // Miss rate formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missRate[access_idx]
            .name(name() + "." + cstr + "_miss_rate")
            .desc("miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;

        missRate[access_idx] = misses[access_idx] / accesses[access_idx];
    }

    demandMissRate
        .name(name() + ".demand_miss_rate")
        .desc("miss rate for demand accesses")
        .flags(total)
        ;
    demandMissRate = demandMisses / demandAccesses;

    overallMissRate
        .name(name() + ".overall_miss_rate")
        .desc("miss rate for overall accesses")
        .flags(total)
        ;
    overallMissRate = overallMisses / overallAccesses;

    // Average miss latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_miss_latency")
            .desc("average " + cstr + " miss latency")
            .flags(total | nozero | nonan)
            ;

        avgMissLatency[access_idx] =
            missLatency[access_idx] / misses[access_idx];
    }

    demandAvgMissLatency
        .name(name() + ".demand_avg_miss_latency")
        .desc("average demand miss latency")
        .flags(total)
        ;
    demandAvgMissLatency = demandMissLatency / demandMisses;

    overallAvgMissLatency
        .name(name() + ".overall_avg_miss_latency")
        .desc("average overall miss latency")
        .flags(total)
        ;
    overallAvgMissLatency = overallMissLatency / overallMisses;

    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .name(name() + ".blocked_cycles")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .name(name() + ".blocked")
        .desc("number of times access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .name(name() + ".avg_blocked_cycles")
        .desc("average number of cycles each access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked = blocked_cycles / blocked_causes;

    fastWrites
        .name(name() + ".fast_writes")
        .desc("number of fast writes performed")
        ;

    cacheCopies
        .name(name() + ".cache_copies")
        .desc("number of cache copies performed")
        ;
}

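// Drain interface: if the cache cannot drain immediately (canDrain() is
// false, presumably because responses or requests are still outstanding),
// stash the completion event, enter the Draining state, and report one
// object still draining; otherwise report that we are already drained.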
unsigned int
BaseCache::drain(Event *de)
{
    // Set status
    if (!canDrain()) {
        drainEvent = de;

        changeState(SimObject::Draining);
        return 1;
    }

    changeState(SimObject::Drained);
    return 0;
}
599