base.cc revision 3503
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "cpu/base.hh"
#include "cpu/smt.hh"
#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/mshr.hh"

using namespace std;

BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
                                bool _isCpuSide)
    : Port(_name, _cache), cache(_cache), isCpuSide(_isCpuSide)
{
    blocked = false;
    waitingOnRetry = false;
    // Start ports at null; if more than one is created we should panic
    //cpuSidePort = NULL;
    //memSidePort = NULL;
}

void
BaseCache::CachePort::recvStatusChange(Port::Status status)
{
    cache->recvStatusChange(status, isCpuSide);
}

void
BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList &resp,
                                       AddrRangeList &snoop)
{
    cache->getAddressRanges(resp, snoop, isCpuSide);
}

int
BaseCache::CachePort::deviceBlockSize()
{
    return cache->getBlockSize();
}

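// recvTiming: entry point for timing-mode packets on this port. CPU-side
// invalidates/upgrades are consumed here (marked SATISFIED, no response
// needed), requests arriving while the cache is blocked are refused so a
// retry can be sent later, and everything else goes to the cache proper.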
bool
BaseCache::CachePort::recvTiming(PacketPtr pkt)
{
    if (isCpuSide
        && !pkt->req->isUncacheable()
        && pkt->isInvalidate()
        && !pkt->isRead() && !pkt->isWrite()) {
        //Upgrade or Invalidate
        //Look into what happens if two slave caches on bus
        DPRINTF(Cache, "%s %x ?\n", pkt->cmdString(), pkt->getAddr());

        assert(!(pkt->flags & SATISFIED));
        pkt->flags |= SATISFIED;
        //Invalidates/Upgrades need no response if they get the bus
        return true;
    }

    if (pkt->isRequest() && blocked)
    {
        DPRINTF(Cache,"Scheduling a retry while blocked\n");
        mustSendRetry = true;
        return false;
    }
    return cache->doTimingAccess(pkt, this, isCpuSide);
}

Tick
BaseCache::CachePort::recvAtomic(PacketPtr pkt)
{
    return cache->doAtomicAccess(pkt, isCpuSide);
}

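// recvFunctional: before forwarding a functional (debug) access to the
// cache, patch its data against any responses still queued in the drain
// list so the probe sees data that has not yet made it onto the bus.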
void
BaseCache::CachePort::recvFunctional(PacketPtr pkt)
{
    //Check storage here first
    list<PacketPtr>::iterator i = drainList.begin();
    list<PacketPtr>::iterator end = drainList.end();
    for (; i != end; ++i) {
        PacketPtr target = *i;
        // If the target contains data, and it overlaps the
        // probed request, need to update data
        if (target->intersect(pkt)) {
            fixPacket(pkt, target);
        }
    }
    cache->doFunctionalAccess(pkt, isCpuSide);
}

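// recvRetry: called when the peer can accept another packet after an
// earlier sendTiming() failed. Queued responses on the drain list are
// retried first; otherwise the memory side re-issues the next MSHR
// request and the CPU side re-issues the next coherence (CSHR) request,
// scheduling a CacheEvent one cycle later if more work remains.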
void
BaseCache::CachePort::recvRetry()
{
    PacketPtr pkt;
    assert(waitingOnRetry);
    if (!drainList.empty()) {
        DPRINTF(CachePort, "%s attempting to send a retry for response\n", name());
        //We have some responses to drain first
        if (sendTiming(drainList.front())) {
            DPRINTF(CachePort, "%s successful in sending a retry for response\n", name());
            drainList.pop_front();
            if (!drainList.empty() ||
                (!isCpuSide && cache->doMasterRequest()) ||
                (isCpuSide && cache->doSlaveRequest())) {

                DPRINTF(CachePort, "%s has more responses/requests\n", name());
                BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
                reqCpu->schedule(curTick + 1);
            }
            waitingOnRetry = false;
        }
        // Check if we're done draining once this list is empty
        if (drainList.empty())
            cache->checkDrain();
    }
    else if (!isCpuSide)
    {
        DPRINTF(CachePort, "%s attempting to send a retry for MSHR\n", name());
        if (!cache->doMasterRequest()) {
            //This can happen if I am the owner of a block and see an upgrade
            //while the block was in my WB Buffers.  I just remove the
            //wb and de-assert the masterRequest
            waitingOnRetry = false;
            return;
        }
        pkt = cache->getPacket();
        MSHR* mshr = (MSHR*) pkt->senderState;
        //Copy the packet, it may be modified/destroyed elsewhere
        PacketPtr copyPkt = new Packet(*pkt);
        copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
        mshr->pkt = copyPkt;

        bool success = sendTiming(pkt);
        DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                pkt->getAddr(), success ? "successful" : "unsuccessful");

        waitingOnRetry = !success;
        if (waitingOnRetry) {
            DPRINTF(CachePort, "%s now waiting on a retry\n", name());
        }

        cache->sendResult(pkt, mshr, success);

        if (success && cache->doMasterRequest())
        {
            DPRINTF(CachePort, "%s has more requests\n", name());
            //Still more to issue, rerequest in 1 cycle
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
            reqCpu->schedule(curTick + 1);
        }
    }
    else
    {
        assert(cache->doSlaveRequest());
        //pkt = cache->getCoherencePacket();
        //We save the packet, no reordering on CSHRS
        pkt = cache->getCoherencePacket();
        MSHR* cshr = (MSHR*)pkt->senderState;
        bool success = sendTiming(pkt);
        cache->sendCoherenceResult(pkt, cshr, success);
        waitingOnRetry = !success;
        if (success && cache->doSlaveRequest())
        {
            DPRINTF(CachePort, "%s has more requests\n", name());
            //Still more to issue, rerequest in 1 cycle
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
            reqCpu->schedule(curTick + 1);
        }
    }
    if (waitingOnRetry) DPRINTF(CachePort, "%s STILL Waiting on retry\n", name());
    else DPRINTF(CachePort, "%s no longer waiting on retry\n", name());
    return;
}
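
// setBlocked()/clearBlocked(): flow control for this port. While blocked,
// incoming requests are refused; clearBlocked() reopens the port and sends
// the retry that was deferred while blocked, if any.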
void
BaseCache::CachePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(Cache, "Cache Blocking\n");
    blocked = true;
    //Clear the retry flag
    mustSendRetry = false;
}

void
BaseCache::CachePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(Cache, "Cache Unblocking\n");
    blocked = false;
    if (mustSendRetry)
    {
        DPRINTF(Cache, "Cache Sending Retry\n");
        mustSendRetry = false;
        sendRetry();
    }
}

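// CacheEvents are created with the AutoDelete flag, so each instance frees
// itself after it is processed. The packet-less form drives request/drain
// retries; the form carrying a packet sends a response.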
BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort)
    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
{
    this->setFlags(AutoDelete);
    pkt = NULL;
}

BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort, PacketPtr _pkt)
    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort), pkt(_pkt)
{
    this->setFlags(AutoDelete);
}

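// process(): with no packet attached, drain one queued response or issue
// the next MSHR/coherence request, rescheduling one cycle later while work
// remains. With a packet attached, turn it into a timing response and send
// it, falling back to the drain list if the send fails.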
void
BaseCache::CacheEvent::process()
{
    if (!pkt)
    {
        if (cachePort->waitingOnRetry) return;
        //We have some responses to drain first
        if (!cachePort->drainList.empty()) {
            DPRINTF(CachePort, "%s trying to drain a response\n", cachePort->name());
            if (cachePort->sendTiming(cachePort->drainList.front())) {
                DPRINTF(CachePort, "%s drains a response successfully\n", cachePort->name());
                cachePort->drainList.pop_front();
                if (!cachePort->drainList.empty() ||
                    (!cachePort->isCpuSide && cachePort->cache->doMasterRequest()) ||
                    (cachePort->isCpuSide && cachePort->cache->doSlaveRequest())) {

                    DPRINTF(CachePort, "%s still has outstanding bus reqs\n", cachePort->name());
                    this->schedule(curTick + 1);
                }
            }
            else {
                cachePort->waitingOnRetry = true;
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            }
        }
        else if (!cachePort->isCpuSide)
        {            //MSHR
            DPRINTF(CachePort, "%s trying to send a MSHR request\n", cachePort->name());
            if (!cachePort->cache->doMasterRequest()) {
                //This can happen if I am the owner of a block and see an upgrade
                //while the block was in my WB Buffers.  I just remove the
                //wb and de-assert the masterRequest
                return;
            }

            pkt = cachePort->cache->getPacket();
            MSHR* mshr = (MSHR*) pkt->senderState;
            //Copy the packet, it may be modified/destroyed elsewhere
            PacketPtr copyPkt = new Packet(*pkt);
            copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
            mshr->pkt = copyPkt;

            bool success = cachePort->sendTiming(pkt);
            DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                    pkt->getAddr(), success ? "successful" : "unsuccessful");

            cachePort->waitingOnRetry = !success;
            if (cachePort->waitingOnRetry) {
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            }

            cachePort->cache->sendResult(pkt, mshr, success);
            if (success && cachePort->cache->doMasterRequest())
            {
                DPRINTF(CachePort, "%s still more MSHR requests to send\n",
                        cachePort->name());
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        else
        {
            //CSHR
            assert(cachePort->cache->doSlaveRequest());
            pkt = cachePort->cache->getCoherencePacket();
            MSHR* cshr = (MSHR*) pkt->senderState;
            bool success = cachePort->sendTiming(pkt);
            cachePort->cache->sendCoherenceResult(pkt, cshr, success);
            cachePort->waitingOnRetry = !success;
            if (cachePort->waitingOnRetry)
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            if (success && cachePort->cache->doSlaveRequest())
            {
                DPRINTF(CachePort, "%s still more CSHR requests to send\n",
                        cachePort->name());
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        return;
    }
    //Response
    //Know the packet to send
    if (pkt->flags & NACKED_LINE)
        pkt->result = Packet::Nacked;
    else
        pkt->result = Packet::Success;
    pkt->makeTimingResponse();
    DPRINTF(CachePort, "%s attempting to send a response\n", cachePort->name());
    if (!cachePort->drainList.empty() || cachePort->waitingOnRetry) {
        //Already have a list, just append
        cachePort->drainList.push_back(pkt);
        DPRINTF(CachePort, "%s appending response onto drain list\n", cachePort->name());
    }
    else if (!cachePort->sendTiming(pkt)) {
        //It failed, save it to list of drain events
        DPRINTF(CachePort, "%s now waiting for a retry\n", cachePort->name());
        cachePort->drainList.push_back(pkt);
        cachePort->waitingOnRetry = true;
    }

    // Check if we're done draining once this list is empty
    if (cachePort->drainList.empty())
        cachePort->cache->checkDrain();
}

const char *
BaseCache::CacheEvent::description()
{
    return "timing event\n";
}

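// getPort: hand out ports by name. "cpu_side" (or an empty name) returns
// the single CPU-side port, "functional" returns a fresh CPU-side port for
// debug accesses, and "mem_side" may only be created once.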
Port*
BaseCache::getPort(const std::string &if_name, int idx)
{
    if (if_name == "")
    {
        if(cpuSidePort == NULL)
            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
        return cpuSidePort;
    }
    else if (if_name == "functional")
    {
        return new CachePort(name() + "-cpu_side_port", this, true);
    }
    else if (if_name == "cpu_side")
    {
        if(cpuSidePort == NULL)
            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
        return cpuSidePort;
    }
    else if (if_name == "mem_side")
    {
        if (memSidePort != NULL)
            panic("Already have a mem side for this cache\n");
        memSidePort = new CachePort(name() + "-mem_side_port", this, false);
        return memSidePort;
    }
    else panic("Port name %s unrecognized\n", if_name);
}

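// init: verify that both sides of the cache are connected, then push a
// range change out the CPU side so peers re-query our address ranges.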
void
BaseCache::init()
{
    if (!cpuSidePort || !memSidePort)
        panic("Cache not hooked up on both sides\n");
    cpuSidePort->sendStatusChange(Port::RangeChange);
}

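// regStats: register all statistics with the stats package. The temporary
// request/packet pair below exists only so cmdIdxToString() can be used to
// build per-command stat names.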
void
BaseCache::regStats()
{
    Request temp_req((Addr) NULL, 4, 0);
    Packet::Command temp_cmd = Packet::ReadReq;
    Packet temp_pkt(&temp_req, temp_cmd, 0);  //@todo Fix command strings so this isn't necessary
    temp_pkt.allocate(); //Temp allocate, all need data

    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        hits[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
    }

    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total)
        ;
    demandHits = hits[Packet::ReadReq] + hits[Packet::WriteReq];

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total)
        ;
    overallHits = demandHits + hits[Packet::SoftPFReq] + hits[Packet::HardPFReq]
        + hits[Packet::Writeback];

    // Miss statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        misses[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total)
        ;
    demandMisses = misses[Packet::ReadReq] + misses[Packet::WriteReq];

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total)
        ;
    overallMisses = demandMisses + misses[Packet::SoftPFReq] +
        misses[Packet::HardPFReq] + misses[Packet::Writeback];

    // Miss latency statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        missLatency[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_miss_latency")
            .desc("number of " + cstr + " miss cycles")
            .flags(total | nozero | nonan)
            ;
    }

    demandMissLatency
        .name(name() + ".demand_miss_latency")
        .desc("number of demand (read+write) miss cycles")
        .flags(total)
        ;
    demandMissLatency = missLatency[Packet::ReadReq] + missLatency[Packet::WriteReq];

    overallMissLatency
        .name(name() + ".overall_miss_latency")
        .desc("number of overall miss cycles")
        .flags(total)
        ;
    overallMissLatency = demandMissLatency + missLatency[Packet::SoftPFReq] +
        missLatency[Packet::HardPFReq];

    // access formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        accesses[access_idx]
            .name(name() + "." + cstr + "_accesses")
            .desc("number of " + cstr + " accesses (hits+misses)")
            .flags(total | nozero | nonan)
            ;

        accesses[access_idx] = hits[access_idx] + misses[access_idx];
    }

    demandAccesses
        .name(name() + ".demand_accesses")
        .desc("number of demand (read+write) accesses")
        .flags(total)
        ;
    demandAccesses = demandHits + demandMisses;

    overallAccesses
        .name(name() + ".overall_accesses")
        .desc("number of overall (read+write) accesses")
        .flags(total)
        ;
    overallAccesses = overallHits + overallMisses;

    // miss rate formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        missRate[access_idx]
            .name(name() + "." + cstr + "_miss_rate")
            .desc("miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;

        missRate[access_idx] = misses[access_idx] / accesses[access_idx];
    }

    demandMissRate
        .name(name() + ".demand_miss_rate")
        .desc("miss rate for demand accesses")
        .flags(total)
        ;
    demandMissRate = demandMisses / demandAccesses;

    overallMissRate
        .name(name() + ".overall_miss_rate")
        .desc("miss rate for overall accesses")
        .flags(total)
        ;
    overallMissRate = overallMisses / overallAccesses;

    // miss latency formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        avgMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_miss_latency")
            .desc("average " + cstr + " miss latency")
            .flags(total | nozero | nonan)
            ;

        avgMissLatency[access_idx] =
            missLatency[access_idx] / misses[access_idx];
    }

    demandAvgMissLatency
        .name(name() + ".demand_avg_miss_latency")
        .desc("average demand miss latency")
        .flags(total)
        ;
    demandAvgMissLatency = demandMissLatency / demandMisses;

    overallAvgMissLatency
        .name(name() + ".overall_avg_miss_latency")
        .desc("average overall miss latency")
        .flags(total)
        ;
    overallAvgMissLatency = overallMissLatency / overallMisses;

    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .name(name() + ".blocked_cycles")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;


    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .name(name() + ".blocked")
        .desc("number of times access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .name(name() + ".avg_blocked_cycles")
        .desc("average number of cycles each access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked = blocked_cycles / blocked_causes;

    fastWrites
        .name(name() + ".fast_writes")
        .desc("number of fast writes performed")
        ;

    cacheCopies
        .name(name() + ".cache_copies")
        .desc("number of cache copies performed")
        ;

}

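// drain: part of the SimObject drain protocol. If requests are still
// outstanding, remember the drain event and return 1; the event is expected
// to be signalled once draining completes (the checkDrain() calls above).
// Otherwise return 0 to indicate the cache is already drained.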
unsigned int
BaseCache::drain(Event *de)
{
    // Set status
    if (!canDrain()) {
        drainEvent = de;

        changeState(SimObject::Draining);
        return 1;
    }

    changeState(SimObject::Drained);
    return 0;
}
624