// base.cc, revision 3236
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "mem/cache/base_cache.hh"
#include "cpu/smt.hh"
#include "cpu/base.hh"

using namespace std;

BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
                                bool _isCpuSide)
    : Port(_name), cache(_cache), isCpuSide(_isCpuSide)
{
    blocked = false;
    cshrRetry = NULL;
    waitingOnRetry = false;
    //Ports start at NULL; if more than one per side is created we should panic
    //cpuSidePort = NULL;
    //memSidePort = NULL;
}

void
BaseCache::CachePort::recvStatusChange(Port::Status status)
{
    cache->recvStatusChange(status, isCpuSide);
}

void
BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList &resp,
                                       AddrRangeList &snoop)
{
    cache->getAddressRanges(resp, snoop, isCpuSide);
}

int
BaseCache::CachePort::deviceBlockSize()
{
    return cache->getBlockSize();
}

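// Timing-mode receive hook. CPU-side invalidates/upgrades are satisfied
// immediately (they need no response once they win the bus); other requests
// get a retry scheduled while the cache is blocked, and everything else is
// handed to the cache proper.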
bool
BaseCache::CachePort::recvTiming(Packet *pkt)
{
    if (isCpuSide
        && !pkt->req->isUncacheable()
        && pkt->isInvalidate()
        && !pkt->isRead() && !pkt->isWrite()) {
        //Upgrade or Invalidate
        //Look into what happens if two slave caches on bus
        DPRINTF(Cache, "%s %x ? blk_addr: %x\n", pkt->cmdString(),
                pkt->getAddr() & (((ULL(1))<<48)-1),
                pkt->getAddr() & ~((Addr)cache->blkSize - 1));

        assert(!(pkt->flags & SATISFIED));
        pkt->flags |= SATISFIED;
        //Invalidates/Upgrades need no response if they get the bus
        return true;
    }

    if (pkt->isRequest() && blocked)
    {
        DPRINTF(Cache,"Scheduling a retry while blocked\n");
        mustSendRetry = true;
        return false;
    }
    return cache->doTimingAccess(pkt, this, isCpuSide);
}

Tick
BaseCache::CachePort::recvAtomic(Packet *pkt)
{
    return cache->doAtomicAccess(pkt, isCpuSide);
}

void
BaseCache::CachePort::recvFunctional(Packet *pkt)
{
    cache->doFunctionalAccess(pkt, isCpuSide);
}

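// Called when the peer signals that a previously failed sendTiming may be
// retried. Priority order: drain any queued responses first, then (on the
// memory side) reissue the next MSHR request from the cache, or (on the CPU
// side) reissue the saved coherence packet in cshrRetry.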
void
BaseCache::CachePort::recvRetry()
{
    Packet *pkt;
    assert(waitingOnRetry);
    if (!drainList.empty()) {
        //We have some responses to drain first
        if (sendTiming(drainList.front())) {
            drainList.pop_front();
            if (!drainList.empty() ||
                (!isCpuSide && cache->doMasterRequest()) ||
                (isCpuSide && cache->doSlaveRequest())) {
                BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
                reqCpu->schedule(curTick + 1);
            }
            waitingOnRetry = false;
        }
    }
    else if (!isCpuSide)
    {
        assert(cache->doMasterRequest());
        pkt = cache->getPacket();
        MSHR* mshr = (MSHR*)pkt->senderState;
        bool success = sendTiming(pkt);
        DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                pkt->getAddr(), success ? "successful" : "unsuccessful");
        cache->sendResult(pkt, mshr, success);
        waitingOnRetry = !success;
        if (success && cache->doMasterRequest())
        {
            //Still more to issue, rerequest in 1 cycle
            pkt = NULL;
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
            reqCpu->schedule(curTick + 1);
        }
    }
    else
    {
        assert(cshrRetry);
        //pkt = cache->getCoherencePacket();
        //We save the packet, no reordering on CSHRs
        pkt = cshrRetry;
        bool success = sendTiming(pkt);
        waitingOnRetry = !success;
        if (success && cache->doSlaveRequest())
        {
            //Still more to issue, rerequest in 1 cycle
            pkt = NULL;
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
            reqCpu->schedule(curTick + 1);
            cshrRetry = NULL;
        }
    }
    return;
}

void
BaseCache::CachePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(Cache, "Cache Blocking\n");
    blocked = true;
    //Clear the retry flag
    mustSendRetry = false;
}

void
BaseCache::CachePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(Cache, "Cache Unblocking\n");
    blocked = false;
    if (mustSendRetry)
    {
        DPRINTF(Cache, "Cache Sending Retry\n");
        mustSendRetry = false;
        sendRetry();
    }
}

BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort)
    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
{
    this->setFlags(AutoDelete);
    pkt = NULL;
}

BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort, Packet *_pkt)
    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort), pkt(_pkt)
{
    this->setFlags(AutoDelete);
}

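// A CacheEvent with no packet is a request event: it drains queued responses
// or issues the next MSHR/coherence request on its port. A CacheEvent that
// carries a packet turns it into a timing response and sends it, queuing it
// on the drain list if the send fails.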
void
BaseCache::CacheEvent::process()
{
    if (!pkt)
    {
        if (cachePort->waitingOnRetry) return;
        //We have some responses to drain first
        if (!cachePort->drainList.empty()) {
            if (cachePort->sendTiming(cachePort->drainList.front())) {
                cachePort->drainList.pop_front();
                if (!cachePort->drainList.empty() ||
                    (!cachePort->isCpuSide && cachePort->cache->doMasterRequest()) ||
                    (cachePort->isCpuSide && cachePort->cache->doSlaveRequest()))
                    this->schedule(curTick + 1);
            }
            else cachePort->waitingOnRetry = true;
        }
        else if (!cachePort->isCpuSide)
        {
            //MSHR
            pkt = cachePort->cache->getPacket();
            MSHR* mshr = (MSHR*) pkt->senderState;
            bool success = cachePort->sendTiming(pkt);
            DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                    pkt->getAddr(), success ? "successful" : "unsuccessful");
            cachePort->cache->sendResult(pkt, mshr, success);
            cachePort->waitingOnRetry = !success;
            if (success && cachePort->cache->doMasterRequest())
            {
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        else
        {
            //CSHR
            pkt = cachePort->cache->getCoherencePacket();
            bool success = cachePort->sendTiming(pkt);
            if (!success) {
                //Need to send on a retry
                cachePort->cshrRetry = pkt;
                cachePort->waitingOnRetry = true;
            }
            else if (cachePort->cache->doSlaveRequest())
            {
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        return;
    }
    //Response
    //Know the packet to send
    if (pkt->flags & NACKED_LINE)
        pkt->result = Packet::Nacked;
    else
        pkt->result = Packet::Success;
    pkt->makeTimingResponse();
    if (!cachePort->drainList.empty()) {
        //Already have a list, just append
        cachePort->drainList.push_back(pkt);
    }
    else if (!cachePort->sendTiming(pkt)) {
        //It failed, save it to list of drain events
        cachePort->drainList.push_back(pkt);
        cachePort->waitingOnRetry = true;
    }
}

const char *
BaseCache::CacheEvent::description()
{
    return "timing event\n";
}

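// Returns (lazily constructing) the requested port. "", "functional", and
// "cpu_side" all map to the single CPU-side port; "mem_side" may only be
// created once.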
Port*
BaseCache::getPort(const std::string &if_name, int idx)
{
    if (if_name == "")
    {
        if (cpuSidePort == NULL)
            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
        return cpuSidePort;
    }
    else if (if_name == "functional")
    {
        if (cpuSidePort == NULL)
            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
        return cpuSidePort;
    }
    else if (if_name == "cpu_side")
    {
        if (cpuSidePort == NULL)
            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
        return cpuSidePort;
    }
    else if (if_name == "mem_side")
    {
        if (memSidePort != NULL)
            panic("Already have a mem side for this cache\n");
        memSidePort = new CachePort(name() + "-mem_side_port", this, false);
        return memSidePort;
    }
    else panic("Port name %s unrecognized\n", if_name);
}

void
BaseCache::init()
{
    if (!cpuSidePort || !memSidePort)
        panic("Cache not hooked up on both sides\n");
    cpuSidePort->sendStatusChange(Port::RangeChange);
}

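// Register per-command hit/miss/latency statistics and the formulas derived
// from them. The temporary packet below exists only so command indices can
// be converted to name strings.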
void
BaseCache::regStats()
{
    Request temp_req((Addr) NULL, 4, 0);
    Packet::Command temp_cmd = Packet::ReadReq;
    Packet temp_pkt(&temp_req, temp_cmd, 0);  //@todo Fix command strings so this isn't necessary
    temp_pkt.allocate(); //Temp allocate, all need data

    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        hits[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
    }

    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total)
        ;
    demandHits = hits[Packet::ReadReq] + hits[Packet::WriteReq];

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total)
        ;
    overallHits = demandHits + hits[Packet::SoftPFReq] + hits[Packet::HardPFReq]
        + hits[Packet::Writeback];

    // Miss statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        misses[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total)
        ;
    demandMisses = misses[Packet::ReadReq] + misses[Packet::WriteReq];

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total)
        ;
    overallMisses = demandMisses + misses[Packet::SoftPFReq] +
        misses[Packet::HardPFReq] + misses[Packet::Writeback];

    // Miss latency statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        missLatency[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_miss_latency")
            .desc("number of " + cstr + " miss cycles")
            .flags(total | nozero | nonan)
            ;
    }

    demandMissLatency
        .name(name() + ".demand_miss_latency")
        .desc("number of demand (read+write) miss cycles")
        .flags(total)
        ;
    demandMissLatency = missLatency[Packet::ReadReq] + missLatency[Packet::WriteReq];

    overallMissLatency
        .name(name() + ".overall_miss_latency")
        .desc("number of overall miss cycles")
        .flags(total)
        ;
    overallMissLatency = demandMissLatency + missLatency[Packet::SoftPFReq] +
        missLatency[Packet::HardPFReq];

    // access formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        accesses[access_idx]
            .name(name() + "." + cstr + "_accesses")
            .desc("number of " + cstr + " accesses (hits+misses)")
            .flags(total | nozero | nonan)
            ;

        accesses[access_idx] = hits[access_idx] + misses[access_idx];
    }

    demandAccesses
        .name(name() + ".demand_accesses")
        .desc("number of demand (read+write) accesses")
        .flags(total)
        ;
    demandAccesses = demandHits + demandMisses;

    overallAccesses
        .name(name() + ".overall_accesses")
        .desc("number of overall accesses")
        .flags(total)
        ;
    overallAccesses = overallHits + overallMisses;

    // miss rate formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        missRate[access_idx]
            .name(name() + "." + cstr + "_miss_rate")
            .desc("miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;

        missRate[access_idx] = misses[access_idx] / accesses[access_idx];
    }

    demandMissRate
        .name(name() + ".demand_miss_rate")
        .desc("miss rate for demand accesses")
        .flags(total)
        ;
    demandMissRate = demandMisses / demandAccesses;

    overallMissRate
        .name(name() + ".overall_miss_rate")
        .desc("miss rate for overall accesses")
        .flags(total)
        ;
    overallMissRate = overallMisses / overallAccesses;

    // miss latency formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        avgMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_miss_latency")
            .desc("average " + cstr + " miss latency")
            .flags(total | nozero | nonan)
            ;

        avgMissLatency[access_idx] =
            missLatency[access_idx] / misses[access_idx];
    }

    demandAvgMissLatency
        .name(name() + ".demand_avg_miss_latency")
        .desc("average demand (read+write) miss latency")
        .flags(total)
        ;
    demandAvgMissLatency = demandMissLatency / demandMisses;

    overallAvgMissLatency
        .name(name() + ".overall_avg_miss_latency")
        .desc("average overall miss latency")
        .flags(total)
        ;
    overallAvgMissLatency = overallMissLatency / overallMisses;

    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .name(name() + ".blocked_cycles")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .name(name() + ".blocked")
        .desc("number of times access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .name(name() + ".avg_blocked_cycles")
        .desc("average number of cycles each access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked = blocked_cycles / blocked_causes;

    fastWrites
        .name(name() + ".fast_writes")
        .desc("number of fast writes performed")
        ;

    cacheCopies
        .name(name() + ".cache_copies")
        .desc("number of cache copies performed")
        ;
}