// dram_ctrl.cc revision 9727
/*
 * Copyright (c) 2010-2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 */

#include "base/trace.hh"
#include "debug/Drain.hh"
#include "debug/DRAM.hh"
#include "debug/DRAMWR.hh"
#include "mem/simple_dram.hh"

using namespace std;

SimpleDRAM::SimpleDRAM(const SimpleDRAMParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this),
    retryRdReq(false), retryWrReq(false),
    rowHitFlag(false), stopReads(false), actTicks(p->activation_limit, 0),
    writeEvent(this), respondEvent(this),
    refreshEvent(this), nextReqEvent(this), drainManager(NULL),
    bytesPerCacheLine(0),
    linesPerRowBuffer(p->lines_per_rowbuffer),
    ranksPerChannel(p->ranks_per_channel),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeThresholdPerc(p->write_thresh_perc),
    tWTR(p->tWTR), tBURST(p->tBURST),
    tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP),
    tRFC(p->tRFC), tREFI(p->tREFI),
    tXAW(p->tXAW), activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    busBusyUntil(0), writeStartTime(0),
    prevArrival(0), numReqs(0)
{
    // create the bank states based on the dimensions of the ranks and
    // banks
    banks.resize(ranksPerChannel);
    for (size_t c = 0; c < ranksPerChannel; ++c) {
        banks[c].resize(banksPerRank);
    }

    // round the write threshold percent to a whole number of entries
    // in the buffer
    writeThreshold = writeBufferSize * writeThresholdPerc / 100.0;
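    // for example, a 32-entry write buffer with a 70% threshold works
    // out to 22 entries, assuming the member is integral (as the
    // comparisons against queue sizes suggest) so that the fractional
    // part is dropped -- illustrative values, not defaults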
}

void
SimpleDRAM::init()
{
    if (!port.isConnected()) {
        fatal("SimpleDRAM %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }

    // get the burst size from the connected port as it is currently
    // assumed to be equal to the cache line size
    bytesPerCacheLine = port.peerBlockSize();

    // we could deal with plenty of options here, but for now do a
    // quick sanity check
    if (bytesPerCacheLine != 64 && bytesPerCacheLine != 32)
        panic("Unexpected burst size %d", bytesPerCacheLine);

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());
    rowsPerBank = capacity / (bytesPerCacheLine * linesPerRowBuffer *
                              banksPerRank * ranksPerChannel);
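    // as an illustration (not the defaults): a 256 MB channel with
    // 64 B lines, 64 lines per row buffer, 8 banks and 1 rank gives
    // 256 MB / (64 * 64 * 8 * 1) = 8192 rows per bank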

    if (range.interleaved()) {
        if (channels != range.stripes())
            panic("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RaBaChCo) {
            if (bytesPerCacheLine * linesPerRowBuffer !=
                range.granularity()) {
                panic("Interleaving of %s doesn't match RaBaChCo address map\n",
                      name());
            }
        } else if (addrMapping == Enums::RaBaCoCh) {
            if (bytesPerCacheLine != range.granularity()) {
                panic("Interleaving of %s doesn't match RaBaCoCh address map\n",
                      name());
            }
        } else if (addrMapping == Enums::CoRaBaCh) {
            if (bytesPerCacheLine != range.granularity())
                panic("Interleaving of %s doesn't match CoRaBaCh address map\n",
                      name());
        }
    }
}

void
SimpleDRAM::startup()
{
    // print the configuration of the controller
    printParams();

    // kick off the refresh
    schedule(refreshEvent, curTick() + tREFI);
}

Tick
SimpleDRAM::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (!pkt->memInhibitAsserted() && pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
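        // i.e. precharge the previously open row, activate the one
        // being accessed, and perform the column access; a rough
        // closed-page approximation rather than a modelled latency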
    }
    return latency;
}

bool
SimpleDRAM::readQueueFull() const
{
    DPRINTF(DRAM, "Read queue limit %d current size %d\n",
            readBufferSize, readQueue.size() + respQueue.size());

    return (readQueue.size() + respQueue.size()) == readBufferSize;
}

bool
SimpleDRAM::writeQueueFull() const
{
    DPRINTF(DRAM, "Write queue limit %d current size %d\n",
            writeBufferSize, writeQueue.size());
    return writeQueue.size() == writeBufferSize;
}

SimpleDRAM::DRAMPacket*
SimpleDRAM::decodeAddr(PacketPtr pkt)
{
    // decode the address based on the address mapping scheme, with
    // Ra, Co, Ba and Ch denoting rank, column, bank and channel,
    // respectively
    uint8_t rank;
    uint16_t bank;
    uint16_t row;

    Addr addr = pkt->getAddr();

    // truncate the address to the access granularity
    addr = addr / bytesPerCacheLine;

    // we have removed the lowest order address bits that denote the
    // position within the cache line
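    // as a worked example with illustrative (non-default) parameters:
    // one channel, 8 banks, 2 ranks and 64 lines per row buffer, for
    // line number 0x12345 under RaBaChCo: the column bits
    // (0x12345 % 64 = 5) are dropped first, then
    // bank = (0x12345 / 64) % 8 = 5, rank = (0x12345 / 512) % 2 = 1,
    // and the remaining bits give row 72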
    if (addrMapping == Enums::RaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / linesPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RaBaCoCh) {
        // take out the channel part of the address
        addr = addr / channels;

        // next, the column
        addr = addr / linesPerRowBuffer;

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::CoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the channel part of the address, note that this
        // has to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next the column bits which we do not need to keep track of
        // and simply skip past
        addr = addr / linesPerRowBuffer;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            pkt->getAddr(), rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    return new DRAMPacket(pkt, rank, bank, row, pkt->getAddr(),
                          banks[rank][bank]);
}

void
SimpleDRAM::addToReadQueue(PacketPtr pkt)
{
    // only add to the read queue here; whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    // First check the write buffer to see if the data is already at
    // the controller
    list<DRAMPacket*>::const_iterator i;
    Addr addr = pkt->getAddr();

    // @todo: add size check
    for (i = writeQueue.begin(); i != writeQueue.end(); ++i) {
        if ((*i)->addr == addr) {
            servicedByWrQ++;
            DPRINTF(DRAM, "Read to %lld serviced by write queue\n", addr);
            bytesRead += bytesPerCacheLine;
            bytesConsumedRd += pkt->getSize();
            accessAndRespond(pkt, frontendLatency);
            return;
        }
    }

    DRAMPacket* dram_pkt = decodeAddr(pkt);

    assert(readQueue.size() + respQueue.size() < readBufferSize);
    rdQLenPdf[readQueue.size() + respQueue.size()]++;

    DPRINTF(DRAM, "Adding to read queue\n");

    readQueue.push_back(dram_pkt);

    // Update stats
    uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
    assert(bank_id < ranksPerChannel * banksPerRank);
    perBankRdReqs[bank_id]++;

    avgRdQLen = readQueue.size() + respQueue.size();

    // If we are not already scheduled to get the read request out of
    // the queue, do so now
    if (!nextReqEvent.scheduled() && !stopReads) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
SimpleDRAM::processWriteEvent()
{
    assert(!writeQueue.empty());
    uint32_t numWritesThisTime = 0;

    DPRINTF(DRAMWR, "Beginning DRAM Writes\n");
    Tick temp1 M5_VAR_USED = std::max(curTick(), busBusyUntil);
    Tick temp2 M5_VAR_USED = std::max(curTick(), maxBankFreeAt());

    // @todo: are there any dangers with the untimed while loop?
    while (!writeQueue.empty()) {
        if (numWritesThisTime > writeThreshold) {
            DPRINTF(DRAMWR, "Hit write threshold %d\n", writeThreshold);
            break;
        }

        chooseNextWrite();
        DRAMPacket* dram_pkt = writeQueue.front();
        // What's the earliest the request can be put on the bus
        Tick schedTime = std::max(curTick(), busBusyUntil);

        DPRINTF(DRAMWR, "Asking for latency estimate at %lld\n",
                schedTime + tBURST);

        pair<Tick, Tick> lat = estimateLatency(dram_pkt, schedTime + tBURST);
        Tick accessLat = lat.second;

        // look at the rowHitFlag set by estimateLatency
        if (rowHitFlag)
            writeRowHits++;

        Bank& bank = dram_pkt->bank_ref;

        if (pageMgmt == Enums::open) {
            bank.openRow = dram_pkt->row;
            bank.freeAt = schedTime + tBURST + std::max(accessLat, tCL);
            busBusyUntil = bank.freeAt - tCL;
            bank.bytesAccessed += bytesPerCacheLine;

            if (!rowHitFlag) {
                bank.tRASDoneAt = bank.freeAt + tRP;
                recordActivate(bank.freeAt - tCL - tRCD);
                busBusyUntil = bank.freeAt - tCL - tRCD;

                // sample the number of bytes accessed and reset it as
                // we are now closing this row
                bytesPerActivate.sample(bank.bytesAccessed);
                bank.bytesAccessed = 0;
            }
        } else if (pageMgmt == Enums::close) {
            bank.freeAt = schedTime + tBURST + accessLat + tRP + tRP;
            // Work backwards from bank.freeAt to determine activate time
            recordActivate(bank.freeAt - tRP - tRP - tCL - tRCD);
            busBusyUntil = bank.freeAt - tRP - tRP - tCL - tRCD;
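            // the two tRP terms follow from assuming tRAS ~= 3 * tRP
            // and tRC ~= 4 * tRP (see doDRAMAccess() below), so that
            // freeAt - 2 * tRP - tCL - tRCD is essentially the
            // activate time, i.e. freeAt - tRC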
            DPRINTF(DRAMWR, "processWriteEvent::bank.freeAt for "
                    "bank_id %d is %lld\n",
                    dram_pkt->rank * banksPerRank + dram_pkt->bank,
                    bank.freeAt);
            bytesPerActivate.sample(bytesPerCacheLine);
        } else
            panic("Unknown page management policy chosen\n");

        DPRINTF(DRAMWR, "Done writing to address %lld\n", dram_pkt->addr);

        DPRINTF(DRAMWR, "schedtime is %lld, tBURST is %lld, "
                "busbusyuntil is %lld\n",
                schedTime, tBURST, busBusyUntil);

        writeQueue.pop_front();
        delete dram_pkt;

        numWritesThisTime++;
    }

    DPRINTF(DRAMWR, "Completed %d writes, bus busy for %lld ticks, "
            "banks busy for %lld ticks\n", numWritesThisTime,
            busBusyUntil - temp1, maxBankFreeAt() - temp2);

    // Update stats
    avgWrQLen = writeQueue.size();

    // turn the bus back around for reads again
    busBusyUntil += tWTR;
    stopReads = false;

    if (retryWrReq) {
        retryWrReq = false;
        port.sendRetry();
    }

    // if there is nothing left in any queue, signal a drain
    if (writeQueue.empty() && readQueue.empty() &&
        respQueue.empty() && drainManager) {
        drainManager->signalDrainDone();
        drainManager = NULL;
    }

    // Once you're done emptying the write queue, check if there's
    // anything in the read queue, and call schedule if required. The
    // retry above could already have caused it to be scheduled, so
    // first check
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, busBusyUntil);
}

void
SimpleDRAM::triggerWrites()
{
    DPRINTF(DRAM, "Writes triggered at %lld\n", curTick());
    // Flag variable to stop any more read scheduling
    stopReads = true;

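    // the writes can first be put on the bus once it falls idle and
    // the bus turn-around time (tWTR in this model) has elapsed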
    writeStartTime = std::max(busBusyUntil, curTick()) + tWTR;

    DPRINTF(DRAM, "Writes scheduled at %lld\n", writeStartTime);

    assert(writeStartTime >= curTick());
    assert(!writeEvent.scheduled());
    schedule(writeEvent, writeStartTime);
}

void
SimpleDRAM::addToWriteQueue(PacketPtr pkt)
{
    // only add to the write queue here; whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    DRAMPacket* dram_pkt = decodeAddr(pkt);

    assert(writeQueue.size() < writeBufferSize);
    wrQLenPdf[writeQueue.size()]++;

    DPRINTF(DRAM, "Adding to write queue\n");

    writeQueue.push_back(dram_pkt);

    // Update stats
    uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
    assert(bank_id < ranksPerChannel * banksPerRank);
    perBankWrReqs[bank_id]++;

    avgWrQLen = writeQueue.size();

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads

    bytesConsumedWr += pkt->getSize();
    bytesWritten += bytesPerCacheLine;
    accessAndRespond(pkt, frontendLatency);

    // If the write buffer is starting to fill up, drain it!
    if (writeQueue.size() > writeThreshold && !stopReads) {
        triggerWrites();
    }
}

void
SimpleDRAM::printParams() const
{
    // Sanity check print of important parameters
    DPRINTF(DRAM,
            "Memory controller %s physical organization\n"      \
            "Bytes per cacheline  %d\n"                         \
            "Lines per row buffer %d\n"                         \
            "Rows  per bank       %d\n"                         \
            "Banks per rank       %d\n"                         \
            "Ranks per channel    %d\n"                         \
            "Total mem capacity   %u\n",
            name(), bytesPerCacheLine, linesPerRowBuffer, rowsPerBank,
            banksPerRank, ranksPerChannel, bytesPerCacheLine *
            linesPerRowBuffer * rowsPerBank * banksPerRank * ranksPerChannel);

    string scheduler = memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS";
    string address_mapping = addrMapping == Enums::RaBaChCo ? "RaBaChCo" :
        (addrMapping == Enums::RaBaCoCh ? "RaBaCoCh" : "CoRaBaCh");
    string page_policy = pageMgmt == Enums::open ? "OPEN" : "CLOSE";

    DPRINTF(DRAM,
            "Memory controller %s characteristics\n"    \
            "Read buffer size     %d\n"                 \
            "Write buffer size    %d\n"                 \
            "Write buffer thresh  %d\n"                 \
            "Scheduler            %s\n"                 \
            "Address mapping      %s\n"                 \
            "Page policy          %s\n",
            name(), readBufferSize, writeBufferSize, writeThreshold,
            scheduler, address_mapping, page_policy);

    DPRINTF(DRAM, "Memory controller %s timing specs\n" \
            "tRCD      %d ticks\n"                        \
            "tCL       %d ticks\n"                        \
            "tRP       %d ticks\n"                        \
            "tBURST    %d ticks\n"                        \
            "tRFC      %d ticks\n"                        \
            "tREFI     %d ticks\n"                        \
            "tWTR      %d ticks\n"                        \
            "tXAW (%d) %d ticks\n",
            name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR,
            activationLimit, tXAW);
}

void
SimpleDRAM::printQs() const
{
    list<DRAMPacket*>::const_iterator i;

    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (i = readQueue.begin(); i != readQueue.end(); ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (i = respQueue.begin(); i != respQueue.end(); ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (i = writeQueue.begin(); i != writeQueue.end(); ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    }
}

bool
SimpleDRAM::recvTimingReq(PacketPtr pkt)
{
    /// @todo temporary hack to deal with memory corruption issues until
    /// 4-phase transactions are complete
    for (int x = 0; x < pendingDelete.size(); x++)
        delete pendingDelete[x];
    pendingDelete.clear();

    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // simply drop inhibited packets for now
    if (pkt->memInhibitAsserted()) {
        DPRINTF(DRAM, "Inhibited packet -- Dropping it now\n");
        pendingDelete.push_back(pkt);
        return true;
    }

    if (pkt->getSize() == bytesPerCacheLine)
        cpuReqs++;

    // Every million accesses, print the state of the queues
    if (numReqs % 1000000 == 0)
        printQs();

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    unsigned size = pkt->getSize();
    if (size > bytesPerCacheLine)
        panic("Request size %d is greater than burst size %d",
              size, bytesPerCacheLine);

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull()) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            readPktSize[ceilLog2(size)]++;
            addToReadQueue(pkt);
            readReqs++;
            numReqs++;
        }
    } else if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull()) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            writePktSize[ceilLog2(size)]++;
            addToWriteQueue(pkt);
            writeReqs++;
            numReqs++;
        }
    } else {
        DPRINTF(DRAM, "Neither read nor write, ignore timing\n");
        neitherReadNorWrite++;
        accessAndRespond(pkt, 1);
    }

    retryRdReq = false;
    retryWrReq = false;
    return true;
}

void
SimpleDRAM::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    PacketPtr pkt = respQueue.front()->pkt;

    // Actually respond to the requester
    bytesConsumedRd += pkt->getSize();
    bytesRead += bytesPerCacheLine;
    accessAndRespond(pkt, frontendLatency + backendLatency);

    delete respQueue.front();
    respQueue.pop_front();

    // Update stats
    avgRdQLen = readQueue.size() + respQueue.size();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (writeQueue.empty() && readQueue.empty() &&
            drainManager) {
            drainManager->signalDrainDone();
            drainManager = NULL;
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetry();
    }
}

void
SimpleDRAM::chooseNextWrite()
{
    // This method does the arbitration between write requests. The
    // chosen packet is simply moved to the head of the write
    // queue. The other methods know that this is the place to
    // look. For example, with FCFS, this method does nothing
    assert(!writeQueue.empty());

    if (writeQueue.size() == 1) {
        DPRINTF(DRAMWR, "Single write request, nothing to do\n");
        return;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // Do nothing, since the correct request is already at the head
    } else if (memSchedPolicy == Enums::frfcfs) {
        list<DRAMPacket*>::iterator i = writeQueue.begin();
        bool foundRowHit = false;
        while (!foundRowHit && i != writeQueue.end()) {
            DRAMPacket* dram_pkt = *i;
            const Bank& bank = dram_pkt->bank_ref;
            if (bank.openRow == dram_pkt->row) { // FR part
                DPRINTF(DRAMWR, "Write row buffer hit\n");
                writeQueue.erase(i);
                writeQueue.push_front(dram_pkt);
                foundRowHit = true;
            } else { // FCFS part
                // no row hit, check the next request; note that the
                // increment must not happen on the erased iterator
                ++i;
            }
        }
    } else
        panic("No scheduling policy chosen\n");

    DPRINTF(DRAMWR, "Selected next write request\n");
}

bool
SimpleDRAM::chooseNextRead()
{
    // This method does the arbitration between read requests. The
    // chosen packet is simply moved to the head of the queue. The
    // other methods know that this is the place to look. For example,
    // with FCFS, this method does nothing
    if (readQueue.empty()) {
        DPRINTF(DRAM, "No read request to select\n");
        return false;
    }

    // If there is only one request then there is nothing left to do
    if (readQueue.size() == 1)
        return true;

    if (memSchedPolicy == Enums::fcfs) {
        // Do nothing, since the request to serve is already the first
        // one in the read queue
    } else if (memSchedPolicy == Enums::frfcfs) {
        for (list<DRAMPacket*>::iterator i = readQueue.begin();
             i != readQueue.end(); ++i) {
            DRAMPacket* dram_pkt = *i;
            const Bank& bank = dram_pkt->bank_ref;
            // Check if it is a row hit (FR part); if not, simply move
            // on to the next request (FCFS part)
            if (bank.openRow == dram_pkt->row) {
                DPRINTF(DRAM, "Row buffer hit\n");
                readQueue.erase(i);
                readQueue.push_front(dram_pkt);
                break;
            }
        }
    } else
        panic("No scheduling policy chosen!\n");

    DPRINTF(DRAM, "Selected next read request\n");
    return true;
}

void
SimpleDRAM::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());

        // @todo someone should pay for this
        pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, curTick() + static_latency);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // still has a pointer to it
        pendingDelete.push_back(pkt);
    }

    DPRINTF(DRAM, "Done\n");
}

pair<Tick, Tick>
SimpleDRAM::estimateLatency(DRAMPacket* dram_pkt, Tick inTime)
{
    // If a request reaches a bank at tick 'inTime', how much time
    // *after* that does it take to finish the request, depending
    // on bank status and page open policy. Note that this method
    // considers only the time taken for the actual read or write
    // to complete, NOT any additional time thereafter for tRAS or
    // tRP.
    Tick accLat = 0;
    Tick bankLat = 0;
    rowHitFlag = false;

    const Bank& bank = dram_pkt->bank_ref;
    if (pageMgmt == Enums::open) { // open-page policy
        if (bank.openRow == dram_pkt->row) {
            // When we have a row-buffer hit,
            // we don't care about tRAS having expired or not,
            // but do care about the bank being free for access
            rowHitFlag = true;

            if (bank.freeAt < inTime) {
                // CAS latency only
                accLat += tCL;
                bankLat += tCL;
            } else {
                accLat += 0;
                bankLat += 0;
            }
        } else {
            // Row-buffer miss, need to close the existing row
            // once tRAS has expired, then open the new one,
            // then add the CAS latency.
            Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt);

            if (freeTime > inTime)
                accLat += freeTime - inTime;

            accLat += tRP + tRCD + tCL;
            bankLat += tRP + tRCD + tCL;
        }
    } else if (pageMgmt == Enums::close) {
        // With a close page policy, no notion of
        // bank.tRASDoneAt
        if (bank.freeAt > inTime)
            accLat += bank.freeAt - inTime;

        // page already closed, simply open the row, and
        // add the CAS latency
        accLat += tRCD + tCL;
        bankLat += tRCD + tCL;
    } else
        panic("No page management policy chosen\n");

    DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n",
            bankLat, accLat);

    return make_pair(bankLat, accLat);
}

void
SimpleDRAM::processNextReqEvent()
{
    scheduleNextReq();
}

void
SimpleDRAM::recordActivate(Tick act_tick)
{
    assert(actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // sanity check
    if (actTicks.back() && (act_tick - actTicks.back()) < tXAW) {
        panic("Got %d activates in window %d (%d - %d) which is smaller "
              "than %d\n", activationLimit, act_tick - actTicks.back(),
              act_tick, actTicks.back(), tXAW);
    }

    // shift the times used for the bookkeeping, the last element
    // (highest index) is the oldest one and hence the lowest value
    actTicks.pop_back();

    // record a new activation (in the future)
    actTicks.push_front(act_tick);

    // cannot activate more than X times in time window tXAW, push the
    // next one (the X + 1'st activate) to be tXAW away from the
    // oldest in our window of X
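    // for example, with an activation limit X = 4 and previous
    // activates recorded at ticks 100, 200, 300 and 400, a fifth
    // activate may happen no earlier than tick 100 + tXAW
    // (illustrative numbers)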
    if (actTicks.back() && (act_tick - actTicks.back()) < tXAW) {
        DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier "
                "than %d\n", activationLimit, actTicks.back() + tXAW);
        for (int i = 0; i < ranksPerChannel; i++)
            for (int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                banks[i][j].freeAt = std::max(banks[i][j].freeAt,
                                              actTicks.back() + tXAW);
    }
}

void
SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // estimate the bank and access latency
    pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick());
    Tick bankLat = lat.first;
    Tick accessLat = lat.second;

    // This request was woken up at this time based on a prior call
    // to estimateLatency(). However, between then and now, both the
    // accessLatency and/or busBusyUntil may have changed. We need
    // to correct for that.

    Tick addDelay = (curTick() + accessLat < busBusyUntil) ?
        busBusyUntil - (curTick() + accessLat) : 0;
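    // e.g. if the access would complete at tick 900 but the bus is
    // busy until tick 1000, the request is pushed back by 100 ticks
    // (illustrative numbers)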

    Bank& bank = dram_pkt->bank_ref;

    // Update bank state
    if (pageMgmt == Enums::open) {
        bank.openRow = dram_pkt->row;
        bank.freeAt = curTick() + addDelay + accessLat;
        bank.bytesAccessed += bytesPerCacheLine;

        // If we activated a new row due to this access, the next access
        // will have to respect tRAS for this bank. Assume tRAS ~= 3 * tRP.
        // Also need to account for tXAW
        if (!rowHitFlag) {
            bank.tRASDoneAt = bank.freeAt + tRP;
            // since this is open page, no tRP by default
            recordActivate(bank.freeAt - tCL - tRCD);

            // sample the number of bytes accessed and reset it as
            // we are now closing this row
            bytesPerActivate.sample(bank.bytesAccessed);
            bank.bytesAccessed = 0;
        }
    } else if (pageMgmt == Enums::close) { // accounting for tRAS also
        // assuming that tRAS ~= 3 * tRP, and tRC ~= 4 * tRP, as is common
        // (refer Jacob/Ng/Wang and Micron datasheets)
        bank.freeAt = curTick() + addDelay + accessLat + tRP + tRP;
        // essentially (freeAt - tRC)
        recordActivate(bank.freeAt - tRP - tRP - tCL - tRCD);
        DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
        bytesPerActivate.sample(bytesPerCacheLine);
    } else
        panic("No page management policy chosen\n");

    // Update request parameters
    dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST;
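    // the tBURST term reflects that the requester only sees the data
    // once the burst has also completed on the data bus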

    DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d " \
            "readytime is %lld busbusyuntil is %lld. " \
            "Scheduling at readyTime\n", dram_pkt->addr,
            curTick(), accessLat, dram_pkt->readyTime, busBusyUntil);

    // Make sure requests are not overlapping on the databus
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);

    // Update bus state
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM, "Access time is %lld\n",
            dram_pkt->readyTime - dram_pkt->entryTime);

    // Update stats
    totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
    totBankLat += bankLat;
    totBusLat += tBURST;
    totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat - tBURST;

    if (rowHitFlag)
        readRowHits++;

    // At this point we're done dealing with the request. It will be
    // moved to a separate response queue with a correct readyTime,
    // and eventually be sent back at that time
    moveToRespQ();

    // The absolute soonest you have to start thinking about the
    // next request is the longest access time that can occur before
    // busBusyUntil. Assuming you need to meet tRAS, then precharge,
    // open a new row, and access, it is ~4*tRCD.
    Tick newTime = (busBusyUntil > 4 * tRCD) ?
                   std::max(busBusyUntil - 4 * tRCD, curTick()) :
                   curTick();

    if (!nextReqEvent.scheduled() && !stopReads) {
        schedule(nextReqEvent, newTime);
    } else {
        if (newTime < nextReqEvent.when())
            reschedule(nextReqEvent, newTime);
    }
}

void
SimpleDRAM::moveToRespQ()
{
    // Remove from read queue
    DRAMPacket* dram_pkt = readQueue.front();
    readQueue.pop_front();

    // Insert into response queue sorted by readyTime. It will be
    // sent back to the requester at its readyTime
    if (respQueue.empty()) {
        respQueue.push_front(dram_pkt);
        assert(!respondEvent.scheduled());
        assert(dram_pkt->readyTime >= curTick());
        schedule(respondEvent, dram_pkt->readyTime);
    } else {
        bool done = false;
        list<DRAMPacket*>::iterator i = respQueue.begin();
        while (!done && i != respQueue.end()) {
            if ((*i)->readyTime > dram_pkt->readyTime) {
                respQueue.insert(i, dram_pkt);
                done = true;
            }
            ++i;
        }

        if (!done)
            respQueue.push_back(dram_pkt);

        assert(respondEvent.scheduled());

        if (respQueue.front()->readyTime < respondEvent.when()) {
            assert(respQueue.front()->readyTime >= curTick());
            reschedule(respondEvent, respQueue.front()->readyTime);
        }
    }
}

void
SimpleDRAM::scheduleNextReq()
{
    DPRINTF(DRAM, "Reached scheduleNextReq()\n");

    // Figure out which read request goes next, and move it to the
    // front of the read queue
    if (!chooseNextRead()) {
        // In the case there is no read request to go next, see if we
        // are asked to drain, and if so trigger writes, this also
        // ensures that if we hit the write limit we will do this
        // multiple times until we are completely drained
        if (drainManager && !writeQueue.empty() && !writeEvent.scheduled())
            triggerWrites();
    } else {
        doDRAMAccess(readQueue.front());
    }
}

Tick
SimpleDRAM::maxBankFreeAt() const
{
    Tick banksFree = 0;

    for (int i = 0; i < ranksPerChannel; i++)
        for (int j = 0; j < banksPerRank; j++)
            banksFree = std::max(banks[i][j].freeAt, banksFree);

    return banksFree;
}

void
SimpleDRAM::processRefreshEvent()
{
    DPRINTF(DRAM, "Refreshing at tick %ld\n", curTick());

    Tick banksFree = std::max(curTick(), maxBankFreeAt()) + tRFC;
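    // every bank is blocked until tRFC after the point where the
    // refresh can start, i.e. once any outstanding accesses have
    // completed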

    for (int i = 0; i < ranksPerChannel; i++)
        for (int j = 0; j < banksPerRank; j++)
            banks[i][j].freeAt = banksFree;

    schedule(refreshEvent, curTick() + tREFI);
}

void
SimpleDRAM::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    readReqs
        .name(name() + ".readReqs")
        .desc("Total number of read requests seen");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Total number of write requests seen");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of read reqs serviced by write Q");

    cpuReqs
        .name(name() + ".cpureqs")
        .desc("Reqs generated by CPU via cache - shady");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWrite")
        .desc("Reqs where no action is needed");

    perBankRdReqs
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdReqs")
        .desc("Track reads on a per bank basis");

    perBankWrReqs
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrReqs")
        .desc("Track writes on a per bank basis");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length over time")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length over time")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total cycles spent in queuing delays");

    totBankLat
        .name(name() + ".totBankLat")
        .desc("Total cycles spent in bank access");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total cycles spent in databus access");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Sum of mem lat for all requests");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per request")
        .precision(2);

    avgQLat = totQLat / (readReqs - servicedByWrQ);

    avgBankLat
        .name(name() + ".avgBankLat")
        .desc("Average bank access latency per request")
        .precision(2);

    avgBankLat = totBankLat / (readReqs - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per request")
        .precision(2);

    avgBusLat = totBusLat / (readReqs - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readReqs - servicedByWrQ);

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times rd buffer was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times wr buffer was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readReqs - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / writeReqs) * 100;

    readPktSize
        .init(ceilLog2(bytesPerCacheLine) + 1)
        .name(name() + ".readPktSize")
        .desc("Categorize read packet sizes");

    writePktSize
        .init(ceilLog2(bytesPerCacheLine) + 1)
        .name(name() + ".writePktSize")
        .desc("Categorize write packet sizes");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");

    bytesPerActivate
        .init(bytesPerCacheLine * linesPerRowBuffer)
        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")
        .flags(nozero);

    bytesRead
        .name(name() + ".bytesRead")
        .desc("Total number of bytes read from memory");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to memory");

    bytesConsumedRd
        .name(name() + ".bytesConsumedRd")
        .desc("bytesRead derated as per pkt->getSize()");

    bytesConsumedWr
        .name(name() + ".bytesConsumedWr")
        .desc("bytesWritten derated as per pkt->getSize()");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average achieved read bandwidth in MB/s")
        .precision(2);

    avgRdBW = (bytesRead / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MB/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgConsumedRdBW
        .name(name() + ".avgConsumedRdBW")
        .desc("Average consumed read bandwidth in MB/s")
        .precision(2);

    avgConsumedRdBW = (bytesConsumedRd / 1000000) / simSeconds;

    avgConsumedWrBW
        .name(name() + ".avgConsumedWrBW")
        .desc("Average consumed write bandwidth in MB/s")
        .precision(2);

    avgConsumedWrBW = (bytesConsumedWr / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MB/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * bytesPerCacheLine / 1000000;
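    // for example, a tBURST of 3.75 ns moving 64 bytes per burst
    // amounts to 64 / 3.75e-9 / 1e6, i.e. roughly 17066 MB/s
    // (illustrative timing, not a default)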

    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);
}

void
SimpleDRAM::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}

BaseSlavePort&
SimpleDRAM::getSlavePort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return port;
    }
}

unsigned int
SimpleDRAM::drain(DrainManager *dm)
{
    unsigned int count = port.drain(dm);

    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(writeQueue.empty() && readQueue.empty() &&
          respQueue.empty())) {
        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", writeQueue.size(), readQueue.size(),
                respQueue.size());
        ++count;
        drainManager = dm;
        // the only part that is not drained automatically over time
        // is the write queue, thus trigger writes if there are any
        // waiting and no reads waiting, otherwise wait until the
        // reads are done
        if (readQueue.empty() && !writeQueue.empty() &&
            !writeEvent.scheduled())
            triggerWrites();
    }

    if (count)
        setDrainState(Drainable::Draining);
    else
        setDrainState(Drainable::Drained);
    return count;
}
1331
1332SimpleDRAM::MemoryPort::MemoryPort(const std::string& name, SimpleDRAM& _memory)
1333    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
1334      memory(_memory)
1335{ }
1336
1337AddrRangeList
1338SimpleDRAM::MemoryPort::getAddrRanges() const
1339{
1340    AddrRangeList ranges;
1341    ranges.push_back(memory.getAddrRange());
1342    return ranges;
1343}
1344
1345void
1346SimpleDRAM::MemoryPort::recvFunctional(PacketPtr pkt)
1347{
1348    pkt->pushLabel(memory.name());
1349
1350    if (!queue.checkFunctional(pkt)) {
1351        // Default implementation of SimpleTimingPort::recvFunctional()
1352        // calls recvAtomic() and throws away the latency; we can save a
1353        // little here by just not calculating the latency.
1354        memory.recvFunctional(pkt);
1355    }
1356
1357    pkt->popLabel();
1358}
1359
1360Tick
1361SimpleDRAM::MemoryPort::recvAtomic(PacketPtr pkt)
1362{
1363    return memory.recvAtomic(pkt);
1364}
1365
1366bool
1367SimpleDRAM::MemoryPort::recvTimingReq(PacketPtr pkt)
1368{
1369    // pass it to the memory controller
1370    return memory.recvTimingReq(pkt);
1371}
1372
1373SimpleDRAM*
1374SimpleDRAMParams::create()
1375{
1376    return new SimpleDRAM(this);
1377}
1378