// dram_ctrl.cc, revision 10142
/*
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 */

#include "base/bitfield.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/Drain.hh"
#include "mem/simple_dram.hh"
#include "sim/system.hh"

using namespace std;

SimpleDRAM::SimpleDRAM(const SimpleDRAMParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this),
    retryRdReq(false), retryWrReq(false),
    rowHitFlag(false), stopReads(false),
    writeEvent(this), respondEvent(this),
    refreshEvent(this), nextReqEvent(this), drainManager(NULL),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    columnsPerRowBuffer(rowBufferSize / burstSize),
    ranksPerChannel(p->ranks_per_channel),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch), writesThisTime(0),
    tWTR(p->tWTR), tBURST(p->tBURST),
    tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
    tXAW(p->tXAW), activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    maxAccessesPerRow(p->max_accesses_per_row),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    busBusyUntil(0), writeStartTime(0),
    prevArrival(0), numReqs(0),
    newTime(0), startTickPrechargeAll(0), numBanksActive(0)
{
    // create the bank states based on the dimensions of the ranks and
    // banks
    banks.resize(ranksPerChannel);
    actTicks.resize(ranksPerChannel);
    for (size_t c = 0; c < ranksPerChannel; ++c) {
        banks[c].resize(banksPerRank);
        actTicks[c].resize(activationLimit, 0);
    }

    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);
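    // Worked example with hypothetical (not default) values: for a
    // DDR3-style x8 part with devicesPerRank = 8, deviceBusWidth = 8,
    // burstLength = 8 and deviceRowBufferSize = 1024 bytes,
    //   burstSize           = (8 * 8 * 8) / 8 = 64 bytes
    //   rowBufferSize       = 8 * 1024        = 8192 bytes
    //   columnsPerRowBuffer = 8192 / 64       = 128
    // and with a 1 GiB channel, 8 banks and 2 ranks,
    //   rowsPerBank = 2^30 / (8192 * 8 * 2)   = 8192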

    if (range.interleaved()) {
        if (channels != range.stripes())
            panic("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                panic("Interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh) {
            if (system()->cacheLineSize() != range.granularity()) {
                panic("Interleaving of %s doesn't match RoRaBaCoCh "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoCoRaBaCh) {
            if (system()->cacheLineSize() != range.granularity())
                panic("Interleaving of %s doesn't match RoCoRaBaCh "
                      "address map\n", name());
        }
    }
}

void
SimpleDRAM::init()
{
    if (!port.isConnected()) {
        fatal("SimpleDRAM %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }
}

void
SimpleDRAM::startup()
{
    // print the configuration of the controller
    printParams();

    // kick off the refresh
    schedule(refreshEvent, curTick() + tREFI);
}

Tick
SimpleDRAM::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (!pkt->memInhibitAsserted() && pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}

bool
SimpleDRAM::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, readQueue.size() + respQueue.size(),
            neededEntries);

    return
        (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
}

bool
SimpleDRAM::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, writeQueue.size(), neededEntries);
    return (writeQueue.size() + neededEntries) > writeBufferSize;
}

SimpleDRAM::DRAMPacket*
SimpleDRAM::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
                       bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively
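    // As a hypothetical example, with burstSize = 64, channels = 1,
    // columnsPerRowBuffer = 128, banksPerRank = 8 and ranksPerChannel = 2,
    // the RoRaBaChCo map below decodes dramPktAddr 0xB62A0 as column 10,
    // bank 3, rank 1, row 5 (values chosen purely for illustration)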
    uint8_t rank;
    uint8_t bank;
    uint16_t row;

    // truncate the address to the access granularity
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RoRaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoRaBaCoCh) {
        // take out the channel part of the address
        addr = addr / channels;

        // next, the column
        addr = addr / columnsPerRowBuffer;

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next the column bits which we do not need to keep track of
        // and simply skip past
        addr = addr / columnsPerRowBuffer;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, banks[rank][bank]);
}

void
SimpleDRAM::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of the first DRAM packet is kept unaligned. Subsequent DRAM
    // packets are aligned to burst size boundaries. This is to ensure we
    // accurately check read packets against packets in the write queue.
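    // For instance, assuming burstSize = 64, a 128-byte read starting at
    // address 0x70 is split into three bursts: [0x70, 16 bytes],
    // [0x80, 64 bytes] and [0xC0, 48 bytes] (example values only)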
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                        pkt->getAddr() + pkt->getSize()) - addr;
        readPktSize[ceilLog2(size)]++;
        readBursts++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
            // check if the read is subsumed in the write entry we are
            // looking at
            if ((*i)->addr <= addr &&
                (addr + size) <= ((*i)->addr + (*i)->size)) {
                foundInWrQ = true;
                servicedByWrQ++;
                pktsServicedByWrQ++;
                DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
                        "write queue\n", addr, size);
                bytesReadWrQ += burstSize;
                break;
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[readQueue.size() + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue.push_back(dram_pkt);

            // Update stats
            avgRdQLen = readQueue.size() + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by the write queue, send the response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get the read request out of
    // the queue, do so now
    if (!nextReqEvent.scheduled() && !stopReads) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
SimpleDRAM::processWriteEvent()
{
    assert(!writeQueue.empty());

    DPRINTF(DRAM, "Beginning DRAM Write\n");
    Tick temp1 M5_VAR_USED = std::max(curTick(), busBusyUntil);
    Tick temp2 M5_VAR_USED = std::max(curTick(), maxBankFreeAt());

    chooseNextWrite();
    DRAMPacket* dram_pkt = writeQueue.front();
    // sanity check
    assert(dram_pkt->size <= burstSize);
    doDRAMAccess(dram_pkt);

    writeQueue.pop_front();
    delete dram_pkt;

    ++writesThisTime;

    DPRINTF(DRAM, "Writing, bus busy for %lld ticks, banks busy "
            "for %lld ticks\n", busBusyUntil - temp1, maxBankFreeAt() - temp2);

    // Update stats
    avgWrQLen = writeQueue.size();

    // If we emptied the write queue, or got below the threshold and
    // are not draining, or we have reads waiting and have done enough
    // writes, then switch to reads. A retry could already have caused
    // the next request event to be scheduled, so check first
    if (writeQueue.empty() ||
        (writeQueue.size() < writeLowThreshold && !drainManager) ||
        (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
        // turn the bus back around for reads again
        busBusyUntil += tWTR;
        stopReads = false;
        writesThisTime = 0;

        if (!nextReqEvent.scheduled())
            schedule(nextReqEvent, busBusyUntil);
    } else {
        assert(!writeEvent.scheduled());
        DPRINTF(DRAM, "Next write scheduled at %lld\n", newTime);
        schedule(writeEvent, newTime);
    }

    if (retryWrReq) {
        retryWrReq = false;
        port.sendRetry();
    }

    // if there is nothing left in any queue, signal a drain
    if (writeQueue.empty() && readQueue.empty() &&
        respQueue.empty() && drainManager) {
        drainManager->signalDrainDone();
        drainManager = NULL;
    }
}


void
SimpleDRAM::triggerWrites()
{
    DPRINTF(DRAM, "Writes triggered at %lld\n", curTick());
    // Flag variable to stop any more read scheduling
    stopReads = true;

    writeStartTime = std::max(busBusyUntil, curTick()) + tWTR;

    DPRINTF(DRAM, "Writes scheduled at %lld\n", writeStartTime);

    assert(writeStartTime >= curTick());
    assert(!writeEvent.scheduled());
    schedule(writeEvent, writeStartTime);
}

void
SimpleDRAM::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                        pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;
        writeBursts++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not so we
        // can stop at that point and also avoid enqueueing a new
        // request
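        // For example (illustrative values, assuming burstSize = 64): an
        // incoming write of addr 0x50, size 32 merges with a queued write
        // of addr 0x40, size 32 into a single entry of addr 0x40, size 48,
        // since together they still fit within one 64-byte burst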
        bool merged = false;
        auto w = writeQueue.begin();

        while (!merged && w != writeQueue.end()) {
            // either of the two could be first, if they are the same
            // it does not matter which way we go
            if ((*w)->addr >= addr) {
                // the existing one starts after the new one, figure
                // out where the new one ends with respect to the
                // existing one
                if ((addr + size) >= ((*w)->addr + (*w)->size)) {
                    // check if the existing one is completely
                    // subsumed in the new one
                    DPRINTF(DRAM, "Merging write covering existing burst\n");
                    merged = true;
                    // update both the address and the size
                    (*w)->addr = addr;
                    (*w)->size = size;
                } else if ((addr + size) >= (*w)->addr &&
                           ((*w)->addr + (*w)->size - addr) <= burstSize) {
                    // the new one is just before or partially
                    // overlapping with the existing one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write before existing burst\n");
                    merged = true;
                    // the existing queue item needs to be adjusted with
                    // respect to both address and size
                    (*w)->size = (*w)->addr + (*w)->size - addr;
                    (*w)->addr = addr;
                }
            } else {
                // the new one starts after the current one, figure
                // out where the existing one ends with respect to the
                // new one
                if (((*w)->addr + (*w)->size) >= (addr + size)) {
                    // check if the new one is completely subsumed in the
                    // existing one
                    DPRINTF(DRAM, "Merging write into existing burst\n");
                    merged = true;
                    // no adjustments necessary
                } else if (((*w)->addr + (*w)->size) >= addr &&
                           (addr + size - (*w)->addr) <= burstSize) {
                    // the existing one is just before or partially
                    // overlapping with the new one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write after existing burst\n");
                    merged = true;
                    // the address is right, and only the size has
                    // to be adjusted
                    (*w)->size = addr + size - (*w)->addr;
                }
            }
            ++w;
        }

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(writeQueue.size() < writeBufferSize);
            wrQLenPdf[writeQueue.size()]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue.push_back(dram_pkt);

            // Update stats
            avgWrQLen = writeQueue.size();
        } else {
            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If your write buffer is starting to fill up, drain it!
    if (writeQueue.size() >= writeHighThreshold && !stopReads) {
        triggerWrites();
    }
}

void
SimpleDRAM::printParams() const
{
    // Sanity check print of important parameters
    DPRINTF(DRAM,
            "Memory controller %s physical organization\n"      \
            "Number of devices per rank   %d\n"                 \
            "Device bus width (in bits)   %d\n"                 \
            "DRAM data bus burst          %d\n"                 \
            "Row buffer size              %d\n"                 \
            "Columns per row buffer       %d\n"                 \
            "Rows    per bank             %d\n"                 \
            "Banks   per rank             %d\n"                 \
            "Ranks   per channel          %d\n"                 \
            "Total mem capacity           %u\n",
            name(), devicesPerRank, deviceBusWidth, burstSize, rowBufferSize,
            columnsPerRowBuffer, rowsPerBank, banksPerRank, ranksPerChannel,
            rowBufferSize * rowsPerBank * banksPerRank * ranksPerChannel);

    string scheduler = memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS";
    string address_mapping = addrMapping == Enums::RoRaBaChCo ? "RoRaBaChCo" :
        (addrMapping == Enums::RoRaBaCoCh ? "RoRaBaCoCh" : "RoCoRaBaCh");
    string page_policy = pageMgmt == Enums::open ? "OPEN" :
        (pageMgmt == Enums::open_adaptive ? "OPEN (adaptive)" : "CLOSE");

    DPRINTF(DRAM,
            "Memory controller %s characteristics\n"    \
            "Read buffer size     %d\n"                 \
            "Write buffer size    %d\n"                 \
            "Write high thresh    %d\n"                 \
            "Write low thresh     %d\n"                 \
            "Scheduler            %s\n"                 \
            "Address mapping      %s\n"                 \
            "Page policy          %s\n",
            name(), readBufferSize, writeBufferSize, writeHighThreshold,
            writeLowThreshold, scheduler, address_mapping, page_policy);

    DPRINTF(DRAM, "Memory controller %s timing specs\n" \
            "tRCD      %d ticks\n"                        \
            "tCL       %d ticks\n"                        \
            "tRP       %d ticks\n"                        \
            "tBURST    %d ticks\n"                        \
            "tRFC      %d ticks\n"                        \
            "tREFI     %d ticks\n"                        \
            "tWTR      %d ticks\n"                        \
            "tXAW (%d) %d ticks\n",
            name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR,
            activationLimit, tXAW);
}

void
SimpleDRAM::printQs() const
{
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (auto i = readQueue.begin(); i != readQueue.end(); ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (auto i = respQueue.begin(); i != respQueue.end(); ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    }
}

bool
SimpleDRAM::recvTimingReq(PacketPtr pkt)
{
    /// @todo temporary hack to deal with memory corruption issues until
    /// 4-phase transactions are complete
    for (int x = 0; x < pendingDelete.size(); x++)
        delete pendingDelete[x];
    pendingDelete.clear();

    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // simply drop inhibited packets for now
    if (pkt->memInhibitAsserted()) {
        DPRINTF(DRAM, "Inhibited packet -- Dropping it now\n");
        pendingDelete.push_back(pkt);
        return true;
    }

    // Every million accesses, print the state of the queues
    if (numReqs % 1000000 == 0)
        printQs();

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // Find out how many dram packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one dram packet. Otherwise, a pkt translates to
    // multiple dram packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);
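    // For instance, with burstSize = 64, a 96-byte packet at address
    // 0x1010 has offset 16, and divCeil(16 + 96, 64) = 2 DRAM packets
    // (hypothetical numbers)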

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            readReqs++;
            numReqs++;
            bytesReadSys += size;
        }
    } else if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            writeReqs++;
            numReqs++;
            bytesWrittenSys += size;
        }
    } else {
        DPRINTF(DRAM, "Neither read nor write, ignore timing\n");
        neitherReadNorWrite++;
        accessAndRespond(pkt, 1);
    }

    retryRdReq = false;
    retryWrReq = false;
    return true;
}

void
SimpleDRAM::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
                                  dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    // Update stats
    avgRdQLen = readQueue.size() + respQueue.size();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (writeQueue.empty() && readQueue.empty() &&
            drainManager) {
            drainManager->signalDrainDone();
            drainManager = NULL;
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetry();
    }
}

void
SimpleDRAM::chooseNextWrite()
{
    // This method does the arbitration between write requests. The
    // chosen packet is simply moved to the head of the write
    // queue. The other methods know that this is the place to
    // look. For example, with FCFS, this method does nothing
    assert(!writeQueue.empty());

    if (writeQueue.size() == 1) {
        DPRINTF(DRAM, "Single write request, nothing to do\n");
        return;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // Do nothing, since the correct request is already head
    } else if (memSchedPolicy == Enums::frfcfs) {
        reorderQueue(writeQueue);
    } else
        panic("No scheduling policy chosen\n");

    DPRINTF(DRAM, "Selected next write request\n");
}

bool
SimpleDRAM::chooseNextRead()
{
    // This method does the arbitration between read requests. The
    // chosen packet is simply moved to the head of the queue. The
    // other methods know that this is the place to look. For example,
    // with FCFS, this method does nothing
    if (readQueue.empty()) {
        DPRINTF(DRAM, "No read request to select\n");
        return false;
    }

    // If there is only one request then there is nothing left to do
    if (readQueue.size() == 1)
        return true;

    if (memSchedPolicy == Enums::fcfs) {
        // Do nothing, since the request to serve is already the first
        // one in the read queue
    } else if (memSchedPolicy == Enums::frfcfs) {
        reorderQueue(readQueue);
    } else
        panic("No scheduling policy chosen!\n");

    DPRINTF(DRAM, "Selected next read request\n");
    return true;
}

void
SimpleDRAM::reorderQueue(std::deque<DRAMPacket*>& queue)
{
    // Only determine this when needed
    uint64_t earliest_banks = 0;

    // Search for row hits first, if no row hit is found then schedule the
    // packet to one of the earliest banks available
    bool found_earliest_pkt = false;
    auto selected_pkt_it = queue.begin();

    for (auto i = queue.begin(); i != queue.end(); ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;
        // Check if it is a row hit
        if (bank.openRow == dram_pkt->row) {
            DPRINTF(DRAM, "Row buffer hit\n");
            selected_pkt_it = i;
            break;
        } else if (!found_earliest_pkt) {
            // No row hit, go for first ready
            if (earliest_banks == 0)
                earliest_banks = minBankFreeAt(queue);

            // Bank is ready or is the first available bank
            if (bank.freeAt <= curTick() ||
                bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
                // Remember the packet to be scheduled to one of the earliest
                // banks available
                selected_pkt_it = i;
                found_earliest_pkt = true;
            }
        }
    }

    DRAMPacket* selected_pkt = *selected_pkt_it;
    queue.erase(selected_pkt_it);
    queue.push_front(selected_pkt);
}

void
SimpleDRAM::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());

        // @todo someone should pay for this
        pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, curTick() + static_latency);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // still has a pointer to it
        pendingDelete.push_back(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}

pair<Tick, Tick>
SimpleDRAM::estimateLatency(DRAMPacket* dram_pkt, Tick inTime)
{
    // If a request reaches a bank at tick 'inTime', how much time
    // *after* that does it take to finish the request, depending
    // on bank status and page open policy. Note that this method
    // considers only the time taken for the actual read or write
    // to complete, NOT any additional time thereafter for tRAS or
    // tRP.
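    // For example (illustrative, with an idle bank): a row hit costs just
    // tCL, while a row miss with an open row costs tRP + tRCD + tCL, i.e.
    // precharge, then activate, then the column access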
    Tick accLat = 0;
    Tick bankLat = 0;
    rowHitFlag = false;
    Tick potentialActTick;

    const Bank& bank = dram_pkt->bankRef;
    // open-page policy
    if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive) {
        if (bank.openRow == dram_pkt->row) {
            // When we have a row-buffer hit,
            // we don't care about tRAS having expired or not,
            // but do care about bank being free for access
            rowHitFlag = true;

            // When a series of requests arrive to the same row,
            // DDR systems are capable of streaming data continuously
            // at maximum bandwidth (subject to tCCD). Here, we approximate
            // this condition, and assume that whenever a bank is already
            // busy and a new request comes in, it can be completed with no
            // penalty beyond waiting for the existing read to complete.
            if (bank.freeAt > inTime) {
                accLat += bank.freeAt - inTime;
                bankLat += 0;
            } else {
                // CAS latency only
                accLat += tCL;
                bankLat += tCL;
            }

        } else {
            // Row-buffer miss, need to close the existing row
            // once tRAS has expired, then open the new one,
            // then add the CAS latency.
            Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt);

            if (freeTime > inTime)
                accLat += freeTime - inTime;

            // If there is no open row (open adaptive), then there
            // is no precharge delay, otherwise go with tRP
            Tick precharge_delay = bank.openRow == -1 ? 0 : tRP;

            // The bank is free, and you may be able to activate
            potentialActTick = inTime + accLat + precharge_delay;
            if (potentialActTick < bank.actAllowedAt)
                accLat += bank.actAllowedAt - potentialActTick;

            accLat += precharge_delay + tRCD + tCL;
            bankLat += precharge_delay + tRCD + tCL;
        }
    } else if (pageMgmt == Enums::close) {
        // With a close page policy, no notion of
        // bank.tRASDoneAt
        if (bank.freeAt > inTime)
            accLat += bank.freeAt - inTime;

        // The bank is free, and you may be able to activate
        potentialActTick = inTime + accLat;
        if (potentialActTick < bank.actAllowedAt)
            accLat += bank.actAllowedAt - potentialActTick;

        // page already closed, simply open the row, and
        // add the CAS latency
        accLat += tRCD + tCL;
        bankLat += tRCD + tCL;
    } else
        panic("No page management policy chosen\n");

    DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n",
            bankLat, accLat);

    return make_pair(bankLat, accLat);
}

void
SimpleDRAM::processNextReqEvent()
{
    scheduleNextReq();
}

void
SimpleDRAM::recordActivate(Tick act_tick, uint8_t rank, uint8_t bank)
{
    assert(0 <= rank && rank < ranksPerChannel);
    assert(actTicks[rank].size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // Tracking accesses after all banks are precharged.
    // startTickPrechargeAll is the tick when all the banks were again
    // precharged. The difference between act_tick and startTickPrechargeAll
    // gives the time for which the DRAM doesn't get any accesses after
    // refreshing, or after a page is closed in the closed-page or
    // open-adaptive-page policy.
    if ((numBanksActive == 0) && (act_tick > startTickPrechargeAll)) {
        prechargeAllTime += act_tick - startTickPrechargeAll;
    }

    // No need to update the number of active banks for the closed-page
    // policy, as only one bank will be activated at any given point,
    // which will be instantly precharged
    if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive)
        ++numBanksActive;

    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate must not happen before tRRD
        banks[rank][i].actAllowedAt = act_tick + tRRD;
    }
    // tRC should be added to the activation tick of the bank currently
    // accessed, where tRC = tRAS + tRP. This is just for a check, as
    // actAllowedAt for the same bank is already captured by bank.freeAt
    // and bank.tRASDoneAt
    banks[rank][bank].actAllowedAt = act_tick + tRAS + tRP;

    // next, we deal with tXAW, if the activation limit is disabled
    // then we are done
    if (actTicks[rank].empty())
        return;
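    // Illustration: with activationLimit = 4 (a tFAW-style window),
    // actTicks[rank] holds the ticks of the last four activates; a fifth
    // activate is only allowed once tXAW has elapsed since the oldest of
    // those four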

    // sanity check
    if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) {
        // @todo For now, stick with a warning
        warn("Got %d activates in window %d (%d - %d) which is smaller "
             "than %d\n", activationLimit, act_tick - actTicks[rank].back(),
             act_tick, actTicks[rank].back(), tXAW);
    }

    // shift the times used for the book keeping, the last element
    // (highest index) is the oldest one and hence the lowest value
    actTicks[rank].pop_back();

    // record a new activation (in the future)
    actTicks[rank].push_front(act_tick);

    // cannot activate more than X times in time window tXAW, push the
    // next one (the X + 1'st activate) to be tXAW away from the
    // oldest in our window of X
    if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) {
        DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier "
                "than %d\n", activationLimit, actTicks[rank].back() + tXAW);
        for (int j = 0; j < banksPerRank; j++)
            // next activate must not happen before end of window
            banks[rank][j].actAllowedAt = actTicks[rank].back() + tXAW;
    }
}

void
SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // estimate the bank and access latency
    pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick());
    Tick bankLat = lat.first;
    Tick accessLat = lat.second;
    Tick actTick;

    // This request was woken up at this time based on a prior call
    // to estimateLatency(). However, between then and now, both the
    // accessLatency and/or busBusyUntil may have changed. We need
    // to correct for that.
    Tick addDelay = (curTick() + accessLat < busBusyUntil) ?
        busBusyUntil - (curTick() + accessLat) : 0;

    Bank& bank = dram_pkt->bankRef;

    // Update bank state
    if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive) {
        bank.freeAt = curTick() + addDelay + accessLat;

        // If you activated a new row due to this access, the next access
        // will have to respect tRAS for this bank.
        if (!rowHitFlag) {
            // any waiting for banks is accounted for in freeAt
            actTick = bank.freeAt - tCL - tRCD;
            bank.tRASDoneAt = actTick + tRAS;
            recordActivate(actTick, dram_pkt->rank, dram_pkt->bank);

            // if we closed an open row as a result of this access,
            // then sample the number of bytes accessed before
            // resetting it
            if (bank.openRow != -1)
                bytesPerActivate.sample(bank.bytesAccessed);

            // update the open row
            bank.openRow = dram_pkt->row;

            // start counting anew, this covers both the case when we
            // auto-precharged, and when this access is forced to
            // precharge
            bank.bytesAccessed = 0;
            bank.rowAccesses = 0;
        }

        // increment the bytes accessed and the accesses per row
        bank.bytesAccessed += burstSize;
        ++bank.rowAccesses;

        // if we reached the max, then issue with an auto-precharge
        bool auto_precharge = bank.rowAccesses == maxAccessesPerRow;

        // if we did not hit the limit, we might still want to
        // auto-precharge
        if (!auto_precharge && pageMgmt == Enums::open_adaptive) {
            // a twist on the open page policy is to not blindly keep the
            // page open, but close it if there are no row hits, and there
            // are bank conflicts in the queue
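            // e.g., if the queue holds another packet for the same bank
            // but a different row (a conflict) and none for the currently
            // open row, precharging now hides the tRP of the upcoming miss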
            bool got_more_hits = false;
            bool got_bank_conflict = false;

            // either look at the read queue or write queue
            const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue :
                writeQueue;
            auto p = queue.begin();
            // make sure we are not considering the packet that we are
            // currently dealing with (which is the head of the queue)
            ++p;

            // keep on looking until we have found both or reached
            // the end
            while (!(got_more_hits && got_bank_conflict) &&
                   p != queue.end()) {
                bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                    (dram_pkt->bank == (*p)->bank);
                bool same_row = dram_pkt->row == (*p)->row;
                got_more_hits |= same_rank_bank && same_row;
                got_bank_conflict |= same_rank_bank && !same_row;
                ++p;
            }

            // auto pre-charge if we have not got any more hits, and
            // have a bank conflict
            auto_precharge = !got_more_hits && got_bank_conflict;
        }

        // if this access should use auto-precharge, then we are
        // closing the row
        if (auto_precharge) {
            bank.openRow = -1;
            bank.freeAt = std::max(bank.freeAt, bank.tRASDoneAt) + tRP;
            --numBanksActive;
            if (numBanksActive == 0) {
                startTickPrechargeAll = std::max(startTickPrechargeAll,
                                                 bank.freeAt);
                DPRINTF(DRAM, "All banks precharged at tick: %ld\n",
                        startTickPrechargeAll);
            }

            // sample the bytes per activate here since we are closing
            // the page
            bytesPerActivate.sample(bank.bytesAccessed);

            DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
        }

        DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
    } else if (pageMgmt == Enums::close) {
        actTick = curTick() + addDelay + accessLat - tRCD - tCL;
        recordActivate(actTick, dram_pkt->rank, dram_pkt->bank);

        // If the DRAM has a very quick tRAS, the bank can be made free
        // after consecutive tCL, tRCD, tRP times. In general, however,
        // an additional wait is required to respect tRAS.
        bank.freeAt = std::max(actTick + tRAS + tRP,
                actTick + tRCD + tCL + tRP);
        DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
        bytesPerActivate.sample(burstSize);
        startTickPrechargeAll = std::max(startTickPrechargeAll, bank.freeAt);
    } else
        panic("No page management policy chosen\n");

    // Update request parameters
    dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST;

    DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d " \
                  "readytime is %lld busbusyuntil is %lld. " \
                  "Scheduling at readyTime\n", dram_pkt->addr,
                   curTick(), accessLat, dram_pkt->readyTime, busBusyUntil);

    // Make sure requests are not overlapping on the databus
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);

    // Update bus state
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM, "Access time is %lld\n",
            dram_pkt->readyTime - dram_pkt->entryTime);

    // Update the minimum timing between the requests
    newTime = (busBusyUntil > tRP + tRCD + tCL) ?
        std::max(busBusyUntil - (tRP + tRCD + tCL), curTick()) : curTick();
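    // in other words, the next request is presumably woken up early enough
    // that a worst-case row miss (tRP + tRCD + tCL) can be overlapped with
    // the current bus transfer, rather than waiting for the bus to go idle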

    // Update the access related stats
    if (dram_pkt->isRead) {
        if (rowHitFlag)
            readRowHits++;
        bytesReadDRAM += burstSize;
        perBankRdBursts[dram_pkt->bankId]++;
    } else {
        if (rowHitFlag)
            writeRowHits++;
        bytesWritten += burstSize;
        perBankWrBursts[dram_pkt->bankId]++;

        // At this point, commonality between reads and writes ends.
        // For writes, we are done since we long ago responded to the
        // requestor.
        return;
    }

    // Update latency stats
    totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
    totBankLat += bankLat;
    totBusLat += tBURST;
    totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat - tBURST;

    // At this point we're done dealing with the request
    // It will be moved to a separate response queue with a
    // correct readyTime, and eventually be sent back at that
    // time
    moveToRespQ();

    // Schedule the next read event
    if (!nextReqEvent.scheduled() && !stopReads) {
        schedule(nextReqEvent, newTime);
    } else {
        if (newTime < nextReqEvent.when())
            reschedule(nextReqEvent, newTime);
    }
}

void
SimpleDRAM::moveToRespQ()
{
    // Remove from read queue
    DRAMPacket* dram_pkt = readQueue.front();
    readQueue.pop_front();

    // sanity check
    assert(dram_pkt->size <= burstSize);

    // Insert into response queue sorted by readyTime
    // It will be sent back to the requestor at its
    // readyTime
    if (respQueue.empty()) {
        respQueue.push_front(dram_pkt);
        assert(!respondEvent.scheduled());
        assert(dram_pkt->readyTime >= curTick());
        schedule(respondEvent, dram_pkt->readyTime);
    } else {
        bool done = false;
        auto i = respQueue.begin();
        while (!done && i != respQueue.end()) {
            if ((*i)->readyTime > dram_pkt->readyTime) {
                respQueue.insert(i, dram_pkt);
                done = true;
            }
            ++i;
        }

        if (!done)
            respQueue.push_back(dram_pkt);

        assert(respondEvent.scheduled());

        if (respQueue.front()->readyTime < respondEvent.when()) {
            assert(respQueue.front()->readyTime >= curTick());
            reschedule(respondEvent, respQueue.front()->readyTime);
        }
    }
}

void
SimpleDRAM::scheduleNextReq()
{
    DPRINTF(DRAM, "Reached scheduleNextReq()\n");

    // Figure out which read request goes next, and move it to the
    // front of the read queue
    if (!chooseNextRead()) {
        // In the case there is no read request to go next, trigger
        // writes if we have passed the low threshold (or if we are
        // draining)
        if (!writeQueue.empty() && !writeEvent.scheduled() &&
            (writeQueue.size() > writeLowThreshold || drainManager))
            triggerWrites();
    } else {
        doDRAMAccess(readQueue.front());
    }
}

Tick
SimpleDRAM::maxBankFreeAt() const
{
    Tick banksFree = 0;

    for (int i = 0; i < ranksPerChannel; i++)
        for (int j = 0; j < banksPerRank; j++)
            banksFree = std::max(banks[i][j].freeAt, banksFree);

    return banksFree;
}

uint64_t
SimpleDRAM::minBankFreeAt(const deque<DRAMPacket*>& queue) const
{
    uint64_t bank_mask = 0;
    Tick freeAt = MaxTick;
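    // The returned mask has one bit per bank id; a set bit means that
    // bank both has a queued request and becomes free earliest. For
    // example, if banks 2 and 5 tie for the minimum freeAt, bits 2 and 5
    // are set, and reorderQueue() picks the first packet targeting either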

    // determine if we have queued transactions targeting the
    // bank in question
    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
    for (auto p = queue.begin(); p != queue.end(); ++p) {
        got_waiting[(*p)->bankId] = true;
    }

    for (int i = 0; i < ranksPerChannel; i++) {
        for (int j = 0; j < banksPerRank; j++) {
            // if we have waiting requests for the bank, and it is
            // amongst the first available, update the mask
            if (got_waiting[i * banksPerRank + j] &&
                banks[i][j].freeAt <= freeAt) {
                // reset bank mask if new minimum is found
                if (banks[i][j].freeAt < freeAt)
                    bank_mask = 0;
                // set the bit corresponding to the available bank,
                // indexed consistently with bankId (i * banksPerRank + j)
                uint8_t bit_index = i * banksPerRank + j;
                replaceBits(bank_mask, bit_index, bit_index, 1);
                freeAt = banks[i][j].freeAt;
            }
        }
    }
    return bank_mask;
}

void
SimpleDRAM::processRefreshEvent()
{
    DPRINTF(DRAM, "Refreshing at tick %ld\n", curTick());

    Tick banksFree = std::max(curTick(), maxBankFreeAt()) + tRFC;

    for (int i = 0; i < ranksPerChannel; i++)
        for (int j = 0; j < banksPerRank; j++) {
            banks[i][j].freeAt = banksFree;
            banks[i][j].openRow = -1;
        }

    // updating startTickPrechargeAll, isprechargeAll
    numBanksActive = 0;
    startTickPrechargeAll = banksFree;

    schedule(refreshEvent, curTick() + tREFI);
}

void
SimpleDRAM::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    readReqs
        .name(name() + ".readReqs")
        .desc("Number of read requests accepted");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Number of write requests accepted");

    readBursts
        .name(name() + ".readBursts")
        .desc("Number of DRAM read bursts, "
              "including those serviced by the write queue");

    writeBursts
        .name(name() + ".writeBursts")
        .desc("Number of DRAM write bursts, "
              "including those merged in the write queue");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of DRAM read bursts serviced by the write queue");

    mergedWrBursts
        .name(name() + ".mergedWrBursts")
        .desc("Number of DRAM write bursts merged with an existing one");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWriteReqs")
        .desc("Number of requests that are neither read nor write");

    perBankRdBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdBursts")
        .desc("Per bank read bursts");

    perBankWrBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrBursts")
        .desc("Per bank write bursts");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length when enqueuing")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length when enqueuing")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total ticks spent queuing");

    totBankLat
        .name(name() + ".totBankLat")
        .desc("Total ticks spent accessing banks");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total ticks spent in databus transfers");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Total ticks spent from burst creation until serviced "
              "by the DRAM");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per DRAM burst")
        .precision(2);

    avgQLat = totQLat / (readBursts - servicedByWrQ);
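    // note that the denominator excludes bursts serviced by the write
    // queue, since those never perform a DRAM access and hence have no
    // queueing, bank or bus latency recorded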
1438
1439    avgBankLat
1440        .name(name() + ".avgBankLat")
1441        .desc("Average bank access latency per DRAM burst")
1442        .precision(2);
1443
1444    avgBankLat = totBankLat / (readBursts - servicedByWrQ);
1445
1446    avgBusLat
1447        .name(name() + ".avgBusLat")
1448        .desc("Average bus latency per DRAM burst")
1449        .precision(2);
1450
1451    avgBusLat = totBusLat / (readBursts - servicedByWrQ);
1452
1453    avgMemAccLat
1454        .name(name() + ".avgMemAccLat")
1455        .desc("Average memory access latency per DRAM burst")
1456        .precision(2);
1457
1458    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);
1459
1460    numRdRetry
1461        .name(name() + ".numRdRetry")
1462        .desc("Number of times read queue was full causing retry");
1463
1464    numWrRetry
1465        .name(name() + ".numWrRetry")
1466        .desc("Number of times write queue was full causing retry");
1467
1468    readRowHits
1469        .name(name() + ".readRowHits")
1470        .desc("Number of row buffer hits during reads");
1471
1472    writeRowHits
1473        .name(name() + ".writeRowHits")
1474        .desc("Number of row buffer hits during writes");
1475
1476    readRowHitRate
1477        .name(name() + ".readRowHitRate")
1478        .desc("Row buffer hit rate for reads")
1479        .precision(2);
1480
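    // only bursts that actually access the DRAM array can hit in the
    // row buffer, hence the adjusted denominators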
1481    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;
1482
1483    writeRowHitRate
1484        .name(name() + ".writeRowHitRate")
1485        .desc("Row buffer hit rate for writes")
1486        .precision(2);
1487
1488    writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;
1489
1490    readPktSize
1491        .init(ceilLog2(burstSize) + 1)
1492        .name(name() + ".readPktSize")
1493        .desc("Read request sizes (log2)");
1494
1495    writePktSize
1496        .init(ceilLog2(burstSize) + 1)
1497        .name(name() + ".writePktSize")
1498        .desc("Write request sizes (log2)");
1499
1500    rdQLenPdf
1501        .init(readBufferSize)
1502        .name(name() + ".rdQLenPdf")
1503        .desc("Read queue length distribution seen by incoming requests");
1504
1505    wrQLenPdf
1506        .init(writeBufferSize)
1507        .name(name() + ".wrQLenPdf")
1508        .desc("Write queue length distribution seen by incoming requests");
1509
1510    bytesPerActivate
1511        .init(maxAccessesPerRow)
1512        .name(name() + ".bytesPerActivate")
1513        .desc("Bytes accessed per row activation")
1514        .flags(nozero);
1515
1516    bytesReadDRAM
1517        .name(name() + ".bytesReadDRAM")
1518        .desc("Total number of bytes read from DRAM");
1519
1520    bytesReadWrQ
1521        .name(name() + ".bytesReadWrQ")
1522        .desc("Total number of bytes read from write queue");
1523
1524    bytesWritten
1525        .name(name() + ".bytesWritten")
1526        .desc("Total number of bytes written to DRAM");
1527
1528    bytesReadSys
1529        .name(name() + ".bytesReadSys")
1530        .desc("Total read bytes from the system interface side");
1531
1532    bytesWrittenSys
1533        .name(name() + ".bytesWrittenSys")
1534        .desc("Total written bytes from the system interface side");
1535
1536    avgRdBW
1537        .name(name() + ".avgRdBW")
1538        .desc("Average DRAM read bandwidth in MByte/s")
1539        .precision(2);
1540
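    // the bandwidth stats divide by 10^6 and are thus reported in
    // decimal MByte/s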
1541    avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;
1542
1543    avgWrBW
1544        .name(name() + ".avgWrBW")
1545        .desc("Average DRAM write bandwidth in MByte/s")
1546        .precision(2);
1547
1548    avgWrBW = (bytesWritten / 1000000) / simSeconds;
1549
1550    avgRdBWSys
1551        .name(name() + ".avgRdBWSys")
1552        .desc("Average system read bandwidth in MByte/s")
1553        .precision(2);
1554
1555    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;
1556
1557    avgWrBWSys
1558        .name(name() + ".avgWrBWSys")
1559        .desc("Average system write bandwidth in MByte/s")
1560        .precision(2);
1561
1562    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;
1563
1564    peakBW
1565        .name(name() + ".peakBW")
1566        .desc("Theoretical peak bandwidth in MByte/s")
1567        .precision(2);
1568
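    // SimClock::Frequency is in ticks per second, so the first factor
    // is the number of bursts per second; scaled by the burst size in
    // bytes this gives MByte/s, e.g. a hypothetical 64-byte burst every
    // 5 ns works out to 12800 MByte/s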
1569    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;
1570
1571    busUtil
1572        .name(name() + ".busUtil")
1573        .desc("Data bus utilization in percentage")
1574        .precision(2);
1575
1576    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
1577
1578    totGap
1579        .name(name() + ".totGap")
1580        .desc("Total gap between requests");
1581
1582    avgGap
1583        .name(name() + ".avgGap")
1584        .desc("Average gap between requests")
1585        .precision(2);
1586
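    // the gap is accumulated once per accepted request, so average over
    // reads and writes combined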
1587    avgGap = totGap / (readReqs + writeReqs);
1588
1589    // Stats for DRAM Power calculation based on Micron datasheet
1590    busUtilRead
1591        .name(name() + ".busUtilRead")
1592        .desc("Data bus utilization in percentage for reads")
1593        .precision(2);
1594
1595    busUtilRead = avgRdBW / peakBW * 100;
1596
1597    busUtilWrite
1598        .name(name() + ".busUtilWrite")
1599        .desc("Data bus utilization in percentage for writes")
1600        .precision(2);
1601
1602    busUtilWrite = avgWrBW / peakBW * 100;
1603
1604    pageHitRate
1605        .name(name() + ".pageHitRate")
1606        .desc("Row buffer hit rate, read and write combined")
1607        .precision(2);
1608
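    // combined hit rate over bursts that reached the DRAM, excluding
    // reads serviced by the write queue and merged write bursts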
1609    pageHitRate = (writeRowHits + readRowHits) /
1610        (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;
1611
1612    prechargeAllPercent
1613        .name(name() + ".prechargeAllPercent")
1614        .desc("Percentage of time for which DRAM has all the banks in "
1615              "precharge state")
1616        .precision(2);
1617
1618    prechargeAllPercent = prechargeAllTime / simTicks * 100;
1619}
1620
1621void
1622SimpleDRAM::recvFunctional(PacketPtr pkt)
1623{
1624    // rely on the abstract memory
1625    functionalAccess(pkt);
1626}
1627
1628BaseSlavePort&
1629SimpleDRAM::getSlavePort(const string &if_name, PortID idx)
1630{
1631    if (if_name == "port") {
1632        return port;
1633    } else {
1634        return MemObject::getSlavePort(if_name, idx);
1635    }
1636}
1637
1638unsigned int
1639SimpleDRAM::drain(DrainManager *dm)
1640{
1641    unsigned int count = port.drain(dm);
1642
1643    // if there is anything in any of our internal queues, keep track
1644    // of that as well
1645    if (!(writeQueue.empty() && readQueue.empty() &&
1646          respQueue.empty())) {
1647        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
1648                " resp: %d\n", writeQueue.size(), readQueue.size(),
1649                respQueue.size());
1650        ++count;
1651        drainManager = dm;
1652        // the only part that is not drained automatically over time
1653        // is the write queue, thus trigger writes if there are any
1654        // waiting and no reads waiting, otherwise wait until the
1655        // reads are done
1656        if (readQueue.empty() && !writeQueue.empty() &&
1657            !writeEvent.scheduled())
1658            triggerWrites();
1659    }
1660
1661    if (count)
1662        setDrainState(Drainable::Draining);
1663    else
1664        setDrainState(Drainable::Drained);
1665    return count;
1666}
1667
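// The port is a queued slave port, so responses are buffered in the
// port's internal packet queue until they can be sent back.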
1668SimpleDRAM::MemoryPort::MemoryPort(const std::string& name, SimpleDRAM& _memory)
1669    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
1670      memory(_memory)
1671{ }
1672
1673AddrRangeList
1674SimpleDRAM::MemoryPort::getAddrRanges() const
1675{
1676    AddrRangeList ranges;
1677    ranges.push_back(memory.getAddrRange());
1678    return ranges;
1679}
1680
1681void
1682SimpleDRAM::MemoryPort::recvFunctional(PacketPtr pkt)
1683{
1684    pkt->pushLabel(memory.name());
1685
1686    if (!queue.checkFunctional(pkt)) {
1687        // Default implementation of SimpleTimingPort::recvFunctional()
1688        // calls recvAtomic() and throws away the latency; we can save a
1689        // little here by just not calculating the latency.
1690        memory.recvFunctional(pkt);
1691    }
1692
1693    pkt->popLabel();
1694}
1695
1696Tick
1697SimpleDRAM::MemoryPort::recvAtomic(PacketPtr pkt)
1698{
1699    return memory.recvAtomic(pkt);
1700}
1701
1702bool
1703SimpleDRAM::MemoryPort::recvTimingReq(PacketPtr pkt)
1704{
1705    // pass it to the memory controller
1706    return memory.recvTimingReq(pkt);
1707}
1708
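// create() is the standard gem5 factory hook: the params object is
// generated from the Python SimObject description, and this method is
// called when the configuration script instantiates the controller.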
1709SimpleDRAM*
1710SimpleDRAMParams::create()
1711{
1712    return new SimpleDRAM(this);
1713}
1714