dram_ctrl.cc revision 10208:c249f7660eb7
1/*
2 * Copyright (c) 2010-2014 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder.  You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2013 Amin Farmahini-Farahani
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Andreas Hansson
41 *          Ani Udipi
42 *          Neha Agarwal
43 */
44
45#include "base/bitfield.hh"
46#include "base/trace.hh"
47#include "debug/DRAM.hh"
48#include "debug/DRAMState.hh"
49#include "debug/Drain.hh"
50#include "mem/dram_ctrl.hh"
51#include "sim/system.hh"
52
53using namespace std;
54
55DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
56    AbstractMemory(p),
57    port(name() + ".port", *this),
58    retryRdReq(false), retryWrReq(false),
59    rowHitFlag(false), busState(READ),
60    nextReqEvent(this), respondEvent(this), activateEvent(this),
61    prechargeEvent(this), refreshEvent(this), powerEvent(this),
62    drainManager(NULL),
63    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
64    deviceRowBufferSize(p->device_rowbuffer_size),
65    devicesPerRank(p->devices_per_rank),
66    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
67    rowBufferSize(devicesPerRank * deviceRowBufferSize),
68    columnsPerRowBuffer(rowBufferSize / burstSize),
69    ranksPerChannel(p->ranks_per_channel),
70    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
71    readBufferSize(p->read_buffer_size),
72    writeBufferSize(p->write_buffer_size),
73    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
74    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
75    minWritesPerSwitch(p->min_writes_per_switch),
76    writesThisTime(0), readsThisTime(0),
77    tWTR(p->tWTR), tRTW(p->tRTW), tBURST(p->tBURST),
78    tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
79    tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
80    tXAW(p->tXAW), activationLimit(p->activation_limit),
81    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
82    pageMgmt(p->page_policy),
83    maxAccessesPerRow(p->max_accesses_per_row),
84    frontendLatency(p->static_frontend_latency),
85    backendLatency(p->static_backend_latency),
86    busBusyUntil(0), refreshDueAt(0), refreshState(REF_IDLE),
87    pwrStateTrans(PWR_IDLE), pwrState(PWR_IDLE), prevArrival(0),
88    nextReqTime(0), pwrStateTick(0), numBanksActive(0)
89{
90    // create the bank states based on the dimensions of the ranks and
91    // banks
92    banks.resize(ranksPerChannel);
93    actTicks.resize(ranksPerChannel);
94    for (size_t c = 0; c < ranksPerChannel; ++c) {
95        banks[c].resize(banksPerRank);
96        actTicks[c].resize(activationLimit, 0);
97    }
98
99    // perform a basic check of the write thresholds
100    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
101        fatal("Write buffer low threshold %d must be smaller than the "
102              "high threshold %d\n", p->write_low_thresh_perc,
103              p->write_high_thresh_perc);
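    // As an illustrative example (assumed values, not defaults): with a
    // 32-entry write buffer, write_high_thresh_perc = 85 and
    // write_low_thresh_perc = 50, the thresholds computed above come out
    // to 27 and 16 entries respectively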
104
105    // determine the rows per bank by looking at the total capacity
106    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());
107
108    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
109            AbstractMemory::size());
110
111    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
112            rowBufferSize, columnsPerRowBuffer);
113
114    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);
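    // Worked example of the geometry above (illustrative values): with
    // 8 devices per rank, an 8-bit device bus and a burst length of 8,
    // burstSize = (8 * 8 * 8) / 8 = 64 bytes; a 1kB device row buffer
    // then gives an 8kB rank-level row buffer, i.e. 128 columns per row
    // buffer, and a 512MB channel with one rank of 8 banks has
    // 512MB / (8kB * 8) = 8192 rows per bank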
115
116    if (range.interleaved()) {
117        if (channels != range.stripes())
118            fatal("%s has %d interleaved address stripes but %d channel(s)\n",
119                  name(), range.stripes(), channels);
120
121        if (addrMapping == Enums::RoRaBaChCo) {
122            if (rowBufferSize != range.granularity()) {
123                fatal("Interleaving of %s doesn't match RoRaBaChCo "
124                      "address map\n", name());
125            }
126        } else if (addrMapping == Enums::RoRaBaCoCh) {
127            if (system()->cacheLineSize() != range.granularity()) {
128                fatal("Interleaving of %s doesn't match RoRaBaCoCh "
129                      "address map\n", name());
130            }
131        } else if (addrMapping == Enums::RoCoRaBaCh) {
132            if (system()->cacheLineSize() != range.granularity())
133                fatal("Interleaving of %s doesn't match RoCoRaBaCh "
134                      "address map\n", name());
135        }
136    }
137
138    // some basic sanity checks
139    if (tREFI <= tRP || tREFI <= tRFC) {
140        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
141              tREFI, tRP, tRFC);
142    }
143}
144
145void
146DRAMCtrl::init()
147{
148    if (!port.isConnected()) {
149        fatal("DRAMCtrl %s is unconnected!\n", name());
150    } else {
151        port.sendRangeChange();
152    }
153}
154
155void
156DRAMCtrl::startup()
157{
158    // update the start tick for the precharge accounting to the
159    // current tick
160    pwrStateTick = curTick();
161
162    // shift the bus busy time sufficiently far ahead that we never
163    // have to worry about negative values when computing the time for
164    // the next request, this will add an insignificant bubble at the
165    // start of simulation
166    busBusyUntil = curTick() + tRP + tRCD + tCL;
167
168    // print the configuration of the controller
169    printParams();
170
171    // kick off the refresh, and give ourselves enough time to
172    // precharge
173    schedule(refreshEvent, curTick() + tREFI - tRP);
174}
175
176Tick
177DRAMCtrl::recvAtomic(PacketPtr pkt)
178{
179    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());
180
181    // do the actual memory access and turn the packet into a response
182    access(pkt);
183
184    Tick latency = 0;
185    if (!pkt->memInhibitAsserted() && pkt->hasData()) {
186        // this value is not supposed to be accurate, just enough to
187        // keep things going, mimic a closed page
188        latency = tRP + tRCD + tCL;
189    }
190    return latency;
191}
192
193bool
194DRAMCtrl::readQueueFull(unsigned int neededEntries) const
195{
196    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
197            readBufferSize, readQueue.size() + respQueue.size(),
198            neededEntries);
199
200    return
201        (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
202}
203
204bool
205DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
206{
207    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
208            writeBufferSize, writeQueue.size(), neededEntries);
209    return (writeQueue.size() + neededEntries) > writeBufferSize;
210}
211
212DRAMCtrl::DRAMPacket*
213DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
214                       bool isRead)
215{
216    // decode the address based on the address mapping scheme, with
217    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
218    // channel, respectively
219    uint8_t rank;
220    uint8_t bank;
221    uint16_t row;
222
223    // truncate the address to the access granularity
224    Addr addr = dramPktAddr / burstSize;
225
226    // we have removed the lowest order address bits that denote the
227    // position within the column
228    if (addrMapping == Enums::RoRaBaChCo) {
229        // the lowest order bits denote the column to ensure that
230        // sequential cache lines occupy the same row
231        addr = addr / columnsPerRowBuffer;
232
233        // take out the channel part of the address
234        addr = addr / channels;
235
236        // after the channel bits, get the bank bits to interleave
237        // over the banks
238        bank = addr % banksPerRank;
239        addr = addr / banksPerRank;
240
241        // after the bank, we get the rank bits which thus interleave
242        // over the ranks
243        rank = addr % ranksPerChannel;
244        addr = addr / ranksPerChannel;
245
246        // lastly, get the row bits
247        row = addr % rowsPerBank;
248        addr = addr / rowsPerBank;
249    } else if (addrMapping == Enums::RoRaBaCoCh) {
250        // take out the channel part of the address
251        addr = addr / channels;
252
253        // next, the column
254        addr = addr / columnsPerRowBuffer;
255
256        // after the column bits, we get the bank bits to interleave
257        // over the banks
258        bank = addr % banksPerRank;
259        addr = addr / banksPerRank;
260
261        // after the bank, we get the rank bits which thus interleave
262        // over the ranks
263        rank = addr % ranksPerChannel;
264        addr = addr / ranksPerChannel;
265
266        // lastly, get the row bits
267        row = addr % rowsPerBank;
268        addr = addr / rowsPerBank;
269    } else if (addrMapping == Enums::RoCoRaBaCh) {
270        // optimise for closed page mode and utilise maximum
271        // parallelism of the DRAM (at the cost of power)
272
273        // take out the channel part of the address; note that this has
274        // to match with how accesses are interleaved between the
275        // controllers in the address mapping
276        addr = addr / channels;
277
278        // start with the bank bits, as this provides the maximum
279        // opportunity for parallelism between requests
280        bank = addr % banksPerRank;
281        addr = addr / banksPerRank;
282
283        // next get the rank bits
284        rank = addr % ranksPerChannel;
285        addr = addr / ranksPerChannel;
286
287        // next the column bits which we do not need to keep track of
288        // and simply skip past
289        addr = addr / columnsPerRowBuffer;
290
291        // lastly, get the row bits
292        row = addr % rowsPerBank;
293        addr = addr / rowsPerBank;
294    } else
295        panic("Unknown address mapping policy chosen!");
296
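    // Illustrative decode (assumed geometry, not a particular config):
    // with burstSize = 64, 128 columns per row buffer, 1 channel, 8 banks
    // and 2 ranks, RoRaBaChCo maps byte address 0x40 to rank 0, bank 0,
    // row 0 (the second burst of that row), whereas address 0x2000 maps
    // to rank 0, bank 1, row 0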
297    assert(rank < ranksPerChannel);
298    assert(bank < banksPerRank);
299    assert(row < rowsPerBank);
300
301    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
302            dramPktAddr, rank, bank, row);
303
304    // create the corresponding DRAM packet with the entry time and
305    // ready time set to the current tick, the latter will be updated
306    // later
307    uint16_t bank_id = banksPerRank * rank + bank;
308    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
309                          size, banks[rank][bank]);
310}
311
312void
313DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
314{
315    // only add to the read queue here. whenever the request is
316    // eventually done, set the readyTime, and call schedule()
317    assert(!pkt->isWrite());
318
319    assert(pktCount != 0);
320
321    // if the request size is larger than burst size, the pkt is split into
322    // multiple DRAM packets
323    // Note that if the pkt starting address is not aligned to burst size, the
324    // address of the first DRAM packet is kept unaligned. Subsequent DRAM packets
325    // are aligned to burst size boundaries. This is to ensure we accurately
326    // check read packets against packets in write queue.
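    // For example (sizes assumed for illustration): with a 64-byte burst,
    // a 128-byte read starting at address 0x70 is split into three DRAM
    // packets of 16, 64 and 48 bytes at addresses 0x70, 0x80 and 0xC0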
327    Addr addr = pkt->getAddr();
328    unsigned pktsServicedByWrQ = 0;
329    BurstHelper* burst_helper = NULL;
330    for (int cnt = 0; cnt < pktCount; ++cnt) {
331        unsigned size = std::min((addr | (burstSize - 1)) + 1,
332                        pkt->getAddr() + pkt->getSize()) - addr;
333        readPktSize[ceilLog2(size)]++;
334        readBursts++;
335
336        // First check write buffer to see if the data is already at
337        // the controller
338        bool foundInWrQ = false;
339        for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
340            // check if the read is subsumed in the write entry we are
341            // looking at
342            if ((*i)->addr <= addr &&
343                (addr + size) <= ((*i)->addr + (*i)->size)) {
344                foundInWrQ = true;
345                servicedByWrQ++;
346                pktsServicedByWrQ++;
347                DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
348                        "write queue\n", addr, size);
349                bytesReadWrQ += burstSize;
350                break;
351            }
352        }
353
354        // If not found in the write q, make a DRAM packet and
355        // push it onto the read queue
356        if (!foundInWrQ) {
357
358            // Make the burst helper for split packets
359            if (pktCount > 1 && burst_helper == NULL) {
360                DPRINTF(DRAM, "Read to addr %lld translates to %d "
361                        "dram requests\n", pkt->getAddr(), pktCount);
362                burst_helper = new BurstHelper(pktCount);
363            }
364
365            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
366            dram_pkt->burstHelper = burst_helper;
367
368            assert(!readQueueFull(1));
369            rdQLenPdf[readQueue.size() + respQueue.size()]++;
370
371            DPRINTF(DRAM, "Adding to read queue\n");
372
373            readQueue.push_back(dram_pkt);
374
375            // Update stats
376            avgRdQLen = readQueue.size() + respQueue.size();
377        }
378
379        // Starting address of next dram pkt (aligned to burstSize boundary)
380        addr = (addr | (burstSize - 1)) + 1;
381    }
382
383    // If all packets are serviced by write queue, we send the response back
384    if (pktsServicedByWrQ == pktCount) {
385        accessAndRespond(pkt, frontendLatency);
386        return;
387    }
388
389    // Update how many split packets are serviced by write queue
390    if (burst_helper != NULL)
391        burst_helper->burstsServiced = pktsServicedByWrQ;
392
393    // If we are not already scheduled to get a request out of the
394    // queue, do so now
395    if (!nextReqEvent.scheduled()) {
396        DPRINTF(DRAM, "Request scheduled immediately\n");
397        schedule(nextReqEvent, curTick());
398    }
399}
400
401void
402DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
403{
404    // only add to the write queue here. whenever the request is
405    // eventually done, set the readyTime, and call schedule()
406    assert(pkt->isWrite());
407
408    // if the request size is larger than burst size, the pkt is split into
409    // multiple DRAM packets
410    Addr addr = pkt->getAddr();
411    for (int cnt = 0; cnt < pktCount; ++cnt) {
412        unsigned size = std::min((addr | (burstSize - 1)) + 1,
413                        pkt->getAddr() + pkt->getSize()) - addr;
414        writePktSize[ceilLog2(size)]++;
415        writeBursts++;
416
417        // see if we can merge with an existing item in the write
418        // queue and keep track of whether we have merged or not so we
419        // can stop at that point and also avoid enqueueing a new
420        // request
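        // As an illustration (assumed addresses): with a 64-byte burst, a
        // new 32-byte write at 0x40 merges with an existing 32-byte entry
        // at 0x60 into a single 64-byte entry starting at 0x40, since
        // together they fit within one burst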
421        bool merged = false;
422        auto w = writeQueue.begin();
423
424        while(!merged && w != writeQueue.end()) {
425            // either of the two could be first, if they are the same
426            // it does not matter which way we go
427            if ((*w)->addr >= addr) {
428                // the existing one starts after the new one, figure
429                // out where the new one ends with respect to the
430                // existing one
431                if ((addr + size) >= ((*w)->addr + (*w)->size)) {
432                    // check if the existing one is completely
433                    // subsumed in the new one
434                    DPRINTF(DRAM, "Merging write covering existing burst\n");
435                    merged = true;
436                    // update both the address and the size
437                    (*w)->addr = addr;
438                    (*w)->size = size;
439                } else if ((addr + size) >= (*w)->addr &&
440                           ((*w)->addr + (*w)->size - addr) <= burstSize) {
441                    // the new one is just before or partially
442                    // overlapping with the existing one, and together
443                    // they fit within a burst
444                    DPRINTF(DRAM, "Merging write before existing burst\n");
445                    merged = true;
446                    // the existing queue item needs to be adjusted with
447                    // respect to both address and size
448                    (*w)->size = (*w)->addr + (*w)->size - addr;
449                    (*w)->addr = addr;
450                }
451            } else {
452                // the new one starts after the current one, figure
453                // out where the existing one ends with respect to the
454                // new one
455                if (((*w)->addr + (*w)->size) >= (addr + size)) {
456                    // check if the new one is completely subsumed in the
457                    // existing one
458                    DPRINTF(DRAM, "Merging write into existing burst\n");
459                    merged = true;
460                    // no adjustments necessary
461                } else if (((*w)->addr + (*w)->size) >= addr &&
462                           (addr + size - (*w)->addr) <= burstSize) {
463                    // the existing one is just before or partially
464                    // overlapping with the new one, and together
465                    // they fit within a burst
466                    DPRINTF(DRAM, "Merging write after existing burst\n");
467                    merged = true;
468                    // the address is right, and only the size has
469                    // to be adjusted
470                    (*w)->size = addr + size - (*w)->addr;
471                }
472            }
473            ++w;
474        }
475
476        // if the item was not merged we need to create a new write
477        // and enqueue it
478        if (!merged) {
479            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);
480
481            assert(writeQueue.size() < writeBufferSize);
482            wrQLenPdf[writeQueue.size()]++;
483
484            DPRINTF(DRAM, "Adding to write queue\n");
485
486            writeQueue.push_back(dram_pkt);
487
488            // Update stats
489            avgWrQLen = writeQueue.size();
490        } else {
491            // keep track of the fact that this burst effectively
492            // disappeared as it was merged with an existing one
493            mergedWrBursts++;
494        }
495
496        // Starting address of next dram pkt (aligned to burstSize boundary)
497        addr = (addr | (burstSize - 1)) + 1;
498    }
499
500    // we do not wait for the writes to be sent to the actual memory,
501    // but instead take responsibility for the consistency here and
502    // snoop the write queue for any upcoming reads
503    // @todo, if a pkt size is larger than burst size, we might need a
504    // different front end latency
505    accessAndRespond(pkt, frontendLatency);
506
507    // If we are not already scheduled to get a request out of the
508    // queue, do so now
509    if (!nextReqEvent.scheduled()) {
510        DPRINTF(DRAM, "Request scheduled immediately\n");
511        schedule(nextReqEvent, curTick());
512    }
513}
514
515void
516DRAMCtrl::printParams() const
517{
518    // Sanity check print of important parameters
519    DPRINTF(DRAM,
520            "Memory controller %s physical organization\n"      \
521            "Number of devices per rank   %d\n"                 \
522            "Device bus width (in bits)   %d\n"                 \
523            "DRAM data bus burst (bytes)  %d\n"                 \
524            "Row buffer size (bytes)      %d\n"                 \
525            "Columns per row buffer       %d\n"                 \
526            "Rows    per bank             %d\n"                 \
527            "Banks   per rank             %d\n"                 \
528            "Ranks   per channel          %d\n"                 \
529            "Total mem capacity (bytes)   %u\n",
530            name(), devicesPerRank, deviceBusWidth, burstSize, rowBufferSize,
531            columnsPerRowBuffer, rowsPerBank, banksPerRank, ranksPerChannel,
532            rowBufferSize * rowsPerBank * banksPerRank * ranksPerChannel);
533
534    string scheduler =  memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS";
535    string address_mapping = addrMapping == Enums::RoRaBaChCo ? "RoRaBaChCo" :
536        (addrMapping == Enums::RoRaBaCoCh ? "RoRaBaCoCh" : "RoCoRaBaCh");
537    string page_policy = pageMgmt == Enums::open ? "OPEN" :
538        (pageMgmt == Enums::open_adaptive ? "OPEN (adaptive)" :
539        (pageMgmt == Enums::close_adaptive ? "CLOSE (adaptive)" : "CLOSE"));
540
541    DPRINTF(DRAM,
542            "Memory controller %s characteristics\n"    \
543            "Read buffer size     %d\n"                 \
544            "Write buffer size    %d\n"                 \
545            "Write high thresh    %d\n"                 \
546            "Write low thresh     %d\n"                 \
547            "Scheduler            %s\n"                 \
548            "Address mapping      %s\n"                 \
549            "Page policy          %s\n",
550            name(), readBufferSize, writeBufferSize, writeHighThreshold,
551            writeLowThreshold, scheduler, address_mapping, page_policy);
552
553    DPRINTF(DRAM, "Memory controller %s timing specs\n" \
554            "tRCD      %d ticks\n"                        \
555            "tCL       %d ticks\n"                        \
556            "tRP       %d ticks\n"                        \
557            "tBURST    %d ticks\n"                        \
558            "tRFC      %d ticks\n"                        \
559            "tREFI     %d ticks\n"                        \
560            "tWTR      %d ticks\n"                        \
561            "tRTW      %d ticks\n"                        \
562            "tXAW (%d) %d ticks\n",
563            name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR,
564            tRTW, activationLimit, tXAW);
565}
566
567void
568DRAMCtrl::printQs() const {
569    DPRINTF(DRAM, "===READ QUEUE===\n\n");
570    for (auto i = readQueue.begin() ;  i != readQueue.end() ; ++i) {
571        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
572    }
573    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
574    for (auto i = respQueue.begin() ;  i != respQueue.end() ; ++i) {
575        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
576    }
577    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
578    for (auto i = writeQueue.begin() ;  i != writeQueue.end() ; ++i) {
579        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
580    }
581}
582
583bool
584DRAMCtrl::recvTimingReq(PacketPtr pkt)
585{
586    /// @todo temporary hack to deal with memory corruption issues until
587    /// 4-phase transactions are complete
588    for (int x = 0; x < pendingDelete.size(); x++)
589        delete pendingDelete[x];
590    pendingDelete.clear();
591
592    // This is where we enter from the outside world
593    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
594            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
595
596    // simply drop inhibited packets for now
597    if (pkt->memInhibitAsserted()) {
598        DPRINTF(DRAM, "Inhibited packet -- Dropping it now\n");
599        pendingDelete.push_back(pkt);
600        return true;
601    }
602
603    // Calc avg gap between requests
604    if (prevArrival != 0) {
605        totGap += curTick() - prevArrival;
606    }
607    prevArrival = curTick();
608
609
610    // Find out how many dram packets a pkt translates to
611    // If the burst size is equal or larger than the pkt size, then a pkt
612    // translates to only one dram packet. Otherwise, a pkt translates to
613    // multiple dram packets
614    unsigned size = pkt->getSize();
615    unsigned offset = pkt->getAddr() & (burstSize - 1);
616    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);
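    // For instance (illustrative numbers): a 64-byte packet that starts
    // 16 bytes into a 64-byte burst spans divCeil(16 + 64, 64) = 2 DRAM
    // packets, whereas an aligned 64-byte packet needs only one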
617
618    // check local buffers and do not accept if full
619    if (pkt->isRead()) {
620        assert(size != 0);
621        if (readQueueFull(dram_pkt_count)) {
622            DPRINTF(DRAM, "Read queue full, not accepting\n");
623            // remember that we have to retry this port
624            retryRdReq = true;
625            numRdRetry++;
626            return false;
627        } else {
628            addToReadQueue(pkt, dram_pkt_count);
629            readReqs++;
630            bytesReadSys += size;
631        }
632    } else if (pkt->isWrite()) {
633        assert(size != 0);
634        if (writeQueueFull(dram_pkt_count)) {
635            DPRINTF(DRAM, "Write queue full, not accepting\n");
636            // remember that we have to retry this port
637            retryWrReq = true;
638            numWrRetry++;
639            return false;
640        } else {
641            addToWriteQueue(pkt, dram_pkt_count);
642            writeReqs++;
643            bytesWrittenSys += size;
644        }
645    } else {
646        DPRINTF(DRAM,"Neither read nor write, ignore timing\n");
647        neitherReadNorWrite++;
648        accessAndRespond(pkt, 1);
649    }
650
651    return true;
652}
653
654void
655DRAMCtrl::processRespondEvent()
656{
657    DPRINTF(DRAM,
658            "processRespondEvent(): Some req has reached its readyTime\n");
659
660    DRAMPacket* dram_pkt = respQueue.front();
661
662    if (dram_pkt->burstHelper) {
663        // it is a split packet
664        dram_pkt->burstHelper->burstsServiced++;
665        if (dram_pkt->burstHelper->burstsServiced ==
666            dram_pkt->burstHelper->burstCount) {
667            // we have now serviced all children packets of a system packet
668            // so we can now respond to the requester
669            // @todo we probably want to have a different front end and back
670            // end latency for split packets
671            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
672            delete dram_pkt->burstHelper;
673            dram_pkt->burstHelper = NULL;
674        }
675    } else {
676        // it is not a split packet
677        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
678    }
679
680    delete respQueue.front();
681    respQueue.pop_front();
682
683    if (!respQueue.empty()) {
684        assert(respQueue.front()->readyTime >= curTick());
685        assert(!respondEvent.scheduled());
686        schedule(respondEvent, respQueue.front()->readyTime);
687    } else {
688        // if there is nothing left in any queue, signal a drain
689        if (writeQueue.empty() && readQueue.empty() &&
690            drainManager) {
691            drainManager->signalDrainDone();
692            drainManager = NULL;
693        }
694    }
695
696    // We have made a location in the queue available at this point,
697    // so if there is a read that was forced to wait, retry now
698    if (retryRdReq) {
699        retryRdReq = false;
700        port.sendRetry();
701    }
702}
703
704void
705DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue)
706{
707    // This method does the arbitration between requests. The chosen
708    // packet is simply moved to the head of the queue. The other
709    // methods know that this is the place to look. For example, with
710    // FCFS, this method does nothing
711    assert(!queue.empty());
712
713    if (queue.size() == 1) {
714        DPRINTF(DRAM, "Single request, nothing to do\n");
715        return;
716    }
717
718    if (memSchedPolicy == Enums::fcfs) {
719        // Do nothing, since the correct request is already head
720    } else if (memSchedPolicy == Enums::frfcfs) {
721        reorderQueue(queue);
722    } else
723        panic("No scheduling policy chosen\n");
724}
725
726void
727DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue)
728{
729    // Only determine this when needed
730    uint64_t earliest_banks = 0;
731
732    // Search for row hits first, if no row hit is found then schedule the
733    // packet to one of the earliest banks available
734    bool found_earliest_pkt = false;
735    auto selected_pkt_it = queue.begin();
736
737    for (auto i = queue.begin(); i != queue.end() ; ++i) {
738        DRAMPacket* dram_pkt = *i;
739        const Bank& bank = dram_pkt->bankRef;
740        // Check if it is a row hit
741        if (bank.openRow == dram_pkt->row) {
742            DPRINTF(DRAM, "Row buffer hit\n");
743            selected_pkt_it = i;
744            break;
745        } else if (!found_earliest_pkt) {
746            // No row hit, go for first ready
747            if (earliest_banks == 0)
748                earliest_banks = minBankFreeAt(queue);
749
750            // Bank is ready or is the first available bank
751            if (bank.freeAt <= curTick() ||
752                bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
753                // Remember the packet to be scheduled to one of the earliest
754                // banks available
755                selected_pkt_it = i;
756                found_earliest_pkt = true;
757            }
758        }
759    }
760
761    DRAMPacket* selected_pkt = *selected_pkt_it;
762    queue.erase(selected_pkt_it);
763    queue.push_front(selected_pkt);
764}
765
766void
767DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
768{
769    DPRINTF(DRAM, "Responding to Address %lld.. ",pkt->getAddr());
770
771    bool needsResponse = pkt->needsResponse();
772    // do the actual memory access which also turns the packet into a
773    // response
774    access(pkt);
775
776    // turn packet around to go back to requester if response expected
777    if (needsResponse) {
778        // access already turned the packet into a response
779        assert(pkt->isResponse());
780
781        // @todo someone should pay for this
782        pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;
783
784        // queue the packet in the response queue to be sent out after
785        // the static latency has passed
786        port.schedTimingResp(pkt, curTick() + static_latency);
787    } else {
788        // @todo the packet is going to be deleted, and the DRAMPacket
789        // still has a pointer to it
790        pendingDelete.push_back(pkt);
791    }
792
793    DPRINTF(DRAM, "Done\n");
794
795    return;
796}
797
798pair<Tick, Tick>
799DRAMCtrl::estimateLatency(DRAMPacket* dram_pkt, Tick inTime)
800{
801    // If a request reaches a bank at tick 'inTime', how much time
802    // *after* that does it take to finish the request, depending
803    // on bank status and page open policy. Note that this method
804    // considers only the time taken for the actual read or write
805    // to complete, NOT any additional time thereafter for tRAS or
806    // tRP.
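    // As a rough illustration of the cases below: a row-buffer hit on an
    // idle bank costs tCL, a miss with no row open costs tRCD + tCL, and
    // a miss that first has to close an open row costs tRP + tRCD + tCL
    // (all excluding the tBURST that the caller adds)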
807    Tick accLat = 0;
808    Tick bankLat = 0;
809    rowHitFlag = false;
810    Tick potentialActTick;
811
812    const Bank& bank = dram_pkt->bankRef;
813    // open, open_adaptive or close_adaptive page policies
814    if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive ||
815        pageMgmt == Enums::close_adaptive) {
816        if (bank.openRow == dram_pkt->row) {
817            // When we have a row-buffer hit,
818            // we don't care about tRAS having expired or not,
819            // but do care about bank being free for access
820            rowHitFlag = true;
821
822            // When a series of requests arrive at the same row,
823            // DDR systems are capable of streaming data continuously
824            // at maximum bandwidth (subject to tCCD). Here, we approximate
825            // this condition, and assume that whenever a bank is already
826            // busy and a new request comes in, it can be completed with no
827            // penalty beyond waiting for the existing read to complete.
828            if (bank.freeAt > inTime) {
829                accLat += bank.freeAt - inTime;
830                bankLat += 0;
831            } else {
832               // CAS latency only
833               accLat += tCL;
834               bankLat += tCL;
835            }
836
837        } else {
838            // Row-buffer miss, need to close existing row
839            // once tRAS has expired, then open the new one,
840            // then add cas latency.
841            Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt);
842
843            if (freeTime > inTime)
844               accLat += freeTime - inTime;
845
846            // If there is no open row (open adaptive), then there
847            // is no precharge delay, otherwise go with tRP
848            Tick precharge_delay = bank.openRow == Bank::NO_ROW ? 0 : tRP;
849
850            //The bank is free, and you may be able to activate
851            potentialActTick = inTime + accLat + precharge_delay;
852            if (potentialActTick < bank.actAllowedAt)
853                accLat += bank.actAllowedAt - potentialActTick;
854
855            accLat += precharge_delay + tRCD + tCL;
856            bankLat += precharge_delay + tRCD + tCL;
857        }
858    } else if (pageMgmt == Enums::close) {
859        // With a close page policy, no notion of
860        // bank.tRASDoneAt
861        if (bank.freeAt > inTime)
862            accLat += bank.freeAt - inTime;
863
864        //The bank is free, and you may be able to activate
865        potentialActTick = inTime + accLat;
866        if (potentialActTick < bank.actAllowedAt)
867            accLat += bank.actAllowedAt - potentialActTick;
868
869        // page already closed, simply open the row, and
870        // add cas latency
871        accLat += tRCD + tCL;
872        bankLat += tRCD + tCL;
873    } else
874        panic("No page management policy chosen\n");
875
876    DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n",
877            bankLat, accLat);
878
879    return make_pair(bankLat, accLat);
880}
881
882void
883DRAMCtrl::recordActivate(Tick act_tick, uint8_t rank, uint8_t bank,
884                         uint16_t row)
885{
886    assert(0 <= rank && rank < ranksPerChannel);
887    assert(actTicks[rank].size() == activationLimit);
888
889    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);
890
891    // update the open row
892    assert(banks[rank][bank].openRow == Bank::NO_ROW);
893    banks[rank][bank].openRow = row;
894
895    // start counting anew, this covers both the case when we
896    // auto-precharged, and when this access is forced to
897    // precharge
898    banks[rank][bank].bytesAccessed = 0;
899    banks[rank][bank].rowAccesses = 0;
900
901    ++numBanksActive;
902    assert(numBanksActive <= banksPerRank * ranksPerChannel);
903
904    DPRINTF(DRAM, "Activate bank at tick %lld, now got %d active\n",
905            act_tick, numBanksActive);
906
907    // start by enforcing tRRD
908    for(int i = 0; i < banksPerRank; i++) {
909        // next activate must not happen before tRRD
910        banks[rank][i].actAllowedAt = act_tick + tRRD;
911    }
912
913    // tRC should be added to the activation tick of the currently accessed
914    // bank, where tRC = tRAS + tRP; this is just a safety check, as actAllowedAt
915    // for the same bank is already captured by bank.freeAt and bank.tRASDoneAt
916    banks[rank][bank].actAllowedAt = act_tick + tRAS + tRP;
917
918    // next, we deal with tXAW, if the activation limit is disabled
919    // then we are done
920    if (actTicks[rank].empty())
921        return;
922
923    // sanity check
924    if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) {
925        // @todo For now, stick with a warning
926        warn("Got %d activates in window %d (%d - %d) which is smaller "
927             "than %d\n", activationLimit, act_tick - actTicks[rank].back(),
928             act_tick, actTicks[rank].back(), tXAW);
929    }
930
931    // shift the times used for the bookkeeping, the last element
932    // (highest index) is the oldest one and hence the lowest value
933    actTicks[rank].pop_back();
934
935    // record a new activation (in the future)
936    actTicks[rank].push_front(act_tick);
937
938    // cannot activate more than X times in time window tXAW, push the
939    // next one (the X + 1'st activate) to be tXAW away from the
940    // oldest in our window of X
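    // For example, with activationLimit = 4 (a tFAW-style window), a
    // fifth activate is pushed out until tXAW after the oldest of the
    // four activates recorded in actTicks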
941    if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) {
942        DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier "
943                "than %d\n", activationLimit, actTicks[rank].back() + tXAW);
944            for(int j = 0; j < banksPerRank; j++)
945                // next activate must not happen before end of window
946                banks[rank][j].actAllowedAt = actTicks[rank].back() + tXAW;
947    }
948
949    // at the point when this activate takes place, make sure we
950    // transition to the active power state
951    if (!activateEvent.scheduled())
952        schedule(activateEvent, act_tick);
953    else if (activateEvent.when() > act_tick)
954        // move it sooner in time
955        reschedule(activateEvent, act_tick);
956}
957
958void
959DRAMCtrl::processActivateEvent()
960{
961    // we should transition to the active state as soon as any bank is active
962    if (pwrState != PWR_ACT)
963        // note that at this point numBanksActive could be back at
964        // zero again due to a precharge scheduled in the future
965        schedulePowerEvent(PWR_ACT, curTick());
966}
967
968void
969DRAMCtrl::prechargeBank(Bank& bank, Tick free_at)
970{
971    // make sure the bank has an open row
972    assert(bank.openRow != Bank::NO_ROW);
973
974    // sample the bytes per activate here since we are closing
975    // the page
976    bytesPerActivate.sample(bank.bytesAccessed);
977
978    bank.openRow = Bank::NO_ROW;
979
980    bank.freeAt = free_at;
981
982    assert(numBanksActive != 0);
983    --numBanksActive;
984
985    DPRINTF(DRAM, "Precharged bank, done at tick %lld, now got %d active\n",
986            bank.freeAt, numBanksActive);
987
988    // if we look at the current number of active banks we might be
989    // tempted to think the DRAM is now idle; however, this can be
990    // undone by an activate that is scheduled to happen before we
991    // would have reached the idle state, so instead schedule an event
992    // and check once we actually make it to the point in time when
993    // the (last) precharge takes place
994    if (!prechargeEvent.scheduled())
995        schedule(prechargeEvent, free_at);
996    else if (prechargeEvent.when() < free_at)
997        reschedule(prechargeEvent, free_at);
998}
999
1000void
1001DRAMCtrl::processPrechargeEvent()
1002{
1003    // if we reached zero, then special conditions apply as we track
1004    // if all banks are precharged for the power models
1005    if (numBanksActive == 0) {
1006        // we should transition to the idle state when the last bank
1007        // is precharged
1008        schedulePowerEvent(PWR_IDLE, curTick());
1009    }
1010}
1011
1012void
1013DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
1014{
1015
1016    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
1017            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);
1018
1019    // estimate the bank and access latency
1020    pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick());
1021    Tick bankLat = lat.first;
1022    Tick accessLat = lat.second;
1023    Tick actTick;
1024
1025    // This request was woken up at this time based on a prior call
1026    // to estimateLatency(). However, between then and now, the
1027    // accessLatency and/or busBusyUntil may have changed. We need
1028    // to correct for that.
1029
1030    Tick addDelay = (curTick() + accessLat < busBusyUntil) ?
1031        busBusyUntil - (curTick() + accessLat) : 0;
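    // For example, if the access could otherwise complete at tick 900 but
    // the data bus is busy until tick 1000, addDelay becomes 100 and
    // pushes the access out until the bus is free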
1032
1033    Bank& bank = dram_pkt->bankRef;
1034
1035    // Update bank state
1036    if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive ||
1037        pageMgmt == Enums::close_adaptive) {
1038
1039        if (rowHitFlag) {
1040            bank.freeAt = curTick() + addDelay + accessLat;
1041        } else {
1042            // If there is a page open, precharge it.
1043            if (bank.openRow != Bank::NO_ROW) {
1044                prechargeBank(bank, std::max(std::max(bank.freeAt,
1045                                                      bank.tRASDoneAt),
1046                                             curTick()) + tRP);
1047            }
1048
1049            // Any precharge is already part of the latency
1050            // estimation, so update the bank free time
1051            bank.freeAt = curTick() + addDelay + accessLat;
1052
1053            // any waiting for banks is accounted for in freeAt
1054            actTick = bank.freeAt - tCL - tRCD;
1055
1056            // If you activated a new row due to this access, the next access
1057            // will have to respect tRAS for this bank
1058            bank.tRASDoneAt = actTick + tRAS;
1059
1060            recordActivate(actTick, dram_pkt->rank, dram_pkt->bank,
1061                           dram_pkt->row);
1062        }
1063
1064        // increment the bytes accessed and the accesses per row
1065        bank.bytesAccessed += burstSize;
1066        ++bank.rowAccesses;
1067
1068        // if we reached the max, then issue this access with an auto-precharge
1069        bool auto_precharge = bank.rowAccesses == maxAccessesPerRow;
1070
1071        // if we did not hit the limit, we might still want to
1072        // auto-precharge
1073        if (!auto_precharge &&
1074            (pageMgmt == Enums::open_adaptive ||
1075             pageMgmt == Enums::close_adaptive)) {
1076            // a twist on the open and close page policies:
1077            // 1) open_adaptive page policy does not blindly keep the
1078            // page open, but close it if there are no row hits, and there
1079            // are bank conflicts in the queue
1080            // 2) close_adaptive page policy does not blindly close the
1081            // page, but closes it only if there are no row hits in the queue.
1082            // In this case, only force an auto precharge when there
1083            // are no same page hits in the queue
1084            bool got_more_hits = false;
1085            bool got_bank_conflict = false;
1086
1087            // either look at the read queue or write queue
1088            const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue :
1089                writeQueue;
1090            auto p = queue.begin();
1091            // make sure we are not considering the packet that we are
1092            // currently dealing with (which is the head of the queue)
1093            ++p;
1094
1095            // keep on looking until we have found the required condition or
1096            // reached the end
1097            while (!(got_more_hits &&
1098                    (got_bank_conflict || pageMgmt == Enums::close_adaptive)) &&
1099                   p != queue.end()) {
1100                bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
1101                    (dram_pkt->bank == (*p)->bank);
1102                bool same_row = dram_pkt->row == (*p)->row;
1103                got_more_hits |= same_rank_bank && same_row;
1104                got_bank_conflict |= same_rank_bank && !same_row;
1105                ++p;
1106            }
1107
1108            // auto pre-charge when either
1109            // 1) open_adaptive policy, we have not got any more hits, and
1110            //    have a bank conflict
1111            // 2) close_adaptive policy and we have not got any more hits
1112            auto_precharge = !got_more_hits &&
1113                (got_bank_conflict || pageMgmt == Enums::close_adaptive);
1114        }
1115
1116        // if this access should use auto-precharge, then we are
1117        // closing the row
1118        if (auto_precharge) {
1119            prechargeBank(bank, std::max(bank.freeAt, bank.tRASDoneAt) + tRP);
1120
1121            DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
1122        }
1123
1124        DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
1125    } else if (pageMgmt == Enums::close) {
1126        actTick = curTick() + addDelay + accessLat - tRCD - tCL;
1127        recordActivate(actTick, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);
1128
1129        bank.freeAt = actTick + tRCD + tCL;
1130        bank.tRASDoneAt = actTick + tRAS;
1131
1132        // sample the relevant values when precharging
1133        bank.bytesAccessed = burstSize;
1134        bank.rowAccesses = 1;
1135
1136        prechargeBank(bank, std::max(bank.freeAt, bank.tRASDoneAt) + tRP);
1137        DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
1138    } else
1139        panic("No page management policy chosen\n");
1140
1141    // Update request parameters
1142    dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST;
1143
1144
1145    DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d " \
1146                  "readytime is %lld busbusyuntil is %lld. " \
1147                  "Scheduling at readyTime\n", dram_pkt->addr,
1148                   curTick(), accessLat, dram_pkt->readyTime, busBusyUntil);
1149
1150    // Make sure requests are not overlapping on the databus
1151    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);
1152
1153    // Update bus state
1154    busBusyUntil = dram_pkt->readyTime;
1155
1156    DPRINTF(DRAM,"Access time is %lld\n",
1157            dram_pkt->readyTime - dram_pkt->entryTime);
1158
1159    // Update the minimum timing between the requests, this is a
1160    // conservative estimate of when we have to schedule the next
1161    // request to not introduce any unnecessary bubbles. In most cases
1162    // we will wake up sooner than we have to.
1163    nextReqTime = busBusyUntil - (tRP + tRCD + tCL);
1164
1165    // Update the stats and schedule the next request
1166    if (dram_pkt->isRead) {
1167        ++readsThisTime;
1168        if (rowHitFlag)
1169            readRowHits++;
1170        bytesReadDRAM += burstSize;
1171        perBankRdBursts[dram_pkt->bankId]++;
1172
1173        // Update latency stats
1174        totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
1175        totBankLat += bankLat;
1176        totBusLat += tBURST;
1177        totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat -
1178            tBURST;
1179    } else {
1180        ++writesThisTime;
1181        if (rowHitFlag)
1182            writeRowHits++;
1183        bytesWritten += burstSize;
1184        perBankWrBursts[dram_pkt->bankId]++;
1185    }
1186}
1187
1188void
1189DRAMCtrl::moveToRespQ()
1190{
1191    // Remove from read queue
1192    DRAMPacket* dram_pkt = readQueue.front();
1193    readQueue.pop_front();
1194
1195    // sanity check
1196    assert(dram_pkt->size <= burstSize);
1197
1198    // Insert into response queue sorted by readyTime
1199    // It will be sent back to the requestor at its
1200    // readyTime
1201    if (respQueue.empty()) {
1202        respQueue.push_front(dram_pkt);
1203        assert(!respondEvent.scheduled());
1204        assert(dram_pkt->readyTime >= curTick());
1205        schedule(respondEvent, dram_pkt->readyTime);
1206    } else {
1207        bool done = false;
1208        auto i = respQueue.begin();
1209        while (!done && i != respQueue.end()) {
1210            if ((*i)->readyTime > dram_pkt->readyTime) {
1211                respQueue.insert(i, dram_pkt);
1212                done = true;
1213            }
1214            ++i;
1215        }
1216
1217        if (!done)
1218            respQueue.push_back(dram_pkt);
1219
1220        assert(respondEvent.scheduled());
1221
1222        if (respQueue.front()->readyTime < respondEvent.when()) {
1223            assert(respQueue.front()->readyTime >= curTick());
1224            reschedule(respondEvent, respQueue.front()->readyTime);
1225        }
1226    }
1227}
1228
1229void
1230DRAMCtrl::processNextReqEvent()
1231{
1232    if (busState == READ_TO_WRITE) {
1233        DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
1234                "waiting\n", readsThisTime, readQueue.size());
1235
1236        // sample and reset the read-related stats as we are now
1237        // transitioning to writes, and all reads are done
1238        rdPerTurnAround.sample(readsThisTime);
1239        readsThisTime = 0;
1240
1241        // now proceed to do the actual writes
1242        busState = WRITE;
1243    } else if (busState == WRITE_TO_READ) {
1244        DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
1245                "waiting\n", writesThisTime, writeQueue.size());
1246
1247        wrPerTurnAround.sample(writesThisTime);
1248        writesThisTime = 0;
1249
1250        busState = READ;
1251    }
1252
1253    if (refreshState != REF_IDLE) {
1254        // if a refresh is waiting for this event loop to finish, then hand
1255        // over now, and do not schedule a new nextReqEvent
1256        if (refreshState == REF_DRAIN) {
1257            DPRINTF(DRAM, "Refresh drain done, now precharging\n");
1258
1259            refreshState = REF_PRE;
1260
1261            // hand control back to the refresh event loop
1262            schedule(refreshEvent, curTick());
1263        }
1264
1265        // let the refresh finish before issuing any further requests
1266        return;
1267    }
1268
1269    // when we get here it is either a read or a write
1270    if (busState == READ) {
1271
1272        // track if we should switch or not
1273        bool switch_to_writes = false;
1274
1275        if (readQueue.empty()) {
1276            // In the case there is no read request to go next,
1277            // trigger writes if we have passed the low threshold (or
1278            // if we are draining)
1279            if (!writeQueue.empty() &&
1280                (drainManager || writeQueue.size() > writeLowThreshold)) {
1281
1282                switch_to_writes = true;
1283            } else {
1284                // check if we are drained
1285                if (respQueue.empty () && drainManager) {
1286                    drainManager->signalDrainDone();
1287                    drainManager = NULL;
1288                }
1289
1290                // nothing to do, not even any point in scheduling an
1291                // event for the next request
1292                return;
1293            }
1294        } else {
1295            // Figure out which read request goes next, and move it to the
1296            // front of the read queue
1297            chooseNext(readQueue);
1298
1299            doDRAMAccess(readQueue.front());
1300
1301            // At this point we're done dealing with the request
1302            // It will be moved to a separate response queue with a
1303            // correct readyTime, and eventually be sent back at that
1304            // time
1305            moveToRespQ();
1306
1307            // we have so many writes that we have to transition
1308            if (writeQueue.size() > writeHighThreshold) {
1309                switch_to_writes = true;
1310            }
1311        }
1312
1313        // switching to writes, either because the read queue is empty
1314        // and the writes have passed the low threshold (or we are
1315        // draining), or because the writes hit the high threshold
1316        if (switch_to_writes) {
1317            // transition to writing
1318            busState = READ_TO_WRITE;
1319
1320            // add a bubble to the data bus, as defined by the
1321            // tRTW parameter
1322            busBusyUntil += tRTW;
1323
1324            // update the minimum timing between the requests,
1325            // this shifts us back in time far enough to do any
1326            // bank preparation
1327            nextReqTime = busBusyUntil - (tRP + tRCD + tCL);
1328        }
1329    } else {
1330        chooseNext(writeQueue);
1331        DRAMPacket* dram_pkt = writeQueue.front();
1332        // sanity check
1333        assert(dram_pkt->size <= burstSize);
1334        doDRAMAccess(dram_pkt);
1335
1336        writeQueue.pop_front();
1337        delete dram_pkt;
1338
1339        // If we emptied the write queue, or got sufficiently below the
1340        // threshold (using the minWritesPerSwitch as the hysteresis) and
1341        // are not draining, or we have reads waiting and have done enough
1342        // writes, then switch to reads.
1343        if (writeQueue.empty() ||
1344            (writeQueue.size() + minWritesPerSwitch < writeLowThreshold &&
1345             !drainManager) ||
1346            (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
1347            // turn the bus back around for reads again
1348            busState = WRITE_TO_READ;
1349
1350            // note that we switch back to reads also in the idle
1351            // case, which eventually will check for any draining and
1352            // also pause any further scheduling if there is really
1353            // nothing to do
1354
1355            // here we get a bit creative and shift the bus busy time not
1356            // just by tWTR, but also by a CAS latency to capture the fact
1357            // that we are allowed to prepare a new bank, but not issue a
1358            // read command until after tWTR, in essence we capture a
1359            // bubble on the data bus that is tWTR + tCL
1360            busBusyUntil += tWTR + tCL;
1361
1362            // update the minimum timing between the requests, this shifts
1363            // us back in time far enough to do any bank preparation
1364            nextReqTime = busBusyUntil - (tRP + tRCD + tCL);
1365        }
1366    }
1367
1368    schedule(nextReqEvent, std::max(nextReqTime, curTick()));
1369
1370    // If there is space available and we have writes waiting then let
1371    // them retry. This is done here to ensure that the retry does not
1372    // cause a nextReqEvent to be scheduled before we do so as part of
1373    // the next request processing
1374    if (retryWrReq && writeQueue.size() < writeBufferSize) {
1375        retryWrReq = false;
1376        port.sendRetry();
1377    }
1378}
1379
1380uint64_t
1381DRAMCtrl::minBankFreeAt(const deque<DRAMPacket*>& queue) const
1382{
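    // the returned value is a bit mask with one bit per bank, indexed by
    // bankId, set for every bank that both has a queued request and
    // becomes free no later than any other such bank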
1383    uint64_t bank_mask = 0;
1384    Tick freeAt = MaxTick;
1385
1386    // determine if we have queued transactions targeting the
1387    // bank in question
1388    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
1389    for (auto p = queue.begin(); p != queue.end(); ++p) {
1390        got_waiting[(*p)->bankId] = true;
1391    }
1392
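    // the returned mask uses the same encoding as the per-packet bankId,
    // i.e. bit (rank * banksPerRank + bank); for example, in a
    // hypothetical 2-rank, 8-banks-per-rank config, bank 3 of rank 1
    // corresponds to bit 11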
1393    for (int i = 0; i < ranksPerChannel; i++) {
1394        for (int j = 0; j < banksPerRank; j++) {
1395            // if we have waiting requests for the bank, and it is
1396            // amongst the first available, update the mask
1397            if (got_waiting[i * banksPerRank + j] &&
1398                banks[i][j].freeAt <= freeAt) {
1399                // reset bank mask if new minimum is found
1400                if (banks[i][j].freeAt < freeAt)
1401                    bank_mask = 0;
1402                // set the bit corresponding to the available bank
1403                uint8_t bit_index = i * banksPerRank + j;
1404                replaceBits(bank_mask, bit_index, bit_index, 1);
1405                freeAt = banks[i][j].freeAt;
1406            }
1407        }
1408    }
1409    return bank_mask;
1410}
1411
1412void
1413DRAMCtrl::processRefreshEvent()
1414{
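    // refresh handling is a small state machine driven by this event and
    // the power-state event: REF_IDLE (note when the refresh is due),
    // REF_DRAIN (let any in-flight request finish), REF_PRE (precharge
    // all open banks) and REF_RUN (perform the refresh and schedule the
    // next one relative to the previous due time)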
1415    // when first preparing the refresh, remember when it was due
1416    if (refreshState == REF_IDLE) {
1417        // remember when the refresh is due
1418        refreshDueAt = curTick();
1419
1420        // proceed to drain
1421        refreshState = REF_DRAIN;
1422
1423        DPRINTF(DRAM, "Refresh due\n");
1424    }
1425
1426    // let any scheduled read or write go ahead, after which it will
1427    // hand control back to this event loop
1428    if (refreshState == REF_DRAIN) {
1429        if (nextReqEvent.scheduled()) {
1430            // hand control over to the request loop until it is
1431            // evaluated next
1432            DPRINTF(DRAM, "Refresh awaiting draining\n");
1433
1434            return;
1435        } else {
1436            refreshState = REF_PRE;
1437        }
1438    }
1439
1440    // at this point, ensure that all banks are precharged
1441    if (refreshState == REF_PRE) {
1442        // precharge any active bank if we are not already in the idle
1443        // state
1444        if (pwrState != PWR_IDLE) {
1445            DPRINTF(DRAM, "Precharging all\n");
1446            for (int i = 0; i < ranksPerChannel; i++) {
1447                for (int j = 0; j < banksPerRank; j++) {
1448                    if (banks[i][j].openRow != Bank::NO_ROW) {
1449                        // respect both causality and any existing bank
1450                        // constraints
1451                        Tick free_at =
1452                            std::max(std::max(banks[i][j].freeAt,
1453                                              banks[i][j].tRASDoneAt),
1454                                     curTick()) + tRP;
1455
1456                        prechargeBank(banks[i][j], free_at);
1457                    }
1458                }
1459            }
1460        } else {
1461            DPRINTF(DRAM, "All banks already precharged, starting refresh\n");
1462
1463            // go ahead and kick the power state machine into gear if
1464            // we are already idle
1465            schedulePowerEvent(PWR_REF, curTick());
1466        }
1467
1468        refreshState = REF_RUN;
1469        assert(numBanksActive == 0);
1470
1471        // wait for all banks to be precharged, at which point the
1472        // power state machine will transition to the idle state and
1473        // automatically move to a refresh; at that point it will also
1474        // call this method to get the refresh event loop going again
1475        return;
1476    }
1477
1478    // last but not least we perform the actual refresh
1479    if (refreshState == REF_RUN) {
1480        // should never get here with any banks active
1481        assert(numBanksActive == 0);
1482        assert(pwrState == PWR_REF);
1483
1484        Tick banksFree = curTick() + tRFC;
1485
1486        for (int i = 0; i < ranksPerChannel; i++) {
1487            for (int j = 0; j < banksPerRank; j++) {
1488                banks[i][j].freeAt = banksFree;
1489            }
1490        }
1491
1492        // make sure we did not wait so long that we cannot make up
1493        // for it
1494        if (refreshDueAt + tREFI < banksFree) {
1495            fatal("Refresh was delayed so long we cannot catch up\n");
1496        }
1497
1498        // compensate for the delay in actually performing the refresh
1499        // when scheduling the next one
1500        schedule(refreshEvent, refreshDueAt + tREFI - tRP);
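        // as an illustration (hypothetical DDR3-like values): with
        // tREFI = 7.8us and tRP = 15ns, a refresh due at 100us schedules
        // the next event at 107.8us - 15ns, so the precharge-all pass
        // (which adds tRP) can complete by the time the refresh is due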
1501
1502        assert(!powerEvent.scheduled());
1503
1504        // move to the idle power state once the refresh is done; this
1505        // will also move the refresh state machine to the refresh
1506        // idle state
1507        schedulePowerEvent(PWR_IDLE, banksFree);
1508
1509        DPRINTF(DRAMState, "Refresh done at %llu and next refresh at %llu\n",
1510                banksFree, refreshDueAt + tREFI);
1511    }
1512}
1513
1514void
1515DRAMCtrl::schedulePowerEvent(PowerState pwr_state, Tick tick)
1516{
1517    // respect causality
1518    assert(tick >= curTick());
1519
1520    if (!powerEvent.scheduled()) {
1521        DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
1522                tick, pwr_state);
1523
1524        // insert the new transition
1525        pwrStateTrans = pwr_state;
1526
1527        schedule(powerEvent, tick);
1528    } else {
1529        panic("Scheduled power event at %llu to state %d, "
1530              "with scheduled event at %llu to %d\n", tick, pwr_state,
1531              powerEvent.when(), pwrStateTrans);
1532    }
1533}
1534
1535void
1536DRAMCtrl::processPowerEvent()
1537{
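    // account for the time spent in the state we are leaving, then act
    // on the new state: reaching PWR_IDLE after a refresh restarts
    // request scheduling, whereas reaching PWR_IDLE from PWR_ACT with a
    // refresh pending chains straight into PWR_REF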
1538    // remember where we were, and for how long
1539    Tick duration = curTick() - pwrStateTick;
1540    PowerState prev_state = pwrState;
1541
1542    // update the accounting
1543    pwrStateTime[prev_state] += duration;
1544
1545    pwrState = pwrStateTrans;
1546    pwrStateTick = curTick();
1547
1548    if (pwrState == PWR_IDLE) {
1549        DPRINTF(DRAMState, "All banks precharged\n");
1550
1551        // if we were refreshing, make sure we start scheduling requests again
1552        if (prev_state == PWR_REF) {
1553            DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
1554            assert(pwrState == PWR_IDLE);
1555
1556            // kick things into action again
1557            refreshState = REF_IDLE;
1558            assert(!nextReqEvent.scheduled());
1559            schedule(nextReqEvent, curTick());
1560        } else {
1561            assert(prev_state == PWR_ACT);
1562
1563            // if we have a pending refresh, and are now moving to
1564            // the idle state, directly transition to a refresh
1565            if (refreshState == REF_RUN) {
1566                // there should be nothing waiting at this point
1567                assert(!powerEvent.scheduled());
1568
1569                // update the state in zero time and proceed below
1570                pwrState = PWR_REF;
1571            }
1572        }
1573    }
1574
1575    // if we transition to the refresh state, let the refresh state
1576    // machine know of this state update and let it deal with the
1577    // scheduling of the next power state transition as well as the
1578    // following refresh
1579    if (pwrState == PWR_REF) {
1580        DPRINTF(DRAMState, "Refreshing\n");
1581        // kick the refresh event loop into action again, and that
1582        // in turn will schedule a transition to the idle power
1583        // state once the refresh is done
1584        assert(refreshState == REF_RUN);
1585        processRefreshEvent();
1586    }
1587}
1588
1589void
1590DRAMCtrl::regStats()
1591{
1592    using namespace Stats;
1593
1594    AbstractMemory::regStats();
1595
1596    readReqs
1597        .name(name() + ".readReqs")
1598        .desc("Number of read requests accepted");
1599
1600    writeReqs
1601        .name(name() + ".writeReqs")
1602        .desc("Number of write requests accepted");
1603
1604    readBursts
1605        .name(name() + ".readBursts")
1606        .desc("Number of DRAM read bursts, "
1607              "including those serviced by the write queue");
1608
1609    writeBursts
1610        .name(name() + ".writeBursts")
1611        .desc("Number of DRAM write bursts, "
1612              "including those merged in the write queue");
1613
1614    servicedByWrQ
1615        .name(name() + ".servicedByWrQ")
1616        .desc("Number of DRAM read bursts serviced by the write queue");
1617
1618    mergedWrBursts
1619        .name(name() + ".mergedWrBursts")
1620        .desc("Number of DRAM write bursts merged with an existing one");
1621
1622    neitherReadNorWrite
1623        .name(name() + ".neitherReadNorWriteReqs")
1624        .desc("Number of requests that are neither read nor write");
1625
1626    perBankRdBursts
1627        .init(banksPerRank * ranksPerChannel)
1628        .name(name() + ".perBankRdBursts")
1629        .desc("Per bank read bursts");
1630
1631    perBankWrBursts
1632        .init(banksPerRank * ranksPerChannel)
1633        .name(name() + ".perBankWrBursts")
1634        .desc("Per bank write bursts");
1635
1636    avgRdQLen
1637        .name(name() + ".avgRdQLen")
1638        .desc("Average read queue length when enqueuing")
1639        .precision(2);
1640
1641    avgWrQLen
1642        .name(name() + ".avgWrQLen")
1643        .desc("Average write queue length when enqueuing")
1644        .precision(2);
1645
1646    totQLat
1647        .name(name() + ".totQLat")
1648        .desc("Total ticks spent queuing");
1649
1650    totBankLat
1651        .name(name() + ".totBankLat")
1652        .desc("Total ticks spent accessing banks");
1653
1654    totBusLat
1655        .name(name() + ".totBusLat")
1656        .desc("Total ticks spent in databus transfers");
1657
1658    totMemAccLat
1659        .name(name() + ".totMemAccLat")
1660        .desc("Total ticks spent from burst creation until serviced "
1661              "by the DRAM");
1662
1663    avgQLat
1664        .name(name() + ".avgQLat")
1665        .desc("Average queueing delay per DRAM burst")
1666        .precision(2);
1667
1668    avgQLat = totQLat / (readBursts - servicedByWrQ);
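    // reads serviced directly from the write queue never occupy a bank
    // or the data bus, so they are excluded from the per-burst averages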
1669
1670    avgBankLat
1671        .name(name() + ".avgBankLat")
1672        .desc("Average bank access latency per DRAM burst")
1673        .precision(2);
1674
1675    avgBankLat = totBankLat / (readBursts - servicedByWrQ);
1676
1677    avgBusLat
1678        .name(name() + ".avgBusLat")
1679        .desc("Average bus latency per DRAM burst")
1680        .precision(2);
1681
1682    avgBusLat = totBusLat / (readBursts - servicedByWrQ);
1683
1684    avgMemAccLat
1685        .name(name() + ".avgMemAccLat")
1686        .desc("Average memory access latency per DRAM burst")
1687        .precision(2);
1688
1689    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);
1690
1691    numRdRetry
1692        .name(name() + ".numRdRetry")
1693        .desc("Number of times read queue was full causing retry");
1694
1695    numWrRetry
1696        .name(name() + ".numWrRetry")
1697        .desc("Number of times write queue was full causing retry");
1698
1699    readRowHits
1700        .name(name() + ".readRowHits")
1701        .desc("Number of row buffer hits during reads");
1702
1703    writeRowHits
1704        .name(name() + ".writeRowHits")
1705        .desc("Number of row buffer hits during writes");
1706
1707    readRowHitRate
1708        .name(name() + ".readRowHitRate")
1709        .desc("Row buffer hit rate for reads")
1710        .precision(2);
1711
1712    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;
1713
1714    writeRowHitRate
1715        .name(name() + ".writeRowHitRate")
1716        .desc("Row buffer hit rate for writes")
1717        .precision(2);
1718
1719    writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;
1720
1721    readPktSize
1722        .init(ceilLog2(burstSize) + 1)
1723        .name(name() + ".readPktSize")
1724        .desc("Read request sizes (log2)");
1725
1726    writePktSize
1727        .init(ceilLog2(burstSize) + 1)
1728        .name(name() + ".writePktSize")
1729        .desc("Write request sizes (log2)");
1730
1731    rdQLenPdf
1732        .init(readBufferSize)
1733        .name(name() + ".rdQLenPdf")
1734        .desc("What read queue length does an incoming req see");
1735
1736    wrQLenPdf
1737        .init(writeBufferSize)
1738        .name(name() + ".wrQLenPdf")
1739        .desc("What write queue length does an incoming req see");
1740
1741    bytesPerActivate
1742        .init(maxAccessesPerRow)
1743        .name(name() + ".bytesPerActivate")
1744        .desc("Bytes accessed per row activation")
1745        .flags(nozero);
1746
1747    rdPerTurnAround
1748        .init(readBufferSize)
1749        .name(name() + ".rdPerTurnAround")
1750        .desc("Reads before turning the bus around for writes")
1751        .flags(nozero);
1752
1753    wrPerTurnAround
1754        .init(writeBufferSize)
1755        .name(name() + ".wrPerTurnAround")
1756        .desc("Writes before turning the bus around for reads")
1757        .flags(nozero);
1758
1759    bytesReadDRAM
1760        .name(name() + ".bytesReadDRAM")
1761        .desc("Total number of bytes read from DRAM");
1762
1763    bytesReadWrQ
1764        .name(name() + ".bytesReadWrQ")
1765        .desc("Total number of bytes read from write queue");
1766
1767    bytesWritten
1768        .name(name() + ".bytesWritten")
1769        .desc("Total number of bytes written to DRAM");
1770
1771    bytesReadSys
1772        .name(name() + ".bytesReadSys")
1773        .desc("Total read bytes from the system interface side");
1774
1775    bytesWrittenSys
1776        .name(name() + ".bytesWrittenSys")
1777        .desc("Total written bytes from the system interface side");
1778
1779    avgRdBW
1780        .name(name() + ".avgRdBW")
1781        .desc("Average DRAM read bandwidth in MiByte/s")
1782        .precision(2);
1783
1784    avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;
1785
1786    avgWrBW
1787        .name(name() + ".avgWrBW")
1788        .desc("Average achieved write bandwidth in MiByte/s")
1789        .precision(2);
1790
1791    avgWrBW = (bytesWritten / 1000000) / simSeconds;
1792
1793    avgRdBWSys
1794        .name(name() + ".avgRdBWSys")
1795        .desc("Average system read bandwidth in MiByte/s")
1796        .precision(2);
1797
1798    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;
1799
1800    avgWrBWSys
1801        .name(name() + ".avgWrBWSys")
1802        .desc("Average system write bandwidth in MiByte/s")
1803        .precision(2);
1804
1805    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;
1806
1807    peakBW
1808        .name(name() + ".peakBW")
1809        .desc("Theoretical peak bandwidth in MiByte/s")
1810        .precision(2);
1811
1812    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;
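    // sanity check with hypothetical numbers (assuming the default 1 ps
    // tick): a 64-byte burst every 5 ns gives (1e12 / 5000) * 64 /
    // 1000000 = 12800, i.e. the 12.8 GB/s peak of a DDR3-1600 x64 channel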
1813
1814    busUtil
1815        .name(name() + ".busUtil")
1816        .desc("Data bus utilization in percentage")
1817        .precision(2);
1818
1819    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
1820
1821    totGap
1822        .name(name() + ".totGap")
1823        .desc("Total gap between requests");
1824
1825    avgGap
1826        .name(name() + ".avgGap")
1827        .desc("Average gap between requests")
1828        .precision(2);
1829
1830    avgGap = totGap / (readReqs + writeReqs);
1831
1832    // Stats for DRAM Power calculation based on Micron datasheet
1833    busUtilRead
1834        .name(name() + ".busUtilRead")
1835        .desc("Data bus utilization in percentage for reads")
1836        .precision(2);
1837
1838    busUtilRead = avgRdBW / peakBW * 100;
1839
1840    busUtilWrite
1841        .name(name() + ".busUtilWrite")
1842        .desc("Data bus utilization in percentage for writes")
1843        .precision(2);
1844
1845    busUtilWrite = avgWrBW / peakBW * 100;
1846
1847    pageHitRate
1848        .name(name() + ".pageHitRate")
1849        .desc("Row buffer hit rate, read and write combined")
1850        .precision(2);
1851
1852    pageHitRate = (writeRowHits + readRowHits) /
1853        (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;
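    // the denominator only counts bursts that actually reached the DRAM,
    // excluding reads serviced by the write queue and writes merged into
    // existing write queue entries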
1854
1855    pwrStateTime
1856        .init(5)
1857        .name(name() + ".memoryStateTime")
1858        .desc("Time in different power states");
1859    pwrStateTime.subname(0, "IDLE");
1860    pwrStateTime.subname(1, "REF");
1861    pwrStateTime.subname(2, "PRE_PDN");
1862    pwrStateTime.subname(3, "ACT");
1863    pwrStateTime.subname(4, "ACT_PDN");
1864}
1865
1866void
1867DRAMCtrl::recvFunctional(PacketPtr pkt)
1868{
1869    // rely on the abstract memory
1870    functionalAccess(pkt);
1871}
1872
1873BaseSlavePort&
1874DRAMCtrl::getSlavePort(const string &if_name, PortID idx)
1875{
1876    if (if_name != "port") {
1877        return MemObject::getSlavePort(if_name, idx);
1878    } else {
1879        return port;
1880    }
1881}
1882
1883unsigned int
1884DRAMCtrl::drain(DrainManager *dm)
1885{
1886    unsigned int count = port.drain(dm);
1887
1888    // if there is anything in any of our internal queues, keep track
1889    // of that as well
1890    if (!(writeQueue.empty() && readQueue.empty() &&
1891          respQueue.empty())) {
1892        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
1893                " resp: %d\n", writeQueue.size(), readQueue.size(),
1894                respQueue.size());
1895        ++count;
1896        drainManager = dm;
1897
1898        // the only part that is not drained automatically over time
1899        // is the write queue, thus kick things into action if needed
1900        if (!writeQueue.empty() && !nextReqEvent.scheduled()) {
1901            schedule(nextReqEvent, curTick());
1902        }
1903    }
1904
1905    if (count)
1906        setDrainState(Drainable::Draining);
1907    else
1908        setDrainState(Drainable::Drained);
1909    return count;
1910}
1911
1912DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
1913    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
1914      memory(_memory)
1915{ }
1916
1917AddrRangeList
1918DRAMCtrl::MemoryPort::getAddrRanges() const
1919{
1920    AddrRangeList ranges;
1921    ranges.push_back(memory.getAddrRange());
1922    return ranges;
1923}
1924
1925void
1926DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
1927{
1928    pkt->pushLabel(memory.name());
1929
1930    if (!queue.checkFunctional(pkt)) {
1931        // Default implementation of SimpleTimingPort::recvFunctional()
1932        // calls recvAtomic() and throws away the latency; we can save a
1933        // little here by just not calculating the latency.
1934        memory.recvFunctional(pkt);
1935    }
1936
1937    pkt->popLabel();
1938}
1939
1940Tick
1941DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
1942{
1943    return memory.recvAtomic(pkt);
1944}
1945
1946bool
1947DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
1948{
1949    // pass it to the memory controller
1950    return memory.recvTimingReq(pkt);
1951}
1952
1953DRAMCtrl*
1954DRAMCtrlParams::create()
1955{
1956    return new DRAMCtrl(this);
1957}
1958