dram_ctrl.cc revision 10394:70cfafa17653
/*
 * Copyright (c) 2010-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 */

#include "base/bitfield.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
#include "debug/Drain.hh"
#include "mem/dram_ctrl.hh"
#include "sim/system.hh"

using namespace std;

DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this),
    retryRdReq(false), retryWrReq(false),
    busState(READ),
    nextReqEvent(this), respondEvent(this), activateEvent(this),
    prechargeEvent(this), refreshEvent(this), powerEvent(this),
    drainManager(NULL),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    columnsPerRowBuffer(rowBufferSize / burstSize),
    columnsPerStripe(range.granularity() / burstSize),
    ranksPerChannel(p->ranks_per_channel),
    bankGroupsPerRank(p->bank_groups_per_rank),
    bankGroupArch(p->bank_groups_per_rank > 0),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    tCK(p->tCK), tWTR(p->tWTR), tRTW(p->tRTW), tCS(p->tCS), tBURST(p->tBURST),
    tCCD_L(p->tCCD_L), tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tWR(p->tWR), tRTP(p->tRTP), tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
    tRRD_L(p->tRRD_L), tXAW(p->tXAW), activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    maxAccessesPerRow(p->max_accesses_per_row),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    busBusyUntil(0), refreshDueAt(0), refreshState(REF_IDLE),
    pwrStateTrans(PWR_IDLE), pwrState(PWR_IDLE), prevArrival(0),
    nextReqTime(0), pwrStateTick(0), numBanksActive(0),
    activeRank(0)
{
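    // Illustrative geometry (assuming a DDR3-1600 x64 style configuration
    // with eight x8 devices per rank, burst length 8, and a 1KB per-device
    // row buffer): burstSize = (8 * 8 * 8) / 8 = 64 bytes, rowBufferSize =
    // 8 * 1KB = 8KB, and columnsPerRowBuffer = 8KB / 64B = 128.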
    // create the bank states based on the dimensions of the ranks and
    // banks
    banks.resize(ranksPerChannel);
    actTicks.resize(ranksPerChannel);
    for (size_t c = 0; c < ranksPerChannel; ++c) {
        banks[c].resize(banksPerRank);
        actTicks[c].resize(activationLimit, 0);
    }

    // set the bank indices
    for (int r = 0; r < ranksPerChannel; r++) {
        for (int b = 0; b < banksPerRank; b++) {
            banks[r][b].rank = r;
            banks[r][b].bank = b;
            if (bankGroupArch) {
                // Simply assign lower bits to bank group in order to
                // rotate across bank groups as banks are incremented
                // e.g. with 4 banks per bank group and 16 banks total:
                //    banks 0,4,8,12  are in bank group 0
                //    banks 1,5,9,13  are in bank group 1
                //    banks 2,6,10,14 are in bank group 2
                //    banks 3,7,11,15 are in bank group 3
                banks[r][b].bankgr = b % bankGroupsPerRank;
            } else {
                // No bank groups; simply assign to bank number
                banks[r][b].bankgr = b;
            }
        }
    }

    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);
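    // As an illustrative example (numbers are arbitrary): with a 64-entry
    // write buffer and thresholds of 85/50 percent, the controller turns
    // the bus around to writes once more than ~54 entries are queued, and
    // back to reads once the queue drains below 32 (modulo the
    // minWritesPerSwitch hysteresis applied later).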

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);
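    // For example (illustrative numbers): a 512MB channel with an 8KB row
    // buffer, 8 banks, and 1 rank yields 512MB / (8KB * 8 * 1) = 8192 rows
    // per bank.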

    // a few sanity checks on the interleaving
    if (range.interleaved()) {
        if (channels != range.stripes())
            fatal("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh ||
                   addrMapping == Enums::RoCoRaBaCh) {
            // for the interleavings with channel bits in the bottom,
            // if the system uses a channel striping granularity that
            // is larger than the DRAM burst size, then map the
            // sequential accesses within a stripe to a number of
            // columns in the DRAM, effectively placing some of the
            // lower-order column bits as the least-significant bits
            // of the address (above the ones denoting the burst size)
            assert(columnsPerStripe >= 1);

            // channel striping has to be done at a granularity that
            // is equal to or larger than a cache line
            if (system()->cacheLineSize() > range.granularity()) {
                fatal("Channel interleaving of %s must be at least as large "
                      "as the cache line size\n", name());
            }

            // ...and equal to or smaller than the row-buffer size
            if (rowBufferSize < range.granularity()) {
                fatal("Channel interleaving of %s must be at most as large "
                      "as the row-buffer size\n", name());
            }
            // this is essentially the check above, so just to be sure
            assert(columnsPerStripe <= columnsPerRowBuffer);
        }
    }

    // some basic sanity checks
    if (tREFI <= tRP || tREFI <= tRFC) {
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
              tREFI, tRP, tRFC);
    }

    // basic bank group architecture checks
    if (bankGroupArch) {
        // must have at least one bank per bank group
        if (bankGroupsPerRank > banksPerRank) {
            fatal("banks per rank (%d) must be equal to or larger than "
                  "bank groups per rank (%d)\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // must have the same number of banks in each bank group
        if ((banksPerRank % bankGroupsPerRank) != 0) {
            fatal("Banks per rank (%d) must be evenly divisible by bank "
                  "groups per rank (%d) for equal banks per bank group\n",
                  banksPerRank, bankGroupsPerRank);
        }
        // tCCD_L should be greater than the minimal back-to-back burst delay
        if (tCCD_L <= tBURST) {
            fatal("tCCD_L (%d) should be larger than tBURST (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tCCD_L, tBURST, bankGroupsPerRank);
        }
        // tRRD_L should be greater than the minimal same-bank-group
        // ACT-to-ACT delay
        if (tRRD_L <= tRRD) {
            fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
                  "bank groups per rank (%d) is greater than 1\n",
                  tRRD_L, tRRD, bankGroupsPerRank);
        }
    }
}

void
DRAMCtrl::init()
{
    if (!port.isConnected()) {
        fatal("DRAMCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }
}

void
DRAMCtrl::startup()
{
    // update the start tick for the precharge accounting to the
    // current tick
    pwrStateTick = curTick();

    // shift the bus busy time sufficiently far ahead that we never
    // have to worry about negative values when computing the time for
    // the next request, this will add an insignificant bubble at the
    // start of simulation
    busBusyUntil = curTick() + tRP + tRCD + tCL;

    // kick off the refresh, and give ourselves enough time to
    // precharge
    schedule(refreshEvent, curTick() + tREFI - tRP);
}

Tick
DRAMCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (!pkt->memInhibitAsserted() && pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}

bool
DRAMCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, readQueue.size() + respQueue.size(),
            neededEntries);

    return
        (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
}

bool
DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, writeQueue.size(), neededEntries);
    return (writeQueue.size() + neededEntries) > writeBufferSize;
}

DRAMCtrl::DRAMPacket*
DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
                     bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively
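    //
    // Illustrative walk-through for RoRaBaChCo (numbers are arbitrary):
    // with a 64-byte burst, 128 columns per row buffer, 1 channel, 8 banks
    // and 1 rank, address 0x80040 becomes 0x80040 / 64 = 0x2001 bursts,
    // then 0x2001 / 128 = 64 after dropping the column bits, so
    // bank = 64 % 8 = 0, rank = 8 % 1 = 0, and row = 8 % rowsPerBank = 8.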
    uint8_t rank;
    uint8_t bank;
    // use a 64-bit unsigned during the computations as the row is
    // always the top bits, and check before creating the DRAMPacket
    uint64_t row;

    // truncate the address to a DRAM burst, which makes it unique to
    // a specific column, row, bank, rank and channel
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RoRaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoRaBaCoCh) {
        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address
        addr = addr / channels;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the lower-order column bits
        addr = addr / columnsPerStripe;

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next, the higher-order column bits
        addr = addr / (columnsPerRowBuffer / columnsPerStripe);

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);
    assert(row < Bank::NO_ROW);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, banks[rank][bank]);
}

void
DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of the first DRAM packet is kept unaligned. Subsequent DRAM
    // packets are aligned to burst size boundaries. This is to ensure we
    // accurately check read packets against packets in the write queue.
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                        pkt->getAddr() + pkt->getSize()) - addr;
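        // The chunk size computed above runs to the next burst boundary.
        // For example (illustrative numbers): with a 64-byte burst, a
        // packet starting at 0x30 with size 128 produces a first chunk of
        // min(0x40, 0xB0) - 0x30 = 16 bytes.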
        readPktSize[ceilLog2(size)]++;
        readBursts++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
            // check if the read is subsumed in the write entry we are
            // looking at
            if ((*i)->addr <= addr &&
                (addr + size) <= ((*i)->addr + (*i)->size)) {
                foundInWrQ = true;
                servicedByWrQ++;
                pktsServicedByWrQ++;
                DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
                        "write queue\n", addr, size);
                bytesReadWrQ += burstSize;
                break;
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[readQueue.size() + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue.push_back(dram_pkt);

            // Update stats
            avgRdQLen = readQueue.size() + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by the write queue, send the response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by the write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                        pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;
        writeBursts++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not so we
        // can stop at that point and also avoid enqueueing a new
        // request
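        //
        // Illustrative merge (arbitrary numbers, 64-byte bursts): a new
        // write covering [0x40, 0x60) arriving while the queue holds an
        // entry for [0x50, 0x80) is folded into that entry, which then
        // covers [0x40, 0x80) and still fits in a single burst.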
        bool merged = false;
        auto w = writeQueue.begin();

        while (!merged && w != writeQueue.end()) {
            // either of the two could be first, if they are the same
            // it does not matter which way we go
            if ((*w)->addr >= addr) {
                // the existing one starts after the new one, figure
                // out where the new one ends with respect to the
                // existing one
                if ((addr + size) >= ((*w)->addr + (*w)->size)) {
                    // check if the existing one is completely
                    // subsumed in the new one
                    DPRINTF(DRAM, "Merging write covering existing burst\n");
                    merged = true;
                    // update both the address and the size
                    (*w)->addr = addr;
                    (*w)->size = size;
                } else if ((addr + size) >= (*w)->addr &&
                           ((*w)->addr + (*w)->size - addr) <= burstSize) {
                    // the new one is just before or partially
                    // overlapping with the existing one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write before existing burst\n");
                    merged = true;
                    // the existing queue item needs to be adjusted with
                    // respect to both address and size
                    (*w)->size = (*w)->addr + (*w)->size - addr;
                    (*w)->addr = addr;
                }
            } else {
                // the new one starts after the current one, figure
                // out where the existing one ends with respect to the
                // new one
                if (((*w)->addr + (*w)->size) >= (addr + size)) {
                    // check if the new one is completely subsumed in the
                    // existing one
                    DPRINTF(DRAM, "Merging write into existing burst\n");
                    merged = true;
                    // no adjustments necessary
                } else if (((*w)->addr + (*w)->size) >= addr &&
                           (addr + size - (*w)->addr) <= burstSize) {
                    // the existing one is just before or partially
                    // overlapping with the new one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write after existing burst\n");
                    merged = true;
                    // the address is right, and only the size has
                    // to be adjusted
                    (*w)->size = addr + size - (*w)->addr;
                }
            }
            ++w;
        }

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(writeQueue.size() < writeBufferSize);
            wrQLenPdf[writeQueue.size()]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue.push_back(dram_pkt);

            // Update stats
            avgWrQLen = writeQueue.size();
        } else {
            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::printQs() const {
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (auto i = readQueue.begin(); i != readQueue.end(); ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (auto i = respQueue.begin(); i != respQueue.end(); ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    }
}

bool
DRAMCtrl::recvTimingReq(PacketPtr pkt)
{
    /// @todo temporary hack to deal with memory corruption issues until
    /// 4-phase transactions are complete
    for (int x = 0; x < pendingDelete.size(); x++)
        delete pendingDelete[x];
    pendingDelete.clear();

    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // simply drop inhibited packets for now
    if (pkt->memInhibitAsserted()) {
        DPRINTF(DRAM, "Inhibited packet -- Dropping it now\n");
        pendingDelete.push_back(pkt);
        return true;
    }

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // Find out how many dram packets a pkt translates to
    // If the burst size is equal to or larger than the pkt size, then a pkt
    // translates to only one dram packet. Otherwise, a pkt translates to
    // multiple dram packets
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);
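    // For example (illustrative numbers): a 128-byte request at address
    // 0x30 with a 64-byte burst gives offset 0x30 and
    // divCeil(0x30 + 128, 64) = 3 DRAM packets, the unaligned start
    // costing an extra burst.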

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            readReqs++;
            bytesReadSys += size;
        }
    } else if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            writeReqs++;
            bytesWrittenSys += size;
        }
    } else {
        DPRINTF(DRAM, "Neither read nor write, ignore timing\n");
        neitherReadNorWrite++;
        accessAndRespond(pkt, 1);
    }

    return true;
}

void
DRAMCtrl::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (writeQueue.empty() && readQueue.empty() &&
            drainManager) {
            drainManager->signalDrainDone();
            drainManager = NULL;
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetry();
    }
}

void
DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, bool switched_cmd_type)
{
    // This method does the arbitration between requests. The chosen
    // packet is simply moved to the head of the queue. The other
    // methods know that this is the place to look. For example, with
    // FCFS, this method does nothing
    assert(!queue.empty());

    if (queue.size() == 1) {
        DPRINTF(DRAM, "Single request, nothing to do\n");
        return;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // Do nothing, since the correct request is already at the head
    } else if (memSchedPolicy == Enums::frfcfs) {
        reorderQueue(queue, switched_cmd_type);
    } else
        panic("No scheduling policy chosen\n");
}

void
DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue, bool switched_cmd_type)
{
    // Only determine this when needed
    uint64_t earliest_banks = 0;

    // Search for row hits first, if no row hit is found then schedule the
    // packet to one of the earliest banks available
    bool found_earliest_pkt = false;
    bool found_prepped_diff_rank_pkt = false;
    auto selected_pkt_it = queue.begin();

    for (auto i = queue.begin(); i != queue.end(); ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;
        // Check if it is a row hit
        if (bank.openRow == dram_pkt->row) {
            if (dram_pkt->rank == activeRank || switched_cmd_type) {
                // FCFS within the hits, giving priority to commands
                // that access the same rank as the previous burst
                // to minimize bus turnaround delays
                // Only give rank priority when command type is not changing
                DPRINTF(DRAM, "Row buffer hit\n");
                selected_pkt_it = i;
                break;
            } else if (!found_prepped_diff_rank_pkt) {
                // found row hit for command on different rank than prev burst
                selected_pkt_it = i;
                found_prepped_diff_rank_pkt = true;
            }
        } else if (!found_earliest_pkt && !found_prepped_diff_rank_pkt) {
            // No row hit and
            // haven't found an entry with a row hit to a new rank
            if (earliest_banks == 0)
                // Determine entries with earliest bank prep delay
                // Function will give priority to commands that access the
                // same rank as previous burst and can prep the bank seamlessly
                earliest_banks = minBankPrep(queue, switched_cmd_type);

            // FCFS - Bank is first available bank
            if (bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
                // Remember the packet to be scheduled to one of the earliest
                // banks available, FCFS amongst the earliest banks
                selected_pkt_it = i;
                found_earliest_pkt = true;
            }
        }
    }

    DRAMPacket* selected_pkt = *selected_pkt_it;
    queue.erase(selected_pkt_it);
    queue.push_front(selected_pkt);
}

void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());

        // @todo someone should pay for this
        pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, curTick() + static_latency);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // still has a pointer to it
        pendingDelete.push_back(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}

void
DRAMCtrl::activateBank(Bank& bank, Tick act_tick, uint32_t row)
{
    // get the rank index from the bank
    uint8_t rank = bank.rank;

    assert(actTicks[rank].size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // update the open row
    assert(bank.openRow == Bank::NO_ROW);
    bank.openRow = row;

    // start counting anew, this covers both the case when we
    // auto-precharged, and when this access is forced to
    // precharge
    bank.bytesAccessed = 0;
    bank.rowAccesses = 0;

    ++numBanksActive;
    assert(numBanksActive <= banksPerRank * ranksPerChannel);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
            bank.bank, bank.rank, act_tick, numBanksActive);

    DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_tick, tCK), bank.bank,
            bank.rank);

    // The next access has to respect tRAS for this bank
    bank.preAllowedAt = act_tick + tRAS;

    // Respect the row-to-column command delay
    bank.colAllowedAt = std::max(act_tick + tRCD, bank.colAllowedAt);

    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate to any bank in this rank must not happen
        // before tRRD
        if (bankGroupArch && (bank.bankgr == banks[rank][i].bankgr)) {
            // bank group architecture requires longer delays between
            // ACT commands within the same bank group.  Use tRRD_L
            // in this case
            banks[rank][i].actAllowedAt = std::max(act_tick + tRRD_L,
                                                   banks[rank][i].actAllowedAt);
        } else {
            // use the shorter tRRD value when either
            // 1) bank group architecture is not supported
            // 2) bank is in a different bank group
            banks[rank][i].actAllowedAt = std::max(act_tick + tRRD,
                                                   banks[rank][i].actAllowedAt);
        }
    }

    // next, we deal with tXAW, if the activation limit is disabled
    // then we are done
    if (actTicks[rank].empty())
        return;

    // sanity check
    if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) {
        panic("Got %d activates in window %d (%llu - %llu) which is smaller "
              "than %llu\n", activationLimit, act_tick - actTicks[rank].back(),
              act_tick, actTicks[rank].back(), tXAW);
    }

    // shift the times used for the bookkeeping, the last element
    // (highest index) is the oldest one and hence the lowest value
    actTicks[rank].pop_back();

    // record a new activation (in the future)
    actTicks[rank].push_front(act_tick);

    // cannot activate more than X times in time window tXAW, push the
    // next one (the X + 1'st activate) to be tXAW away from the
    // oldest in our window of X
    if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) {
        DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier "
                "than %llu\n", activationLimit, actTicks[rank].back() + tXAW);
        for (int j = 0; j < banksPerRank; j++)
            // next activate must not happen before end of window
            banks[rank][j].actAllowedAt =
                std::max(actTicks[rank].back() + tXAW,
                         banks[rank][j].actAllowedAt);
    }
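    // Illustrative case: with activationLimit 4, the deque holds the last
    // four ACT ticks; if this ACT falls within tXAW of the oldest one,
    // every bank in the rank is blocked until that oldest ACT plus tXAW,
    // capping the rank at four activates per tXAW window.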

    // at the point when this activate takes place, make sure we
    // transition to the active power state
    if (!activateEvent.scheduled())
        schedule(activateEvent, act_tick);
    else if (activateEvent.when() > act_tick)
        // move it sooner in time
        reschedule(activateEvent, act_tick);
}

void
DRAMCtrl::processActivateEvent()
{
    // we should transition to the active state as soon as any bank is active
    if (pwrState != PWR_ACT)
        // note that at this point numBanksActive could be back at
        // zero again due to a precharge scheduled in the future
        schedulePowerEvent(PWR_ACT, curTick());
}

void
DRAMCtrl::prechargeBank(Bank& bank, Tick pre_at, bool trace)
{
    // make sure the bank has an open row
    assert(bank.openRow != Bank::NO_ROW);

    // sample the bytes per activate here since we are closing
    // the page
    bytesPerActivate.sample(bank.bytesAccessed);

    bank.openRow = Bank::NO_ROW;

    // no precharge allowed before this one
    bank.preAllowedAt = pre_at;

    Tick pre_done_at = pre_at + tRP;

    bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);

    assert(numBanksActive != 0);
    --numBanksActive;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, bank.rank, pre_at, numBanksActive);

    if (trace)
        DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK),
                bank.bank, bank.rank);

    // if we look at the current number of active banks we might be
    // tempted to think the DRAM is now idle, however this can be
    // undone by an activate that is scheduled to happen before we
    // would have reached the idle state, so schedule an event and
    // rather check once we actually make it to the point in time when
    // the (last) precharge takes place
    if (!prechargeEvent.scheduled())
        schedule(prechargeEvent, pre_done_at);
    else if (prechargeEvent.when() < pre_done_at)
        reschedule(prechargeEvent, pre_done_at);
}

void
DRAMCtrl::processPrechargeEvent()
{
    // if we reached zero, then special conditions apply as we track
    // if all banks are precharged for the power models
    if (numBanksActive == 0) {
        // we should transition to the idle state when the last bank
        // is precharged
        schedulePowerEvent(PWR_IDLE, curTick());
    }
}

void
DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // get the bank
    Bank& bank = dram_pkt->bankRef;

    // for the state we need to track if it is a row hit or not
    bool row_hit = true;

    // respect any constraints on the command (e.g. tRCD or tCCD)
    Tick cmd_at = std::max(bank.colAllowedAt, curTick());

    // Determine the access latency and update the bank state
    if (bank.openRow == dram_pkt->row) {
        // nothing to do
    } else {
        row_hit = false;

        // If there is a page open, precharge it.
        if (bank.openRow != Bank::NO_ROW) {
            prechargeBank(bank, std::max(bank.preAllowedAt, curTick()));
        }

        // next we need to account for the delay in activating the
        // page
        Tick act_tick = std::max(bank.actAllowedAt, curTick());

        // Record the activation and deal with all the global timing
        // constraints caused by a new activation (tRRD and tXAW)
        activateBank(bank, act_tick, dram_pkt->row);

        // issue the command as early as possible
        cmd_at = bank.colAllowedAt;
    }

    // we need to wait until the bus is available before we can issue
    // the command
    cmd_at = std::max(cmd_at, busBusyUntil - tCL);

    // update the packet ready time
    dram_pkt->readyTime = cmd_at + tCL + tBURST;
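    // As an illustrative data point (assuming DDR3-1600-like timings of
    // tCL 13.75ns and tBURST 5ns), the packet becomes ready roughly
    // 18.75ns after the column command issues.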

    // only one burst can use the bus at any one point in time
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);

    // update the time for the next read/write burst for each
    // bank (add a max with tCCD/tCCD_L here)
    Tick cmd_dly;
    for (int j = 0; j < ranksPerChannel; j++) {
        for (int i = 0; i < banksPerRank; i++) {
            // next burst to same bank group in this rank must not happen
            // before tCCD_L.  Different bank group timing requirement is
            // tBURST; Add tCS for different ranks
            if (dram_pkt->rank == j) {
                if (bankGroupArch && (bank.bankgr == banks[j][i].bankgr)) {
                    // bank group architecture requires longer delays between
                    // RD/WR burst commands to the same bank group.
                    // Use tCCD_L in this case
                    cmd_dly = tCCD_L;
                } else {
                    // use tBURST (equivalent to tCCD_S), the shorter
                    // cas-to-cas delay value, when either:
                    // 1) bank group architecture is not supported
                    // 2) bank is in a different bank group
                    cmd_dly = tBURST;
                }
            } else {
                // different rank is by default in a different bank group
                // use tBURST (equivalent to tCCD_S), which is the shorter
                // cas-to-cas delay in this case
                // Add tCS to account for rank-to-rank bus delay requirements
                cmd_dly = tBURST + tCS;
            }
            banks[j][i].colAllowedAt = std::max(cmd_at + cmd_dly,
                                                banks[j][i].colAllowedAt);
        }
    }

    // Save rank of current access
    activeRank = dram_pkt->rank;

    // If this is a write, we also need to respect the write recovery
    // time before a precharge, in the case of a read, respect the
    // read to precharge constraint
    bank.preAllowedAt = std::max(bank.preAllowedAt,
                                 dram_pkt->isRead ? cmd_at + tRTP :
                                 dram_pkt->readyTime + tWR);

    // increment the bytes accessed and the accesses per row
    bank.bytesAccessed += burstSize;
    ++bank.rowAccesses;

    // if we reached the max, then issue with an auto-precharge
    bool auto_precharge = pageMgmt == Enums::close ||
        bank.rowAccesses == maxAccessesPerRow;

    // if we did not hit the limit, we might still want to
    // auto-precharge
    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {
        // a twist on the open and close page policies:
        // 1) open_adaptive page policy does not blindly keep the
        // page open, but closes it if there are no row hits and there
        // are bank conflicts in the queue
        // 2) close_adaptive page policy does not blindly close the
        // page, but closes it only if there are no row hits in the queue.
        // In this case, only force an auto precharge when there
        // are no same page hits in the queue
        bool got_more_hits = false;
        bool got_bank_conflict = false;

        // either look at the read queue or write queue
        const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue :
            writeQueue;
        auto p = queue.begin();
        // make sure we are not considering the packet that we are
        // currently dealing with (which is the head of the queue)
        ++p;

        // keep on looking until we have found the required condition or
        // reached the end
        while (!(got_more_hits &&
                 (got_bank_conflict || pageMgmt == Enums::close_adaptive)) &&
               p != queue.end()) {
            bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                (dram_pkt->bank == (*p)->bank);
            bool same_row = dram_pkt->row == (*p)->row;
            got_more_hits |= same_rank_bank && same_row;
            got_bank_conflict |= same_rank_bank && !same_row;
            ++p;
        }

        // auto pre-charge when either
        // 1) open_adaptive policy, we have not got any more hits, and
        //    have a bank conflict
        // 2) close_adaptive policy and we have not got any more hits
        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);
    }

    // DRAMPower trace command to be written
    std::string mem_cmd = dram_pkt->isRead ? "RD" : "WR";

    // if this access should use auto-precharge, then we are
    // closing the row
    if (auto_precharge) {
        prechargeBank(bank, std::max(curTick(), bank.preAllowedAt), false);

        mem_cmd.append("A");

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
    }

    // Update bus state
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM, "Access to %lld, ready at %lld bus busy until %lld.\n",
            dram_pkt->addr, dram_pkt->readyTime, busBusyUntil);

    DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK), mem_cmd,
            dram_pkt->bank, dram_pkt->rank);

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = busBusyUntil - (tRP + tRCD + tCL);

    // Update the stats and schedule the next request
    if (dram_pkt->isRead) {
        ++readsThisTime;
        if (row_hit)
            readRowHits++;
        bytesReadDRAM += burstSize;
        perBankRdBursts[dram_pkt->bankId]++;

        // Update latency stats
        totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
        totBusLat += tBURST;
        totQLat += cmd_at - dram_pkt->entryTime;
    } else {
        ++writesThisTime;
        if (row_hit)
            writeRowHits++;
        bytesWritten += burstSize;
        perBankWrBursts[dram_pkt->bankId]++;
    }
}

void
DRAMCtrl::processNextReqEvent()
{
    // pre-emptively set to false.  Overwrite if in READ_TO_WRITE
    // or WRITE_TO_READ state
    bool switched_cmd_type = false;
    if (busState == READ_TO_WRITE) {
        DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
                "waiting\n", readsThisTime, readQueue.size());

        // sample and reset the read-related stats as we are now
        // transitioning to writes, and all reads are done
        rdPerTurnAround.sample(readsThisTime);
        readsThisTime = 0;

        // now proceed to do the actual writes
        busState = WRITE;
        switched_cmd_type = true;
    } else if (busState == WRITE_TO_READ) {
        DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
                "waiting\n", writesThisTime, writeQueue.size());

        wrPerTurnAround.sample(writesThisTime);
        writesThisTime = 0;

        busState = READ;
        switched_cmd_type = true;
    }

    if (refreshState != REF_IDLE) {
        // if a refresh is waiting for this event loop to finish, then hand
        // over now, and do not schedule a new nextReqEvent
        if (refreshState == REF_DRAIN) {
            DPRINTF(DRAM, "Refresh drain done, now precharging\n");

            refreshState = REF_PRE;

            // hand control back to the refresh event loop
            schedule(refreshEvent, curTick());
        }

        // let the refresh finish before issuing any further requests
        return;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (readQueue.empty()) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!writeQueue.empty() &&
                (drainManager || writeQueue.size() > writeLowThreshold)) {

                switch_to_writes = true;
            } else {
                // check if we are drained
                if (respQueue.empty() && drainManager) {
                    drainManager->signalDrainDone();
                    drainManager = NULL;
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {
            // Figure out which read request goes next, and move it to the
            // front of the read queue
            chooseNext(readQueue, switched_cmd_type);

            DRAMPacket* dram_pkt = readQueue.front();

            // here we get a bit creative and shift the bus busy time not
            // just the tWTR, but also a CAS latency to capture the fact
            // that we are allowed to prepare a new bank, but not issue a
            // read command until after tWTR, in essence we capture a
            // bubble on the data bus that is tWTR + tCL
            if (switched_cmd_type && dram_pkt->rank == activeRank) {
                busBusyUntil += tWTR + tCL;
            }
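            // As an illustrative data point (assuming DDR3-1600-like
            // timings of tWTR 7.5ns and tCL 13.75ns), a same-rank
            // write-to-read turnaround costs a ~21.25ns data-bus bubble.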

            doDRAMAccess(dram_pkt);

            // At this point we're done dealing with the request
            readQueue.pop_front();

            // sanity check
            assert(dram_pkt->size <= burstSize);
            assert(dram_pkt->readyTime >= curTick());

            // Insert into response queue. It will be sent back to the
            // requestor at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, dram_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= dram_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(dram_pkt);

            // we have so many writes that we have to transition
            if (writeQueue.size() > writeHighThreshold) {
                switch_to_writes = true;
            }
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busState = READ_TO_WRITE;
        }
    } else {
        chooseNext(writeQueue, switched_cmd_type);
        DRAMPacket* dram_pkt = writeQueue.front();
        // sanity check
        assert(dram_pkt->size <= burstSize);

        // add a bubble to the data bus, as defined by the
        // tRTW when access is to the same rank as previous burst
        // Different rank timing is handled with tCS, which is
        // applied to colAllowedAt
        if (switched_cmd_type && dram_pkt->rank == activeRank) {
            busBusyUntil += tRTW;
        }

        doDRAMAccess(dram_pkt);

        writeQueue.pop_front();
        delete dram_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        if (writeQueue.empty() ||
            (writeQueue.size() + minWritesPerSwitch < writeLowThreshold &&
             !drainManager) ||
            (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
            // turn the bus back around for reads again
            busState = WRITE_TO_READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }

    schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && writeQueue.size() < writeBufferSize) {
        retryWrReq = false;
        port.sendRetry();
    }
}

uint64_t
DRAMCtrl::minBankPrep(const deque<DRAMPacket*>& queue,
                      bool switched_cmd_type) const
{
    uint64_t bank_mask = 0;
    Tick min_act_at = MaxTick;

    uint64_t bank_mask_same_rank = 0;
    Tick min_act_at_same_rank = MaxTick;

    // Give precedence to commands that access the same rank as the
    // previous command
    bool same_rank_match = false;

    // determine if we have queued transactions targeting the
    // bank in question
    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
    for (auto p = queue.begin(); p != queue.end(); ++p) {
        got_waiting[(*p)->bankId] = true;
    }

    for (int i = 0; i < ranksPerChannel; i++) {
        for (int j = 0; j < banksPerRank; j++) {
            uint8_t bank_id = i * banksPerRank + j;

            // if we have waiting requests for the bank, and it is
            // amongst the first available, update the mask
            if (got_waiting[bank_id]) {
                // simplistic approximation of when the bank can issue
                // an activate, ignoring any rank-to-rank switching
                // cost in this calculation
                Tick act_at = banks[i][j].openRow == Bank::NO_ROW ?
                    banks[i][j].actAllowedAt :
                    std::max(banks[i][j].preAllowedAt, curTick()) + tRP;
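                // e.g., if the bank still has a row open, it must
                // first precharge (earliest at max(preAllowedAt,
                // curTick())) and then wait tRP before the new
                // activate can issue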

                // prioritize commands that access the
                // same rank as previous burst
                // Calculate bank mask separately for the case and
                // evaluate after loop iterations complete
                if (i == activeRank && ranksPerChannel > 1) {
                    if (act_at <= min_act_at_same_rank) {
                        // reset same rank bank mask if new minimum is found
                        // and previous minimum could not immediately send ACT
                        if (act_at < min_act_at_same_rank &&
                            min_act_at_same_rank > curTick())
                            bank_mask_same_rank = 0;

                        // Set flag indicating that a same rank
                        // opportunity was found
                        same_rank_match = true;

                        // set the bit corresponding to the available bank
                        replaceBits(bank_mask_same_rank, bank_id, bank_id, 1);
                        min_act_at_same_rank = act_at;
                    }
                } else {
                    if (act_at <= min_act_at) {
                        // reset bank mask if new minimum is found and the
                        // previous minimum could not immediately send ACT
                        if (act_at < min_act_at && min_act_at > curTick())
                            bank_mask = 0;
                        // set the bit corresponding to the available bank
                        replaceBits(bank_mask, bank_id, bank_id, 1);
                        min_act_at = act_at;
                    }
                }
            }
        }
    }

    // Determine the earliest time when the next burst can issue based
    // on the current busBusyUntil delay.
    // Offset by tCL and tRCD to translate the bus time back to the
    // corresponding ACT timing variables
    Tick min_cmd_at = busBusyUntil - tCL - tRCD;
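
    // i.e., a burst that follows seamlessly on the data bus needs its
    // column command at busBusyUntil - tCL, and the matching ACT no
    // later than busBusyUntil - tCL - tRCD; a bank that can activate
    // by min_cmd_at can thus be prepped without adding a bus gap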

    // Prioritize same rank accesses that can issue B2B
    // Only optimize for same ranks when the command type
    // does not change; do not want to unnecessarily incur tWTR
    //
    // Resulting FCFS prioritization order is:
    // 1) Commands that access the same rank as previous burst
    //    and can prep the bank seamlessly.
    // 2) Commands (any rank) with earliest bank prep
    if (!switched_cmd_type && same_rank_match &&
        min_act_at_same_rank <= min_cmd_at) {
        bank_mask = bank_mask_same_rank;
    }

    return bank_mask;
}
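
// A minimal sketch of how a caller could decode the bank mask returned
// by minBankPrep; illustrative only, the actual consumer is the
// FR-FCFS reordering logic earlier in this file:
//
//     uint64_t mask = minBankPrep(queue, switched_cmd_type);
//     for (int i = 0; i < ranksPerChannel; i++)
//         for (int j = 0; j < banksPerRank; j++) {
//             uint8_t bank_id = i * banksPerRank + j;
//             if (bits(mask, bank_id, bank_id))
//                 ; // this bank is amongst the first available to prep
//         }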

void
DRAMCtrl::processRefreshEvent()
{
    // when first preparing the refresh, remember when it was due
    if (refreshState == REF_IDLE) {
        // remember when the refresh is due
        refreshDueAt = curTick();

        // proceed to drain
        refreshState = REF_DRAIN;

        DPRINTF(DRAM, "Refresh due\n");
    }

    // let any scheduled read or write go ahead, after which it will
    // hand control back to this event loop
    if (refreshState == REF_DRAIN) {
        if (nextReqEvent.scheduled()) {
            // hand control over to the request loop until it is
            // evaluated next
            DPRINTF(DRAM, "Refresh awaiting draining\n");

            return;
        } else {
            refreshState = REF_PRE;
        }
    }

    // at this point, ensure that all banks are precharged
    if (refreshState == REF_PRE) {
        // precharge any active bank if we are not already in the idle
        // state
        if (pwrState != PWR_IDLE) {
            // at the moment, we use a precharge all even if there is
            // only a single bank open
            DPRINTF(DRAM, "Precharging all\n");

            // first determine when we can precharge
            Tick pre_at = curTick();
            for (int i = 0; i < ranksPerChannel; i++) {
                for (int j = 0; j < banksPerRank; j++) {
                    // respect both causality and any existing bank
                    // constraints, some banks could already have a
                    // (auto) precharge scheduled
                    pre_at = std::max(banks[i][j].preAllowedAt, pre_at);
                }
            }

            // make sure all banks are precharged, and for those that
            // already are, update their availability
            Tick act_allowed_at = pre_at + tRP;

            for (int i = 0; i < ranksPerChannel; i++) {
                for (int j = 0; j < banksPerRank; j++) {
                    if (banks[i][j].openRow != Bank::NO_ROW) {
                        prechargeBank(banks[i][j], pre_at, false);
                    } else {
                        banks[i][j].actAllowedAt =
                            std::max(banks[i][j].actAllowedAt, act_allowed_at);
                        banks[i][j].preAllowedAt =
                            std::max(banks[i][j].preAllowedAt, pre_at);
                    }
                }

                // at the moment this affects all ranks
                DPRINTF(DRAMPower, "%llu,PREA,0,%d\n", divCeil(pre_at, tCK),
                        i);
            }
        } else {
            DPRINTF(DRAM, "All banks already precharged, starting refresh\n");

            // go ahead and kick the power state machine into gear if
            // we are already idle
            schedulePowerEvent(PWR_REF, curTick());
        }

        refreshState = REF_RUN;
        assert(numBanksActive == 0);

        // wait for all banks to be precharged, at which point the
        // power state machine will transition to the idle state, and
        // automatically move to a refresh; at that point it will also
        // call this method to get the refresh event loop going again
        return;
    }

    // last but not least we perform the actual refresh
    if (refreshState == REF_RUN) {
        // should never get here with any banks active
        assert(numBanksActive == 0);
        assert(pwrState == PWR_REF);

        Tick ref_done_at = curTick() + tRFC;

        for (int i = 0; i < ranksPerChannel; i++) {
            for (int j = 0; j < banksPerRank; j++) {
                banks[i][j].actAllowedAt = ref_done_at;
            }

            // at the moment this affects all ranks
            DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), tCK), i);
        }

        // make sure we did not wait so long that we cannot make up
        // for it
        if (refreshDueAt + tREFI < ref_done_at) {
            fatal("Refresh was delayed so long we cannot catch up\n");
        }

        // compensate for the delay in actually performing the refresh
        // when scheduling the next one
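        // the event is also pulled forward by tRP so that any
        // precharge needed before the next refresh completes by the
        // due time; e.g., with hypothetical tREFI = 7.8us and
        // tRP = 15ns, the event fires at refreshDueAt + 7.8us - 15ns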
        schedule(refreshEvent, refreshDueAt + tREFI - tRP);

        assert(!powerEvent.scheduled());

        // move to the idle power state once the refresh is done; this
        // will also move the refresh state machine to the refresh
        // idle state
        schedulePowerEvent(PWR_IDLE, ref_done_at);

        DPRINTF(DRAMState, "Refresh done at %llu and next refresh at %llu\n",
                ref_done_at, refreshDueAt + tREFI);
    }
}

void
DRAMCtrl::schedulePowerEvent(PowerState pwr_state, Tick tick)
{
    // respect causality
    assert(tick >= curTick());

    if (!powerEvent.scheduled()) {
        DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
                tick, pwr_state);

        // insert the new transition
        pwrStateTrans = pwr_state;

        schedule(powerEvent, tick);
    } else {
        panic("Scheduled power event at %llu to state %d, "
              "with scheduled event at %llu to %d\n", tick, pwr_state,
              powerEvent.when(), pwrStateTrans);
    }
}
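
// Note that schedulePowerEvent deliberately does not support
// rescheduling: callers are expected to ensure no power event is
// pending, as processRefreshEvent does (assert(!powerEvent.scheduled()))
// before requesting the move to PWR_IDLE above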

void
DRAMCtrl::processPowerEvent()
{
    // remember where we were, and for how long
    Tick duration = curTick() - pwrStateTick;
    PowerState prev_state = pwrState;

    // update the accounting
    pwrStateTime[prev_state] += duration;

    pwrState = pwrStateTrans;
    pwrStateTick = curTick();

    if (pwrState == PWR_IDLE) {
        DPRINTF(DRAMState, "All banks precharged\n");

        // if we were refreshing, make sure we start scheduling requests again
        if (prev_state == PWR_REF) {
            DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
            assert(pwrState == PWR_IDLE);

            // kick things into action again
            refreshState = REF_IDLE;
            assert(!nextReqEvent.scheduled());
            schedule(nextReqEvent, curTick());
        } else {
            assert(prev_state == PWR_ACT);

            // if we have a pending refresh, and are now moving to
            // the idle state, directly transition to a refresh
            if (refreshState == REF_RUN) {
                // there should be nothing waiting at this point
                assert(!powerEvent.scheduled());

                // update the state in zero time and proceed below
                pwrState = PWR_REF;
            }
        }
    }

    // if we transitioned to the refresh state, let the refresh state
    // machine know of this state update and let it deal with the
    // scheduling of the next power state transition as well as the
    // following refresh
    if (pwrState == PWR_REF) {
        DPRINTF(DRAMState, "Refreshing\n");
        // kick the refresh event loop into action again, and that
        // in turn will schedule a transition to the idle power
        // state once the refresh is done
        assert(refreshState == REF_RUN);
        processRefreshEvent();
    }
}

void
DRAMCtrl::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    readReqs
        .name(name() + ".readReqs")
        .desc("Number of read requests accepted");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Number of write requests accepted");

    readBursts
        .name(name() + ".readBursts")
        .desc("Number of DRAM read bursts, "
              "including those serviced by the write queue");

    writeBursts
        .name(name() + ".writeBursts")
        .desc("Number of DRAM write bursts, "
              "including those merged in the write queue");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of DRAM read bursts serviced by the write queue");

    mergedWrBursts
        .name(name() + ".mergedWrBursts")
        .desc("Number of DRAM write bursts merged with an existing one");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWriteReqs")
        .desc("Number of requests that are neither read nor write");

    perBankRdBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdBursts")
        .desc("Per bank read bursts");

    perBankWrBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrBursts")
        .desc("Per bank write bursts");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length when enqueuing")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length when enqueuing")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total ticks spent queuing");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total ticks spent in databus transfers");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Total ticks spent from burst creation until serviced "
              "by the DRAM");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per DRAM burst")
        .precision(2);

    avgQLat = totQLat / (readBursts - servicedByWrQ);
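
    // reads serviced directly by the write queue are excluded from the
    // per-burst averages here and below, as they never perform an
    // actual DRAM access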

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per DRAM burst")
        .precision(2);

    avgBusLat = totBusLat / (readBursts - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency per DRAM burst")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times read queue was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times write queue was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;

    readPktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".readPktSize")
        .desc("Read request sizes (log2)");

    writePktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".writePktSize")
        .desc("Write request sizes (log2)");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");

    bytesPerActivate
        .init(maxAccessesPerRow)
        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")
        .flags(nozero);

    rdPerTurnAround
        .init(readBufferSize)
        .name(name() + ".rdPerTurnAround")
        .desc("Reads before turning the bus around for writes")
        .flags(nozero);

    wrPerTurnAround
        .init(writeBufferSize)
        .name(name() + ".wrPerTurnAround")
        .desc("Writes before turning the bus around for reads")
        .flags(nozero);

    bytesReadDRAM
        .name(name() + ".bytesReadDRAM")
        .desc("Total number of bytes read from DRAM");

    bytesReadWrQ
        .name(name() + ".bytesReadWrQ")
        .desc("Total number of bytes read from write queue");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to DRAM");

    bytesReadSys
        .name(name() + ".bytesReadSys")
        .desc("Total read bytes from the system interface side");

    bytesWrittenSys
        .name(name() + ".bytesWrittenSys")
        .desc("Total written bytes from the system interface side");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average DRAM read bandwidth in MByte/s")
        .precision(2);

    avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MByte/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgRdBWSys
        .name(name() + ".avgRdBWSys")
        .desc("Average system read bandwidth in MByte/s")
        .precision(2);

    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;

    avgWrBWSys
        .name(name() + ".avgWrBWSys")
        .desc("Average system write bandwidth in MByte/s")
        .precision(2);

    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MByte/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;
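
    // as an example, a hypothetical 64-byte burst every tBURST = 5ns
    // yields (1e12 / 5000) * 64 / 1000000 = 12800 MByte/s, assuming
    // 1ps ticks (SimClock::Frequency = 1e12)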

    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);

    // Stats for DRAM Power calculation based on Micron datasheet
    busUtilRead
        .name(name() + ".busUtilRead")
        .desc("Data bus utilization in percentage for reads")
        .precision(2);

    busUtilRead = avgRdBW / peakBW * 100;

    busUtilWrite
        .name(name() + ".busUtilWrite")
        .desc("Data bus utilization in percentage for writes")
        .precision(2);

    busUtilWrite = avgWrBW / peakBW * 100;

    pageHitRate
        .name(name() + ".pageHitRate")
        .desc("Row buffer hit rate, read and write combined")
        .precision(2);

    pageHitRate = (writeRowHits + readRowHits) /
        (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;
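
    // e.g., with hypothetical counts of 800 read row hits out of 1000
    // DRAM-serviced read bursts and 700 write row hits out of 1000
    // issued write bursts, pageHitRate = (700 + 800) / 2000 * 100 = 75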

    pwrStateTime
        .init(5)
        .name(name() + ".memoryStateTime")
        .desc("Time in different power states");
    pwrStateTime.subname(0, "IDLE");
    pwrStateTime.subname(1, "REF");
    pwrStateTime.subname(2, "PRE_PDN");
    pwrStateTime.subname(3, "ACT");
    pwrStateTime.subname(4, "ACT_PDN");
}

void
DRAMCtrl::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}

BaseSlavePort&
DRAMCtrl::getSlavePort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return port;
    }
}

unsigned int
DRAMCtrl::drain(DrainManager *dm)
{
    unsigned int count = port.drain(dm);

    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(writeQueue.empty() && readQueue.empty() &&
          respQueue.empty())) {
        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", writeQueue.size(), readQueue.size(),
                respQueue.size());
        ++count;
        drainManager = dm;

        // the only part that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (!writeQueue.empty() && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }
    }

    if (count)
        setDrainState(Drainable::Draining);
    else
        setDrainState(Drainable::Drained);
    return count;
}
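
// Note on the drain protocol above: the returned count is the number of
// objects that are not yet drained (the port, plus this controller if
// any queue is non-empty); the stored drainManager is signalled later,
// once the queues have emptied out, by the normal request/response
// processing elsewhere in this file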

DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }

AddrRangeList
DRAMCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}

void
DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}

Tick
DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}

bool
DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}

DRAMCtrl*
DRAMCtrlParams::create()
{
    return new DRAMCtrl(this);
}