dram_ctrl.cc revision 10207
/*
 * Copyright (c) 2010-2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 *          Ani Udipi
 *          Neha Agarwal
 */

#include "base/bitfield.hh"
#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/Drain.hh"
#include "mem/dram_ctrl.hh"
#include "sim/system.hh"

using namespace std;

DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this),
    retryRdReq(false), retryWrReq(false),
    rowHitFlag(false), busState(READ),
    respondEvent(this), refreshEvent(this),
    nextReqEvent(this), drainManager(NULL),
    deviceBusWidth(p->device_bus_width), burstLength(p->burst_length),
    deviceRowBufferSize(p->device_rowbuffer_size),
    devicesPerRank(p->devices_per_rank),
    burstSize((devicesPerRank * burstLength * deviceBusWidth) / 8),
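    // e.g., a rank of 8 x8 devices with burst length 8 gives a
    // (8 * 8 * 8) / 8 = 64-byte data bus burst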
    rowBufferSize(devicesPerRank * deviceRowBufferSize),
    columnsPerRowBuffer(rowBufferSize / burstSize),
    ranksPerChannel(p->ranks_per_channel),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    tWTR(p->tWTR), tRTW(p->tRTW), tBURST(p->tBURST),
    tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP), tRAS(p->tRAS),
    tRFC(p->tRFC), tREFI(p->tREFI), tRRD(p->tRRD),
    tXAW(p->tXAW), activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    maxAccessesPerRow(p->max_accesses_per_row),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    busBusyUntil(0), refreshDueAt(0), refreshState(REF_IDLE), prevArrival(0),
    nextReqTime(0), idleStartTick(0), numBanksActive(0)
{
    // create the bank states based on the dimensions of the ranks and
    // banks
    banks.resize(ranksPerChannel);
    actTicks.resize(ranksPerChannel);
    for (size_t c = 0; c < ranksPerChannel; ++c) {
        banks[c].resize(banksPerRank);
        actTicks[c].resize(activationLimit, 0);
    }

    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);

    // determine the rows per bank by looking at the total capacity
    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

    rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);

    if (range.interleaved()) {
        if (channels != range.stripes())
            fatal("%s has %d interleaved address stripes but %d channel(s)\n",
                  name(), range.stripes(), channels);

        if (addrMapping == Enums::RoRaBaChCo) {
            if (rowBufferSize != range.granularity()) {
                fatal("Interleaving of %s doesn't match RoRaBaChCo "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoRaBaCoCh) {
            if (system()->cacheLineSize() != range.granularity()) {
                fatal("Interleaving of %s doesn't match RoRaBaCoCh "
                      "address map\n", name());
            }
        } else if (addrMapping == Enums::RoCoRaBaCh) {
            if (system()->cacheLineSize() != range.granularity())
                fatal("Interleaving of %s doesn't match RoCoRaBaCh "
                      "address map\n", name());
        }
    }

    // some basic sanity checks
    if (tREFI <= tRP || tREFI <= tRFC) {
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
              tREFI, tRP, tRFC);
    }
}

void
DRAMCtrl::init()
{
    if (!port.isConnected()) {
        fatal("DRAMCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }
}

void
DRAMCtrl::startup()
{
    // update the start tick for the precharge accounting to the
    // current tick
    idleStartTick = curTick();

    // shift the bus busy time sufficiently far ahead that we never
    // have to worry about negative values when computing the time for
    // the next request, this will add an insignificant bubble at the
    // start of simulation
    busBusyUntil = curTick() + tRP + tRCD + tCL;

    // print the configuration of the controller
    printParams();

    // kick off the refresh, and give ourselves enough time to
    // precharge
    schedule(refreshEvent, curTick() + tREFI - tRP);
}

Tick
DRAMCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());

    // do the actual memory access and turn the packet into a response
    access(pkt);

    Tick latency = 0;
    if (!pkt->memInhibitAsserted() && pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        latency = tRP + tRCD + tCL;
    }
    return latency;
}

bool
DRAMCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, readQueue.size() + respQueue.size(),
            neededEntries);

    return
        (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
}

bool
DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, writeQueue.size(), neededEntries);
    return (writeQueue.size() + neededEntries) > writeBufferSize;
}

DRAMCtrl::DRAMPacket*
DRAMCtrl::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size,
                       bool isRead)
{
    // decode the address based on the address mapping scheme, with
    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
    // channel, respectively
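    // e.g., RoRaBaChCo reads (from MSB to LSB) as row, rank, bank,
    // channel and column bits, with the intra-burst offset below that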
    uint8_t rank;
    uint8_t bank;
    uint16_t row;

    // truncate the address to the access granularity
    Addr addr = dramPktAddr / burstSize;

    // we have removed the lowest order address bits that denote the
    // position within the column
    if (addrMapping == Enums::RoRaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / columnsPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoRaBaCoCh) {
        // take out the channel part of the address
        addr = addr / channels;

        // next, the column
        addr = addr / columnsPerRowBuffer;

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleave
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RoCoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next the column bits which we do not need to keep track of
        // and simply skip past
        addr = addr / columnsPerRowBuffer;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
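    // flatten (rank, bank) into a single bank id, e.g. rank 1, bank 3
    // with 8 banks per rank yields bank_id 8 * 1 + 3 = 11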
    uint16_t bank_id = banksPerRank * rank + bank;
    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, banks[rank][bank]);
}

void
DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pktCount != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of the first DRAM packet is kept unaligned. Subsequent DRAM
    // packets are aligned to burst size boundaries. This is to ensure we
    // accurately check read packets against packets in the write queue.
    Addr addr = pkt->getAddr();
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                        pkt->getAddr() + pkt->getSize()) - addr;
        readPktSize[ceilLog2(size)]++;
        readBursts++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
            // check if the read is subsumed in the write entry we are
            // looking at
            if ((*i)->addr <= addr &&
                (addr + size) <= ((*i)->addr + (*i)->size)) {
                foundInWrQ = true;
                servicedByWrQ++;
                pktsServicedByWrQ++;
                DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
                        "write queue\n", addr, size);
                bytesReadWrQ += burstSize;
                break;
            }
        }

        // If not found in the write q, make a DRAM packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pktCount > 1 && burst_helper == NULL) {
                DPRINTF(DRAM, "Read to addr %lld translates to %d "
                        "dram requests\n", pkt->getAddr(), pktCount);
                burst_helper = new BurstHelper(pktCount);
            }

            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, true);
            dram_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            rdQLenPdf[readQueue.size() + respQueue.size()]++;

            DPRINTF(DRAM, "Adding to read queue\n");

            readQueue.push_back(dram_pkt);

            // Update stats
            avgRdQLen = readQueue.size() + respQueue.size();
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
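        // since burstSize is a power of two, setting the low-order bits
        // and adding one rounds up to the next burst boundary, e.g. with
        // a 64-byte burst, (0x1234 | 0x3f) + 1 = 0x1240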
        addr = (addr | (burstSize - 1)) + 1;
    }

    // If all packets are serviced by the write queue, we send the
    // response back
    if (pktsServicedByWrQ == pktCount) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple DRAM packets
    Addr addr = pkt->getAddr();
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                        pkt->getAddr() + pkt->getSize()) - addr;
        writePktSize[ceilLog2(size)]++;
        writeBursts++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not so we
        // can stop at that point and also avoid enqueueing a new
        // request
        bool merged = false;
        auto w = writeQueue.begin();

        while (!merged && w != writeQueue.end()) {
            // either of the two could be first, if they are the same
            // it does not matter which way we go
            if ((*w)->addr >= addr) {
                // the existing one starts after the new one, figure
                // out where the new one ends with respect to the
                // existing one
                if ((addr + size) >= ((*w)->addr + (*w)->size)) {
                    // check if the existing one is completely
                    // subsumed in the new one
                    DPRINTF(DRAM, "Merging write covering existing burst\n");
                    merged = true;
                    // update both the address and the size
                    (*w)->addr = addr;
                    (*w)->size = size;
                } else if ((addr + size) >= (*w)->addr &&
                           ((*w)->addr + (*w)->size - addr) <= burstSize) {
                    // the new one is just before or partially
                    // overlapping with the existing one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write before existing burst\n");
                    merged = true;
                    // the existing queue item needs to be adjusted with
                    // respect to both address and size
                    (*w)->size = (*w)->addr + (*w)->size - addr;
                    (*w)->addr = addr;
                }
            } else {
                // the new one starts after the current one, figure
                // out where the existing one ends with respect to the
                // new one
                if (((*w)->addr + (*w)->size) >= (addr + size)) {
                    // check if the new one is completely subsumed in the
                    // existing one
                    DPRINTF(DRAM, "Merging write into existing burst\n");
                    merged = true;
                    // no adjustments necessary
                } else if (((*w)->addr + (*w)->size) >= addr &&
                           (addr + size - (*w)->addr) <= burstSize) {
                    // the existing one is just before or partially
                    // overlapping with the new one, and together
                    // they fit within a burst
                    DPRINTF(DRAM, "Merging write after existing burst\n");
                    merged = true;
                    // the address is right, and only the size has
                    // to be adjusted
                    (*w)->size = addr + size - (*w)->addr;
                }
            }
            ++w;
        }

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            DRAMPacket* dram_pkt = decodeAddr(pkt, addr, size, false);

            assert(writeQueue.size() < writeBufferSize);
            wrQLenPdf[writeQueue.size()]++;

            DPRINTF(DRAM, "Adding to write queue\n");

            writeQueue.push_back(dram_pkt);

            // Update stats
            avgWrQLen = writeQueue.size();
        } else {
            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            mergedWrBursts++;
        }

        // Starting address of next dram pkt (aligned to burstSize boundary)
        addr = (addr | (burstSize - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
DRAMCtrl::printParams() const
{
    // Sanity check print of important parameters
    DPRINTF(DRAM,
            "Memory controller %s physical organization\n"      \
            "Number of devices per rank   %d\n"                 \
            "Device bus width (in bits)   %d\n"                 \
            "DRAM data bus burst (bytes)  %d\n"                 \
            "Row buffer size (bytes)      %d\n"                 \
            "Columns per row buffer       %d\n"                 \
            "Rows    per bank             %d\n"                 \
            "Banks   per rank             %d\n"                 \
            "Ranks   per channel          %d\n"                 \
            "Total mem capacity (bytes)   %u\n",
            name(), devicesPerRank, deviceBusWidth, burstSize, rowBufferSize,
            columnsPerRowBuffer, rowsPerBank, banksPerRank, ranksPerChannel,
            rowBufferSize * rowsPerBank * banksPerRank * ranksPerChannel);

    string scheduler = memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS";
    string address_mapping = addrMapping == Enums::RoRaBaChCo ? "RoRaBaChCo" :
        (addrMapping == Enums::RoRaBaCoCh ? "RoRaBaCoCh" : "RoCoRaBaCh");
    string page_policy = pageMgmt == Enums::open ? "OPEN" :
        (pageMgmt == Enums::open_adaptive ? "OPEN (adaptive)" :
        (pageMgmt == Enums::close_adaptive ? "CLOSE (adaptive)" : "CLOSE"));

    DPRINTF(DRAM,
            "Memory controller %s characteristics\n"    \
            "Read buffer size     %d\n"                 \
            "Write buffer size    %d\n"                 \
            "Write high thresh    %d\n"                 \
            "Write low thresh     %d\n"                 \
            "Scheduler            %s\n"                 \
            "Address mapping      %s\n"                 \
            "Page policy          %s\n",
            name(), readBufferSize, writeBufferSize, writeHighThreshold,
            writeLowThreshold, scheduler, address_mapping, page_policy);

    DPRINTF(DRAM, "Memory controller %s timing specs\n" \
            "tRCD      %d ticks\n"                        \
            "tCL       %d ticks\n"                        \
            "tRP       %d ticks\n"                        \
            "tBURST    %d ticks\n"                        \
            "tRFC      %d ticks\n"                        \
            "tREFI     %d ticks\n"                        \
            "tWTR      %d ticks\n"                        \
            "tRTW      %d ticks\n"                        \
            "tXAW (%d) %d ticks\n",
            name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR,
            tRTW, activationLimit, tXAW);
}

void
DRAMCtrl::printQs() const {
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (auto i = readQueue.begin(); i != readQueue.end(); ++i) {
        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
    for (auto i = respQueue.begin(); i != respQueue.end(); ++i) {
        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
    }
    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (auto i = writeQueue.begin(); i != writeQueue.end(); ++i) {
        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
    }
}

bool
DRAMCtrl::recvTimingReq(PacketPtr pkt)
{
    /// @todo temporary hack to deal with memory corruption issues until
    /// 4-phase transactions are complete
    for (int x = 0; x < pendingDelete.size(); x++)
        delete pendingDelete[x];
    pendingDelete.clear();

    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // simply drop inhibited packets for now
    if (pkt->memInhibitAsserted()) {
        DPRINTF(DRAM, "Inhibited packet -- Dropping it now\n");
        pendingDelete.push_back(pkt);
        return true;
    }

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // Find out how many dram packets a pkt translates to
    // If the burst size is equal to or larger than the pkt size, then a pkt
    // translates to only one dram packet. Otherwise, a pkt translates to
    // multiple dram packets
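    // e.g., a 128-byte packet starting at offset 32 into a 64-byte
    // burst needs divCeil(32 + 128, 64) = 3 bursts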
    unsigned size = pkt->getSize();
    unsigned offset = pkt->getAddr() & (burstSize - 1);
    unsigned int dram_pkt_count = divCeil(offset + size, burstSize);

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, dram_pkt_count);
            readReqs++;
            bytesReadSys += size;
        }
    } else if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(dram_pkt_count)) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, dram_pkt_count);
            writeReqs++;
            bytesWrittenSys += size;
        }
    } else {
        DPRINTF(DRAM, "Neither read nor write, ignore timing\n");
        neitherReadNorWrite++;
        accessAndRespond(pkt, 1);
    }

    return true;
}

void
DRAMCtrl::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

    DRAMPacket* dram_pkt = respQueue.front();

    if (dram_pkt->burstHelper) {
        // it is a split packet
        dram_pkt->burstHelper->burstsServiced++;
        if (dram_pkt->burstHelper->burstsServiced ==
            dram_pkt->burstHelper->burstCount) {
            // we have now serviced all child packets of a system packet
            // so we can now respond to the requester
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
            delete dram_pkt->burstHelper;
            dram_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (writeQueue.empty() && readQueue.empty() &&
            drainManager) {
            drainManager->signalDrainDone();
            drainManager = NULL;
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetry();
    }
}

void
DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue)
{
    // This method does the arbitration between requests. The chosen
    // packet is simply moved to the head of the queue. The other
    // methods know that this is the place to look. For example, with
    // FCFS, this method does nothing
    assert(!queue.empty());

    if (queue.size() == 1) {
        DPRINTF(DRAM, "Single request, nothing to do\n");
        return;
    }

    if (memSchedPolicy == Enums::fcfs) {
        // Do nothing, since the request to serve is already at the head
    } else if (memSchedPolicy == Enums::frfcfs) {
        reorderQueue(queue);
    } else
        panic("No scheduling policy chosen\n");
}

void
DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue)
{
    // Only determine this when needed
    uint64_t earliest_banks = 0;

    // Search for row hits first, if no row hit is found then schedule the
    // packet to one of the earliest banks available
    bool found_earliest_pkt = false;
    auto selected_pkt_it = queue.begin();

    for (auto i = queue.begin(); i != queue.end(); ++i) {
        DRAMPacket* dram_pkt = *i;
        const Bank& bank = dram_pkt->bankRef;
        // Check if it is a row hit
        if (bank.openRow == dram_pkt->row) {
            DPRINTF(DRAM, "Row buffer hit\n");
            selected_pkt_it = i;
            break;
        } else if (!found_earliest_pkt) {
            // No row hit, go for first ready
            if (earliest_banks == 0)
                earliest_banks = minBankFreeAt(queue);

            // Bank is ready or is the first available bank
            if (bank.freeAt <= curTick() ||
                bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
                // Remember the packet to be scheduled to one of the earliest
                // banks available
                selected_pkt_it = i;
                found_earliest_pkt = true;
            }
        }
    }

    DRAMPacket* selected_pkt = *selected_pkt_it;
    queue.erase(selected_pkt_it);
    queue.push_front(selected_pkt);
}

void
DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(DRAM, "Responding to Address %lld.. ", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    access(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());

        // @todo someone should pay for this
        pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, curTick() + static_latency);
    } else {
        // @todo the packet is going to be deleted, and the DRAMPacket
        // still has a pointer to it
        pendingDelete.push_back(pkt);
    }

    DPRINTF(DRAM, "Done\n");

    return;
}

pair<Tick, Tick>
DRAMCtrl::estimateLatency(DRAMPacket* dram_pkt, Tick inTime)
{
    // If a request reaches a bank at tick 'inTime', how much time
    // *after* that does it take to finish the request, depending
    // on bank status and page open policy. Note that this method
    // considers only the time taken for the actual read or write
    // to complete, NOT any additional time thereafter for tRAS or
    // tRP.
    Tick accLat = 0;
    Tick bankLat = 0;
    rowHitFlag = false;
    Tick potentialActTick;

    const Bank& bank = dram_pkt->bankRef;
    // open page policy, or one of the adaptive policies
    if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive ||
        pageMgmt == Enums::close_adaptive) {
        if (bank.openRow == dram_pkt->row) {
            // When we have a row-buffer hit,
            // we don't care about tRAS having expired or not,
            // but do care about the bank being free for access
            rowHitFlag = true;

            // When a series of requests arrive to the same row,
            // DDR systems are capable of streaming data continuously
            // at maximum bandwidth (subject to tCCD). Here, we approximate
            // this condition, and assume that whenever a bank is already
            // busy and a new request comes in, it can be completed with no
            // penalty beyond waiting for the existing read to complete.
            if (bank.freeAt > inTime) {
                accLat += bank.freeAt - inTime;
                bankLat += 0;
            } else {
                // CAS latency only
                accLat += tCL;
                bankLat += tCL;
            }

        } else {
            // Row-buffer miss, need to close the existing row
            // once tRAS has expired, then open the new one,
            // then add the CAS latency.
            Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt);

            if (freeTime > inTime)
                accLat += freeTime - inTime;

            // If there is no open row (open adaptive), then there
            // is no precharge delay, otherwise go with tRP
            Tick precharge_delay = bank.openRow == Bank::NO_ROW ? 0 : tRP;

            // The bank is free, and you may be able to activate
            potentialActTick = inTime + accLat + precharge_delay;
            if (potentialActTick < bank.actAllowedAt)
                accLat += bank.actAllowedAt - potentialActTick;

            accLat += precharge_delay + tRCD + tCL;
            bankLat += precharge_delay + tRCD + tCL;
        }
    } else if (pageMgmt == Enums::close) {
        // With a close page policy, there is no notion of
        // bank.tRASDoneAt
        if (bank.freeAt > inTime)
            accLat += bank.freeAt - inTime;

        // The bank is free, and you may be able to activate
        potentialActTick = inTime + accLat;
        if (potentialActTick < bank.actAllowedAt)
            accLat += bank.actAllowedAt - potentialActTick;

        // page already closed, simply open the row, and
        // add the CAS latency
        accLat += tRCD + tCL;
        bankLat += tRCD + tCL;
    } else
        panic("No page management policy chosen\n");

    DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n",
            bankLat, accLat);

    return make_pair(bankLat, accLat);
}

void
DRAMCtrl::recordActivate(Tick act_tick, uint8_t rank, uint8_t bank,
                         uint16_t row)
{
    assert(rank < ranksPerChannel);
    assert(actTicks[rank].size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // idleStartTick is the tick when all the banks were
    // precharged. Thus, the difference between act_tick and
    // idleStartTick gives the time for which the DRAM is in an idle
    // state with all banks precharged. Note that we may end up
    // "changing history" by scheduling an activation before an
    // already scheduled precharge, effectively canceling it out.
    if (numBanksActive == 0 && act_tick > idleStartTick) {
        prechargeAllTime += act_tick - idleStartTick;
    }

    // update the open row
    assert(banks[rank][bank].openRow == Bank::NO_ROW);
    banks[rank][bank].openRow = row;

    // start counting anew, this covers both the case when we
    // auto-precharged, and when this access is forced to
    // precharge
    banks[rank][bank].bytesAccessed = 0;
    banks[rank][bank].rowAccesses = 0;

    ++numBanksActive;
    assert(numBanksActive <= banksPerRank * ranksPerChannel);

    DPRINTF(DRAM, "Activate bank at tick %lld, now got %d active\n",
            act_tick, numBanksActive);

    // start by enforcing tRRD
    for (int i = 0; i < banksPerRank; i++) {
        // next activate must not happen before tRRD
        banks[rank][i].actAllowedAt = act_tick + tRRD;
    }
    // tRC should be added to the activation tick of the bank currently
    // accessed, where tRC = tRAS + tRP, this is just for a check as
    // actAllowedAt for the same bank is already captured by bank.freeAt
    // and bank.tRASDoneAt
    banks[rank][bank].actAllowedAt = act_tick + tRAS + tRP;

    // next, we deal with tXAW, if the activation limit is disabled
    // then we are done
    if (actTicks[rank].empty())
        return;

    // sanity check
    if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) {
        // @todo For now, stick with a warning
        warn("Got %d activates in window %d (%d - %d) which is smaller "
             "than %d\n", activationLimit, act_tick - actTicks[rank].back(),
             act_tick, actTicks[rank].back(), tXAW);
    }

    // shift the times used for the book keeping, the last element
    // (highest index) is the oldest one and hence the lowest value
    actTicks[rank].pop_back();

    // record a new activation (in the future)
    actTicks[rank].push_front(act_tick);

    // cannot activate more than X times in time window tXAW, push the
    // next one (the X + 1'st activate) to be tXAW away from the
    // oldest in our window of X
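    // e.g., with activationLimit X = 4 this models the tFAW window:
    // a fifth activate may not issue until tXAW after the oldest of
    // the last four activates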
    if (actTicks[rank].back() && (act_tick - actTicks[rank].back()) < tXAW) {
        DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier "
                "than %d\n", activationLimit, actTicks[rank].back() + tXAW);
        for (int j = 0; j < banksPerRank; j++)
            // next activate must not happen before end of window
            banks[rank][j].actAllowedAt = actTicks[rank].back() + tXAW;
    }
}

void
DRAMCtrl::prechargeBank(Bank& bank, Tick free_at)
{
    // make sure the bank has an open row
    assert(bank.openRow != Bank::NO_ROW);

    // sample the bytes per activate here since we are closing
    // the page
    bytesPerActivate.sample(bank.bytesAccessed);

    bank.openRow = Bank::NO_ROW;

    bank.freeAt = free_at;

    assert(numBanksActive != 0);
    --numBanksActive;

    DPRINTF(DRAM, "Precharged bank, done at tick %lld, now got %d active\n",
            bank.freeAt, numBanksActive);

    // if we reached zero, then special conditions apply as we track
    // if all banks are precharged for the power models
    if (numBanksActive == 0) {
        idleStartTick = std::max(idleStartTick, bank.freeAt);
        DPRINTF(DRAM, "All banks precharged at tick: %ld\n",
                idleStartTick);
    }
}

void
DRAMCtrl::doDRAMAccess(DRAMPacket* dram_pkt)
{
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // estimate the bank and access latency
    pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick());
    Tick bankLat = lat.first;
    Tick accessLat = lat.second;
    Tick actTick;

    // This request was woken up at this time based on a prior call
    // to estimateLatency(). However, between then and now, both the
    // accessLatency and/or busBusyUntil may have changed. We need
    // to correct for that.
    Tick addDelay = (curTick() + accessLat < busBusyUntil) ?
        busBusyUntil - (curTick() + accessLat) : 0;

    Bank& bank = dram_pkt->bankRef;

    // Update bank state
    if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive ||
        pageMgmt == Enums::close_adaptive) {

        if (rowHitFlag) {
            bank.freeAt = curTick() + addDelay + accessLat;
        } else {
            // If there is a page open, precharge it.
            if (bank.openRow != Bank::NO_ROW) {
                prechargeBank(bank, std::max(std::max(bank.freeAt,
                                                      bank.tRASDoneAt),
                                             curTick()) + tRP);
            }

            // Any precharge is already part of the latency
            // estimation, so update the bank free time
            bank.freeAt = curTick() + addDelay + accessLat;

            // any waiting for banks is already accounted for in freeAt
            actTick = bank.freeAt - tCL - tRCD;

            // If you activated a new row due to this access, the next
            // access will have to respect tRAS for this bank
            bank.tRASDoneAt = actTick + tRAS;

            recordActivate(actTick, dram_pkt->rank, dram_pkt->bank,
                           dram_pkt->row);
        }

        // increment the bytes accessed and the accesses per row
        bank.bytesAccessed += burstSize;
        ++bank.rowAccesses;

        // if we reached the max, then issue with an auto-precharge
        bool auto_precharge = bank.rowAccesses == maxAccessesPerRow;

        // if we did not hit the limit, we might still want to
        // auto-precharge
        if (!auto_precharge &&
            (pageMgmt == Enums::open_adaptive ||
             pageMgmt == Enums::close_adaptive)) {
            // a twist on the open and close page policies:
            // 1) open_adaptive page policy does not blindly keep the
            // page open, but closes it if there are no row hits and there
            // are bank conflicts in the queue
            // 2) close_adaptive page policy does not blindly close the
            // page, but closes it only if there are no row hits in the queue.
            // In this case, only force an auto precharge when there
            // are no same page hits in the queue
            bool got_more_hits = false;
            bool got_bank_conflict = false;

            // either look at the read queue or write queue
            const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue :
                writeQueue;
            auto p = queue.begin();
            // make sure we are not considering the packet that we are
            // currently dealing with (which is the head of the queue)
            ++p;

            // keep on looking until we have found the required condition or
            // reached the end
            while (!(got_more_hits &&
                    (got_bank_conflict || pageMgmt == Enums::close_adaptive)) &&
                   p != queue.end()) {
                bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                    (dram_pkt->bank == (*p)->bank);
                bool same_row = dram_pkt->row == (*p)->row;
                got_more_hits |= same_rank_bank && same_row;
                got_bank_conflict |= same_rank_bank && !same_row;
                ++p;
            }

            // auto pre-charge when either
            // 1) open_adaptive policy, we have not got any more hits, and
            //    have a bank conflict
            // 2) close_adaptive policy and we have not got any more hits
            auto_precharge = !got_more_hits &&
                (got_bank_conflict || pageMgmt == Enums::close_adaptive);
        }

        // if this access should use auto-precharge, then we are
        // closing the row
        if (auto_precharge) {
            prechargeBank(bank, std::max(bank.freeAt, bank.tRASDoneAt) + tRP);

            DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
        }

        DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
    } else if (pageMgmt == Enums::close) {
        actTick = curTick() + addDelay + accessLat - tRCD - tCL;
        recordActivate(actTick, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

        bank.freeAt = actTick + tRCD + tCL;
        bank.tRASDoneAt = actTick + tRAS;

        // sample the relevant values when precharging
        bank.bytesAccessed = burstSize;
        bank.rowAccesses = 1;

        prechargeBank(bank, std::max(bank.freeAt, bank.tRASDoneAt) + tRP);
        DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
    } else
        panic("No page management policy chosen\n");

    // Update request parameters
    dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST;

    DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d " \
                  "readytime is %lld busbusyuntil is %lld. " \
                  "Scheduling at readyTime\n", dram_pkt->addr,
                   curTick(), accessLat, dram_pkt->readyTime, busBusyUntil);

    // Make sure requests are not overlapping on the databus
    assert(dram_pkt->readyTime - busBusyUntil >= tBURST);

    // Update bus state
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM, "Access time is %lld\n",
            dram_pkt->readyTime - dram_pkt->entryTime);

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = busBusyUntil - (tRP + tRCD + tCL);

    // Update the stats and schedule the next request
    if (dram_pkt->isRead) {
        ++readsThisTime;
        if (rowHitFlag)
            readRowHits++;
        bytesReadDRAM += burstSize;
        perBankRdBursts[dram_pkt->bankId]++;

        // Update latency stats
        totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
        totBankLat += bankLat;
        totBusLat += tBURST;
        totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat -
            tBURST;
    } else {
        ++writesThisTime;
        if (rowHitFlag)
            writeRowHits++;
        bytesWritten += burstSize;
        perBankWrBursts[dram_pkt->bankId]++;
    }
}

void
DRAMCtrl::moveToRespQ()
{
    // Remove from read queue
    DRAMPacket* dram_pkt = readQueue.front();
    readQueue.pop_front();

    // sanity check
    assert(dram_pkt->size <= burstSize);

    // Insert into response queue sorted by readyTime. It will be
    // sent back to the requestor at its readyTime
    if (respQueue.empty()) {
        respQueue.push_front(dram_pkt);
        assert(!respondEvent.scheduled());
        assert(dram_pkt->readyTime >= curTick());
        schedule(respondEvent, dram_pkt->readyTime);
    } else {
        bool done = false;
        auto i = respQueue.begin();
        while (!done && i != respQueue.end()) {
            if ((*i)->readyTime > dram_pkt->readyTime) {
                respQueue.insert(i, dram_pkt);
                done = true;
            }
            ++i;
        }

        if (!done)
            respQueue.push_back(dram_pkt);

        assert(respondEvent.scheduled());

        if (respQueue.front()->readyTime < respondEvent.when()) {
            assert(respQueue.front()->readyTime >= curTick());
            reschedule(respondEvent, respQueue.front()->readyTime);
        }
    }
}

void
DRAMCtrl::processNextReqEvent()
{
    if (busState == READ_TO_WRITE) {
        DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
                "waiting\n", readsThisTime, readQueue.size());

        // sample and reset the read-related stats as we are now
        // transitioning to writes, and all reads are done
        rdPerTurnAround.sample(readsThisTime);
        readsThisTime = 0;

        // now proceed to do the actual writes
        busState = WRITE;
    } else if (busState == WRITE_TO_READ) {
        DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
                "waiting\n", writesThisTime, writeQueue.size());

        wrPerTurnAround.sample(writesThisTime);
        writesThisTime = 0;

        busState = READ;
    }

    if (refreshState != REF_IDLE) {
        // if a refresh is waiting for this event loop to finish, then
        // hand over now, and do not schedule a new nextReqEvent
        if (refreshState == REF_DRAIN) {
            DPRINTF(DRAM, "Refresh drain done, now precharging\n");

            refreshState = REF_PRE;

            // hand control back to the refresh event loop
            schedule(refreshEvent, curTick());
        }

        // let the refresh finish before issuing any further requests
        return;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (readQueue.empty()) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!writeQueue.empty() &&
                (drainManager || writeQueue.size() > writeLowThreshold)) {

                switch_to_writes = true;
            } else {
                // check if we are drained
                if (respQueue.empty() && drainManager) {
                    drainManager->signalDrainDone();
                    drainManager = NULL;
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {
            // Figure out which read request goes next, and move it to the
            // front of the read queue
            chooseNext(readQueue);

            doDRAMAccess(readQueue.front());

            // At this point we're done dealing with the request
            // It will be moved to a separate response queue with a
            // correct readyTime, and eventually be sent back at that
            // time
            moveToRespQ();

            // we have so many writes that we have to transition
            if (writeQueue.size() > writeHighThreshold) {
                switch_to_writes = true;
            }
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busState = READ_TO_WRITE;

            // add a bubble to the data bus, as defined by the
            // tRTW parameter
            busBusyUntil += tRTW;

            // update the minimum timing between the requests,
            // this shifts us back in time far enough to do any
            // bank preparation
            nextReqTime = busBusyUntil - (tRP + tRCD + tCL);
        }
    } else {
        chooseNext(writeQueue);
        DRAMPacket* dram_pkt = writeQueue.front();
        // sanity check
        assert(dram_pkt->size <= burstSize);
        doDRAMAccess(dram_pkt);

        writeQueue.pop_front();
        delete dram_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        if (writeQueue.empty() ||
            (writeQueue.size() + minWritesPerSwitch < writeLowThreshold &&
             !drainManager) ||
            (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
            // turn the bus back around for reads again
            busState = WRITE_TO_READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do

            // here we get a bit creative and shift the bus busy time not
            // just by tWTR, but also by a CAS latency to capture the fact
            // that we are allowed to prepare a new bank, but not issue a
            // read command until after tWTR, in essence we capture a
            // bubble on the data bus that is tWTR + tCL
            busBusyUntil += tWTR + tCL;

            // update the minimum timing between the requests, this shifts
            // us back in time far enough to do any bank preparation
            nextReqTime = busBusyUntil - (tRP + tRCD + tCL);
        }
    }

    schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && writeQueue.size() < writeBufferSize) {
        retryWrReq = false;
        port.sendRetry();
    }
}

uint64_t
DRAMCtrl::minBankFreeAt(const deque<DRAMPacket*>& queue) const
{
    uint64_t bank_mask = 0;
    Tick freeAt = MaxTick;

    // determine if we have queued transactions targeting the
    // banks in question
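    // the returned mask has bit (rank * banksPerRank + bank) set for
    // every bank that both has a request waiting and becomes free
    // earliest, matching the bankId encoding used by reorderQueue()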
    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
    for (auto p = queue.begin(); p != queue.end(); ++p) {
        got_waiting[(*p)->bankId] = true;
    }

    for (int i = 0; i < ranksPerChannel; i++) {
        for (int j = 0; j < banksPerRank; j++) {
            // if we have waiting requests for the bank, and it is
            // amongst the first available, update the mask
            if (got_waiting[i * banksPerRank + j] &&
                banks[i][j].freeAt <= freeAt) {
                // reset bank mask if new minimum is found
                if (banks[i][j].freeAt < freeAt)
                    bank_mask = 0;
                // set the bit corresponding to the available bank,
                // indexed consistently with bankId (banksPerRank * rank
                // + bank) rather than by ranksPerChannel
                uint8_t bit_index = i * banksPerRank + j;
                replaceBits(bank_mask, bit_index, bit_index, 1);
                freeAt = banks[i][j].freeAt;
            }
        }
    }
    return bank_mask;
}

void
DRAMCtrl::processRefreshEvent()
{
1388    // when first preparing the refresh, remember when it was due
1389    if (refreshState == REF_IDLE) {
1390        // remember when the refresh is due
1391        refreshDueAt = curTick();
1392
1393        // proceed to drain
1394        refreshState = REF_DRAIN;
1395
1396        DPRINTF(DRAM, "Refresh due\n");
1397    }
1398
    // let any scheduled read or write go ahead, after which it will
    // hand control back to this event loop
    if (refreshState == REF_DRAIN) {
        if (nextReqEvent.scheduled()) {
            // hand control over to the request loop until it is
            // evaluated next
            DPRINTF(DRAM, "Refresh awaiting draining\n");

            return;
        } else {
            refreshState = REF_PRE;
        }
    }

    // at this point, ensure that all banks are precharged
    if (refreshState == REF_PRE) {
        DPRINTF(DRAM, "Precharging all\n");

        // precharge any active bank
        for (int i = 0; i < ranksPerChannel; i++) {
            for (int j = 0; j < banksPerRank; j++) {
                if (banks[i][j].openRow != Bank::NO_ROW) {
                    // respect both causality and any existing bank
                    // constraints
                    Tick free_at = std::max(std::max(banks[i][j].freeAt,
                                                     banks[i][j].tRASDoneAt),
                                            curTick()) + tRP;

                    prechargeBank(banks[i][j], free_at);
                }
            }
        }

        if (numBanksActive != 0)
            panic("Refresh scheduled with %d active banks\n", numBanksActive);

        // advance the state
        refreshState = REF_RUN;

        // call ourselves in the future
        schedule(refreshEvent, std::max(curTick(), idleStartTick));
        return;
    }

    // last but not least we perform the actual refresh
    if (refreshState == REF_RUN) {
        // should never get here with any banks active
        assert(numBanksActive == 0);

        Tick banksFree = curTick() + tRFC;

        for (int i = 0; i < ranksPerChannel; i++) {
            for (int j = 0; j < banksPerRank; j++) {
                banks[i][j].freeAt = banksFree;
            }
        }

        // make sure we did not wait so long that we cannot make up
        // for it
        if (refreshDueAt + tREFI < banksFree) {
            fatal("Refresh was delayed so long we cannot catch up\n");
        }

        // compensate for the delay in actually performing the refresh
        // when scheduling the next one
        schedule(refreshEvent, refreshDueAt + tREFI - tRP);
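
        // as a hedged illustration (typical DDR3 values, not read from
        // the config): with tREFI = 7.8 us and tRFC = 260 ns, all banks
        // are blocked for 260 ns per refresh, and the next refresh is
        // lined up 7.8 us after the previous one was due, less tRP so
        // the precharge-all can complete first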

        // back to business as usual
        refreshState = REF_IDLE;

        // we are now refreshing until tRFC is done
        idleStartTick = banksFree;

        // kick the normal request processing loop into action again
        // as early as possible, i.e. when the refresh is done; the
        // scheduling of this event also prevents any new requests
        // from going ahead before the scheduled point in time
        nextReqTime = banksFree;
        schedule(nextReqEvent, nextReqTime);
    }
}

void
DRAMCtrl::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    readReqs
        .name(name() + ".readReqs")
        .desc("Number of read requests accepted");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Number of write requests accepted");

    readBursts
        .name(name() + ".readBursts")
        .desc("Number of DRAM read bursts, "
              "including those serviced by the write queue");

    writeBursts
        .name(name() + ".writeBursts")
        .desc("Number of DRAM write bursts, "
              "including those merged in the write queue");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of DRAM read bursts serviced by the write queue");

    mergedWrBursts
        .name(name() + ".mergedWrBursts")
        .desc("Number of DRAM write bursts merged with an existing one");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWriteReqs")
        .desc("Number of requests that are neither read nor write");

    perBankRdBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdBursts")
        .desc("Per bank read bursts");

    perBankWrBursts
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrBursts")
        .desc("Per bank write bursts");

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length when enqueuing")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length when enqueuing")
        .precision(2);

    totQLat
        .name(name() + ".totQLat")
        .desc("Total ticks spent queuing");

    totBankLat
        .name(name() + ".totBankLat")
        .desc("Total ticks spent accessing banks");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total ticks spent in databus transfers");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Total ticks spent from burst creation until serviced "
              "by the DRAM");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per DRAM burst")
        .precision(2);

    avgQLat = totQLat / (readBursts - servicedByWrQ);
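    // note that reads serviced by the write queue never access the
    // DRAM, and merged write bursts fold into an existing queue entry,
    // so both are excluded from the per-burst averages and hit rates
    // computed here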

    avgBankLat
        .name(name() + ".avgBankLat")
        .desc("Average bank access latency per DRAM burst")
        .precision(2);

    avgBankLat = totBankLat / (readBursts - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per DRAM burst")
        .precision(2);

    avgBusLat = totBusLat / (readBursts - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency per DRAM burst")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readBursts - servicedByWrQ);

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times read queue was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times write queue was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readBursts - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / (writeBursts - mergedWrBursts)) * 100;

    readPktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".readPktSize")
        .desc("Read request sizes (log2)");

    writePktSize
        .init(ceilLog2(burstSize) + 1)
        .name(name() + ".writePktSize")
        .desc("Write request sizes (log2)");

    rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

    wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");

    bytesPerActivate
        .init(maxAccessesPerRow)
        .name(name() + ".bytesPerActivate")
        .desc("Bytes accessed per row activation")
        .flags(nozero);

    rdPerTurnAround
        .init(readBufferSize)
        .name(name() + ".rdPerTurnAround")
        .desc("Reads before turning the bus around for writes")
        .flags(nozero);

    wrPerTurnAround
        .init(writeBufferSize)
        .name(name() + ".wrPerTurnAround")
        .desc("Writes before turning the bus around for reads")
        .flags(nozero);

    bytesReadDRAM
        .name(name() + ".bytesReadDRAM")
        .desc("Total number of bytes read from DRAM");

    bytesReadWrQ
        .name(name() + ".bytesReadWrQ")
        .desc("Total number of bytes read from write queue");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to DRAM");

    bytesReadSys
        .name(name() + ".bytesReadSys")
        .desc("Total read bytes from the system interface side");

    bytesWrittenSys
        .name(name() + ".bytesWrittenSys")
        .desc("Total written bytes from the system interface side");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average DRAM read bandwidth in MByte/s")
        .precision(2);

    avgRdBW = (bytesReadDRAM / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MByte/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgRdBWSys
        .name(name() + ".avgRdBWSys")
        .desc("Average system read bandwidth in MByte/s")
        .precision(2);

    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;

    avgWrBWSys
        .name(name() + ".avgWrBWSys")
        .desc("Average system write bandwidth in MByte/s")
        .precision(2);

    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MByte/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * burstSize / 1000000;
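    // sanity example (illustrative, not config-derived): a DDR3-1600
    // x64 channel has tBURST = 5 ns and burstSize = 64 bytes, giving
    // (1e12 / 5000) * 64 / 1e6 = 12800 MByte/s, matching the PC3-12800
    // rating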

    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);

    // Stats for DRAM Power calculation based on Micron datasheet
    busUtilRead
        .name(name() + ".busUtilRead")
        .desc("Data bus utilization in percentage for reads")
        .precision(2);

    busUtilRead = avgRdBW / peakBW * 100;

    busUtilWrite
        .name(name() + ".busUtilWrite")
        .desc("Data bus utilization in percentage for writes")
        .precision(2);

    busUtilWrite = avgWrBW / peakBW * 100;

    pageHitRate
        .name(name() + ".pageHitRate")
        .desc("Row buffer hit rate, read and write combined")
        .precision(2);

    pageHitRate = (writeRowHits + readRowHits) /
        (writeBursts - mergedWrBursts + readBursts - servicedByWrQ) * 100;

    prechargeAllPercent
        .name(name() + ".prechargeAllPercent")
        .desc("Percentage of time for which DRAM has all the banks in "
              "precharge state")
        .precision(2);

    prechargeAllPercent = prechargeAllTime / simTicks * 100;
}

void
DRAMCtrl::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}

BaseSlavePort&
DRAMCtrl::getSlavePort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return port;
    }
}

unsigned int
DRAMCtrl::drain(DrainManager *dm)
{
    unsigned int count = port.drain(dm);

    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(writeQueue.empty() && readQueue.empty() &&
          respQueue.empty())) {
        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", writeQueue.size(), readQueue.size(),
                respQueue.size());
        ++count;
        drainManager = dm;

        // the only part that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (!writeQueue.empty() && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }
    }

    if (count)
        setDrainState(Drainable::Draining);
    else
        setDrainState(Drainable::Drained);
    return count;
}

DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }

AddrRangeList
DRAMCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    ranges.push_back(memory.getAddrRange());
    return ranges;
}

void
DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}

Tick
DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}

bool
DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}

DRAMCtrl*
DRAMCtrlParams::create()
{
    return new DRAMCtrl(this);
}