dram_ctrl.cc revision 9566
111723Sar4jc@virginia.edu/*
211723Sar4jc@virginia.edu * Copyright (c) 2010-2012 ARM Limited
311723Sar4jc@virginia.edu * All rights reserved
412120Sar4jc@virginia.edu *
511723Sar4jc@virginia.edu * The license below extends only to copyright in the software and shall
611723Sar4jc@virginia.edu * not be construed as granting a license to any other intellectual
711723Sar4jc@virginia.edu * property including but not limited to intellectual property relating
811723Sar4jc@virginia.edu * to a hardware implementation of the functionality of the software
911723Sar4jc@virginia.edu * licensed hereunder.  You may use the software subject to the license
1011723Sar4jc@virginia.edu * terms below provided that you ensure that this notice is replicated
1111723Sar4jc@virginia.edu * unmodified and in its entirety in all distributions of the software,
1211723Sar4jc@virginia.edu * modified or unmodified, in source code or in binary form.
1311723Sar4jc@virginia.edu *
1411723Sar4jc@virginia.edu * Redistribution and use in source and binary forms, with or without
1511723Sar4jc@virginia.edu * modification, are permitted provided that the following conditions are
1611723Sar4jc@virginia.edu * met: redistributions of source code must retain the above copyright
1711723Sar4jc@virginia.edu * notice, this list of conditions and the following disclaimer;
1811723Sar4jc@virginia.edu * redistributions in binary form must reproduce the above copyright
1911723Sar4jc@virginia.edu * notice, this list of conditions and the following disclaimer in the
2011723Sar4jc@virginia.edu * documentation and/or other materials provided with the distribution;
2111723Sar4jc@virginia.edu * neither the name of the copyright holders nor the names of its
2211723Sar4jc@virginia.edu * contributors may be used to endorse or promote products derived from
2311723Sar4jc@virginia.edu * this software without specific prior written permission.
2411723Sar4jc@virginia.edu *
2511723Sar4jc@virginia.edu * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2611723Sar4jc@virginia.edu * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2711723Sar4jc@virginia.edu * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
2811723Sar4jc@virginia.edu * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
2911723Sar4jc@virginia.edu * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
3011723Sar4jc@virginia.edu * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
3111723Sar4jc@virginia.edu * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
3211723Sar4jc@virginia.edu * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
3311723Sar4jc@virginia.edu * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3411723Sar4jc@virginia.edu * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
3511723Sar4jc@virginia.edu * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3611723Sar4jc@virginia.edu *
3712120Sar4jc@virginia.edu * Authors: Andreas Hansson
3812120Sar4jc@virginia.edu *          Ani Udipi
3912120Sar4jc@virginia.edu */
4012120Sar4jc@virginia.edu
4112120Sar4jc@virginia.edu#include "base/trace.hh"
4212120Sar4jc@virginia.edu#include "debug/Drain.hh"
4312120Sar4jc@virginia.edu#include "debug/DRAM.hh"
4412120Sar4jc@virginia.edu#include "debug/DRAMWR.hh"
4512136Sar4jc@virginia.edu#include "mem/simple_dram.hh"
4612136Sar4jc@virginia.edu#include "sim/stat_control.hh"
4712120Sar4jc@virginia.edu
4812120Sar4jc@virginia.eduusing namespace std;
4912120Sar4jc@virginia.edu
// Construct the controller from the Python-generated parameter
// struct. Geometry that depends on the connected port — the cache
// line size and the derived rows-per-bank count — is left at zero
// here and filled in later by init().
SimpleDRAM::SimpleDRAM(const SimpleDRAMParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this),
    retryRdReq(false), retryWrReq(false),
    rowHitFlag(false), stopReads(false), actTicks(p->activation_limit, 0),
    writeEvent(this), respondEvent(this),
    refreshEvent(this), nextReqEvent(this), drainManager(NULL),
    bytesPerCacheLine(0),
    linesPerRowBuffer(p->lines_per_rowbuffer),
    ranksPerChannel(p->ranks_per_channel),
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeThresholdPerc(p->write_thresh_perc),
    tWTR(p->tWTR), tBURST(p->tBURST),
    tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP),
    tRFC(p->tRFC), tREFI(p->tREFI),
    tXAW(p->tXAW), activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    busBusyUntil(0), prevdramaccess(0), writeStartTime(0),
    prevArrival(0), numReqs(0)
{
    // create the bank states based on the dimensions of the ranks and
    // banks
    banks.resize(ranksPerChannel);
    for (size_t c = 0; c < ranksPerChannel; ++c) {
        banks[c].resize(banksPerRank);
    }

    // round the write threshold percent to a whole number of entries
    // in the buffer; writes are drained in bulk once the write queue
    // grows beyond this many packets (see addToWriteQueue)
    writeThreshold = writeBufferSize * writeThresholdPerc / 100.0;
}
8412120Sar4jc@virginia.edu
8512120Sar4jc@virginia.eduvoid
8612120Sar4jc@virginia.eduSimpleDRAM::init()
8712120Sar4jc@virginia.edu{
8812120Sar4jc@virginia.edu    if (!port.isConnected()) {
8912120Sar4jc@virginia.edu        fatal("SimpleDRAM %s is unconnected!\n", name());
9011723Sar4jc@virginia.edu    } else {
9112120Sar4jc@virginia.edu        port.sendRangeChange();
9212120Sar4jc@virginia.edu    }
9312120Sar4jc@virginia.edu
9412120Sar4jc@virginia.edu    // get the cache line size from the connected port
9512120Sar4jc@virginia.edu    bytesPerCacheLine = port.peerBlockSize();
9612120Sar4jc@virginia.edu
9711723Sar4jc@virginia.edu    // we could deal with plenty options here, but for now do a quick
9811723Sar4jc@virginia.edu    // sanity check
9911723Sar4jc@virginia.edu    if (bytesPerCacheLine != 64 && bytesPerCacheLine != 32)
10012120Sar4jc@virginia.edu        panic("Unexpected cache line size %d", bytesPerCacheLine);
10112120Sar4jc@virginia.edu
10212120Sar4jc@virginia.edu    // determine the rows per bank by looking at the total capacity
10312120Sar4jc@virginia.edu    uint64_t capacity = AbstractMemory::size();
10412120Sar4jc@virginia.edu    uint64_t i = 1;
10512120Sar4jc@virginia.edu    while (i < 64 && capacity > ((1 << i))) {
10612120Sar4jc@virginia.edu        ++i;
10712136Sar4jc@virginia.edu    }
10812136Sar4jc@virginia.edu
10912136Sar4jc@virginia.edu    // rounded up to nearest power of two
11012136Sar4jc@virginia.edu    DPRINTF(DRAM, "i is %lld\n", i);
11112136Sar4jc@virginia.edu    capacity = 1 << i;
11212136Sar4jc@virginia.edu
11312120Sar4jc@virginia.edu    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
11412120Sar4jc@virginia.edu            AbstractMemory::size());
11512120Sar4jc@virginia.edu    rowsPerBank = capacity / (bytesPerCacheLine * linesPerRowBuffer *
11612120Sar4jc@virginia.edu                              banksPerRank * ranksPerChannel);
11712120Sar4jc@virginia.edu
11812120Sar4jc@virginia.edu    if (range.interleaved()) {
11912120Sar4jc@virginia.edu        if (channels != range.stripes())
12012120Sar4jc@virginia.edu            panic("%s has %d interleaved address stripes but %d channel(s)\n",
12112120Sar4jc@virginia.edu                  name(), range.stripes(), channels);
12212120Sar4jc@virginia.edu
12312120Sar4jc@virginia.edu        if (addrMapping == Enums::openmap) {
12412120Sar4jc@virginia.edu            if (bytesPerCacheLine * linesPerRowBuffer !=
12512120Sar4jc@virginia.edu                range.granularity()) {
12612120Sar4jc@virginia.edu                panic("Interleaving of %s doesn't match open address map\n",
12712120Sar4jc@virginia.edu                      name());
12812120Sar4jc@virginia.edu            }
12912120Sar4jc@virginia.edu        } else if (addrMapping == Enums::closemap) {
13012120Sar4jc@virginia.edu            if (bytesPerCacheLine != range.granularity())
13112120Sar4jc@virginia.edu                panic("Interleaving of %s doesn't match closed address map\n",
13212120Sar4jc@virginia.edu                      name());
13312120Sar4jc@virginia.edu        }
13412120Sar4jc@virginia.edu    }
13512120Sar4jc@virginia.edu}
13612120Sar4jc@virginia.edu
void
SimpleDRAM::startup()
{
    // print the configuration of the controller
    printParams();

    // kick off the first refresh one refresh interval (tREFI) after
    // simulation start
    schedule(&refreshEvent, curTick() + tREFI);
}
14612120Sar4jc@virginia.edu
14712120Sar4jc@virginia.edu
14812120Sar4jc@virginia.eduTick
14912120Sar4jc@virginia.eduSimpleDRAM::recvAtomic(PacketPtr pkt)
15012120Sar4jc@virginia.edu{
15112120Sar4jc@virginia.edu    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());
15212120Sar4jc@virginia.edu
15312120Sar4jc@virginia.edu    // do the actual memory access and turn the packet into a response
15412120Sar4jc@virginia.edu    access(pkt);
15512120Sar4jc@virginia.edu
15612120Sar4jc@virginia.edu    Tick latency = 0;
15712120Sar4jc@virginia.edu    if (!pkt->memInhibitAsserted() && pkt->hasData()) {
15812120Sar4jc@virginia.edu        // this value is not supposed to be accurate, just enough to
15912120Sar4jc@virginia.edu        // keep things going, mimic a closed page
16012120Sar4jc@virginia.edu        latency = tRP + tRCD + tCL;
16112120Sar4jc@virginia.edu    }
16212120Sar4jc@virginia.edu    return latency;
16312120Sar4jc@virginia.edu}
16412120Sar4jc@virginia.edu
16512120Sar4jc@virginia.edubool
16612120Sar4jc@virginia.eduSimpleDRAM::readQueueFull() const
16712120Sar4jc@virginia.edu{
16812120Sar4jc@virginia.edu    DPRINTF(DRAM, "Read queue limit %d current size %d\n",
16912120Sar4jc@virginia.edu            readBufferSize, dramReadQueue.size() + dramRespQueue.size());
17012120Sar4jc@virginia.edu
17112120Sar4jc@virginia.edu    return (dramReadQueue.size() + dramRespQueue.size()) == readBufferSize;
17212120Sar4jc@virginia.edu}
17312120Sar4jc@virginia.edu
17412120Sar4jc@virginia.edubool
17512120Sar4jc@virginia.eduSimpleDRAM::writeQueueFull() const
17612120Sar4jc@virginia.edu{
17712120Sar4jc@virginia.edu    DPRINTF(DRAM, "Write queue limit %d current size %d\n",
17812120Sar4jc@virginia.edu            writeBufferSize, dramWriteQueue.size());
17912120Sar4jc@virginia.edu    return dramWriteQueue.size() == writeBufferSize;
18012120Sar4jc@virginia.edu}
18112120Sar4jc@virginia.edu
18212120Sar4jc@virginia.edu
/**
 * Decode a packet's address into rank, bank and row according to the
 * configured address mapping, and wrap the packet in a DRAMPacket
 * bound to the corresponding bank state.
 */
SimpleDRAM::DRAMPacket*
SimpleDRAM::decodeAddr(PacketPtr pkt)
{
    // decode the address based on the address mapping scheme
    //
    // with R, C, B and K denoting row, column, bank and rank,
    // respectively, and going from MSB to LSB, the two schemes are
    // RKBC (openmap) and RCKB (closedmap)
    uint8_t rank;
    uint16_t bank;
    uint16_t row;

    Addr addr = pkt->getAddr();
    Addr temp = addr;

    // truncate the address to the access granularity
    addr = addr / bytesPerCacheLine;

    // we have removed the lowest order address bits that denote the
    // position within the cache line, proceed and select the
    // appropriate bits for bank, rank and row (no column address is
    // needed)
    if (addrMapping == Enums::openmap) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / linesPerRowBuffer;

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::closemap) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the channel part of the address, note that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next the column bits which we do not need to keep track of
        // and simply skip past
        addr = addr / linesPerRowBuffer;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            temp, rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, they will be updated later
    DRAMPacket* dram_pkt = new DRAMPacket(pkt, rank, bank, row, temp,
                                          banks[rank][bank]);

    return dram_pkt;
}
27012120Sar4jc@virginia.edu
27112120Sar4jc@virginia.eduvoid
27212120Sar4jc@virginia.eduSimpleDRAM::addToReadQueue(PacketPtr pkt)
27312120Sar4jc@virginia.edu{
27412120Sar4jc@virginia.edu    // only add to the read queue here. whenever the request is
27512120Sar4jc@virginia.edu    // eventually done, set the readyTime, and call schedule()
27612120Sar4jc@virginia.edu    assert(!pkt->isWrite());
27712120Sar4jc@virginia.edu
27812120Sar4jc@virginia.edu    // First check write buffer to see if the data is already at
27912120Sar4jc@virginia.edu    // the controller
28012120Sar4jc@virginia.edu    std::list<DRAMPacket*>::const_iterator i;
28112120Sar4jc@virginia.edu    Addr addr = pkt->getAddr();
28212120Sar4jc@virginia.edu
28312120Sar4jc@virginia.edu    // @todo: add size check
28412120Sar4jc@virginia.edu    for (i = dramWriteQueue.begin();  i != dramWriteQueue.end(); ++i) {
28512120Sar4jc@virginia.edu        if ((*i)->addr == addr){
28612120Sar4jc@virginia.edu            servicedByWrQ++;
28712120Sar4jc@virginia.edu            DPRINTF(DRAM,"Serviced by write Q\n");
28812120Sar4jc@virginia.edu            bytesRead += bytesPerCacheLine;
28912120Sar4jc@virginia.edu            bytesConsumedRd += pkt->getSize();
29012120Sar4jc@virginia.edu            accessAndRespond(pkt);
29112120Sar4jc@virginia.edu            return;
29212120Sar4jc@virginia.edu        }
29312120Sar4jc@virginia.edu    }
29412120Sar4jc@virginia.edu
29512120Sar4jc@virginia.edu    DRAMPacket* dram_pkt = decodeAddr(pkt);
29612120Sar4jc@virginia.edu
29712120Sar4jc@virginia.edu    assert(dramReadQueue.size() + dramRespQueue.size() < readBufferSize);
29812120Sar4jc@virginia.edu    rdQLenPdf[dramReadQueue.size() + dramRespQueue.size()]++;
29912120Sar4jc@virginia.edu
30012120Sar4jc@virginia.edu    DPRINTF(DRAM, "Adding to read queue\n");
30112120Sar4jc@virginia.edu
30212120Sar4jc@virginia.edu    dramReadQueue.push_back(dram_pkt);
30312120Sar4jc@virginia.edu
30412120Sar4jc@virginia.edu    // Update stats
30512120Sar4jc@virginia.edu    uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
30612120Sar4jc@virginia.edu    assert(bank_id < ranksPerChannel * banksPerRank);
30712120Sar4jc@virginia.edu    perBankRdReqs[bank_id]++;
30812120Sar4jc@virginia.edu
30912120Sar4jc@virginia.edu    avgRdQLen = dramReadQueue.size() + dramRespQueue.size();
31012120Sar4jc@virginia.edu
31112120Sar4jc@virginia.edu    // Special case where no arbitration is required between requests
31212120Sar4jc@virginia.edu    if (!nextReqEvent.scheduled() && !stopReads) {
31312120Sar4jc@virginia.edu        DPRINTF(DRAM, "Request %lld - need to schedule immediately");
31412120Sar4jc@virginia.edu        schedule(&nextReqEvent, curTick() + 1);
31512120Sar4jc@virginia.edu    }
31612120Sar4jc@virginia.edu}
31712120Sar4jc@virginia.edu
/**
 * Drain the write queue: repeatedly pick the next write (via
 * chooseNextWrite), model its bank/bus timing, and retire it, until
 * the queue is empty or more than writeThreshold writes have been
 * issued this round. Afterwards the bus is turned back to reads and
 * the request scheduler is re-armed.
 */
void
SimpleDRAM::processWriteEvent()
{
    assert(!dramWriteQueue.empty());
    uint32_t numWritesThisTime = 0;

    DPRINTF(DRAMWR, "Beginning DRAM Writes\n");
    // snapshot bus/bank busy times so the summary DPRINTF below can
    // report how much busy time this drain round added
    Tick temp1 M5_VAR_USED = std::max(curTick(), busBusyUntil);
    Tick temp2 M5_VAR_USED = std::max(curTick(), maxBankFreeAt());

    // @todo: are there any dangers with the untimed while loop?
    while (!dramWriteQueue.empty()) {
        if (numWritesThisTime > writeThreshold)
            break;

        // chooseNextWrite reorders the queue so the selected write is
        // at the front
        chooseNextWrite();
        DRAMPacket* dram_pkt = dramWriteQueue.front();
        // earliest point the request can be put on the bus
        Tick schedTime = std::max(curTick(), busBusyUntil);

        DPRINTF(DRAMWR, "Asking for latency estimate at %lld\n",
                schedTime + tBURST);

        pair<Tick, Tick> lat = estimateLatency(dram_pkt, schedTime + tBURST);
        Tick accessLat = lat.second;

        // look at the rowHitFlag set by estimateLatency

        // @todo: Race condition here where another packet gives rise
        // to another call to estimateLatency in the meanwhile?
        if (rowHitFlag)
            writeRowHits++;

        Bank& bank = dram_pkt->bank_ref;

        if (pageMgmt == Enums::open) {
            // leave the row open and account for an activate only on
            // a row miss
            bank.openRow = dram_pkt->row;
            bank.freeAt = schedTime + tBURST + std::max(accessLat, tCL);
            busBusyUntil = bank.freeAt - tCL;

            if (!rowHitFlag) {
                bank.tRASDoneAt = bank.freeAt + tRP;
                recordActivate(bank.freeAt - tCL - tRCD);
                busBusyUntil = bank.freeAt - tCL - tRCD;
            }
        } else if (pageMgmt == Enums::close) {
            bank.freeAt = schedTime + tBURST + accessLat + tRP + tRP;
            // Work backwards from bank.freeAt to determine activate time
            recordActivate(bank.freeAt - tRP - tRP - tCL - tRCD);
            busBusyUntil = bank.freeAt - tRP - tRP - tCL - tRCD;
            DPRINTF(DRAMWR, "processWriteEvent::bank.freeAt for "
                    "banks_id %d is %lld\n",
                    dram_pkt->rank * banksPerRank + dram_pkt->bank,
                    bank.freeAt);
        } else
            panic("Unknown page management policy chosen\n");

        DPRINTF(DRAMWR,"Done writing to address %lld\n",dram_pkt->addr);

        DPRINTF(DRAMWR,"schedtime is %lld, tBURST is %lld, "
                "busbusyuntil is %lld\n",
                schedTime, tBURST, busBusyUntil);

        // the write was already performed when it entered the queue
        // (see addToWriteQueue), so simply retire the bookkeeping
        dramWriteQueue.pop_front();
        delete dram_pkt;

        numWritesThisTime++;
    }

    DPRINTF(DRAMWR, "Completed %d writes, bus busy for %lld ticks,"\
            "banks busy for %lld ticks\n", numWritesThisTime,
            busBusyUntil - temp1, maxBankFreeAt() - temp2);

    // Update stats
    avgWrQLen = dramWriteQueue.size();

    // turn the bus back around for reads again
    busBusyUntil += tWTR;
    stopReads = false;

    // a write that was refused earlier can now be retried
    if (retryWrReq) {
        retryWrReq = false;
        port.sendRetry();
    }

    // if there is nothing left in any queue, signal a drain
    if (dramWriteQueue.empty() && dramReadQueue.empty() &&
        dramRespQueue.empty () && drainManager) {
        drainManager->signalDrainDone();
        drainManager = NULL;
    }

    // Once you're done emptying the write queue, check if there's
    // anything in the read queue, and call schedule if required
    schedule(&nextReqEvent, busBusyUntil);
}
41412120Sar4jc@virginia.edu
41511723Sar4jc@virginia.eduvoid
41611723Sar4jc@virginia.eduSimpleDRAM::triggerWrites()
41712120Sar4jc@virginia.edu{
41812120Sar4jc@virginia.edu    DPRINTF(DRAM, "Writes triggered at %lld\n", curTick());
41912120Sar4jc@virginia.edu    // Flag variable to stop any more read scheduling
42012120Sar4jc@virginia.edu    stopReads = true;
42112120Sar4jc@virginia.edu
42212120Sar4jc@virginia.edu    writeStartTime = std::max(busBusyUntil, curTick()) + tWTR;
42312120Sar4jc@virginia.edu
42412120Sar4jc@virginia.edu    DPRINTF(DRAM, "Writes scheduled at %lld\n", writeStartTime);
42512120Sar4jc@virginia.edu
42612120Sar4jc@virginia.edu    assert(writeStartTime >= curTick());
42711723Sar4jc@virginia.edu    assert(!writeEvent.scheduled());
42812120Sar4jc@virginia.edu    schedule(&writeEvent, writeStartTime);
42912120Sar4jc@virginia.edu}
43012120Sar4jc@virginia.edu
/**
 * Enqueue a write request. The actual memory update and the response
 * to the requester happen immediately; the queued DRAMPacket only
 * models the eventual timing of the write burst. A drain is
 * triggered once the queue exceeds writeThreshold.
 */
void
SimpleDRAM::addToWriteQueue(PacketPtr pkt)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    DRAMPacket* dram_pkt = decodeAddr(pkt);

    // admission is guarded by writeQueueFull() in the caller
    assert(dramWriteQueue.size() < writeBufferSize);
    wrQLenPdf[dramWriteQueue.size()]++;

    DPRINTF(DRAM, "Adding to write queue\n");

    dramWriteQueue.push_back(dram_pkt);

    // Update stats
    uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
    assert(bank_id < ranksPerChannel * banksPerRank);
    perBankWrReqs[bank_id]++;

    avgWrQLen = dramWriteQueue.size();

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads

    bytesConsumedWr += pkt->getSize();
    bytesWritten += bytesPerCacheLine;
    accessAndRespond(pkt);

    // If your write buffer is starting to fill up, drain it!
    if (dramWriteQueue.size() > writeThreshold  && !stopReads){
        triggerWrites();
    }
}
46712120Sar4jc@virginia.edu
/**
 * One-off debug dump (DRAM flag) of the controller's physical
 * organization, buffer/scheduler configuration and timing
 * parameters, intended as a sanity check of the configuration.
 */
void
SimpleDRAM::printParams() const
{
    // Sanity check print of important parameters
    DPRINTF(DRAM,
            "Memory controller %s physical organization\n"      \
            "Bytes per cacheline  %d\n"                         \
            "Lines per row buffer %d\n"                         \
            "Rows  per bank       %d\n"                         \
            "Banks per rank       %d\n"                         \
            "Ranks per channel    %d\n"                         \
            "Total mem capacity   %u\n",
            name(), bytesPerCacheLine ,linesPerRowBuffer, rowsPerBank,
            banksPerRank, ranksPerChannel, bytesPerCacheLine *
            linesPerRowBuffer * rowsPerBank * banksPerRank * ranksPerChannel);

    // translate the enum parameters into readable names
    string scheduler =  memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS";
    string address_mapping = addrMapping == Enums::openmap ? "OPENMAP" :
        "CLOSEMAP";
    string page_policy = pageMgmt == Enums::open ? "OPEN" : "CLOSE";

    DPRINTF(DRAM,
            "Memory controller %s characteristics\n"    \
            "Read buffer size     %d\n"                 \
            "Write buffer size    %d\n"                 \
            "Write buffer thresh  %d\n"                 \
            "Scheduler            %s\n"                 \
            "Address mapping      %s\n"                 \
            "Page policy          %s\n",
            name(), readBufferSize, writeBufferSize, writeThreshold,
            scheduler, address_mapping, page_policy);

    DPRINTF(DRAM, "Memory controller %s timing specs\n" \
            "tRCD    %d ticks\n"                        \
            "tCL     %d ticks\n"                        \
            "tRP     %d ticks\n"                        \
            "tBURST  %d ticks\n"                        \
            "tRFC    %d ticks\n"                        \
            "tREFI   %d ticks\n"                        \
            "tWTR    %d ticks\n",
            name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR);
}
51012120Sar4jc@virginia.edu
51112120Sar4jc@virginia.eduvoid
51212120Sar4jc@virginia.eduSimpleDRAM::printQs() const {
51312120Sar4jc@virginia.edu
51412120Sar4jc@virginia.edu    list<DRAMPacket*>::const_iterator i;
51512120Sar4jc@virginia.edu
51612120Sar4jc@virginia.edu    DPRINTF(DRAM, "===READ QUEUE===\n\n");
51712120Sar4jc@virginia.edu    for (i = dramReadQueue.begin() ;  i != dramReadQueue.end() ; ++i) {
51811723Sar4jc@virginia.edu        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
51912120Sar4jc@virginia.edu    }
52012120Sar4jc@virginia.edu    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
52112120Sar4jc@virginia.edu    for (i = dramRespQueue.begin() ;  i != dramRespQueue.end() ; ++i) {
52212120Sar4jc@virginia.edu        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
52312120Sar4jc@virginia.edu    }
52412120Sar4jc@virginia.edu    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
52512120Sar4jc@virginia.edu    for (i = dramWriteQueue.begin() ;  i != dramWriteQueue.end() ; ++i) {
52612120Sar4jc@virginia.edu        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
52712120Sar4jc@virginia.edu    }
52812120Sar4jc@virginia.edu}
52912120Sar4jc@virginia.edu
// Accept (or reject) a timing-mode request arriving on the port.
// Returns true when the packet has been dealt with (queued, dropped as
// inhibited, or answered immediately) and false when the relevant
// queue is full, in which case the sender must retry later.
bool
SimpleDRAM::recvTimingReq(PacketPtr pkt)
{
    /// @todo temporary hack to deal with memory corruption issues until
    /// 4-phase transactions are complete
    for (int x = 0; x < pendingDelete.size(); x++)
        delete pendingDelete[x];
    pendingDelete.clear();


    // This is where we enter from the outside world
    DPRINTF(DRAM, "Inside recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(),pkt->getAddr(), pkt->getSize());

   // histogram bucket for the request size, computed below
   int index;

   // count full-cache-line requests separately (assumed CPU traffic)
   if (pkt->getSize() == bytesPerCacheLine)
       cpuReqs++;

   // periodically dump the queues for debugging
   if (numReqs % 1000000 == 0)
       printQs();

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // simply drop inhibited packets for now
    if (pkt->memInhibitAsserted()) {
        DPRINTF(DRAM,"Inhibited packet -- Dropping it now\n");
        pendingDelete.push_back(pkt);
        return true;
    }

    unsigned size = pkt->getSize();
    if (size > bytesPerCacheLine)
        panic("Request size %d is greater than cache line size %d",
              size, bytesPerCacheLine);

    // map the request size onto a histogram bucket (see table below):
    // power-of-two sizes get their log2, zero-size and non-power-of-two
    // sizes get dedicated overflow buckets
    if (size == 0)
        index = log2(bytesPerCacheLine) + 1;
    else
        index = log2(size);

    if (size != 0 && (1 << index) != size)
        index = log2(bytesPerCacheLine) + 2;

    // @todo: Do we really want to do all this before the packet is
    // actually accepted?

    /* Index 0 - Size 1 byte
       Index 1 - Size 2 bytes
       Index 2 - Size 4 bytes
         .
         .
       Index 6 - Size 64 bytes
       Index 7 - Size 0 bytes
       Index 8 - Non-power-of-2 size */

    if (pkt->isRead())
        readPktSize[index]++;
    else if (pkt->isWrite())
        writePktSize[index]++;
    else
        neitherPktSize[index]++;

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        if (readQueueFull()) {
            DPRINTF(DRAM,"Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt);
            readReqs++;
            numReqs++;
        }
    } else if (pkt->isWrite()) {
        if (writeQueueFull()) {
            DPRINTF(DRAM,"Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt);
            writeReqs++;
            numReqs++;
        }
    } else {
        DPRINTF(DRAM,"Neither read nor write, ignore timing\n");
        neitherReadNorWrite++;
        accessAndRespond(pkt);
    }


    // the packet was handled above (the full-queue cases returned false
    // early), so no retry is outstanding any more
    // NOTE(review): this clears *both* flags although only one kind of
    // request was processed -- confirm a pending retry of the other
    // kind cannot be lost here
    retryRdReq = false;
    retryWrReq = false;
    return true;
}
63312120Sar4jc@virginia.edu
// Event handler fired when the entry at the head of the response queue
// reaches its readyTime: send the response back to the requestor, free
// the bookkeeping DRAMPacket, and schedule the event for the next
// entry (or signal a pending drain when everything is empty).
void
SimpleDRAM::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

     PacketPtr pkt = dramRespQueue.front()->pkt;

     // Actually responds to the requestor
     bytesConsumedRd += pkt->getSize();
     bytesRead += bytesPerCacheLine;
     accessAndRespond(pkt);

     // the DRAMPacket has served its purpose, reclaim it
     DRAMPacket* dram_pkt = dramRespQueue.front();
     dramRespQueue.pop_front();
     delete dram_pkt;

     // Update stats
     avgRdQLen = dramReadQueue.size() + dramRespQueue.size();

     if (!dramRespQueue.empty()){
         // the queue is kept sorted by readyTime (see moveToRespQ), so
         // the new front is the next response due
         assert(dramRespQueue.front()->readyTime >= curTick());
         assert(!respondEvent.scheduled());
         schedule(&respondEvent, dramRespQueue.front()->readyTime);
     } else {
         // if there is nothing left in any queue, signal a drain
         if (dramWriteQueue.empty() && dramReadQueue.empty() &&
             drainManager) {
             drainManager->signalDrainDone();
             drainManager = NULL;
         }
     }
}
66712120Sar4jc@virginia.edu
66812120Sar4jc@virginia.eduvoid
66912120Sar4jc@virginia.eduSimpleDRAM::chooseNextWrite()
67012120Sar4jc@virginia.edu{
67112120Sar4jc@virginia.edu    // This method does the arbitration between requests. The chosen
67212120Sar4jc@virginia.edu    // packet is simply moved to the head of the queue. The other
67312120Sar4jc@virginia.edu    // methods know that this is the place to look. For example, with
67412120Sar4jc@virginia.edu    // FCFS, this method does nothing
67512120Sar4jc@virginia.edu    assert(!dramWriteQueue.empty());
67612120Sar4jc@virginia.edu
67712120Sar4jc@virginia.edu    if (dramWriteQueue.size() == 1) {
67812120Sar4jc@virginia.edu        DPRINTF(DRAMWR, "chooseNextWrite(): Single element, nothing to do\n");
67912120Sar4jc@virginia.edu        return;
68012120Sar4jc@virginia.edu    }
68112120Sar4jc@virginia.edu
68212120Sar4jc@virginia.edu    if (memSchedPolicy == Enums::fcfs) {
68312120Sar4jc@virginia.edu
68412120Sar4jc@virginia.edu        // Do nothing, since the correct request is already head
68512120Sar4jc@virginia.edu
68612120Sar4jc@virginia.edu    } else if (memSchedPolicy == Enums::frfcfs) {
68712120Sar4jc@virginia.edu
68812120Sar4jc@virginia.edu        list<DRAMPacket*>::iterator i = dramWriteQueue.begin();
68912120Sar4jc@virginia.edu        bool foundRowHit = false;
69012120Sar4jc@virginia.edu        while (!foundRowHit && i != dramWriteQueue.end()) {
69112120Sar4jc@virginia.edu            DRAMPacket* dram_pkt = *i;
69212120Sar4jc@virginia.edu            const Bank& bank = dram_pkt->bank_ref;
69312120Sar4jc@virginia.edu            if (bank.openRow == dram_pkt->row) { //FR part
69412120Sar4jc@virginia.edu                DPRINTF(DRAMWR,"Row buffer hit\n");
69512120Sar4jc@virginia.edu                dramWriteQueue.erase(i);
69612120Sar4jc@virginia.edu                dramWriteQueue.push_front(dram_pkt);
69712120Sar4jc@virginia.edu                foundRowHit = true;
69812120Sar4jc@virginia.edu            } else { //FCFS part
69912120Sar4jc@virginia.edu                ;
70012120Sar4jc@virginia.edu            }
70112120Sar4jc@virginia.edu            ++i;
70212120Sar4jc@virginia.edu        }
70312120Sar4jc@virginia.edu
70412120Sar4jc@virginia.edu    } else
70512120Sar4jc@virginia.edu        panic("No scheduling policy chosen\n");
70612120Sar4jc@virginia.edu
70712120Sar4jc@virginia.edu    DPRINTF(DRAMWR, "chooseNextWrite(): Something chosen\n");
70812120Sar4jc@virginia.edu}
70912120Sar4jc@virginia.edu
71012120Sar4jc@virginia.edubool
71112120Sar4jc@virginia.eduSimpleDRAM::chooseNextReq()
71212120Sar4jc@virginia.edu{
71312120Sar4jc@virginia.edu    // This method does the arbitration between requests.
71412120Sar4jc@virginia.edu    // The chosen packet is simply moved to the head of the
71512120Sar4jc@virginia.edu    // queue. The other methods know that this is the place
71612120Sar4jc@virginia.edu    // to look. For example, with FCFS, this method does nothing
71712120Sar4jc@virginia.edu    list<DRAMPacket*>::iterator i;
71812120Sar4jc@virginia.edu    DRAMPacket* dram_pkt;
71912120Sar4jc@virginia.edu
72012120Sar4jc@virginia.edu    if (dramReadQueue.empty()){
72112120Sar4jc@virginia.edu        DPRINTF(DRAM, "chooseNextReq(): Returning False\n");
72212120Sar4jc@virginia.edu        return false;
72312120Sar4jc@virginia.edu    }
72412120Sar4jc@virginia.edu
72512120Sar4jc@virginia.edu    if (dramReadQueue.size() == 1)
72612120Sar4jc@virginia.edu        return true;
72712120Sar4jc@virginia.edu
72812120Sar4jc@virginia.edu    if (memSchedPolicy == Enums::fcfs) {
72912120Sar4jc@virginia.edu
73012120Sar4jc@virginia.edu        // Do nothing, since the correct request is already head
73112120Sar4jc@virginia.edu
73211723Sar4jc@virginia.edu    } else if (memSchedPolicy == Enums::frfcfs) {
73312120Sar4jc@virginia.edu
73412120Sar4jc@virginia.edu        for (i = dramReadQueue.begin() ; i != dramReadQueue.end() ; ++i) {
73511724Sar4jc@virginia.edu            dram_pkt = *i;
73612120Sar4jc@virginia.edu            const Bank& bank = dram_pkt->bank_ref;
73712120Sar4jc@virginia.edu            if (bank.openRow == dram_pkt->row) { //FR part
73812120Sar4jc@virginia.edu                DPRINTF(DRAM, "Row buffer hit\n");
73911724Sar4jc@virginia.edu                dramReadQueue.erase(i);
74012120Sar4jc@virginia.edu                dramReadQueue.push_front(dram_pkt);
74111724Sar4jc@virginia.edu                break;
74211724Sar4jc@virginia.edu            } else { //FCFS part
74312120Sar4jc@virginia.edu                ;
74412120Sar4jc@virginia.edu            }
74512120Sar4jc@virginia.edu
74612120Sar4jc@virginia.edu        }
74712120Sar4jc@virginia.edu
74812120Sar4jc@virginia.edu    } else
74912120Sar4jc@virginia.edu        panic("No scheduling policy chosen!\n");
75012120Sar4jc@virginia.edu
75112120Sar4jc@virginia.edu
75212120Sar4jc@virginia.edu    DPRINTF(DRAM,"chooseNextReq(): Chosen something, returning True\n");
75312120Sar4jc@virginia.edu    return true;
75412120Sar4jc@virginia.edu}
75512120Sar4jc@virginia.edu
75612120Sar4jc@virginia.eduvoid
75712120Sar4jc@virginia.eduSimpleDRAM::accessAndRespond(PacketPtr pkt)
75812120Sar4jc@virginia.edu{
75912120Sar4jc@virginia.edu    DPRINTF(DRAM, "Responding to Address %lld.. ",pkt->getAddr());
76012120Sar4jc@virginia.edu
76112120Sar4jc@virginia.edu    bool needsResponse = pkt->needsResponse();
76212120Sar4jc@virginia.edu    // do the actual memory access which also turns the packet into a
76312120Sar4jc@virginia.edu    // response
76411724Sar4jc@virginia.edu    access(pkt);
76512120Sar4jc@virginia.edu
76611724Sar4jc@virginia.edu    // turn packet around to go back to requester if response expected
76711724Sar4jc@virginia.edu    if (needsResponse) {
76812120Sar4jc@virginia.edu        // access already turned the packet into a response
76912120Sar4jc@virginia.edu        assert(pkt->isResponse());
77012120Sar4jc@virginia.edu
77111724Sar4jc@virginia.edu        // @todo someone should pay for this
77212120Sar4jc@virginia.edu        pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;
77311724Sar4jc@virginia.edu
77411724Sar4jc@virginia.edu        // queue the packet in the response queue to be sent out the
77511723Sar4jc@virginia.edu        // next tick
77611723Sar4jc@virginia.edu        port.schedTimingResp(pkt, curTick() + 1);
77711723Sar4jc@virginia.edu    } else {
77812120Sar4jc@virginia.edu    }
77912120Sar4jc@virginia.edu
78012120Sar4jc@virginia.edu    DPRINTF(DRAM, "Done\n");
78112120Sar4jc@virginia.edu
78212120Sar4jc@virginia.edu    return;
78312120Sar4jc@virginia.edu}
78412120Sar4jc@virginia.edu
// Estimate <bank latency, total access latency> for a request reaching
// its bank at tick 'inTime', based on bank state and the page policy.
// Side effect: sets the rowHitFlag member so callers can attribute
// row-buffer hits.
pair<Tick, Tick>
SimpleDRAM::estimateLatency(DRAMPacket* dram_pkt, Tick inTime)
{
    // If a request reaches a bank at tick 'inTime', how much time
    // *after* that does it take to finish the request, depending
    // on bank status and page open policy. Note that this method
    // considers only the time taken for the actual read or write
    // to complete, NOT any additional time thereafter for tRAS or
    // tRP.
    Tick accLat = 0;
    Tick bankLat = 0;
    rowHitFlag = false;

    const Bank& bank = dram_pkt->bank_ref;
    if (pageMgmt == Enums::open) { // open-page policy
        if (bank.openRow == dram_pkt->row) {
            // When we have a row-buffer hit,
            // we don't care about tRAS having expired or not,
            // but do care about bank being free for access
            rowHitFlag = true;

            if (bank.freeAt < inTime) {
               // CAS latency only
               accLat += tCL;
               bankLat += tCL;
            } else {
                // bank still busy past inTime; no extra latency is
                // charged here -- presumably the CAS overlaps with the
                // outstanding work (NOTE(review): confirm this is the
                // intended model)
                accLat += 0;
                bankLat += 0;
            }

        } else {
            // Row-buffer miss, need to close existing row
            // once tRAS has expired, then open the new one,
            // then add cas latency.
            Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt);

            // wait for the bank to become available before precharging
            if (freeTime > inTime)
               accLat += freeTime - inTime;

            accLat += tRP + tRCD + tCL;
            bankLat += tRP + tRCD + tCL;
        }
    } else if (pageMgmt == Enums::close) {

        // With a close page policy, no notion of
        // bank.tRASDoneAt
        if (bank.freeAt > inTime)
            accLat += bank.freeAt - inTime;

        // page already closed, simply open the row, and
        // add cas latency
        accLat += tRCD + tCL;
        bankLat += tRCD + tCL;
    } else
        panic("No page management policy chosen\n");

    DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n",
            bankLat, accLat);

    return make_pair(bankLat, accLat);
}
84612120Sar4jc@virginia.edu
// Event handler: simply delegate to the scheduling logic that picks
// and issues the next request.
void
SimpleDRAM::processNextReqEvent()
{
    scheduleNextReq();
}
85212120Sar4jc@virginia.edu
// Bookkeeping for the tXAW activation window: record an activate at
// act_tick (which may lie in the future, as callers pass projected
// times) and, if the last 'activationLimit' activates fall inside a
// window narrower than tXAW, push out every bank's freeAt so the next
// activate cannot violate the constraint.
void
SimpleDRAM::recordActivate(Tick act_tick)
{
    // actTicks always holds exactly the last activationLimit activates
    assert(actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // sanity check
    if (actTicks.back() && (act_tick - actTicks.back()) < tXAW) {
        panic("Got %d activates in window %d (%d - %d) which is smaller "
              "than %d\n", activationLimit, act_tick - actTicks.back(),
              act_tick, actTicks.back(), tXAW);
    }

    // shift the times used for the book keeping, the last element
    // (highest index) is the oldest one and hence the lowest value
    actTicks.pop_back();

    // record an new activation (in the future)
    actTicks.push_front(act_tick);

    // cannot activate more than X times in time window tXAW, push the
    // next one (the X + 1'st activate) to be tXAW away from the
    // oldest in our window of X
    if (actTicks.back() && (act_tick - actTicks.back()) < tXAW) {
        DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier "
                "than %d\n", activationLimit, actTicks.back() + tXAW);
        for(int i = 0; i < ranksPerChannel; i++)
            for(int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                banks[i][j].freeAt = std::max(banks[i][j].freeAt,
                                              actTicks.back() + tXAW);
    }
}
88712120Sar4jc@virginia.edu
// Carry out the timing calculation for the request at the head of the
// read queue: estimate its latency, update bank and bus state, record
// stats, move it to the response queue, and (re)schedule the event
// that will consider the next request.
void
SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
{

    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // accesses must be processed in non-decreasing tick order
    assert(curTick() >= prevdramaccess);
    prevdramaccess = curTick();

    // estimate the bank and access latency
    // (this also sets the rowHitFlag member, used below)
    pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick());
    Tick bankLat = lat.first;
    Tick accessLat = lat.second;

    // This request was woken up at this time based on a prior call
    // to estimateLatency(). However, between then and now, both the
    // accessLatency and/or busBusyUntil may have changed. We need
    // to correct for that.

    Tick addDelay = (curTick() + accessLat < busBusyUntil) ?
        busBusyUntil - (curTick() + accessLat) : 0;

    Bank& bank = dram_pkt->bank_ref;

    // Update bank state
    if (pageMgmt == Enums::open) {
        bank.openRow = dram_pkt->row;
        bank.freeAt = curTick() + addDelay + accessLat;
        // If you activated a new row do to this access, the next access
        // will have to respect tRAS for this bank. Assume tRAS ~= 3 * tRP.
        // Also need to account for t_XAW
        if (!rowHitFlag) {
            bank.tRASDoneAt = bank.freeAt + tRP;
            recordActivate(bank.freeAt - tCL - tRCD); //since this is open page,
                                                      //no tRP by default
        }
    } else if (pageMgmt == Enums::close) { // accounting for tRAS also
        // assuming that tRAS ~= 3 * tRP, and tRC ~= 4 * tRP, as is common
        // (refer Jacob/Ng/Wang and Micron datasheets)
        bank.freeAt = curTick() + addDelay + accessLat + tRP + tRP;
        recordActivate(bank.freeAt - tRP - tRP - tCL - tRCD); //essentially (freeAt - tRC)
        DPRINTF(DRAM,"doDRAMAccess::bank.freeAt is %lld\n",bank.freeAt);
    } else
        panic("No page management policy chosen\n");

    // Update request parameters
    dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST;


    DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d " \
                  "readytime is %lld busbusyuntil is %lld. " \
                  "Scheduling at readyTime\n", dram_pkt->addr,
                   curTick(), accessLat, dram_pkt->readyTime, busBusyUntil);

    // Make sure requests are not overlapping on the databus
    assert (dram_pkt->readyTime - busBusyUntil >= tBURST);

    // Update bus state
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM,"Access time is %lld\n",
            dram_pkt->readyTime - dram_pkt->entryTime);

    // Update stats
    totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
    totBankLat += bankLat;
    totBusLat += tBURST;
    totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat - tBURST;

    // rowHitFlag was set by the estimateLatency() call above
    if (rowHitFlag)
        readRowHits++;

    // At this point we're done dealing with the request
    // It will be moved to a separate response queue with a
    // correct readyTime, and eventually be sent back at that
    //time
    moveToRespQ();

    // The absolute soonest you have to start thinking about the
    // next request is the longest access time that can occur before
    // busBusyUntil. Assuming you need to meet tRAS, then precharge,
    // open a new row, and access, it is ~4*tRCD.


    Tick newTime = (busBusyUntil > 4 * tRCD) ?
                   std::max(busBusyUntil - 4 * tRCD, curTick()) :
                   curTick();

    if (!nextReqEvent.scheduled() && !stopReads){
        schedule(&nextReqEvent, newTime);
    } else {
        // already scheduled: only pull the event earlier, never later
        if (newTime < nextReqEvent.when())
            reschedule(&nextReqEvent, newTime);
    }


}
98612120Sar4jc@virginia.edu
98712120Sar4jc@virginia.eduvoid
98811725Sar4jc@virginia.eduSimpleDRAM::moveToRespQ()
98912120Sar4jc@virginia.edu{
99012120Sar4jc@virginia.edu    // Remove from read queue
99112120Sar4jc@virginia.edu    DRAMPacket* dram_pkt = dramReadQueue.front();
99212120Sar4jc@virginia.edu    dramReadQueue.pop_front();
99311725Sar4jc@virginia.edu
99411725Sar4jc@virginia.edu    // Insert into response queue sorted by readyTime
99511725Sar4jc@virginia.edu    // It will be sent back to the requestor at its
99611725Sar4jc@virginia.edu    // readyTime
99711725Sar4jc@virginia.edu    if (dramRespQueue.empty()) {
99812120Sar4jc@virginia.edu        dramRespQueue.push_front(dram_pkt);
99912120Sar4jc@virginia.edu        assert(!respondEvent.scheduled());
100012120Sar4jc@virginia.edu        assert(dram_pkt->readyTime >= curTick());
100112120Sar4jc@virginia.edu        schedule(&respondEvent, dram_pkt->readyTime);
100212120Sar4jc@virginia.edu    } else {
100311725Sar4jc@virginia.edu        bool done = false;
100412120Sar4jc@virginia.edu        std::list<DRAMPacket*>::iterator i = dramRespQueue.begin();
100511725Sar4jc@virginia.edu        while (!done && i != dramRespQueue.end()) {
100611725Sar4jc@virginia.edu            if ((*i)->readyTime > dram_pkt->readyTime) {
100712120Sar4jc@virginia.edu                dramRespQueue.insert(i, dram_pkt);
100812120Sar4jc@virginia.edu                done = true;
100912120Sar4jc@virginia.edu            }
101012120Sar4jc@virginia.edu            ++i;
101112120Sar4jc@virginia.edu        }
101212120Sar4jc@virginia.edu
101312120Sar4jc@virginia.edu        if (!done)
101412120Sar4jc@virginia.edu            dramRespQueue.push_back(dram_pkt);
101512120Sar4jc@virginia.edu
101612120Sar4jc@virginia.edu        assert(respondEvent.scheduled());
101712120Sar4jc@virginia.edu
101812120Sar4jc@virginia.edu        if (dramRespQueue.front()->readyTime < respondEvent.when()) {
101911725Sar4jc@virginia.edu            assert(dramRespQueue.front()->readyTime >= curTick());
102011725Sar4jc@virginia.edu            reschedule(&respondEvent, dramRespQueue.front()->readyTime);
102111725Sar4jc@virginia.edu        }
102211725Sar4jc@virginia.edu    }
102311725Sar4jc@virginia.edu
102412120Sar4jc@virginia.edu    if (retryRdReq) {
102512120Sar4jc@virginia.edu         retryRdReq = false;
102612120Sar4jc@virginia.edu         port.sendRetry();
102712120Sar4jc@virginia.edu     }
102812120Sar4jc@virginia.edu}
102911725Sar4jc@virginia.edu
103012120Sar4jc@virginia.eduvoid
103111725Sar4jc@virginia.eduSimpleDRAM::scheduleNextReq()
103211725Sar4jc@virginia.edu{
103312120Sar4jc@virginia.edu    DPRINTF(DRAM, "Reached scheduleNextReq()\n");
103412120Sar4jc@virginia.edu
103512120Sar4jc@virginia.edu    // Figure out which request goes next, and move it to front()
103612120Sar4jc@virginia.edu    if (!chooseNextReq()) {
103712120Sar4jc@virginia.edu        // In the case there is no read request to go next, see if we
103812120Sar4jc@virginia.edu        // are asked to drain, and if so trigger writes, this also
103912120Sar4jc@virginia.edu        // ensures that if we hit the write limit we will do this
104012120Sar4jc@virginia.edu        // multiple times until we are completely drained
104112120Sar4jc@virginia.edu        if (drainManager && !dramWriteQueue.empty() && !writeEvent.scheduled())
104212120Sar4jc@virginia.edu            triggerWrites();
104312120Sar4jc@virginia.edu    } else {
104412120Sar4jc@virginia.edu        doDRAMAccess(dramReadQueue.front());
104511725Sar4jc@virginia.edu    }
104611725Sar4jc@virginia.edu}
104711725Sar4jc@virginia.edu
104811725Sar4jc@virginia.eduTick
104911725Sar4jc@virginia.eduSimpleDRAM::maxBankFreeAt() const
105012120Sar4jc@virginia.edu{
105112120Sar4jc@virginia.edu    Tick banksFree = 0;
105212120Sar4jc@virginia.edu
105312120Sar4jc@virginia.edu    for(int i = 0; i < ranksPerChannel; i++)
105412120Sar4jc@virginia.edu        for(int j = 0; j < banksPerRank; j++)
105511725Sar4jc@virginia.edu            banksFree = std::max(banks[i][j].freeAt, banksFree);
105612120Sar4jc@virginia.edu
105711725Sar4jc@virginia.edu    return banksFree;
105811725Sar4jc@virginia.edu}
105912120Sar4jc@virginia.edu
106012120Sar4jc@virginia.eduvoid
106112120Sar4jc@virginia.eduSimpleDRAM::processRefreshEvent()
106212120Sar4jc@virginia.edu{
106312120Sar4jc@virginia.edu    DPRINTF(DRAM, "Refreshing at tick %ld\n", curTick());
106412120Sar4jc@virginia.edu
106512120Sar4jc@virginia.edu    Tick banksFree = std::max(curTick(), maxBankFreeAt()) + tRFC;
106611725Sar4jc@virginia.edu
106712120Sar4jc@virginia.edu    for(int i = 0; i < ranksPerChannel; i++)
106811725Sar4jc@virginia.edu        for(int j = 0; j < banksPerRank; j++)
106912120Sar4jc@virginia.edu            banks[i][j].freeAt = banksFree;
107012120Sar4jc@virginia.edu
107111725Sar4jc@virginia.edu    schedule(&refreshEvent, curTick() + tREFI);
107211725Sar4jc@virginia.edu}
107311725Sar4jc@virginia.edu
107411725Sar4jc@virginia.eduvoid
107511725Sar4jc@virginia.eduSimpleDRAM::regStats()
107612120Sar4jc@virginia.edu{
107712120Sar4jc@virginia.edu    using namespace Stats;
107812120Sar4jc@virginia.edu
107912120Sar4jc@virginia.edu    AbstractMemory::regStats();
108012120Sar4jc@virginia.edu
108112120Sar4jc@virginia.edu    readReqs
108212120Sar4jc@virginia.edu        .name(name() + ".readReqs")
108312120Sar4jc@virginia.edu        .desc("Total number of read requests seen");
108412120Sar4jc@virginia.edu
108512120Sar4jc@virginia.edu    writeReqs
108612120Sar4jc@virginia.edu        .name(name() + ".writeReqs")
108712120Sar4jc@virginia.edu        .desc("Total number of write requests seen");
108812120Sar4jc@virginia.edu
108912120Sar4jc@virginia.edu    servicedByWrQ
109012120Sar4jc@virginia.edu        .name(name() + ".servicedByWrQ")
109112120Sar4jc@virginia.edu        .desc("Number of read reqs serviced by write Q");
109212120Sar4jc@virginia.edu
109312120Sar4jc@virginia.edu    cpuReqs
109412120Sar4jc@virginia.edu        .name(name() + ".cpureqs")
109512120Sar4jc@virginia.edu        .desc("Reqs generatd by CPU via cache - shady");
109612120Sar4jc@virginia.edu
109712120Sar4jc@virginia.edu    neitherReadNorWrite
109812120Sar4jc@virginia.edu        .name(name() + ".neitherReadNorWrite")
109912120Sar4jc@virginia.edu        .desc("Reqs where no action is needed");
110012120Sar4jc@virginia.edu
110112120Sar4jc@virginia.edu    perBankRdReqs
110212120Sar4jc@virginia.edu        .init(banksPerRank * ranksPerChannel)
110312120Sar4jc@virginia.edu        .name(name() + ".perBankRdReqs")
110412120Sar4jc@virginia.edu        .desc("Track reads on a per bank basis");
110512120Sar4jc@virginia.edu
110612120Sar4jc@virginia.edu    perBankWrReqs
110712120Sar4jc@virginia.edu        .init(banksPerRank * ranksPerChannel)
110812120Sar4jc@virginia.edu        .name(name() + ".perBankWrReqs")
110912120Sar4jc@virginia.edu        .desc("Track writes on a per bank basis");
111012120Sar4jc@virginia.edu
111112120Sar4jc@virginia.edu    avgRdQLen
111212120Sar4jc@virginia.edu        .name(name() + ".avgRdQLen")
111312120Sar4jc@virginia.edu        .desc("Average read queue length over time")
111412120Sar4jc@virginia.edu        .precision(2);
111512120Sar4jc@virginia.edu
111612120Sar4jc@virginia.edu    avgWrQLen
111712120Sar4jc@virginia.edu        .name(name() + ".avgWrQLen")
111812120Sar4jc@virginia.edu        .desc("Average write queue length over time")
111912120Sar4jc@virginia.edu        .precision(2);
112012120Sar4jc@virginia.edu
112112120Sar4jc@virginia.edu    totQLat
112212120Sar4jc@virginia.edu        .name(name() + ".totQLat")
112312120Sar4jc@virginia.edu        .desc("Total cycles spent in queuing delays");
112412120Sar4jc@virginia.edu
112512120Sar4jc@virginia.edu    totBankLat
112612120Sar4jc@virginia.edu        .name(name() + ".totBankLat")
112712120Sar4jc@virginia.edu        .desc("Total cycles spent in bank access");
112812120Sar4jc@virginia.edu
112912120Sar4jc@virginia.edu    totBusLat
113012120Sar4jc@virginia.edu        .name(name() + ".totBusLat")
113112120Sar4jc@virginia.edu        .desc("Total cycles spent in databus access");
113212120Sar4jc@virginia.edu
113312120Sar4jc@virginia.edu    totMemAccLat
113412120Sar4jc@virginia.edu        .name(name() + ".totMemAccLat")
113512120Sar4jc@virginia.edu        .desc("Sum of mem lat for all requests");
113612120Sar4jc@virginia.edu
113712120Sar4jc@virginia.edu    avgQLat
113812120Sar4jc@virginia.edu        .name(name() + ".avgQLat")
113912120Sar4jc@virginia.edu        .desc("Average queueing delay per request")
114012120Sar4jc@virginia.edu        .precision(2);
114112120Sar4jc@virginia.edu
114212120Sar4jc@virginia.edu    avgQLat = totQLat / (readReqs - servicedByWrQ);
114312120Sar4jc@virginia.edu
114412120Sar4jc@virginia.edu    avgBankLat
114512120Sar4jc@virginia.edu        .name(name() + ".avgBankLat")
114612120Sar4jc@virginia.edu        .desc("Average bank access latency per request")
114712120Sar4jc@virginia.edu        .precision(2);
114812120Sar4jc@virginia.edu
114912120Sar4jc@virginia.edu    avgBankLat = totBankLat / (readReqs - servicedByWrQ);
115012120Sar4jc@virginia.edu
115112120Sar4jc@virginia.edu    avgBusLat
115212120Sar4jc@virginia.edu        .name(name() + ".avgBusLat")
115312120Sar4jc@virginia.edu        .desc("Average bus latency per request")
115412120Sar4jc@virginia.edu        .precision(2);
115512120Sar4jc@virginia.edu
115612120Sar4jc@virginia.edu    avgBusLat = totBusLat / (readReqs - servicedByWrQ);
115712120Sar4jc@virginia.edu
115812120Sar4jc@virginia.edu    avgMemAccLat
115912120Sar4jc@virginia.edu        .name(name() + ".avgMemAccLat")
116012120Sar4jc@virginia.edu        .desc("Average memory access latency")
116112120Sar4jc@virginia.edu        .precision(2);
116212120Sar4jc@virginia.edu
116312120Sar4jc@virginia.edu    avgMemAccLat = totMemAccLat / (readReqs - servicedByWrQ);
116412120Sar4jc@virginia.edu
116512120Sar4jc@virginia.edu    numRdRetry
116612120Sar4jc@virginia.edu        .name(name() + ".numRdRetry")
116712120Sar4jc@virginia.edu        .desc("Number of times rd buffer was full causing retry");
116812120Sar4jc@virginia.edu
116912120Sar4jc@virginia.edu    numWrRetry
117012120Sar4jc@virginia.edu        .name(name() + ".numWrRetry")
117112120Sar4jc@virginia.edu        .desc("Number of times wr buffer was full causing retry");
117212120Sar4jc@virginia.edu
117312120Sar4jc@virginia.edu    readRowHits
117412120Sar4jc@virginia.edu        .name(name() + ".readRowHits")
117512120Sar4jc@virginia.edu        .desc("Number of row buffer hits during reads");
117612120Sar4jc@virginia.edu
117712120Sar4jc@virginia.edu    writeRowHits
117812120Sar4jc@virginia.edu        .name(name() + ".writeRowHits")
117912120Sar4jc@virginia.edu        .desc("Number of row buffer hits during writes");
118012120Sar4jc@virginia.edu
118112120Sar4jc@virginia.edu    readRowHitRate
118212120Sar4jc@virginia.edu        .name(name() + ".readRowHitRate")
118312120Sar4jc@virginia.edu        .desc("Row buffer hit rate for reads")
118412120Sar4jc@virginia.edu        .precision(2);
118512120Sar4jc@virginia.edu
118612120Sar4jc@virginia.edu    readRowHitRate = (readRowHits / (readReqs - servicedByWrQ)) * 100;
118712120Sar4jc@virginia.edu
118812120Sar4jc@virginia.edu    writeRowHitRate
118912120Sar4jc@virginia.edu        .name(name() + ".writeRowHitRate")
119012120Sar4jc@virginia.edu        .desc("Row buffer hit rate for writes")
119112120Sar4jc@virginia.edu        .precision(2);
119212120Sar4jc@virginia.edu
119312120Sar4jc@virginia.edu    writeRowHitRate = (writeRowHits / writeReqs) * 100;
119412120Sar4jc@virginia.edu
119512120Sar4jc@virginia.edu    readPktSize
119612120Sar4jc@virginia.edu        .init(log2(bytesPerCacheLine)+3)
119712120Sar4jc@virginia.edu        .name(name() + ".readPktSize")
119812120Sar4jc@virginia.edu        .desc("Categorize read packet sizes");
119912120Sar4jc@virginia.edu
120012120Sar4jc@virginia.edu     writePktSize
120112120Sar4jc@virginia.edu        .init(log2(bytesPerCacheLine)+3)
120212120Sar4jc@virginia.edu        .name(name() + ".writePktSize")
120312120Sar4jc@virginia.edu        .desc("categorize write packet sizes");
120412120Sar4jc@virginia.edu
120512120Sar4jc@virginia.edu     neitherPktSize
120612120Sar4jc@virginia.edu        .init(log2(bytesPerCacheLine)+3)
120712120Sar4jc@virginia.edu        .name(name() + ".neitherpktsize")
120812120Sar4jc@virginia.edu        .desc("categorize neither packet sizes");
120912120Sar4jc@virginia.edu
121012120Sar4jc@virginia.edu     rdQLenPdf
121112120Sar4jc@virginia.edu        .init(readBufferSize + 1)
121212120Sar4jc@virginia.edu        .name(name() + ".rdQLenPdf")
121312120Sar4jc@virginia.edu        .desc("What read queue length does an incoming req see");
121412120Sar4jc@virginia.edu
121512120Sar4jc@virginia.edu     wrQLenPdf
121612120Sar4jc@virginia.edu        .init(writeBufferSize + 1)
121712120Sar4jc@virginia.edu        .name(name() + ".wrQLenPdf")
121812120Sar4jc@virginia.edu        .desc("What write queue length does an incoming req see");
121912120Sar4jc@virginia.edu
122012120Sar4jc@virginia.edu
122112120Sar4jc@virginia.edu    bytesRead
122212120Sar4jc@virginia.edu        .name(name() + ".bytesRead")
122312120Sar4jc@virginia.edu        .desc("Total number of bytes read from memory");
122412120Sar4jc@virginia.edu
122512120Sar4jc@virginia.edu    bytesWritten
122612120Sar4jc@virginia.edu        .name(name() + ".bytesWritten")
122712120Sar4jc@virginia.edu        .desc("Total number of bytes written to memory");
122812120Sar4jc@virginia.edu
122912120Sar4jc@virginia.edu    bytesConsumedRd
123012120Sar4jc@virginia.edu        .name(name() + ".bytesConsumedRd")
123111725Sar4jc@virginia.edu        .desc("bytesRead derated as per pkt->getSize()");
123211725Sar4jc@virginia.edu
123312120Sar4jc@virginia.edu    bytesConsumedWr
123411725Sar4jc@virginia.edu        .name(name() + ".bytesConsumedWr")
123511725Sar4jc@virginia.edu        .desc("bytesWritten derated as per pkt->getSize()");
123612120Sar4jc@virginia.edu
123712120Sar4jc@virginia.edu    avgRdBW
123812120Sar4jc@virginia.edu        .name(name() + ".avgRdBW")
123911725Sar4jc@virginia.edu        .desc("Average achieved read bandwidth in MB/s")
124011725Sar4jc@virginia.edu        .precision(2);
124111725Sar4jc@virginia.edu
124212120Sar4jc@virginia.edu    avgRdBW = (bytesRead / 1000000) / simSeconds;
124312120Sar4jc@virginia.edu
124411725Sar4jc@virginia.edu    avgWrBW
124511725Sar4jc@virginia.edu        .name(name() + ".avgWrBW")
124612120Sar4jc@virginia.edu        .desc("Average achieved write bandwidth in MB/s")
124711725Sar4jc@virginia.edu        .precision(2);
124811725Sar4jc@virginia.edu
124912120Sar4jc@virginia.edu    avgWrBW = (bytesWritten / 1000000) / simSeconds;
125012120Sar4jc@virginia.edu
125111725Sar4jc@virginia.edu    avgConsumedRdBW
125211725Sar4jc@virginia.edu        .name(name() + ".avgConsumedRdBW")
125312120Sar4jc@virginia.edu        .desc("Average consumed read bandwidth in MB/s")
125411725Sar4jc@virginia.edu        .precision(2);
125512120Sar4jc@virginia.edu
125611725Sar4jc@virginia.edu    avgConsumedRdBW = (bytesConsumedRd / 1000000) / simSeconds;
125712120Sar4jc@virginia.edu
125812120Sar4jc@virginia.edu    avgConsumedWrBW
125912120Sar4jc@virginia.edu        .name(name() + ".avgConsumedWrBW")
126012120Sar4jc@virginia.edu        .desc("Average consumed write bandwidth in MB/s")
126112120Sar4jc@virginia.edu        .precision(2);
126212120Sar4jc@virginia.edu
126312120Sar4jc@virginia.edu    avgConsumedWrBW = (bytesConsumedWr / 1000000) / simSeconds;
126412120Sar4jc@virginia.edu
126512120Sar4jc@virginia.edu    peakBW
126612120Sar4jc@virginia.edu        .name(name() + ".peakBW")
126712120Sar4jc@virginia.edu        .desc("Theoretical peak bandwidth in MB/s")
126812120Sar4jc@virginia.edu        .precision(2);
126912120Sar4jc@virginia.edu
127012120Sar4jc@virginia.edu    peakBW = (SimClock::Frequency / tBURST) * bytesPerCacheLine / 1000000;
127112120Sar4jc@virginia.edu
127212120Sar4jc@virginia.edu    busUtil
127312120Sar4jc@virginia.edu        .name(name() + ".busUtil")
127412120Sar4jc@virginia.edu        .desc("Data bus utilization in percentage")
127512120Sar4jc@virginia.edu        .precision(2);
127611725Sar4jc@virginia.edu
127712120Sar4jc@virginia.edu    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
127812120Sar4jc@virginia.edu
127912120Sar4jc@virginia.edu    totGap
128012120Sar4jc@virginia.edu        .name(name() + ".totGap")
128112120Sar4jc@virginia.edu        .desc("Total gap between requests");
128212120Sar4jc@virginia.edu
128312120Sar4jc@virginia.edu    avgGap
128412120Sar4jc@virginia.edu        .name(name() + ".avgGap")
128512120Sar4jc@virginia.edu        .desc("Average gap between requests")
128612120Sar4jc@virginia.edu        .precision(2);
128712120Sar4jc@virginia.edu
128812120Sar4jc@virginia.edu    avgGap = totGap / (readReqs + writeReqs);
128912120Sar4jc@virginia.edu}
129012120Sar4jc@virginia.edu
void
SimpleDRAM::recvFunctional(PacketPtr pkt)
{
    // Functional accesses have no timing side effects here; simply
    // rely on the abstract memory to perform the access in place.
    functionalAccess(pkt);
}
129712120Sar4jc@virginia.edu
129812120Sar4jc@virginia.eduBaseSlavePort&
129912120Sar4jc@virginia.eduSimpleDRAM::getSlavePort(const string &if_name, PortID idx)
130012120Sar4jc@virginia.edu{
130112120Sar4jc@virginia.edu    if (if_name != "port") {
130212120Sar4jc@virginia.edu        return MemObject::getSlavePort(if_name, idx);
130312120Sar4jc@virginia.edu    } else {
130412120Sar4jc@virginia.edu        return port;
130512120Sar4jc@virginia.edu    }
130612120Sar4jc@virginia.edu}
130712120Sar4jc@virginia.edu
unsigned int
SimpleDRAM::drain(DrainManager *dm)
{
    // Start by draining the port; its count of outstanding items forms
    // the base of our own count.
    unsigned int count = port.drain(dm);

    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(dramWriteQueue.empty() && dramReadQueue.empty() &&
          dramRespQueue.empty())) {
        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", dramWriteQueue.size(), dramReadQueue.size(),
                dramRespQueue.size());
        // NOTE: the three queues are reported as a single outstanding
        // item; the manager is signalled once all of them empty.
        ++count;
        drainManager = dm;
        // the only part that is not drained automatically over time
        // is the write queue, thus trigger writes if there are any
        // waiting and no reads waiting, otherwise wait until the
        // reads are done
        if (dramReadQueue.empty() && !dramWriteQueue.empty() &&
            !writeEvent.scheduled())
            triggerWrites();
    }

    // Report our drain state: anything outstanding means Draining,
    // otherwise we are already Drained.
    if (count)
        setDrainState(Drainable::Draining);
    else
        setDrainState(Drainable::Drained);
    return count;
}
133712120Sar4jc@virginia.edu
// Construct the slave port with its packet queue and a back-reference
// to the owning controller.
// NOTE(review): `queue` is passed to the QueuedSlavePort base before
// `queue` itself is initialized; this looks intentional (the base
// presumably only stores the reference) — confirm against the base
// class before reordering anything here.
SimpleDRAM::MemoryPort::MemoryPort(const std::string& name, SimpleDRAM& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }
134212120Sar4jc@virginia.edu
134312120Sar4jc@virginia.eduAddrRangeList
134412120Sar4jc@virginia.eduSimpleDRAM::MemoryPort::getAddrRanges() const
134512120Sar4jc@virginia.edu{
134612120Sar4jc@virginia.edu    AddrRangeList ranges;
134712120Sar4jc@virginia.edu    ranges.push_back(memory.getAddrRange());
134812120Sar4jc@virginia.edu    return ranges;
134912120Sar4jc@virginia.edu}
135012120Sar4jc@virginia.edu
135112120Sar4jc@virginia.eduvoid
135212120Sar4jc@virginia.eduSimpleDRAM::MemoryPort::recvFunctional(PacketPtr pkt)
135312120Sar4jc@virginia.edu{
135412120Sar4jc@virginia.edu    pkt->pushLabel(memory.name());
135512120Sar4jc@virginia.edu
135612120Sar4jc@virginia.edu    if (!queue.checkFunctional(pkt)) {
135712120Sar4jc@virginia.edu        // Default implementation of SimpleTimingPort::recvFunctional()
135812120Sar4jc@virginia.edu        // calls recvAtomic() and throws away the latency; we can save a
135912120Sar4jc@virginia.edu        // little here by just not calculating the latency.
136012120Sar4jc@virginia.edu        memory.recvFunctional(pkt);
136112120Sar4jc@virginia.edu    }
136212120Sar4jc@virginia.edu
136312120Sar4jc@virginia.edu    pkt->popLabel();
136412120Sar4jc@virginia.edu}
136512120Sar4jc@virginia.edu
Tick
SimpleDRAM::MemoryPort::recvAtomic(PacketPtr pkt)
{
    // Forward atomic accesses straight to the controller and return
    // its reported latency.
    return memory.recvAtomic(pkt);
}
137112120Sar4jc@virginia.edu
bool
SimpleDRAM::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller; the return value indicates
    // whether the request was accepted or must be retried later
    return memory.recvTimingReq(pkt);
}
137812120Sar4jc@virginia.edu
// Factory hook used by the simulator's parameter system to
// instantiate the controller from its Python-described parameters.
SimpleDRAM*
SimpleDRAMParams::create()
{
    return new SimpleDRAM(this);
}
138412120Sar4jc@virginia.edu