simple_dram.cc revision 9726 (later renamed dram_ctrl.cc)
110447Snilay@cs.wisc.edu/*
210447Snilay@cs.wisc.edu * Copyright (c) 2010-2012 ARM Limited
310447Snilay@cs.wisc.edu * All rights reserved
410447Snilay@cs.wisc.edu *
510447Snilay@cs.wisc.edu * The license below extends only to copyright in the software and shall
610447Snilay@cs.wisc.edu * not be construed as granting a license to any other intellectual
710447Snilay@cs.wisc.edu * property including but not limited to intellectual property relating
810447Snilay@cs.wisc.edu * to a hardware implementation of the functionality of the software
910447Snilay@cs.wisc.edu * licensed hereunder.  You may use the software subject to the license
1010447Snilay@cs.wisc.edu * terms below provided that you ensure that this notice is replicated
1110447Snilay@cs.wisc.edu * unmodified and in its entirety in all distributions of the software,
1210447Snilay@cs.wisc.edu * modified or unmodified, in source code or in binary form.
1310447Snilay@cs.wisc.edu *
1410447Snilay@cs.wisc.edu * Redistribution and use in source and binary forms, with or without
1510447Snilay@cs.wisc.edu * modification, are permitted provided that the following conditions are
1610447Snilay@cs.wisc.edu * met: redistributions of source code must retain the above copyright
1710447Snilay@cs.wisc.edu * notice, this list of conditions and the following disclaimer;
1810447Snilay@cs.wisc.edu * redistributions in binary form must reproduce the above copyright
1910447Snilay@cs.wisc.edu * notice, this list of conditions and the following disclaimer in the
2010447Snilay@cs.wisc.edu * documentation and/or other materials provided with the distribution;
2110447Snilay@cs.wisc.edu * neither the name of the copyright holders nor the names of its
2210447Snilay@cs.wisc.edu * contributors may be used to endorse or promote products derived from
2310447Snilay@cs.wisc.edu * this software without specific prior written permission.
2410447Snilay@cs.wisc.edu *
2510447Snilay@cs.wisc.edu * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2610447Snilay@cs.wisc.edu * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2710447Snilay@cs.wisc.edu * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
2810447Snilay@cs.wisc.edu * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
2910447Snilay@cs.wisc.edu * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
3010447Snilay@cs.wisc.edu * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
3110447Snilay@cs.wisc.edu * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
3210447Snilay@cs.wisc.edu * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
3310447Snilay@cs.wisc.edu * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3410447Snilay@cs.wisc.edu * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
3510447Snilay@cs.wisc.edu * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3610447Snilay@cs.wisc.edu *
3710447Snilay@cs.wisc.edu * Authors: Andreas Hansson
3810447Snilay@cs.wisc.edu *          Ani Udipi
3910447Snilay@cs.wisc.edu */
4010447Snilay@cs.wisc.edu
4110447Snilay@cs.wisc.edu#include "base/trace.hh"
4210447Snilay@cs.wisc.edu#include "debug/Drain.hh"
4310447Snilay@cs.wisc.edu#include "debug/DRAM.hh"
4410447Snilay@cs.wisc.edu#include "debug/DRAMWR.hh"
4510447Snilay@cs.wisc.edu#include "mem/simple_dram.hh"
4610447Snilay@cs.wisc.edu
4710447Snilay@cs.wisc.eduusing namespace std;
4810447Snilay@cs.wisc.edu
// Construct the DRAM controller from its Python-visible parameters.
// All timing parameters (tRCD, tCL, ...) are in ticks; the geometry
// (ranks, banks, rows) determines the address decoding in decodeAddr().
// NOTE: the member-initializer order must match the declaration order
// in simple_dram.hh.
SimpleDRAM::SimpleDRAM(const SimpleDRAMParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this),
    retryRdReq(false), retryWrReq(false),
    rowHitFlag(false), stopReads(false), actTicks(p->activation_limit, 0),
    writeEvent(this), respondEvent(this),
    refreshEvent(this), nextReqEvent(this), drainManager(NULL),
    // burst size is unknown until init() queries the connected port
    bytesPerCacheLine(0),
    linesPerRowBuffer(p->lines_per_rowbuffer),
    ranksPerChannel(p->ranks_per_channel),
    // rowsPerBank is likewise derived from the capacity in init()
    banksPerRank(p->banks_per_rank), channels(p->channels), rowsPerBank(0),
    readBufferSize(p->read_buffer_size),
    writeBufferSize(p->write_buffer_size),
    writeThresholdPerc(p->write_thresh_perc),
    tWTR(p->tWTR), tBURST(p->tBURST),
    tRCD(p->tRCD), tCL(p->tCL), tRP(p->tRP),
    tRFC(p->tRFC), tREFI(p->tREFI),
    tXAW(p->tXAW), activationLimit(p->activation_limit),
    memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
    pageMgmt(p->page_policy),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    busBusyUntil(0), writeStartTime(0),
    prevArrival(0), numReqs(0)
{
    // create the bank states based on the dimensions of the ranks and
    // banks
    banks.resize(ranksPerChannel);
    for (size_t c = 0; c < ranksPerChannel; ++c) {
        banks[c].resize(banksPerRank);
    }

    // round the write threshold percent to a whole number of entries
    // in the buffer
    // NOTE(review): this truncates rather than rounds (integer
    // assignment from a double) — presumably intentional; confirm
    writeThreshold = writeBufferSize * writeThresholdPerc / 100.0;
}
8510447Snilay@cs.wisc.edu
8610447Snilay@cs.wisc.eduvoid
8710447Snilay@cs.wisc.eduSimpleDRAM::init()
8810447Snilay@cs.wisc.edu{
8910447Snilay@cs.wisc.edu    if (!port.isConnected()) {
9010447Snilay@cs.wisc.edu        fatal("SimpleDRAM %s is unconnected!\n", name());
9110447Snilay@cs.wisc.edu    } else {
9210447Snilay@cs.wisc.edu        port.sendRangeChange();
9310447Snilay@cs.wisc.edu    }
9410447Snilay@cs.wisc.edu
9510447Snilay@cs.wisc.edu    // get the burst size from the connected port as it is currently
9610447Snilay@cs.wisc.edu    // assumed to be equal to the cache line size
9710447Snilay@cs.wisc.edu    bytesPerCacheLine = port.peerBlockSize();
9810447Snilay@cs.wisc.edu
9910447Snilay@cs.wisc.edu    // we could deal with plenty options here, but for now do a quick
10010447Snilay@cs.wisc.edu    // sanity check
10110447Snilay@cs.wisc.edu    if (bytesPerCacheLine != 64 && bytesPerCacheLine != 32)
10210447Snilay@cs.wisc.edu        panic("Unexpected burst size %d", bytesPerCacheLine);
10310447Snilay@cs.wisc.edu
10410447Snilay@cs.wisc.edu    // determine the rows per bank by looking at the total capacity
10510447Snilay@cs.wisc.edu    uint64_t capacity = ULL(1) << ceilLog2(AbstractMemory::size());
10610447Snilay@cs.wisc.edu
10710447Snilay@cs.wisc.edu    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
10810447Snilay@cs.wisc.edu            AbstractMemory::size());
10910447Snilay@cs.wisc.edu    rowsPerBank = capacity / (bytesPerCacheLine * linesPerRowBuffer *
11010447Snilay@cs.wisc.edu                              banksPerRank * ranksPerChannel);
11110447Snilay@cs.wisc.edu
11210447Snilay@cs.wisc.edu    if (range.interleaved()) {
11310447Snilay@cs.wisc.edu        if (channels != range.stripes())
11410447Snilay@cs.wisc.edu            panic("%s has %d interleaved address stripes but %d channel(s)\n",
11510447Snilay@cs.wisc.edu                  name(), range.stripes(), channels);
11610447Snilay@cs.wisc.edu
11710447Snilay@cs.wisc.edu        if (addrMapping == Enums::RaBaChCo) {
11810447Snilay@cs.wisc.edu            if (bytesPerCacheLine * linesPerRowBuffer !=
11910447Snilay@cs.wisc.edu                range.granularity()) {
12010447Snilay@cs.wisc.edu                panic("Interleaving of %s doesn't match RaBaChCo address map\n",
12110447Snilay@cs.wisc.edu                      name());
12210447Snilay@cs.wisc.edu            }
12310447Snilay@cs.wisc.edu        } else if (addrMapping == Enums::RaBaCoCh) {
12410447Snilay@cs.wisc.edu            if (bytesPerCacheLine != range.granularity()) {
12510447Snilay@cs.wisc.edu                panic("Interleaving of %s doesn't match RaBaCoCh address map\n",
12610447Snilay@cs.wisc.edu                      name());
12710447Snilay@cs.wisc.edu            }
12810447Snilay@cs.wisc.edu        } else if (addrMapping == Enums::CoRaBaCh) {
12910447Snilay@cs.wisc.edu            if (bytesPerCacheLine != range.granularity())
13010447Snilay@cs.wisc.edu                panic("Interleaving of %s doesn't match CoRaBaCh address map\n",
13110447Snilay@cs.wisc.edu                      name());
13210447Snilay@cs.wisc.edu        }
13310447Snilay@cs.wisc.edu    }
13410447Snilay@cs.wisc.edu}
13510447Snilay@cs.wisc.edu
13610447Snilay@cs.wisc.eduvoid
13710447Snilay@cs.wisc.eduSimpleDRAM::startup()
13810447Snilay@cs.wisc.edu{
13910447Snilay@cs.wisc.edu    // print the configuration of the controller
14010447Snilay@cs.wisc.edu    printParams();
14110447Snilay@cs.wisc.edu
14210447Snilay@cs.wisc.edu    // kick off the refresh
14310447Snilay@cs.wisc.edu    schedule(refreshEvent, curTick() + tREFI);
14410447Snilay@cs.wisc.edu}
14510447Snilay@cs.wisc.edu
14610447Snilay@cs.wisc.eduTick
14710447Snilay@cs.wisc.eduSimpleDRAM::recvAtomic(PacketPtr pkt)
14810447Snilay@cs.wisc.edu{
14910447Snilay@cs.wisc.edu    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());
15010447Snilay@cs.wisc.edu
15110447Snilay@cs.wisc.edu    // do the actual memory access and turn the packet into a response
15210447Snilay@cs.wisc.edu    access(pkt);
15310447Snilay@cs.wisc.edu
15410447Snilay@cs.wisc.edu    Tick latency = 0;
15510447Snilay@cs.wisc.edu    if (!pkt->memInhibitAsserted() && pkt->hasData()) {
15610447Snilay@cs.wisc.edu        // this value is not supposed to be accurate, just enough to
15710447Snilay@cs.wisc.edu        // keep things going, mimic a closed page
15810447Snilay@cs.wisc.edu        latency = tRP + tRCD + tCL;
15910447Snilay@cs.wisc.edu    }
16010447Snilay@cs.wisc.edu    return latency;
16110447Snilay@cs.wisc.edu}
16210447Snilay@cs.wisc.edu
16310447Snilay@cs.wisc.edubool
16410447Snilay@cs.wisc.eduSimpleDRAM::readQueueFull() const
16510447Snilay@cs.wisc.edu{
16610447Snilay@cs.wisc.edu    DPRINTF(DRAM, "Read queue limit %d current size %d\n",
16710447Snilay@cs.wisc.edu            readBufferSize, readQueue.size() + respQueue.size());
16810447Snilay@cs.wisc.edu
16910447Snilay@cs.wisc.edu    return (readQueue.size() + respQueue.size()) == readBufferSize;
17010447Snilay@cs.wisc.edu}
17110447Snilay@cs.wisc.edu
17210447Snilay@cs.wisc.edubool
17310447Snilay@cs.wisc.eduSimpleDRAM::writeQueueFull() const
17410447Snilay@cs.wisc.edu{
17510447Snilay@cs.wisc.edu    DPRINTF(DRAM, "Write queue limit %d current size %d\n",
17610447Snilay@cs.wisc.edu            writeBufferSize, writeQueue.size());
17710447Snilay@cs.wisc.edu    return writeQueue.size() == writeBufferSize;
17810447Snilay@cs.wisc.edu}
17910447Snilay@cs.wisc.edu
// Decode a packet's address into DRAM coordinates (rank, bank, row)
// according to the configured address mapping, and wrap the packet in
// a newly allocated DRAMPacket. The caller takes ownership of the
// returned object (queue-processing code deletes it when done).
SimpleDRAM::DRAMPacket*
SimpleDRAM::decodeAddr(PacketPtr pkt)
{
    // decode the address based on the address mapping scheme, with
    // Ra, Co, Ba and Ch denoting rank, column, bank and channel,
    // respectively
    uint8_t rank;
    uint16_t bank;
    // NOTE(review): row is only 16 bits wide; a configuration with
    // more than 64K rows per bank would silently truncate here before
    // the assert below — confirm rowsPerBank always fits
    uint16_t row;

    Addr addr = pkt->getAddr();

    // truncate the address to the access granularity
    addr = addr / bytesPerCacheLine;

    // we have removed the lowest order address bits that denote the
    // position within the cache line; each branch below repeatedly
    // extracts a field with modulo and strips it with division,
    // working from the least-significant field upwards
    if (addrMapping == Enums::RaBaChCo) {
        // the lowest order bits denote the column to ensure that
        // sequential cache lines occupy the same row
        addr = addr / linesPerRowBuffer;

        // take out the channel part of the address
        addr = addr / channels;

        // after the channel bits, get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::RaBaCoCh) {
        // take out the channel part of the address
        addr = addr / channels;

        // next, the column
        addr = addr / linesPerRowBuffer;

        // after the column bits, we get the bank bits to interleave
        // over the banks
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // after the bank, we get the rank bits which thus interleaves
        // over the ranks
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else if (addrMapping == Enums::CoRaBaCh) {
        // optimise for closed page mode and utilise maximum
        // parallelism of the DRAM (at the cost of power)

        // take out the channel part of the address, not that this has
        // to match with how accesses are interleaved between the
        // controllers in the address mapping
        addr = addr / channels;

        // start with the bank bits, as this provides the maximum
        // opportunity for parallelism between requests
        bank = addr % banksPerRank;
        addr = addr / banksPerRank;

        // next get the rank bits
        rank = addr % ranksPerChannel;
        addr = addr / ranksPerChannel;

        // next the column bits which we do not need to keep track of
        // and simply skip past
        addr = addr / linesPerRowBuffer;

        // lastly, get the row bits
        row = addr % rowsPerBank;
        addr = addr / rowsPerBank;
    } else
        panic("Unknown address mapping policy chosen!");

    // the decoded coordinates must lie within the configured geometry
    assert(rank < ranksPerChannel);
    assert(bank < banksPerRank);
    assert(row < rowsPerBank);

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            pkt->getAddr(), rank, bank, row);

    // create the corresponding DRAM packet with the entry time and
    // ready time set to the current tick, the latter will be updated
    // later
    return new DRAMPacket(pkt, rank, bank, row, pkt->getAddr(),
                          banks[rank][bank]);
}
27910447Snilay@cs.wisc.edu
28010447Snilay@cs.wisc.eduvoid
28110447Snilay@cs.wisc.eduSimpleDRAM::addToReadQueue(PacketPtr pkt)
28210447Snilay@cs.wisc.edu{
28310447Snilay@cs.wisc.edu    // only add to the read queue here. whenever the request is
28410447Snilay@cs.wisc.edu    // eventually done, set the readyTime, and call schedule()
28510447Snilay@cs.wisc.edu    assert(!pkt->isWrite());
28610447Snilay@cs.wisc.edu
28710447Snilay@cs.wisc.edu    // First check write buffer to see if the data is already at
28810447Snilay@cs.wisc.edu    // the controller
28910447Snilay@cs.wisc.edu    list<DRAMPacket*>::const_iterator i;
29010447Snilay@cs.wisc.edu    Addr addr = pkt->getAddr();
29110447Snilay@cs.wisc.edu
29210447Snilay@cs.wisc.edu    // @todo: add size check
29310447Snilay@cs.wisc.edu    for (i = writeQueue.begin(); i != writeQueue.end(); ++i) {
29410447Snilay@cs.wisc.edu        if ((*i)->addr == addr){
29510447Snilay@cs.wisc.edu            servicedByWrQ++;
29610447Snilay@cs.wisc.edu            DPRINTF(DRAM, "Read to %lld serviced by write queue\n", addr);
29710447Snilay@cs.wisc.edu            bytesRead += bytesPerCacheLine;
29810447Snilay@cs.wisc.edu            bytesConsumedRd += pkt->getSize();
29910447Snilay@cs.wisc.edu            accessAndRespond(pkt, frontendLatency);
30010447Snilay@cs.wisc.edu            return;
30110447Snilay@cs.wisc.edu        }
30210447Snilay@cs.wisc.edu    }
30310447Snilay@cs.wisc.edu
30410447Snilay@cs.wisc.edu    DRAMPacket* dram_pkt = decodeAddr(pkt);
30510447Snilay@cs.wisc.edu
30610447Snilay@cs.wisc.edu    assert(readQueue.size() + respQueue.size() < readBufferSize);
30710447Snilay@cs.wisc.edu    rdQLenPdf[readQueue.size() + respQueue.size()]++;
30810447Snilay@cs.wisc.edu
30910447Snilay@cs.wisc.edu    DPRINTF(DRAM, "Adding to read queue\n");
31010447Snilay@cs.wisc.edu
31110447Snilay@cs.wisc.edu    readQueue.push_back(dram_pkt);
31210447Snilay@cs.wisc.edu
31310447Snilay@cs.wisc.edu    // Update stats
31410447Snilay@cs.wisc.edu    uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
31510447Snilay@cs.wisc.edu    assert(bank_id < ranksPerChannel * banksPerRank);
31610447Snilay@cs.wisc.edu    perBankRdReqs[bank_id]++;
31710447Snilay@cs.wisc.edu
31810447Snilay@cs.wisc.edu    avgRdQLen = readQueue.size() + respQueue.size();
31910447Snilay@cs.wisc.edu
32010447Snilay@cs.wisc.edu    // If we are not already scheduled to get the read request out of
32110447Snilay@cs.wisc.edu    // the queue, do so now
32210447Snilay@cs.wisc.edu    if (!nextReqEvent.scheduled() && !stopReads) {
32310447Snilay@cs.wisc.edu        DPRINTF(DRAM, "Request scheduled immediately\n");
32410447Snilay@cs.wisc.edu        schedule(nextReqEvent, curTick());
32510447Snilay@cs.wisc.edu    }
32610447Snilay@cs.wisc.edu}
32710447Snilay@cs.wisc.edu
// Drain the write queue: repeatedly pick the next write (per the
// scheduling policy), model its bank/bus timing, and retire it, until
// the queue is empty or the write threshold is exceeded. Afterwards
// the bus is turned back around for reads and any stalled requestor
// is retried.
void
SimpleDRAM::processWriteEvent()
{
    assert(!writeQueue.empty());
    uint32_t numWritesThisTime = 0;

    DPRINTF(DRAMWR, "Beginning DRAM Writes\n");
    // snapshot bus and bank occupancy, used only for the stats
    // DPRINTF at the end of the burst
    Tick temp1 M5_VAR_USED = std::max(curTick(), busBusyUntil);
    Tick temp2 M5_VAR_USED = std::max(curTick(), maxBankFreeAt());

    // @todo: are there any dangers with the untimed while loop?
    while (!writeQueue.empty()) {
        if (numWritesThisTime > writeThreshold) {
            DPRINTF(DRAMWR, "Hit write threshold %d\n", writeThreshold);
            break;
        }

        // reorder the queue so the preferred write is at the front
        chooseNextWrite();
        DRAMPacket* dram_pkt = writeQueue.front();
        // What's the earliest the request can be put on the bus
        Tick schedTime = std::max(curTick(), busBusyUntil);

        DPRINTF(DRAMWR, "Asking for latency estimate at %lld\n",
                schedTime + tBURST);

        pair<Tick, Tick> lat = estimateLatency(dram_pkt, schedTime + tBURST);
        Tick accessLat = lat.second;

        // look at the rowHitFlag set by estimateLatency
        if (rowHitFlag)
            writeRowHits++;

        Bank& bank = dram_pkt->bank_ref;

        if (pageMgmt == Enums::open) {
            // open-page: leave the row open and account for the
            // access latency, overlapping the CAS with the burst
            bank.openRow = dram_pkt->row;
            bank.freeAt = schedTime + tBURST + std::max(accessLat, tCL);
            busBusyUntil = bank.freeAt - tCL;

            if (!rowHitFlag) {
                // a row miss implies an activate; account for tRAS
                // and back out the activate time from freeAt
                bank.tRASDoneAt = bank.freeAt + tRP;
                recordActivate(bank.freeAt - tCL - tRCD);
                busBusyUntil = bank.freeAt - tCL - tRCD;
            }
        } else if (pageMgmt == Enums::close) {
            // closed-page: every access activates and precharges
            // NOTE(review): tRP is added twice here (and subtracted
            // twice below) — confirm this doubling is the intended
            // close-page timing rather than a typo
            bank.freeAt = schedTime + tBURST + accessLat + tRP + tRP;
            // Work backwards from bank.freeAt to determine activate time
            recordActivate(bank.freeAt - tRP - tRP - tCL - tRCD);
            busBusyUntil = bank.freeAt - tRP - tRP - tCL - tRCD;
            DPRINTF(DRAMWR, "processWriteEvent::bank.freeAt for "
                    "banks_id %d is %lld\n",
                    dram_pkt->rank * banksPerRank + dram_pkt->bank,
                    bank.freeAt);
        } else
            panic("Unknown page management policy chosen\n");

        DPRINTF(DRAMWR, "Done writing to address %lld\n", dram_pkt->addr);

        DPRINTF(DRAMWR, "schedtime is %lld, tBURST is %lld, "
                "busbusyuntil is %lld\n",
                schedTime, tBURST, busBusyUntil);

        // the write was already performed against the backing store
        // when it entered the queue, so just retire the entry
        writeQueue.pop_front();
        delete dram_pkt;

        numWritesThisTime++;
    }

    DPRINTF(DRAMWR, "Completed %d writes, bus busy for %lld ticks,"\
            "banks busy for %lld ticks\n", numWritesThisTime,
            busBusyUntil - temp1, maxBankFreeAt() - temp2);

    // Update stats
    avgWrQLen = writeQueue.size();

    // turn the bus back around for reads again
    busBusyUntil += tWTR;
    stopReads = false;

    // a requestor may have been stalled on a full write queue
    if (retryWrReq) {
        retryWrReq = false;
        port.sendRetry();
    }

    // if there is nothing left in any queue, signal a drain
    if (writeQueue.empty() && readQueue.empty() &&
        respQueue.empty () && drainManager) {
        drainManager->signalDrainDone();
        drainManager = NULL;
    }

    // Once you're done emptying the write queue, check if there's
    // anything in the read queue, and call schedule if required. The
    // retry above could already have caused it to be scheduled, so
    // first check
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, busBusyUntil);
}
42610447Snilay@cs.wisc.edu
42710447Snilay@cs.wisc.eduvoid
42810447Snilay@cs.wisc.eduSimpleDRAM::triggerWrites()
42910447Snilay@cs.wisc.edu{
43010447Snilay@cs.wisc.edu    DPRINTF(DRAM, "Writes triggered at %lld\n", curTick());
43110447Snilay@cs.wisc.edu    // Flag variable to stop any more read scheduling
43210447Snilay@cs.wisc.edu    stopReads = true;
43310447Snilay@cs.wisc.edu
43410447Snilay@cs.wisc.edu    writeStartTime = std::max(busBusyUntil, curTick()) + tWTR;
43510447Snilay@cs.wisc.edu
43610447Snilay@cs.wisc.edu    DPRINTF(DRAM, "Writes scheduled at %lld\n", writeStartTime);
43710447Snilay@cs.wisc.edu
43810447Snilay@cs.wisc.edu    assert(writeStartTime >= curTick());
43910447Snilay@cs.wisc.edu    assert(!writeEvent.scheduled());
44010447Snilay@cs.wisc.edu    schedule(writeEvent, writeStartTime);
44110447Snilay@cs.wisc.edu}
44210447Snilay@cs.wisc.edu
44310447Snilay@cs.wisc.eduvoid
44410447Snilay@cs.wisc.eduSimpleDRAM::addToWriteQueue(PacketPtr pkt)
44510447Snilay@cs.wisc.edu{
44610447Snilay@cs.wisc.edu    // only add to the write queue here. whenever the request is
44710447Snilay@cs.wisc.edu    // eventually done, set the readyTime, and call schedule()
44810447Snilay@cs.wisc.edu    assert(pkt->isWrite());
44910447Snilay@cs.wisc.edu
45010447Snilay@cs.wisc.edu    DRAMPacket* dram_pkt = decodeAddr(pkt);
45110447Snilay@cs.wisc.edu
45210447Snilay@cs.wisc.edu    assert(writeQueue.size() < writeBufferSize);
45310447Snilay@cs.wisc.edu    wrQLenPdf[writeQueue.size()]++;
45410447Snilay@cs.wisc.edu
45510447Snilay@cs.wisc.edu    DPRINTF(DRAM, "Adding to write queue\n");
45610447Snilay@cs.wisc.edu
45710447Snilay@cs.wisc.edu    writeQueue.push_back(dram_pkt);
45810447Snilay@cs.wisc.edu
45910447Snilay@cs.wisc.edu    // Update stats
46010447Snilay@cs.wisc.edu    uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
46110447Snilay@cs.wisc.edu    assert(bank_id < ranksPerChannel * banksPerRank);
46210447Snilay@cs.wisc.edu    perBankWrReqs[bank_id]++;
46310447Snilay@cs.wisc.edu
46410447Snilay@cs.wisc.edu    avgWrQLen = writeQueue.size();
46510447Snilay@cs.wisc.edu
46610447Snilay@cs.wisc.edu    // we do not wait for the writes to be send to the actual memory,
46710447Snilay@cs.wisc.edu    // but instead take responsibility for the consistency here and
46810447Snilay@cs.wisc.edu    // snoop the write queue for any upcoming reads
46910447Snilay@cs.wisc.edu
47010447Snilay@cs.wisc.edu    bytesConsumedWr += pkt->getSize();
47110447Snilay@cs.wisc.edu    bytesWritten += bytesPerCacheLine;
47210447Snilay@cs.wisc.edu    accessAndRespond(pkt, frontendLatency);
47310447Snilay@cs.wisc.edu
47410447Snilay@cs.wisc.edu    // If your write buffer is starting to fill up, drain it!
47510447Snilay@cs.wisc.edu    if (writeQueue.size() > writeThreshold && !stopReads){
47610447Snilay@cs.wisc.edu        triggerWrites();
47710447Snilay@cs.wisc.edu    }
47810447Snilay@cs.wisc.edu}
47910447Snilay@cs.wisc.edu
// Debug-only dump of the controller's configuration: physical
// organisation, buffer/scheduler characteristics, and timing
// parameters. Output goes to the DRAM debug flag; no state is
// modified.
void
SimpleDRAM::printParams() const
{
    // Sanity check print of important parameters
    DPRINTF(DRAM,
            "Memory controller %s physical organization\n"      \
            "Bytes per cacheline  %d\n"                         \
            "Lines per row buffer %d\n"                         \
            "Rows  per bank       %d\n"                         \
            "Banks per rank       %d\n"                         \
            "Ranks per channel    %d\n"                         \
            "Total mem capacity   %u\n",
            name(), bytesPerCacheLine, linesPerRowBuffer, rowsPerBank,
            banksPerRank, ranksPerChannel, bytesPerCacheLine *
            linesPerRowBuffer * rowsPerBank * banksPerRank * ranksPerChannel);

    // translate the enum parameters to human-readable labels
    string scheduler =  memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS";
    string address_mapping = addrMapping == Enums::RaBaChCo ? "RaBaChCo" :
        (addrMapping == Enums::RaBaCoCh ? "RaBaCoCh" : "CoRaBaCh");
    string page_policy = pageMgmt == Enums::open ? "OPEN" : "CLOSE";

    DPRINTF(DRAM,
            "Memory controller %s characteristics\n"    \
            "Read buffer size     %d\n"                 \
            "Write buffer size    %d\n"                 \
            "Write buffer thresh  %d\n"                 \
            "Scheduler            %s\n"                 \
            "Address mapping      %s\n"                 \
            "Page policy          %s\n",
            name(), readBufferSize, writeBufferSize, writeThreshold,
            scheduler, address_mapping, page_policy);

    // all timing parameters are expressed in ticks
    DPRINTF(DRAM, "Memory controller %s timing specs\n" \
            "tRCD      %d ticks\n"                        \
            "tCL       %d ticks\n"                        \
            "tRP       %d ticks\n"                        \
            "tBURST    %d ticks\n"                        \
            "tRFC      %d ticks\n"                        \
            "tREFI     %d ticks\n"                        \
            "tWTR      %d ticks\n"                        \
            "tXAW (%d) %d ticks\n",
            name(), tRCD, tCL, tRP, tBURST, tRFC, tREFI, tWTR,
            activationLimit, tXAW);
}
52410447Snilay@cs.wisc.edu
52510447Snilay@cs.wisc.eduvoid
52610447Snilay@cs.wisc.eduSimpleDRAM::printQs() const {
52710447Snilay@cs.wisc.edu
52810447Snilay@cs.wisc.edu    list<DRAMPacket*>::const_iterator i;
52910447Snilay@cs.wisc.edu
53010447Snilay@cs.wisc.edu    DPRINTF(DRAM, "===READ QUEUE===\n\n");
53110447Snilay@cs.wisc.edu    for (i = readQueue.begin() ;  i != readQueue.end() ; ++i) {
53210447Snilay@cs.wisc.edu        DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
53310447Snilay@cs.wisc.edu    }
53410447Snilay@cs.wisc.edu    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
53510447Snilay@cs.wisc.edu    for (i = respQueue.begin() ;  i != respQueue.end() ; ++i) {
53610447Snilay@cs.wisc.edu        DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
53710447Snilay@cs.wisc.edu    }
53810447Snilay@cs.wisc.edu    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
53910447Snilay@cs.wisc.edu    for (i = writeQueue.begin() ;  i != writeQueue.end() ; ++i) {
54010447Snilay@cs.wisc.edu        DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
54110447Snilay@cs.wisc.edu    }
54210447Snilay@cs.wisc.edu}
54310447Snilay@cs.wisc.edu
// Entry point for timing requests from the port. Returns true if the
// packet is accepted (queued or dropped), false if the relevant queue
// is full and the requester must retry later.
bool
SimpleDRAM::recvTimingReq(PacketPtr pkt)
{
    /// @todo temporary hack to deal with memory corruption issues until
    /// 4-phase transactions are complete
    for (int x = 0; x < pendingDelete.size(); x++)
        delete pendingDelete[x];
    pendingDelete.clear();

    // This is where we enter from the outside world
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(),pkt->getAddr(), pkt->getSize());

    // simply drop inhibited packets for now
    if (pkt->memInhibitAsserted()) {
        DPRINTF(DRAM,"Inhibited packet -- Dropping it now\n");
        pendingDelete.push_back(pkt);
        return true;
    }

   // a full-cache-line request is assumed to originate from a CPU cache
   if (pkt->getSize() == bytesPerCacheLine)
       cpuReqs++;

   // Every million accesses, print the state of the queues
   // NOTE(review): numReqs is only incremented further down for accepted
   // reads/writes, so this also fires on the very first request (0 % 1e6)
   if (numReqs % 1000000 == 0)
       printQs();

    // Calc avg gap between requests
    if (prevArrival != 0) {
        totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    unsigned size = pkt->getSize();
    if (size > bytesPerCacheLine)
        panic("Request size %d is greater than burst size %d",
              size, bytesPerCacheLine);

    // check local buffers and do not accept if full
    if (pkt->isRead()) {
        assert(size != 0);
        if (readQueueFull()) {
            DPRINTF(DRAM, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            numRdRetry++;
            return false;
        } else {
            // bucket the request by log2 of its size
            readPktSize[ceilLog2(size)]++;
            addToReadQueue(pkt);
            readReqs++;
            numReqs++;
        }
    } else if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull()) {
            DPRINTF(DRAM, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            numWrRetry++;
            return false;
        } else {
            writePktSize[ceilLog2(size)]++;
            addToWriteQueue(pkt);
            writeReqs++;
            numReqs++;
        }
    } else {
        // neither read nor write: respond immediately with a nominal
        // 1-tick latency
        DPRINTF(DRAM,"Neither read nor write, ignore timing\n");
        neitherReadNorWrite++;
        accessAndRespond(pkt, 1);
    }

    // the packet was accepted, so no retry is outstanding any more
    retryRdReq = false;
    retryWrReq = false;
    return true;
}
62110447Snilay@cs.wisc.edu
// Event handler fired when the response at the head of respQueue has
// reached its readyTime: send it back to the requester, reschedule for
// the next response, and signal drain completion if everything is empty.
void
SimpleDRAM::processRespondEvent()
{
    DPRINTF(DRAM,
            "processRespondEvent(): Some req has reached its readyTime\n");

     PacketPtr pkt = respQueue.front()->pkt;

     // Actually responds to the requestor
     bytesConsumedRd += pkt->getSize();
     bytesRead += bytesPerCacheLine;
     accessAndRespond(pkt, frontendLatency + backendLatency);

     // the DRAMPacket is done with; the Packet itself is now owned by
     // the response path
     delete respQueue.front();
     respQueue.pop_front();

     // Update stats
     avgRdQLen = readQueue.size() + respQueue.size();

     if (!respQueue.empty()) {
         // schedule the event for the next response in readyTime order
         assert(respQueue.front()->readyTime >= curTick());
         assert(!respondEvent.scheduled());
         schedule(respondEvent, respQueue.front()->readyTime);
     } else {
         // if there is nothing left in any queue, signal a drain
         if (writeQueue.empty() && readQueue.empty() &&
             drainManager) {
             drainManager->signalDrainDone();
             drainManager = NULL;
         }
     }

     // We have made a location in the queue available at this point,
     // so if there is a read that was forced to wait, retry now
     if (retryRdReq) {
         retryRdReq = false;
         port.sendRetry();
     }
}
66110447Snilay@cs.wisc.edu
66210447Snilay@cs.wisc.eduvoid
66310447Snilay@cs.wisc.eduSimpleDRAM::chooseNextWrite()
66410447Snilay@cs.wisc.edu{
66510447Snilay@cs.wisc.edu    // This method does the arbitration between write requests. The
66610447Snilay@cs.wisc.edu    // chosen packet is simply moved to the head of the write
66710447Snilay@cs.wisc.edu    // queue. The other methods know that this is the place to
66810447Snilay@cs.wisc.edu    // look. For example, with FCFS, this method does nothing
66910447Snilay@cs.wisc.edu    assert(!writeQueue.empty());
67010447Snilay@cs.wisc.edu
67110447Snilay@cs.wisc.edu    if (writeQueue.size() == 1) {
67210447Snilay@cs.wisc.edu        DPRINTF(DRAMWR, "Single write request, nothing to do\n");
67310447Snilay@cs.wisc.edu        return;
67410447Snilay@cs.wisc.edu    }
67510447Snilay@cs.wisc.edu
67610447Snilay@cs.wisc.edu    if (memSchedPolicy == Enums::fcfs) {
67710447Snilay@cs.wisc.edu        // Do nothing, since the correct request is already head
67810447Snilay@cs.wisc.edu    } else if (memSchedPolicy == Enums::frfcfs) {
67910447Snilay@cs.wisc.edu        list<DRAMPacket*>::iterator i = writeQueue.begin();
68010447Snilay@cs.wisc.edu        bool foundRowHit = false;
68110447Snilay@cs.wisc.edu        while (!foundRowHit && i != writeQueue.end()) {
68210447Snilay@cs.wisc.edu            DRAMPacket* dram_pkt = *i;
68310447Snilay@cs.wisc.edu            const Bank& bank = dram_pkt->bank_ref;
68410447Snilay@cs.wisc.edu            if (bank.openRow == dram_pkt->row) { //FR part
68510447Snilay@cs.wisc.edu                DPRINTF(DRAMWR, "Write row buffer hit\n");
68610447Snilay@cs.wisc.edu                writeQueue.erase(i);
68710447Snilay@cs.wisc.edu                writeQueue.push_front(dram_pkt);
68810447Snilay@cs.wisc.edu                foundRowHit = true;
68910447Snilay@cs.wisc.edu            } else { //FCFS part
69010447Snilay@cs.wisc.edu                ;
69110447Snilay@cs.wisc.edu            }
69210447Snilay@cs.wisc.edu            ++i;
69310447Snilay@cs.wisc.edu        }
69410447Snilay@cs.wisc.edu    } else
69510447Snilay@cs.wisc.edu        panic("No scheduling policy chosen\n");
69610447Snilay@cs.wisc.edu
69710447Snilay@cs.wisc.edu    DPRINTF(DRAMWR, "Selected next write request\n");
69810447Snilay@cs.wisc.edu}
699
700bool
701SimpleDRAM::chooseNextRead()
702{
703    // This method does the arbitration between read requests. The
704    // chosen packet is simply moved to the head of the queue. The
705    // other methods know that this is the place to look. For example,
706    // with FCFS, this method does nothing
707    if (readQueue.empty()) {
708        DPRINTF(DRAM, "No read request to select\n");
709        return false;
710    }
711
712    // If there is only one request then there is nothing left to do
713    if (readQueue.size() == 1)
714        return true;
715
716    if (memSchedPolicy == Enums::fcfs) {
717        // Do nothing, since the request to serve is already the first
718        // one in the read queue
719    } else if (memSchedPolicy == Enums::frfcfs) {
720        for (list<DRAMPacket*>::iterator i = readQueue.begin();
721             i != readQueue.end() ; ++i) {
722            DRAMPacket* dram_pkt = *i;
723            const Bank& bank = dram_pkt->bank_ref;
724            // Check if it is a row hit
725            if (bank.openRow == dram_pkt->row) { //FR part
726                DPRINTF(DRAM, "Row buffer hit\n");
727                readQueue.erase(i);
728                readQueue.push_front(dram_pkt);
729                break;
730            } else { //FCFS part
731                ;
732            }
733        }
734    } else
735        panic("No scheduling policy chosen!\n");
736
737    DPRINTF(DRAM, "Selected next read request\n");
738    return true;
739}
740
741void
742SimpleDRAM::accessAndRespond(PacketPtr pkt, Tick static_latency)
743{
744    DPRINTF(DRAM, "Responding to Address %lld.. ",pkt->getAddr());
745
746    bool needsResponse = pkt->needsResponse();
747    // do the actual memory access which also turns the packet into a
748    // response
749    access(pkt);
750
751    // turn packet around to go back to requester if response expected
752    if (needsResponse) {
753        // access already turned the packet into a response
754        assert(pkt->isResponse());
755
756        // @todo someone should pay for this
757        pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;
758
759        // queue the packet in the response queue to be sent out after
760        // the static latency has passed
761        port.schedTimingResp(pkt, curTick() + static_latency);
762    } else {
763        // @todo the packet is going to be deleted, and the DRAMPacket
764        // is still having a pointer to it
765        pendingDelete.push_back(pkt);
766    }
767
768    DPRINTF(DRAM, "Done\n");
769
770    return;
771}
772
// Estimate the (bank latency, access latency) pair for a request that
// reaches its bank at 'inTime'. Also sets rowHitFlag as a side effect.
pair<Tick, Tick>
SimpleDRAM::estimateLatency(DRAMPacket* dram_pkt, Tick inTime)
{
    // If a request reaches a bank at tick 'inTime', how much time
    // *after* that does it take to finish the request, depending
    // on bank status and page open policy. Note that this method
    // considers only the time taken for the actual read or write
    // to complete, NOT any additional time thereafter for tRAS or
    // tRP.
    Tick accLat = 0;
    Tick bankLat = 0;
    rowHitFlag = false;

    const Bank& bank = dram_pkt->bank_ref;
    if (pageMgmt == Enums::open) { // open-page policy
        if (bank.openRow == dram_pkt->row) {
            // When we have a row-buffer hit,
            // we don't care about tRAS having expired or not,
            // but do care about bank being free for access
            rowHitFlag = true;

            if (bank.freeAt < inTime) {
               // CAS latency only
               accLat += tCL;
               bankLat += tCL;
            } else {
                // NOTE(review): when the bank is still busy
                // (freeAt >= inTime) no latency at all is charged,
                // not even the wait until freeAt or the tCL — this
                // looks suspicious; confirm intended semantics
                accLat += 0;
                bankLat += 0;
            }

        } else {
            // Row-buffer miss, need to close existing row
            // once tRAS has expired, then open the new one,
            // then add cas latency.
            Tick freeTime = std::max(bank.tRASDoneAt, bank.freeAt);

            // wait for both tRAS expiry and the bank to be free
            if (freeTime > inTime)
               accLat += freeTime - inTime;

            // precharge + activate + CAS
            accLat += tRP + tRCD + tCL;
            bankLat += tRP + tRCD + tCL;
        }
    } else if (pageMgmt == Enums::close) {
        // With a close page policy, no notion of
        // bank.tRASDoneAt
        if (bank.freeAt > inTime)
            accLat += bank.freeAt - inTime;

        // page already closed, simply open the row, and
        // add cas latency
        accLat += tRCD + tCL;
        bankLat += tRCD + tCL;
    } else
        panic("No page management policy chosen\n");

    DPRINTF(DRAM, "Returning < %lld, %lld > from estimateLatency()\n",
            bankLat, accLat);

    return make_pair(bankLat, accLat);
}
833
// Event handler that kicks off servicing of the next request; all the
// actual selection logic lives in scheduleNextReq().
void
SimpleDRAM::processNextReqEvent()
{
    scheduleNextReq();
}
839
// Record a row activation at act_tick and enforce the tXAW constraint:
// no more than activationLimit activates may fall within any tXAW
// window. actTicks holds the last activationLimit activate times,
// newest at the front.
void
SimpleDRAM::recordActivate(Tick act_tick)
{
    assert(actTicks.size() == activationLimit);

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    // sanity check
    // NOTE(review): actTicks.back() is used as a boolean, so a
    // genuine activate at tick 0 is indistinguishable from "no
    // activation recorded yet" — presumably acceptable; confirm
    if (actTicks.back() && (act_tick - actTicks.back()) < tXAW) {
        panic("Got %d activates in window %d (%d - %d) which is smaller "
              "than %d\n", activationLimit, act_tick - actTicks.back(),
              act_tick, actTicks.back(), tXAW);
    }

    // shift the times used for the book keeping, the last element
    // (highest index) is the oldest one and hence the lowest value
    actTicks.pop_back();

    // record an new activation (in the future)
    actTicks.push_front(act_tick);

    // cannot activate more than X times in time window tXAW, push the
    // next one (the X + 1'st activate) to be tXAW away from the
    // oldest in our window of X
    if (actTicks.back() && (act_tick - actTicks.back()) < tXAW) {
        DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate no earlier "
                "than %d\n", activationLimit, actTicks.back() + tXAW);
        for(int i = 0; i < ranksPerChannel; i++)
            for(int j = 0; j < banksPerRank; j++)
                // next activate must not happen before end of window
                banks[i][j].freeAt = std::max(banks[i][j].freeAt,
                                              actTicks.back() + tXAW);
    }
}
874
// Perform the timing calculation for one DRAM access: compute its
// latency, update bank and bus state, record stats, move the request
// to the response queue, and (re)schedule the next-request event.
void
SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
{

    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    // estimate the bank and access latency
    pair<Tick, Tick> lat = estimateLatency(dram_pkt, curTick());
    Tick bankLat = lat.first;
    Tick accessLat = lat.second;

    // This request was woken up at this time based on a prior call
    // to estimateLatency(). However, between then and now, both the
    // accessLatency and/or busBusyUntil may have changed. We need
    // to correct for that.

    Tick addDelay = (curTick() + accessLat < busBusyUntil) ?
        busBusyUntil - (curTick() + accessLat) : 0;

    Bank& bank = dram_pkt->bank_ref;

    // Update bank state
    if (pageMgmt == Enums::open) {
        bank.openRow = dram_pkt->row;
        bank.freeAt = curTick() + addDelay + accessLat;
        // If you activated a new row do to this access, the next access
        // will have to respect tRAS for this bank. Assume tRAS ~= 3 * tRP.
        // Also need to account for t_XAW
        if (!rowHitFlag) {
            bank.tRASDoneAt = bank.freeAt + tRP;
            recordActivate(bank.freeAt - tCL - tRCD); //since this is open page,
                                                      //no tRP by default
        }
    } else if (pageMgmt == Enums::close) { // accounting for tRAS also
        // assuming that tRAS ~= 3 * tRP, and tRC ~= 4 * tRP, as is common
        // (refer Jacob/Ng/Wang and Micron datasheets)
        bank.freeAt = curTick() + addDelay + accessLat + tRP + tRP;
        recordActivate(bank.freeAt - tRP - tRP - tCL - tRCD); //essentially (freeAt - tRC)
        DPRINTF(DRAM,"doDRAMAccess::bank.freeAt is %lld\n",bank.freeAt);
    } else
        panic("No page management policy chosen\n");

    // Update request parameters
    dram_pkt->readyTime = curTick() + addDelay + accessLat + tBURST;


    DPRINTF(DRAM, "Req %lld: curtick is %lld accessLat is %d " \
                  "readytime is %lld busbusyuntil is %lld. " \
                  "Scheduling at readyTime\n", dram_pkt->addr,
                   curTick(), accessLat, dram_pkt->readyTime, busBusyUntil);

    // Make sure requests are not overlapping on the databus
    assert (dram_pkt->readyTime - busBusyUntil >= tBURST);

    // Update bus state
    busBusyUntil = dram_pkt->readyTime;

    DPRINTF(DRAM,"Access time is %lld\n",
            dram_pkt->readyTime - dram_pkt->entryTime);

    // Update stats
    totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
    totBankLat += bankLat;
    totBusLat += tBURST;
    // queueing latency is whatever is left after bank and bus time
    totQLat += dram_pkt->readyTime - dram_pkt->entryTime - bankLat - tBURST;

    if (rowHitFlag)
        readRowHits++;

    // At this point we're done dealing with the request
    // It will be moved to a separate response queue with a
    // correct readyTime, and eventually be sent back at that
    //time
    moveToRespQ();

    // The absolute soonest you have to start thinking about the
    // next request is the longest access time that can occur before
    // busBusyUntil. Assuming you need to meet tRAS, then precharge,
    // open a new row, and access, it is ~4*tRCD.


    Tick newTime = (busBusyUntil > 4 * tRCD) ?
                   std::max(busBusyUntil - 4 * tRCD, curTick()) :
                   curTick();

    // schedule the next-request event, or pull an already-scheduled
    // one forward if we now know a sooner time
    if (!nextReqEvent.scheduled() && !stopReads){
        schedule(nextReqEvent, newTime);
    } else {
        if (newTime < nextReqEvent.when())
            reschedule(nextReqEvent, newTime);
    }


}
970
971void
972SimpleDRAM::moveToRespQ()
973{
974    // Remove from read queue
975    DRAMPacket* dram_pkt = readQueue.front();
976    readQueue.pop_front();
977
978    // Insert into response queue sorted by readyTime
979    // It will be sent back to the requestor at its
980    // readyTime
981    if (respQueue.empty()) {
982        respQueue.push_front(dram_pkt);
983        assert(!respondEvent.scheduled());
984        assert(dram_pkt->readyTime >= curTick());
985        schedule(respondEvent, dram_pkt->readyTime);
986    } else {
987        bool done = false;
988        list<DRAMPacket*>::iterator i = respQueue.begin();
989        while (!done && i != respQueue.end()) {
990            if ((*i)->readyTime > dram_pkt->readyTime) {
991                respQueue.insert(i, dram_pkt);
992                done = true;
993            }
994            ++i;
995        }
996
997        if (!done)
998            respQueue.push_back(dram_pkt);
999
1000        assert(respondEvent.scheduled());
1001
1002        if (respQueue.front()->readyTime < respondEvent.when()) {
1003            assert(respQueue.front()->readyTime >= curTick());
1004            reschedule(respondEvent, respQueue.front()->readyTime);
1005        }
1006    }
1007}
1008
1009void
1010SimpleDRAM::scheduleNextReq()
1011{
1012    DPRINTF(DRAM, "Reached scheduleNextReq()\n");
1013
1014    // Figure out which read request goes next, and move it to the
1015    // front of the read queue
1016    if (!chooseNextRead()) {
1017        // In the case there is no read request to go next, see if we
1018        // are asked to drain, and if so trigger writes, this also
1019        // ensures that if we hit the write limit we will do this
1020        // multiple times until we are completely drained
1021        if (drainManager && !writeQueue.empty() && !writeEvent.scheduled())
1022            triggerWrites();
1023    } else {
1024        doDRAMAccess(readQueue.front());
1025    }
1026}
1027
1028Tick
1029SimpleDRAM::maxBankFreeAt() const
1030{
1031    Tick banksFree = 0;
1032
1033    for(int i = 0; i < ranksPerChannel; i++)
1034        for(int j = 0; j < banksPerRank; j++)
1035            banksFree = std::max(banks[i][j].freeAt, banksFree);
1036
1037    return banksFree;
1038}
1039
1040void
1041SimpleDRAM::processRefreshEvent()
1042{
1043    DPRINTF(DRAM, "Refreshing at tick %ld\n", curTick());
1044
1045    Tick banksFree = std::max(curTick(), maxBankFreeAt()) + tRFC;
1046
1047    for(int i = 0; i < ranksPerChannel; i++)
1048        for(int j = 0; j < banksPerRank; j++)
1049            banks[i][j].freeAt = banksFree;
1050
1051    schedule(refreshEvent, curTick() + tREFI);
1052}
1053
// Register all statistics with the stats framework and define the
// derived formulas (averages, rates, bandwidths).
void
SimpleDRAM::regStats()
{
    using namespace Stats;

    AbstractMemory::regStats();

    // request counters

    readReqs
        .name(name() + ".readReqs")
        .desc("Total number of read requests seen");

    writeReqs
        .name(name() + ".writeReqs")
        .desc("Total number of write requests seen");

    servicedByWrQ
        .name(name() + ".servicedByWrQ")
        .desc("Number of read reqs serviced by write Q");

    cpuReqs
        .name(name() + ".cpureqs")
        .desc("Reqs generatd by CPU via cache - shady");

    neitherReadNorWrite
        .name(name() + ".neitherReadNorWrite")
        .desc("Reqs where no action is needed");

    perBankRdReqs
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankRdReqs")
        .desc("Track reads on a per bank basis");

    perBankWrReqs
        .init(banksPerRank * ranksPerChannel)
        .name(name() + ".perBankWrReqs")
        .desc("Track writes on a per bank basis");

    // queue occupancy

    avgRdQLen
        .name(name() + ".avgRdQLen")
        .desc("Average read queue length over time")
        .precision(2);

    avgWrQLen
        .name(name() + ".avgWrQLen")
        .desc("Average write queue length over time")
        .precision(2);

    // latency totals and per-request averages; averages exclude reads
    // that were serviced directly out of the write queue

    totQLat
        .name(name() + ".totQLat")
        .desc("Total cycles spent in queuing delays");

    totBankLat
        .name(name() + ".totBankLat")
        .desc("Total cycles spent in bank access");

    totBusLat
        .name(name() + ".totBusLat")
        .desc("Total cycles spent in databus access");

    totMemAccLat
        .name(name() + ".totMemAccLat")
        .desc("Sum of mem lat for all requests");

    avgQLat
        .name(name() + ".avgQLat")
        .desc("Average queueing delay per request")
        .precision(2);

    avgQLat = totQLat / (readReqs - servicedByWrQ);

    avgBankLat
        .name(name() + ".avgBankLat")
        .desc("Average bank access latency per request")
        .precision(2);

    avgBankLat = totBankLat / (readReqs - servicedByWrQ);

    avgBusLat
        .name(name() + ".avgBusLat")
        .desc("Average bus latency per request")
        .precision(2);

    avgBusLat = totBusLat / (readReqs - servicedByWrQ);

    avgMemAccLat
        .name(name() + ".avgMemAccLat")
        .desc("Average memory access latency")
        .precision(2);

    avgMemAccLat = totMemAccLat / (readReqs - servicedByWrQ);

    // retry and row-buffer statistics

    numRdRetry
        .name(name() + ".numRdRetry")
        .desc("Number of times rd buffer was full causing retry");

    numWrRetry
        .name(name() + ".numWrRetry")
        .desc("Number of times wr buffer was full causing retry");

    readRowHits
        .name(name() + ".readRowHits")
        .desc("Number of row buffer hits during reads");

    writeRowHits
        .name(name() + ".writeRowHits")
        .desc("Number of row buffer hits during writes");

    readRowHitRate
        .name(name() + ".readRowHitRate")
        .desc("Row buffer hit rate for reads")
        .precision(2);

    readRowHitRate = (readRowHits / (readReqs - servicedByWrQ)) * 100;

    writeRowHitRate
        .name(name() + ".writeRowHitRate")
        .desc("Row buffer hit rate for writes")
        .precision(2);

    writeRowHitRate = (writeRowHits / writeReqs) * 100;

    // histograms over packet sizes and queue lengths

    readPktSize
        .init(ceilLog2(bytesPerCacheLine) + 1)
        .name(name() + ".readPktSize")
        .desc("Categorize read packet sizes");

     writePktSize
        .init(ceilLog2(bytesPerCacheLine) + 1)
        .name(name() + ".writePktSize")
        .desc("Categorize write packet sizes");

     rdQLenPdf
        .init(readBufferSize)
        .name(name() + ".rdQLenPdf")
        .desc("What read queue length does an incoming req see");

     wrQLenPdf
        .init(writeBufferSize)
        .name(name() + ".wrQLenPdf")
        .desc("What write queue length does an incoming req see");

    // byte counts and derived bandwidth formulas

    bytesRead
        .name(name() + ".bytesRead")
        .desc("Total number of bytes read from memory");

    bytesWritten
        .name(name() + ".bytesWritten")
        .desc("Total number of bytes written to memory");

    bytesConsumedRd
        .name(name() + ".bytesConsumedRd")
        .desc("bytesRead derated as per pkt->getSize()");

    bytesConsumedWr
        .name(name() + ".bytesConsumedWr")
        .desc("bytesWritten derated as per pkt->getSize()");

    avgRdBW
        .name(name() + ".avgRdBW")
        .desc("Average achieved read bandwidth in MB/s")
        .precision(2);

    avgRdBW = (bytesRead / 1000000) / simSeconds;

    avgWrBW
        .name(name() + ".avgWrBW")
        .desc("Average achieved write bandwidth in MB/s")
        .precision(2);

    avgWrBW = (bytesWritten / 1000000) / simSeconds;

    avgConsumedRdBW
        .name(name() + ".avgConsumedRdBW")
        .desc("Average consumed read bandwidth in MB/s")
        .precision(2);

    avgConsumedRdBW = (bytesConsumedRd / 1000000) / simSeconds;

    avgConsumedWrBW
        .name(name() + ".avgConsumedWrBW")
        .desc("Average consumed write bandwidth in MB/s")
        .precision(2);

    avgConsumedWrBW = (bytesConsumedWr / 1000000) / simSeconds;

    peakBW
        .name(name() + ".peakBW")
        .desc("Theoretical peak bandwidth in MB/s")
        .precision(2);

    peakBW = (SimClock::Frequency / tBURST) * bytesPerCacheLine / 1000000;

    busUtil
        .name(name() + ".busUtil")
        .desc("Data bus utilization in percentage")
        .precision(2);

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;

    // inter-arrival gap statistics

    totGap
        .name(name() + ".totGap")
        .desc("Total gap between requests");

    avgGap
        .name(name() + ".avgGap")
        .desc("Average gap between requests")
        .precision(2);

    avgGap = totGap / (readReqs + writeReqs);
}
1265
// Functional (debug) access path: no timing is modelled here.
void
SimpleDRAM::recvFunctional(PacketPtr pkt)
{
    // rely on the abstract memory
    functionalAccess(pkt);
}
1272
1273BaseSlavePort&
1274SimpleDRAM::getSlavePort(const string &if_name, PortID idx)
1275{
1276    if (if_name != "port") {
1277        return MemObject::getSlavePort(if_name, idx);
1278    } else {
1279        return port;
1280    }
1281}
1282
// Drain protocol entry point: returns the number of entities (port
// plus this controller) that still have outstanding state; 0 means
// fully drained. Remembers dm so the last emptying queue can signal
// completion later.
unsigned int
SimpleDRAM::drain(DrainManager *dm)
{
    unsigned int count = port.drain(dm);

    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(writeQueue.empty() && readQueue.empty() &&
          respQueue.empty())) {
        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
                " resp: %d\n", writeQueue.size(), readQueue.size(),
                respQueue.size());
        ++count;
        drainManager = dm;
        // the only part that is not drained automatically over time
        // is the write queue, thus trigger writes if there are any
        // waiting and no reads waiting, otherwise wait until the
        // reads are done
        if (readQueue.empty() && !writeQueue.empty() &&
            !writeEvent.scheduled())
            triggerWrites();
    }

    // report our state to the drain framework
    if (count)
        setDrainState(Drainable::Draining);
    else
        setDrainState(Drainable::Drained);
    return count;
}
1312
// Construct the slave port for the given memory controller.
// NOTE(review): 'queue' is passed to the QueuedSlavePort base before
// 'queue' itself is constructed; this is safe only if the base merely
// stores the reference without using it — confirm against the port API.
SimpleDRAM::MemoryPort::MemoryPort(const std::string& name, SimpleDRAM& _memory)
    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this),
      memory(_memory)
{ }
1317
1318AddrRangeList
1319SimpleDRAM::MemoryPort::getAddrRanges() const
1320{
1321    AddrRangeList ranges;
1322    ranges.push_back(memory.getAddrRange());
1323    return ranges;
1324}
1325
// Functional access through the port: first check packets queued for
// transmission, then fall through to the memory itself.
void
SimpleDRAM::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory.name());

    if (!queue.checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory.recvFunctional(pkt);
    }

    pkt->popLabel();
}
1340
// Atomic access: forward to the controller and return its latency.
Tick
SimpleDRAM::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}
1346
// Timing request: forward to the controller, which returns false if
// the request cannot be accepted right now.
bool
SimpleDRAM::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return memory.recvTimingReq(pkt);
}
1353
// Factory method used by the params framework to instantiate the
// controller from its Python-generated parameter struct.
SimpleDRAM*
SimpleDRAMParams::create()
{
    return new SimpleDRAM(this);
}
1359