55a56
> #include "debug/QOS.hh"
62c63
< AbstractMemory(p),
---
> QoS::MemCtrl(p),
65,66d65
< busState(READ),
< busStateNext(READ),
109a109,110
> readQueue.resize(p->qos_priorities);
> writeQueue.resize(p->qos_priorities);
110a112
>
189c191
< AbstractMemory::init();
---
> MemCtrl::init();
286c288
< readBufferSize, readQueue.size() + respQueue.size(),
---
> readBufferSize, totalReadQueueSize + respQueue.size(),
289,290c291,292
< return
< (readQueue.size() + respQueue.size() + neededEntries) > readBufferSize;
---
> auto rdsize_new = totalReadQueueSize + respQueue.size() + neededEntries;
> return rdsize_new > readBufferSize;
297,298c299,302
< writeBufferSize, writeQueue.size(), neededEntries);
< return (writeQueue.size() + neededEntries) > writeBufferSize;
---
> writeBufferSize, totalWriteQueueSize, neededEntries);
>
> auto wrsize_new = (totalWriteQueueSize + neededEntries);
> return wrsize_new > writeBufferSize;
429a434
> masterReadAccesses[pkt->masterId()]++;
438,448c443,459
< for (const auto& p : writeQueue) {
< // check if the read is subsumed in the write queue
< // packet we are looking at
< if (p->addr <= addr && (addr + size) <= (p->addr + p->size)) {
< foundInWrQ = true;
< servicedByWrQ++;
< pktsServicedByWrQ++;
< DPRINTF(DRAM, "Read to addr %lld with size %d serviced by "
< "write queue\n", addr, size);
< bytesReadWrQ += burstSize;
< break;
---
> for (const auto& vec : writeQueue) {
> for (const auto& p : vec) {
> // check if the read is subsumed in the write queue
> // packet we are looking at
> if (p->addr <= addr &&
> ((addr + size) <= (p->addr + p->size))) {
>
> foundInWrQ = true;
> servicedByWrQ++;
> pktsServicedByWrQ++;
> DPRINTF(DRAM,
> "Read to addr %lld with size %d serviced by "
> "write queue\n",
> addr, size);
> bytesReadWrQ += burstSize;
> break;
> }
468c479
< rdQLenPdf[readQueue.size() + respQueue.size()]++;
---
> rdQLenPdf[totalReadQueueSize + respQueue.size()]++;
472c483
< readQueue.push_back(dram_pkt);
---
> readQueue[dram_pkt->qosValue()].push_back(dram_pkt);
474d484
< // increment read entries of the rank
476a487,490
> // log packet
> logRequest(MemCtrl::READ, pkt->masterId(), pkt->qosValue(),
> dram_pkt->addr, 1);
>
478c492
< avgRdQLen = readQueue.size() + respQueue.size();
---
> avgRdQLen = totalReadQueueSize + respQueue.size();
517a532
> masterWriteAccesses[pkt->masterId()]++;
529,530c544,545
< assert(writeQueue.size() < writeBufferSize);
< wrQLenPdf[writeQueue.size()]++;
---
> assert(totalWriteQueueSize < writeBufferSize);
> wrQLenPdf[totalWriteQueueSize]++;
534c549
< writeQueue.push_back(dram_pkt);
---
> writeQueue[dram_pkt->qosValue()].push_back(dram_pkt);
536d550
< assert(writeQueue.size() == isInWriteQueue.size());
537a552,557
> // log packet
> logRequest(MemCtrl::WRITE, pkt->masterId(), pkt->qosValue(),
> dram_pkt->addr, 1);
>
> assert(totalWriteQueueSize == isInWriteQueue.size());
>
539c559
< avgWrQLen = writeQueue.size();
---
> avgWrQLen = totalWriteQueueSize;
571c591,593
< DRAMCtrl::printQs() const {
---
> DRAMCtrl::printQs() const
> {
> #if TRACING_ON
573,574c595,598
< for (auto i = readQueue.begin() ; i != readQueue.end() ; ++i) {
< DPRINTF(DRAM, "Read %lu\n", (*i)->addr);
---
> for (const auto& queue : readQueue) {
> for (const auto& packet : queue) {
> DPRINTF(DRAM, "Read %lu\n", packet->addr);
> }
575a600
>
577,578c602,603
< for (auto i = respQueue.begin() ; i != respQueue.end() ; ++i) {
< DPRINTF(DRAM, "Response %lu\n", (*i)->addr);
---
> for (const auto& packet : respQueue) {
> DPRINTF(DRAM, "Response %lu\n", packet->addr);
579a605
>
581,582c607,610
< for (auto i = writeQueue.begin() ; i != writeQueue.end() ; ++i) {
< DPRINTF(DRAM, "Write %lu\n", (*i)->addr);
---
> for (const auto& queue : writeQueue) {
> for (const auto& packet : queue) {
> DPRINTF(DRAM, "Write %lu\n", packet->addr);
> }
583a612
> #endif // TRACING_ON
613a643,645
> // run the QoS scheduler and assign a QoS priority value to the packet
> qosSchedule( { &readQueue, &writeQueue }, burstSize, pkt);
>
724c756
< writeQueue.empty() && readQueue.empty() && allRanksDrained()) {
---
> !totalWriteQueueSize && !totalReadQueueSize && allRanksDrained()) {
739,740c771,772
< bool
< DRAMCtrl::chooseNext(std::deque<DRAMPacket*>& queue, Tick extra_col_delay)
---
> DRAMCtrl::DRAMPacketQueue::iterator
> DRAMCtrl::chooseNext(DRAMPacketQueue& queue, Tick extra_col_delay)
742,746c774
< // This method does the arbitration between requests. The chosen
< // packet is simply moved to the head of the queue. The other
< // methods know that this is the place to look. For example, with
< // FCFS, this method does nothing
< assert(!queue.empty());
---
> // This method does the arbitration between requests.
748,760c776
< // bool to indicate if a packet to an available rank is found
< bool found_packet = false;
< if (queue.size() == 1) {
< DRAMPacket* dram_pkt = queue.front();
< // available rank corresponds to state refresh idle
< if (ranks[dram_pkt->rank]->inRefIdleState()) {
< found_packet = true;
< DPRINTF(DRAM, "Single request, going to a free rank\n");
< } else {
< DPRINTF(DRAM, "Single request, going to a busy rank\n");
< }
< return found_packet;
< }
---
> DRAMCtrl::DRAMPacketQueue::iterator ret = queue.end();
762,765c778,781
< if (memSchedPolicy == Enums::fcfs) {
< // check if there is a packet going to a free rank
< for (auto i = queue.begin(); i != queue.end() ; ++i) {
< DRAMPacket* dram_pkt = *i;
---
> if (!queue.empty()) {
> if (queue.size() == 1) {
> // available rank corresponds to state refresh idle
> DRAMPacket* dram_pkt = *(queue.begin());
767,770c783,786
< queue.erase(i);
< queue.push_front(dram_pkt);
< found_packet = true;
< break;
---
> ret = queue.begin();
> DPRINTF(DRAM, "Single request, going to a free rank\n");
> } else {
> DPRINTF(DRAM, "Single request, going to a busy rank\n");
771a788,800
> } else if (memSchedPolicy == Enums::fcfs) {
> // check if there is a packet going to a free rank
> for (auto i = queue.begin(); i != queue.end(); ++i) {
> DRAMPacket* dram_pkt = *i;
> if (ranks[dram_pkt->rank]->inRefIdleState()) {
> ret = i;
> break;
> }
> }
> } else if (memSchedPolicy == Enums::frfcfs) {
> ret = chooseNextFRFCFS(queue, extra_col_delay);
> } else {
> panic("No scheduling policy chosen\n");
773,777c802,803
< } else if (memSchedPolicy == Enums::frfcfs) {
< found_packet = reorderQueue(queue, extra_col_delay);
< } else
< panic("No scheduling policy chosen\n");
< return found_packet;
---
> }
> return ret;
780,781c806,807
< bool
< DRAMCtrl::reorderQueue(std::deque<DRAMPacket*>& queue, Tick extra_col_delay)
---
> DRAMCtrl::DRAMPacketQueue::iterator
> DRAMCtrl::chooseNextFRFCFS(DRAMPacketQueue& queue, Tick extra_col_delay)
814,815c840,841
< const Tick col_allowed_at = dram_pkt->isRead ? bank.rdAllowedAt :
< bank.wrAllowedAt;
---
> const Tick col_allowed_at = dram_pkt->isRead() ? bank.rdAllowedAt :
> bank.wrAllowedAt;
816a843,845
> DPRINTF(DRAM, "%s checking packet in bank %d\n",
> __func__, dram_pkt->bankRef.bank);
>
819a849,853
>
> DPRINTF(DRAM,
> "%s bank %d - Rank %d available\n", __func__,
> dram_pkt->bankRef.bank, dram_pkt->rankRef.rank);
>
830c864
< DPRINTF(DRAM, "Seamless row buffer hit\n");
---
> DPRINTF(DRAM, "%s Seamless row buffer hit\n", __func__);
841c875
< DPRINTF(DRAM, "Prepped row buffer hit\n");
---
> DPRINTF(DRAM, "%s Prepped row buffer hit\n", __func__);
868a903,905
> } else {
> DPRINTF(DRAM, "%s bank %d - Rank %d not available\n", __func__,
> dram_pkt->bankRef.bank, dram_pkt->rankRef.rank);
872,876c909,910
< if (selected_pkt_it != queue.end()) {
< DRAMPacket* selected_pkt = *selected_pkt_it;
< queue.erase(selected_pkt_it);
< queue.push_front(selected_pkt);
< return true;
---
> if (selected_pkt_it == queue.end()) {
> DPRINTF(DRAM, "%s no available ranks found\n", __func__);
879c913
< return false;
---
> return selected_pkt_it;
1112c1146
< const Tick col_allowed_at = dram_pkt->isRead ?
---
> const Tick col_allowed_at = dram_pkt->isRead() ?
1139c1173
< dly_to_rd_cmd = dram_pkt->isRead ?
---
> dly_to_rd_cmd = dram_pkt->isRead() ?
1141c1175
< dly_to_wr_cmd = dram_pkt->isRead ?
---
> dly_to_wr_cmd = dram_pkt->isRead() ?
1146,1147c1180,1181
< dly_to_rd_cmd = dram_pkt->isRead ? tBURST : wrToRdDly;
< dly_to_wr_cmd = dram_pkt->isRead ? rdToWrDly : tBURST;
---
> dly_to_rd_cmd = dram_pkt->isRead() ? tBURST : wrToRdDly;
> dly_to_wr_cmd = dram_pkt->isRead() ? rdToWrDly : tBURST;
1170c1204
< dram_pkt->isRead ? cmd_at + tRTP :
---
> dram_pkt->isRead() ? cmd_at + tRTP :
1198,1203c1232,1233
< const deque<DRAMPacket*>& queue = dram_pkt->isRead ? readQueue :
< writeQueue;
< auto p = queue.begin();
< // make sure we are not considering the packet that we are
< // currently dealing with (which is the head of the queue)
< ++p;
---
> const std::vector<DRAMPacketQueue>& queue =
> dram_pkt->isRead() ? readQueue : writeQueue;
1205,1216c1235,1257
< // keep on looking until we find a hit or reach the end of the queue
< // 1) if a hit is found, then both open and close adaptive policies keep
< // the page open
< // 2) if no hit is found, got_bank_conflict is set to true if a bank
< // conflict request is waiting in the queue
< while (!got_more_hits && p != queue.end()) {
< bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
< (dram_pkt->bank == (*p)->bank);
< bool same_row = dram_pkt->row == (*p)->row;
< got_more_hits |= same_rank_bank && same_row;
< got_bank_conflict |= same_rank_bank && !same_row;
< ++p;
---
> for (uint8_t i = 0; i < numPriorities(); ++i) {
> auto p = queue[i].begin();
> // keep on looking until we find a hit or reach the end of the queue
> // 1) if a hit is found, then both open and close adaptive policies keep
> // the page open
> // 2) if no hit is found, got_bank_conflict is set to true if a bank
> // conflict request is waiting in the queue
> // 3) make sure we are not considering the packet that we are
> // currently dealing with
> while (!got_more_hits && p != queue[i].end()) {
> if (dram_pkt != (*p)) {
> bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
> (dram_pkt->bank == (*p)->bank);
>
> bool same_row = dram_pkt->row == (*p)->row;
> got_more_hits |= same_rank_bank && same_row;
> got_bank_conflict |= same_rank_bank && !same_row;
> }
> ++p;
> }
>
> if (got_more_hits)
> break;
1228c1269
< std::string mem_cmd = dram_pkt->isRead ? "RD" : "WR";
---
> std::string mem_cmd = dram_pkt->isRead() ? "RD" : "WR";
1263c1304
< if (dram_pkt->isRead) {
---
> if (dram_pkt->isRead()) {
1271a1313,1315
> masterReadTotalLat[dram_pkt->masterId()] +=
> dram_pkt->readyTime - dram_pkt->entryTime;
>
1273a1318
> masterReadBytes[dram_pkt->masterId()] += dram_pkt->size;
1279a1325,1327
> masterWriteBytes[dram_pkt->masterId()] += dram_pkt->size;
> masterWriteTotalLat[dram_pkt->masterId()] +=
> dram_pkt->readyTime - dram_pkt->entryTime;
1285a1334,1369
> // transition is handled by QoS algorithm if enabled
> if (turnPolicy) {
> // select bus state - only done if QoS algorithms are in use
> busStateNext = selectNextBusState();
> }
>
> // detect bus state change
> bool switched_cmd_type = (busState != busStateNext);
> // record stats
> recordTurnaroundStats();
>
> DPRINTF(DRAM, "QoS Turnarounds selected state %s %s\n",
> (busState==MemCtrl::READ)?"READ":"WRITE",
> switched_cmd_type?"[turnaround triggered]":"");
>
> if (switched_cmd_type) {
> if (busState == READ) {
> DPRINTF(DRAM,
> "Switching to writes after %d reads with %d reads "
> "waiting\n", readsThisTime, totalReadQueueSize);
> rdPerTurnAround.sample(readsThisTime);
> readsThisTime = 0;
> } else {
> DPRINTF(DRAM,
> "Switching to reads after %d writes with %d writes "
> "waiting\n", writesThisTime, totalWriteQueueSize);
> wrPerTurnAround.sample(writesThisTime);
> writesThisTime = 0;
> }
> }
>
> // updates current state
> busState = busStateNext;
>
> // check ranks for refresh/wakeup - uses busStateNext, so done after
> // turnaround decisions
1326,1353d1409
< // pre-emptively set to false. Overwrite if in transitioning to
< // a new state
< bool switched_cmd_type = false;
< if (busState != busStateNext) {
< if (busState == READ) {
< DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
< "waiting\n", readsThisTime, readQueue.size());
<
< // sample and reset the read-related stats as we are now
< // transitioning to writes, and all reads are done
< rdPerTurnAround.sample(readsThisTime);
< readsThisTime = 0;
<
< // now proceed to do the actual writes
< switched_cmd_type = true;
< } else {
< DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
< "waiting\n", writesThisTime, writeQueue.size());
<
< wrPerTurnAround.sample(writesThisTime);
< writesThisTime = 0;
<
< switched_cmd_type = true;
< }
< // update busState to match next state until next transition
< busState = busStateNext;
< }
<
1360c1416
< if (readQueue.empty()) {
---
> if (totalReadQueueSize == 0) {
1364c1420
< if (!writeQueue.empty() &&
---
> if (!(totalWriteQueueSize == 0) &&
1366c1422
< writeQueue.size() > writeLowThreshold)) {
---
> totalWriteQueueSize > writeLowThreshold)) {
1367a1424
> DPRINTF(DRAM, "Switching to writes due to read queue empty\n");
1386,1387d1442
< // bool to check if there is a read to a free rank
< bool found_read = false;
1389,1393c1444,1446
< // Figure out which read request goes next, and move it to the
< // front of the read queue
< // If we are changing command type, incorporate the minimum
< // bus turnaround delay which will be tCS (different rank) case
< found_read = chooseNext(readQueue, switched_cmd_type ? tCS : 0);
---
> bool read_found = false;
> DRAMPacketQueue::iterator to_read;
> uint8_t prio = numPriorities();
1394a1448,1468
> for (auto queue = readQueue.rbegin();
> queue != readQueue.rend(); ++queue) {
>
> prio--;
>
> DPRINTF(QOS,
> "DRAM controller checking READ queue [%d] priority [%d elements]\n",
> prio, queue->size());
>
> // Figure out which read request goes next
> // If we are changing command type, incorporate the minimum
> // bus turnaround delay which will be tCS (different rank) case
> to_read = chooseNext((*queue), switched_cmd_type ? tCS : 0);
>
> if (to_read != queue->end()) {
> // candidate read found
> read_found = true;
> break;
> }
> }
>
1400c1474,1475
< if (!found_read)
---
> if (!read_found) {
> DPRINTF(DRAM, "No Reads Found - exiting\n");
1401a1477
> }
1403c1479,1480
< DRAMPacket* dram_pkt = readQueue.front();
---
> auto dram_pkt = *to_read;
>
1408,1410d1484
< // At this point we're done dealing with the request
< readQueue.pop_front();
<
1413d1486
<
1417a1491,1496
> // log the response
> logResponse(MemCtrl::READ, (*to_read)->masterId(),
> dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
> dram_pkt->readyTime - dram_pkt->entryTime);
>
>
1419c1498
< // requestor at its readyTime
---
> // requester at its readyTime
1431c1510
< if (writeQueue.size() > writeHighThreshold) {
---
> if (totalWriteQueueSize > writeHighThreshold) {
1433a1513,1515
>
> // remove the request from the queue - the iterator is no longer valid
> readQueue[dram_pkt->qosValue()].erase(to_read);
1444,1445d1525
< // bool to check if write to free rank is found
< bool found_write = false;
1447,1450c1527,1529
< // If we are changing command type, incorporate the minimum
< // bus turnaround delay
< found_write = chooseNext(writeQueue,
< switched_cmd_type ? std::min(tRTW, tCS) : 0);
---
> bool write_found = false;
> DRAMPacketQueue::iterator to_write;
> uint8_t prio = numPriorities();
1451a1531,1550
> for (auto queue = writeQueue.rbegin();
> queue != writeQueue.rend(); ++queue) {
>
> prio--;
>
> DPRINTF(QOS,
> "DRAM controller checking WRITE queue [%d] priority [%d elements]\n",
> prio, queue->size());
>
> // If we are changing command type, incorporate the minimum
> // bus turnaround delay
> to_write = chooseNext((*queue),
> switched_cmd_type ? std::min(tRTW, tCS) : 0);
>
> if (to_write != queue->end()) {
> write_found = true;
> break;
> }
> }
>
1457c1556,1557
< if (!found_write)
---
> if (!write_found) {
> DPRINTF(DRAM, "No Writes Found - exiting\n");
1458a1559
> }
1460c1561,1562
< DRAMPacket* dram_pkt = writeQueue.front();
---
> auto dram_pkt = *to_write;
>
1467,1468d1568
< writeQueue.pop_front();
<
1484c1584,1585
< dram_pkt-> readyTime) {
---
> dram_pkt->readyTime) {
>
1488a1590,1599
>
> // log the response
> logResponse(MemCtrl::WRITE, dram_pkt->masterId(),
> dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
> dram_pkt->readyTime - dram_pkt->entryTime);
>
>
> // remove the request from the queue - the iterator is no longer valid
> writeQueue[dram_pkt->qosValue()].erase(to_write);
>
1495,1498c1606,1612
< if (writeQueue.empty() ||
< (writeQueue.size() + minWritesPerSwitch < writeLowThreshold &&
< drainState() != DrainState::Draining) ||
< (!readQueue.empty() && writesThisTime >= minWritesPerSwitch)) {
---
> bool below_threshold =
> totalWriteQueueSize + minWritesPerSwitch < writeLowThreshold;
>
> if (totalWriteQueueSize == 0 ||
> (below_threshold && drainState() != DrainState::Draining) ||
> (totalReadQueueSize && writesThisTime >= minWritesPerSwitch)) {
>
1517c1631
< if (retryWrReq && writeQueue.size() < writeBufferSize) {
---
> if (retryWrReq && totalWriteQueueSize < writeBufferSize) {
1524c1638
< DRAMCtrl::minBankPrep(const deque<DRAMPacket*>& queue,
---
> DRAMCtrl::minBankPrep(const DRAMPacketQueue& queue,
2389c2503
< AbstractMemory::regStats();
---
> MemCtrl::regStats();
2532c2646
< .init(maxAccessesPerRow)
---
> .init(maxAccessesPerRow ? maxAccessesPerRow : rowBufferSize)
2642a2757,2838
>
> // per-master bytes read and written to memory
> masterReadBytes
> .init(_system->maxMasters())
> .name(name() + ".masterReadBytes")
> .desc("Per-master bytes read from memory")
> .flags(nozero | nonan);
>
> masterWriteBytes
> .init(_system->maxMasters())
> .name(name() + ".masterWriteBytes")
> .desc("Per-master bytes write to memory")
> .flags(nozero | nonan);
>
> // per-master bytes read and written to memory rate
> masterReadRate.name(name() + ".masterReadRate")
> .desc("Per-master bytes read from memory rate (Bytes/sec)")
> .flags(nozero | nonan)
> .precision(12);
>
> masterReadRate = masterReadBytes/simSeconds;
>
> masterWriteRate
> .name(name() + ".masterWriteRate")
> .desc("Per-master bytes write to memory rate (Bytes/sec)")
> .flags(nozero | nonan)
> .precision(12);
>
> masterWriteRate = masterWriteBytes/simSeconds;
>
> masterReadAccesses
> .init(_system->maxMasters())
> .name(name() + ".masterReadAccesses")
> .desc("Per-master read serviced memory accesses")
> .flags(nozero);
>
> masterWriteAccesses
> .init(_system->maxMasters())
> .name(name() + ".masterWriteAccesses")
> .desc("Per-master write serviced memory accesses")
> .flags(nozero);
>
>
> masterReadTotalLat
> .init(_system->maxMasters())
> .name(name() + ".masterReadTotalLat")
> .desc("Per-master read total memory access latency")
> .flags(nozero | nonan);
>
> masterReadAvgLat.name(name() + ".masterReadAvgLat")
> .desc("Per-master read average memory access latency")
> .flags(nonan)
> .precision(2);
>
> masterReadAvgLat = masterReadTotalLat/masterReadAccesses;
>
> masterWriteTotalLat
> .init(_system->maxMasters())
> .name(name() + ".masterWriteTotalLat")
> .desc("Per-master write total memory access latency")
> .flags(nozero | nonan);
>
> masterWriteAvgLat.name(name() + ".masterWriteAvgLat")
> .desc("Per-master write average memory access latency")
> .flags(nonan)
> .precision(2);
>
> masterWriteAvgLat = masterWriteTotalLat/masterWriteAccesses;
>
> for (int i = 0; i < _system->maxMasters(); i++) {
> const std::string master = _system->getMasterName(i);
> masterReadBytes.subname(i, master);
> masterReadRate.subname(i, master);
> masterWriteBytes.subname(i, master);
> masterWriteRate.subname(i, master);
> masterReadAccesses.subname(i, master);
> masterWriteAccesses.subname(i, master);
> masterReadTotalLat.subname(i, master);
> masterReadAvgLat.subname(i, master);
> masterWriteTotalLat.subname(i, master);
> masterWriteAvgLat.subname(i, master);
> }
2667c2863
< if (!(writeQueue.empty() && readQueue.empty() && respQueue.empty() &&
---
> if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() &&
2671c2867
< " resp: %d\n", writeQueue.size(), readQueue.size(),
---
> " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
2676c2872
< if (!writeQueue.empty() && !nextReqEvent.scheduled()) {
---
> if (!totalWriteQueueSize && !nextReqEvent.scheduled()) {