/*
 * Copyright (c) 2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Matteo Andreozzi
 */
392474SN/A
402474SN/A#include "debug/Drain.hh"
412474SN/A#include "debug/QOS.hh"
425569Snate@binkert.org#include "mem_sink.hh"
435569Snate@binkert.org#include "sim/system.hh"
445154Sgblack@eecs.umich.edu
452474SN/Anamespace QoS {
462474SN/A
472474SN/AMemSinkCtrl::MemSinkCtrl(const QoSMemSinkCtrlParams* p)
482474SN/A  : MemCtrl(p), requestLatency(p->request_latency),
492474SN/A    responseLatency(p->response_latency),
502474SN/A    memoryPacketSize(p->memory_packet_size),
512474SN/A    readBufferSize(p->read_buffer_size),
522474SN/A    writeBufferSize(p->write_buffer_size), port(name() + ".port", *this),
532474SN/A    retryRdReq(false), retryWrReq(false), nextRequest(0), nextReqEvent(this)
542474SN/A{
552474SN/A    // Resize read and write queue to allocate space
562474SN/A    // for configured QoS priorities
572474SN/A    readQueue.resize(numPriorities());
582474SN/A    writeQueue.resize(numPriorities());
592474SN/A}
602474SN/A
612474SN/AMemSinkCtrl::~MemSinkCtrl()
622474SN/A{}
632474SN/A
642474SN/Avoid
655183Ssaidi@eecs.umich.eduMemSinkCtrl::init()
665183Ssaidi@eecs.umich.edu{
675183Ssaidi@eecs.umich.edu    MemCtrl::init();
682474SN/A
692474SN/A    // Allow unconnected memories as this is used in several ruby
702680Sktlim@umich.edu    // systems at the moment
714997Sgblack@eecs.umich.edu    if (port.isConnected()) {
724997Sgblack@eecs.umich.edu        port.sendRangeChange();
734997Sgblack@eecs.umich.edu    }
744997Sgblack@eecs.umich.edu}
754997Sgblack@eecs.umich.edu
764997Sgblack@eecs.umich.edubool
772474SN/AMemSinkCtrl::readQueueFull(const uint64_t packets) const
782474SN/A{
79    return (totalReadQueueSize + packets > readBufferSize);
80}
81
82bool
83MemSinkCtrl::writeQueueFull(const uint64_t packets) const
84{
85    return (totalWriteQueueSize + packets > writeBufferSize);
86}
87
88Tick
89MemSinkCtrl::recvAtomic(PacketPtr pkt)
90{
91    panic_if(pkt->cacheResponding(),
92             "%s Should not see packets where cache is responding\n",
93             __func__);
94
95    access(pkt);
96    return responseLatency;
97}
98
99void
100MemSinkCtrl::recvFunctional(PacketPtr pkt)
101{
102    pkt->pushLabel(name());
103
104    functionalAccess(pkt);
105
106    pkt->popLabel();
107}
108
109Port &
110MemSinkCtrl::getPort(const std::string &interface, PortID idx)
111{
112    if (interface != "port") {
113        return MemCtrl::getPort(interface, idx);
114    } else {
115        return port;
116    }
117}
118
/**
 * Timing-mode request entry point.
 *
 * Sanity-checks the packet (reads/writes only, no cache responding),
 * asks the QoS scheduler for a priority, and then either enqueues the
 * packet into the per-priority read or write queue or rejects it when
 * the corresponding buffer is full (arming a port retry and bumping the
 * retry stat). Accepted packets are logged with the QoS bookkeeping and
 * the service event is scheduled if not already pending.
 *
 * @param pkt incoming request packet
 * @return true if the packet was accepted, false if the caller must retry
 */
bool
MemSinkCtrl::recvTimingReq(PacketPtr pkt)
{
    // Assume acceptance until a queue-full check proves otherwise
    bool req_accepted = true;

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "%s. Should only see "
             "read and writes at memory controller\n",
             __func__);

    panic_if(pkt->cacheResponding(),
             "%s. Should not see packets where cache is responding\n",
             __func__);

    DPRINTF(QOS,
            "%s: MASTER %s request %s addr %lld size %d\n",
            __func__,
            _system->getMasterName(pkt->req->masterId()),
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    // Number of fixed-size memory packets this request occupies in a
    // queue (size rounded up to memoryPacketSize granularity)
    uint64_t required_entries = divCeil(pkt->getSize(), memoryPacketSize);

    assert(required_entries);

    // Ask the QoS scheduler which priority bucket this packet belongs to.
    // NOTE(review): this runs before the queue-full checks below, so the
    // QoS scheduling state observes the packet even when it is then
    // rejected -- confirm this ordering is intended.
    uint8_t pkt_priority = qosSchedule({&readQueue, &writeQueue},
                                       memoryPacketSize, pkt);

    if (pkt->isRead()) {
        if (readQueueFull(required_entries)) {
            DPRINTF(QOS,
                    "%s Read queue full, not accepting\n", __func__);
            // Remember that we have to retry this port once a read
            // has been serviced
            retryRdReq = true;
            numReadRetries++;
            req_accepted = false;
        } else {
            // Enqueue the incoming packet into the QoS priority queue
            // selected above and notify the queueing policy
            readQueue.at(pkt_priority).push_back(pkt);
            queuePolicy->enqueuePacket(pkt);
        }
    } else {
        if (writeQueueFull(required_entries)) {
            DPRINTF(QOS,
                    "%s Write queue full, not accepting\n", __func__);
            // Remember that we have to retry this port once a write
            // has been serviced
            retryWrReq = true;
            numWriteRetries++;
            req_accepted = false;
        } else {
            // Enqueue the incoming packet into the QoS priority queue
            // selected above and notify the queueing policy
            writeQueue.at(pkt_priority).push_back(pkt);
            queuePolicy->enqueuePacket(pkt);
        }
    }

    if (req_accepted) {
        // Only accepted packets enter the QoS request log
        logRequest(pkt->isRead()? READ : WRITE,
                   pkt->req->masterId(),
                   pkt->qosValue(),
                   pkt->getAddr(),
                   required_entries);
    }

    // Arm the service event if it is not already pending; never schedule
    // it earlier than the previously computed nextRequest time
    if (!nextReqEvent.scheduled()) {
        DPRINTF(QOS,
                "%s scheduling next request at "
                "time %d (next is %d)\n", __func__,
                std::max(curTick(), nextRequest), nextRequest);
        schedule(nextReqEvent, std::max(curTick(), nextRequest));
    }
    return req_accepted;
}
197
/**
 * Service one queued request.
 *
 * Picks the bus direction via the QoS bus-state machinery, scans that
 * direction's priority queues from highest priority downwards, lets the
 * queue policy pick a packet from the first non-empty queue, performs
 * the access, schedules the response on the port, wakes a retrying
 * requester if one is pending for the current direction, and re-arms
 * itself while work remains.
 */
void
MemSinkCtrl::processNextReqEvent()
{
    PacketPtr pkt = nullptr;

    // Evaluate the bus direction for this service slot
    busStateNext = selectNextBusState();

    // Record turnaround stats for a possible direction switch
    recordTurnaroundStats();

    // Commit busStateNext into the current bus state
    setCurrentBusState();

    // Select the queue set matching the current direction
    std::vector<PacketQueue>* queue_ptr = (busState == READ ? &readQueue :
                                                              &writeQueue);

    DPRINTF(QOS,
            "%s DUMPING %s queues status\n", __func__,
            (busState == WRITE ? "WRITE" : "READ"));

    // Debug-only dump of per-priority occupancy and the masters whose
    // packets are queued; guarded so the string building only happens
    // when QOS tracing is enabled
    if (DTRACE(QOS)) {
        for (uint8_t i = 0; i < numPriorities(); ++i) {
            std::string plist = "";
            for (auto& e : (busState == WRITE ? writeQueue[i]: readQueue[i])) {
                plist += (std::to_string(e->req->masterId())) + " ";
            }
            DPRINTF(QOS,
                    "%s priority Queue [%i] contains %i elements, "
                    "packets are: [%s]\n", __func__, i,
                    busState == WRITE ? writeQueueSizes[i] :
                                        readQueueSizes[i],
                    plist);
        }
    }

    uint8_t curr_prio = numPriorities();

    // Walk the priority queues from highest priority (back of the
    // vector) to lowest, stopping at the first non-empty one
    for (auto queue = (*queue_ptr).rbegin();
         queue != (*queue_ptr).rend(); ++queue) {

        curr_prio--;

        DPRINTF(QOS,
                "%s checking %s queue [%d] priority [%d packets]\n",
                __func__, (busState == READ? "READ" : "WRITE"),
                curr_prio, queue->size());

        if (!queue->empty()) {
            // The queue policy decides which packet within the chosen
            // priority queue gets serviced
            auto p_it = queuePolicy->selectPacket(&(*queue));
            pkt = *p_it;
            queue->erase(p_it);

            DPRINTF(QOS,
                    "%s scheduling packet address %d for master %s from "
                    "priority queue %d\n", __func__, pkt->getAddr(),
                    _system->getMasterName(pkt->req->masterId()),
                    curr_prio);
            break;
        }
    }

    // The event only fires when at least one queue is non-empty,
    // so a packet must have been found
    assert(pkt);

    // Setup next request service time - do it here as retry request
    // hands over control to the port
    nextRequest = curTick() + requestLatency;

    // Queue entries freed by servicing this packet
    uint64_t removed_entries = divCeil(pkt->getSize(), memoryPacketSize);

    DPRINTF(QOS,
            "%s scheduled packet address %d for master %s size is %d, "
            "corresponds to %d memory packets\n", __func__, pkt->getAddr(),
            _system->getMasterName(pkt->req->masterId()),
            pkt->getSize(), removed_entries);

    // Every packet reaching this point must expect a response
    panic_if(!pkt->needsResponse(),
        "%s response not required\n", __func__);

    // Do the actual memory access which also turns the packet
    // into a response
    access(pkt);

    // Update the QoS response bookkeeping
    logResponse(pkt->isRead()? READ : WRITE,
                pkt->req->masterId(),
                pkt->qosValue(),
                pkt->getAddr(),
                removed_entries, responseLatency);

    // Hand the response to the queued port for delivery after the
    // configured response latency
    port.schedTimingResp(pkt, curTick() + responseLatency);
    DPRINTF(QOS,
            "%s response scheduled at time %d\n",
            __func__, curTick() + responseLatency);

    // Finally - handle retry requests - this hands control over
    // to the port (which may immediately re-enter recvTimingReq),
    // so do it last
    if (busState == READ && retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    } else if (busState == WRITE && retryWrReq) {
        retryWrReq = false;
        port.sendRetryReq();
    }

    // Re-arm the service event while any queue still holds work and
    // the retry path above has not already scheduled it
    if ((totalReadQueueSize || totalWriteQueueSize) &&
        !nextReqEvent.scheduled()) {

        schedule(nextReqEvent, curTick() + requestLatency);
        DPRINTF(QOS,
                "%s scheduling next request event at tick %d\n",
                __func__, curTick() + requestLatency);
    }
}
317
318DrainState
319MemSinkCtrl::drain()
320{
321    if (totalReadQueueSize || totalWriteQueueSize) {
322        DPRINTF(Drain,
323                "%s queues have requests, waiting to drain\n",
324                __func__);
325        return DrainState::Draining;
326    } else {
327        return DrainState::Drained;
328    }
329}
330
331void
332MemSinkCtrl::regStats()
333{
334    MemCtrl::regStats();
335
336    // Initialize all the stats
337    using namespace Stats;
338
339    numReadRetries.name(name() + ".numReadRetries")
340        .desc("Number of read retries");
341    numWriteRetries.name(name() + ".numWriteRetries")
342        .desc("Number of write retries");
343}
344
// Slave port backed by a response packet queue.
// NOTE(review): `queue` is handed to the QueuedSlavePort base before it
// is itself constructed (bases initialize before members) -- this is
// only safe as long as the base merely stores the reference; confirm
// against the QueuedSlavePort implementation.
MemSinkCtrl::MemoryPort::MemoryPort(const std::string& n,
                                    MemSinkCtrl& m)
  : QueuedSlavePort(n, &m, queue, true), memory(m), queue(memory, *this, true)
{}
349
350AddrRangeList
351MemSinkCtrl::MemoryPort::getAddrRanges() const
352{
353    AddrRangeList ranges;
354    ranges.push_back(memory.getAddrRange());
355    return ranges;
356}
357
358Tick
359MemSinkCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
360{
361    return memory.recvAtomic(pkt);
362}
363
364void
365MemSinkCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
366{
367    pkt->pushLabel(memory.name());
368
369    if (!queue.trySatisfyFunctional(pkt)) {
370        // Default implementation of SimpleTimingPort::recvFunctional()
371        // calls recvAtomic() and throws away the latency; we can save a
372        // little here by just not calculating the latency.
373        memory.recvFunctional(pkt);
374    }
375
376    pkt->popLabel();
377}
378
379bool
380MemSinkCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
381{
382    return memory.recvTimingReq(pkt);
383}
384
385} // namespace QoS
386
387QoS::MemSinkCtrl*
388QoSMemSinkCtrlParams::create()
389{
390    return new QoS::MemSinkCtrl(this);
391}
392
393