DMASequencer.cc revision 10910:32f3d1c454ec
/*
 * Copyright (c) 2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <memory>

#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/RubyDma.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/SequencerMsg.hh"
#include "mem/ruby/system/DMASequencer.hh"
#include "mem/ruby/system/System.hh"
#include "sim/system.hh"

DMASequencer::DMASequencer(const Params *p)
    : MemObject(p), m_version(p->version), m_controller(NULL),
      m_mandatory_q_ptr(NULL), m_usingRubyTester(p->using_ruby_tester),
      slave_port(csprintf("%s.slave", name()), this, 0, p->ruby_system,
                 p->ruby_system->getAccessBackingStore()),
      drainManager(NULL), system(p->system), retry(false)
{
    assert(m_version != -1);
}

void
DMASequencer::init()
{
    MemObject::init();
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    m_mandatory_q_ptr->setSender(this);
    m_is_busy = false;
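    // Mask that extracts the byte offset within a Ruby cache block.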
    m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());

    slave_port.sendRangeChange();
}

BaseSlavePort &
DMASequencer::getSlavePort(const std::string &if_name, PortID idx)
{
    // used by DMA devices to connect to the Ruby interconnect
    if (if_name != "slave") {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return slave_port;
    }
}

DMASequencer::MemSlavePort::MemSlavePort(const std::string &_name,
    DMASequencer *_port, PortID id, RubySystem* _ruby_system,
    bool _access_backing_store)
    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
      ruby_system(_ruby_system), access_backing_store(_access_backing_store)
{
    DPRINTF(RubyDma, "Created slave memport on ruby sequencer %s\n", _name);
}

bool
DMASequencer::MemSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);

    if (pkt->memInhibitAsserted())
        panic("DMASequencer should never see an inhibited request\n");

    assert(isPhysMemAddress(pkt->getAddr()));
    assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = seq->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(RubyDma, "Request %s 0x%x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    if (!seq->m_usingRubyTester) {
        seq->retry = true;
    }

    DPRINTF(RubyDma, "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    return false;
}

void
DMASequencer::ruby_hit_callback(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Hit callback for %s 0x%x\n", pkt->cmdString(),
            pkt->getAddr());

    // The packet was destined for memory and has not yet been turned
    // into a response
    assert(system->isMemAddr(pkt->getAddr()));
    assert(pkt->isRequest());
    slave_port.hitCallback(pkt);

    // If we had to stall the slave port, wake it up because
    // the sequencer likely has free resources now.
    if (retry) {
        retry = false;
        DPRINTF(RubyDma, "Sequencer may now be free. SendRetry to port %s\n",
                slave_port.name());
        slave_port.sendRetryReq();
    }

    testDrainComplete();
}

void
DMASequencer::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainManager != NULL) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "DMASequencer done draining, signaling drain done\n");
            drainManager->signalDrainDone();
            // Clear the drain manager once we're done with it.
            drainManager = NULL;
        }
    }
}

unsigned int
DMASequencer::getChildDrainCount(DrainManager *dm)
{
    int count = 0;
    count += slave_port.drain(dm);
    DPRINTF(Config, "count after slave port check %d\n", count);
    return count;
}

unsigned int
DMASequencer::drain(DrainManager *dm)
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    // If the DMASequencer is not empty, it needs to clear all outstanding
    // requests before it can call drainManager->signalDrainDone()
    DPRINTF(Config, "outstanding count %d\n", outstandingCount());
    bool need_drain = outstandingCount() > 0;

    //
    // Also, get the number of child ports that will need to clear their
    // buffered requests before they call drainManager->signalDrainDone()
    //
    unsigned int child_drain_count = getChildDrainCount(dm);

    // Set status
    if (need_drain) {
        drainManager = dm;

        DPRINTF(Drain, "DMASequencer not drained\n");
        setDrainState(DrainState::Draining);
        return child_drain_count + 1;
    }

    drainManager = NULL;
    setDrainState(DrainState::Drained);
    return child_drain_count;
}

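// Turn a completed Ruby transaction into a gem5 response on the slave port:
// optionally touch the physical backing store, then either schedule the
// response for the next cycle or delete the packet if no response is needed.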
void
DMASequencer::MemSlavePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();
    assert(!pkt->isLLSC());
    assert(!pkt->isFlush());

    DPRINTF(RubyDma, "Hit callback needs response %d\n", needsResponse);

    // Turn the packet around to go back to the requester if a response
    // is expected.
    if (access_backing_store) {
        ruby_system->getPhysMem()->access(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    if (needsResponse) {
        DPRINTF(RubyDma, "Sending packet back over port\n");
        // send next cycle
        schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod());
    } else {
        delete pkt;
    }

    DPRINTF(RubyDma, "Hit callback done!\n");
}

bool
DMASequencer::MemSlavePort::isPhysMemAddress(Addr addr) const
{
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);
    return seq->system->isMemAddr(addr);
}

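// Accept a new DMA request from the attached device. Only one DMA transaction
// may be outstanding at a time, so the request is rejected if the sequencer
// is busy; otherwise the first (possibly unaligned) block-sized chunk is
// enqueued on the controller's mandatory queue.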
RequestStatus
DMASequencer::makeRequest(PacketPtr pkt)
{
    if (m_is_busy) {
        return RequestStatus_BufferFull;
    }

    uint64_t paddr = pkt->getAddr();
    uint8_t* data = pkt->getPtr<uint8_t>();
    int len = pkt->getSize();
    bool write = pkt->isWrite();

    assert(!m_is_busy);  // only support one outstanding DMA request
    m_is_busy = true;

    active_request.start_paddr = paddr;
    active_request.write = write;
    active_request.data = data;
    active_request.len = len;
    active_request.bytes_completed = 0;
    active_request.bytes_issued = 0;
    active_request.pkt = pkt;

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = Address(paddr);
    msg->getLineAddress() = line_address(msg->getPhysicalAddress());
    msg->getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD;
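    // The first chunk may start in the middle of a cache block, so clip its
    // length at the block boundary; issueNext() issues the remaining
    // block-aligned chunks.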
    int offset = paddr & m_data_block_mask;

    msg->getLen() = (offset + len) <= RubySystem::getBlockSizeBytes() ?
        len : RubySystem::getBlockSizeBytes() - offset;

    if (write && (data != NULL)) {
        msg->getDataBlk().setData(data, offset, msg->getLen());
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg);
    active_request.bytes_issued += msg->getLen();

    return RequestStatus_Issued;
}

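// Called as each chunk completes. Either finishes the DMA request (clearing
// the busy flag and calling back the slave port) or issues the next
// block-aligned chunk of the active request.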
void
DMASequencer::issueNext()
{
    assert(m_is_busy);
    active_request.bytes_completed = active_request.bytes_issued;
    if (active_request.len == active_request.bytes_completed) {
        //
        // Must unset the busy flag before calling back the dma port because
        // the callback may cause a previously nacked request to be reissued
        //
        DPRINTF(RubyDma, "DMA request completed\n");
        m_is_busy = false;
        ruby_hit_callback(active_request.pkt);
        return;
    }

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = Address(active_request.start_paddr +
                                       active_request.bytes_completed);

    assert((msg->getPhysicalAddress().getAddress() & m_data_block_mask) == 0);
    msg->getLineAddress() = line_address(msg->getPhysicalAddress());

    msg->getType() = (active_request.write ? SequencerRequestType_ST :
                     SequencerRequestType_LD);

    msg->getLen() =
        (active_request.len -
         active_request.bytes_completed < RubySystem::getBlockSizeBytes() ?
         active_request.len - active_request.bytes_completed :
         RubySystem::getBlockSizeBytes());

    if (active_request.write) {
        msg->getDataBlk().
            setData(&active_request.data[active_request.bytes_completed],
                    0, msg->getLen());
        msg->getType() = SequencerRequestType_ST;
    } else {
        msg->getType() = SequencerRequestType_LD;
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg);
    active_request.bytes_issued += msg->getLen();
    DPRINTF(RubyDma,
            "DMA request bytes issued %d, bytes completed %d, total len %d\n",
            active_request.bytes_issued, active_request.bytes_completed,
            active_request.len);
}

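// Invoked by the DMA controller when a read chunk returns. Copies the
// returned data into the device's buffer (honoring the initial offset of the
// first, possibly unaligned, chunk) and then issues the next chunk.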
void
DMASequencer::dataCallback(const DataBlock & dblk)
{
    assert(m_is_busy);
    int len = active_request.bytes_issued - active_request.bytes_completed;
    int offset = 0;
    if (active_request.bytes_completed == 0)
        offset = active_request.start_paddr & m_data_block_mask;
    assert(!active_request.write);
    if (active_request.data != NULL) {
        memcpy(&active_request.data[active_request.bytes_completed],
               dblk.getData(offset, len), len);
    }
    issueNext();
}

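// Invoked by the DMA controller when a write chunk has been acknowledged;
// simply move on to the next chunk (or complete the request).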
void
DMASequencer::ackCallback()
{
    issueNext();
}

void
DMASequencer::recordRequestType(DMASequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            DMASequencerRequestType_to_string(requestType));
}

DMASequencer *
DMASequencerParams::create()
{
    return new DMASequencer(this);
}