// DMASequencer.cc revision 10706:4206946d60fe
/*
 * Copyright (c) 2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
292665Ssaidi@eecs.umich.edu#include <memory>
302207SN/A
312207SN/A#include "debug/Config.hh"
322972Sgblack@eecs.umich.edu#include "debug/Drain.hh"
332207SN/A#include "debug/RubyDma.hh"
342454SN/A#include "debug/RubyStats.hh"
355759Shsul@eecs.umich.edu#include "mem/protocol/SequencerMsg.hh"
362454SN/A#include "mem/ruby/system/DMASequencer.hh"
372680Sktlim@umich.edu#include "mem/ruby/system/System.hh"
385759Shsul@eecs.umich.edu#include "sim/system.hh"
395759Shsul@eecs.umich.edu
402474SN/ADMASequencer::DMASequencer(const Params *p)
412207SN/A    : MemObject(p), m_version(p->version), m_controller(NULL),
422474SN/A      m_mandatory_q_ptr(NULL), m_usingRubyTester(p->using_ruby_tester),
432474SN/A      slave_port(csprintf("%s.slave", name()), this, 0, p->ruby_system,
442474SN/A                 p->ruby_system->getAccessBackingStore()),
455569Snate@binkert.org      drainManager(NULL), system(p->system), retry(false)
465569Snate@binkert.org{
475154Sgblack@eecs.umich.edu    assert(m_version != -1);
482474SN/A}
492474SN/A
502474SN/Avoid
512474SN/ADMASequencer::init()
522474SN/A{
532474SN/A    MemObject::init();
542474SN/A    assert(m_controller != NULL);
552474SN/A    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
562474SN/A    m_mandatory_q_ptr->setSender(this);
572474SN/A    m_is_busy = false;
582474SN/A    m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());
592474SN/A
602474SN/A    slave_port.sendRangeChange();
612474SN/A}
622474SN/A
632474SN/ABaseSlavePort &
642474SN/ADMASequencer::getSlavePort(const std::string &if_name, PortID idx)
652474SN/A{
665759Shsul@eecs.umich.edu    // used by the CPUs to connect the caches to the interconnect, and
675759Shsul@eecs.umich.edu    // for the x86 case also the interrupt master
685759Shsul@eecs.umich.edu    if (if_name != "slave") {
695759Shsul@eecs.umich.edu        // pass it along to our super class
705771Shsul@eecs.umich.edu        return MemObject::getSlavePort(if_name, idx);
715759Shsul@eecs.umich.edu    } else {
725759Shsul@eecs.umich.edu        return slave_port;
735759Shsul@eecs.umich.edu    }
745759Shsul@eecs.umich.edu}
755759Shsul@eecs.umich.edu
765759Shsul@eecs.umich.eduDMASequencer::MemSlavePort::MemSlavePort(const std::string &_name,
775759Shsul@eecs.umich.edu    DMASequencer *_port, PortID id, RubySystem* _ruby_system,
785759Shsul@eecs.umich.edu    bool _access_backing_store)
795759Shsul@eecs.umich.edu    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
805759Shsul@eecs.umich.edu      ruby_system(_ruby_system), access_backing_store(_access_backing_store)
815759Shsul@eecs.umich.edu{
825759Shsul@eecs.umich.edu    DPRINTF(RubyDma, "Created slave memport on ruby sequencer %s\n", _name);
835759Shsul@eecs.umich.edu}
845759Shsul@eecs.umich.edu
855759Shsul@eecs.umich.edubool
865759Shsul@eecs.umich.eduDMASequencer::MemSlavePort::recvTimingReq(PacketPtr pkt)
875759Shsul@eecs.umich.edu{
885759Shsul@eecs.umich.edu    DPRINTF(RubyDma, "Timing request for address %#x on port %d\n",
895759Shsul@eecs.umich.edu            pkt->getAddr(), id);
905759Shsul@eecs.umich.edu    DMASequencer *seq = static_cast<DMASequencer *>(&owner);
915759Shsul@eecs.umich.edu
925759Shsul@eecs.umich.edu    if (pkt->memInhibitAsserted())
935759Shsul@eecs.umich.edu        panic("DMASequencer should never see an inhibited request\n");
945759Shsul@eecs.umich.edu
955759Shsul@eecs.umich.edu    assert(isPhysMemAddress(pkt->getAddr()));
965759Shsul@eecs.umich.edu    assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
975759Shsul@eecs.umich.edu           RubySystem::getBlockSizeBytes());
985759Shsul@eecs.umich.edu
995759Shsul@eecs.umich.edu    // Submit the ruby request
1005759Shsul@eecs.umich.edu    RequestStatus requestStatus = seq->makeRequest(pkt);
1015759Shsul@eecs.umich.edu
1025759Shsul@eecs.umich.edu    // If the request successfully issued then we should return true.
1036227Snate@binkert.org    // Otherwise, we need to tell the port to retry at a later point
1045759Shsul@eecs.umich.edu    // and return false.
1055759Shsul@eecs.umich.edu    if (requestStatus == RequestStatus_Issued) {
1065759Shsul@eecs.umich.edu        DPRINTF(RubyDma, "Request %s 0x%x issued\n", pkt->cmdString(),
1076227Snate@binkert.org                pkt->getAddr());
1085759Shsul@eecs.umich.edu        return true;
1095759Shsul@eecs.umich.edu    }
1105759Shsul@eecs.umich.edu
1115759Shsul@eecs.umich.edu    // Unless one is using the ruby tester, record the stalled M5 port for
1125759Shsul@eecs.umich.edu    // later retry when the sequencer becomes free.
1135759Shsul@eecs.umich.edu    if (!seq->m_usingRubyTester) {
1145759Shsul@eecs.umich.edu        seq->retry = true;
1155759Shsul@eecs.umich.edu    }
1165759Shsul@eecs.umich.edu
1175759Shsul@eecs.umich.edu    DPRINTF(RubyDma, "Request for address %#x did not issued because %s\n",
1185759Shsul@eecs.umich.edu            pkt->getAddr(), RequestStatus_to_string(requestStatus));
1195759Shsul@eecs.umich.edu
1205759Shsul@eecs.umich.edu    return false;
1215759Shsul@eecs.umich.edu}
1225759Shsul@eecs.umich.edu
1235759Shsul@eecs.umich.eduvoid
1245759Shsul@eecs.umich.eduDMASequencer::ruby_hit_callback(PacketPtr pkt)
1255759Shsul@eecs.umich.edu{
1265759Shsul@eecs.umich.edu    DPRINTF(RubyDma, "Hit callback for %s 0x%x\n", pkt->cmdString(),
1275759Shsul@eecs.umich.edu            pkt->getAddr());
1285759Shsul@eecs.umich.edu
1295759Shsul@eecs.umich.edu    // The packet was destined for memory and has not yet been turned
1305759Shsul@eecs.umich.edu    // into a response
1315759Shsul@eecs.umich.edu    assert(system->isMemAddr(pkt->getAddr()));
1325759Shsul@eecs.umich.edu    assert(pkt->isRequest());
1335759Shsul@eecs.umich.edu    slave_port.hitCallback(pkt);
1345759Shsul@eecs.umich.edu
1355759Shsul@eecs.umich.edu    // If we had to stall the slave ports, wake it up because
1365759Shsul@eecs.umich.edu    // the sequencer likely has free resources now.
1375759Shsul@eecs.umich.edu    if (retry) {
1385759Shsul@eecs.umich.edu        retry = false;
1395759Shsul@eecs.umich.edu        DPRINTF(RubyDma,"Sequencer may now be free.  SendRetry to port %s\n",
1405759Shsul@eecs.umich.edu                slave_port.name());
1415759Shsul@eecs.umich.edu        slave_port.sendRetry();
1425759Shsul@eecs.umich.edu    }
1435759Shsul@eecs.umich.edu
1445759Shsul@eecs.umich.edu    testDrainComplete();
1455759Shsul@eecs.umich.edu}
1465759Shsul@eecs.umich.edu
1475759Shsul@eecs.umich.eduvoid
1485759Shsul@eecs.umich.eduDMASequencer::testDrainComplete()
1495759Shsul@eecs.umich.edu{
1505759Shsul@eecs.umich.edu    //If we weren't able to drain before, we might be able to now.
1516227Snate@binkert.org    if (drainManager != NULL) {
1525759Shsul@eecs.umich.edu        unsigned int drainCount = outstandingCount();
1535759Shsul@eecs.umich.edu        DPRINTF(Drain, "Drain count: %u\n", drainCount);
1545759Shsul@eecs.umich.edu        if (drainCount == 0) {
1555759Shsul@eecs.umich.edu            DPRINTF(Drain, "DMASequencer done draining, signaling drain done\n");
1565759Shsul@eecs.umich.edu            drainManager->signalDrainDone();
1575759Shsul@eecs.umich.edu            // Clear the drain manager once we're done with it.
1585759Shsul@eecs.umich.edu            drainManager = NULL;
1595759Shsul@eecs.umich.edu        }
1605958Sgblack@eecs.umich.edu    }
1615958Sgblack@eecs.umich.edu}
1625759Shsul@eecs.umich.edu
1635759Shsul@eecs.umich.eduunsigned int
1645759Shsul@eecs.umich.eduDMASequencer::getChildDrainCount(DrainManager *dm)
1655759Shsul@eecs.umich.edu{
1665759Shsul@eecs.umich.edu    int count = 0;
1675759Shsul@eecs.umich.edu    count += slave_port.drain(dm);
1686180Sksewell@umich.edu    DPRINTF(Config, "count after slave port check %d\n", count);
1696180Sksewell@umich.edu    return count;
1706180Sksewell@umich.edu}
1716180Sksewell@umich.edu
1725759Shsul@eecs.umich.eduunsigned int
1735759Shsul@eecs.umich.eduDMASequencer::drain(DrainManager *dm)
1745759Shsul@eecs.umich.edu{
1755759Shsul@eecs.umich.edu    if (isDeadlockEventScheduled()) {
1762474SN/A        descheduleDeadlockEvent();
1772474SN/A    }
1785183Ssaidi@eecs.umich.edu
1795183Ssaidi@eecs.umich.edu    // If the DMASequencer is not empty, then it needs to clear all outstanding
1805183Ssaidi@eecs.umich.edu    // requests before it should call drainManager->signalDrainDone()
1815759Shsul@eecs.umich.edu    DPRINTF(Config, "outstanding count %d\n", outstandingCount());
1825759Shsul@eecs.umich.edu    bool need_drain = outstandingCount() > 0;
1832474SN/A
1842474SN/A    //
1855713Shsul@eecs.umich.edu    // Also, get the number of child ports that will also need to clear
1865713Shsul@eecs.umich.edu    // their buffered requests before they call drainManager->signalDrainDone()
1875713Shsul@eecs.umich.edu    //
1885713Shsul@eecs.umich.edu    unsigned int child_drain_count = getChildDrainCount(dm);
1894997Sgblack@eecs.umich.edu
1905713Shsul@eecs.umich.edu    // Set status
1914997Sgblack@eecs.umich.edu    if (need_drain) {
1925713Shsul@eecs.umich.edu        drainManager = dm;
1932474SN/A
1942474SN/A        DPRINTF(Drain, "DMASequencer not drained\n");
1955958Sgblack@eecs.umich.edu        setDrainState(Drainable::Draining);
1965958Sgblack@eecs.umich.edu        return child_drain_count + 1;
1975958Sgblack@eecs.umich.edu    }
1985958Sgblack@eecs.umich.edu
1995958Sgblack@eecs.umich.edu    drainManager = NULL;
2005958Sgblack@eecs.umich.edu    setDrainState(Drainable::Drained);
2015958Sgblack@eecs.umich.edu    return child_drain_count;
2025958Sgblack@eecs.umich.edu}
2035958Sgblack@eecs.umich.edu
2045958Sgblack@eecs.umich.eduvoid
2055958Sgblack@eecs.umich.eduDMASequencer::MemSlavePort::hitCallback(PacketPtr pkt)
2065958Sgblack@eecs.umich.edu{
2075958Sgblack@eecs.umich.edu    bool needsResponse = pkt->needsResponse();
2085958Sgblack@eecs.umich.edu    assert(!pkt->isLLSC());
2095958Sgblack@eecs.umich.edu    assert(!pkt->isFlush());
2105958Sgblack@eecs.umich.edu
2115958Sgblack@eecs.umich.edu    DPRINTF(RubyDma, "Hit callback needs response %d\n", needsResponse);
2125958Sgblack@eecs.umich.edu
2135958Sgblack@eecs.umich.edu    // turn packet around to go back to requester if response expected
2145958Sgblack@eecs.umich.edu
2155958Sgblack@eecs.umich.edu    if (access_backing_store) {
2165958Sgblack@eecs.umich.edu        ruby_system->getPhysMem()->access(pkt);
2175958Sgblack@eecs.umich.edu    } else if (needsResponse) {
2185958Sgblack@eecs.umich.edu        pkt->makeResponse();
2195958Sgblack@eecs.umich.edu    }
2205958Sgblack@eecs.umich.edu
2215958Sgblack@eecs.umich.edu    if (needsResponse) {
2225958Sgblack@eecs.umich.edu        DPRINTF(RubyDma, "Sending packet back over port\n");
2235958Sgblack@eecs.umich.edu        // send next cycle
2245958Sgblack@eecs.umich.edu        schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod());
2255958Sgblack@eecs.umich.edu    } else {
2265958Sgblack@eecs.umich.edu        delete pkt;
227    }
228
229    DPRINTF(RubyDma, "Hit callback done!\n");
230}
231
232bool
233DMASequencer::MemSlavePort::isPhysMemAddress(Addr addr) const
234{
235    DMASequencer *seq = static_cast<DMASequencer *>(&owner);
236    return seq->system->isMemAddr(addr);
237}
238
239RequestStatus
240DMASequencer::makeRequest(PacketPtr pkt)
241{
242    if (m_is_busy) {
243        return RequestStatus_BufferFull;
244    }
245
246    uint64_t paddr = pkt->getAddr();
247    uint8_t* data =  pkt->getPtr<uint8_t>();
248    int len = pkt->getSize();
249    bool write = pkt->isWrite();
250
251    assert(!m_is_busy);  // only support one outstanding DMA request
252    m_is_busy = true;
253
254    active_request.start_paddr = paddr;
255    active_request.write = write;
256    active_request.data = data;
257    active_request.len = len;
258    active_request.bytes_completed = 0;
259    active_request.bytes_issued = 0;
260    active_request.pkt = pkt;
261
262    std::shared_ptr<SequencerMsg> msg =
263        std::make_shared<SequencerMsg>(clockEdge());
264    msg->getPhysicalAddress() = Address(paddr);
265    msg->getLineAddress() = line_address(msg->getPhysicalAddress());
266    msg->getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD;
267    int offset = paddr & m_data_block_mask;
268
269    msg->getLen() = (offset + len) <= RubySystem::getBlockSizeBytes() ?
270        len : RubySystem::getBlockSizeBytes() - offset;
271
272    if (write && (data != NULL)) {
273        if (active_request.data != NULL) {
274            msg->getDataBlk().setData(data, offset, msg->getLen());
275        }
276    }
277
278    assert(m_mandatory_q_ptr != NULL);
279    m_mandatory_q_ptr->enqueue(msg);
280    active_request.bytes_issued += msg->getLen();
281
282    return RequestStatus_Issued;
283}
284
285void
286DMASequencer::issueNext()
287{
288    assert(m_is_busy);
289    active_request.bytes_completed = active_request.bytes_issued;
290    if (active_request.len == active_request.bytes_completed) {
291        //
292        // Must unset the busy flag before calling back the dma port because
293        // the callback may cause a previously nacked request to be reissued
294        //
295        DPRINTF(RubyDma, "DMA request completed\n");
296        m_is_busy = false;
297        ruby_hit_callback(active_request.pkt);
298        return;
299    }
300
301    std::shared_ptr<SequencerMsg> msg =
302        std::make_shared<SequencerMsg>(clockEdge());
303    msg->getPhysicalAddress() = Address(active_request.start_paddr +
304                                       active_request.bytes_completed);
305
306    assert((msg->getPhysicalAddress().getAddress() & m_data_block_mask) == 0);
307    msg->getLineAddress() = line_address(msg->getPhysicalAddress());
308
309    msg->getType() = (active_request.write ? SequencerRequestType_ST :
310                     SequencerRequestType_LD);
311
312    msg->getLen() =
313        (active_request.len -
314         active_request.bytes_completed < RubySystem::getBlockSizeBytes() ?
315         active_request.len - active_request.bytes_completed :
316         RubySystem::getBlockSizeBytes());
317
318    if (active_request.write) {
319        msg->getDataBlk().
320            setData(&active_request.data[active_request.bytes_completed],
321                    0, msg->getLen());
322        msg->getType() = SequencerRequestType_ST;
323    } else {
324        msg->getType() = SequencerRequestType_LD;
325    }
326
327    assert(m_mandatory_q_ptr != NULL);
328    m_mandatory_q_ptr->enqueue(msg);
329    active_request.bytes_issued += msg->getLen();
330    DPRINTF(RubyDma,
331            "DMA request bytes issued %d, bytes completed %d, total len %d\n",
332            active_request.bytes_issued, active_request.bytes_completed,
333            active_request.len);
334}
335
336void
337DMASequencer::dataCallback(const DataBlock & dblk)
338{
339    assert(m_is_busy);
340    int len = active_request.bytes_issued - active_request.bytes_completed;
341    int offset = 0;
342    if (active_request.bytes_completed == 0)
343        offset = active_request.start_paddr & m_data_block_mask;
344    assert(!active_request.write);
345    if (active_request.data != NULL) {
346        memcpy(&active_request.data[active_request.bytes_completed],
347               dblk.getData(offset, len), len);
348    }
349    issueNext();
350}
351
352void
353DMASequencer::ackCallback()
354{
355    issueNext();
356}
357
358void
359DMASequencer::recordRequestType(DMASequencerRequestType requestType)
360{
361    DPRINTF(RubyStats, "Recorded statistic: %s\n",
362            DMASequencerRequestType_to_string(requestType));
363}
364
365DMASequencer *
366DMASequencerParams::create()
367{
368    return new DMASequencer(this);
369}
370