// DMASequencer.cc (revision 10713)
/*
 * Copyright (c) 2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT 207008Snate@binkert.org * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 217008Snate@binkert.org * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 227008Snate@binkert.org * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 237008Snate@binkert.org * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 247008Snate@binkert.org * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 257008Snate@binkert.org * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 267008Snate@binkert.org * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 277008Snate@binkert.org */ 286285Snate@binkert.org 2910472Sandreas.hansson@arm.com#include <memory> 3010472Sandreas.hansson@arm.com 3110518Snilay@cs.wisc.edu#include "debug/Config.hh" 3210518Snilay@cs.wisc.edu#include "debug/Drain.hh" 338232Snate@binkert.org#include "debug/RubyDma.hh" 349104Shestness@cs.utexas.edu#include "debug/RubyStats.hh" 357039Snate@binkert.org#include "mem/protocol/SequencerMsg.hh" 367039Snate@binkert.org#include "mem/ruby/system/DMASequencer.hh" 376285Snate@binkert.org#include "mem/ruby/system/System.hh" 3810518Snilay@cs.wisc.edu#include "sim/system.hh" 396285Snate@binkert.org 406876Ssteve.reinhardt@amd.comDMASequencer::DMASequencer(const Params *p) 4110518Snilay@cs.wisc.edu : MemObject(p), m_version(p->version), m_controller(NULL), 4210518Snilay@cs.wisc.edu m_mandatory_q_ptr(NULL), m_usingRubyTester(p->using_ruby_tester), 4310706Spower.jg@gmail.com slave_port(csprintf("%s.slave", name()), this, 0, p->ruby_system, 4410706Spower.jg@gmail.com p->ruby_system->getAccessBackingStore()), 4510519Snilay@cs.wisc.edu drainManager(NULL), system(p->system), retry(false) 466285Snate@binkert.org{ 4710518Snilay@cs.wisc.edu assert(m_version != -1); 486285Snate@binkert.org} 496285Snate@binkert.org 507039Snate@binkert.orgvoid 517039Snate@binkert.orgDMASequencer::init() 
526285Snate@binkert.org{ 5310518Snilay@cs.wisc.edu MemObject::init(); 5410518Snilay@cs.wisc.edu assert(m_controller != NULL); 5510518Snilay@cs.wisc.edu m_mandatory_q_ptr = m_controller->getMandatoryQueue(); 5610518Snilay@cs.wisc.edu m_mandatory_q_ptr->setSender(this); 577039Snate@binkert.org m_is_busy = false; 587039Snate@binkert.org m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits()); 5910519Snilay@cs.wisc.edu 6010519Snilay@cs.wisc.edu slave_port.sendRangeChange(); 616285Snate@binkert.org} 626285Snate@binkert.org 6310518Snilay@cs.wisc.eduBaseSlavePort & 6410518Snilay@cs.wisc.eduDMASequencer::getSlavePort(const std::string &if_name, PortID idx) 6510518Snilay@cs.wisc.edu{ 6610518Snilay@cs.wisc.edu // used by the CPUs to connect the caches to the interconnect, and 6710518Snilay@cs.wisc.edu // for the x86 case also the interrupt master 6810518Snilay@cs.wisc.edu if (if_name != "slave") { 6910518Snilay@cs.wisc.edu // pass it along to our super class 7010518Snilay@cs.wisc.edu return MemObject::getSlavePort(if_name, idx); 7110518Snilay@cs.wisc.edu } else { 7210518Snilay@cs.wisc.edu return slave_port; 7310518Snilay@cs.wisc.edu } 7410518Snilay@cs.wisc.edu} 7510518Snilay@cs.wisc.edu 7610518Snilay@cs.wisc.eduDMASequencer::MemSlavePort::MemSlavePort(const std::string &_name, 7710706Spower.jg@gmail.com DMASequencer *_port, PortID id, RubySystem* _ruby_system, 7810706Spower.jg@gmail.com bool _access_backing_store) 7910706Spower.jg@gmail.com : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this), 8010706Spower.jg@gmail.com ruby_system(_ruby_system), access_backing_store(_access_backing_store) 8110518Snilay@cs.wisc.edu{ 8210518Snilay@cs.wisc.edu DPRINTF(RubyDma, "Created slave memport on ruby sequencer %s\n", _name); 8310518Snilay@cs.wisc.edu} 8410518Snilay@cs.wisc.edu 8510518Snilay@cs.wisc.edubool 8610518Snilay@cs.wisc.eduDMASequencer::MemSlavePort::recvTimingReq(PacketPtr pkt) 8710518Snilay@cs.wisc.edu{ 8810518Snilay@cs.wisc.edu DPRINTF(RubyDma, "Timing 
request for address %#x on port %d\n", 8910518Snilay@cs.wisc.edu pkt->getAddr(), id); 9010518Snilay@cs.wisc.edu DMASequencer *seq = static_cast<DMASequencer *>(&owner); 9110518Snilay@cs.wisc.edu 9210518Snilay@cs.wisc.edu if (pkt->memInhibitAsserted()) 9310518Snilay@cs.wisc.edu panic("DMASequencer should never see an inhibited request\n"); 9410518Snilay@cs.wisc.edu 9510518Snilay@cs.wisc.edu assert(isPhysMemAddress(pkt->getAddr())); 9610518Snilay@cs.wisc.edu assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <= 9710518Snilay@cs.wisc.edu RubySystem::getBlockSizeBytes()); 9810518Snilay@cs.wisc.edu 9910518Snilay@cs.wisc.edu // Submit the ruby request 10010518Snilay@cs.wisc.edu RequestStatus requestStatus = seq->makeRequest(pkt); 10110518Snilay@cs.wisc.edu 10210518Snilay@cs.wisc.edu // If the request successfully issued then we should return true. 10310518Snilay@cs.wisc.edu // Otherwise, we need to tell the port to retry at a later point 10410518Snilay@cs.wisc.edu // and return false. 10510518Snilay@cs.wisc.edu if (requestStatus == RequestStatus_Issued) { 10610518Snilay@cs.wisc.edu DPRINTF(RubyDma, "Request %s 0x%x issued\n", pkt->cmdString(), 10710518Snilay@cs.wisc.edu pkt->getAddr()); 10810518Snilay@cs.wisc.edu return true; 10910518Snilay@cs.wisc.edu } 11010518Snilay@cs.wisc.edu 11110518Snilay@cs.wisc.edu // Unless one is using the ruby tester, record the stalled M5 port for 11210518Snilay@cs.wisc.edu // later retry when the sequencer becomes free. 
11310518Snilay@cs.wisc.edu if (!seq->m_usingRubyTester) { 11410518Snilay@cs.wisc.edu seq->retry = true; 11510518Snilay@cs.wisc.edu } 11610518Snilay@cs.wisc.edu 11710518Snilay@cs.wisc.edu DPRINTF(RubyDma, "Request for address %#x did not issued because %s\n", 11810518Snilay@cs.wisc.edu pkt->getAddr(), RequestStatus_to_string(requestStatus)); 11910518Snilay@cs.wisc.edu 12010518Snilay@cs.wisc.edu return false; 12110518Snilay@cs.wisc.edu} 12210518Snilay@cs.wisc.edu 12310518Snilay@cs.wisc.eduvoid 12410518Snilay@cs.wisc.eduDMASequencer::ruby_hit_callback(PacketPtr pkt) 12510518Snilay@cs.wisc.edu{ 12610518Snilay@cs.wisc.edu DPRINTF(RubyDma, "Hit callback for %s 0x%x\n", pkt->cmdString(), 12710518Snilay@cs.wisc.edu pkt->getAddr()); 12810518Snilay@cs.wisc.edu 12910518Snilay@cs.wisc.edu // The packet was destined for memory and has not yet been turned 13010518Snilay@cs.wisc.edu // into a response 13110518Snilay@cs.wisc.edu assert(system->isMemAddr(pkt->getAddr())); 13210518Snilay@cs.wisc.edu assert(pkt->isRequest()); 13310518Snilay@cs.wisc.edu slave_port.hitCallback(pkt); 13410518Snilay@cs.wisc.edu 13510518Snilay@cs.wisc.edu // If we had to stall the slave ports, wake it up because 13610518Snilay@cs.wisc.edu // the sequencer likely has free resources now. 13710518Snilay@cs.wisc.edu if (retry) { 13810518Snilay@cs.wisc.edu retry = false; 13910518Snilay@cs.wisc.edu DPRINTF(RubyDma,"Sequencer may now be free. SendRetry to port %s\n", 14010518Snilay@cs.wisc.edu slave_port.name()); 14110713Sandreas.hansson@arm.com slave_port.sendRetryReq(); 14210518Snilay@cs.wisc.edu } 14310518Snilay@cs.wisc.edu 14410518Snilay@cs.wisc.edu testDrainComplete(); 14510518Snilay@cs.wisc.edu} 14610518Snilay@cs.wisc.edu 14710518Snilay@cs.wisc.eduvoid 14810518Snilay@cs.wisc.eduDMASequencer::testDrainComplete() 14910518Snilay@cs.wisc.edu{ 15010518Snilay@cs.wisc.edu //If we weren't able to drain before, we might be able to now. 
15110518Snilay@cs.wisc.edu if (drainManager != NULL) { 15210518Snilay@cs.wisc.edu unsigned int drainCount = outstandingCount(); 15310518Snilay@cs.wisc.edu DPRINTF(Drain, "Drain count: %u\n", drainCount); 15410518Snilay@cs.wisc.edu if (drainCount == 0) { 15510518Snilay@cs.wisc.edu DPRINTF(Drain, "DMASequencer done draining, signaling drain done\n"); 15610518Snilay@cs.wisc.edu drainManager->signalDrainDone(); 15710518Snilay@cs.wisc.edu // Clear the drain manager once we're done with it. 15810518Snilay@cs.wisc.edu drainManager = NULL; 15910518Snilay@cs.wisc.edu } 16010518Snilay@cs.wisc.edu } 16110518Snilay@cs.wisc.edu} 16210518Snilay@cs.wisc.edu 16310518Snilay@cs.wisc.eduunsigned int 16410518Snilay@cs.wisc.eduDMASequencer::getChildDrainCount(DrainManager *dm) 16510518Snilay@cs.wisc.edu{ 16610518Snilay@cs.wisc.edu int count = 0; 16710518Snilay@cs.wisc.edu count += slave_port.drain(dm); 16810518Snilay@cs.wisc.edu DPRINTF(Config, "count after slave port check %d\n", count); 16910518Snilay@cs.wisc.edu return count; 17010518Snilay@cs.wisc.edu} 17110518Snilay@cs.wisc.edu 17210518Snilay@cs.wisc.eduunsigned int 17310518Snilay@cs.wisc.eduDMASequencer::drain(DrainManager *dm) 17410518Snilay@cs.wisc.edu{ 17510518Snilay@cs.wisc.edu if (isDeadlockEventScheduled()) { 17610518Snilay@cs.wisc.edu descheduleDeadlockEvent(); 17710518Snilay@cs.wisc.edu } 17810518Snilay@cs.wisc.edu 17910518Snilay@cs.wisc.edu // If the DMASequencer is not empty, then it needs to clear all outstanding 18010518Snilay@cs.wisc.edu // requests before it should call drainManager->signalDrainDone() 18110518Snilay@cs.wisc.edu DPRINTF(Config, "outstanding count %d\n", outstandingCount()); 18210518Snilay@cs.wisc.edu bool need_drain = outstandingCount() > 0; 18310518Snilay@cs.wisc.edu 18410518Snilay@cs.wisc.edu // 18510518Snilay@cs.wisc.edu // Also, get the number of child ports that will also need to clear 18610518Snilay@cs.wisc.edu // their buffered requests before they call drainManager->signalDrainDone() 
18710518Snilay@cs.wisc.edu // 18810518Snilay@cs.wisc.edu unsigned int child_drain_count = getChildDrainCount(dm); 18910518Snilay@cs.wisc.edu 19010518Snilay@cs.wisc.edu // Set status 19110518Snilay@cs.wisc.edu if (need_drain) { 19210518Snilay@cs.wisc.edu drainManager = dm; 19310518Snilay@cs.wisc.edu 19410518Snilay@cs.wisc.edu DPRINTF(Drain, "DMASequencer not drained\n"); 19510518Snilay@cs.wisc.edu setDrainState(Drainable::Draining); 19610518Snilay@cs.wisc.edu return child_drain_count + 1; 19710518Snilay@cs.wisc.edu } 19810518Snilay@cs.wisc.edu 19910518Snilay@cs.wisc.edu drainManager = NULL; 20010518Snilay@cs.wisc.edu setDrainState(Drainable::Drained); 20110518Snilay@cs.wisc.edu return child_drain_count; 20210518Snilay@cs.wisc.edu} 20310518Snilay@cs.wisc.edu 20410518Snilay@cs.wisc.eduvoid 20510518Snilay@cs.wisc.eduDMASequencer::MemSlavePort::hitCallback(PacketPtr pkt) 20610518Snilay@cs.wisc.edu{ 20710518Snilay@cs.wisc.edu bool needsResponse = pkt->needsResponse(); 20810518Snilay@cs.wisc.edu assert(!pkt->isLLSC()); 20910518Snilay@cs.wisc.edu assert(!pkt->isFlush()); 21010518Snilay@cs.wisc.edu 21110518Snilay@cs.wisc.edu DPRINTF(RubyDma, "Hit callback needs response %d\n", needsResponse); 21210518Snilay@cs.wisc.edu 21310518Snilay@cs.wisc.edu // turn packet around to go back to requester if response expected 21410706Spower.jg@gmail.com 21510706Spower.jg@gmail.com if (access_backing_store) { 21610706Spower.jg@gmail.com ruby_system->getPhysMem()->access(pkt); 21710706Spower.jg@gmail.com } else if (needsResponse) { 21810706Spower.jg@gmail.com pkt->makeResponse(); 21910706Spower.jg@gmail.com } 22010706Spower.jg@gmail.com 22110518Snilay@cs.wisc.edu if (needsResponse) { 22210518Snilay@cs.wisc.edu DPRINTF(RubyDma, "Sending packet back over port\n"); 22310518Snilay@cs.wisc.edu // send next cycle 22410518Snilay@cs.wisc.edu schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod()); 22510518Snilay@cs.wisc.edu } else { 22610518Snilay@cs.wisc.edu delete pkt; 
22710518Snilay@cs.wisc.edu } 22810519Snilay@cs.wisc.edu 22910518Snilay@cs.wisc.edu DPRINTF(RubyDma, "Hit callback done!\n"); 23010518Snilay@cs.wisc.edu} 23110518Snilay@cs.wisc.edu 23210518Snilay@cs.wisc.edubool 23310518Snilay@cs.wisc.eduDMASequencer::MemSlavePort::isPhysMemAddress(Addr addr) const 23410518Snilay@cs.wisc.edu{ 23510518Snilay@cs.wisc.edu DMASequencer *seq = static_cast<DMASequencer *>(&owner); 23610518Snilay@cs.wisc.edu return seq->system->isMemAddr(addr); 23710518Snilay@cs.wisc.edu} 23810518Snilay@cs.wisc.edu 2397039Snate@binkert.orgRequestStatus 2408615Snilay@cs.wisc.eduDMASequencer::makeRequest(PacketPtr pkt) 2416285Snate@binkert.org{ 2427544SBrad.Beckmann@amd.com if (m_is_busy) { 2437544SBrad.Beckmann@amd.com return RequestStatus_BufferFull; 2447544SBrad.Beckmann@amd.com } 2457544SBrad.Beckmann@amd.com 2468615Snilay@cs.wisc.edu uint64_t paddr = pkt->getAddr(); 24710562Sandreas.hansson@arm.com uint8_t* data = pkt->getPtr<uint8_t>(); 2488615Snilay@cs.wisc.edu int len = pkt->getSize(); 2498615Snilay@cs.wisc.edu bool write = pkt->isWrite(); 2506285Snate@binkert.org 2517039Snate@binkert.org assert(!m_is_busy); // only support one outstanding DMA request 2527039Snate@binkert.org m_is_busy = true; 2536285Snate@binkert.org 2547039Snate@binkert.org active_request.start_paddr = paddr; 2557039Snate@binkert.org active_request.write = write; 2567039Snate@binkert.org active_request.data = data; 2577039Snate@binkert.org active_request.len = len; 2587039Snate@binkert.org active_request.bytes_completed = 0; 2597039Snate@binkert.org active_request.bytes_issued = 0; 2608615Snilay@cs.wisc.edu active_request.pkt = pkt; 2616285Snate@binkert.org 26210472Sandreas.hansson@arm.com std::shared_ptr<SequencerMsg> msg = 26310472Sandreas.hansson@arm.com std::make_shared<SequencerMsg>(clockEdge()); 2647453Snate@binkert.org msg->getPhysicalAddress() = Address(paddr); 2657453Snate@binkert.org msg->getLineAddress() = line_address(msg->getPhysicalAddress()); 2667453Snate@binkert.org 
msg->getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD; 2677039Snate@binkert.org int offset = paddr & m_data_block_mask; 2686888SBrad.Beckmann@amd.com 2697453Snate@binkert.org msg->getLen() = (offset + len) <= RubySystem::getBlockSizeBytes() ? 2707039Snate@binkert.org len : RubySystem::getBlockSizeBytes() - offset; 2716888SBrad.Beckmann@amd.com 2727915SBrad.Beckmann@amd.com if (write && (data != NULL)) { 2737915SBrad.Beckmann@amd.com if (active_request.data != NULL) { 2747915SBrad.Beckmann@amd.com msg->getDataBlk().setData(data, offset, msg->getLen()); 2757915SBrad.Beckmann@amd.com } 2767039Snate@binkert.org } 2776888SBrad.Beckmann@amd.com 2787039Snate@binkert.org assert(m_mandatory_q_ptr != NULL); 2797039Snate@binkert.org m_mandatory_q_ptr->enqueue(msg); 2807453Snate@binkert.org active_request.bytes_issued += msg->getLen(); 2816285Snate@binkert.org 2827039Snate@binkert.org return RequestStatus_Issued; 2836285Snate@binkert.org} 2846285Snate@binkert.org 2857039Snate@binkert.orgvoid 2867039Snate@binkert.orgDMASequencer::issueNext() 2876285Snate@binkert.org{ 28810231Ssteve.reinhardt@amd.com assert(m_is_busy); 2897039Snate@binkert.org active_request.bytes_completed = active_request.bytes_issued; 2907039Snate@binkert.org if (active_request.len == active_request.bytes_completed) { 2918162SBrad.Beckmann@amd.com // 2928162SBrad.Beckmann@amd.com // Must unset the busy flag before calling back the dma port because 2938162SBrad.Beckmann@amd.com // the callback may cause a previously nacked request to be reissued 2948162SBrad.Beckmann@amd.com // 2958162SBrad.Beckmann@amd.com DPRINTF(RubyDma, "DMA request completed\n"); 2968162SBrad.Beckmann@amd.com m_is_busy = false; 2977039Snate@binkert.org ruby_hit_callback(active_request.pkt); 2987039Snate@binkert.org return; 2997039Snate@binkert.org } 3006285Snate@binkert.org 30110472Sandreas.hansson@arm.com std::shared_ptr<SequencerMsg> msg = 30210472Sandreas.hansson@arm.com std::make_shared<SequencerMsg>(clockEdge()); 
3037453Snate@binkert.org msg->getPhysicalAddress() = Address(active_request.start_paddr + 3047039Snate@binkert.org active_request.bytes_completed); 3056888SBrad.Beckmann@amd.com 3067453Snate@binkert.org assert((msg->getPhysicalAddress().getAddress() & m_data_block_mask) == 0); 3077453Snate@binkert.org msg->getLineAddress() = line_address(msg->getPhysicalAddress()); 3086888SBrad.Beckmann@amd.com 3097453Snate@binkert.org msg->getType() = (active_request.write ? SequencerRequestType_ST : 3107039Snate@binkert.org SequencerRequestType_LD); 3116888SBrad.Beckmann@amd.com 3127453Snate@binkert.org msg->getLen() = 3137039Snate@binkert.org (active_request.len - 3147039Snate@binkert.org active_request.bytes_completed < RubySystem::getBlockSizeBytes() ? 3157039Snate@binkert.org active_request.len - active_request.bytes_completed : 3167039Snate@binkert.org RubySystem::getBlockSizeBytes()); 3176888SBrad.Beckmann@amd.com 3187039Snate@binkert.org if (active_request.write) { 3197453Snate@binkert.org msg->getDataBlk(). 
3207039Snate@binkert.org setData(&active_request.data[active_request.bytes_completed], 3217453Snate@binkert.org 0, msg->getLen()); 3227453Snate@binkert.org msg->getType() = SequencerRequestType_ST; 3237039Snate@binkert.org } else { 3247453Snate@binkert.org msg->getType() = SequencerRequestType_LD; 3257039Snate@binkert.org } 3266888SBrad.Beckmann@amd.com 3277039Snate@binkert.org assert(m_mandatory_q_ptr != NULL); 3287039Snate@binkert.org m_mandatory_q_ptr->enqueue(msg); 3297453Snate@binkert.org active_request.bytes_issued += msg->getLen(); 3308160SBrad.Beckmann@amd.com DPRINTF(RubyDma, 3318160SBrad.Beckmann@amd.com "DMA request bytes issued %d, bytes completed %d, total len %d\n", 3328160SBrad.Beckmann@amd.com active_request.bytes_issued, active_request.bytes_completed, 3338160SBrad.Beckmann@amd.com active_request.len); 3346285Snate@binkert.org} 3356285Snate@binkert.org 3367039Snate@binkert.orgvoid 3377039Snate@binkert.orgDMASequencer::dataCallback(const DataBlock & dblk) 3386285Snate@binkert.org{ 33910231Ssteve.reinhardt@amd.com assert(m_is_busy); 3407039Snate@binkert.org int len = active_request.bytes_issued - active_request.bytes_completed; 3417039Snate@binkert.org int offset = 0; 3427039Snate@binkert.org if (active_request.bytes_completed == 0) 3437039Snate@binkert.org offset = active_request.start_paddr & m_data_block_mask; 34410231Ssteve.reinhardt@amd.com assert(!active_request.write); 3457915SBrad.Beckmann@amd.com if (active_request.data != NULL) { 3467915SBrad.Beckmann@amd.com memcpy(&active_request.data[active_request.bytes_completed], 3477915SBrad.Beckmann@amd.com dblk.getData(offset, len), len); 3487915SBrad.Beckmann@amd.com } 3497039Snate@binkert.org issueNext(); 3506285Snate@binkert.org} 3516285Snate@binkert.org 3527039Snate@binkert.orgvoid 3537039Snate@binkert.orgDMASequencer::ackCallback() 3546285Snate@binkert.org{ 3557039Snate@binkert.org issueNext(); 3566285Snate@binkert.org} 3576285Snate@binkert.org 3587039Snate@binkert.orgvoid 
35910518Snilay@cs.wisc.eduDMASequencer::recordRequestType(DMASequencerRequestType requestType) 36010518Snilay@cs.wisc.edu{ 3619104Shestness@cs.utexas.edu DPRINTF(RubyStats, "Recorded statistic: %s\n", 3629104Shestness@cs.utexas.edu DMASequencerRequestType_to_string(requestType)); 3639104Shestness@cs.utexas.edu} 3649104Shestness@cs.utexas.edu 3656876Ssteve.reinhardt@amd.comDMASequencer * 3666876Ssteve.reinhardt@amd.comDMASequencerParams::create() 3676876Ssteve.reinhardt@amd.com{ 3686876Ssteve.reinhardt@amd.com return new DMASequencer(this); 3696876Ssteve.reinhardt@amd.com} 370