/*
 * Copyright (c) 2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

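/*
 * Overview: the DMASequencer bridges a DMA device's port into the Ruby
 * memory system.  It accepts one timing request at a time on its slave
 * port, splits the transfer into cache-block-sized SequencerMsgs that
 * are enqueued on the controller's mandatory queue, and turns the
 * original packet into a response once the final chunk has completed.
 */
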
#include <memory>

#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/RubyDma.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/SequencerMsg.hh"
#include "mem/ruby/system/DMASequencer.hh"
#include "mem/ruby/system/System.hh"
#include "sim/system.hh"

DMASequencer::DMASequencer(const Params *p)
    : MemObject(p), m_version(p->version), m_controller(NULL),
      m_mandatory_q_ptr(NULL), m_usingRubyTester(p->using_ruby_tester),
      slave_port(csprintf("%s.slave", name()), this, 0),
      drainManager(NULL), system(p->system), retry(false)
{
    assert(m_version != -1);
}

void
DMASequencer::init()
{
    MemObject::init();
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    m_mandatory_q_ptr->setSender(this);
    m_is_busy = false;
    // mask selecting the byte offset within a cache block
    m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());

    slave_port.sendRangeChange();
}

BaseSlavePort &
DMASequencer::getSlavePort(const std::string &if_name, PortID idx)
{
    // the DMA devices connect to the Ruby network through this slave port
    if (if_name != "slave") {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return slave_port;
    }
}

DMASequencer::MemSlavePort::MemSlavePort(const std::string &_name,
    DMASequencer *_port, PortID id)
    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this)
{
    DPRINTF(RubyDma, "Created slave memport on ruby sequencer %s\n", _name);
}

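/*
 * Timing-request handshake: returning true means Ruby accepted the
 * packet; returning false means the device must wait for sendRetry().
 * Because makeRequest() only supports a single outstanding DMA
 * transfer, a request arriving while one is in flight is rejected with
 * RequestStatus_BufferFull and retried once ruby_hit_callback() fires.
 */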
bool
DMASequencer::MemSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);

    if (pkt->memInhibitAsserted())
        panic("DMASequencer should never see an inhibited request\n");

    assert(isPhysMemAddress(pkt->getAddr()));
    assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = seq->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(RubyDma, "Request %s 0x%x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    if (!seq->m_usingRubyTester) {
        seq->retry = true;
    }

    DPRINTF(RubyDma, "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    return false;
}

void
DMASequencer::ruby_hit_callback(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Hit callback for %s 0x%x\n", pkt->cmdString(),
            pkt->getAddr());

    // The packet was destined for memory and has not yet been turned
    // into a response
    assert(system->isMemAddr(pkt->getAddr()));
    assert(pkt->isRequest());
    slave_port.hitCallback(pkt);

    // If we had to stall the slave port, wake it up because
    // the sequencer likely has free resources now.
    if (retry) {
        retry = false;
        DPRINTF(RubyDma, "Sequencer may now be free. SendRetry to port %s\n",
                slave_port.name());
        slave_port.sendRetry();
    }

    testDrainComplete();
}

void
DMASequencer::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainManager != NULL) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "DMASequencer done draining, signaling drain done\n");
            drainManager->signalDrainDone();
            // Clear the drain manager once we're done with it.
            drainManager = NULL;
        }
    }
}

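/*
 * Draining: drain() reports how many objects still have outstanding
 * work (this sequencer plus its slave port).  When a transfer finishes,
 * ruby_hit_callback() calls testDrainComplete(), which signals the
 * DrainManager once outstandingCount() reaches zero.
 */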
unsigned int
DMASequencer::getChildDrainCount(DrainManager *dm)
{
    int count = 0;
    count += slave_port.drain(dm);
    DPRINTF(Config, "count after slave port check %d\n", count);
    return count;
}

unsigned int
DMASequencer::drain(DrainManager *dm)
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    // If the DMASequencer is not empty, then it needs to clear all outstanding
    // requests before it should call drainManager->signalDrainDone()
    DPRINTF(Config, "outstanding count %d\n", outstandingCount());
    bool need_drain = outstandingCount() > 0;

    //
    // Also, get the number of child ports that will also need to clear
    // their buffered requests before they call drainManager->signalDrainDone()
    //
    unsigned int child_drain_count = getChildDrainCount(dm);

    // Set status
    if (need_drain) {
        drainManager = dm;

        DPRINTF(Drain, "DMASequencer not drained\n");
        setDrainState(Drainable::Draining);
        return child_drain_count + 1;
    }

    drainManager = NULL;
    setDrainState(Drainable::Drained);
    return child_drain_count;
}

void
DMASequencer::MemSlavePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();
    assert(!pkt->isLLSC());
    assert(!pkt->isFlush());

    DPRINTF(RubyDma, "Hit callback needs response %d\n", needsResponse);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        pkt->makeResponse();
        DPRINTF(RubyDma, "Sending packet back over port\n");
        // send next cycle
        schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod());
    } else {
        delete pkt;
    }

    DPRINTF(RubyDma, "Hit callback done!\n");
}

bool
DMASequencer::MemSlavePort::isPhysMemAddress(Addr addr) const
{
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);
    return seq->system->isMemAddr(addr);
}

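/*
 * Chunking example (assuming a 64-byte Ruby block size, so
 * m_data_block_mask == 0x3f): a 100-byte DMA starting at physical
 * address 0x1028 has offset = 0x1028 & 0x3f = 40, so makeRequest()
 * issues a first chunk of 64 - 40 = 24 bytes; issueNext() then issues
 * a full 64-byte chunk followed by the remaining 12 bytes.
 */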
RequestStatus
DMASequencer::makeRequest(PacketPtr pkt)
{
    if (m_is_busy) {
        return RequestStatus_BufferFull;
    }

    uint64_t paddr = pkt->getAddr();
    uint8_t* data = pkt->getPtr<uint8_t>(true);
    int len = pkt->getSize();
    bool write = pkt->isWrite();

    assert(!m_is_busy); // only support one outstanding DMA request
    m_is_busy = true;

    active_request.start_paddr = paddr;
    active_request.write = write;
    active_request.data = data;
    active_request.len = len;
    active_request.bytes_completed = 0;
    active_request.bytes_issued = 0;
    active_request.pkt = pkt;

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = Address(paddr);
    msg->getLineAddress() = line_address(msg->getPhysicalAddress());
    msg->getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD;
    int offset = paddr & m_data_block_mask;

    msg->getLen() = (offset + len) <= RubySystem::getBlockSizeBytes() ?
        len : RubySystem::getBlockSizeBytes() - offset;

    if (write && (data != NULL)) {
        if (active_request.data != NULL) {
            msg->getDataBlk().setData(data, offset, msg->getLen());
        }
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg);
    active_request.bytes_issued += msg->getLen();

    return RequestStatus_Issued;
}

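/*
 * issueNext() is driven by dataCallback()/ackCallback() as each chunk
 * completes.  Chunks after the first are block-aligned, so every
 * subsequent message carries min(bytes remaining, block size) bytes;
 * the busy flag is cleared before the final callback so a stalled port
 * can immediately issue a new request.
 */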
void
DMASequencer::issueNext()
{
    assert(m_is_busy);
    active_request.bytes_completed = active_request.bytes_issued;
    if (active_request.len == active_request.bytes_completed) {
        //
        // Must unset the busy flag before calling back the dma port because
        // the callback may cause a previously nacked request to be reissued
        //
        DPRINTF(RubyDma, "DMA request completed\n");
        m_is_busy = false;
        ruby_hit_callback(active_request.pkt);
        return;
    }

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = Address(active_request.start_paddr +
                                        active_request.bytes_completed);

    assert((msg->getPhysicalAddress().getAddress() & m_data_block_mask) == 0);
    msg->getLineAddress() = line_address(msg->getPhysicalAddress());

    msg->getType() = (active_request.write ? SequencerRequestType_ST :
                      SequencerRequestType_LD);

    msg->getLen() =
        (active_request.len -
         active_request.bytes_completed < RubySystem::getBlockSizeBytes() ?
         active_request.len - active_request.bytes_completed :
         RubySystem::getBlockSizeBytes());

    if (active_request.write) {
        msg->getDataBlk().
            setData(&active_request.data[active_request.bytes_completed],
                    0, msg->getLen());
        msg->getType() = SequencerRequestType_ST;
    } else {
        msg->getType() = SequencerRequestType_LD;
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg);
    active_request.bytes_issued += msg->getLen();
    DPRINTF(RubyDma,
            "DMA request bytes issued %d, bytes completed %d, total len %d\n",
            active_request.bytes_issued, active_request.bytes_completed,
            active_request.len);
}

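/*
 * Read data returned by the protocol is copied into the requester's
 * buffer at the current completion offset; the intra-block offset only
 * applies to the (possibly unaligned) first chunk.  Completed write
 * chunks advance the transfer through ackCallback() instead.
 */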
void
DMASequencer::dataCallback(const DataBlock & dblk)
{
    assert(m_is_busy);
    int len = active_request.bytes_issued - active_request.bytes_completed;
    int offset = 0;
    if (active_request.bytes_completed == 0)
        offset = active_request.start_paddr & m_data_block_mask;
    assert(!active_request.write);
    if (active_request.data != NULL) {
        memcpy(&active_request.data[active_request.bytes_completed],
               dblk.getData(offset, len), len);
    }
    issueNext();
}

void
DMASequencer::ackCallback()
{
    issueNext();
}

void
DMASequencer::recordRequestType(DMASequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            DMASequencerRequestType_to_string(requestType));
}

DMASequencer *
DMASequencerParams::create()
{
    return new DMASequencer(this);
}