// DMASequencer.cc, revision 11025
/*
 * Copyright (c) 2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cstring>
#include <memory>

#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/RubyDma.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/SequencerMsg.hh"
#include "mem/ruby/system/DMASequencer.hh"
#include "mem/ruby/system/System.hh"
#include "sim/system.hh"

DMASequencer::DMASequencer(const Params *p)
    : MemObject(p), m_ruby_system(p->ruby_system), m_version(p->version),
      m_controller(NULL), m_mandatory_q_ptr(NULL),
      m_usingRubyTester(p->using_ruby_tester),
      slave_port(csprintf("%s.slave", name()), this, 0, p->ruby_system,
                 p->ruby_system->getAccessBackingStore()),
      system(p->system), retry(false)
{
    assert(m_version != -1);
}

void
DMASequencer::init()
{
    MemObject::init();
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    m_mandatory_q_ptr->setSender(this);
    m_is_busy = false;

    // Mask that extracts the within-block offset from a physical address
    m_data_block_mask = ~(~0 << RubySystem::getBlockSizeBits());

    slave_port.sendRangeChange();
}

BaseSlavePort &
DMASequencer::getSlavePort(const std::string &if_name, PortID idx)
{
    // used by the DMA devices to connect the DMA sequencer to the
    // interconnect
    if (if_name != "slave") {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return slave_port;
    }
}

DMASequencer::MemSlavePort::MemSlavePort(const std::string &_name,
    DMASequencer *_port, PortID id, RubySystem *_ruby_system,
    bool _access_backing_store)
    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
      m_ruby_system(_ruby_system), access_backing_store(_access_backing_store)
{
    DPRINTF(RubyDma, "Created slave memport on ruby sequencer %s\n", _name);
}

bool
DMASequencer::MemSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);

    if (pkt->memInhibitAsserted())
        panic("DMASequencer should never see an inhibited request\n");

    assert(isPhysMemAddress(pkt->getAddr()));
    assert(getOffset(pkt->getAddr()) + pkt->getSize() <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = seq->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(RubyDma, "Request %s 0x%x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    if (!seq->m_usingRubyTester) {
        seq->retry = true;
    }

    DPRINTF(RubyDma, "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    return false;
}
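/*
 * A note on the retry handshake above: it is the standard gem5
 * timing-port protocol, not anything specific to Ruby. A minimal
 * sketch of the device side (MyDmaDevice and dmaPort are hypothetical
 * names, shown only for illustration):
 *
 *     if (!dmaPort.sendTimingReq(pkt)) {
 *         // The sequencer was busy; hold on to pkt. Our
 *         // recvReqRetry() will be invoked once the sequencer calls
 *         // sendRetryReq() (see ruby_hit_callback() below), and we
 *         // resend the packet from there.
 *     }
 */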
void
DMASequencer::ruby_hit_callback(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Hit callback for %s 0x%x\n", pkt->cmdString(),
            pkt->getAddr());

    // The packet was destined for memory and has not yet been turned
    // into a response
    assert(system->isMemAddr(pkt->getAddr()));
    assert(pkt->isRequest());
    slave_port.hitCallback(pkt);

    // If we had to stall the slave port, wake it up because the
    // sequencer likely has free resources now.
    if (retry) {
        retry = false;
        DPRINTF(RubyDma, "Sequencer may now be free. SendRetry to port %s\n",
                slave_port.name());
        slave_port.sendRetryReq();
    }

    testDrainComplete();
}

void
DMASequencer::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainState() == DrainState::Draining) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "DMASequencer done draining, signaling drain "
                    "done\n");
            signalDrainDone();
        }
    }
}
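/*
 * Draining is driven from outside the sequencer, typically by the
 * simulator before a checkpoint or CPU switch. A hedged sketch of the
 * generic calling pattern (the DrainManager machinery is simplified
 * here for illustration):
 *
 *     DrainState ds = seq->drain();
 *     if (ds == DrainState::Draining) {
 *         // Wait: once outstandingCount() reaches zero,
 *         // testDrainComplete() above calls signalDrainDone().
 *     }
 */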
DrainState
DMASequencer::drain()
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    // If the DMASequencer is not empty, it must complete all outstanding
    // requests before it can call signalDrainDone()
    DPRINTF(Config, "outstanding count %d\n", outstandingCount());

    // Set status
    if (outstandingCount() > 0) {
        DPRINTF(Drain, "DMASequencer not drained\n");
        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

void
DMASequencer::MemSlavePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();
    assert(!pkt->isLLSC());
    assert(!pkt->isFlush());

    DPRINTF(RubyDma, "Hit callback needs response %d\n", needsResponse);

    // turn packet around to go back to requester if response expected
    if (access_backing_store) {
        m_ruby_system->getPhysMem()->access(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    if (needsResponse) {
        DPRINTF(RubyDma, "Sending packet back over port\n");
        // send next cycle
        DMASequencer *seq = static_cast<DMASequencer *>(&owner);
        RubySystem *rs = seq->m_ruby_system;
        schedTimingResp(pkt, curTick() + rs->clockPeriod());
    } else {
        delete pkt;
    }

    DPRINTF(RubyDma, "Hit callback done!\n");
}

bool
DMASequencer::MemSlavePort::isPhysMemAddress(Addr addr) const
{
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);
    return seq->system->isMemAddr(addr);
}
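/*
 * makeRequest() below issues the first chunk of a DMA transfer;
 * issueNext() then walks the remainder one cache block at a time.
 * Only the first chunk can be misaligned. A worked example (a reading
 * aid only), assuming 64-byte blocks: a 200-byte transfer starting at
 * paddr 0x1030 has offset 0x30 (48), so the chunks issued are
 * 64 - 48 = 16 bytes, then 64, 64, and finally 56 bytes.
 */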
RequestStatus
DMASequencer::makeRequest(PacketPtr pkt)
{
    if (m_is_busy) {
        return RequestStatus_BufferFull;
    }

    Addr paddr = pkt->getAddr();
    uint8_t *data = pkt->getPtr<uint8_t>();
    int len = pkt->getSize();
    bool write = pkt->isWrite();

    assert(!m_is_busy);  // only support one outstanding DMA request
    m_is_busy = true;

    active_request.start_paddr = paddr;
    active_request.write = write;
    active_request.data = data;
    active_request.len = len;
    active_request.bytes_completed = 0;
    active_request.bytes_issued = 0;
    active_request.pkt = pkt;

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = paddr;
    msg->getLineAddress() = makeLineAddress(msg->getPhysicalAddress());
    msg->getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD;
    int offset = paddr & m_data_block_mask;

    // Clip the first chunk at the block boundary; issueNext() handles
    // the rest of the transfer.
    msg->getLen() = (offset + len) <= RubySystem::getBlockSizeBytes() ?
        len : RubySystem::getBlockSizeBytes() - offset;

    // active_request.data was just set to data, so a single NULL check
    // suffices (the original nested check on both was redundant).
    if (write && (data != NULL)) {
        msg->getDataBlk().setData(data, offset, msg->getLen());
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg);
    active_request.bytes_issued += msg->getLen();

    return RequestStatus_Issued;
}

void
DMASequencer::issueNext()
{
    assert(m_is_busy);
    active_request.bytes_completed = active_request.bytes_issued;
    if (active_request.len == active_request.bytes_completed) {
        //
        // Must unset the busy flag before calling back the dma port because
        // the callback may cause a previously nacked request to be reissued
        //
        DPRINTF(RubyDma, "DMA request completed\n");
        m_is_busy = false;
        ruby_hit_callback(active_request.pkt);
        return;
    }

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = active_request.start_paddr +
                                active_request.bytes_completed;

    assert((msg->getPhysicalAddress() & m_data_block_mask) == 0);
    msg->getLineAddress() = makeLineAddress(msg->getPhysicalAddress());

    msg->getType() = (active_request.write ? SequencerRequestType_ST :
                      SequencerRequestType_LD);

    // Issue either a full block or whatever remains of the transfer,
    // whichever is smaller.
    msg->getLen() =
        (active_request.len -
         active_request.bytes_completed < RubySystem::getBlockSizeBytes() ?
         active_request.len - active_request.bytes_completed :
         RubySystem::getBlockSizeBytes());

    if (active_request.write) {
        msg->getDataBlk().
            setData(&active_request.data[active_request.bytes_completed],
                    0, msg->getLen());
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg);
    active_request.bytes_issued += msg->getLen();
    DPRINTF(RubyDma,
            "DMA request bytes issued %d, bytes completed %d, total len %d\n",
            active_request.bytes_issued, active_request.bytes_completed,
            active_request.len);
}

void
DMASequencer::dataCallback(const DataBlock &dblk)
{
    assert(m_is_busy);
    int len = active_request.bytes_issued - active_request.bytes_completed;
    int offset = 0;
    // Only the first chunk can start mid-block; later chunks are
    // block-aligned (asserted in issueNext()).
    if (active_request.bytes_completed == 0)
        offset = active_request.start_paddr & m_data_block_mask;
    assert(!active_request.write);
    if (active_request.data != NULL) {
        memcpy(&active_request.data[active_request.bytes_completed],
               dblk.getData(offset, len), len);
    }
    issueNext();
}

void
DMASequencer::ackCallback()
{
    issueNext();
}

void
DMASequencer::recordRequestType(DMASequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            DMASequencerRequestType_to_string(requestType));
}

DMASequencer *
DMASequencerParams::create()
{
    return new DMASequencer(this);
}
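/*
 * Request lifecycle summary (a reading aid, not normative protocol
 * documentation; all names are the ones defined in this file):
 *
 *     device --> MemSlavePort::recvTimingReq --> makeRequest
 *         (first chunk enqueued on the mandatory queue)
 *     controller finishes a chunk --> dataCallback (reads) or
 *         ackCallback (writes) --> issueNext
 *     ...repeats until bytes_completed == len...
 *     issueNext --> ruby_hit_callback --> MemSlavePort::hitCallback
 *         (timing response scheduled back to the device)
 */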