DMASequencer.cc revision 10519
/*
 * Copyright (c) 2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <memory>

#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/RubyDma.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/SequencerMsg.hh"
#include "mem/ruby/system/DMASequencer.hh"
#include "mem/ruby/system/System.hh"
#include "sim/system.hh"

DMASequencer::DMASequencer(const Params *p)
    : MemObject(p), m_version(p->version), m_controller(NULL),
      m_mandatory_q_ptr(NULL), m_usingRubyTester(p->using_ruby_tester),
      slave_port(csprintf("%s.slave", name()), this, 0),
      drainManager(NULL), system(p->system), retry(false)
{
    assert(m_version != -1);
}

void
DMASequencer::init()
{
    MemObject::init();
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    m_mandatory_q_ptr->setSender(this);
    m_is_busy = false;
    m_data_block_mask = ~(~0 << RubySystem::getBlockSizeBits());

    slave_port.sendRangeChange();
}
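// For illustration, assuming Ruby's default 64-byte block size
// (getBlockSizeBits() == 6), the mask computed in init() is
//
//     m_data_block_mask = ~(~0 << 6) = 0x3f
//
// so (paddr & m_data_block_mask) is the byte offset of paddr within its
// cache block, e.g. paddr 0x1004 -> offset 0x4. makeRequest() and
// dataCallback() below rely on this to split and reassemble DMA
// transfers at block boundaries.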
BaseSlavePort &
DMASequencer::getSlavePort(const std::string &if_name, PortID idx)
{
    // used by the CPUs to connect the caches to the interconnect, and
    // for the x86 case also the interrupt master
    if (if_name != "slave") {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return slave_port;
    }
}

DMASequencer::MemSlavePort::MemSlavePort(const std::string &_name,
                                         DMASequencer *_port, PortID id)
    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this)
{
    DPRINTF(RubyDma, "Created slave memport on ruby sequencer %s\n", _name);
}

bool
DMASequencer::MemSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);

    if (pkt->memInhibitAsserted())
        panic("DMASequencer should never see an inhibited request\n");

    assert(isPhysMemAddress(pkt->getAddr()));
    assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = seq->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(RubyDma, "Request %s 0x%x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    if (!seq->m_usingRubyTester) {
        seq->retry = true;
    }

    DPRINTF(RubyDma, "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    return false;
}
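// A minimal sketch of the retry handshake from the device side, assuming
// the standard gem5 timing master-port protocol of this era; waitingPkt
// and the surrounding structure are illustrative, not part of this file:
//
//     if (!sendTimingReq(pkt))
//         waitingPkt = pkt;       // recvTimingReq() returned false
//
//     void recvRetry() {          // the sequencer called sendRetry()
//         PacketPtr pkt = waitingPkt;
//         waitingPkt = NULL;
//         if (!sendTimingReq(pkt))
//             waitingPkt = pkt;   // still busy; wait for the next retry
//     }
//
// ruby_hit_callback() below issues that sendRetry() once the sequencer
// has freed up.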
void
DMASequencer::ruby_hit_callback(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Hit callback for %s 0x%x\n", pkt->cmdString(),
            pkt->getAddr());

    // The packet was destined for memory and has not yet been turned
    // into a response
    assert(system->isMemAddr(pkt->getAddr()));
    assert(pkt->isRequest());
    slave_port.hitCallback(pkt);

    // If we had to stall the slave port, wake it up because the
    // sequencer likely has free resources now.
    if (retry) {
        retry = false;
        DPRINTF(RubyDma, "Sequencer may now be free. SendRetry to port %s\n",
                slave_port.name());
        slave_port.sendRetry();
    }

    testDrainComplete();
}

void
DMASequencer::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainManager != NULL) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "DMASequencer done draining, signaling drain done\n");
            drainManager->signalDrainDone();
            // Clear the drain manager once we're done with it.
            drainManager = NULL;
        }
    }
}

unsigned int
DMASequencer::getChildDrainCount(DrainManager *dm)
{
    int count = 0;
    count += slave_port.drain(dm);
    DPRINTF(Config, "count after slave port check %d\n", count);
    return count;
}

unsigned int
DMASequencer::drain(DrainManager *dm)
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    // If the DMASequencer is not empty, then it needs to clear all
    // outstanding requests before it should call
    // drainManager->signalDrainDone().
    DPRINTF(Config, "outstanding count %d\n", outstandingCount());
    bool need_drain = outstandingCount() > 0;

    // Also, get the number of child ports that will need to clear their
    // buffered requests before they call drainManager->signalDrainDone().
    unsigned int child_drain_count = getChildDrainCount(dm);

    // Set status
    if (need_drain) {
        drainManager = dm;

        DPRINTF(Drain, "DMASequencer not drained\n");
        setDrainState(Drainable::Draining);
        return child_drain_count + 1;
    }

    drainManager = NULL;
    setDrainState(Drainable::Drained);
    return child_drain_count;
}

void
DMASequencer::MemSlavePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();
    assert(!pkt->isLLSC());
    assert(!pkt->isFlush());

    DPRINTF(RubyDma, "Hit callback needs response %d\n", needsResponse);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        pkt->makeResponse();
        DPRINTF(RubyDma, "Sending packet back over port\n");
        // send next cycle
        schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod());
    } else {
        delete pkt;
    }

    DPRINTF(RubyDma, "Hit callback done!\n");
}

bool
DMASequencer::MemSlavePort::isPhysMemAddress(Addr addr) const
{
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);
    return seq->system->isMemAddr(addr);
}

RequestStatus
DMASequencer::makeRequest(PacketPtr pkt)
{
    if (m_is_busy) {
        return RequestStatus_BufferFull;
    }

    uint64_t paddr = pkt->getAddr();
    uint8_t* data = pkt->getPtr<uint8_t>(true);
    int len = pkt->getSize();
    bool write = pkt->isWrite();

    assert(!m_is_busy);  // only support one outstanding DMA request
    m_is_busy = true;

    active_request.start_paddr = paddr;
    active_request.write = write;
    active_request.data = data;
    active_request.len = len;
    active_request.bytes_completed = 0;
    active_request.bytes_issued = 0;
    active_request.pkt = pkt;

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = Address(paddr);
    msg->getLineAddress() = line_address(msg->getPhysicalAddress());
    msg->getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD;
    int offset = paddr & m_data_block_mask;

    // The first chunk is at most the remainder of the block containing
    // paddr; any further chunks are issued by issueNext().
    msg->getLen() = (offset + len) <= RubySystem::getBlockSizeBytes() ?
        len : RubySystem::getBlockSizeBytes() - offset;

    if (write && (data != NULL)) {
        msg->getDataBlk().setData(data, offset, msg->getLen());
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg);
    active_request.bytes_issued += msg->getLen();

    return RequestStatus_Issued;
}
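// Worked example of the chunking done by makeRequest() above and
// issueNext() below, assuming 64-byte blocks: a 200-byte DMA starting at
// paddr 0x1030 (offset 0x30 within its block) is issued as four
// SequencerMsgs, the first by makeRequest() and the rest by issueNext()
// as each preceding chunk completes:
//
//     chunk 0: addr 0x1030, len 16   (to the end of the first block)
//     chunk 1: addr 0x1040, len 64
//     chunk 2: addr 0x1080, len 64
//     chunk 3: addr 0x10c0, len 56   (200 - 16 - 64 - 64)
//
// Only the first chunk may start mid-block; issueNext() asserts that all
// later chunks are block-aligned.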
void
DMASequencer::issueNext()
{
    assert(m_is_busy);
    active_request.bytes_completed = active_request.bytes_issued;
    if (active_request.len == active_request.bytes_completed) {
        // Must unset the busy flag before calling back the dma port
        // because the callback may cause a previously nacked request to
        // be reissued.
        DPRINTF(RubyDma, "DMA request completed\n");
        m_is_busy = false;
        ruby_hit_callback(active_request.pkt);
        return;
    }

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = Address(active_request.start_paddr +
                                        active_request.bytes_completed);

    // Every chunk after the first must start on a block boundary.
    assert((msg->getPhysicalAddress().getAddress() & m_data_block_mask) == 0);
    msg->getLineAddress() = line_address(msg->getPhysicalAddress());

    msg->getType() = (active_request.write ? SequencerRequestType_ST :
                      SequencerRequestType_LD);

    // Issue either a full block or whatever remains of the request.
    msg->getLen() =
        (active_request.len -
         active_request.bytes_completed < RubySystem::getBlockSizeBytes() ?
         active_request.len - active_request.bytes_completed :
         RubySystem::getBlockSizeBytes());

    if (active_request.write) {
        msg->getDataBlk().
            setData(&active_request.data[active_request.bytes_completed],
                    0, msg->getLen());
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg);
    active_request.bytes_issued += msg->getLen();
    DPRINTF(RubyDma,
            "DMA request bytes issued %d, bytes completed %d, total len %d\n",
            active_request.bytes_issued, active_request.bytes_completed,
            active_request.len);
}

void
DMASequencer::dataCallback(const DataBlock & dblk)
{
    assert(m_is_busy);
    int len = active_request.bytes_issued - active_request.bytes_completed;
    int offset = 0;
    if (active_request.bytes_completed == 0)
        offset = active_request.start_paddr & m_data_block_mask;
    assert(!active_request.write);
    if (active_request.data != NULL) {
        memcpy(&active_request.data[active_request.bytes_completed],
               dblk.getData(offset, len), len);
    }
    issueNext();
}

void
DMASequencer::ackCallback()
{
    issueNext();
}

void
DMASequencer::recordRequestType(DMASequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            DMASequencerRequestType_to_string(requestType));
}

DMASequencer *
DMASequencerParams::create()
{
    return new DMASequencer(this);
}
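// Overall lifecycle of a request, as implemented above. The
// controller-side callbacks are invoked by the generated SLICC protocol
// code, so the exact call sites depend on the protocol in use:
//
//     device --> recvTimingReq() --> makeRequest()   (first chunk enqueued)
//     controller --> dataCallback() (reads) or ackCallback() (writes)
//         --> issueNext()   (next chunk, until bytes_completed == len)
//     issueNext() --> ruby_hit_callback() --> hitCallback() --> response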