DMASequencer.cc revision 10917
/*
 * Copyright (c) 2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <memory>

#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/RubyDma.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/SequencerMsg.hh"
#include "mem/ruby/system/DMASequencer.hh"
#include "mem/ruby/system/System.hh"
#include "sim/system.hh"

DMASequencer::DMASequencer(const Params *p)
    : MemObject(p), m_version(p->version), m_controller(NULL),
      m_mandatory_q_ptr(NULL), m_usingRubyTester(p->using_ruby_tester),
      slave_port(csprintf("%s.slave", name()), this, 0, p->ruby_system,
                 p->ruby_system->getAccessBackingStore()),
      system(p->system), retry(false)
{
    assert(m_version != -1);
}

void
DMASequencer::init()
{
    MemObject::init();
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    m_mandatory_q_ptr->setSender(this);
    m_is_busy = false;
    m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());

    slave_port.sendRangeChange();
}

BaseSlavePort &
DMASequencer::getSlavePort(const std::string &if_name, PortID idx)
{
    // used by the CPUs to connect the caches to the interconnect, and
    // for the x86 case also the interrupt master
    if (if_name != "slave") {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return slave_port;
    }
}

DMASequencer::MemSlavePort::MemSlavePort(const std::string &_name,
    DMASequencer *_port, PortID id, RubySystem* _ruby_system,
    bool _access_backing_store)
    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
      ruby_system(_ruby_system), access_backing_store(_access_backing_store)
{
    DPRINTF(RubyDma, "Created slave memport on ruby sequencer %s\n", _name);
}
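
// Entry point for timing-mode packets arriving on the slave port. The
// packet is handed to the sequencer via makeRequest(); if the sequencer
// is busy, the port is remembered so a retry can be sent once it frees up.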
bool
DMASequencer::MemSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);

    if (pkt->memInhibitAsserted())
        panic("DMASequencer should never see an inhibited request\n");

    assert(isPhysMemAddress(pkt->getAddr()));
    assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = seq->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(RubyDma, "Request %s 0x%x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    if (!seq->m_usingRubyTester) {
        seq->retry = true;
    }

    DPRINTF(RubyDma, "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    return false;
}

void
DMASequencer::ruby_hit_callback(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Hit callback for %s 0x%x\n", pkt->cmdString(),
            pkt->getAddr());

    // The packet was destined for memory and has not yet been turned
    // into a response
    assert(system->isMemAddr(pkt->getAddr()));
    assert(pkt->isRequest());
    slave_port.hitCallback(pkt);

    // If we had to stall the slave port, wake it up because
    // the sequencer likely has free resources now.
    if (retry) {
        retry = false;
        DPRINTF(RubyDma, "Sequencer may now be free. SendRetry to port %s\n",
                slave_port.name());
        slave_port.sendRetryReq();
    }

    testDrainComplete();
}
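
// Drain support: while DMA requests are outstanding, drain() reports
// DrainState::Draining; testDrainComplete() signals drain completion once
// the outstanding count reaches zero.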
void
DMASequencer::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainState() == DrainState::Draining) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "DMASequencer done draining, signaling drain done\n");
            signalDrainDone();
        }
    }
}

DrainState
DMASequencer::drain()
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    // If the DMASequencer is not empty, then it needs to clear all
    // outstanding requests before it can call signalDrainDone()
    DPRINTF(Config, "outstanding count %d\n", outstandingCount());

    // Set status
    if (outstandingCount() > 0) {
        DPRINTF(Drain, "DMASequencer not drained\n");
        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

void
DMASequencer::MemSlavePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();
    assert(!pkt->isLLSC());
    assert(!pkt->isFlush());

    DPRINTF(RubyDma, "Hit callback needs response %d\n", needsResponse);

    // turn packet around to go back to requester if response expected
    if (access_backing_store) {
        ruby_system->getPhysMem()->access(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    if (needsResponse) {
        DPRINTF(RubyDma, "Sending packet back over port\n");
        // send next cycle
        schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod());
    } else {
        delete pkt;
    }

    DPRINTF(RubyDma, "Hit callback done!\n");
}

bool
DMASequencer::MemSlavePort::isPhysMemAddress(Addr addr) const
{
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);
    return seq->system->isMemAddr(addr);
}
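
// A DMA transaction may span several cache blocks. makeRequest() issues the
// first (possibly unaligned) block-sized chunk; issueNext() issues the
// remaining block-aligned chunks as each one completes. Only one DMA request
// can be outstanding at a time.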
RequestStatus
DMASequencer::makeRequest(PacketPtr pkt)
{
    if (m_is_busy) {
        return RequestStatus_BufferFull;
    }

    uint64_t paddr = pkt->getAddr();
    uint8_t* data = pkt->getPtr<uint8_t>();
    int len = pkt->getSize();
    bool write = pkt->isWrite();

    assert(!m_is_busy); // only support one outstanding DMA request
    m_is_busy = true;

    active_request.start_paddr = paddr;
    active_request.write = write;
    active_request.data = data;
    active_request.len = len;
    active_request.bytes_completed = 0;
    active_request.bytes_issued = 0;
    active_request.pkt = pkt;

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = Address(paddr);
    msg->getLineAddress() = line_address(msg->getPhysicalAddress());
    msg->getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD;
    int offset = paddr & m_data_block_mask;

    msg->getLen() = (offset + len) <= RubySystem::getBlockSizeBytes() ?
        len : RubySystem::getBlockSizeBytes() - offset;

    if (write && (data != NULL)) {
        if (active_request.data != NULL) {
            msg->getDataBlk().setData(data, offset, msg->getLen());
        }
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg);
    active_request.bytes_issued += msg->getLen();

    return RequestStatus_Issued;
}

void
DMASequencer::issueNext()
{
    assert(m_is_busy);
    active_request.bytes_completed = active_request.bytes_issued;
    if (active_request.len == active_request.bytes_completed) {
        //
        // Must unset the busy flag before calling back the dma port because
        // the callback may cause a previously nacked request to be reissued
        //
        DPRINTF(RubyDma, "DMA request completed\n");
        m_is_busy = false;
        ruby_hit_callback(active_request.pkt);
        return;
    }

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = Address(active_request.start_paddr +
                                        active_request.bytes_completed);

    assert((msg->getPhysicalAddress().getAddress() & m_data_block_mask) == 0);
    msg->getLineAddress() = line_address(msg->getPhysicalAddress());

    msg->getType() = (active_request.write ? SequencerRequestType_ST :
                      SequencerRequestType_LD);

    msg->getLen() =
        (active_request.len -
         active_request.bytes_completed < RubySystem::getBlockSizeBytes() ?
         active_request.len - active_request.bytes_completed :
         RubySystem::getBlockSizeBytes());

    if (active_request.write) {
        msg->getDataBlk().
            setData(&active_request.data[active_request.bytes_completed],
                    0, msg->getLen());
        msg->getType() = SequencerRequestType_ST;
    } else {
        msg->getType() = SequencerRequestType_LD;
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg);
    active_request.bytes_issued += msg->getLen();
    DPRINTF(RubyDma,
            "DMA request bytes issued %d, bytes completed %d, total len %d\n",
            active_request.bytes_issued, active_request.bytes_completed,
            active_request.len);
}

void
DMASequencer::dataCallback(const DataBlock & dblk)
{
    assert(m_is_busy);
    int len = active_request.bytes_issued - active_request.bytes_completed;
    int offset = 0;
    if (active_request.bytes_completed == 0)
        offset = active_request.start_paddr & m_data_block_mask;
    assert(!active_request.write);
    if (active_request.data != NULL) {
        memcpy(&active_request.data[active_request.bytes_completed],
               dblk.getData(offset, len), len);
    }
    issueNext();
}

void
DMASequencer::ackCallback()
{
    issueNext();
}

void
DMASequencer::recordRequestType(DMASequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            DMASequencerRequestType_to_string(requestType));
}

DMASequencer *
DMASequencerParams::create()
{
    return new DMASequencer(this);
}