/*
 * Copyright (c) 2012, 2015, 2017 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Nathan Binkert
 *          Andreas Hansson
 *          Andreas Sandberg
 */

#include "dev/dma_device.hh"

#include <utility>

#include "base/chunk_generator.hh"
#include "debug/DMA.hh"
#include "debug/Drain.hh"
#include "mem/port_proxy.hh"

DmaPort::DmaPort(ClockedObject *dev, System *s)
    : MasterPort(dev->name() + ".dma", dev),
      device(dev), sys(s), masterId(s->getMasterId(dev)),
      sendEvent([this]{ sendDma(); }, dev->name()),
      pendingCount(0), inRetry(false)
{ }

void
DmaPort::handleResp(PacketPtr pkt, Tick delay)
{
    // should always see a response with a sender state
    assert(pkt->isResponse());

    // get the DMA sender state
    DmaReqState *state = dynamic_cast<DmaReqState*>(pkt->senderState);
    assert(state);

    DPRINTF(DMA, "Received response %s for addr: %#x size: %d nb: %d," \
            " tot: %d sched %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->req->getSize(),
            state->numBytes, state->totBytes,
            state->completionEvent ?
            state->completionEvent->scheduled() : 0);

    assert(pendingCount != 0);
    pendingCount--;

    // update the number of bytes received based on the request rather
    // than the packet as the latter could be rounded up to line sizes
    state->numBytes += pkt->req->getSize();
    assert(state->totBytes >= state->numBytes);

    // if we have reached the total number of bytes for this DMA
    // request, then signal the completion and delete the state
    if (state->totBytes == state->numBytes) {
        if (state->completionEvent) {
            delay += state->delay;
            device->schedule(state->completionEvent, curTick() + delay);
        }
        delete state;
    }

    // delete the packet
    delete pkt;

    // we might be drained at this point, if so signal the drain event
    if (pendingCount == 0)
        signalDrainDone();
}

bool
DmaPort::recvTimingResp(PacketPtr pkt)
{
    // We shouldn't ever get a cacheable block in Modified state
    assert(pkt->req->isUncacheable() ||
           !(pkt->cacheResponding() && !pkt->hasSharers()));

    handleResp(pkt);

    return true;
}

DmaDevice::DmaDevice(const Params *p)
    : PioDevice(p), dmaPort(this, sys)
{ }

void
DmaDevice::init()
{
    if (!dmaPort.isConnected())
        panic("DMA port of %s not connected to anything!", name());
    PioDevice::init();
}

DrainState
DmaPort::drain()
{
    if (pendingCount == 0) {
        return DrainState::Drained;
    } else {
        DPRINTF(Drain, "DmaPort not drained\n");
        return DrainState::Draining;
    }
}

void
DmaPort::recvReqRetry()
{
    assert(transmitList.size());
    trySendTimingReq();
}

RequestPtr
DmaPort::dmaAction(Packet::Command cmd, Addr addr, int size, Event *event,
                   uint8_t *data, Tick delay, Request::Flags flag)
{
    // one DMA request sender state for every action, that is then
    // split into many requests and packets based on the block size,
    // i.e. cache line size
    DmaReqState *reqState = new DmaReqState(event, size, delay);

    // (functionality added for Table Walker statistics)
    // We're only interested in this when there will only be one request.
    // For simplicity, we return the last request, which would also be
    // the only request in that case.
    RequestPtr req = NULL;

    DPRINTF(DMA, "Starting DMA for addr: %#x size: %d sched: %d\n", addr, size,
            event ? event->scheduled() : -1);
    for (ChunkGenerator gen(addr, size, sys->cacheLineSize());
         !gen.done(); gen.next()) {

        req = std::make_shared<Request>(
            gen.addr(), gen.size(), flag, masterId);

        req->taskId(ContextSwitchTaskId::DMA);
        PacketPtr pkt = new Packet(req, cmd);

        // Increment the data pointer on a write
        if (data)
            pkt->dataStatic(data + gen.complete());

        pkt->senderState = reqState;

        DPRINTF(DMA, "--Queuing DMA for addr: %#x size: %d\n", gen.addr(),
                gen.size());
        queueDma(pkt);
    }

    // in zero time also initiate the sending of the packets we have
    // just created, for atomic this involves actually completing all
    // the requests
    sendDma();

    return req;
}

void
DmaPort::queueDma(PacketPtr pkt)
{
    transmitList.push_back(pkt);

    // remember that we have another packet pending, this will only be
    // decremented once a response comes back
    pendingCount++;
}

void
DmaPort::trySendTimingReq()
{
    // send the first packet on the transmit list and schedule the
    // following send if it is successful
    PacketPtr pkt = transmitList.front();

    DPRINTF(DMA, "Trying to send %s addr %#x\n", pkt->cmdString(),
            pkt->getAddr());

    inRetry = !sendTimingReq(pkt);
    if (!inRetry) {
        transmitList.pop_front();
        DPRINTF(DMA, "-- Done\n");
        // if there is more to do, then do so
        if (!transmitList.empty())
            // this should ultimately wait for as many cycles as the
            // device needs to send the packet, but currently the port
            // does not have any known width so simply wait a single
            // cycle
            device->schedule(sendEvent, device->clockEdge(Cycles(1)));
    } else {
        DPRINTF(DMA, "-- Failed, waiting for retry\n");
    }

    DPRINTF(DMA, "TransmitList: %d, inRetry: %d\n",
            transmitList.size(), inRetry);
}

void
DmaPort::sendDma()
{
    // some kind of selection between access methods
    // more work is going to have to be done to make
    // switching actually work
    assert(transmitList.size());

    if (sys->isTimingMode()) {
        // if we are either waiting for a retry or are still waiting
        // after sending the last packet, then do not proceed
        if (inRetry || sendEvent.scheduled()) {
            DPRINTF(DMA, "Can't send immediately, waiting to send\n");
            return;
        }

        trySendTimingReq();
    } else if (sys->isAtomicMode()) {
        // send everything there is to send in zero time
        while (!transmitList.empty()) {
            PacketPtr pkt = transmitList.front();
            transmitList.pop_front();

            DPRINTF(DMA, "Sending DMA for addr: %#x size: %d\n",
                    pkt->req->getPaddr(), pkt->req->getSize());
            Tick lat = sendAtomic(pkt);

            handleResp(pkt, lat);
        }
    } else
        panic("Unknown memory mode.");
}

Port &
DmaDevice::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "dma") {
        return dmaPort;
    }
    return PioDevice::getPort(if_name, idx);
}

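// DmaReadFifo keeps a pool of reusable DmaDoneEvents: events for
// requests in flight sit on pendingRequests and are recycled onto
// freeRequests once they complete, which caps the number of
// outstanding DMA requests at max_pending.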
DmaReadFifo::DmaReadFifo(DmaPort &_port, size_t size,
                         unsigned max_req_size,
                         unsigned max_pending,
                         Request::Flags flags)
    : maxReqSize(max_req_size), fifoSize(size),
      reqFlags(flags), port(_port),
      buffer(size),
      nextAddr(0), endAddr(0)
{
    freeRequests.resize(max_pending);
    for (auto &e : freeRequests)
        e.reset(new DmaDoneEvent(this, max_req_size));
}

DmaReadFifo::~DmaReadFifo()
{
    for (auto &p : pendingRequests) {
        DmaDoneEvent *e(p.release());

        if (e->done()) {
            delete e;
        } else {
            // We can't kill in-flight DMAs, so we'll just transfer
            // ownership to the event queue so that they get freed
            // when they are done.
            e->kill();
        }
    }
}

void
DmaReadFifo::serialize(CheckpointOut &cp) const
{
    assert(pendingRequests.empty());

    SERIALIZE_CONTAINER(buffer);
    SERIALIZE_SCALAR(endAddr);
    SERIALIZE_SCALAR(nextAddr);
}

void
DmaReadFifo::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_CONTAINER(buffer);
    UNSERIALIZE_SCALAR(endAddr);
    UNSERIALIZE_SCALAR(nextAddr);
}

bool
DmaReadFifo::tryGet(uint8_t *dst, size_t len)
{
    if (buffer.size() >= len) {
        buffer.read(dst, len);
        resumeFill();
        return true;
    } else {
        return false;
    }
}

void
DmaReadFifo::get(uint8_t *dst, size_t len)
{
    const bool success(tryGet(dst, len));
    panic_if(!success, "Buffer underrun in DmaReadFifo::get()\n");
}

void
DmaReadFifo::startFill(Addr start, size_t size)
{
    assert(atEndOfBlock());

    nextAddr = start;
    endAddr = start + size;
    resumeFill();
}

void
DmaReadFifo::stopFill()
{
    // Prevent new DMA requests by setting the next address to the end
    // address. Pending requests will still complete.
    nextAddr = endAddr;

    // Flag in-flight accesses as canceled. This prevents their data
    // from being written to the FIFO.
    for (auto &p : pendingRequests)
        p->cancel();
}

void
DmaReadFifo::resumeFill()
{
    // Don't try to fetch more data if we are draining. This ensures
    // that the DMA engine settles down before we checkpoint it.
    if (drainState() == DrainState::Draining)
        return;

    const bool old_eob(atEndOfBlock());

    if (port.sys->bypassCaches())
        resumeFillFunctional();
    else
        resumeFillTiming();

    if (!old_eob && atEndOfBlock())
        onEndOfBlock();
}

void
DmaReadFifo::resumeFillFunctional()
{
    const size_t fifo_space = buffer.capacity() - buffer.size();
    const size_t kvm_watermark = port.sys->cacheLineSize();
    if (fifo_space >= kvm_watermark || buffer.capacity() < kvm_watermark) {
        const size_t block_remaining = endAddr - nextAddr;
        const size_t xfer_size = std::min(fifo_space, block_remaining);
        std::vector<uint8_t> tmp_buffer(xfer_size);

        assert(pendingRequests.empty());
        DPRINTF(DMA, "KVM Bypassing startAddr=%#x xfer_size=%#x " \
                "fifo_space=%#x block_remaining=%#x\n",
                nextAddr, xfer_size, fifo_space, block_remaining);

        port.sys->physProxy.readBlob(nextAddr, tmp_buffer.data(), xfer_size);
        buffer.write(tmp_buffer.begin(), xfer_size);
        nextAddr += xfer_size;
    }
}

void
DmaReadFifo::resumeFillTiming()
{
    size_t size_pending(0);
    for (auto &e : pendingRequests)
        size_pending += e->requestSize();

    while (!freeRequests.empty() && !atEndOfBlock()) {
        const size_t req_size(std::min(maxReqSize, endAddr - nextAddr));
        if (buffer.size() + size_pending + req_size > fifoSize)
            break;

        DmaDoneEventUPtr event(std::move(freeRequests.front()));
        freeRequests.pop_front();
        assert(event);

        event->reset(req_size);
        port.dmaAction(MemCmd::ReadReq, nextAddr, req_size, event.get(),
                       event->data(), 0, reqFlags);
        nextAddr += req_size;
        size_pending += req_size;

        pendingRequests.emplace_back(std::move(event));
    }
}

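// Callback from DmaDoneEvent::process() when an outstanding DMA
// request finishes: recycle completed events, top the FIFO back up,
// and fire onIdle() if the engine has just gone idle.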
void
DmaReadFifo::dmaDone()
{
    const bool old_active(isActive());

    handlePending();
    resumeFill();

    if (old_active && !isActive())
        onIdle();
}

void
DmaReadFifo::handlePending()
{
    while (!pendingRequests.empty() && pendingRequests.front()->done()) {
        // Get the first finished pending request
        DmaDoneEventUPtr event(std::move(pendingRequests.front()));
        pendingRequests.pop_front();

        if (!event->canceled())
            buffer.write(event->data(), event->requestSize());

        // Move the event to the list of free requests
        freeRequests.emplace_back(std::move(event));
    }

    if (pendingRequests.empty())
        signalDrainDone();
}

DrainState
DmaReadFifo::drain()
{
    return pendingRequests.empty() ?
        DrainState::Drained : DrainState::Draining;
}


DmaReadFifo::DmaDoneEvent::DmaDoneEvent(DmaReadFifo *_parent,
                                        size_t max_size)
    : parent(_parent), _done(false), _canceled(false), _data(max_size, 0)
{
}

void
DmaReadFifo::DmaDoneEvent::kill()
{
    parent = nullptr;
    setFlags(AutoDelete);
}

void
DmaReadFifo::DmaDoneEvent::cancel()
{
    _canceled = true;
}

void
DmaReadFifo::DmaDoneEvent::reset(size_t size)
{
    assert(size <= _data.size());
    _done = false;
    _canceled = false;
    _requestSize = size;
}

void
DmaReadFifo::DmaDoneEvent::process()
{
    if (!parent)
        return;

    assert(!_done);
    _done = true;
    parent->dmaDone();
}
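
// A minimal usage sketch (hypothetical, not part of the gem5 sources): a
// device model would typically subclass DmaReadFifo, override the
// onEndOfBlock() / onIdle() hooks, and drive the FIFO roughly like this:
//
//     fifo->startFill(buf_paddr, buf_size);  // start DMA-ing the block
//     ...
//     uint8_t chunk[64];
//     if (fifo->tryGet(chunk, sizeof(chunk))) {
//         // consume the data; tryGet() calls resumeFill(), which keeps
//         // issuing DMA reads until the whole block is in the FIFO
//     }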