// dma_device.cc — diff-view scrape of revisions 11010:034378be28a2 and 11284:b3926db25371
1/*
2 * Copyright (c) 2012, 2015 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2006 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Ali Saidi
41 * Nathan Binkert
42 * Andreas Hansson
43 * Andreas Sandberg
44 */
45
46#include "dev/dma_device.hh"
47
48#include <utility>
49
50#include "base/chunk_generator.hh"
51#include "debug/DMA.hh"
52#include "debug/Drain.hh"
53#include "sim/system.hh"
54
55DmaPort::DmaPort(MemObject *dev, System *s)
56 : MasterPort(dev->name() + ".dma", dev),
57 device(dev), sys(s), masterId(s->getMasterId(dev->name())),
58 sendEvent(this), pendingCount(0), inRetry(false)
59{ }
60
61void
62DmaPort::handleResp(PacketPtr pkt, Tick delay)
63{
64 // should always see a response with a sender state
65 assert(pkt->isResponse());
66
67 // get the DMA sender state
68 DmaReqState *state = dynamic_cast<DmaReqState*>(pkt->senderState);
69 assert(state);
70
71 DPRINTF(DMA, "Received response %s for addr: %#x size: %d nb: %d," \
72 " tot: %d sched %d\n",
73 pkt->cmdString(), pkt->getAddr(), pkt->req->getSize(),
74 state->numBytes, state->totBytes,
75 state->completionEvent ?
76 state->completionEvent->scheduled() : 0);
77
78 assert(pendingCount != 0);
79 pendingCount--;
80
81 // update the number of bytes received based on the request rather
82 // than the packet as the latter could be rounded up to line sizes
83 state->numBytes += pkt->req->getSize();
84 assert(state->totBytes >= state->numBytes);
85
86 // if we have reached the total number of bytes for this DMA
87 // request, then signal the completion and delete the sate
88 if (state->totBytes == state->numBytes) {
89 if (state->completionEvent) {
90 delay += state->delay;
91 device->schedule(state->completionEvent, curTick() + delay);
92 }
93 delete state;
94 }
95
96 // delete the request that we created and also the packet
97 delete pkt->req;
98 delete pkt;
99
100 // we might be drained at this point, if so signal the drain event
101 if (pendingCount == 0)
102 signalDrainDone();
103}
104
105bool
106DmaPort::recvTimingResp(PacketPtr pkt)
107{
1/*
2 * Copyright (c) 2012, 2015 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2006 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Ali Saidi
41 * Nathan Binkert
42 * Andreas Hansson
43 * Andreas Sandberg
44 */
45
46#include "dev/dma_device.hh"
47
48#include <utility>
49
50#include "base/chunk_generator.hh"
51#include "debug/DMA.hh"
52#include "debug/Drain.hh"
53#include "sim/system.hh"
54
55DmaPort::DmaPort(MemObject *dev, System *s)
56 : MasterPort(dev->name() + ".dma", dev),
57 device(dev), sys(s), masterId(s->getMasterId(dev->name())),
58 sendEvent(this), pendingCount(0), inRetry(false)
59{ }
60
61void
62DmaPort::handleResp(PacketPtr pkt, Tick delay)
63{
64 // should always see a response with a sender state
65 assert(pkt->isResponse());
66
67 // get the DMA sender state
68 DmaReqState *state = dynamic_cast<DmaReqState*>(pkt->senderState);
69 assert(state);
70
71 DPRINTF(DMA, "Received response %s for addr: %#x size: %d nb: %d," \
72 " tot: %d sched %d\n",
73 pkt->cmdString(), pkt->getAddr(), pkt->req->getSize(),
74 state->numBytes, state->totBytes,
75 state->completionEvent ?
76 state->completionEvent->scheduled() : 0);
77
78 assert(pendingCount != 0);
79 pendingCount--;
80
81 // update the number of bytes received based on the request rather
82 // than the packet as the latter could be rounded up to line sizes
83 state->numBytes += pkt->req->getSize();
84 assert(state->totBytes >= state->numBytes);
85
86 // if we have reached the total number of bytes for this DMA
87 // request, then signal the completion and delete the sate
88 if (state->totBytes == state->numBytes) {
89 if (state->completionEvent) {
90 delay += state->delay;
91 device->schedule(state->completionEvent, curTick() + delay);
92 }
93 delete state;
94 }
95
96 // delete the request that we created and also the packet
97 delete pkt->req;
98 delete pkt;
99
100 // we might be drained at this point, if so signal the drain event
101 if (pendingCount == 0)
102 signalDrainDone();
103}
104
105bool
106DmaPort::recvTimingResp(PacketPtr pkt)
107{
108 // We shouldn't ever get a cacheable block in ownership state
108 // We shouldn't ever get a cacheable block in Modified state
109 assert(pkt->req->isUncacheable() ||
109 assert(pkt->req->isUncacheable() ||
110 !(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
110 !(pkt->cacheResponding() && !pkt->hasSharers()));
111
112 handleResp(pkt);
113
114 return true;
115}
116
117DmaDevice::DmaDevice(const Params *p)
118 : PioDevice(p), dmaPort(this, sys)
119{ }
120
121void
122DmaDevice::init()
123{
124 if (!dmaPort.isConnected())
125 panic("DMA port of %s not connected to anything!", name());
126 PioDevice::init();
127}
128
129DrainState
130DmaPort::drain()
131{
132 if (pendingCount == 0) {
133 return DrainState::Drained;
134 } else {
135 DPRINTF(Drain, "DmaPort not drained\n");
136 return DrainState::Draining;
137 }
138}
139
140void
141DmaPort::recvReqRetry()
142{
143 assert(transmitList.size());
144 trySendTimingReq();
145}
146
147RequestPtr
148DmaPort::dmaAction(Packet::Command cmd, Addr addr, int size, Event *event,
149 uint8_t *data, Tick delay, Request::Flags flag)
150{
151 // one DMA request sender state for every action, that is then
152 // split into many requests and packets based on the block size,
153 // i.e. cache line size
154 DmaReqState *reqState = new DmaReqState(event, size, delay);
155
156 // (functionality added for Table Walker statistics)
157 // We're only interested in this when there will only be one request.
158 // For simplicity, we return the last request, which would also be
159 // the only request in that case.
160 RequestPtr req = NULL;
161
162 DPRINTF(DMA, "Starting DMA for addr: %#x size: %d sched: %d\n", addr, size,
163 event ? event->scheduled() : -1);
164 for (ChunkGenerator gen(addr, size, sys->cacheLineSize());
165 !gen.done(); gen.next()) {
166 req = new Request(gen.addr(), gen.size(), flag, masterId);
167 req->taskId(ContextSwitchTaskId::DMA);
168 PacketPtr pkt = new Packet(req, cmd);
169
170 // Increment the data pointer on a write
171 if (data)
172 pkt->dataStatic(data + gen.complete());
173
174 pkt->senderState = reqState;
175
176 DPRINTF(DMA, "--Queuing DMA for addr: %#x size: %d\n", gen.addr(),
177 gen.size());
178 queueDma(pkt);
179 }
180
181 // in zero time also initiate the sending of the packets we have
182 // just created, for atomic this involves actually completing all
183 // the requests
184 sendDma();
185
186 return req;
187}
188
189void
190DmaPort::queueDma(PacketPtr pkt)
191{
192 transmitList.push_back(pkt);
193
194 // remember that we have another packet pending, this will only be
195 // decremented once a response comes back
196 pendingCount++;
197}
198
199void
200DmaPort::trySendTimingReq()
201{
202 // send the first packet on the transmit list and schedule the
203 // following send if it is successful
204 PacketPtr pkt = transmitList.front();
205
206 DPRINTF(DMA, "Trying to send %s addr %#x\n", pkt->cmdString(),
207 pkt->getAddr());
208
209 inRetry = !sendTimingReq(pkt);
210 if (!inRetry) {
211 transmitList.pop_front();
212 DPRINTF(DMA, "-- Done\n");
213 // if there is more to do, then do so
214 if (!transmitList.empty())
215 // this should ultimately wait for as many cycles as the
216 // device needs to send the packet, but currently the port
217 // does not have any known width so simply wait a single
218 // cycle
219 device->schedule(sendEvent, device->clockEdge(Cycles(1)));
220 } else {
221 DPRINTF(DMA, "-- Failed, waiting for retry\n");
222 }
223
224 DPRINTF(DMA, "TransmitList: %d, inRetry: %d\n",
225 transmitList.size(), inRetry);
226}
227
228void
229DmaPort::sendDma()
230{
231 // some kind of selcetion between access methods
232 // more work is going to have to be done to make
233 // switching actually work
234 assert(transmitList.size());
235
236 if (sys->isTimingMode()) {
237 // if we are either waiting for a retry or are still waiting
238 // after sending the last packet, then do not proceed
239 if (inRetry || sendEvent.scheduled()) {
240 DPRINTF(DMA, "Can't send immediately, waiting to send\n");
241 return;
242 }
243
244 trySendTimingReq();
245 } else if (sys->isAtomicMode()) {
246 // send everything there is to send in zero time
247 while (!transmitList.empty()) {
248 PacketPtr pkt = transmitList.front();
249 transmitList.pop_front();
250
251 DPRINTF(DMA, "Sending DMA for addr: %#x size: %d\n",
252 pkt->req->getPaddr(), pkt->req->getSize());
253 Tick lat = sendAtomic(pkt);
254
255 handleResp(pkt, lat);
256 }
257 } else
258 panic("Unknown memory mode.");
259}
260
261BaseMasterPort &
262DmaDevice::getMasterPort(const std::string &if_name, PortID idx)
263{
264 if (if_name == "dma") {
265 return dmaPort;
266 }
267 return PioDevice::getMasterPort(if_name, idx);
268}
269
270
271
272
273
274DmaReadFifo::DmaReadFifo(DmaPort &_port, size_t size,
275 unsigned max_req_size,
276 unsigned max_pending,
277 Request::Flags flags)
278 : maxReqSize(max_req_size), fifoSize(size),
279 reqFlags(flags), port(_port),
280 buffer(size),
281 nextAddr(0), endAddr(0)
282{
283 freeRequests.resize(max_pending);
284 for (auto &e : freeRequests)
285 e.reset(new DmaDoneEvent(this, max_req_size));
286
287}
288
289DmaReadFifo::~DmaReadFifo()
290{
291 for (auto &p : pendingRequests) {
292 DmaDoneEvent *e(p.release());
293
294 if (e->done()) {
295 delete e;
296 } else {
297 // We can't kill in-flight DMAs, so we'll just transfer
298 // ownership to the event queue so that they get freed
299 // when they are done.
300 e->kill();
301 }
302 }
303}
304
305void
306DmaReadFifo::serialize(CheckpointOut &cp) const
307{
308 assert(pendingRequests.empty());
309
310 SERIALIZE_CONTAINER(buffer);
311 SERIALIZE_SCALAR(endAddr);
312 SERIALIZE_SCALAR(nextAddr);
313}
314
315void
316DmaReadFifo::unserialize(CheckpointIn &cp)
317{
318 UNSERIALIZE_CONTAINER(buffer);
319 UNSERIALIZE_SCALAR(endAddr);
320 UNSERIALIZE_SCALAR(nextAddr);
321}
322
323bool
324DmaReadFifo::tryGet(uint8_t *dst, size_t len)
325{
326 if (buffer.size() >= len) {
327 buffer.read(dst, len);
328 resumeFill();
329 return true;
330 } else {
331 return false;
332 }
333}
334
335void
336DmaReadFifo::get(uint8_t *dst, size_t len)
337{
338 const bool success(tryGet(dst, len));
339 panic_if(!success, "Buffer underrun in DmaReadFifo::get()\n");
340}
341
342void
343DmaReadFifo::startFill(Addr start, size_t size)
344{
345 assert(atEndOfBlock());
346
347 nextAddr = start;
348 endAddr = start + size;
349 resumeFill();
350}
351
352void
353DmaReadFifo::stopFill()
354{
355 // Prevent new DMA requests by setting the next address to the end
356 // address. Pending requests will still complete.
357 nextAddr = endAddr;
358
359 // Flag in-flight accesses as canceled. This prevents their data
360 // from being written to the FIFO.
361 for (auto &p : pendingRequests)
362 p->cancel();
363}
364
365void
366DmaReadFifo::resumeFill()
367{
368 // Don't try to fetch more data if we are draining. This ensures
369 // that the DMA engine settles down before we checkpoint it.
370 if (drainState() == DrainState::Draining)
371 return;
372
373 const bool old_eob(atEndOfBlock());
374 size_t size_pending(0);
375 for (auto &e : pendingRequests)
376 size_pending += e->requestSize();
377
378 while (!freeRequests.empty() && !atEndOfBlock()) {
379 const size_t req_size(std::min(maxReqSize, endAddr - nextAddr));
380 if (buffer.size() + size_pending + req_size > fifoSize)
381 break;
382
383 DmaDoneEventUPtr event(std::move(freeRequests.front()));
384 freeRequests.pop_front();
385 assert(event);
386
387 event->reset(req_size);
388 port.dmaAction(MemCmd::ReadReq, nextAddr, req_size, event.get(),
389 event->data(), 0, reqFlags);
390 nextAddr += req_size;
391 size_pending += req_size;
392
393 pendingRequests.emplace_back(std::move(event));
394 }
395
396 // EOB can be set before a call to dmaDone() if in-flight accesses
397 // have been canceled.
398 if (!old_eob && atEndOfBlock())
399 onEndOfBlock();
400}
401
402void
403DmaReadFifo::dmaDone()
404{
405 const bool old_active(isActive());
406
407 handlePending();
408 resumeFill();
409
410 if (!old_active && isActive())
411 onIdle();
412}
413
414void
415DmaReadFifo::handlePending()
416{
417 while (!pendingRequests.empty() && pendingRequests.front()->done()) {
418 // Get the first finished pending request
419 DmaDoneEventUPtr event(std::move(pendingRequests.front()));
420 pendingRequests.pop_front();
421
422 if (!event->canceled())
423 buffer.write(event->data(), event->requestSize());
424
425 // Move the event to the list of free requests
426 freeRequests.emplace_back(std::move(event));
427 }
428
429 if (pendingRequests.empty())
430 signalDrainDone();
431}
432
433
434
435DrainState
436DmaReadFifo::drain()
437{
438 return pendingRequests.empty() ? DrainState::Drained : DrainState::Draining;
439}
440
441
442DmaReadFifo::DmaDoneEvent::DmaDoneEvent(DmaReadFifo *_parent,
443 size_t max_size)
444 : parent(_parent), _done(false), _canceled(false), _data(max_size, 0)
445{
446}
447
448void
449DmaReadFifo::DmaDoneEvent::kill()
450{
451 parent = nullptr;
452 setFlags(AutoDelete);
453}
454
455void
456DmaReadFifo::DmaDoneEvent::cancel()
457{
458 _canceled = true;
459}
460
461void
462DmaReadFifo::DmaDoneEvent::reset(size_t size)
463{
464 assert(size <= _data.size());
465 _done = false;
466 _canceled = false;
467 _requestSize = size;
468}
469
470void
471DmaReadFifo::DmaDoneEvent::process()
472{
473 if (!parent)
474 return;
475
476 assert(!_done);
477 _done = true;
478 parent->dmaDone();
479}
111
112 handleResp(pkt);
113
114 return true;
115}
116
117DmaDevice::DmaDevice(const Params *p)
118 : PioDevice(p), dmaPort(this, sys)
119{ }
120
121void
122DmaDevice::init()
123{
124 if (!dmaPort.isConnected())
125 panic("DMA port of %s not connected to anything!", name());
126 PioDevice::init();
127}
128
129DrainState
130DmaPort::drain()
131{
132 if (pendingCount == 0) {
133 return DrainState::Drained;
134 } else {
135 DPRINTF(Drain, "DmaPort not drained\n");
136 return DrainState::Draining;
137 }
138}
139
140void
141DmaPort::recvReqRetry()
142{
143 assert(transmitList.size());
144 trySendTimingReq();
145}
146
147RequestPtr
148DmaPort::dmaAction(Packet::Command cmd, Addr addr, int size, Event *event,
149 uint8_t *data, Tick delay, Request::Flags flag)
150{
151 // one DMA request sender state for every action, that is then
152 // split into many requests and packets based on the block size,
153 // i.e. cache line size
154 DmaReqState *reqState = new DmaReqState(event, size, delay);
155
156 // (functionality added for Table Walker statistics)
157 // We're only interested in this when there will only be one request.
158 // For simplicity, we return the last request, which would also be
159 // the only request in that case.
160 RequestPtr req = NULL;
161
162 DPRINTF(DMA, "Starting DMA for addr: %#x size: %d sched: %d\n", addr, size,
163 event ? event->scheduled() : -1);
164 for (ChunkGenerator gen(addr, size, sys->cacheLineSize());
165 !gen.done(); gen.next()) {
166 req = new Request(gen.addr(), gen.size(), flag, masterId);
167 req->taskId(ContextSwitchTaskId::DMA);
168 PacketPtr pkt = new Packet(req, cmd);
169
170 // Increment the data pointer on a write
171 if (data)
172 pkt->dataStatic(data + gen.complete());
173
174 pkt->senderState = reqState;
175
176 DPRINTF(DMA, "--Queuing DMA for addr: %#x size: %d\n", gen.addr(),
177 gen.size());
178 queueDma(pkt);
179 }
180
181 // in zero time also initiate the sending of the packets we have
182 // just created, for atomic this involves actually completing all
183 // the requests
184 sendDma();
185
186 return req;
187}
188
189void
190DmaPort::queueDma(PacketPtr pkt)
191{
192 transmitList.push_back(pkt);
193
194 // remember that we have another packet pending, this will only be
195 // decremented once a response comes back
196 pendingCount++;
197}
198
199void
200DmaPort::trySendTimingReq()
201{
202 // send the first packet on the transmit list and schedule the
203 // following send if it is successful
204 PacketPtr pkt = transmitList.front();
205
206 DPRINTF(DMA, "Trying to send %s addr %#x\n", pkt->cmdString(),
207 pkt->getAddr());
208
209 inRetry = !sendTimingReq(pkt);
210 if (!inRetry) {
211 transmitList.pop_front();
212 DPRINTF(DMA, "-- Done\n");
213 // if there is more to do, then do so
214 if (!transmitList.empty())
215 // this should ultimately wait for as many cycles as the
216 // device needs to send the packet, but currently the port
217 // does not have any known width so simply wait a single
218 // cycle
219 device->schedule(sendEvent, device->clockEdge(Cycles(1)));
220 } else {
221 DPRINTF(DMA, "-- Failed, waiting for retry\n");
222 }
223
224 DPRINTF(DMA, "TransmitList: %d, inRetry: %d\n",
225 transmitList.size(), inRetry);
226}
227
228void
229DmaPort::sendDma()
230{
231 // some kind of selcetion between access methods
232 // more work is going to have to be done to make
233 // switching actually work
234 assert(transmitList.size());
235
236 if (sys->isTimingMode()) {
237 // if we are either waiting for a retry or are still waiting
238 // after sending the last packet, then do not proceed
239 if (inRetry || sendEvent.scheduled()) {
240 DPRINTF(DMA, "Can't send immediately, waiting to send\n");
241 return;
242 }
243
244 trySendTimingReq();
245 } else if (sys->isAtomicMode()) {
246 // send everything there is to send in zero time
247 while (!transmitList.empty()) {
248 PacketPtr pkt = transmitList.front();
249 transmitList.pop_front();
250
251 DPRINTF(DMA, "Sending DMA for addr: %#x size: %d\n",
252 pkt->req->getPaddr(), pkt->req->getSize());
253 Tick lat = sendAtomic(pkt);
254
255 handleResp(pkt, lat);
256 }
257 } else
258 panic("Unknown memory mode.");
259}
260
261BaseMasterPort &
262DmaDevice::getMasterPort(const std::string &if_name, PortID idx)
263{
264 if (if_name == "dma") {
265 return dmaPort;
266 }
267 return PioDevice::getMasterPort(if_name, idx);
268}
269
270
271
272
273
274DmaReadFifo::DmaReadFifo(DmaPort &_port, size_t size,
275 unsigned max_req_size,
276 unsigned max_pending,
277 Request::Flags flags)
278 : maxReqSize(max_req_size), fifoSize(size),
279 reqFlags(flags), port(_port),
280 buffer(size),
281 nextAddr(0), endAddr(0)
282{
283 freeRequests.resize(max_pending);
284 for (auto &e : freeRequests)
285 e.reset(new DmaDoneEvent(this, max_req_size));
286
287}
288
289DmaReadFifo::~DmaReadFifo()
290{
291 for (auto &p : pendingRequests) {
292 DmaDoneEvent *e(p.release());
293
294 if (e->done()) {
295 delete e;
296 } else {
297 // We can't kill in-flight DMAs, so we'll just transfer
298 // ownership to the event queue so that they get freed
299 // when they are done.
300 e->kill();
301 }
302 }
303}
304
305void
306DmaReadFifo::serialize(CheckpointOut &cp) const
307{
308 assert(pendingRequests.empty());
309
310 SERIALIZE_CONTAINER(buffer);
311 SERIALIZE_SCALAR(endAddr);
312 SERIALIZE_SCALAR(nextAddr);
313}
314
315void
316DmaReadFifo::unserialize(CheckpointIn &cp)
317{
318 UNSERIALIZE_CONTAINER(buffer);
319 UNSERIALIZE_SCALAR(endAddr);
320 UNSERIALIZE_SCALAR(nextAddr);
321}
322
323bool
324DmaReadFifo::tryGet(uint8_t *dst, size_t len)
325{
326 if (buffer.size() >= len) {
327 buffer.read(dst, len);
328 resumeFill();
329 return true;
330 } else {
331 return false;
332 }
333}
334
335void
336DmaReadFifo::get(uint8_t *dst, size_t len)
337{
338 const bool success(tryGet(dst, len));
339 panic_if(!success, "Buffer underrun in DmaReadFifo::get()\n");
340}
341
342void
343DmaReadFifo::startFill(Addr start, size_t size)
344{
345 assert(atEndOfBlock());
346
347 nextAddr = start;
348 endAddr = start + size;
349 resumeFill();
350}
351
352void
353DmaReadFifo::stopFill()
354{
355 // Prevent new DMA requests by setting the next address to the end
356 // address. Pending requests will still complete.
357 nextAddr = endAddr;
358
359 // Flag in-flight accesses as canceled. This prevents their data
360 // from being written to the FIFO.
361 for (auto &p : pendingRequests)
362 p->cancel();
363}
364
365void
366DmaReadFifo::resumeFill()
367{
368 // Don't try to fetch more data if we are draining. This ensures
369 // that the DMA engine settles down before we checkpoint it.
370 if (drainState() == DrainState::Draining)
371 return;
372
373 const bool old_eob(atEndOfBlock());
374 size_t size_pending(0);
375 for (auto &e : pendingRequests)
376 size_pending += e->requestSize();
377
378 while (!freeRequests.empty() && !atEndOfBlock()) {
379 const size_t req_size(std::min(maxReqSize, endAddr - nextAddr));
380 if (buffer.size() + size_pending + req_size > fifoSize)
381 break;
382
383 DmaDoneEventUPtr event(std::move(freeRequests.front()));
384 freeRequests.pop_front();
385 assert(event);
386
387 event->reset(req_size);
388 port.dmaAction(MemCmd::ReadReq, nextAddr, req_size, event.get(),
389 event->data(), 0, reqFlags);
390 nextAddr += req_size;
391 size_pending += req_size;
392
393 pendingRequests.emplace_back(std::move(event));
394 }
395
396 // EOB can be set before a call to dmaDone() if in-flight accesses
397 // have been canceled.
398 if (!old_eob && atEndOfBlock())
399 onEndOfBlock();
400}
401
402void
403DmaReadFifo::dmaDone()
404{
405 const bool old_active(isActive());
406
407 handlePending();
408 resumeFill();
409
410 if (!old_active && isActive())
411 onIdle();
412}
413
414void
415DmaReadFifo::handlePending()
416{
417 while (!pendingRequests.empty() && pendingRequests.front()->done()) {
418 // Get the first finished pending request
419 DmaDoneEventUPtr event(std::move(pendingRequests.front()));
420 pendingRequests.pop_front();
421
422 if (!event->canceled())
423 buffer.write(event->data(), event->requestSize());
424
425 // Move the event to the list of free requests
426 freeRequests.emplace_back(std::move(event));
427 }
428
429 if (pendingRequests.empty())
430 signalDrainDone();
431}
432
433
434
435DrainState
436DmaReadFifo::drain()
437{
438 return pendingRequests.empty() ? DrainState::Drained : DrainState::Draining;
439}
440
441
442DmaReadFifo::DmaDoneEvent::DmaDoneEvent(DmaReadFifo *_parent,
443 size_t max_size)
444 : parent(_parent), _done(false), _canceled(false), _data(max_size, 0)
445{
446}
447
448void
449DmaReadFifo::DmaDoneEvent::kill()
450{
451 parent = nullptr;
452 setFlags(AutoDelete);
453}
454
455void
456DmaReadFifo::DmaDoneEvent::cancel()
457{
458 _canceled = true;
459}
460
461void
462DmaReadFifo::DmaDoneEvent::reset(size_t size)
463{
464 assert(size <= _data.size());
465 _done = false;
466 _canceled = false;
467 _requestSize = size;
468}
469
470void
471DmaReadFifo::DmaDoneEvent::process()
472{
473 if (!parent)
474 return;
475
476 assert(!_done);
477 _done = true;
478 parent->dmaDone();
479}