dma_device.hh: revision 13784:1941dc118243 vs. 13892:0182a0601f66 (changed lines marked with - and +)
/*
 * Copyright (c) 2012-2013, 2015, 2017 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Nathan Binkert
 *          Andreas Sandberg
 */

#ifndef __DEV_DMA_DEVICE_HH__
#define __DEV_DMA_DEVICE_HH__

#include <deque>
#include <memory>

#include "base/circlebuf.hh"
#include "dev/io_device.hh"
#include "params/DmaDevice.hh"
#include "sim/drain.hh"
#include "sim/system.hh"

+class ClockedObject;
+
class DmaPort : public MasterPort, public Drainable
{
  private:

    /**
     * Take the first packet of the transmit list and attempt to send
     * it as a timing request. If it is successful, schedule the
     * sending of the next packet; otherwise remember that we are
     * waiting for a retry.
     */
    void trySendTimingReq();

    /**
     * For timing, attempt to send the first item on the transmit
     * list, and if it is successful and there are more packets
     * waiting, then schedule the sending of the next packet. For
     * atomic, simply send and process everything on the transmit
     * list.
     */
    void sendDma();

    /**
     * Handle a response packet by updating the corresponding DMA
     * request state to reflect the bytes received, and also update
     * the pending request counter. If the DMA request that this
     * packet is part of is complete, then signal the completion event
     * if present, potentially with a delay added to it.
     *
     * @param pkt Response packet to handle
     * @param delay Additional delay for scheduling the completion event
     */
    void handleResp(PacketPtr pkt, Tick delay = 0);

    struct DmaReqState : public Packet::SenderState
    {
        /** Event to call on the device when this transaction (all packets)
         * completes. */
        Event *completionEvent;

        /** Total number of bytes that this transaction involves. */
        const Addr totBytes;

        /** Number of bytes that have been acked for this transaction. */
        Addr numBytes;

        /** Amount to delay completion of the DMA by. */
        const Tick delay;

        DmaReqState(Event *ce, Addr tb, Tick _delay)
            : completionEvent(ce), totBytes(tb), numBytes(0), delay(_delay)
        {}
    };

  public:
    /** The device that owns this port. */
-    MemObject *const device;
+    ClockedObject *const device;

    /** The system that device/port are in. This is used to select which mode
     * we are currently operating in. */
    System *const sys;

    /** Id for all requests */
    const MasterID masterId;

  protected:
    /** Use a deque as we never do any insertion or removal in the middle */
    std::deque<PacketPtr> transmitList;

    /** Event used to schedule a future sending from the transmit list. */
    EventFunctionWrapper sendEvent;

    /** Number of outstanding packets the dma port has. */
    uint32_t pendingCount;

    /** If the port is currently waiting for a retry before it can
     * send whatever it is that it's sending. */
    bool inRetry;

  protected:

    bool recvTimingResp(PacketPtr pkt) override;
    void recvReqRetry() override;

    void queueDma(PacketPtr pkt);

  public:

-    DmaPort(MemObject *dev, System *s);
+    DmaPort(ClockedObject *dev, System *s);

    RequestPtr dmaAction(Packet::Command cmd, Addr addr, int size,
                         Event *event, uint8_t *data, Tick delay,
                         Request::Flags flag = 0);

    bool dmaPending() const { return pendingCount > 0; }

    DrainState drain() override;
};

class DmaDevice : public PioDevice
{
  protected:
    DmaPort dmaPort;

  public:
    typedef DmaDeviceParams Params;
    DmaDevice(const Params *p);
    virtual ~DmaDevice() { }

    void dmaWrite(Addr addr, int size, Event *event, uint8_t *data,
                  Tick delay = 0)
    {
        dmaPort.dmaAction(MemCmd::WriteReq, addr, size, event, data, delay);
    }

    void dmaRead(Addr addr, int size, Event *event, uint8_t *data,
                 Tick delay = 0)
    {
        dmaPort.dmaAction(MemCmd::ReadReq, addr, size, event, data, delay);
    }

    bool dmaPending() const { return dmaPort.dmaPending(); }

    void init() override;

    unsigned int cacheBlockSize() const { return sys->cacheLineSize(); }

    Port &getPort(const std::string &if_name,
                  PortID idx=InvalidPortID) override;

};

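/*
 * A minimal usage sketch for the helpers above, assuming a hypothetical
 * derived device (MyDevice, its buffer and its completion handler are
 * illustrative names, not part of this header):
 *
 * \code{.cpp}
 * class MyDevice : public DmaDevice
 * {
 *   private:
 *     std::vector<uint8_t> buffer;
 *     EventFunctionWrapper readDoneEvent;
 *
 *   public:
 *     MyDevice(const Params *p)
 *         : DmaDevice(p), buffer(1024),
 *           readDoneEvent([this]{ readDone(); }, name()) {}
 *
 *     void startRead(Addr src)
 *     {
 *         // dmaRead queues the request on dmaPort; readDoneEvent is
 *         // scheduled once all response packets have arrived.
 *         dmaRead(src, buffer.size(), &readDoneEvent, buffer.data());
 *     }
 *
 *     void readDone()
 *     {
 *         // Consume the buffer contents here.
 *     }
 * };
 * \endcode
 */
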
/**
 * DMA callback class.
 *
 * Allows one to register for a callback event after a sequence of (potentially
 * non-contiguous) DMA transfers on a DmaPort completes. Derived classes must
 * implement the process() method and use getChunkEvent() to allocate a
 * callback event for each participating DMA.
 */
class DmaCallback : public Drainable
{
  public:
    virtual const std::string name() const { return "DmaCallback"; }

    /**
     * DmaPort ensures that all outstanding DMA accesses have completed before
     * it finishes draining. However, DmaChunkEvents scheduled with a delay
     * might still be sitting on the event queue. Therefore, draining is not
     * complete until count is 0, which ensures that all outstanding
     * DmaChunkEvents associated with this DmaCallback have fired.
     */
    DrainState drain() override
    {
        return count ? DrainState::Draining : DrainState::Drained;
    }

  protected:
    int count;

    DmaCallback()
        : count(0)
    { }

    virtual ~DmaCallback() { }

    /**
     * Callback function invoked on completion of all chunks.
     */
    virtual void process() = 0;

  private:
    /**
     * Called by the DMA engine's completion event on each chunk completion.
     * Since the object may delete itself here, callers should not use
     * the object pointer after calling this function.
     */
    void chunkComplete()
    {
        if (--count == 0) {
            process();
            // Need to notify DrainManager that this object is finished
            // draining, even though it is immediately deleted.
            signalDrainDone();
            delete this;
        }
    }

  public:

    /**
     * Request a chunk event. Chunk events should be provided to each DMA
     * request that wishes to participate in this DmaCallback.
     */
    Event *getChunkEvent()
    {
        ++count;
        return new EventFunctionWrapper([this]{ chunkComplete(); }, name(),
                                        true);
    }
};

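/*
 * A sketch of the intended protocol, assuming a hypothetical callback
 * class, device pointer and buffer (ScatterCallback, dev and buf are
 * illustrative, not part of this header):
 *
 * \code{.cpp}
 * struct ScatterCallback : public DmaCallback
 * {
 *     void process() override
 *     {
 *         // Runs once, after both chunks below have completed.
 *     }
 * };
 *
 * // One chunk event per participating DMA. The callback counts the
 * // events and deletes itself after process() has run.
 * DmaCallback *cb = new ScatterCallback();
 * dev->dmaRead(0x1000, 64, cb->getChunkEvent(), buf);
 * dev->dmaRead(0x3000, 64, cb->getChunkEvent(), buf + 64);
 * \endcode
 */
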
/**
 * Buffered DMA engine helper class
 *
 * This class implements a simple DMA engine that feeds a FIFO
 * buffer. The size of the buffer, the maximum number of pending
 * requests and the maximum request size are all set when the engine
 * is instantiated.
 *
 * An <i>asynchronous</i> transfer of a <i>block</i> of data
 * (designated by a start address and a size) is started by calling
 * the startFill() method. The DMA engine will aggressively try to
 * keep the internal FIFO full. As soon as there is room in the FIFO
 * for more data <i>and</i> there are free request slots, a new fill
 * will be started.
 *
 * Data in the FIFO can be read back using the get() and tryGet()
 * methods. Both request a block of data from the FIFO. However, get()
 * panics if the block cannot be satisfied, while tryGet() simply
 * returns false. The latter call makes it possible to implement
 * custom buffer underrun handling.
 *
 * A simple use case would be something like this:
 * \code{.cpp}
 * // Create a DMA engine with a 1KiB buffer. Issue up to 8 concurrent
 * // uncacheable 64 byte (maximum) requests.
 * DmaReadFifo *dma = new DmaReadFifo(port, 1024, 64, 8,
 *                                    Request::UNCACHEABLE);
 *
 * // Start copying 4KiB of data from 0xFF000000
 * dma->startFill(0xFF000000, 0x1000);
 *
 * // Some time later when there is data in the FIFO.
 * uint8_t data[8];
 * dma->get(data, sizeof(data));
 * \endcode
 *
 *
 * The DMA engine allows new blocks to be requested as soon as the
 * last request for a block has been sent (i.e., there is no need to
 * wait for pending requests to complete). This can be queried with
 * the atEndOfBlock() method and more advanced implementations may
 * override the onEndOfBlock() callback.
 */
class DmaReadFifo : public Drainable, public Serializable
{
  public:
    DmaReadFifo(DmaPort &port, size_t size,
                unsigned max_req_size,
                unsigned max_pending,
                Request::Flags flags = 0);

    ~DmaReadFifo();

  public: // Serializable
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;

  public: // Drainable
    DrainState drain() override;

  public: // FIFO access
    /**
     * @{
     * @name FIFO access
     */
    /**
     * Try to read data from the FIFO.
     *
     * This method reads len bytes of data from the FIFO and stores
     * them in the memory location pointed to by dst. The method
     * fails, and no data is written to the buffer, if the FIFO
     * doesn't contain enough data to satisfy the request.
     *
     * @param dst Pointer to a destination buffer
     * @param len Amount of data to read.
     * @return true on success, false otherwise.
     */
    bool tryGet(uint8_t *dst, size_t len);

    template<typename T>
    bool tryGet(T &value) {
        return tryGet(reinterpret_cast<uint8_t *>(&value), sizeof(T));
    };

    /**
     * Read data from the FIFO and panic on failure.
     *
     * @see tryGet()
     *
     * @param dst Pointer to a destination buffer
     * @param len Amount of data to read.
     */
    void get(uint8_t *dst, size_t len);

    template<typename T>
    T get() {
        T value;
        get(reinterpret_cast<uint8_t *>(&value), sizeof(T));
        return value;
    };

    /** Get the amount of data stored in the FIFO */
    size_t size() const { return buffer.size(); }
    /** Flush the FIFO */
    void flush() { buffer.flush(); }

    /** @} */
  public: // FIFO fill control
    /**
     * @{
     * @name FIFO fill control
     */
    /**
     * Start filling the FIFO.
     *
     * @warning It's considered an error to call start on an active DMA
     * engine unless the last request from the active block has been
     * sent (i.e., atEndOfBlock() is true).
     *
     * @param start Physical address to copy from.
     * @param size Size of the block to copy.
     */
    void startFill(Addr start, size_t size);

    /**
     * Stop the DMA engine.
     *
     * Stop filling the FIFO and ignore incoming responses for pending
     * requests. The onEndOfBlock() callback will not be called after
     * this method has been invoked. However, once the last response
     * has been received, the onIdle() callback will still be called.
     */
    void stopFill();

    /**
     * Has the DMA engine sent out the last request for the active
     * block?
     */
    bool atEndOfBlock() const {
        return nextAddr == endAddr;
    }

    /**
     * Is the DMA engine active (i.e., are there still in-flight
     * accesses)?
     */
    bool isActive() const {
        return !(pendingRequests.empty() && atEndOfBlock());
    }

    /** @} */
  protected: // Callbacks
    /**
     * @{
     * @name Callbacks
     */
    /**
     * End of block callback
     *
     * This callback is called <i>once</i> after the last access in a
     * block has been sent. It is legal for a derived class to call
     * startFill() from this method to initiate a transfer.
     */
    virtual void onEndOfBlock() {};

    /**
     * Last response received callback
     *
     * This callback is called when the DMA engine becomes idle (i.e.,
     * there are no pending requests).
     *
     * It is possible for a DMA engine to reach the end of block and
     * become idle at the same tick. In such a case, the
     * onEndOfBlock() callback will be called first. This callback
     * will <i>NOT</i> be called if that callback initiates a new DMA transfer.
     */
    virtual void onIdle() {};

    /** @} */
  private: // Configuration
    /** Maximum request size in bytes */
    const Addr maxReqSize;
    /** Maximum FIFO size in bytes */
    const size_t fifoSize;
    /** Request flags */
    const Request::Flags reqFlags;

    DmaPort &port;

  private:
    class DmaDoneEvent : public Event
    {
      public:
        DmaDoneEvent(DmaReadFifo *_parent, size_t max_size);

        void kill();
        void cancel();
        bool canceled() const { return _canceled; }
        void reset(size_t size);
        void process();

        bool done() const { return _done; }
        size_t requestSize() const { return _requestSize; }
        const uint8_t *data() const { return _data.data(); }
        uint8_t *data() { return _data.data(); }

      private:
        DmaReadFifo *parent;
        bool _done;
        bool _canceled;
        size_t _requestSize;
        std::vector<uint8_t> _data;
    };

    typedef std::unique_ptr<DmaDoneEvent> DmaDoneEventUPtr;

    /**
     * DMA request done, handle incoming data and issue a new
     * request.
     */
    void dmaDone();

    /** Handle pending requests that have been flagged as done. */
    void handlePending();

    /** Try to issue new DMA requests or bypass DMA requests. */
    void resumeFill();

    /** Try to issue new DMA requests during normal execution. */
    void resumeFillTiming();

    /** Try to bypass DMA requests in KVM execution mode. */
    void resumeFillFunctional();

  private: // Internal state
    Fifo<uint8_t> buffer;

    Addr nextAddr;
    Addr endAddr;

    std::deque<DmaDoneEventUPtr> pendingRequests;
    std::deque<DmaDoneEventUPtr> freeRequests;
};

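/*
 * A sketch of chaining fills from onEndOfBlock(), assuming a hypothetical
 * subclass (StreamingFifo and the region bounds are illustrative, not
 * part of this header):
 *
 * \code{.cpp}
 * class StreamingFifo : public DmaReadFifo
 * {
 *   public:
 *     using DmaReadFifo::DmaReadFifo;
 *
 *   protected:
 *     Addr regionNext = 0xFF001000;
 *     const Addr regionEnd = 0xFF100000;
 *
 *     void onEndOfBlock() override
 *     {
 *         // Legal here: the last request of the current block has been
 *         // sent, so atEndOfBlock() is true and a new fill may start.
 *         if (regionNext < regionEnd) {
 *             startFill(regionNext, 0x1000);
 *             regionNext += 0x1000;
 *         }
 *     }
 *
 *     void onIdle() override
 *     {
 *         // All responses received. Not called when onEndOfBlock()
 *         // started a new transfer in the same tick.
 *     }
 * };
 * \endcode
 */
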
#endif // __DEV_DMA_DEVICE_HH__