copy_engine.cc (12087:0e082672ac6b)
1/*
2 * Copyright (c) 2012 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2008 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Ali Saidi
41 */
42
43/* @file
44 * Device model for Intel's I/O AT DMA copy engine.
45 */
46
47#include "dev/pci/copy_engine.hh"
48
49#include <algorithm>
50
51#include "base/cp_annotate.hh"
52#include "base/trace.hh"
53#include "debug/DMACopyEngine.hh"
54#include "debug/Drain.hh"
55#include "mem/packet.hh"
56#include "mem/packet_access.hh"
57#include "params/CopyEngine.hh"
58#include "sim/stats.hh"
59#include "sim/system.hh"
60
61using namespace CopyEngineReg;
62
63CopyEngine::CopyEngine(const Params *p)
64 : PciDevice(p)
65{
66 // All device registers are initialized to 0 by default
67 regs.chanCount = p->ChanCnt;
68 regs.xferCap = findMsbSet(p->XferCap);
69 regs.attnStatus = 0;
70
71 if (regs.chanCount > 64)
72 fatal("CopyEngine interface doesn't support more than 64 DMA engines\n");
73
74 for (int x = 0; x < regs.chanCount; x++) {
75 CopyEngineChannel *ch = new CopyEngineChannel(this, x);
76 chan.push_back(ch);
77 }
78}
79
80
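// Each channel owns its own DMA port, a descriptor scratch area (curDmaDesc)
// and a copy buffer sized to the XferCap parameter. The *CompleteEvent
// members are the callbacks invoked when the corresponding DMA action
// finishes; together they drive the per-channel state machine
// (DescriptorFetch -> DMARead -> DMAWrite -> optional CompletionWrite).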
81CopyEngine::CopyEngineChannel::CopyEngineChannel(CopyEngine *_ce, int cid)
82 : cePort(_ce, _ce->sys),
83 ce(_ce), channelId(cid), busy(false), underReset(false),
84 refreshNext(false), latBeforeBegin(ce->params()->latBeforeBegin),
85 latAfterCompletion(ce->params()->latAfterCompletion),
86 completionDataReg(0), nextState(Idle),
87 fetchCompleteEvent([this]{ fetchDescComplete(); }, name()),
88 addrCompleteEvent([this]{ fetchAddrComplete(); }, name()),
89 readCompleteEvent([this]{ readCopyBytesComplete(); }, name()),
90 writeCompleteEvent([this]{ writeCopyBytesComplete(); }, name()),
91 statusCompleteEvent([this]{ writeStatusComplete(); }, name())
92
93{
94 cr.status.dma_transfer_status(3);
95 cr.descChainAddr = 0;
96 cr.completionAddr = 0;
97
98 curDmaDesc = new DmaDesc;
99 memset(curDmaDesc, 0, sizeof(DmaDesc));
100 copyBuffer = new uint8_t[ce->params()->XferCap];
101}
102
103CopyEngine::~CopyEngine()
104{
105 for (int x = 0; x < chan.size(); x++) {
106 delete chan[x];
107 }
108}
109
110CopyEngine::CopyEngineChannel::~CopyEngineChannel()
111{
112 delete curDmaDesc;
113 delete [] copyBuffer;
114}
115
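// The "dma" master port is exposed per channel: the port index selects the
// channel whose cePort is returned, while any other interface name is
// delegated to PciDevice.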
116BaseMasterPort &
117CopyEngine::getMasterPort(const std::string &if_name, PortID idx)
118{
119 if (if_name != "dma") {
120 // pass it along to our super class
121 return PciDevice::getMasterPort(if_name, idx);
122 } else {
123 if (idx >= static_cast<int>(chan.size())) {
124 panic("CopyEngine::getMasterPort: unknown index %d\n", idx);
125 }
126
127 return chan[idx]->getMasterPort();
128 }
129}
130
131
132BaseMasterPort &
133CopyEngine::CopyEngineChannel::getMasterPort()
134{
135 return cePort;
136}
137
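// Dispatch a command written to a channel's CHAN_COMMAND register. start_dma
// begins walking the descriptor chain at descChainAddr, append_dma re-reads
// the next pointer of the last fetched descriptor (or defers it if the
// channel is busy), and reset_dma either resets immediately or defers until
// the current transfer finishes. Resume, abort and suspend are not modelled.
// A driver would typically program CHAN_CHAINADDR (and CHAN_CMPLNADDR) first
// and then write the start command here.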
138void
139CopyEngine::CopyEngineChannel::recvCommand()
140{
141 if (cr.command.start_dma()) {
142 assert(!busy);
143 cr.status.dma_transfer_status(0);
144 nextState = DescriptorFetch;
145 fetchAddress = cr.descChainAddr;
146 if (ce->drainState() == DrainState::Running)
147 fetchDescriptor(cr.descChainAddr);
148 } else if (cr.command.append_dma()) {
149 if (!busy) {
150 nextState = AddressFetch;
151 if (ce->drainState() == DrainState::Running)
152 fetchNextAddr(lastDescriptorAddr);
153 } else
154 refreshNext = true;
155 } else if (cr.command.reset_dma()) {
156 if (busy)
157 underReset = true;
158 else {
159 cr.status.dma_transfer_status(3);
160 nextState = Idle;
161 }
162 } else if (cr.command.resume_dma() || cr.command.abort_dma() ||
163 cr.command.suspend_dma())
164 panic("Resume, Abort, and Suspend are not supported\n");
165 cr.command(0);
166}
167
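// MMIO decode: offsets below 0x80 address the device-global registers
// (channel count, transfer capability, interrupt control, attention status);
// every further 0x80-byte window maps the register file of one channel and
// is forwarded to channelRead()/channelWrite().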
168Tick
169CopyEngine::read(PacketPtr pkt)
170{
171 int bar;
172 Addr daddr;
173
174 if (!getBAR(pkt->getAddr(), bar, daddr))
175 panic("Invalid PCI memory access to unmapped memory.\n");
176
177 // Only Memory register BAR is allowed
178 assert(bar == 0);
179
180 int size = pkt->getSize();
181 if (size != sizeof(uint64_t) && size != sizeof(uint32_t) &&
182 size != sizeof(uint16_t) && size != sizeof(uint8_t)) {
183 panic("Unknown size for MMIO access: %d\n", pkt->getSize());
184 }
185
186 DPRINTF(DMACopyEngine, "Read device register %#X size: %d\n", daddr, size);
187
188 ///
189 /// Handle read of register here
190 ///
191
192 if (daddr < 0x80) {
193 switch (daddr) {
194 case GEN_CHANCOUNT:
195 assert(size == sizeof(regs.chanCount));
196 pkt->set<uint8_t>(regs.chanCount);
197 break;
198 case GEN_XFERCAP:
199 assert(size == sizeof(regs.xferCap));
200 pkt->set<uint8_t>(regs.xferCap);
201 break;
202 case GEN_INTRCTRL:
203 assert(size == sizeof(uint8_t));
204 pkt->set<uint8_t>(regs.intrctrl());
205 regs.intrctrl.master_int_enable(0);
206 break;
207 case GEN_ATTNSTATUS:
208 assert(size == sizeof(regs.attnStatus));
209 pkt->set<uint32_t>(regs.attnStatus);
210 regs.attnStatus = 0;
211 break;
212 default:
213 panic("Read request to unknown register number: %#x\n", daddr);
214 }
215 pkt->makeAtomicResponse();
216 return pioDelay;
217 }
218
219
220 // Find which channel we're accessing
221 int chanid = 0;
222 daddr -= 0x80;
223 while (daddr >= 0x80) {
224 chanid++;
225 daddr -= 0x80;
226 }
227
228 if (chanid >= regs.chanCount)
229 panic("Access to channel %d (device only configured for %d channels)",
230 chanid, regs.chanCount);
231
232 ///
233 /// Channel registers are handled here
234 ///
235 chan[chanid]->channelRead(pkt, daddr, size);
236
237 pkt->makeAtomicResponse();
238 return pioDelay;
239}
240
241void
242CopyEngine::CopyEngineChannel::channelRead(Packet *pkt, Addr daddr, int size)
243{
244 switch (daddr) {
245 case CHAN_CONTROL:
246 assert(size == sizeof(uint16_t));
247 pkt->set<uint16_t>(cr.ctrl());
248 cr.ctrl.in_use(1);
249 break;
250 case CHAN_STATUS:
251 assert(size == sizeof(uint64_t));
252 pkt->set<uint64_t>(cr.status() | ~busy);
253 break;
254 case CHAN_CHAINADDR:
255 assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
256 if (size == sizeof(uint64_t))
257 pkt->set<uint64_t>(cr.descChainAddr);
258 else
259 pkt->set<uint32_t>(bits(cr.descChainAddr,0,31));
260 break;
261 case CHAN_CHAINADDR_HIGH:
262 assert(size == sizeof(uint32_t));
263 pkt->set<uint32_t>(bits(cr.descChainAddr,32,63));
264 break;
265 case CHAN_COMMAND:
266 assert(size == sizeof(uint8_t));
267 pkt->set<uint8_t>(cr.command());
268 break;
269 case CHAN_CMPLNADDR:
270 assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
271 if (size == sizeof(uint64_t))
272 pkt->set<uint64_t>(cr.completionAddr);
273 else
274 pkt->set<uint32_t>(bits(cr.completionAddr,0,31));
275 break;
276 case CHAN_CMPLNADDR_HIGH:
277 assert(size == sizeof(uint32_t));
278 pkt->set<uint32_t>(bits(cr.completionAddr,32,63));
279 break;
280 case CHAN_ERROR:
281 assert(size == sizeof(uint32_t));
282 pkt->set<uint32_t>(cr.error());
283 break;
284 default:
285 panic("Read request to unknown channel register number: (%d)%#x\n",
286 channelId, daddr);
287 }
288}
289
290
291Tick
292CopyEngine::write(PacketPtr pkt)
293{
294 int bar;
295 Addr daddr;
296
297
298 if (!getBAR(pkt->getAddr(), bar, daddr))
299 panic("Invalid PCI memory access to unmapped memory.\n");
300
301 // Only Memory register BAR is allowed
302 assert(bar == 0);
303
304 int size = pkt->getSize();
305
306 ///
307 /// Handle write of register here
308 ///
309
310 if (size == sizeof(uint64_t)) {
311 uint64_t val M5_VAR_USED = pkt->get<uint64_t>();
312 DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
313 } else if (size == sizeof(uint32_t)) {
314 uint32_t val M5_VAR_USED = pkt->get<uint32_t>();
315 DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
316 } else if (size == sizeof(uint16_t)) {
317 uint16_t val M5_VAR_USED = pkt->get<uint16_t>();
318 DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
319 } else if (size == sizeof(uint8_t)) {
320 uint8_t val M5_VAR_USED = pkt->get<uint8_t>();
321 DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
322 } else {
323 panic("Unknown size for MMIO access: %d\n", size);
324 }
325
326 if (daddr < 0x80) {
327 switch (daddr) {
328 case GEN_CHANCOUNT:
329 case GEN_XFERCAP:
330 case GEN_ATTNSTATUS:
331 DPRINTF(DMACopyEngine, "Warning, ignoring write to register %x\n",
332 daddr);
333 break;
334 case GEN_INTRCTRL:
335 regs.intrctrl.master_int_enable(bits(pkt->get<uint8_t>(),0,1));
336 break;
337 default:
338 panic("Write request to unknown register number: %#x\n", daddr);
339 }
340 pkt->makeAtomicResponse();
341 return pioDelay;
342 }
343
344 // Find which channel we're accessing
345 int chanid = 0;
346 daddr -= 0x80;
347 while (daddr >= 0x80) {
348 chanid++;
349 daddr -= 0x80;
350 }
351
352 if (chanid >= regs.chanCount)
353 panic("Access to channel %d (device only configured for %d channels)",
354 chanid, regs.chanCount);
355
356 ///
357 /// Channel registers are handled here
358 ///
359 chan[chanid]->channelWrite(pkt, daddr, size);
360
361 pkt->makeAtomicResponse();
362 return pioDelay;
363}
364
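// Per-channel register writes. A write to CHAN_COMMAND immediately invokes
// recvCommand(), the chain and completion addresses can be written as a
// single 64-bit access or as two 32-bit halves, and CHAN_ERROR behaves as
// write-one-to-clear.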
365void
366CopyEngine::CopyEngineChannel::channelWrite(Packet *pkt, Addr daddr, int size)
367{
368 switch (daddr) {
369 case CHAN_CONTROL:
370 assert(size == sizeof(uint16_t));
371 int old_int_disable;
372 old_int_disable = cr.ctrl.interrupt_disable();
373 cr.ctrl(pkt->get<uint16_t>());
374 if (cr.ctrl.interrupt_disable())
375 cr.ctrl.interrupt_disable(0);
376 else
377 cr.ctrl.interrupt_disable(old_int_disable);
378 break;
379 case CHAN_STATUS:
380 assert(size == sizeof(uint64_t));
381 DPRINTF(DMACopyEngine, "Warning, ignoring write to register %x\n",
382 daddr);
383 break;
384 case CHAN_CHAINADDR:
385 assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
386 if (size == sizeof(uint64_t))
387 cr.descChainAddr = pkt->get<uint64_t>();
388 else
389 cr.descChainAddr = (uint64_t)pkt->get<uint32_t>() |
390 (cr.descChainAddr & ~mask(32));
391 DPRINTF(DMACopyEngine, "Chain Address %x\n", cr.descChainAddr);
392 break;
393 case CHAN_CHAINADDR_HIGH:
394 assert(size == sizeof(uint32_t));
395 cr.descChainAddr = ((uint64_t)pkt->get<uint32_t>() <<32) |
396 (cr.descChainAddr & mask(32));
397 DPRINTF(DMACopyEngine, "Chain Address %x\n", cr.descChainAddr);
398 break;
399 case CHAN_COMMAND:
400 assert(size == sizeof(uint8_t));
401 cr.command(pkt->get<uint8_t>());
402 recvCommand();
403 break;
404 case CHAN_CMPLNADDR:
405 assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
406 if (size == sizeof(uint64_t))
407 cr.completionAddr = pkt->get<uint64_t>();
408 else
409 cr.completionAddr = pkt->get<uint32_t>() |
410 (cr.completionAddr & ~mask(32));
411 break;
412 case CHAN_CMPLNADDR_HIGH:
413 assert(size == sizeof(uint32_t));
414 cr.completionAddr = ((uint64_t)pkt->get<uint32_t>() <<32) |
415 (cr.completionAddr & mask(32));
416 break;
417 case CHAN_ERROR:
418 assert(size == sizeof(uint32_t));
419 cr.error(~pkt->get<uint32_t>() & cr.error());
420 break;
421 default:
422 panic("Write request to unknown channel register number: (%d)%#x\n",
423 channelId, daddr);
424 }
425}
426
427void
428CopyEngine::regStats()
429{
430 PciDevice::regStats();
431
432 using namespace Stats;
433 bytesCopied
434 .init(regs.chanCount)
435 .name(name() + ".bytes_copied")
436 .desc("Number of bytes copied by each engine")
437 .flags(total)
438 ;
439 copiesProcessed
440 .init(regs.chanCount)
441 .name(name() + ".copies_processed")
442 .desc("Number of copies processed by each engine")
443 .flags(total)
444 ;
445}
446
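// Start of the copy state machine: DMA-read one descriptor (sizeof(DmaDesc)
// bytes) from the PCI chain address, translated through pciToDma(), into
// curDmaDesc. fetchDescComplete() runs when the read finishes and either
// starts the data copy or returns to Idle on a NULL descriptor.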
447void
448CopyEngine::CopyEngineChannel::fetchDescriptor(Addr address)
449{
450 anDq();
451 anBegin("FetchDescriptor");
452 DPRINTF(DMACopyEngine, "Reading descriptor at memory location %#x(%#x)\n",
453 address, ce->pciToDma(address));
454 assert(address);
455 busy = true;
456
457 DPRINTF(DMACopyEngine, "dmaAction: %#x, %d bytes, to addr %#x\n",
458 ce->pciToDma(address), sizeof(DmaDesc), curDmaDesc);
459
460 cePort.dmaAction(MemCmd::ReadReq, ce->pciToDma(address),
461 sizeof(DmaDesc), &fetchCompleteEvent,
462 (uint8_t*)curDmaDesc, latBeforeBegin);
463 lastDescriptorAddr = address;
464}
465
466void
467CopyEngine::CopyEngineChannel::fetchDescComplete()
468{
469 DPRINTF(DMACopyEngine, "Read of descriptor complete\n");
470
471 if ((curDmaDesc->command & DESC_CTRL_NULL)) {
472 DPRINTF(DMACopyEngine, "Got NULL descriptor, skipping\n");
473 assert(!(curDmaDesc->command & DESC_CTRL_CP_STS));
474 if (curDmaDesc->command & DESC_CTRL_CP_STS) {
475 panic("Shouldn't be able to get here\n");
476 nextState = CompletionWrite;
477 if (inDrain()) return;
478 writeCompletionStatus();
479 } else {
480 anBegin("Idle");
481 anWait();
482 busy = false;
483 nextState = Idle;
484 inDrain();
485 }
486 return;
487 }
488
489 if (curDmaDesc->command & ~DESC_CTRL_CP_STS)
490 panic("Descriptor has flag other than completion status set\n");
491
492 nextState = DMARead;
493 if (inDrain()) return;
494 readCopyBytes();
495}
496
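// Data movement happens in two DMA steps through copyBuffer: readCopyBytes()
// pulls curDmaDesc->len bytes from the source address, and writeCopyBytes()
// pushes the buffered data to the destination; the buffer was sized to the
// XferCap parameter in the constructor.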
497void
498CopyEngine::CopyEngineChannel::readCopyBytes()
499{
500 anBegin("ReadCopyBytes");
501 DPRINTF(DMACopyEngine, "Reading %d bytes from memory location %#x(%#x) into copy buffer\n",
502 curDmaDesc->len, curDmaDesc->src,
503 ce->pciToDma(curDmaDesc->src));
504 cePort.dmaAction(MemCmd::ReadReq, ce->pciToDma(curDmaDesc->src),
505 curDmaDesc->len, &readCompleteEvent, copyBuffer, 0);
506}
507
508void
509CopyEngine::CopyEngineChannel::readCopyBytesComplete()
510{
511 DPRINTF(DMACopyEngine, "Read of bytes to copy complete\n");
512
513 nextState = DMAWrite;
514 if (inDrain()) return;
515 writeCopyBytes();
516}
517
518void
519CopyEngine::CopyEngineChannel::writeCopyBytes()
520{
521 anBegin("WriteCopyBytes");
522 DPRINTF(DMACopyEngine, "Writing %d bytes from buffer to memory location %#x(%#x)\n",
523 curDmaDesc->len, curDmaDesc->dest,
524 ce->pciToDma(curDmaDesc->dest));
525
526 cePort.dmaAction(MemCmd::WriteReq, ce->pciToDma(curDmaDesc->dest),
527 curDmaDesc->len, &writeCompleteEvent, copyBuffer, 0);
528
529 ce->bytesCopied[channelId] += curDmaDesc->len;
530 ce->copiesProcessed[channelId]++;
531}
532
533void
534CopyEngine::CopyEngineChannel::writeCopyBytesComplete()
535{
536 DPRINTF(DMACopyEngine, "Write of bytes to copy complete user1: %#x\n",
537 curDmaDesc->user1);
538
539 cr.status.compl_desc_addr(lastDescriptorAddr >> 6);
540 completionDataReg = cr.status() | 1;
541
542 anQ("DMAUsedDescQ", channelId, 1);
543 anQ("AppRecvQ", curDmaDesc->user1, curDmaDesc->len);
544 if (curDmaDesc->command & DESC_CTRL_CP_STS) {
545 nextState = CompletionWrite;
546 if (inDrain()) return;
547 writeCompletionStatus();
548 return;
549 }
550
551 continueProcessing();
552}
553
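// Decide what to do after a descriptor (or its completion write) finishes:
// honour a pending reset, follow the descriptor's next pointer, service a
// deferred append (refreshNext), or fall back to Idle. Each path checks
// inDrain() so an in-flight drain can stop the machine between steps.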
554void
555CopyEngine::CopyEngineChannel::continueProcessing()
556{
557 busy = false;
558
559 if (underReset) {
560 anBegin("Reset");
561 anWait();
562 underReset = false;
563 refreshNext = false;
564 busy = false;
565 nextState = Idle;
566 return;
567 }
568
569 if (curDmaDesc->next) {
570 nextState = DescriptorFetch;
571 fetchAddress = curDmaDesc->next;
572 if (inDrain()) return;
573 fetchDescriptor(curDmaDesc->next);
574 } else if (refreshNext) {
575 nextState = AddressFetch;
576 refreshNext = false;
577 if (inDrain()) return;
578 fetchNextAddr(lastDescriptorAddr);
579 } else {
580 inDrain();
581 nextState = Idle;
582 anWait();
583 anBegin("Idle");
584 }
585}
586
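// If the descriptor requested completion status (DESC_CTRL_CP_STS), the
// status word (with bit 0 set and compl_desc_addr derived from the last
// descriptor's address) is DMA-written to the configured completion address,
// with latAfterCompletion modelling the post-completion latency.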
587void
588CopyEngine::CopyEngineChannel::writeCompletionStatus()
589{
590 anBegin("WriteCompletionStatus");
591 DPRINTF(DMACopyEngine, "Writing completion status %#x to address %#x(%#x)\n",
592 completionDataReg, cr.completionAddr,
593 ce->pciToDma(cr.completionAddr));
594
595 cePort.dmaAction(MemCmd::WriteReq,
596 ce->pciToDma(cr.completionAddr),
597 sizeof(completionDataReg), &statusCompleteEvent,
598 (uint8_t*)&completionDataReg, latAfterCompletion);
599}
600
601void
602CopyEngine::CopyEngineChannel::writeStatusComplete()
603{
604 DPRINTF(DMACopyEngine, "Writing completion status complete\n");
605 continueProcessing();
606}
607
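// Used for append_dma: re-read only the next-pointer field of the most
// recently fetched descriptor, so a chain that software extended after the
// engine reached its end can be picked up again.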
608void
609CopyEngine::CopyEngineChannel::fetchNextAddr(Addr address)
610{
611 anBegin("FetchNextAddr");
612 DPRINTF(DMACopyEngine, "Fetching next address...\n");
613 busy = true;
614 cePort.dmaAction(MemCmd::ReadReq,
615 ce->pciToDma(address + offsetof(DmaDesc, next)),
616 sizeof(Addr), &addrCompleteEvent,
617 (uint8_t*)curDmaDesc + offsetof(DmaDesc, next), 0);
618}
619
620void
621CopyEngine::CopyEngineChannel::fetchAddrComplete()
622{
623 DPRINTF(DMACopyEngine, "Fetching next address complete: %#x\n",
624 curDmaDesc->next);
625 if (!curDmaDesc->next) {
626 DPRINTF(DMACopyEngine, "Got NULL descriptor, nothing more to do\n");
627 busy = false;
628 nextState = Idle;
629 anWait();
630 anBegin("Idle");
631 inDrain();
632 return;
633 }
634 nextState = DescriptorFetch;
635 fetchAddress = curDmaDesc->next;
636 if (inDrain()) return;
637 fetchDescriptor(curDmaDesc->next);
638}
639
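// Drain support: each state transition calls inDrain(); if a drain was
// requested the channel signals that its drain is done and the caller stops,
// leaving nextState recorded so drainResume()/restartStateMachine() can
// continue from the same point later.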
640bool
641CopyEngine::CopyEngineChannel::inDrain()
642{
643 if (drainState() == DrainState::Draining) {
644 DPRINTF(Drain, "CopyEngine done draining, processing drain event\n");
645 signalDrainDone();
646 }
647
648 return ce->drainState() != DrainState::Running;
649}
650
651DrainState
652CopyEngine::CopyEngineChannel::drain()
653{
654 if (nextState == Idle || ce->drainState() != DrainState::Running) {
655 return DrainState::Drained;
656 } else {
657 DPRINTF(Drain, "CopyEngineChannel not drained\n");
658 return DrainState::Draining;
659 }
660}
661
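// Checkpointing: the device serializes its global registers and then each
// channel as its own section, including the in-progress descriptor, the copy
// buffer and the saved nextState needed to restart the state machine after
// unserialization.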
662void
663CopyEngine::serialize(CheckpointOut &cp) const
664{
665 PciDevice::serialize(cp);
666 regs.serialize(cp);
667 for (int x = 0; x < chan.size(); x++)
668 chan[x]->serializeSection(cp, csprintf("channel%d", x));
669}
670
671void
672CopyEngine::unserialize(CheckpointIn &cp)
673{
674 PciDevice::unserialize(cp);
675 regs.unserialize(cp);
676 for (int x = 0; x < chan.size(); x++)
677 chan[x]->unserializeSection(cp, csprintf("channel%d", x));
678}
679
680void
681CopyEngine::CopyEngineChannel::serialize(CheckpointOut &cp) const
682{
683 SERIALIZE_SCALAR(channelId);
684 SERIALIZE_SCALAR(busy);
685 SERIALIZE_SCALAR(underReset);
686 SERIALIZE_SCALAR(refreshNext);
687 SERIALIZE_SCALAR(lastDescriptorAddr);
688 SERIALIZE_SCALAR(completionDataReg);
689 SERIALIZE_SCALAR(fetchAddress);
690 int nextState = this->nextState;
691 SERIALIZE_SCALAR(nextState);
692 arrayParamOut(cp, "curDmaDesc", (uint8_t*)curDmaDesc, sizeof(DmaDesc));
693 SERIALIZE_ARRAY(copyBuffer, ce->params()->XferCap);
694 cr.serialize(cp);
695
696}
697void
698CopyEngine::CopyEngineChannel::unserialize(CheckpointIn &cp)
699{
700 UNSERIALIZE_SCALAR(channelId);
701 UNSERIALIZE_SCALAR(busy);
702 UNSERIALIZE_SCALAR(underReset);
703 UNSERIALIZE_SCALAR(refreshNext);
704 UNSERIALIZE_SCALAR(lastDescriptorAddr);
705 UNSERIALIZE_SCALAR(completionDataReg);
706 UNSERIALIZE_SCALAR(fetchAddress);
707 int nextState;
708 UNSERIALIZE_SCALAR(nextState);
709 this->nextState = (ChannelState)nextState;
710 arrayParamIn(cp, "curDmaDesc", (uint8_t*)curDmaDesc, sizeof(DmaDesc));
711 UNSERIALIZE_ARRAY(copyBuffer, ce->params()->XferCap);
712 cr.unserialize(cp);
713
714}
715
716void
717CopyEngine::CopyEngineChannel::restartStateMachine()
718{
719 switch (nextState) {
720 case AddressFetch:
721 fetchNextAddr(lastDescriptorAddr);
722 break;
723 case DescriptorFetch:
724 fetchDescriptor(fetchAddress);
725 break;
726 case DMARead:
727 readCopyBytes();
728 break;
729 case DMAWrite:
730 writeCopyBytes();
731 break;
732 case CompletionWrite:
733 writeCompletionStatus();
734 break;
735 case Idle:
736 break;
737 default:
738 panic("Unknown state for CopyEngineChannel\n");
739 }
740}
741
742void
743CopyEngine::CopyEngineChannel::drainResume()
744{
745 DPRINTF(DMACopyEngine, "Restarting state machine at state %d\n", nextState);
746 restartStateMachine();
747}
748
749CopyEngine *
750CopyEngineParams::create()
751{
752 return new CopyEngine(this);
753}