/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2008 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's I/O AT DMA copy engine.
 */

#include "dev/pci/copy_engine.hh"

#include <algorithm>

#include "base/cp_annotate.hh"
#include "base/trace.hh"
#include "debug/DMACopyEngine.hh"
#include "debug/Drain.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/CopyEngine.hh"
#include "sim/stats.hh"
#include "sim/system.hh"

using namespace CopyEngineReg;

CopyEngine::CopyEngine(const Params *p)
    : PciDevice(p)
{
    // All Reg regs are initialized to 0 by default
    regs.chanCount = p->ChanCnt;
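    // xferCap is stored as the log2 of the maximum transfer size;
    // findMsbSet returns the index of the highest set bit.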
    regs.xferCap = findMsbSet(p->XferCap);
    regs.attnStatus = 0;

    if (regs.chanCount > 64)
        fatal("CopyEngine interface doesn't support more than 64 DMA engines\n");

    for (int x = 0; x < regs.chanCount; x++) {
        CopyEngineChannel *ch = new CopyEngineChannel(this, x);
        chan.push_back(ch);
    }
}

CopyEngine::CopyEngineChannel::CopyEngineChannel(CopyEngine *_ce, int cid)
    : cePort(_ce, _ce->sys),
      ce(_ce), channelId(cid), busy(false), underReset(false),
      refreshNext(false), latBeforeBegin(ce->params()->latBeforeBegin),
      latAfterCompletion(ce->params()->latAfterCompletion),
      completionDataReg(0), nextState(Idle),
      fetchCompleteEvent([this]{ fetchDescComplete(); }, name()),
      addrCompleteEvent([this]{ fetchAddrComplete(); }, name()),
      readCompleteEvent([this]{ readCopyBytesComplete(); }, name()),
      writeCompleteEvent([this]{ writeCopyBytesComplete(); }, name()),
      statusCompleteEvent([this]{ writeStatusComplete(); }, name())
{
    cr.status.dma_transfer_status(3);
    cr.descChainAddr = 0;
    cr.completionAddr = 0;

    curDmaDesc = new DmaDesc;
    memset(curDmaDesc, 0, sizeof(DmaDesc));
    copyBuffer = new uint8_t[ce->params()->XferCap];
}

CopyEngine::~CopyEngine()
{
    for (int x = 0; x < chan.size(); x++) {
        delete chan[x];
    }
}

CopyEngine::CopyEngineChannel::~CopyEngineChannel()
{
    delete curDmaDesc;
    delete [] copyBuffer;
}

BaseMasterPort &
CopyEngine::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name != "dma") {
        // pass it along to our super class
        return PciDevice::getMasterPort(if_name, idx);
    } else {
        if (idx >= static_cast<int>(chan.size())) {
            panic("CopyEngine::getMasterPort: unknown index %d\n", idx);
        }

        return chan[idx]->getMasterPort();
    }
}

BaseMasterPort &
CopyEngine::CopyEngineChannel::getMasterPort()
{
    return cePort;
}

void
CopyEngine::CopyEngineChannel::recvCommand()
{
    if (cr.command.start_dma()) {
        assert(!busy);
        cr.status.dma_transfer_status(0);
        nextState = DescriptorFetch;
        fetchAddress = cr.descChainAddr;
        if (ce->drainState() == DrainState::Running)
            fetchDescriptor(cr.descChainAddr);
    } else if (cr.command.append_dma()) {
        if (!busy) {
            nextState = AddressFetch;
            if (ce->drainState() == DrainState::Running)
                fetchNextAddr(lastDescriptorAddr);
        } else
            refreshNext = true;
    } else if (cr.command.reset_dma()) {
        if (busy)
            underReset = true;
        else {
            cr.status.dma_transfer_status(3);
            nextState = Idle;
        }
    } else if (cr.command.resume_dma() || cr.command.abort_dma() ||
               cr.command.suspend_dma())
        panic("Resume, Abort, and Suspend are not supported\n");
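    // The command register is self-clearing: once a command has been
    // handled, clear it so software can issue the next one.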
    cr.command(0);
}

Tick
CopyEngine::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    int size = pkt->getSize();
    if (size != sizeof(uint64_t) && size != sizeof(uint32_t) &&
        size != sizeof(uint16_t) && size != sizeof(uint8_t)) {
        panic("Unknown size for MMIO access: %d\n", pkt->getSize());
    }

    DPRINTF(DMACopyEngine, "Read device register %#X size: %d\n", daddr, size);

    ///
    /// Handle read of register here
    ///

    if (daddr < 0x80) {
        switch (daddr) {
          case GEN_CHANCOUNT:
            assert(size == sizeof(regs.chanCount));
            pkt->set<uint8_t>(regs.chanCount);
            break;
          case GEN_XFERCAP:
            assert(size == sizeof(regs.xferCap));
            pkt->set<uint8_t>(regs.xferCap);
            break;
          case GEN_INTRCTRL:
            assert(size == sizeof(uint8_t));
            pkt->set<uint8_t>(regs.intrctrl());
            regs.intrctrl.master_int_enable(0);
            break;
          case GEN_ATTNSTATUS:
            assert(size == sizeof(regs.attnStatus));
            pkt->set<uint32_t>(regs.attnStatus);
            regs.attnStatus = 0;
            break;
          default:
            panic("Read request to unknown register number: %#x\n", daddr);
        }
        pkt->makeAtomicResponse();
        return pioDelay;
    }

    // Find which channel we're accessing
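    // Each channel's registers occupy a 0x80-byte window that follows the
    // 0x80-byte block of general device registers.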
    int chanid = 0;
    daddr -= 0x80;
    while (daddr >= 0x80) {
        chanid++;
        daddr -= 0x80;
    }

    if (chanid >= regs.chanCount)
        panic("Access to channel %d (device only configured for %d channels)",
              chanid, regs.chanCount);

    ///
    /// Channel registers are handled here
    ///
    chan[chanid]->channelRead(pkt, daddr, size);

    pkt->makeAtomicResponse();
    return pioDelay;
}

void
CopyEngine::CopyEngineChannel::channelRead(Packet *pkt, Addr daddr, int size)
{
    switch (daddr) {
      case CHAN_CONTROL:
        assert(size == sizeof(uint16_t));
        pkt->set<uint16_t>(cr.ctrl());
        cr.ctrl.in_use(1);
        break;
      case CHAN_STATUS:
        assert(size == sizeof(uint64_t));
        pkt->set<uint64_t>(cr.status() | ~busy);
        break;
      case CHAN_CHAINADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            pkt->set<uint64_t>(cr.descChainAddr);
        else
            pkt->set<uint32_t>(bits(cr.descChainAddr,0,31));
        break;
      case CHAN_CHAINADDR_HIGH:
        assert(size == sizeof(uint32_t));
        pkt->set<uint32_t>(bits(cr.descChainAddr,32,63));
        break;
      case CHAN_COMMAND:
        assert(size == sizeof(uint8_t));
        pkt->set<uint8_t>(cr.command());
        break;
      case CHAN_CMPLNADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            pkt->set<uint64_t>(cr.completionAddr);
        else
            pkt->set<uint32_t>(bits(cr.completionAddr,0,31));
        break;
      case CHAN_CMPLNADDR_HIGH:
        assert(size == sizeof(uint32_t));
        pkt->set<uint32_t>(bits(cr.completionAddr,32,63));
        break;
      case CHAN_ERROR:
        assert(size == sizeof(uint32_t));
        pkt->set<uint32_t>(cr.error());
        break;
      default:
        panic("Read request to unknown channel register number: (%d)%#x\n",
              channelId, daddr);
    }
}

Tick
CopyEngine::write(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    int size = pkt->getSize();

    ///
    /// Handle write of register here
    ///

    if (size == sizeof(uint64_t)) {
        uint64_t val M5_VAR_USED = pkt->get<uint64_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else if (size == sizeof(uint32_t)) {
        uint32_t val M5_VAR_USED = pkt->get<uint32_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else if (size == sizeof(uint16_t)) {
        uint16_t val M5_VAR_USED = pkt->get<uint16_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else if (size == sizeof(uint8_t)) {
        uint8_t val M5_VAR_USED = pkt->get<uint8_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else {
        panic("Unknown size for MMIO access: %d\n", size);
    }

    if (daddr < 0x80) {
        switch (daddr) {
          case GEN_CHANCOUNT:
          case GEN_XFERCAP:
          case GEN_ATTNSTATUS:
            DPRINTF(DMACopyEngine, "Warning, ignoring write to register %x\n",
                    daddr);
            break;
          case GEN_INTRCTRL:
            regs.intrctrl.master_int_enable(bits(pkt->get<uint8_t>(),0,1));
            break;
          default:
            panic("Write request to unknown register number: %#x\n", daddr);
        }
        pkt->makeAtomicResponse();
        return pioDelay;
    }

    // Find which channel we're accessing
    int chanid = 0;
    daddr -= 0x80;
    while (daddr >= 0x80) {
        chanid++;
        daddr -= 0x80;
    }

    if (chanid >= regs.chanCount)
        panic("Access to channel %d (device only configured for %d channels)",
              chanid, regs.chanCount);

    ///
    /// Channel registers are handled here
    ///
    chan[chanid]->channelWrite(pkt, daddr, size);

    pkt->makeAtomicResponse();
    return pioDelay;
}

void
CopyEngine::CopyEngineChannel::channelWrite(Packet *pkt, Addr daddr, int size)
{
    switch (daddr) {
      case CHAN_CONTROL:
        assert(size == sizeof(uint16_t));
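        // interrupt_disable is effectively write-one-to-clear: writing a 1
        // clears the bit, while writing a 0 preserves its previous value.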
        int old_int_disable;
        old_int_disable = cr.ctrl.interrupt_disable();
        cr.ctrl(pkt->get<uint16_t>());
        if (cr.ctrl.interrupt_disable())
            cr.ctrl.interrupt_disable(0);
        else
            cr.ctrl.interrupt_disable(old_int_disable);
        break;
      case CHAN_STATUS:
        assert(size == sizeof(uint64_t));
        DPRINTF(DMACopyEngine, "Warning, ignoring write to register %x\n",
                daddr);
        break;
      case CHAN_CHAINADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            cr.descChainAddr = pkt->get<uint64_t>();
        else
            cr.descChainAddr = (uint64_t)pkt->get<uint32_t>() |
                (cr.descChainAddr & ~mask(32));
        DPRINTF(DMACopyEngine, "Chain Address %x\n", cr.descChainAddr);
        break;
      case CHAN_CHAINADDR_HIGH:
        assert(size == sizeof(uint32_t));
        cr.descChainAddr = ((uint64_t)pkt->get<uint32_t>() << 32) |
            (cr.descChainAddr & mask(32));
        DPRINTF(DMACopyEngine, "Chain Address %x\n", cr.descChainAddr);
        break;
      case CHAN_COMMAND:
        assert(size == sizeof(uint8_t));
        cr.command(pkt->get<uint8_t>());
        recvCommand();
        break;
      case CHAN_CMPLNADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            cr.completionAddr = pkt->get<uint64_t>();
        else
            cr.completionAddr = pkt->get<uint32_t>() |
                (cr.completionAddr & ~mask(32));
        break;
      case CHAN_CMPLNADDR_HIGH:
        assert(size == sizeof(uint32_t));
        cr.completionAddr = ((uint64_t)pkt->get<uint32_t>() << 32) |
            (cr.completionAddr & mask(32));
        break;
      case CHAN_ERROR:
        assert(size == sizeof(uint32_t));
        cr.error(~pkt->get<uint32_t>() & cr.error());
        break;
      default:
        panic("Write request to unknown channel register number: (%d)%#x\n",
              channelId, daddr);
    }
}

void
CopyEngine::regStats()
{
    PciDevice::regStats();

    using namespace Stats;
    bytesCopied
        .init(regs.chanCount)
        .name(name() + ".bytes_copied")
        .desc("Number of bytes copied by each engine")
        .flags(total)
        ;
    copiesProcessed
        .init(regs.chanCount)
        .name(name() + ".copies_processed")
        .desc("Number of copies processed by each engine")
        .flags(total)
        ;
}

void
CopyEngine::CopyEngineChannel::fetchDescriptor(Addr address)
{
    anDq();
    anBegin("FetchDescriptor");
    DPRINTF(DMACopyEngine, "Reading descriptor from memory location %#x(%#x)\n",
            address, ce->pciToDma(address));
    assert(address);
    busy = true;

    DPRINTF(DMACopyEngine, "dmaAction: %#x, %d bytes, to addr %#x\n",
            ce->pciToDma(address), sizeof(DmaDesc), curDmaDesc);

    cePort.dmaAction(MemCmd::ReadReq, ce->pciToDma(address),
                     sizeof(DmaDesc), &fetchCompleteEvent,
                     (uint8_t*)curDmaDesc, latBeforeBegin);
    lastDescriptorAddr = address;
}

void
CopyEngine::CopyEngineChannel::fetchDescComplete()
{
    DPRINTF(DMACopyEngine, "Read of descriptor complete\n");

    if ((curDmaDesc->command & DESC_CTRL_NULL)) {
        DPRINTF(DMACopyEngine, "Got NULL descriptor, skipping\n");
        assert(!(curDmaDesc->command & DESC_CTRL_CP_STS));
        if (curDmaDesc->command & DESC_CTRL_CP_STS) {
            panic("Shouldn't be able to get here\n");
            nextState = CompletionWrite;
            if (inDrain()) return;
            writeCompletionStatus();
        } else {
            anBegin("Idle");
            anWait();
            busy = false;
            nextState = Idle;
            inDrain();
        }
        return;
    }

    if (curDmaDesc->command & ~DESC_CTRL_CP_STS)
        panic("Descriptor has flag other than completion status set\n");

    nextState = DMARead;
    if (inDrain()) return;
    readCopyBytes();
}

void
CopyEngine::CopyEngineChannel::readCopyBytes()
{
    anBegin("ReadCopyBytes");
    DPRINTF(DMACopyEngine, "Reading %d bytes from memory location %#x(%#x) into copy buffer\n",
            curDmaDesc->len, curDmaDesc->src,
            ce->pciToDma(curDmaDesc->src));
    cePort.dmaAction(MemCmd::ReadReq, ce->pciToDma(curDmaDesc->src),
                     curDmaDesc->len, &readCompleteEvent, copyBuffer, 0);
}

void
CopyEngine::CopyEngineChannel::readCopyBytesComplete()
{
    DPRINTF(DMACopyEngine, "Read of bytes to copy complete\n");

    nextState = DMAWrite;
    if (inDrain()) return;
    writeCopyBytes();
}

void
CopyEngine::CopyEngineChannel::writeCopyBytes()
{
    anBegin("WriteCopyBytes");
    DPRINTF(DMACopyEngine, "Writing %d bytes from buffer to memory location %#x(%#x)\n",
            curDmaDesc->len, curDmaDesc->dest,
            ce->pciToDma(curDmaDesc->dest));

    cePort.dmaAction(MemCmd::WriteReq, ce->pciToDma(curDmaDesc->dest),
                     curDmaDesc->len, &writeCompleteEvent, copyBuffer, 0);

    ce->bytesCopied[channelId] += curDmaDesc->len;
    ce->copiesProcessed[channelId]++;
}

void
CopyEngine::CopyEngineChannel::writeCopyBytesComplete()
{
    DPRINTF(DMACopyEngine, "Write of bytes to copy complete user1: %#x\n",
            curDmaDesc->user1);

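    // Record the address of the completed descriptor in the status register;
    // the low six bits are dropped, presumably because descriptors are
    // 64-byte aligned.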
    cr.status.compl_desc_addr(lastDescriptorAddr >> 6);
    completionDataReg = cr.status() | 1;

    anQ("DMAUsedDescQ", channelId, 1);
    anQ("AppRecvQ", curDmaDesc->user1, curDmaDesc->len);
    if (curDmaDesc->command & DESC_CTRL_CP_STS) {
        nextState = CompletionWrite;
        if (inDrain()) return;
        writeCompletionStatus();
        return;
    }

    continueProcessing();
}

void
CopyEngine::CopyEngineChannel::continueProcessing()
{
    busy = false;

    if (underReset) {
        anBegin("Reset");
        anWait();
        underReset = false;
        refreshNext = false;
        busy = false;
        nextState = Idle;
        return;
    }

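    // Follow the chain if another descriptor is linked; otherwise honour a
    // pending append request, or fall back to idle.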
    if (curDmaDesc->next) {
        nextState = DescriptorFetch;
        fetchAddress = curDmaDesc->next;
        if (inDrain()) return;
        fetchDescriptor(curDmaDesc->next);
    } else if (refreshNext) {
        nextState = AddressFetch;
        refreshNext = false;
        if (inDrain()) return;
        fetchNextAddr(lastDescriptorAddr);
    } else {
        inDrain();
        nextState = Idle;
        anWait();
        anBegin("Idle");
    }
}

void
CopyEngine::CopyEngineChannel::writeCompletionStatus()
{
    anBegin("WriteCompletionStatus");
    DPRINTF(DMACopyEngine, "Writing completion status %#x to address %#x(%#x)\n",
            completionDataReg, cr.completionAddr,
            ce->pciToDma(cr.completionAddr));

    cePort.dmaAction(MemCmd::WriteReq,
                     ce->pciToDma(cr.completionAddr),
                     sizeof(completionDataReg), &statusCompleteEvent,
                     (uint8_t*)&completionDataReg, latAfterCompletion);
}

void
CopyEngine::CopyEngineChannel::writeStatusComplete()
{
    DPRINTF(DMACopyEngine, "Writing completion status complete\n");
    continueProcessing();
}

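// Re-read only the next-descriptor pointer of the last descriptor so that a
// chain appended by software after the original fetch can be picked up.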
void
CopyEngine::CopyEngineChannel::fetchNextAddr(Addr address)
{
    anBegin("FetchNextAddr");
    DPRINTF(DMACopyEngine, "Fetching next address...\n");
    busy = true;
    cePort.dmaAction(MemCmd::ReadReq,
                     ce->pciToDma(address + offsetof(DmaDesc, next)),
                     sizeof(Addr), &addrCompleteEvent,
                     (uint8_t*)curDmaDesc + offsetof(DmaDesc, next), 0);
}

void
CopyEngine::CopyEngineChannel::fetchAddrComplete()
{
    DPRINTF(DMACopyEngine, "Fetching next address complete: %#x\n",
            curDmaDesc->next);
    if (!curDmaDesc->next) {
        DPRINTF(DMACopyEngine, "Got NULL descriptor, nothing more to do\n");
        busy = false;
        nextState = Idle;
        anWait();
        anBegin("Idle");
        inDrain();
        return;
    }
    nextState = DescriptorFetch;
    fetchAddress = curDmaDesc->next;
    if (inDrain()) return;
    fetchDescriptor(curDmaDesc->next);
}

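// Signal completion of an outstanding drain request (if any) and report
// whether the state machine should pause rather than start the next DMA.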
bool
CopyEngine::CopyEngineChannel::inDrain()
{
    if (drainState() == DrainState::Draining) {
        DPRINTF(Drain, "CopyEngine done draining, processing drain event\n");
        signalDrainDone();
    }

    return ce->drainState() != DrainState::Running;
}

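// A channel drains immediately when it is idle (or the device as a whole is
// not running); otherwise the drain completes from inDrain() once the
// in-flight DMA operation finishes.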
DrainState
CopyEngine::CopyEngineChannel::drain()
{
    if (nextState == Idle || ce->drainState() != DrainState::Running) {
        return DrainState::Drained;
    } else {
        DPRINTF(Drain, "CopyEngineChannel not drained\n");
        return DrainState::Draining;
    }
}

void
CopyEngine::serialize(CheckpointOut &cp) const
{
    PciDevice::serialize(cp);
    regs.serialize(cp);
    for (int x = 0; x < chan.size(); x++)
        chan[x]->serializeSection(cp, csprintf("channel%d", x));
}

void
CopyEngine::unserialize(CheckpointIn &cp)
{
    PciDevice::unserialize(cp);
    regs.unserialize(cp);
    for (int x = 0; x < chan.size(); x++)
        chan[x]->unserializeSection(cp, csprintf("channel%d", x));
}

void
CopyEngine::CopyEngineChannel::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(channelId);
    SERIALIZE_SCALAR(busy);
    SERIALIZE_SCALAR(underReset);
    SERIALIZE_SCALAR(refreshNext);
    SERIALIZE_SCALAR(lastDescriptorAddr);
    SERIALIZE_SCALAR(completionDataReg);
    SERIALIZE_SCALAR(fetchAddress);
    int nextState = this->nextState;
    SERIALIZE_SCALAR(nextState);
    arrayParamOut(cp, "curDmaDesc", (uint8_t*)curDmaDesc, sizeof(DmaDesc));
    SERIALIZE_ARRAY(copyBuffer, ce->params()->XferCap);
    cr.serialize(cp);
}

void
CopyEngine::CopyEngineChannel::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(channelId);
    UNSERIALIZE_SCALAR(busy);
    UNSERIALIZE_SCALAR(underReset);
    UNSERIALIZE_SCALAR(refreshNext);
    UNSERIALIZE_SCALAR(lastDescriptorAddr);
    UNSERIALIZE_SCALAR(completionDataReg);
    UNSERIALIZE_SCALAR(fetchAddress);
    int nextState;
    UNSERIALIZE_SCALAR(nextState);
    this->nextState = (ChannelState)nextState;
    arrayParamIn(cp, "curDmaDesc", (uint8_t*)curDmaDesc, sizeof(DmaDesc));
    UNSERIALIZE_ARRAY(copyBuffer, ce->params()->XferCap);
    cr.unserialize(cp);
}

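// Resume the channel's state machine from the saved nextState, e.g. after a
// drain completes or a checkpoint is restored.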
void
CopyEngine::CopyEngineChannel::restartStateMachine()
{
    switch (nextState) {
      case AddressFetch:
        fetchNextAddr(lastDescriptorAddr);
        break;
      case DescriptorFetch:
        fetchDescriptor(fetchAddress);
        break;
      case DMARead:
        readCopyBytes();
        break;
      case DMAWrite:
        writeCopyBytes();
        break;
      case CompletionWrite:
        writeCompletionStatus();
        break;
      case Idle:
        break;
      default:
        panic("Unknown state for CopyEngineChannel\n");
    }
}

void
CopyEngine::CopyEngineChannel::drainResume()
{
    DPRINTF(DMACopyEngine, "Restarting state machine at state %d\n", nextState);
    restartStateMachine();
}

CopyEngine *
CopyEngineParams::create()
{
    return new CopyEngine(this);
}