DMASequencer.cc: diff of revisions 11111:6da33e720481 and 11284:b3926db25371
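The two revisions are identical except for one hunk in MemSlavePort::recvTimingReq(): the packet query memInhibitAsserted() was renamed to cacheResponding(), with the panic message updated to match. The listing below shows the newer revision once, with the old form noted inline.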
/*
 * Copyright (c) 2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <memory>

#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/RubyDma.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/SequencerMsg.hh"
#include "mem/ruby/system/DMASequencer.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "sim/system.hh"

DMASequencer::DMASequencer(const Params *p)
    : MemObject(p), m_ruby_system(p->ruby_system), m_version(p->version),
      m_controller(NULL), m_mandatory_q_ptr(NULL),
      m_usingRubyTester(p->using_ruby_tester),
      slave_port(csprintf("%s.slave", name()), this, 0, p->ruby_system,
                 p->ruby_system->getAccessBackingStore()),
      system(p->system), retry(false)
{
    assert(m_version != -1);
}

void
DMASequencer::init()
{
    MemObject::init();
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    m_is_busy = false;
    // Mask selecting the byte offset within a cache block
    m_data_block_mask = ~(~0 << RubySystem::getBlockSizeBits());

    slave_port.sendRangeChange();
}

BaseSlavePort &
DMASequencer::getSlavePort(const std::string &if_name, PortID idx)
{
    // used by the DMA devices to connect this sequencer to the
    // Ruby memory system
    if (if_name != "slave") {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return slave_port;
    }
}

DMASequencer::MemSlavePort::MemSlavePort(const std::string &_name,
    DMASequencer *_port, PortID id, RubySystem* _ruby_system,
    bool _access_backing_store)
    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
      m_ruby_system(_ruby_system), access_backing_store(_access_backing_store)
{
    DPRINTF(RubyDma, "Created slave memport on ruby sequencer %s\n", _name);
}

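// Entry point for timing-mode requests from the attached DMA device.
// Returns true if the request was handed to the sequencer; returns
// false (after flagging a retry) if the sequencer cannot accept it yet.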
bool
DMASequencer::MemSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);

    // Changed in 11284:b3926db25371: revision 11111:6da33e720481 used
    // pkt->memInhibitAsserted() and the message "DMASequencer should
    // never see an inhibited request".
    if (pkt->cacheResponding())
        panic("DMASequencer should never see a request with the "
              "cacheResponding flag set\n");

    assert(isPhysMemAddress(pkt->getAddr()));
    assert(getOffset(pkt->getAddr()) + pkt->getSize() <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = seq->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(RubyDma, "Request %s 0x%x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    if (!seq->m_usingRubyTester) {
        seq->retry = true;
    }

    DPRINTF(RubyDma, "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    return false;
}

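// Completion path for the active DMA request: respond to the device on
// the slave port, wake a requester that was told to retry, and let a
// pending drain finish.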
void
DMASequencer::ruby_hit_callback(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Hit callback for %s 0x%x\n", pkt->cmdString(),
            pkt->getAddr());

    // The packet was destined for memory and has not yet been turned
    // into a response
    assert(system->isMemAddr(pkt->getAddr()));
    assert(pkt->isRequest());
    slave_port.hitCallback(pkt);

    // If we had to stall the slave port, wake it up because
    // the sequencer likely has free resources now.
    if (retry) {
        retry = false;
        DPRINTF(RubyDma, "Sequencer may now be free. SendRetry to port %s\n",
                slave_port.name());
        slave_port.sendRetryReq();
    }

    testDrainComplete();
}

void
DMASequencer::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainState() == DrainState::Draining) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "DMASequencer done draining, signaling drain done\n");
            signalDrainDone();
        }
    }
}

DrainState
DMASequencer::drain()
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    // If the DMASequencer is not empty, it needs to clear all outstanding
    // requests before it can call signalDrainDone()
    DPRINTF(Config, "outstanding count %d\n", outstandingCount());

    // Set status
    if (outstandingCount() > 0) {
        DPRINTF(Drain, "DMASequencer not drained\n");
        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

void
DMASequencer::MemSlavePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();
    assert(!pkt->isLLSC());
    assert(!pkt->isFlush());

    DPRINTF(RubyDma, "Hit callback needs response %d\n", needsResponse);

    // turn packet around to go back to requester if response expected
    if (access_backing_store) {
        m_ruby_system->getPhysMem()->access(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    if (needsResponse) {
        DPRINTF(RubyDma, "Sending packet back over port\n");
        // send next cycle
        DMASequencer *seq = static_cast<DMASequencer *>(&owner);
        RubySystem *rs = seq->m_ruby_system;
        schedTimingResp(pkt, curTick() + rs->clockPeriod());
    } else {
        delete pkt;
    }

    DPRINTF(RubyDma, "Hit callback done!\n");
}

bool
DMASequencer::MemSlavePort::isPhysMemAddress(Addr addr) const
{
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);
    return seq->system->isMemAddr(addr);
}

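// Accept a packet from the slave port and issue the first chunk of the
// DMA to the controller's mandatory queue. Only one DMA request can be
// outstanding at a time: a busy sequencer returns
// RequestStatus_BufferFull and the port retries later. A request that
// crosses cache blocks is split; this issues bytes up to the first
// block boundary, and issueNext() issues the remaining block-sized
// chunks as each response arrives.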
RequestStatus
DMASequencer::makeRequest(PacketPtr pkt)
{
    if (m_is_busy) {
        return RequestStatus_BufferFull;
    }

    Addr paddr = pkt->getAddr();
    uint8_t* data = pkt->getPtr<uint8_t>();
    int len = pkt->getSize();
    bool write = pkt->isWrite();

    assert(!m_is_busy); // only support one outstanding DMA request
    m_is_busy = true;

    active_request.start_paddr = paddr;
    active_request.write = write;
    active_request.data = data;
    active_request.len = len;
    active_request.bytes_completed = 0;
    active_request.bytes_issued = 0;
    active_request.pkt = pkt;

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = paddr;
    msg->getLineAddress() = makeLineAddress(msg->getPhysicalAddress());
    msg->getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD;
    int offset = paddr & m_data_block_mask;

    // The first chunk stops at the end of its cache block; issueNext()
    // picks up from there.
    msg->getLen() = (offset + len) <= RubySystem::getBlockSizeBytes() ?
        len : RubySystem::getBlockSizeBytes() - offset;

    // data and active_request.data alias the same pointer, so a single
    // NULL check suffices
    if (write && (data != NULL)) {
        msg->getDataBlk().setData(data, offset, msg->getLen());
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
    active_request.bytes_issued += msg->getLen();

    return RequestStatus_Issued;
}

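// Issue the next block-aligned chunk of the active request, or, if all
// bytes have completed, clear the busy flag and hand the packet back
// via ruby_hit_callback(). Every chunk after the first starts on a
// cache-block boundary.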
void
DMASequencer::issueNext()
{
    assert(m_is_busy);
    active_request.bytes_completed = active_request.bytes_issued;
    if (active_request.len == active_request.bytes_completed) {
        //
        // Must unset the busy flag before calling back the dma port because
        // the callback may cause a previously nacked request to be reissued
        //
        DPRINTF(RubyDma, "DMA request completed\n");
        m_is_busy = false;
        ruby_hit_callback(active_request.pkt);
        return;
    }

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = active_request.start_paddr +
                                active_request.bytes_completed;

    assert((msg->getPhysicalAddress() & m_data_block_mask) == 0);
    msg->getLineAddress() = makeLineAddress(msg->getPhysicalAddress());

    msg->getType() = (active_request.write ? SequencerRequestType_ST :
                      SequencerRequestType_LD);

    // Issue a full block, or whatever remains of the request
    msg->getLen() =
        (active_request.len -
         active_request.bytes_completed < RubySystem::getBlockSizeBytes() ?
         active_request.len - active_request.bytes_completed :
         RubySystem::getBlockSizeBytes());

    if (active_request.write) {
        msg->getDataBlk().
            setData(&active_request.data[active_request.bytes_completed],
                    0, msg->getLen());
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
    active_request.bytes_issued += msg->getLen();
    DPRINTF(RubyDma,
            "DMA request bytes issued %d, bytes completed %d, total len %d\n",
            active_request.bytes_issued, active_request.bytes_completed,
            active_request.len);
}

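// Read completion for one chunk: copy the returned block data into the
// requester's buffer and issue the next chunk. Only the first chunk
// can start mid-block, so a non-zero offset applies only before any
// bytes have completed.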
void
DMASequencer::dataCallback(const DataBlock & dblk)
{
    assert(m_is_busy);
    int len = active_request.bytes_issued - active_request.bytes_completed;
    int offset = 0;
    if (active_request.bytes_completed == 0)
        offset = active_request.start_paddr & m_data_block_mask;
    assert(!active_request.write);
    if (active_request.data != NULL) {
        memcpy(&active_request.data[active_request.bytes_completed],
               dblk.getData(offset, len), len);
    }
    issueNext();
}

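// Write completion for one chunk: nothing to copy back, just issue the
// next chunk.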
void
DMASequencer::ackCallback()
{
    issueNext();
}

void
DMASequencer::recordRequestType(DMASequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            DMASequencerRequestType_to_string(requestType));
}

DMASequencer *
DMASequencerParams::create()
{
    return new DMASequencer(this);
}