DMASequencer.cc (11284:b3926db25371 → 11339:c45bfadcd51b)
/*
 * Copyright (c) 2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <memory>

#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/RubyDma.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/SequencerMsg.hh"
#include "mem/protocol/SequencerRequestType.hh"
#include "mem/ruby/system/DMASequencer.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "sim/system.hh"

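// The DMASequencer is Ruby's interface for DMA devices: it owns a single
// slave port, tracks one in-flight DMA request at a time, and feeds
// block-sized chunks of that request into the controller's mandatory queue.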
DMASequencer::DMASequencer(const Params *p)
    : MemObject(p), m_ruby_system(p->ruby_system), m_version(p->version),
      m_controller(NULL), m_mandatory_q_ptr(NULL),
      m_usingRubyTester(p->using_ruby_tester),
      slave_port(csprintf("%s.slave", name()), this, 0, p->ruby_system,
                 p->ruby_system->getAccessBackingStore()),
      system(p->system), retry(false)
{
    assert(m_version != -1);
}

void
DMASequencer::init()
{
    MemObject::init();
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    m_is_busy = false;
    m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());
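    // The mask keeps only the within-block offset bits; with the usual
    // 64-byte Ruby block size, getBlockSizeBits() is 6 and the mask
    // evaluates to 0x3f.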

    slave_port.sendRangeChange();
}

BaseSlavePort &
DMASequencer::getSlavePort(const std::string &if_name, PortID idx)
{
    // All DMA memory requests flow through the single "slave" port;
    // any other port name is deferred to the MemObject base class.
    if (if_name != "slave") {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return slave_port;
    }
}

75
76DMASequencer::MemSlavePort::MemSlavePort(const std::string &_name,
77 DMASequencer *_port, PortID id, RubySystem* _ruby_system,
78 bool _access_backing_store)
79 : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
80 m_ruby_system(_ruby_system), access_backing_store(_access_backing_store)
81{
82 DPRINTF(RubyDma, "Created slave memport on ruby sequencer %s\n", _name);
83}
84
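// Entry point for timing-mode packets from the DMA device. Returns true if
// the request was handed to the Ruby sequencer, false if the caller must
// wait for a retry.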
bool
DMASequencer::MemSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);

    if (pkt->cacheResponding())
        panic("DMASequencer should never see a request with the "
              "cacheResponding flag set\n");

    assert(isPhysMemAddress(pkt->getAddr()));
    assert(getOffset(pkt->getAddr()) + pkt->getSize() <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = seq->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(RubyDma, "Request %s 0x%x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    if (!seq->m_usingRubyTester) {
        seq->retry = true;
    }

    DPRINTF(RubyDma, "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    return false;
}

void
DMASequencer::ruby_hit_callback(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Hit callback for %s 0x%x\n", pkt->cmdString(),
            pkt->getAddr());

    // The packet was destined for memory and has not yet been turned
    // into a response
    assert(system->isMemAddr(pkt->getAddr()));
    assert(pkt->isRequest());
    slave_port.hitCallback(pkt);

    // If we had to stall the slave port, wake it up because
    // the sequencer likely has free resources now.
    if (retry) {
        retry = false;
        DPRINTF(RubyDma, "Sequencer may now be free. SendRetry to port %s\n",
                slave_port.name());
        slave_port.sendRetryReq();
    }

    testDrainComplete();
}

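// Called after every completed request; lets an in-progress drain finish
// once the last outstanding request has retired.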
void
DMASequencer::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainState() == DrainState::Draining) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "DMASequencer done draining, signaling drain done\n");
            signalDrainDone();
        }
    }
}

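// gem5 drain hook: report Drained only when no DMA requests are in flight;
// otherwise stay in Draining and let testDrainComplete() signal completion.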
DrainState
DMASequencer::drain()
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    // If the DMASequencer is not empty, then it needs to clear all
    // outstanding requests before it can call signalDrainDone()
    DPRINTF(Config, "outstanding count %d\n", outstandingCount());

    // Set status
    if (outstandingCount() > 0) {
        DPRINTF(Drain, "DMASequencer not drained\n");
        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

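// Completes a packet on the slave port: optionally touches the backing
// store, turns the packet into a response if one is expected, and schedules
// it back to the requester one clock period later.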
void
DMASequencer::MemSlavePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();
    assert(!pkt->isLLSC());
    assert(!pkt->isFlush());

    DPRINTF(RubyDma, "Hit callback needs response %d\n", needsResponse);

    // turn packet around to go back to requester if response expected
    if (access_backing_store) {
        m_ruby_system->getPhysMem()->access(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    if (needsResponse) {
        DPRINTF(RubyDma, "Sending packet back over port\n");
        // send next cycle
        DMASequencer *seq = static_cast<DMASequencer *>(&owner);
        RubySystem *rs = seq->m_ruby_system;
        schedTimingResp(pkt, curTick() + rs->clockPeriod());
    } else {
        delete pkt;
    }

    DPRINTF(RubyDma, "Hit callback done!\n");
}

bool
DMASequencer::MemSlavePort::isPhysMemAddress(Addr addr) const
{
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);
    return seq->system->isMemAddr(addr);
}

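// Accepts one DMA request at a time and issues its first block-sized chunk
// to the mandatory queue. Requests that cross cache-line boundaries are
// split: for example, a 96-byte transfer starting 32 bytes into a 64-byte
// line issues a 32-byte chunk here and the remaining 64 bytes from
// issueNext().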
RequestStatus
DMASequencer::makeRequest(PacketPtr pkt)
{
    if (m_is_busy) {
        return RequestStatus_BufferFull;
    }

    Addr paddr = pkt->getAddr();
    uint8_t* data = pkt->getPtr<uint8_t>();
    int len = pkt->getSize();
    bool write = pkt->isWrite();

    assert(!m_is_busy);  // only support one outstanding DMA request
    m_is_busy = true;

    active_request.start_paddr = paddr;
    active_request.write = write;
    active_request.data = data;
    active_request.len = len;
    active_request.bytes_completed = 0;
    active_request.bytes_issued = 0;
    active_request.pkt = pkt;

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = paddr;
    msg->getLineAddress() = makeLineAddress(msg->getPhysicalAddress());
    msg->getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD;
    int offset = paddr & m_data_block_mask;

    msg->getLen() = (offset + len) <= RubySystem::getBlockSizeBytes() ?
        len : RubySystem::getBlockSizeBytes() - offset;

    if (write && (data != NULL)) {
        msg->getDataBlk().setData(data, offset, msg->getLen());
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
    active_request.bytes_issued += msg->getLen();

    return RequestStatus_Issued;
}

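// Issues the next block-aligned chunk of the active request, or completes
// the request and hands the packet back once all bytes are done.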
void
DMASequencer::issueNext()
{
    assert(m_is_busy);
    active_request.bytes_completed = active_request.bytes_issued;
    if (active_request.len == active_request.bytes_completed) {
        //
        // Must unset the busy flag before calling back the dma port because
        // the callback may cause a previously nacked request to be reissued
        //
        DPRINTF(RubyDma, "DMA request completed\n");
        m_is_busy = false;
        ruby_hit_callback(active_request.pkt);
        return;
    }

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = active_request.start_paddr +
                                active_request.bytes_completed;

    assert((msg->getPhysicalAddress() & m_data_block_mask) == 0);
    msg->getLineAddress() = makeLineAddress(msg->getPhysicalAddress());

    msg->getType() = (active_request.write ? SequencerRequestType_ST :
                      SequencerRequestType_LD);

    msg->getLen() =
        (active_request.len -
         active_request.bytes_completed < RubySystem::getBlockSizeBytes() ?
         active_request.len - active_request.bytes_completed :
         RubySystem::getBlockSizeBytes());

    if (active_request.write) {
        msg->getDataBlk().
            setData(&active_request.data[active_request.bytes_completed],
                    0, msg->getLen());
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
    active_request.bytes_issued += msg->getLen();
    DPRINTF(RubyDma,
            "DMA request bytes issued %d, bytes completed %d, total len %d\n",
            active_request.bytes_issued, active_request.bytes_completed,
            active_request.len);
}

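// Read-path callback from the protocol: copies the returned block data into
// the request buffer (honoring the sub-block offset of the first chunk) and
// then issues the next chunk.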
void
DMASequencer::dataCallback(const DataBlock & dblk)
{
    assert(m_is_busy);
    int len = active_request.bytes_issued - active_request.bytes_completed;
    int offset = 0;
    if (active_request.bytes_completed == 0)
        offset = active_request.start_paddr & m_data_block_mask;
    assert(!active_request.write);
    if (active_request.data != NULL) {
        memcpy(&active_request.data[active_request.bytes_completed],
               dblk.getData(offset, len), len);
    }
    issueNext();
}

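// Write-path callback: a store chunk has been acknowledged, so move on to
// the next chunk.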
void
DMASequencer::ackCallback()
{
    issueNext();
}

void
DMASequencer::recordRequestType(DMASequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            DMASequencerRequestType_to_string(requestType));
}

DMASequencer *
DMASequencerParams::create()
{
    return new DMASequencer(this);
}