DMASequencer.cc (10518:30e3715c9405)
/*
 * Copyright (c) 2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <memory>

31#include "debug/Config.hh"
32#include "debug/Drain.hh"
31#include "debug/RubyDma.hh"
32#include "debug/RubyStats.hh"
33#include "mem/protocol/SequencerMsg.hh"
33#include "debug/RubyDma.hh"
34#include "debug/RubyStats.hh"
35#include "mem/protocol/SequencerMsg.hh"
34#include "mem/protocol/SequencerRequestType.hh"
35#include "mem/ruby/system/DMASequencer.hh"
36#include "mem/ruby/system/System.hh"
36#include "mem/ruby/system/DMASequencer.hh"
37#include "mem/ruby/system/System.hh"
38#include "sim/system.hh"
37
38DMASequencer::DMASequencer(const Params *p)
39
40DMASequencer::DMASequencer(const Params *p)
39 : RubyPort(p)
41 : MemObject(p), m_version(p->version), m_controller(NULL),
42 m_mandatory_q_ptr(NULL), m_usingRubyTester(p->using_ruby_tester),
43 slave_port(csprintf("%s.slave", name()), this, access_phys_mem, 0),
44 drainManager(NULL), system(p->system), retry(false),
45 access_phys_mem(p->access_phys_mem)
40{
46{
47 assert(m_version != -1);
41}
42
43void
44DMASequencer::init()
45{
48}
49
50void
51DMASequencer::init()
52{
46 RubyPort::init();
53 MemObject::init();
54 assert(m_controller != NULL);
55 m_mandatory_q_ptr = m_controller->getMandatoryQueue();
56 m_mandatory_q_ptr->setSender(this);
47 m_is_busy = false;
48 m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());
49}
50
57 m_is_busy = false;
58 m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());
59}
60
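// Overview (summary inferred from the code below): the DMASequencer is a
// MemObject exposing a single queued slave port to the classic memory
// system. It accepts one DMA request at a time (m_is_busy), splits it into
// cache-block-sized chunks in makeRequest() and issueNext(), and hands each
// chunk to the Ruby DMA controller through the mandatory queue.
// m_data_block_mask selects the byte offset within a block; e.g. with
// 64-byte blocks, getBlockSizeBits() is 6 and the mask is 0x3f.
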
BaseSlavePort &
DMASequencer::getSlavePort(const std::string &if_name, PortID idx)
{
    // used by the CPUs to connect the caches to the interconnect, and
    // for the x86 case also the interrupt master
    if (if_name != "slave") {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    } else {
        return slave_port;
    }
}

DMASequencer::MemSlavePort::MemSlavePort(const std::string &_name,
    DMASequencer *_port, bool _access_phys_mem, PortID id)
    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
      access_phys_mem(_access_phys_mem)
{
    DPRINTF(RubyDma, "Created slave memport on ruby sequencer %s\n", _name);
}

bool
DMASequencer::MemSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);

    if (pkt->memInhibitAsserted())
        panic("DMASequencer should never see an inhibited request\n");

    assert(isPhysMemAddress(pkt->getAddr()));
    assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = seq->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(RubyDma, "Request %s 0x%x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    if (!seq->m_usingRubyTester) {
        seq->retry = true;
    }

    DPRINTF(RubyDma, "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    return false;
}

void
DMASequencer::ruby_hit_callback(PacketPtr pkt)
{
    DPRINTF(RubyDma, "Hit callback for %s 0x%x\n", pkt->cmdString(),
            pkt->getAddr());

    // The packet was destined for memory and has not yet been turned
    // into a response
    assert(system->isMemAddr(pkt->getAddr()));
    assert(pkt->isRequest());
    slave_port.hitCallback(pkt);

    // If we had to stall the slave port, wake it up because
    // the sequencer likely has free resources now.
    if (retry) {
        retry = false;
        DPRINTF(RubyDma, "Sequencer may now be free. SendRetry to port %s\n",
                slave_port.name());
        slave_port.sendRetry();
    }

    testDrainComplete();
}

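// Draining (a note on the drain API this code is written against, which is
// an assumption drawn from how drain() is used below): drain() returns the
// number of objects that still have outstanding work, and each of them later
// calls signalDrainDone() on the DrainManager. testDrainComplete() runs from
// ruby_hit_callback() so the last completing DMA request can signal that the
// sequencer has drained.
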
void
DMASequencer::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainManager != NULL) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "DMASequencer done draining, signaling drain done\n");
            drainManager->signalDrainDone();
            // Clear the drain manager once we're done with it.
            drainManager = NULL;
        }
    }
}

unsigned int
DMASequencer::getChildDrainCount(DrainManager *dm)
{
    int count = 0;
    count += slave_port.drain(dm);
    DPRINTF(Config, "count after slave port check %d\n", count);
    return count;
}

unsigned int
DMASequencer::drain(DrainManager *dm)
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    // If the DMASequencer is not empty, then it needs to clear all
    // outstanding requests before it should call
    // drainManager->signalDrainDone().
    DPRINTF(Config, "outstanding count %d\n", outstandingCount());
    bool need_drain = outstandingCount() > 0;

    // Also, get the number of child ports that will need to clear their
    // buffered requests before they call drainManager->signalDrainDone().
    unsigned int child_drain_count = getChildDrainCount(dm);

    // Set status
    if (need_drain) {
        drainManager = dm;

        DPRINTF(Drain, "DMASequencer not drained\n");
        setDrainState(Drainable::Draining);
        return child_drain_count + 1;
    }

    drainManager = NULL;
    setDrainState(Drainable::Drained);
    return child_drain_count;
}

void
DMASequencer::MemSlavePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();
    bool accessPhysMem = access_phys_mem;

    assert(!pkt->isLLSC());
    assert(!pkt->isFlush());

    DPRINTF(RubyDma, "Hit callback needs response %d\n", needsResponse);

    if (accessPhysMem) {
        DMASequencer *seq = static_cast<DMASequencer *>(&owner);
        seq->system->getPhysMem().access(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        DPRINTF(RubyDma, "Sending packet back over port\n");
        // send next cycle
        schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod());
    } else {
        delete pkt;
    }
    DPRINTF(RubyDma, "Hit callback done!\n");
}

bool
DMASequencer::MemSlavePort::isPhysMemAddress(Addr addr) const
{
    DMASequencer *seq = static_cast<DMASequencer *>(&owner);
    return seq->system->isMemAddr(addr);
}

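// Request chunking: makeRequest() issues only the first chunk of a DMA
// request, and a chunk may not span a cache block, so its length is clamped
// at the block boundary. A worked example with illustrative numbers,
// assuming a 64-byte block size: for paddr = 0x1038 and len = 16, offset =
// 0x38 (56), and since 56 + 16 > 64 the first message carries only
// 64 - 56 = 8 bytes; issueNext() then sends the remaining 8 bytes from the
// block-aligned address 0x1040.
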
RequestStatus
DMASequencer::makeRequest(PacketPtr pkt)
{
    if (m_is_busy) {
        return RequestStatus_BufferFull;
    }

    uint64_t paddr = pkt->getAddr();
    uint8_t* data = pkt->getPtr<uint8_t>(true);
    int len = pkt->getSize();
    bool write = pkt->isWrite();

    assert(!m_is_busy); // only support one outstanding DMA request
    m_is_busy = true;

    active_request.start_paddr = paddr;
    active_request.write = write;
    active_request.data = data;
    active_request.len = len;
    active_request.bytes_completed = 0;
    active_request.bytes_issued = 0;
    active_request.pkt = pkt;

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = Address(paddr);
    msg->getLineAddress() = line_address(msg->getPhysicalAddress());
    msg->getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD;
    int offset = paddr & m_data_block_mask;

    msg->getLen() = (offset + len) <= RubySystem::getBlockSizeBytes() ?
        len : RubySystem::getBlockSizeBytes() - offset;

    // active_request.data aliases data here, so one NULL check suffices
    if (write && (data != NULL)) {
        msg->getDataBlk().setData(data, offset, msg->getLen());
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg);
    active_request.bytes_issued += msg->getLen();

    return RequestStatus_Issued;
}

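// issueNext() is the continuation of the state machine: each data/ack
// callback advances bytes_completed to bytes_issued, then either finishes
// the request (calling ruby_hit_callback() with the busy flag already
// cleared, so a retried port request can issue immediately) or enqueues the
// next block-aligned chunk of up to one cache block.
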
void
DMASequencer::issueNext()
{
    assert(m_is_busy);
    active_request.bytes_completed = active_request.bytes_issued;
    if (active_request.len == active_request.bytes_completed) {
        // Must unset the busy flag before calling back the dma port
        // because the callback may cause a previously nacked request
        // to be reissued.
        DPRINTF(RubyDma, "DMA request completed\n");
        m_is_busy = false;
        ruby_hit_callback(active_request.pkt);
        return;
    }

    std::shared_ptr<SequencerMsg> msg =
        std::make_shared<SequencerMsg>(clockEdge());
    msg->getPhysicalAddress() = Address(active_request.start_paddr +
                                        active_request.bytes_completed);

    assert((msg->getPhysicalAddress().getAddress() & m_data_block_mask) == 0);
    msg->getLineAddress() = line_address(msg->getPhysicalAddress());

    msg->getType() = (active_request.write ? SequencerRequestType_ST :
                      SequencerRequestType_LD);

    msg->getLen() =
        (active_request.len -
         active_request.bytes_completed < RubySystem::getBlockSizeBytes() ?
         active_request.len - active_request.bytes_completed :
         RubySystem::getBlockSizeBytes());

    // The message type was already set above; writes additionally need
    // the data copied into the message's data block.
    if (active_request.write) {
        msg->getDataBlk().
            setData(&active_request.data[active_request.bytes_completed],
                    0, msg->getLen());
    }

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg);
    active_request.bytes_issued += msg->getLen();
    DPRINTF(RubyDma,
            "DMA request bytes issued %d, bytes completed %d, total len %d\n",
            active_request.bytes_issued, active_request.bytes_completed,
            active_request.len);
}

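// dataCallback() is invoked by the DMA controller when read data for the
// current chunk arrives. Only the first chunk can start mid-block, so a
// nonzero source offset is applied only when bytes_completed == 0; every
// later chunk is block-aligned.
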
void
DMASequencer::dataCallback(const DataBlock & dblk)
{
    assert(m_is_busy);
    int len = active_request.bytes_issued - active_request.bytes_completed;
    int offset = 0;
    if (active_request.bytes_completed == 0)
        offset = active_request.start_paddr & m_data_block_mask;
    assert(!active_request.write);
    if (active_request.data != NULL) {
        memcpy(&active_request.data[active_request.bytes_completed],
               dblk.getData(offset, len), len);
    }
    issueNext();
}

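// ackCallback(): writes carry no data back, so the controller's
// acknowledgement alone advances the request to its next chunk.
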
void
DMASequencer::ackCallback()
{
    issueNext();
}

void
DMASequencer::recordRequestType(DMASequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            DMASequencerRequestType_to_string(requestType));
}

DMASequencer *
DMASequencerParams::create()
{
    return new DMASequencer(this);
}