--- DMASequencer.cc (10472:399f35ed5cca)
+++ DMASequencer.cc (10518:30e3715c9405)
 /*
  * Copyright (c) 2008 Mark D. Hill and David A. Wood
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met: redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer;

--- 14 unchanged lines hidden ---

  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <memory>
 
+#include "debug/Config.hh"
+#include "debug/Drain.hh"
 #include "debug/RubyDma.hh"
 #include "debug/RubyStats.hh"
 #include "mem/protocol/SequencerMsg.hh"
-#include "mem/protocol/SequencerRequestType.hh"
 #include "mem/ruby/system/DMASequencer.hh"
 #include "mem/ruby/system/System.hh"
+#include "sim/system.hh"
 
 DMASequencer::DMASequencer(const Params *p)
-    : RubyPort(p)
+    : MemObject(p), m_version(p->version), m_controller(NULL),
+      m_mandatory_q_ptr(NULL), m_usingRubyTester(p->using_ruby_tester),
+      slave_port(csprintf("%s.slave", name()), this, access_phys_mem, 0),
+      drainManager(NULL), system(p->system), retry(false),
+      access_phys_mem(p->access_phys_mem)
 {
+    assert(m_version != -1);
 }
 
 void
 DMASequencer::init()
 {
-    RubyPort::init();
+    MemObject::init();
+    assert(m_controller != NULL);
+    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
+    m_mandatory_q_ptr->setSender(this);
     m_is_busy = false;
     m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());
 }
 
+BaseSlavePort &
+DMASequencer::getSlavePort(const std::string &if_name, PortID idx)
+{
+    // used by the CPUs to connect the caches to the interconnect, and
+    // for the x86 case also the interrupt master
+    if (if_name != "slave") {
+        // pass it along to our super class
+        return MemObject::getSlavePort(if_name, idx);
+    } else {
+        return slave_port;
+    }
+}
+
+DMASequencer::MemSlavePort::MemSlavePort(const std::string &_name,
+    DMASequencer *_port, bool _access_phys_mem, PortID id)
+    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
+      access_phys_mem(_access_phys_mem)
+{
+    DPRINTF(RubyDma, "Created slave memport on ruby sequencer %s\n", _name);
+}
+
+bool
+DMASequencer::MemSlavePort::recvTimingReq(PacketPtr pkt)
+{
+    DPRINTF(RubyDma, "Timing request for address %#x on port %d\n",
+            pkt->getAddr(), id);
+    DMASequencer *seq = static_cast<DMASequencer *>(&owner);
+
+    if (pkt->memInhibitAsserted())
+        panic("DMASequencer should never see an inhibited request\n");
+
+    assert(isPhysMemAddress(pkt->getAddr()));
+    assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
+           RubySystem::getBlockSizeBytes());
+
+    // Submit the ruby request
+    RequestStatus requestStatus = seq->makeRequest(pkt);
+
+    // If the request successfully issued then we should return true.
+    // Otherwise, we need to tell the port to retry at a later point
+    // and return false.
+    if (requestStatus == RequestStatus_Issued) {
+        DPRINTF(RubyDma, "Request %s 0x%x issued\n", pkt->cmdString(),
+                pkt->getAddr());
+        return true;
+    }
+
+    // Unless one is using the ruby tester, record the stalled M5 port for
+    // later retry when the sequencer becomes free.
+    if (!seq->m_usingRubyTester) {
+        seq->retry = true;
+    }
+
+    DPRINTF(RubyDma, "Request for address %#x did not issue because %s\n",
+            pkt->getAddr(), RequestStatus_to_string(requestStatus));
+
+    return false;
+}
+
+void
+DMASequencer::ruby_hit_callback(PacketPtr pkt)
+{
+    DPRINTF(RubyDma, "Hit callback for %s 0x%x\n", pkt->cmdString(),
+            pkt->getAddr());
+
+    // The packet was destined for memory and has not yet been turned
+    // into a response
+    assert(system->isMemAddr(pkt->getAddr()));
+    assert(pkt->isRequest());
+    slave_port.hitCallback(pkt);
+
+    // If we had to stall the slave port, wake it up because
+    // the sequencer likely has free resources now.
+    if (retry) {
+        retry = false;
+        DPRINTF(RubyDma, "Sequencer may now be free. SendRetry to port %s\n",
+                slave_port.name());
+        slave_port.sendRetry();
+    }
+
+    testDrainComplete();
+}
+
+void
+DMASequencer::testDrainComplete()
+{
+    // If we weren't able to drain before, we might be able to now.
+    if (drainManager != NULL) {
+        unsigned int drainCount = outstandingCount();
+        DPRINTF(Drain, "Drain count: %u\n", drainCount);
+        if (drainCount == 0) {
+            DPRINTF(Drain, "DMASequencer done draining, signaling drain done\n");
+            drainManager->signalDrainDone();
+            // Clear the drain manager once we're done with it.
+            drainManager = NULL;
+        }
+    }
+}
+
+unsigned int
+DMASequencer::getChildDrainCount(DrainManager *dm)
+{
+    int count = 0;
+    count += slave_port.drain(dm);
+    DPRINTF(Config, "count after slave port check %d\n", count);
+    return count;
+}
+
+unsigned int
+DMASequencer::drain(DrainManager *dm)
+{
+    if (isDeadlockEventScheduled()) {
+        descheduleDeadlockEvent();
+    }
+
+    // If the DMASequencer is not empty, then it needs to clear all outstanding
+    // requests before it should call drainManager->signalDrainDone()
+    DPRINTF(Config, "outstanding count %d\n", outstandingCount());
+    bool need_drain = outstandingCount() > 0;
+
+    //
+    // Also, get the number of child ports that will also need to clear
+    // their buffered requests before they call drainManager->signalDrainDone()
+    //
+    unsigned int child_drain_count = getChildDrainCount(dm);
+
+    // Set status
+    if (need_drain) {
+        drainManager = dm;
+
+        DPRINTF(Drain, "DMASequencer not drained\n");
+        setDrainState(Drainable::Draining);
+        return child_drain_count + 1;
+    }
+
+    drainManager = NULL;
+    setDrainState(Drainable::Drained);
+    return child_drain_count;
+}
+
+void
+DMASequencer::MemSlavePort::hitCallback(PacketPtr pkt)
+{
+    bool needsResponse = pkt->needsResponse();
+    bool accessPhysMem = access_phys_mem;
+
+    assert(!pkt->isLLSC());
+    assert(!pkt->isFlush());
+
+    DPRINTF(RubyDma, "Hit callback needs response %d\n", needsResponse);
+
+    if (accessPhysMem) {
+        DMASequencer *seq = static_cast<DMASequencer *>(&owner);
+        seq->system->getPhysMem().access(pkt);
+    } else if (needsResponse) {
+        pkt->makeResponse();
+    }
+
+    // turn packet around to go back to requester if response expected
+    if (needsResponse) {
+        DPRINTF(RubyDma, "Sending packet back over port\n");
+        // send next cycle
+        schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod());
+    } else {
+        delete pkt;
+    }
+    DPRINTF(RubyDma, "Hit callback done!\n");
+}
+
+bool
+DMASequencer::MemSlavePort::isPhysMemAddress(Addr addr) const
+{
+    DMASequencer *seq = static_cast<DMASequencer *>(&owner);
+    return seq->system->isMemAddr(addr);
+}
+
 RequestStatus
 DMASequencer::makeRequest(PacketPtr pkt)
 {
     if (m_is_busy) {
         return RequestStatus_BufferFull;
     }
 
     uint64_t paddr = pkt->getAddr();

--- 104 unchanged lines hidden ---

 
 void
 DMASequencer::ackCallback()
 {
     issueNext();
 }
 
 void
-DMASequencer::recordRequestType(DMASequencerRequestType requestType) {
+DMASequencer::recordRequestType(DMASequencerRequestType requestType)
+{
     DPRINTF(RubyStats, "Recorded statistic: %s\n",
             DMASequencerRequestType_to_string(requestType));
 }
 
 DMASequencer *
 DMASequencerParams::create()
 {
     return new DMASequencer(this);
 }
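
A note on the one piece of address arithmetic visible above: init() builds m_data_block_mask by clearing everything except the low block-offset bits. The following standalone sketch (plain C++, not gem5 code) shows how such a mask is typically used to split a DMA transfer on Ruby block boundaries; the 64-byte block size, the example address and length, and the chunking loop itself are illustrative assumptions, since the body of makeRequest() is hidden in this view.

// Standalone illustration with assumed values; not part of DMASequencer.cc.
#include <cstdint>
#include <cstdio>

int main()
{
    const int blockSizeBits = 6;                    // assume 64-byte Ruby blocks
    const uint64_t blockBytes = 1ULL << blockSizeBits;
    // Same construction as m_data_block_mask in init(): low offset bits set.
    const uint64_t dataBlockMask = ~(~0ULL << blockSizeBits);   // 0x3f here

    uint64_t paddr = 0x100f8;   // hypothetical DMA start address
    uint64_t len = 200;         // hypothetical DMA length in bytes

    // Walk the transfer one block-aligned chunk at a time.
    while (len > 0) {
        uint64_t offset = paddr & dataBlockMask;    // offset inside this block
        uint64_t chunk = blockBytes - offset;       // bytes left in this block
        if (chunk > len)
            chunk = len;
        printf("issue %3llu bytes at %#llx\n",
               (unsigned long long)chunk, (unsigned long long)paddr);
        paddr += chunk;
        len -= chunk;
    }
    return 0;
}

With these values the loop issues one 8-byte chunk up to the first block boundary and then three full 64-byte chunks, which lines up with the assertion in recvTimingReq() that a packet's block offset plus its size never exceeds RubySystem::getBlockSizeBytes().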