RubyPort.cc: revisions 9662:59a7df953d5e and 9814:7ad2b0186a32 (identical except that 9814 adds M5Port::deviceBlockSize(); the listing below shows the newer revision)

/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * Copyright (c) 2011 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/Ruby.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "sim/system.hh"

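// Note: one M5Port/PioPort is created per connected peer; the connection
// counts (port_slave_connection_count, port_master_connection_count) come
// from the Params generated by the Python configuration, so slave_ports[i]
// pairs with the i-th CPU-side port wired up there.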
RubyPort::RubyPort(const Params *p)
    : MemObject(p), m_version(p->version), m_controller(NULL),
      m_mandatory_q_ptr(NULL),
      pio_port(csprintf("%s-pio-port", name()), this),
      m_usingRubyTester(p->using_ruby_tester), m_request_cnt(0),
      drainManager(NULL), ruby_system(p->ruby_system), system(p->system),
      waitingOnSequencer(false), access_phys_mem(p->access_phys_mem)
{
    assert(m_version != -1);

    // create the slave ports based on the number of connected ports
    for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
        slave_ports.push_back(new M5Port(csprintf("%s-slave%d", name(), i),
                                         this, ruby_system,
                                         access_phys_mem));
    }

    // create the master ports based on the number of connected ports
    for (size_t i = 0; i < p->port_master_connection_count; ++i) {
        master_ports.push_back(new PioPort(csprintf("%s-master%d", name(),
                                                    i), this));
    }
}

void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    m_mandatory_q_ptr->setSender(this);
}

BaseMasterPort &
RubyPort::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name == "pio_port") {
        return pio_port;
    }

    // used by the x86 CPUs to connect the interrupt PIO and interrupt slave
    // port
    if (if_name != "master") {
        // pass it along to our super class
        return MemObject::getMasterPort(if_name, idx);
    } else {
        if (idx >= static_cast<PortID>(master_ports.size())) {
            panic("RubyPort::getMasterPort: unknown index %d\n", idx);
        }

        return *master_ports[idx];
    }
}

BaseSlavePort &
RubyPort::getSlavePort(const std::string &if_name, PortID idx)
{
    // used by the CPUs to connect the caches to the interconnect, and
    // for the x86 case also the interrupt master
    if (if_name != "slave") {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    } else {
        if (idx >= static_cast<PortID>(slave_ports.size())) {
            panic("RubyPort::getSlavePort: unknown index %d\n", idx);
        }

        return *slave_ports[idx];
    }
}

RubyPort::PioPort::PioPort(const std::string &_name,
                           RubyPort *_port)
    : QueuedMasterPort(_name, _port, queue), queue(*_port, *this)
{
    DPRINTF(RubyPort, "creating master port on ruby sequencer %s\n", _name);
}

RubyPort::M5Port::M5Port(const std::string &_name, RubyPort *_port,
                         RubySystem *_system, bool _access_phys_mem)
    : QueuedSlavePort(_name, _port, queue), queue(*_port, *this),
      ruby_port(_port), ruby_system(_system),
      _onRetryList(false), access_phys_mem(_access_phys_mem)
{
    DPRINTF(RubyPort, "creating slave port on ruby sequencer %s\n", _name);
}

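// Ruby models timing-mode memory only; there is no atomic fast path, so a
// system configured to run with atomic memory accesses will hit the panic
// below.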
Tick
RubyPort::M5Port::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::M5Port::recvAtomic() not implemented!\n");
    return 0;
}

bool
RubyPort::PioPort::recvTimingResp(PacketPtr pkt)
{
    // In FS mode, ruby memory will receive pio responses from devices
    // and it must forward these responses back to the particular CPU.
    DPRINTF(RubyPort, "Pio response for address %#x\n", pkt->getAddr());

    // First we must retrieve the request port from the sender state
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
    M5Port *port = senderState->port;
    assert(port != NULL);
    delete senderState;

    port->sendTimingResp(pkt);

    return true;
}

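// recvTimingResp() above relies on a small SenderState stack carried in the
// packet itself: the request path below pushes an entry naming the CPU-side
// port, and the response path pops it to find where the response must go.
// Sketch of the round trip (all names as used in this file):
//
//     recvTimingReq():   pkt->pushSenderState(new SenderState(this));
//     recvTimingResp():  SenderState *ss =
//                            safe_cast<SenderState *>(pkt->popSenderState());
//                        ss->port->sendTimingResp(pkt);
//
// The same push happens for cacheable requests, with the pop done in
// ruby_hit_callback() instead.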
bool
RubyPort::M5Port::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyPort,
            "Timing access caught for address %#x\n", pkt->getAddr());

    // dsm: based on SimpleTimingPort::recvTimingReq(pkt);

    if (pkt->memInhibitAsserted())
        panic("RubyPort should never see an inhibited request\n");

    // Save the port in the sender state object to be used later to
    // route the response
    pkt->pushSenderState(new SenderState(this));

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->pio_port.isConnected());
        DPRINTF(RubyPort,
                "Request for address %#x is assumed to be a pio request\n",
                pkt->getAddr());

        // send next cycle
        ruby_port->pio_port.schedTimingReq(pkt,
            curTick() + g_system_ptr->clockPeriod());
        return true;
    }

    assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
           RubySystem::getBlockSizeBytes());

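    // For example, with Ruby's usual 64-byte block size, a 4-byte access at
    // block offset 60 satisfies the assert above (60 + 4 <= 64), while the
    // same access at offset 62 would straddle a block boundary and fail it.
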
    // Submit the ruby request
    RequestStatus requestStatus = ruby_port->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to delete the SenderState we just created and
    // return false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(RubyPort, "Request %#x issued\n", pkt->getAddr());
        return true;
    }

    //
    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    //
    if (!ruby_port->m_usingRubyTester) {
        ruby_port->addToRetryList(this);
    }

    DPRINTF(RubyPort,
            "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    SenderState* senderState = safe_cast<SenderState*>(pkt->senderState);
    pkt->senderState = senderState->predecessor;
    delete senderState;
    return false;
}

void
RubyPort::M5Port::recvFunctional(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Functional access caught for address %#x\n",
            pkt->getAddr());

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->pio_port.isConnected());
        DPRINTF(RubyPort, "Request for address %#x is a pio request\n",
                pkt->getAddr());
        panic("RubyPort::PioPort::recvFunctional() not implemented!\n");
    }

    assert(pkt->getAddr() + pkt->getSize() <=
           line_address(Address(pkt->getAddr())).getAddress() +
           RubySystem::getBlockSizeBytes());

    bool accessSucceeded = false;
    bool needsResponse = pkt->needsResponse();

    // Do the functional access on ruby memory
    if (pkt->isRead()) {
        accessSucceeded = ruby_system->functionalRead(pkt);
    } else if (pkt->isWrite()) {
        accessSucceeded = ruby_system->functionalWrite(pkt);
    } else {
        panic("RubyPort: unsupported functional command %s\n",
              pkt->cmdString());
    }

    // Unless the requester explicitly said otherwise, generate an error if
    // the functional request failed
    if (!accessSucceeded && !pkt->suppressFuncError()) {
        fatal("Ruby functional %s failed for address %#x\n",
              pkt->isWrite() ? "write" : "read", pkt->getAddr());
    }

    if (access_phys_mem) {
        // The attached physmem contains the official version of data.
        // The following command performs the real functional access.
        // This line should be removed once Ruby supplies the official version
        // of data.
        ruby_port->system->getPhysMem().functionalAccess(pkt);
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        pkt->setFunctionalResponseStatus(accessSucceeded);

        // @todo There should not be a reverse call since the response is
        // communicated through the packet pointer
        // DPRINTF(RubyPort, "Sending packet back over port\n");
        // sendFunctional(pkt);
    }
    DPRINTF(RubyPort, "Functional access %s!\n",
            accessSucceeded ? "successful" : "failed");
}

void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
    // Retrieve the request port from the sender state
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->predecessor;
    delete senderState;

    port->hitCallback(pkt);

    //
    // If we had to stall the M5Ports, wake them up because the sequencer
    // likely has free resources now.
    //
    if (waitingOnSequencer) {
        //
        // Record the current list of ports to retry on a temporary list
        // before calling sendRetry on those ports. sendRetry will cause an
        // immediate retry, which may result in the ports being put back on
        // the list. Therefore we want to clear the retryList before calling
        // sendRetry.
        //
        std::list<M5Port*> curRetryList(retryList);

        retryList.clear();
        waitingOnSequencer = false;

        for (std::list<M5Port*>::iterator i = curRetryList.begin();
             i != curRetryList.end(); ++i) {
            DPRINTF(RubyPort,
                    "Sequencer may now be free. SendRetry to port %s\n",
                    (*i)->name());
            (*i)->onRetryList(false);
            (*i)->sendRetry();
        }
    }

    testDrainComplete();
}

void
RubyPort::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainManager != NULL) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "RubyPort done draining, signaling drain done\n");
            drainManager->signalDrainDone();
            // Clear the drain manager once we're done with it.
            drainManager = NULL;
        }
    }
}

unsigned int
RubyPort::getChildDrainCount(DrainManager *dm)
{
    int count = 0;

    if (pio_port.isConnected()) {
        count += pio_port.drain(dm);
        DPRINTF(Config, "count after pio check %d\n", count);
    }

    for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
        count += (*p)->drain(dm);
        DPRINTF(Config, "count after slave port check %d\n", count);
    }

    for (std::vector<PioPort*>::iterator p = master_ports.begin();
         p != master_ports.end(); ++p) {
        count += (*p)->drain(dm);
        DPRINTF(Config, "count after master port check %d\n", count);
    }

    DPRINTF(Config, "final count %d\n", count);

    return count;
}

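// gem5's drain protocol: drain() returns the number of objects that still
// have work outstanding and therefore cannot drain yet. RubyPort reports
// its child ports' counts plus one for itself if sequencer requests are in
// flight; testDrainComplete() later signals the DrainManager once
// outstandingCount() reaches zero.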
unsigned int
RubyPort::drain(DrainManager *dm)
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    //
    // If the RubyPort is not empty, then it needs to clear all outstanding
    // requests before it should call drainManager->signalDrainDone()
    //
    DPRINTF(Config, "outstanding count %d\n", outstandingCount());
    bool need_drain = outstandingCount() > 0;

    //
    // Also, get the number of child ports that will also need to clear
    // their buffered requests before they call
    // drainManager->signalDrainDone()
    //
    unsigned int child_drain_count = getChildDrainCount(dm);

    // Set status
    if (need_drain) {
        drainManager = dm;

        DPRINTF(Drain, "RubyPort not drained\n");
        setDrainState(Drainable::Draining);
        return child_drain_count + 1;
    }

    drainManager = NULL;
    setDrainState(Drainable::Drained);
    return child_drain_count;
}

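// SC success or failure is signaled through the request's extra data: the
// sequencer leaves a non-zero value there for a store-conditional that
// succeeded. Successful SCs become plain writes before touching physical
// memory; failed SCs and flushes skip memory entirely and are turned into
// responses right here.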
void
RubyPort::M5Port::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    //
    // Unless specified at configuration, all responses except failed SC
    // and Flush operations access M5 physical memory.
    //
    bool accessPhysMem = access_phys_mem;

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            if (pkt->req->getExtraData() != 0) {
                //
                // Successful SC packets convert to normal writes
                //
                pkt->convertScToWrite();
            } else {
                //
                // Failed SC packets don't access physical memory and thus
                // the RubyPort itself must convert it to a response.
                //
                accessPhysMem = false;
            }
        } else {
            //
            // All LL packets convert to normal loads so that M5 PhysMem does
            // not lock the blocks.
            //
            pkt->convertLlToRead();
        }
    }

    //
    // Flush requests don't access physical memory
    //
    if (pkt->isFlush()) {
        accessPhysMem = false;
    }

    DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);

    if (accessPhysMem) {
        ruby_port->system->getPhysMem().access(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        DPRINTF(RubyPort, "Sending packet back over port\n");
        // send next cycle
        schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod());
    } else {
        delete pkt;
    }
    DPRINTF(RubyPort, "Hit callback done!\n");
}

AddrRangeList
RubyPort::M5Port::getAddrRanges() const
{
    // at the moment the assumption is that the master does not care
    AddrRangeList ranges;
    return ranges;
}

bool
RubyPort::M5Port::isPhysMemAddress(Addr addr)
{
    return ruby_port->system->isMemAddr(addr);
}

unsigned
RubyPort::M5Port::deviceBlockSize() const
{
    return (unsigned) RubySystem::getBlockSizeBytes();
}

void
RubyPort::ruby_eviction_callback(const Address& address)
{
    DPRINTF(RubyPort, "Sending invalidations.\n");
    // This request is deleted in the stack-allocated packet destructor
    // when this function exits
    // TODO: should this really be using funcMasterId?
    RequestPtr req =
        new Request(address.getAddress(), 0, 0, Request::funcMasterId);
    // Use a single packet to signal all snooping ports of the invalidation.
    // This assumes that snooping ports do NOT modify the packet/request
    Packet pkt(req, MemCmd::InvalidationReq);
    for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
        // check if the connected master port is snooping
        if ((*p)->isSnooping()) {
            // send as a snoop request
            (*p)->sendTimingSnoopReq(&pkt);
        }
    }
}