RubyPort.cc (11346:64e862d3758f)
/*
 * Copyright (c) 2012-2013 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2009-2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2011 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/Ruby.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "mem/simple_mem.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

RubyPort::RubyPort(const Params *p)
    : MemObject(p), m_ruby_system(p->ruby_system), m_version(p->version),
      m_controller(NULL), m_mandatory_q_ptr(NULL),
      m_usingRubyTester(p->using_ruby_tester), system(p->system),
      pioMasterPort(csprintf("%s.pio-master-port", name()), this),
      pioSlavePort(csprintf("%s.pio-slave-port", name()), this),
      memMasterPort(csprintf("%s.mem-master-port", name()), this),
      memSlavePort(csprintf("%s-mem-slave-port", name()), this,
                   p->ruby_system->getAccessBackingStore(), -1,
                   p->no_retry_on_stall),
      gotAddrRanges(p->port_master_connection_count),
      m_isCPUSequencer(p->is_cpu_sequencer)
{
    assert(m_version != -1);

    // create the slave ports based on the number of connected ports
    for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
        slave_ports.push_back(new MemSlavePort(csprintf("%s.slave%d", name(),
            i), this, p->ruby_system->getAccessBackingStore(),
            i, p->no_retry_on_stall));
    }

    // create the master ports based on the number of connected ports
    for (size_t i = 0; i < p->port_master_connection_count; ++i) {
        master_ports.push_back(new PioMasterPort(csprintf("%s.master%d",
            name(), i), this));
    }
}

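// The initializer list above exercises the port and bookkeeping members that
// RubyPort declares in mem/ruby/system/RubyPort.hh. The sketch below is only
// a reading aid reconstructed from the usage in this file; exact types and
// access specifiers are assumptions, not a copy of the real header.
#if 0
class RubyPort : public MemObject
{
    RubySystem *m_ruby_system;            // from p->ruby_system
    uint32_t m_version;                   // asserted != -1 in the constructor
    AbstractController *m_controller;     // must be set before init()
    MessageBuffer *m_mandatory_q_ptr;     // cached from the controller
    bool m_usingRubyTester;
    System *system;

    PioMasterPort pioMasterPort;          // outbound pio (FS-mode devices)
    PioSlavePort pioSlavePort;            // inbound pio
    MemMasterPort memMasterPort;          // forwards non-memory addresses
    MemSlavePort memSlavePort;            // single directly connected slave

    std::vector<MemSlavePort *> slave_ports;   // one per connected CPU port
    std::vector<PioMasterPort *> master_ports; // one per connected device

    unsigned int gotAddrRanges;           // pending range changes (see below)
    bool m_isCPUSequencer;
};
#endif
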
void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();

    for (const auto &s_port : slave_ports)
        s_port->sendRangeChange();
}

BaseMasterPort &
RubyPort::getMasterPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_master_port") {
        return memMasterPort;
    }

    if (if_name == "pio_master_port") {
        return pioMasterPort;
    }

    // used by the x86 CPUs to connect the interrupt PIO and interrupt slave
    // port
    if (if_name != "master") {
        // pass it along to our super class
        return MemObject::getMasterPort(if_name, idx);
    } else {
        if (idx >= static_cast<PortID>(master_ports.size())) {
            panic("RubyPort::getMasterPort: unknown index %d\n", idx);
        }

        return *master_ports[idx];
    }
}

BaseSlavePort &
RubyPort::getSlavePort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_slave_port") {
        return memSlavePort;
    }

    if (if_name == "pio_slave_port")
        return pioSlavePort;

    // used by the CPUs to connect the caches to the interconnect, and
    // for the x86 case also the interrupt master
    if (if_name != "slave") {
        // pass it along to our super class
        return MemObject::getSlavePort(if_name, idx);
    } else {
        if (idx >= static_cast<PortID>(slave_ports.size())) {
            panic("RubyPort::getSlavePort: unknown index %d\n", idx);
        }

        return *slave_ports[idx];
    }
}

RubyPort::PioMasterPort::PioMasterPort(const std::string &_name,
                                        RubyPort *_port)
    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
      reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
{
    DPRINTF(RubyPort, "Created master pioport on sequencer %s\n", _name);
}

RubyPort::PioSlavePort::PioSlavePort(const std::string &_name,
                                     RubyPort *_port)
    : QueuedSlavePort(_name, _port, queue), queue(*_port, *this)
{
    DPRINTF(RubyPort, "Created slave pioport on sequencer %s\n", _name);
}

RubyPort::MemMasterPort::MemMasterPort(const std::string &_name,
                                       RubyPort *_port)
    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
      reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
{
    DPRINTF(RubyPort, "Created master memport on ruby sequencer %s\n", _name);
}

RubyPort::MemSlavePort::MemSlavePort(const std::string &_name, RubyPort *_port,
                                     bool _access_backing_store, PortID id,
                                     bool _no_retry_on_stall)
    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
      access_backing_store(_access_backing_store),
      no_retry_on_stall(_no_retry_on_stall)
{
    DPRINTF(RubyPort, "Created slave memport on ruby sequencer %s\n", _name);
}

bool
RubyPort::PioMasterPort::recvTimingResp(PacketPtr pkt)
{
    RubyPort *rp = static_cast<RubyPort *>(&owner);
    DPRINTF(RubyPort, "Response for address: %#x\n", pkt->getAddr());

    // send next cycle
    rp->pioSlavePort.schedTimingResp(
        pkt, curTick() + rp->m_ruby_system->clockPeriod());
    return true;
}

bool RubyPort::MemMasterPort::recvTimingResp(PacketPtr pkt)
{
    // got a response from a device
    assert(pkt->isResponse());

    // First we must retrieve the request port from the sender State
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
    MemSlavePort *port = senderState->port;
    assert(port != NULL);
    delete senderState;

    // In FS mode, ruby memory will receive pio responses from devices
    // and it must forward these responses back to the particular CPU.
    DPRINTF(RubyPort, "Pio response for address %#x, going to %s\n",
            pkt->getAddr(), port->name());

    // attempt to send the response in the next cycle
    RubyPort *rp = static_cast<RubyPort *>(&owner);
    port->schedTimingResp(pkt, curTick() + rp->m_ruby_system->clockPeriod());

    return true;
}

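// Both timing-response paths above route packets back by stashing the
// originating MemSlavePort in the packet. Reconstructed from the
// pushSenderState() / popSenderState() calls in this file (the authoritative
// definition lives in RubyPort.hh), the sender-state object is essentially:
#if 0
struct RubyPort::SenderState : public Packet::SenderState
{
    MemSlavePort *port;
    SenderState(MemSlavePort *_port) : port(_port) {}
};
#endif
// The request side does pkt->pushSenderState(new SenderState(this)); the
// response side pops it with safe_cast, reads ->port, and deletes it, as in
// MemMasterPort::recvTimingResp() above and ruby_hit_callback() below.
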
bool
RubyPort::PioSlavePort::recvTimingReq(PacketPtr pkt)
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
        AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
        for (auto it = l.begin(); it != l.end(); ++it) {
            if (it->contains(pkt->getAddr())) {
                // generally it is not safe to assume success here as
                // the port could be blocked
                bool M5_VAR_USED success =
                    ruby_port->master_ports[i]->sendTimingReq(pkt);
                assert(success);
                return true;
            }
        }
    }
    panic("Should never reach here!\n");
}

bool
RubyPort::MemSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Timing request for address %#x on port %d\n",
            pkt->getAddr(), id);
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    if (pkt->cacheResponding())
        panic("RubyPort should never see request with the "
              "cacheResponding flag set\n");

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (pkt->cmd != MemCmd::MemFenceReq) {
        if (!isPhysMemAddress(pkt->getAddr())) {
            assert(ruby_port->memMasterPort.isConnected());
            DPRINTF(RubyPort, "Request address %#x assumed to be a "
                    "pio address\n", pkt->getAddr());

            // Save the port in the sender state object to be used later to
            // route the response
            pkt->pushSenderState(new SenderState(this));

            // send next cycle
            RubySystem *rs = ruby_port->m_ruby_system;
            ruby_port->memMasterPort.schedTimingReq(pkt,
                curTick() + rs->clockPeriod());
            return true;
        }

        assert(getOffset(pkt->getAddr()) + pkt->getSize() <=
               RubySystem::getBlockSizeBytes());
    }

    // Submit the ruby request
    RequestStatus requestStatus = ruby_port->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to tell the port to retry at a later point
    // and return false.
    if (requestStatus == RequestStatus_Issued) {
        // Save the port in the sender state object to be used later to
        // route the response
        pkt->pushSenderState(new SenderState(this));

        DPRINTF(RubyPort, "Request %s 0x%x issued\n", pkt->cmdString(),
                pkt->getAddr());
        return true;
    }

    if (pkt->cmd != MemCmd::MemFenceReq) {
        DPRINTF(RubyPort,
                "Request for address %#x did not issue because %s\n",
                pkt->getAddr(), RequestStatus_to_string(requestStatus));
    }

    addToRetryList();

    return false;
}

void
RubyPort::MemSlavePort::addToRetryList()
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    //
    // Unless the requestor does not want retries (e.g., the Ruby tester),
    // record the stalled M5 port for a later retry once the sequencer
    // becomes free.
    //
    if (!no_retry_on_stall && !ruby_port->onRetryList(this)) {
        ruby_port->addToRetryList(this);
    }
}

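// addToRetryList() relies on bookkeeping that RubyPort keeps for stalled
// ports. Reconstructed from its use here and in trySendRetries() below (a
// sketch, not the header's actual wording), the pieces are roughly:
#if 0
// RubyPort members (fragment):
std::vector<MemSlavePort *> retryList;    // ports stalled waiting for retry

bool onRetryList(MemSlavePort *port)
{
    return std::find(retryList.begin(), retryList.end(), port) !=
        retryList.end();
}

void addToRetryList(MemSlavePort *port)
{
    if (!onRetryList(port))
        retryList.push_back(port);
}
#endif
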
void
RubyPort::MemSlavePort::recvFunctional(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr());

    RubyPort *rp M5_VAR_USED = static_cast<RubyPort *>(&owner);
    RubySystem *rs = rp->m_ruby_system;

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(rp->memMasterPort.isConnected());
        DPRINTF(RubyPort, "Pio Request for address: %#x\n", pkt->getAddr());
        panic("RubyPort::PioMasterPort::recvFunctional() not implemented!\n");
    }

    assert(pkt->getAddr() + pkt->getSize() <=
           makeLineAddress(pkt->getAddr()) + RubySystem::getBlockSizeBytes());

    if (access_backing_store) {
        // The attached physmem contains the official version of data.
        // The following command performs the real functional access.
        // This line should be removed once Ruby supplies the official version
        // of data.
        rs->getPhysMem()->functionalAccess(pkt);
    } else {
        bool accessSucceeded = false;
        bool needsResponse = pkt->needsResponse();

        // Do the functional access on ruby memory
        if (pkt->isRead()) {
            accessSucceeded = rs->functionalRead(pkt);
        } else if (pkt->isWrite()) {
            accessSucceeded = rs->functionalWrite(pkt);
        } else {
            panic("Unsupported functional command %s\n", pkt->cmdString());
        }

        // Unless the requester explicitly said otherwise, generate an error
        // if the functional request failed
        if (!accessSucceeded && !pkt->suppressFuncError()) {
            fatal("Ruby functional %s failed for address %#x\n",
                  pkt->isWrite() ? "write" : "read", pkt->getAddr());
        }

        // turn packet around to go back to requester if response expected
        if (needsResponse) {
            pkt->setFunctionalResponseStatus(accessSucceeded);
        }

        DPRINTF(RubyPort, "Functional access %s!\n",
                accessSucceeded ? "successful" : "failed");
    }
}

void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Hit callback for %s 0x%x\n", pkt->cmdString(),
            pkt->getAddr());

    // The packet was destined for memory and has not yet been turned
    // into a response
    assert(system->isMemAddr(pkt->getAddr()));
    assert(pkt->isRequest());

    // First we must retrieve the request port from the sender State
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
    MemSlavePort *port = senderState->port;
    assert(port != NULL);
    delete senderState;

    port->hitCallback(pkt);

    trySendRetries();
}

void
RubyPort::trySendRetries()
{
    //
    // If we had to stall the MemSlavePorts, wake them up because the
    // sequencer likely has free resources now.
    //
    if (!retryList.empty()) {
        // Record the current list of ports to retry on a temporary list
        // before calling sendRetryReq on those ports. sendRetryReq will cause
        // an immediate retry, which may result in the ports being put back on
        // the list. Therefore we want to clear the retryList before calling
        // sendRetryReq.
        std::vector<MemSlavePort *> curRetryList(retryList);

        retryList.clear();

        for (auto i = curRetryList.begin(); i != curRetryList.end(); ++i) {
            DPRINTF(RubyPort,
                    "Sequencer may now be free. SendRetry to port %s\n",
                    (*i)->name());
            (*i)->sendRetryReq();
        }
    }
}

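// MemSlavePort::recvTimingReq() above hands packets to makeRequest(), a pure
// virtual that a concrete subclass (the Sequencer, in practice) implements;
// the subclass later returns completed packets through ruby_hit_callback(),
// which unwinds the SenderState and wakes stalled ports. The toy subclass
// below only illustrates that contract; it is NOT the real Sequencer and its
// member names are invented for the example.
#if 0
class ToyRubyPort : public RubyPort
{
    std::deque<PacketPtr> m_pending;   // hypothetical bookkeeping

    RequestStatus makeRequest(PacketPtr pkt) override
    {
        if (m_pending.size() >= 16)
            return RequestStatus_BufferFull;   // recvTimingReq() will retry
        m_pending.push_back(pkt);
        return RequestStatus_Issued;           // recvTimingReq() returns true
    }

    void protocolDone()   // imagined hook for protocol completion
    {
        PacketPtr pkt = m_pending.front();
        m_pending.pop_front();
        // Routes the response back via the SenderState pushed in
        // recvTimingReq(), then calls trySendRetries().
        ruby_hit_callback(pkt);
    }
};
#endif
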
void
RubyPort::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainState() == DrainState::Draining) {
        unsigned int drainCount = outstandingCount();
        DPRINTF(Drain, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            DPRINTF(Drain, "RubyPort done draining, signaling drain done\n");
            signalDrainDone();
        }
    }
}

DrainState
RubyPort::drain()
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    //
    // If the RubyPort is not empty, then it needs to clear all outstanding
    // requests before it should call signalDrainDone()
    //
    DPRINTF(Config, "outstanding count %d\n", outstandingCount());
    if (outstandingCount() > 0) {
        DPRINTF(Drain, "RubyPort not drained\n");
        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

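// testDrainComplete() and drain() above depend on hooks supplied by the
// concrete sequencer. Reconstructed from the calls in this file (signatures
// are assumptions; the authoritative declarations are in RubyPort.hh):
#if 0
virtual int outstandingCount() const = 0;         // requests still in flight
virtual bool isDeadlockEventScheduled() const = 0;
virtual void descheduleDeadlockEvent() = 0;
#endif
// Draining completes only once outstandingCount() reaches zero; the deadlock
// watchdog is descheduled first so it cannot fire while the port is drained.
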
void
RubyPort::MemSlavePort::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    // Unless specified at configuration, all responses except failed SC
    // and Flush operations access M5 physical memory.
    bool accessPhysMem = access_backing_store;

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            if (pkt->req->getExtraData() != 0) {
                //
                // Successful SC packets convert to normal writes
                //
                pkt->convertScToWrite();
            } else {
                //
                // Failed SC packets don't access physical memory and thus
                // the RubyPort itself must convert it to a response.
                //
                accessPhysMem = false;
            }
        } else {
            //
            // All LL packets convert to normal loads so that M5 PhysMem does
            // not lock the blocks.
            //
            pkt->convertLlToRead();
        }
    }

    // Flush, acquire, release requests don't access physical memory
    if (pkt->isFlush() || pkt->cmd == MemCmd::MemFenceReq) {
        accessPhysMem = false;
    }

    if (pkt->req->isKernel()) {
        accessPhysMem = false;
        needsResponse = true;
    }

    DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);

    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    RubySystem *rs = ruby_port->m_ruby_system;
    if (accessPhysMem) {
        rs->getPhysMem()->access(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        DPRINTF(RubyPort, "Sending packet back over port\n");
        // Send a response in the same cycle. There is no need to delay the
        // response because the response latency is already incurred in the
        // Ruby protocol.
        schedTimingResp(pkt, curTick());
    } else {
        delete pkt;
    }

    DPRINTF(RubyPort, "Hit callback done!\n");
}

AddrRangeList
RubyPort::PioSlavePort::getAddrRanges() const
{
    // at the moment the assumption is that the master does not care
    AddrRangeList ranges;
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);

    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
        ranges.splice(ranges.begin(),
                ruby_port->master_ports[i]->getAddrRanges());
    }
    for (const auto M5_VAR_USED &r : ranges)
        DPRINTF(RubyPort, "%s\n", r.to_string());
    return ranges;
}

bool
RubyPort::MemSlavePort::isPhysMemAddress(Addr addr) const
{
    RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
    return ruby_port->system->isMemAddr(addr);
}

void
RubyPort::ruby_eviction_callback(Addr address)
{
    DPRINTF(RubyPort, "Sending invalidations.\n");
    // Allocate the invalidate request and packet on the stack, as it is
    // assumed they will not be modified or deleted by receivers.
    // TODO: should this really be using funcMasterId?
    Request request(address, RubySystem::getBlockSizeBytes(), 0,
                    Request::funcMasterId);
    // Use a single packet to signal all snooping ports of the invalidation.
    // This assumes that snooping ports do NOT modify the packet/request
    Packet pkt(&request, MemCmd::InvalidateReq);
    for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
        // check if the connected master port is snooping
        if ((*p)->isSnooping()) {
            // send as a snoop request
            (*p)->sendTimingSnoopReq(&pkt);
        }
    }
}

void
RubyPort::PioMasterPort::recvRangeChange()
{
    RubyPort &r = static_cast<RubyPort &>(owner);
    r.gotAddrRanges--;
    if (r.gotAddrRanges == 0 && FullSystem) {
        r.pioSlavePort.sendRangeChange();
    }
}