old ( 8532:8f27cf8971fe ) new ( 8615:e66a566f2cfa )
/*
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config/the_isa.hh"
#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"
#endif // X86_ISA
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/Ruby.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "mem/physical.hh"

RubyPort::RubyPort(const Params *p)
    : MemObject(p)
{
    m_version = p->version;
    assert(m_version != -1);

    physmem = p->physmem;

    m_controller = NULL;
    m_mandatory_q_ptr = NULL;

    m_request_cnt = 0;
    pio_port = NULL;
    physMemPort = NULL;

    m_usingRubyTester = p->using_ruby_tester;
    access_phys_mem = p->access_phys_mem;

    ruby_system = p->ruby_system;
    waitingOnSequencer = false;
}

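// init() runs after all objects have been constructed; by then the owning
// controller must have been set, so its mandatory queue can be cached here.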
void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
}

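// Ports are created on demand by name: one M5Port per connected CPU or DMA
// master ("port"), a single pio port, a single port to physical memory, and
// a functional port that is forwarded straight to physmem.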
Port *
RubyPort::getPort(const std::string &if_name, int idx)
{
    if (if_name == "port") {
        return new M5Port(csprintf("%s-port%d", name(), idx), this,
                          ruby_system, access_phys_mem);
    }

    if (if_name == "pio_port") {
        // ensure there is only one pio port
        assert(pio_port == NULL);

        pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx), this);

        return pio_port;
    }

    if (if_name == "physMemPort") {
        // RubyPort should only have one port to physical memory
        assert(physMemPort == NULL);

        physMemPort = new M5Port(csprintf("%s-physMemPort", name()), this,
                                 ruby_system, access_phys_mem);

        return physMemPort;
    }

    if (if_name == "functional") {
        // Calls on the functional port only want to access functional
        // memory, so pass them straight through to physmem's ports.
        assert(physmem != NULL);
        return physmem->getPort(if_name, idx);
    }

    return NULL;
}

RubyPort::PioPort::PioPort(const std::string &_name,
                           RubyPort *_port)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(RubyPort, "creating port to ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
}

RubyPort::M5Port::M5Port(const std::string &_name, RubyPort *_port,
                         RubySystem *_system, bool _access_phys_mem)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(RubyPort, "creating port from ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
    ruby_system = _system;
    _onRetryList = false;
    access_phys_mem = _access_phys_mem;
}

Tick
RubyPort::PioPort::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::PioPort::recvAtomic() not implemented!\n");
    return 0;
}

Tick
RubyPort::M5Port::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::M5Port::recvAtomic() not implemented!\n");
    return 0;
}

bool
RubyPort::PioPort::recvTiming(PacketPtr pkt)
{
    // In FS mode, ruby memory will receive pio responses from devices
    // and it must forward these responses back to the particular CPU.
    DPRINTF(RubyPort, "Pio response for address %#x\n", pkt->getAddr());

    assert(pkt->isResponse());

    // First we must retrieve the request port from the sender state
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->sendTiming(pkt);

    return true;
}

bool
RubyPort::M5Port::recvTiming(PacketPtr pkt)
{
    DPRINTF(RubyPort,
            "Timing access caught for address %#x\n", pkt->getAddr());

    //dsm: based on SimpleTimingPort::recvTiming(pkt);

    // The received packets should only be M5 requests, which should never
    // get nacked. There used to be code to handle nacks here, but
    // I'm pretty sure it didn't work correctly with the drain code,
    // so that would need to be fixed if we ever added it back.
    assert(pkt->isRequest());

    if (pkt->memInhibitAsserted()) {
        warn("memInhibitAsserted???");
        // snooper will supply based on copy of packet
        // still target's responsibility to delete packet
        delete pkt;
        return true;
    }

    // Save the port in the sender state object to be used later to
    // route the response
    pkt->senderState = new SenderState(this, pkt->senderState);

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->pio_port != NULL);
        DPRINTF(RubyPort,
                "Request for address %#x is assumed to be a pio request\n",
                pkt->getAddr());

        return ruby_port->pio_port->sendTiming(pkt);
    }

    // For DMA and CPU requests, translate them to ruby requests before
    // sending them to our assigned ruby port.
    RubyRequestType type = RubyRequestType_NULL;

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            DPRINTF(RubyPort, "Issuing SC\n");
            type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubyPort, "Issuing LL\n");
            assert(pkt->isRead());
            type = RubyRequestType_Load_Linked;
        }
    } else if (pkt->req->isLocked()) {
        if (pkt->isWrite()) {
            DPRINTF(RubyPort, "Issuing Locked RMW Write\n");
            type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubyPort, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            type = RubyRequestType_Locked_RMW_Read;
        }
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                type = RubyRequestType_IFETCH;
            } else {
#if THE_ISA == X86_ISA
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                        (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
                if (storeCheck) {
                    type = RubyRequestType_RMW_Read;
                } else {
                    type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(true),
                             pkt->getSize(), pc, type,
                             RubyAccessMode_Supervisor, pkt);

    assert(ruby_request.m_PhysicalAddress.getOffset() + ruby_request.m_Size <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = ruby_port->makeRequest(ruby_request);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to delete the senderState we just created and
    // return false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(RubyPort, "Request %#x issued\n", pkt->getAddr());
        return true;
    }

    //
    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    //
    if (!ruby_port->m_usingRubyTester) {
        ruby_port->addToRetryList(this);
    }

    DPRINTF(RubyPort,
            "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    SenderState* senderState = safe_cast<SenderState*>(pkt->senderState);
    pkt->senderState = senderState->saved;
    delete senderState;
    return false;
}

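// Functional reads scan every controller for the requested line and return
// data from any Read_Only or Read_Write copy; the Backing_Store copy is used
// only when no cache in the hierarchy holds the block.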
bool
RubyPort::M5Port::doFunctionalRead(PacketPtr pkt)
{
    Address address(pkt->getAddr());
    Address line_address(address);
    line_address.makeLineAddress();

    AccessPermission access_perm = AccessPermission_NotPresent;
    int num_controllers = ruby_system->m_abs_cntrl_vec.size();

    DPRINTF(RubyPort, "Functional Read request for %s\n", address);

    unsigned int num_ro = 0;
    unsigned int num_rw = 0;
    unsigned int num_busy = 0;
    unsigned int num_backing_store = 0;
    unsigned int num_invalid = 0;

    // In this loop we count the number of controllers that have the given
    // address in read only, read write and busy states.
    for (int i = 0; i < num_controllers; ++i) {
        access_perm = ruby_system->m_abs_cntrl_vec[i]->
                                            getAccessPermission(line_address);
        if (access_perm == AccessPermission_Read_Only)
            num_ro++;
        else if (access_perm == AccessPermission_Read_Write)
            num_rw++;
        else if (access_perm == AccessPermission_Busy)
            num_busy++;
        else if (access_perm == AccessPermission_Backing_Store)
            // See RubySlicc_Exports.sm for details, but Backing_Store is
            // meant to represent blocks in memory *for Broadcast/Snooping
            // protocols*, where memory has no idea whether it has an
            // exclusive copy of data or not.
            num_backing_store++;
        else if (access_perm == AccessPermission_Invalid ||
                 access_perm == AccessPermission_NotPresent)
            num_invalid++;
    }
    assert(num_rw <= 1);

    uint8_t *data = pkt->getPtr<uint8_t>(true);
    unsigned int size_in_bytes = pkt->getSize();
    unsigned startByte = address.getAddress() - line_address.getAddress();

    // This if case is meant to capture what happens in a Broadcast/Snoop
    // protocol where the block does not exist in the cache hierarchy. You
    // only want to read from the Backing_Store memory if there is no copy in
    // the cache hierarchy, otherwise you want to try to read the RO or RW
    // copies existing in the cache hierarchy (covered by the else statement).
    // The reason is that the Backing_Store memory could easily be stale if
    // there are copies floating around the cache hierarchy, so you want to
    // read it only if the block is not in the cache hierarchy at all.
    if (num_invalid == (num_controllers - 1) &&
        num_backing_store == 1) {
        DPRINTF(RubyPort, "only copy in Backing_Store memory, read from it\n");
        for (int i = 0; i < num_controllers; ++i) {
            access_perm = ruby_system->m_abs_cntrl_vec[i]
                                          ->getAccessPermission(line_address);
            if (access_perm == AccessPermission_Backing_Store) {
                DataBlock& block = ruby_system->m_abs_cntrl_vec[i]
                                                 ->getDataBlock(line_address);

                DPRINTF(RubyPort, "reading from %s block %s\n",
                        ruby_system->m_abs_cntrl_vec[i]->name(), block);
                for (unsigned j = 0; j < size_in_bytes; ++j) {
                    data[j] = block.getByte(j + startByte);
                }
                return true;
            }
        }
    } else {
        // In Broadcast/Snoop protocols, this covers the case where the block
        // is known to exist somewhere in the caching hierarchy, so you want
        // to read any valid RO or RW copy. In directory protocols it is the
        // same: read any valid readable copy of the block.
        DPRINTF(RubyPort, "num_busy = %d, num_ro = %d, num_rw = %d\n",
                num_busy, num_ro, num_rw);
        // In this loop, we try to figure out which controller has a read
        // only or a read write copy of the given address. Any valid copy
        // would suffice for a functional read.
        for (int i = 0; i < num_controllers; ++i) {
            access_perm = ruby_system->m_abs_cntrl_vec[i]
                                          ->getAccessPermission(line_address);
            if (access_perm == AccessPermission_Read_Only ||
                access_perm == AccessPermission_Read_Write) {
                DataBlock& block = ruby_system->m_abs_cntrl_vec[i]
                                                 ->getDataBlock(line_address);

                DPRINTF(RubyPort, "reading from %s block %s\n",
                        ruby_system->m_abs_cntrl_vec[i]->name(), block);
                for (unsigned j = 0; j < size_in_bytes; ++j) {
                    data[j] = block.getByte(j + startByte);
                }
                return true;
            }
        }
    }
    return false;
}

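// Functional writes succeed when all copies are stable, when a Read_Write
// owner exists, or when the only copy is the Backing_Store; in those cases
// every readable, maybe-stale, or backing-store copy is updated so that they
// all stay consistent.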
bool
RubyPort::M5Port::doFunctionalWrite(PacketPtr pkt)
{
    Address addr(pkt->getAddr());
    Address line_addr = line_address(addr);
    AccessPermission access_perm = AccessPermission_NotPresent;
    int num_controllers = ruby_system->m_abs_cntrl_vec.size();

    DPRINTF(RubyPort, "Functional Write request for %s\n", addr);

    unsigned int num_ro = 0;
    unsigned int num_rw = 0;
    unsigned int num_busy = 0;
    unsigned int num_backing_store = 0;
    unsigned int num_invalid = 0;

    // In this loop we count the number of controllers that have the given
    // address in read only, read write and busy states.
    for (int i = 0; i < num_controllers; ++i) {
        access_perm = ruby_system->m_abs_cntrl_vec[i]->
                                               getAccessPermission(line_addr);
        if (access_perm == AccessPermission_Read_Only)
            num_ro++;
        else if (access_perm == AccessPermission_Read_Write)
            num_rw++;
        else if (access_perm == AccessPermission_Busy)
            num_busy++;
        else if (access_perm == AccessPermission_Backing_Store)
            // See RubySlicc_Exports.sm for details, but Backing_Store is
            // meant to represent blocks in memory *for Broadcast/Snooping
            // protocols*, where memory has no idea whether it has an
            // exclusive copy of data or not.
            num_backing_store++;
        else if (access_perm == AccessPermission_Invalid ||
                 access_perm == AccessPermission_NotPresent)
            num_invalid++;
    }

    // If the number of read write copies is more than 1, then there is a bug
    // in the coherence protocol. Otherwise, if all copies are in stable
    // states, i.e. num_busy == 0, we update all the copies. If there is at
    // least one copy in a busy state, we only let the access go through when
    // there is also a read write copy. Finally, if there is no copy in the
    // cache hierarchy at all, we still want to do the write to the memory
    // (Backing_Store) instead of failing.

    DPRINTF(RubyPort, "num_busy = %d, num_ro = %d, num_rw = %d\n",
            num_busy, num_ro, num_rw);
    assert(num_rw <= 1);

    uint8_t *data = pkt->getPtr<uint8_t>(true);
    unsigned int size_in_bytes = pkt->getSize();
    unsigned startByte = addr.getAddress() - line_addr.getAddress();

    if ((num_busy == 0 && num_ro > 0) || num_rw == 1 ||
        (num_invalid == (num_controllers - 1) && num_backing_store == 1)) {
        for (int i = 0; i < num_controllers; ++i) {
            access_perm = ruby_system->m_abs_cntrl_vec[i]->
                                               getAccessPermission(line_addr);
            if (access_perm == AccessPermission_Read_Only ||
                access_perm == AccessPermission_Read_Write ||
                access_perm == AccessPermission_Maybe_Stale ||
                access_perm == AccessPermission_Backing_Store) {
                DataBlock& block = ruby_system->m_abs_cntrl_vec[i]
                                                    ->getDataBlock(line_addr);

                DPRINTF(RubyPort, "%s\n", block);
                for (unsigned j = 0; j < size_in_bytes; ++j) {
                    block.setByte(j + startByte, data[j]);
                }
                DPRINTF(RubyPort, "%s\n", block);
            }
        }
        return true;
    }
    return false;
}

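// Functional (debug) accesses bypass timing: they are serviced directly from
// Ruby's cache and directory state and, when access_phys_mem is set, are
// also forwarded to the attached physical memory, which still holds the
// official copy of the data.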
void
RubyPort::M5Port::recvFunctional(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Functional access caught for address %#x\n",
            pkt->getAddr());

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->pio_port != NULL);
        DPRINTF(RubyPort, "Request for address %#x is a pio request\n",
                pkt->getAddr());
        panic("RubyPort::PioPort::recvFunctional() not implemented!\n");
    }

    assert(pkt->getAddr() + pkt->getSize() <=
           line_address(Address(pkt->getAddr())).getAddress() +
           RubySystem::getBlockSizeBytes());

    bool accessSucceeded = false;
    bool needsResponse = pkt->needsResponse();

    // Do the functional access on ruby memory
    if (pkt->isRead()) {
        accessSucceeded = doFunctionalRead(pkt);
    } else if (pkt->isWrite()) {
        accessSucceeded = doFunctionalWrite(pkt);
    } else {
        panic("RubyPort: unsupported functional command %s\n",
              pkt->cmdString());
    }

    // Unless the requester explicitly said otherwise, generate an error if
    // the functional request failed
    if (!accessSucceeded && !pkt->suppressFuncError()) {
        fatal("Ruby functional %s failed for address %#x\n",
              pkt->isWrite() ? "write" : "read", pkt->getAddr());
    }

    if (access_phys_mem) {
        // The attached physmem contains the official version of data.
        // The following command performs the real functional access.
        // This line should be removed once Ruby supplies the official
        // version of data.
        ruby_port->physMemPort->sendFunctional(pkt);
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        pkt->setFunctionalResponseStatus(accessSucceeded);
        DPRINTF(RubyPort, "Sending packet back over port\n");
        sendFunctional(pkt);
    }
    DPRINTF(RubyPort, "Functional access %s!\n",
            accessSucceeded ? "successful" : "failed");
}

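// Called by the sequencer when a request completes. The response is routed
// back to the M5Port that issued it, and any ports that stalled while the
// sequencer was full are given a retry.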
void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
    // Retrieve the request port from the sender state
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->hitCallback(pkt);

    //
    // If we had to stall the M5Ports, wake them up because the sequencer
    // likely has free resources now.
    //
    if (waitingOnSequencer) {
        //
        // Record the current list of ports to retry on a temporary list
        // before calling sendRetry on those ports. sendRetry will cause an
        // immediate retry, which may result in the ports being put back on
        // the list. Therefore we want to clear the retryList before calling
        // sendRetry.
        //
        std::list<M5Port*> curRetryList(retryList);

        retryList.clear();
        waitingOnSequencer = false;

        for (std::list<M5Port*>::iterator i = curRetryList.begin();
             i != curRetryList.end(); ++i) {
            DPRINTF(RubyPort,
                    "Sequencer may now be free. SendRetry to port %s\n",
                    (*i)->name());
            (*i)->onRetryList(false);
            (*i)->sendRetry();
        }
    }
}

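// Finish a request on the M5 side: fix up LL/SC and flush packets, perform
// the actual data access against physical memory when configured to do so,
// and send the response (if one is expected) back to the requester.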
void
RubyPort::M5Port::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    //
    // Unless specified at configuration, all responses except failed SC
    // and Flush operations access M5 physical memory.
    //
    bool accessPhysMem = access_phys_mem;

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            if (pkt->req->getExtraData() != 0) {
                //
                // Successful SC packets convert to normal writes
                //
                pkt->convertScToWrite();
            } else {
                //
                // Failed SC packets don't access physical memory and thus
                // the RubyPort itself must convert it to a response.
                //
                accessPhysMem = false;
            }
        } else {
            //
            // All LL packets convert to normal loads so that M5 PhysMem does
            // not lock the blocks.
            //
            pkt->convertLlToRead();
        }
    }

    //
    // Flush requests don't access physical memory
    //
    if (pkt->isFlush()) {
        accessPhysMem = false;
    }

    DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);

    if (accessPhysMem) {
        ruby_port->physMemPort->sendAtomic(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        DPRINTF(RubyPort, "Sending packet back over port\n");
        sendTiming(pkt);
    } else {
        delete pkt;
    }
    DPRINTF(RubyPort, "Hit callback done!\n");
}

bool
RubyPort::M5Port::sendTiming(PacketPtr pkt)
{
    // minimum latency, must be > 0
    schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
    return true;
}

bool
RubyPort::PioPort::sendTiming(PacketPtr pkt)
{
    // minimum latency, must be > 0
    schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
    return true;
}

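// An address is a physical-memory address if it falls within one of the
// ranges reported by the peer of physMemPort; anything outside those ranges
// is treated as a pio access by recvTiming()/recvFunctional().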
bool
RubyPort::M5Port::isPhysMemAddress(Addr addr)
{
    AddrRangeList physMemAddrList;
    bool snoop = false;
    ruby_port->physMemPort->getPeerAddressRanges(physMemAddrList, snoop);
    for (AddrRangeIter iter = physMemAddrList.begin();
         iter != physMemAddrList.end();
         iter++) {
        if (addr >= iter->start && addr <= iter->end) {
            DPRINTF(RubyPort, "Request found in %#llx - %#llx range\n",
                    iter->start, iter->end);
            return true;
        }
    }
    return false;
}

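// Devices connected to this port see Ruby's cache line size as the block
// size.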
unsigned
RubyPort::M5Port::deviceBlockSize() const
{
    return (unsigned) RubySystem::getBlockSizeBytes();
}