RubyPort.cc (7910:8a92b39be50e → 7915:bc39c93a5519)
/*
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config/the_isa.hh"
#if THE_ISA == X86_ISA
#include "arch/x86/insts/microldstop.hh"
#endif // X86_ISA
#include "cpu/testers/rubytest/RubyTester.hh"
#include "mem/physical.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/RubyPort.hh"

RubyPort::RubyPort(const Params *p)
    : MemObject(p)
{
    m_version = p->version;
    assert(m_version != -1);

    physmem = p->physmem;

    m_controller = NULL;
    m_mandatory_q_ptr = NULL;
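    // Note: no controller is attached at construction time; init() below
    // asserts that one has been wired up and then fetches its mandatory
    // queue.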

    m_request_cnt = 0;
    pio_port = NULL;
    physMemPort = NULL;

    m_usingRubyTester = p->using_ruby_tester;
    access_phys_mem = p->access_phys_mem;
}

void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
}

Port *
RubyPort::getPort(const std::string &if_name, int idx)
{
    if (if_name == "port") {
        return new M5Port(csprintf("%s-port%d", name(), idx), this,
                          access_phys_mem);
    }

    if (if_name == "pio_port") {
        // ensure there is only one pio port
        assert(pio_port == NULL);

        pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx), this);

        return pio_port;
    }

    if (if_name == "physMemPort") {
        // RubyPort should only have one port to physical memory
        assert(physMemPort == NULL);

        physMemPort = new M5Port(csprintf("%s-physMemPort", name()), this,
                                 access_phys_mem);

        return physMemPort;
    }

    if (if_name == "functional") {
        // Calls for the functional port only want to access
        // functional memory. Therefore, directly pass these calls
        // along to physmem's port.
        assert(physmem != NULL);
        return physmem->getPort(if_name, idx);
    }

    return NULL;
}

RubyPort::PioPort::PioPort(const std::string &_name,
                           RubyPort *_port)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(Ruby, "creating port to ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
}

RubyPort::M5Port::M5Port(const std::string &_name,
                         RubyPort *_port, bool _access_phys_mem)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(Ruby, "creating port from ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
    _onRetryList = false;
    access_phys_mem = _access_phys_mem;
}

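// Ruby models memory timing itself, so atomic-mode accesses are not
// supported; both recvAtomic() implementations below simply panic.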
Tick
RubyPort::PioPort::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::PioPort::recvAtomic() not implemented!\n");
    return 0;
}

Tick
RubyPort::M5Port::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::M5Port::recvAtomic() not implemented!\n");
    return 0;
}


bool
RubyPort::PioPort::recvTiming(PacketPtr pkt)
{
    // In FS mode, ruby memory will receive pio responses from devices
    // and it must forward these responses back to the particular CPU.
    DPRINTF(MemoryAccess, "Pio response for address %#x\n", pkt->getAddr());

    assert(pkt->isResponse());

    // First we must retrieve the request port from the sender State
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->sendTiming(pkt);

    return true;
}

bool
RubyPort::M5Port::recvTiming(PacketPtr pkt)
{
    DPRINTF(MemoryAccess,
            "Timing access caught for address %#x\n", pkt->getAddr());

    //dsm: based on SimpleTimingPort::recvTiming(pkt);

    // The received packets should only be M5 requests, which should never
    // get nacked. There used to be code to handle nacks here, but
    // I'm pretty sure it didn't work correctly with the drain code,
    // so that would need to be fixed if we ever added it back.
    assert(pkt->isRequest());

    if (pkt->memInhibitAsserted()) {
        warn("memInhibitAsserted???");
        // snooper will supply based on copy of packet
        // still target's responsibility to delete packet
        delete pkt;
        return true;
    }

    // Save the port in the sender state object to be used later to
    // route the response
    pkt->senderState = new SenderState(this, pkt->senderState);
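    // Each SenderState links to the previous senderState through its
    // 'saved' pointer, forming a stack that the response path unwinds in
    // PioPort::recvTiming and ruby_hit_callback.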

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->pio_port != NULL);
        DPRINTF(MemoryAccess,
                "Request for address %#x is assumed to be a pio request\n",
                pkt->getAddr());

        return ruby_port->pio_port->sendTiming(pkt);
    }

    // For DMA and CPU requests, translate them to ruby requests before
    // sending them to our assigned ruby port.
    RubyRequestType type = RubyRequestType_NULL;

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            DPRINTF(MemoryAccess, "Issuing SC\n");
            type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(MemoryAccess, "Issuing LL\n");
            assert(pkt->isRead());
            type = RubyRequestType_Load_Linked;
        }
    } else if (pkt->req->isLocked()) {
        if (pkt->isWrite()) {
            DPRINTF(MemoryAccess, "Issuing Locked RMW Write\n");
            type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(MemoryAccess, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            type = RubyRequestType_Locked_RMW_Read;
        }
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                type = RubyRequestType_IFETCH;
            } else {
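                // On x86, loads flagged with StoreCheck are issued to ruby
                // as RMW reads; all other loads become plain LD requests.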
#if THE_ISA == X86_ISA
                uint32_t flags = pkt->req->getFlags();
                bool storeCheck = flags &
                        (TheISA::StoreCheck << TheISA::FlagShift);
#else
                bool storeCheck = false;
#endif // X86_ISA
                if (storeCheck) {
                    type = RubyRequestType_RMW_Read;
                } else {
                    type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            type = RubyRequestType_ST;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

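    // Build the ruby request. The 'true' argument to getPtr is assumed to
    // be the null_ok flag, tolerating packets that carry no data pointer.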
    RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(true),
                             pkt->getSize(), pc, type,
                             RubyAccessMode_Supervisor, pkt);

    assert(Address(ruby_request.paddr).getOffset() + ruby_request.len <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = ruby_port->makeRequest(ruby_request);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to delete the senderState we just created and
    // return false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(MemoryAccess, "Request %#x issued\n", pkt->getAddr());
        return true;
    }

    //
    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    //
    if (!ruby_port->m_usingRubyTester) {
        ruby_port->addToRetryList(this);
    }

    DPRINTF(MemoryAccess,
            "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    SenderState* senderState = safe_cast<SenderState*>(pkt->senderState);
    pkt->senderState = senderState->saved;
    delete senderState;
    return false;
}

void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
    // Retrieve the request port from the sender State
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->hitCallback(pkt);

    //
    // If we had to stall the M5Ports, wake them up because the sequencer
    // likely has free resources now.
    //
    if (waitingOnSequencer) {
        for (std::list<M5Port*>::iterator i = retryList.begin();
             i != retryList.end(); ++i) {
            (*i)->sendRetry();
            (*i)->onRetryList(false);
            DPRINTF(MemoryAccess,
                    "Sequencer may now be free. SendRetry to port %s\n",
                    (*i)->name());
        }
        retryList.clear();
        waitingOnSequencer = false;
    }
}

void
RubyPort::M5Port::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    //
    // Unless specified at configuration, all responses except failed SC
    // operations access M5 physical memory.
    //
    bool accessPhysMem = access_phys_mem;

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
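            // Ruby signals SC success through the request's extra data:
            // nonzero extra data means the store conditional succeeded.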
            if (pkt->req->getExtraData() != 0) {
                //
                // Successful SC packets convert to normal writes
                //
                pkt->convertScToWrite();
            } else {
                //
                // Failed SC packets don't access physical memory and thus
                // the RubyPort itself must convert it to a response.
                //
                accessPhysMem = false;
                pkt->makeAtomicResponse();
            }
        } else {
            //
            // All LL packets convert to normal loads so that M5 PhysMem does
            // not lock the blocks.
            //
            pkt->convertLlToRead();
        }
    }
    DPRINTF(MemoryAccess, "Hit callback needs response %d\n", needsResponse);

    if (accessPhysMem) {
        ruby_port->physMemPort->sendAtomic(pkt);
    } else {
        pkt->makeResponse();
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        DPRINTF(MemoryAccess, "Sending packet back over port\n");
        sendTiming(pkt);
    } else {
        delete pkt;
    }
    DPRINTF(MemoryAccess, "Hit callback done!\n");
}

bool
RubyPort::M5Port::sendTiming(PacketPtr pkt)
{
    //minimum latency, must be > 0
    schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
    return true;
}

bool
RubyPort::PioPort::sendTiming(PacketPtr pkt)
{
    //minimum latency, must be > 0
    schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
    return true;
}

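// An address targets physical memory iff it falls within one of the
// address ranges advertised by the peer connected to physMemPort;
// anything outside those ranges is treated as pio above.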
bool
RubyPort::M5Port::isPhysMemAddress(Addr addr)
{
    AddrRangeList physMemAddrList;
    bool snoop = false;
    ruby_port->physMemPort->getPeerAddressRanges(physMemAddrList, snoop);
    for (AddrRangeIter iter = physMemAddrList.begin();
         iter != physMemAddrList.end();
         iter++) {
        if (addr >= iter->start && addr <= iter->end) {
            DPRINTF(MemoryAccess, "Request found in %#llx - %#llx range\n",
                    iter->start, iter->end);
            return true;
        }
    }
    return false;
}

unsigned
RubyPort::M5Port::deviceBlockSize() const
{
    return (unsigned) RubySystem::getBlockSizeBytes();
}