RubyPort.cc: revision 6882:898047a3672c vs. revision 6893:9cdf9b65d946
/*
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/physical.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"

uint16_t RubyPort::m_num_ports = 0;

RubyPort::RequestMap RubyPort::pending_cpu_requests;

RubyPort::RubyPort(const Params *p)
    : MemObject(p)
{
    m_version = p->version;
    assert(m_version != -1);

    // (revision 6882 initialized a funcMemPort member in the constructor
    // instead; revision 6893 takes the physical memory object from the
    // params and uses physmem/physMemPort)
    physmem = p->physmem;

    m_controller = NULL;
    m_mandatory_q_ptr = NULL;

    m_port_id = m_num_ports++;
    m_request_cnt = 0;
    m_hit_callback = ruby_hit_callback;
    pio_port = NULL;
    physMemPort = NULL;
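    // pio_port and physMemPort are created on demand in getPort() below,
    // when the corresponding port name is requested.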
    assert(m_num_ports <= 2048); // see below for reason
}

void RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
}

Port *
RubyPort::getPort(const std::string &if_name, int idx)
{
    if (if_name == "port") {
        return new M5Port(csprintf("%s-port%d", name(), idx), this);
    } else if (if_name == "pio_port") {
        //
        // ensure there is only one pio port
        //
        assert(pio_port == NULL);

        pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx),
                               this);

        return pio_port;
    } else if (if_name == "physMemPort") {
        // (revision 6882 instead had a "funcmem_port" case here that
        // returned &funcMemPort)
        //
        // RubyPort should only have one port to physical memory
        //
        assert(physMemPort == NULL);

        physMemPort = new M5Port(csprintf("%s-physMemPort", name()),
                                 this);

        return physMemPort;
    } else if (if_name == "functional") {
        //
        // Calls for the functional port only want to access functional
        // memory.  Therefore, directly pass these calls to physmem.
        //
        assert(physmem != NULL);
        return physmem->getPort(if_name, idx);
    }
    return NULL;
}

RubyPort::PioPort::PioPort(const std::string &_name,
                           RubyPort *_port)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(Ruby, "creating port to ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
}

RubyPort::M5Port::M5Port(const std::string &_name,
                         RubyPort *_port)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(Ruby, "creating port from ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
}

Tick
RubyPort::PioPort::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::PioPort::recvAtomic() not implemented!\n");
    return 0;
}


Tick
RubyPort::M5Port::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::M5Port::recvAtomic() not implemented!\n");
    return 0;
}


bool
RubyPort::PioPort::recvTiming(PacketPtr pkt)
{
    //
    // In FS mode, ruby memory will receive pio responses from devices and
    // it must forward these responses back to the particular CPU.
    //
    DPRINTF(MemoryAccess,
            "Pio response for address %#x\n",
            pkt->getAddr());

    assert(pkt->isResponse());

    //
    // First we must retrieve the request port from the sender state
    //
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->sendTiming(pkt);

    return true;
}

bool
RubyPort::M5Port::recvTiming(PacketPtr pkt)
{
    DPRINTF(MemoryAccess,
            "Timing access caught for address %#x\n",
            pkt->getAddr());

    //dsm: based on SimpleTimingPort::recvTiming(pkt);

    //
    // After checking for pio responses, the remainder of packets
    // received by ruby should only be M5 requests, which should never
    // get nacked.  There used to be code to handle nacks here, but
    // I'm pretty sure it didn't work correctly with the drain code,
    // so that would need to be fixed if we ever added it back.
    //
    assert(pkt->isRequest());

    if (pkt->memInhibitAsserted()) {
        warn("memInhibitAsserted???");
        // snooper will supply based on copy of packet
        // still target's responsibility to delete packet
        delete pkt;
        return true;
    }

    //
    // Check for pio requests and directly send them to the dedicated
    // pio port.
    //
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->pio_port != NULL);

        //
        // Save the port in the sender state object to be used later to
        // route the response
        //
        pkt->senderState = new SenderState(this, pkt->senderState);

        return ruby_port->pio_port->sendTiming(pkt);
    }

    //
    // For DMA and CPU requests, translate them to ruby requests before
    // sending them to our assigned ruby port.
    //
    RubyRequestType type = RubyRequestType_NULL;
    Addr pc = 0;
    if (pkt->isRead()) {
        if (pkt->req->isInstFetch()) {
            type = RubyRequestType_IFETCH;
            pc = pkt->req->getPC();
        } else {
            type = RubyRequestType_LD;
        }
    } else if (pkt->isWrite()) {
        type = RubyRequestType_ST;
    } else if (pkt->isReadWrite()) {
        type = RubyRequestType_RMW_Write;
    }

    RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(),
                             pkt->getSize(), pc, type,
                             RubyAccessMode_Supervisor);

    // Submit the ruby request
    int64_t req_id = ruby_port->makeRequest(ruby_request);
    if (req_id == -1) {
        return false;
    }

    // Save the request for the callback
    RubyPort::pending_cpu_requests[req_id] = new RequestCookie(pkt, this);

    return true;
}

void
RubyPort::ruby_hit_callback(int64_t req_id)
{
    //
    // Note: This single function can be called by cpu and dma ports,
    // as well as the functional port.
    //
    RequestMap::iterator i = pending_cpu_requests.find(req_id);
    if (i == pending_cpu_requests.end())
        panic("could not find pending request %d\n", req_id);

    RequestCookie *cookie = i->second;
    pending_cpu_requests.erase(i);

    Packet *pkt = cookie->pkt;
    M5Port *port = cookie->m5Port;
    delete cookie;

    port->hitCallback(pkt);
}

void
RubyPort::M5Port::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    DPRINTF(MemoryAccess, "Hit callback needs response %d\n",
            needsResponse);

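    // By the time the hit callback runs, ruby has finished timing the
    // request; the data access itself is satisfied here with an atomic
    // access to M5's physical memory, which also turns the packet into a
    // response.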
    // (revision 6882 called ruby_port->funcMemPort.sendFunctional(pkt) here)
    ruby_port->physMemPort->sendAtomic(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // sendAtomic() should already have turned packet into
        // atomic response
        assert(pkt->isResponse());
        DPRINTF(MemoryAccess, "Sending packet back over port\n");
        sendTiming(pkt);
    } else {
        delete pkt;
    }
    DPRINTF(MemoryAccess, "Hit callback done!\n");
}

bool
RubyPort::M5Port::sendTiming(PacketPtr pkt)
{
    schedSendTiming(pkt, curTick + 1); //minimum latency, must be > 0
    return true;
}

bool
RubyPort::PioPort::sendTiming(PacketPtr pkt)
{
    schedSendTiming(pkt, curTick + 1); //minimum latency, must be > 0
    return true;
}

bool
RubyPort::M5Port::isPhysMemAddress(Addr addr)
{
    AddrRangeList physMemAddrList;
    bool snoop = false;
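    // Ask the port connected to physical memory for the address ranges its
    // peer owns, then check whether addr falls inside any of them.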
    // (revision 6882 queried funcMemPort's peer ranges here instead)
    ruby_port->physMemPort->getPeerAddressRanges(physMemAddrList, snoop);
    for (AddrRangeIter iter = physMemAddrList.begin();
         iter != physMemAddrList.end();
         iter++) {
        if (addr >= iter->start && addr <= iter->end) {
            DPRINTF(MemoryAccess, "Request found in %#llx - %#llx range\n",
                    iter->start, iter->end);
            return true;
        }
    }
    // (revision 6882 asserted isPioAddress(addr) here before returning)
    return false;
}

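// Check whether addr falls within the address ranges claimed by the device
// connected to the pio port, if one exists.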
bool
RubyPort::M5Port::isPioAddress(Addr addr)
{
    AddrRangeList pioAddrList;
    bool snoop = false;
    if (ruby_port->pio_port == NULL) {
        return false;
    }

    ruby_port->pio_port->getPeerAddressRanges(pioAddrList, snoop);
    for (AddrRangeIter iter = pioAddrList.begin();
         iter != pioAddrList.end();
         iter++) {
        if (addr >= iter->start && addr <= iter->end) {
            DPRINTF(MemoryAccess, "Pio request found in %#llx - %#llx range\n",
                    iter->start, iter->end);
            return true;
        }
    }
    return false;
}