AbstractController.cc, comparison of revisions 10986:4fbe4b0adb4d (old) and 11021:e8a6637afa4c (new). Lines prefixed "-" appear only in the old revision, lines prefixed "+" only in the new one; unprefixed lines are common to both.
/*
 * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/slicc_interface/AbstractController.hh"

#include "debug/RubyQueue.hh"
#include "mem/protocol/MemoryMsg.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "sim/system.hh"

AbstractController::AbstractController(const Params *p)
    : MemObject(p), Consumer(this), m_version(p->version),
      m_clusterID(p->cluster_id),
      m_masterId(p->system->getMasterId(name())), m_is_blocking(false),
      m_number_of_TBEs(p->number_of_TBEs),
      m_transitions_per_cycle(p->transitions_per_cycle),
      m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
-      memoryPort(csprintf("%s.memory", name()), this, ""),
-      m_responseFromMemory_ptr(new MessageBuffer())
+      memoryPort(csprintf("%s.memory", name()), this, "")
{
-    // Set the sender pointer of the response message buffer from the
-    // memory controller.
-    // This pointer is used for querying for the current time.
-    m_responseFromMemory_ptr->setSender(this);
-    m_responseFromMemory_ptr->setReceiver(this);
-    m_responseFromMemory_ptr->setOrdering(false);
-
    if (m_version == 0) {
        // Combine the statistics from all controllers
        // of this particular type.
        Stats::registerDumpCallback(new StatsCallback(this));
    }
}

void
AbstractController::init()
{
    params()->ruby_system->registerAbstractController(this);
    m_delayHistogram.init(10);
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram.push_back(new Stats::Histogram());
        m_delayVCHistogram[i]->init(10);
    }
+    if (getMemoryQueue()) {
+        getMemoryQueue()->setSender(this);
+    }
}
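
These hunks are the heart of the change: the old revision allocated the response-from-memory MessageBuffer itself (m_responseFromMemory_ptr) and wired its sender, receiver, and ordering in the constructor; the new revision instead fetches the buffer from the generated controller via getMemoryQueue(), presumably so the queue can be declared and parameterized in the SLICC protocol like any other message buffer. A self-contained toy model of the new ownership scheme, assuming a pure-virtual getMemoryQueue() accessor (all Toy* names are invented):

// ---- begin sketch (illustrative only, not part of the patch) ----
#include <cstdio>

struct ToyMessageBuffer { const char *owner; };

struct ToyAbstractController {
    virtual ~ToyAbstractController() {}
    // Assumed contract: the generated subclass, not the base class,
    // owns the memory response queue.
    virtual ToyMessageBuffer *getMemoryQueue() const = 0;

    void init() {
        // Mirrors the new AbstractController::init(): touch the queue
        // only if the protocol actually declared one.
        if (ToyMessageBuffer *q = getMemoryQueue())
            std::printf("init: wiring queue owned by %s\n", q->owner);
    }
};

struct ToyGeneratedController : ToyAbstractController {
    // In gem5 this buffer would be declared in the protocol's .sm file
    // and instantiated by SLICC; here it is just a heap object.
    ToyMessageBuffer *m_responseFromMemory;
    ToyGeneratedController()
        : m_responseFromMemory(new ToyMessageBuffer{"generated controller"}) {}
    ~ToyGeneratedController() { delete m_responseFromMemory; }
    ToyMessageBuffer *getMemoryQueue() const override
    { return m_responseFromMemory; }
};

int main()
{
    ToyGeneratedController c;
    c.init();
    return 0;
}
// ---- end sketch ----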

void
AbstractController::resetStats()
{
    m_delayHistogram.reset();
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram[i]->reset();
    }
}

void
AbstractController::regStats()
{
    m_fully_busy_cycles
        .name(name() + ".fully_busy_cycles")
        .desc("cycles for which number of transitions == max transitions")
        .flags(Stats::nozero);
}

void
AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
{
    assert(virtualNetwork < m_delayVCHistogram.size());
    m_delayHistogram.sample(delay);
    m_delayVCHistogram[virtualNetwork]->sample(delay);
}

void
AbstractController::stallBuffer(MessageBuffer* buf, Address addr)
{
    if (m_waiting_buffers.count(addr) == 0) {
        MsgVecType* msgVec = new MsgVecType;
        msgVec->resize(m_in_ports, NULL);
        m_waiting_buffers[addr] = msgVec;
    }
    DPRINTF(RubyQueue, "stalling %s port %d addr %s\n", buf, m_cur_in_port,
            addr);
    assert(m_in_ports > m_cur_in_port);
    (*(m_waiting_buffers[addr]))[m_cur_in_port] = buf;
}

void
AbstractController::wakeUpBuffers(Address addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all possible lower rank (i.e. lower priority) buffers that could
        // be waiting on this message.
        //
        for (int in_port_rank = m_cur_in_port - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers(Address addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all buffers, regardless of rank, that could be waiting
        // on this message.
        //
        for (int in_port_rank = m_in_ports - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}
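
stallBuffer() and the wakeUp* functions above keep, per address, a vector with one slot per input-port rank. wakeUpBuffers() reanalyzes only ranks strictly below m_cur_in_port (the lower-priority ports), wakeUpAllBuffers(addr) reanalyzes every rank, and both then free the per-address entry. A self-contained toy of that bookkeeping (ToyBuffer, ToyAddr, and kInPorts are invented stand-ins):

// ---- begin sketch (illustrative only, not part of the patch) ----
#include <cstdio>
#include <map>
#include <vector>

struct ToyBuffer { const char *name; };
typedef unsigned long ToyAddr;

static const int kInPorts = 4;
static std::map<ToyAddr, std::vector<ToyBuffer *> > waiting;

// Like stallBuffer(): remember which buffer stalled at which port rank.
void stall(ToyBuffer *buf, ToyAddr addr, int cur_rank)
{
    std::vector<ToyBuffer *> &vec = waiting[addr]; // created on first use
    vec.resize(kInPorts, NULL);
    vec[cur_rank] = buf;
}

// Like wakeUpBuffers(): reanalyze only ranks strictly below cur_rank,
// then drop the whole per-address entry.
void wake(ToyAddr addr, int cur_rank)
{
    std::map<ToyAddr, std::vector<ToyBuffer *> >::iterator it =
        waiting.find(addr);
    if (it == waiting.end())
        return;
    for (int rank = cur_rank - 1; rank >= 0; rank--)
        if (it->second[rank])
            std::printf("waking %s (rank %d)\n", it->second[rank]->name, rank);
    waiting.erase(it);
}

int main()
{
    ToyBuffer b0 = {"rank0-buffer"};
    stall(&b0, 0x40, 0);
    wake(0x40, 2); // servicing rank 2 wakes the rank-0 stall
    return 0;
}
// ---- end sketch ----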

void
AbstractController::wakeUpAllBuffers()
{
    //
    // Wake up all possible buffers that could be waiting on any message.
    //

    std::vector<MsgVecType*> wokeUpMsgVecs;
    MsgBufType wokeUpMsgBufs;

    if (m_waiting_buffers.size() > 0) {
        for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
             buf_iter != m_waiting_buffers.end();
             ++buf_iter) {
            for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
                 vec_iter != buf_iter->second->end();
                 ++vec_iter) {
                //
                // Make sure the MessageBuffer has not already been reanalyzed
                //
                if (*vec_iter != NULL &&
                    (wokeUpMsgBufs.count(*vec_iter) == 0)) {
                    (*vec_iter)->reanalyzeAllMessages();
                    wokeUpMsgBufs.insert(*vec_iter);
                }
            }
            wokeUpMsgVecs.push_back(buf_iter->second);
        }

        for (std::vector<MsgVecType*>::iterator wb_iter = wokeUpMsgVecs.begin();
             wb_iter != wokeUpMsgVecs.end();
             ++wb_iter) {
            delete (*wb_iter);
        }

        m_waiting_buffers.clear();
    }
}

void
AbstractController::blockOnQueue(Address addr, MessageBuffer* port)
{
    m_is_blocking = true;
    m_block_map[addr] = port;
}

void
AbstractController::unblock(Address addr)
{
    m_block_map.erase(addr);
    if (m_block_map.size() == 0) {
        m_is_blocking = false;
    }
}

BaseMasterPort &
AbstractController::getMasterPort(const std::string &if_name,
                                  PortID idx)
{
    return memoryPort;
}

void
AbstractController::queueMemoryRead(const MachineID &id, Address addr,
                                    Cycles latency)
{
    RequestPtr req = new Request(addr.getAddress(),
                                 RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createRead(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}
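
queueMemoryRead() above and the two write helpers below share the warmup short-circuit: when RubySystem::getWarmupEnabled() is true, the access completes functionally and the same packet is fed straight back into recvTimingResp(), so the protocol sees an immediate response rather than a scheduled timing request. A toy model of that control flow (all names invented):

// ---- begin sketch (illustrative only, not part of the patch) ----
#include <cstdio>

struct ToyPort {
    void sendFunctional(int pkt)
    { std::printf("functional access, pkt %d\n", pkt); }
    void schedTimingReq(int pkt)
    { std::printf("timing request queued, pkt %d\n", pkt); }
};

struct ToyController {
    ToyPort memoryPort;
    bool warmupEnabled;

    void recvTimingResp(int pkt)
    { std::printf("response consumed, pkt %d\n", pkt); }

    void queueMemoryRead(int pkt) {
        if (warmupEnabled) {
            memoryPort.sendFunctional(pkt); // finish the access now...
            recvTimingResp(pkt);            // ...and consume the response in-line
            return;
        }
        memoryPort.schedTimingReq(pkt);     // normal simulated-time path
    }
};

int main()
{
    ToyController c;
    c.warmupEnabled = true;
    c.queueMemoryRead(42);
    return 0;
}
// ---- end sketch ----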

void
AbstractController::queueMemoryWrite(const MachineID &id, Address addr,
                                     Cycles latency, const DataBlock &block)
{
    RequestPtr req = new Request(addr.getAddress(),
                                 RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);
    // Copy the data from the DataBlock into the packet's dynamic buffer.
    memcpy(newData, block.getData(0, RubySystem::getBlockSizeBytes()),
           RubySystem::getBlockSizeBytes());

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::queueMemoryWritePartial(const MachineID &id, Address addr,
                                            Cycles latency,
                                            const DataBlock &block, int size)
{
    RequestPtr req = new Request(addr.getAddress(),
                                 RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[size];
    pkt->dataDynamic(newData);
    // Copy only the requested bytes, starting at the block offset.
    memcpy(newData, block.getData(addr.getOffset(), size), size);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}
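
Each queueMemory* helper pushes a SenderState carrying the requesting MachineID onto the packet; recvTimingResp() below recovers it with a dynamic_cast and deletes it. A self-contained toy of the stacking SenderState idiom (ToyState, IdState, and ToyPacket are invented; gem5's real Packet::SenderState keeps a predecessor chain the same way):

// ---- begin sketch (illustrative only, not part of the patch) ----
#include <cassert>
#include <cstdio>

struct ToyState {
    ToyState *predecessor;
    ToyState() : predecessor(NULL) {}
    virtual ~ToyState() {}
};

struct IdState : ToyState {
    int id; // stands in for the requesting MachineID
    explicit IdState(int i) : id(i) {}
};

struct ToyPacket {
    ToyState *senderState;
    ToyPacket() : senderState(NULL) {}
    void pushSenderState(ToyState *s) {
        s->predecessor = senderState; // stack the new state on top
        senderState = s;
    }
};

int main()
{
    ToyPacket pkt;
    pkt.pushSenderState(new IdState(7)); // request side: tag the packet

    // Response side, as in recvTimingResp(): recover the originator,
    // then free the state.
    IdState *s = dynamic_cast<IdState *>(pkt.senderState);
    assert(s != NULL);
    std::printf("original requestor: %d\n", s->id);
    delete s;
    return 0;
}
// ---- end sketch ----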

void
AbstractController::functionalMemoryRead(PacketPtr pkt)
{
    memoryPort.sendFunctional(pkt);
}

int
AbstractController::functionalMemoryWrite(PacketPtr pkt)
{
    int num_functional_writes = 0;

-    // Check the message buffer that runs from the memory to the controller.
-    num_functional_writes += m_responseFromMemory_ptr->functionalWrite(pkt);
-
    // Check the buffer from the controller to the memory.
    if (memoryPort.checkFunctional(pkt)) {
        num_functional_writes++;
    }

    // Update memory itself.
    memoryPort.sendFunctional(pkt);
    return num_functional_writes + 1;
}

void
AbstractController::recvTimingResp(PacketPtr pkt)
{
+    assert(getMemoryQueue());
    assert(pkt->isResponse());

    std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
    (*msg).m_Addr.setAddress(pkt->getAddr());
    (*msg).m_Sender = m_machineID;

    SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
    (*msg).m_OriginalRequestorMachId = s->id;
    delete s;

    if (pkt->isRead()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_READ;
        (*msg).m_MessageSize = MessageSizeType_Response_Data;

        // Copy data from the packet
        (*msg).m_DataBlk.setData(pkt->getPtr<uint8_t>(), 0,
                                 RubySystem::getBlockSizeBytes());
    } else if (pkt->isWrite()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_WB;
        (*msg).m_MessageSize = MessageSizeType_Writeback_Control;
    } else {
        panic("Incorrect packet type received from memory controller!");
    }

-    m_responseFromMemory_ptr->enqueue(msg);
+    getMemoryQueue()->enqueue(msg);
    delete pkt;
}

bool
AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
{
    controller->recvTimingResp(pkt);
    return true;
}

AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                           AbstractController *_controller,
                                           const std::string &_label)
    : QueuedMasterPort(_name, _controller, reqQueue, snoopRespQueue),
      reqQueue(*_controller, *this, _label),
      snoopRespQueue(*_controller, *this, _label),
      controller(_controller)
{
}