// AbstractController.cc — annotated view across revisions 10783:631e736554c9 and 10837:ecbab2522757
1/*
2 * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include "mem/protocol/MemoryMsg.hh"
30#include "mem/ruby/slicc_interface/AbstractController.hh"
31#include "mem/ruby/system/Sequencer.hh"
32#include "mem/ruby/system/System.hh"
33#include "sim/system.hh"
34
35AbstractController::AbstractController(const Params *p)
36 : MemObject(p), Consumer(this), m_version(p->version),
37 m_clusterID(p->cluster_id),
38 m_masterId(p->system->getMasterId(name())), m_is_blocking(false),
39 m_number_of_TBEs(p->number_of_TBEs),
40 m_transitions_per_cycle(p->transitions_per_cycle),
41 m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
42 memoryPort(csprintf("%s.memory", name()), this, ""),
1/*
2 * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include "mem/protocol/MemoryMsg.hh"
30#include "mem/ruby/slicc_interface/AbstractController.hh"
31#include "mem/ruby/system/Sequencer.hh"
32#include "mem/ruby/system/System.hh"
33#include "sim/system.hh"
34
35AbstractController::AbstractController(const Params *p)
36 : MemObject(p), Consumer(this), m_version(p->version),
37 m_clusterID(p->cluster_id),
38 m_masterId(p->system->getMasterId(name())), m_is_blocking(false),
39 m_number_of_TBEs(p->number_of_TBEs),
40 m_transitions_per_cycle(p->transitions_per_cycle),
41 m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
42 memoryPort(csprintf("%s.memory", name()), this, ""),
43 m_responseFromMemory_ptr(new MessageBuffer()),
44 m_rubySystem(p->ruby_system)
43 m_responseFromMemory_ptr(new MessageBuffer())
45{
46 // Set the sender pointer of the response message buffer from the
47 // memory controller.
48 // This pointer is used for querying for the current time.
49 m_responseFromMemory_ptr->setSender(this);
50 m_responseFromMemory_ptr->setReceiver(this);
51 m_responseFromMemory_ptr->setOrdering(false);
52
53 if (m_version == 0) {
54 // Combine the statistics from all controllers
55 // of this particular type.
56 Stats::registerDumpCallback(new StatsCallback(this));
57 }
58}
59
void
AbstractController::init()
{
    // Register this controller with the global RubySystem so that
    // functional accesses and stat handling can reach it.
    params()->ruby_system->registerAbstractController(this);
    // Aggregate message-delay histogram plus one histogram per
    // virtual network (10 buckets each).
    m_delayHistogram.init(10);
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram.push_back(new Stats::Histogram());
        m_delayVCHistogram[i]->init(10);
    }
}
71
72void
73AbstractController::resetStats()
74{
75 m_delayHistogram.reset();
76 uint32_t size = Network::getNumberOfVirtualNetworks();
77 for (uint32_t i = 0; i < size; i++) {
78 m_delayVCHistogram[i]->reset();
79 }
80}
81
82void
83AbstractController::regStats()
84{
85 m_fully_busy_cycles
86 .name(name() + ".fully_busy_cycles")
87 .desc("cycles for which number of transistions == max transitions")
88 .flags(Stats::nozero);
89}
90
91void
92AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
93{
94 assert(virtualNetwork < m_delayVCHistogram.size());
95 m_delayHistogram.sample(delay);
96 m_delayVCHistogram[virtualNetwork]->sample(delay);
97}
98
99void
100AbstractController::stallBuffer(MessageBuffer* buf, Address addr)
101{
102 if (m_waiting_buffers.count(addr) == 0) {
103 MsgVecType* msgVec = new MsgVecType;
104 msgVec->resize(m_in_ports, NULL);
105 m_waiting_buffers[addr] = msgVec;
106 }
107 (*(m_waiting_buffers[addr]))[m_cur_in_port] = buf;
108}
109
110void
111AbstractController::wakeUpBuffers(Address addr)
112{
113 if (m_waiting_buffers.count(addr) > 0) {
114 //
115 // Wake up all possible lower rank (i.e. lower priority) buffers that could
116 // be waiting on this message.
117 //
118 for (int in_port_rank = m_cur_in_port - 1;
119 in_port_rank >= 0;
120 in_port_rank--) {
121 if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
122 (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
123 }
124 }
125 delete m_waiting_buffers[addr];
126 m_waiting_buffers.erase(addr);
127 }
128}
129
void
AbstractController::wakeUpAllBuffers(Address addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up the stalled buffers of *every* input port (unlike
        // wakeUpBuffers, which only wakes ports of lower rank than the
        // current one) that are waiting on this address.
        //
        for (int in_port_rank = m_in_ports - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}
149
void
AbstractController::wakeUpAllBuffers()
{
    //
    // Wake up all possible buffers that could be waiting on any message.
    //

    // Vectors are collected here and deleted in a second pass so the map
    // is not mutated while it is being iterated.
    std::vector<MsgVecType*> wokeUpMsgVecs;

    if(m_waiting_buffers.size() > 0) {
        for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
             buf_iter != m_waiting_buffers.end();
             ++buf_iter) {
            for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
                 vec_iter != buf_iter->second->end();
                 ++vec_iter) {
                if (*vec_iter != NULL) {
                    // Re-examine every stalled message in this buffer.
                    (*vec_iter)->reanalyzeAllMessages();
                }
            }
            wokeUpMsgVecs.push_back(buf_iter->second);
        }

        // Second pass: free the slot vectors now that iteration is done.
        for (std::vector<MsgVecType*>::iterator wb_iter = wokeUpMsgVecs.begin();
             wb_iter != wokeUpMsgVecs.end();
             ++wb_iter) {
            delete (*wb_iter);
        }

        m_waiting_buffers.clear();
    }
}
182
183void
184AbstractController::blockOnQueue(Address addr, MessageBuffer* port)
185{
186 m_is_blocking = true;
187 m_block_map[addr] = port;
188}
189
190void
191AbstractController::unblock(Address addr)
192{
193 m_block_map.erase(addr);
194 if (m_block_map.size() == 0) {
195 m_is_blocking = false;
196 }
197}
198
BaseMasterPort &
AbstractController::getMasterPort(const std::string &if_name,
                                  PortID idx)
{
    // The controller exposes a single master port, so if_name and idx
    // are ignored and memoryPort is returned unconditionally.
    return memoryPort;
}
205
206void
207AbstractController::queueMemoryRead(const MachineID &id, Address addr,
208 Cycles latency)
209{
210 RequestPtr req = new Request(addr.getAddress(),
211 RubySystem::getBlockSizeBytes(), 0,
212 m_masterId);
213
214 PacketPtr pkt = Packet::createRead(req);
215 uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
216 pkt->dataDynamic(newData);
217
218 SenderState *s = new SenderState(id);
219 pkt->pushSenderState(s);
220
221 // Use functional rather than timing accesses during warmup
44{
45 // Set the sender pointer of the response message buffer from the
46 // memory controller.
47 // This pointer is used for querying for the current time.
48 m_responseFromMemory_ptr->setSender(this);
49 m_responseFromMemory_ptr->setReceiver(this);
50 m_responseFromMemory_ptr->setOrdering(false);
51
52 if (m_version == 0) {
53 // Combine the statistics from all controllers
54 // of this particular type.
55 Stats::registerDumpCallback(new StatsCallback(this));
56 }
57}
58
void
AbstractController::init()
{
    // Register this controller with the global RubySystem so that
    // functional accesses and stat handling can reach it.
    params()->ruby_system->registerAbstractController(this);
    // Aggregate message-delay histogram plus one histogram per
    // virtual network (10 buckets each).
    m_delayHistogram.init(10);
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram.push_back(new Stats::Histogram());
        m_delayVCHistogram[i]->init(10);
    }
}
70
71void
72AbstractController::resetStats()
73{
74 m_delayHistogram.reset();
75 uint32_t size = Network::getNumberOfVirtualNetworks();
76 for (uint32_t i = 0; i < size; i++) {
77 m_delayVCHistogram[i]->reset();
78 }
79}
80
81void
82AbstractController::regStats()
83{
84 m_fully_busy_cycles
85 .name(name() + ".fully_busy_cycles")
86 .desc("cycles for which number of transistions == max transitions")
87 .flags(Stats::nozero);
88}
89
90void
91AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
92{
93 assert(virtualNetwork < m_delayVCHistogram.size());
94 m_delayHistogram.sample(delay);
95 m_delayVCHistogram[virtualNetwork]->sample(delay);
96}
97
98void
99AbstractController::stallBuffer(MessageBuffer* buf, Address addr)
100{
101 if (m_waiting_buffers.count(addr) == 0) {
102 MsgVecType* msgVec = new MsgVecType;
103 msgVec->resize(m_in_ports, NULL);
104 m_waiting_buffers[addr] = msgVec;
105 }
106 (*(m_waiting_buffers[addr]))[m_cur_in_port] = buf;
107}
108
109void
110AbstractController::wakeUpBuffers(Address addr)
111{
112 if (m_waiting_buffers.count(addr) > 0) {
113 //
114 // Wake up all possible lower rank (i.e. lower priority) buffers that could
115 // be waiting on this message.
116 //
117 for (int in_port_rank = m_cur_in_port - 1;
118 in_port_rank >= 0;
119 in_port_rank--) {
120 if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
121 (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
122 }
123 }
124 delete m_waiting_buffers[addr];
125 m_waiting_buffers.erase(addr);
126 }
127}
128
void
AbstractController::wakeUpAllBuffers(Address addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up the stalled buffers of *every* input port (unlike
        // wakeUpBuffers, which only wakes ports of lower rank than the
        // current one) that are waiting on this address.
        //
        for (int in_port_rank = m_in_ports - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}
148
void
AbstractController::wakeUpAllBuffers()
{
    //
    // Wake up all possible buffers that could be waiting on any message.
    //

    // Vectors are collected here and deleted in a second pass so the map
    // is not mutated while it is being iterated.
    std::vector<MsgVecType*> wokeUpMsgVecs;

    if(m_waiting_buffers.size() > 0) {
        for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
             buf_iter != m_waiting_buffers.end();
             ++buf_iter) {
            for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
                 vec_iter != buf_iter->second->end();
                 ++vec_iter) {
                if (*vec_iter != NULL) {
                    // Re-examine every stalled message in this buffer.
                    (*vec_iter)->reanalyzeAllMessages();
                }
            }
            wokeUpMsgVecs.push_back(buf_iter->second);
        }

        // Second pass: free the slot vectors now that iteration is done.
        for (std::vector<MsgVecType*>::iterator wb_iter = wokeUpMsgVecs.begin();
             wb_iter != wokeUpMsgVecs.end();
             ++wb_iter) {
            delete (*wb_iter);
        }

        m_waiting_buffers.clear();
    }
}
181
182void
183AbstractController::blockOnQueue(Address addr, MessageBuffer* port)
184{
185 m_is_blocking = true;
186 m_block_map[addr] = port;
187}
188
189void
190AbstractController::unblock(Address addr)
191{
192 m_block_map.erase(addr);
193 if (m_block_map.size() == 0) {
194 m_is_blocking = false;
195 }
196}
197
BaseMasterPort &
AbstractController::getMasterPort(const std::string &if_name,
                                  PortID idx)
{
    // The controller exposes a single master port, so if_name and idx
    // are ignored and memoryPort is returned unconditionally.
    return memoryPort;
}
204
205void
206AbstractController::queueMemoryRead(const MachineID &id, Address addr,
207 Cycles latency)
208{
209 RequestPtr req = new Request(addr.getAddress(),
210 RubySystem::getBlockSizeBytes(), 0,
211 m_masterId);
212
213 PacketPtr pkt = Packet::createRead(req);
214 uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
215 pkt->dataDynamic(newData);
216
217 SenderState *s = new SenderState(id);
218 pkt->pushSenderState(s);
219
220 // Use functional rather than timing accesses during warmup
222 if (m_rubySystem->m_warmup_enabled) {
221 if (RubySystem::getWarmupEnabled()) {
223 memoryPort.sendFunctional(pkt);
224 recvTimingResp(pkt);
225 return;
226 }
227
228 memoryPort.schedTimingReq(pkt, clockEdge(latency));
229}
230
231void
232AbstractController::queueMemoryWrite(const MachineID &id, Address addr,
233 Cycles latency, const DataBlock &block)
234{
235 RequestPtr req = new Request(addr.getAddress(),
236 RubySystem::getBlockSizeBytes(), 0,
237 m_masterId);
238
239 PacketPtr pkt = Packet::createWrite(req);
240 uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
241 pkt->dataDynamic(newData);
242 memcpy(newData, block.getData(0, RubySystem::getBlockSizeBytes()),
243 RubySystem::getBlockSizeBytes());
244
245 SenderState *s = new SenderState(id);
246 pkt->pushSenderState(s);
247
248 // Use functional rather than timing accesses during warmup
222 memoryPort.sendFunctional(pkt);
223 recvTimingResp(pkt);
224 return;
225 }
226
227 memoryPort.schedTimingReq(pkt, clockEdge(latency));
228}
229
230void
231AbstractController::queueMemoryWrite(const MachineID &id, Address addr,
232 Cycles latency, const DataBlock &block)
233{
234 RequestPtr req = new Request(addr.getAddress(),
235 RubySystem::getBlockSizeBytes(), 0,
236 m_masterId);
237
238 PacketPtr pkt = Packet::createWrite(req);
239 uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
240 pkt->dataDynamic(newData);
241 memcpy(newData, block.getData(0, RubySystem::getBlockSizeBytes()),
242 RubySystem::getBlockSizeBytes());
243
244 SenderState *s = new SenderState(id);
245 pkt->pushSenderState(s);
246
247 // Use functional rather than timing accesses during warmup
249 if (m_rubySystem->m_warmup_enabled) {
248 if (RubySystem::getWarmupEnabled()) {
250 memoryPort.sendFunctional(pkt);
251 recvTimingResp(pkt);
252 return;
253 }
254
255 // Create a block and copy data from the block.
256 memoryPort.schedTimingReq(pkt, clockEdge(latency));
257}
258
void
AbstractController::queueMemoryWritePartial(const MachineID &id, Address addr,
                                            Cycles latency,
                                            const DataBlock &block, int size)
{
    // Issue a timing write of 'size' bytes taken from addr's offset
    // within its cache block.
    // NOTE(review): the Request is sized to a full cache block while only
    // 'size' bytes of data are attached -- confirm this is intended.
    RequestPtr req = new Request(addr.getAddress(),
                                 RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[size];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(addr.getOffset(), size), size);

    // Remember the requesting machine so recvTimingResp can route the reply.
    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Create a block and copy data from the block.
    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}
279
void
AbstractController::functionalMemoryRead(PacketPtr pkt)
{
    // Debug/warmup path: satisfy the read directly from memory,
    // bypassing all timing.
    memoryPort.sendFunctional(pkt);
}
285
int
AbstractController::functionalMemoryWrite(PacketPtr pkt)
{
    // Apply a functional write to every place the data may live, and
    // return how many copies were updated.
    int num_functional_writes = 0;

    // Check the message buffer that runs from the memory to the controller.
    num_functional_writes += m_responseFromMemory_ptr->functionalWrite(pkt);

    // Check the buffer from the controller to the memory.
    if (memoryPort.checkFunctional(pkt)) {
        num_functional_writes++;
    }

    // Update memory itself; the +1 below accounts for this write.
    memoryPort.sendFunctional(pkt);
    return num_functional_writes + 1;
}
303
void
AbstractController::recvTimingResp(PacketPtr pkt)
{
    // Convert a memory-system response packet into a MemoryMsg and
    // enqueue it on the buffer feeding this controller's state machine.
    assert(pkt->isResponse());

    std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
    (*msg).m_Addr.setAddress(pkt->getAddr());
    (*msg).m_Sender = m_machineID;

    // Recover the machine that originated the request (pushed by the
    // queueMemory* helpers).
    // NOTE(review): the dynamic_cast result is not null-checked; a packet
    // arriving without a SenderState would dereference NULL here.
    SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
    (*msg).m_OriginalRequestorMachId = s->id;
    delete s;

    if (pkt->isRead()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_READ;
        (*msg).m_MessageSize = MessageSizeType_Response_Data;

        // Copy data from the packet
        (*msg).m_DataBlk.setData(pkt->getPtr<uint8_t>(), 0,
                                 RubySystem::getBlockSizeBytes());
    } else if (pkt->isWrite()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_WB;
        (*msg).m_MessageSize = MessageSizeType_Writeback_Control;
    } else {
        panic("Incorrect packet type received from memory controller!");
    }

    m_responseFromMemory_ptr->enqueue(msg);
    // The packet's payload has been copied into the message; free it.
    delete pkt;
}
334
bool
AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
{
    // Forward the response to the owning controller; always report the
    // packet as accepted (no back-pressure on this port).
    controller->recvTimingResp(pkt);
    return true;
}
341
AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                           AbstractController *_controller,
                                           const std::string &_label)
    // reqQueue and snoopRespQueue are handed to the base class before
    // their own initializers run; presumably QueuedMasterPort only stores
    // the references -- confirm against the base-class contract.
    : QueuedMasterPort(_name, _controller, reqQueue, snoopRespQueue),
      reqQueue(*_controller, *this, _label),
      snoopRespQueue(*_controller, *this, _label),
      controller(_controller)
{
}
249 memoryPort.sendFunctional(pkt);
250 recvTimingResp(pkt);
251 return;
252 }
253
254 // Create a block and copy data from the block.
255 memoryPort.schedTimingReq(pkt, clockEdge(latency));
256}
257
void
AbstractController::queueMemoryWritePartial(const MachineID &id, Address addr,
                                            Cycles latency,
                                            const DataBlock &block, int size)
{
    // Issue a timing write of 'size' bytes taken from addr's offset
    // within its cache block.
    // NOTE(review): the Request is sized to a full cache block while only
    // 'size' bytes of data are attached -- confirm this is intended.
    RequestPtr req = new Request(addr.getAddress(),
                                 RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[size];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(addr.getOffset(), size), size);

    // Remember the requesting machine so recvTimingResp can route the reply.
    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Create a block and copy data from the block.
    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}
278
void
AbstractController::functionalMemoryRead(PacketPtr pkt)
{
    // Debug/warmup path: satisfy the read directly from memory,
    // bypassing all timing.
    memoryPort.sendFunctional(pkt);
}
284
int
AbstractController::functionalMemoryWrite(PacketPtr pkt)
{
    // Apply a functional write to every place the data may live, and
    // return how many copies were updated.
    int num_functional_writes = 0;

    // Check the message buffer that runs from the memory to the controller.
    num_functional_writes += m_responseFromMemory_ptr->functionalWrite(pkt);

    // Check the buffer from the controller to the memory.
    if (memoryPort.checkFunctional(pkt)) {
        num_functional_writes++;
    }

    // Update memory itself; the +1 below accounts for this write.
    memoryPort.sendFunctional(pkt);
    return num_functional_writes + 1;
}
302
void
AbstractController::recvTimingResp(PacketPtr pkt)
{
    // Convert a memory-system response packet into a MemoryMsg and
    // enqueue it on the buffer feeding this controller's state machine.
    assert(pkt->isResponse());

    std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
    (*msg).m_Addr.setAddress(pkt->getAddr());
    (*msg).m_Sender = m_machineID;

    // Recover the machine that originated the request (pushed by the
    // queueMemory* helpers).
    // NOTE(review): the dynamic_cast result is not null-checked; a packet
    // arriving without a SenderState would dereference NULL here.
    SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
    (*msg).m_OriginalRequestorMachId = s->id;
    delete s;

    if (pkt->isRead()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_READ;
        (*msg).m_MessageSize = MessageSizeType_Response_Data;

        // Copy data from the packet
        (*msg).m_DataBlk.setData(pkt->getPtr<uint8_t>(), 0,
                                 RubySystem::getBlockSizeBytes());
    } else if (pkt->isWrite()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_WB;
        (*msg).m_MessageSize = MessageSizeType_Writeback_Control;
    } else {
        panic("Incorrect packet type received from memory controller!");
    }

    m_responseFromMemory_ptr->enqueue(msg);
    // The packet's payload has been copied into the message; free it.
    delete pkt;
}
333
bool
AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
{
    // Forward the response to the owning controller; always report the
    // packet as accepted (no back-pressure on this port).
    controller->recvTimingResp(pkt);
    return true;
}
340
AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                           AbstractController *_controller,
                                           const std::string &_label)
    // reqQueue and snoopRespQueue are handed to the base class before
    // their own initializers run; presumably QueuedMasterPort only stores
    // the references -- confirm against the base-class contract.
    : QueuedMasterPort(_name, _controller, reqQueue, snoopRespQueue),
      reqQueue(*_controller, *this, _label),
      snoopRespQueue(*_controller, *this, _label),
      controller(_controller)
{
}