AbstractController.cc (as of changeset 11111:6da33e720481)
/*
 * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/slicc_interface/AbstractController.hh"

#include "debug/RubyQueue.hh"
#include "mem/protocol/MemoryMsg.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "sim/system.hh"

AbstractController::AbstractController(const Params *p)
    : MemObject(p), Consumer(this), m_version(p->version),
      m_clusterID(p->cluster_id),
      m_masterId(p->system->getMasterId(name())), m_is_blocking(false),
      m_number_of_TBEs(p->number_of_TBEs),
      m_transitions_per_cycle(p->transitions_per_cycle),
      m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
      memoryPort(csprintf("%s.memory", name()), this, "")
{
    if (m_version == 0) {
        // Combine the statistics from all controllers
        // of this particular type.
        Stats::registerDumpCallback(new StatsCallback(this));
    }
}

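// Register this controller with the Ruby system and set up the
// aggregate and per-virtual-network message delay histograms.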
void
AbstractController::init()
{
    params()->ruby_system->registerAbstractController(this);
    m_delayHistogram.init(10);
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram.push_back(new Stats::Histogram());
        m_delayVCHistogram[i]->init(10);
    }
}

void
AbstractController::resetStats()
{
    m_delayHistogram.reset();
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram[i]->reset();
    }
}

void
AbstractController::regStats()
{
    m_fully_busy_cycles
        .name(name() + ".fully_busy_cycles")
        .desc("cycles for which number of transitions == max transitions")
        .flags(Stats::nozero);
}

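// Record the network delay seen by an incoming message, both overall
// and per virtual network.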
void
AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
{
    assert(virtualNetwork < m_delayVCHistogram.size());
    m_delayHistogram.sample(delay);
    m_delayVCHistogram[virtualNetwork]->sample(delay);
}

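// Remember that the current in-port's buffer is stalled on this
// address; its messages are reanalyzed when the address is woken up.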
void
AbstractController::stallBuffer(MessageBuffer* buf, Addr addr)
{
    if (m_waiting_buffers.count(addr) == 0) {
        MsgVecType* msgVec = new MsgVecType;
        msgVec->resize(m_in_ports, NULL);
        m_waiting_buffers[addr] = msgVec;
    }
    DPRINTF(RubyQueue, "stalling %s port %d addr %s\n", buf, m_cur_in_port,
            addr);
    assert(m_in_ports > m_cur_in_port);
    (*(m_waiting_buffers[addr]))[m_cur_in_port] = buf;
}

void
AbstractController::wakeUpBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all possible lower rank (i.e. lower priority) buffers that
        // could be waiting on this message.
        //
        for (int in_port_rank = m_cur_in_port - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

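// Wake up the waiting buffers for this address across all in-port
// ranks, not just those below the current port's rank.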
void
AbstractController::wakeUpAllBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all possible lower rank (i.e. lower priority) buffers that
        // could be waiting on this message.
        //
        for (int in_port_rank = m_in_ports - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers()
{
    //
    // Wake up all possible buffers that could be waiting on any message.
    //

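    // Track which buffers and buffer vectors have been handled so that
    // each stalled MessageBuffer is reanalyzed only once.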
    std::vector<MsgVecType*> wokeUpMsgVecs;
    MsgBufType wokeUpMsgBufs;

    if (m_waiting_buffers.size() > 0) {
        for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
             buf_iter != m_waiting_buffers.end();
             ++buf_iter) {
            for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
                 vec_iter != buf_iter->second->end();
                 ++vec_iter) {
                //
                // Make sure the MessageBuffer has not already been reanalyzed
                //
                if (*vec_iter != NULL &&
                    (wokeUpMsgBufs.count(*vec_iter) == 0)) {
                    (*vec_iter)->reanalyzeAllMessages(clockEdge());
                    wokeUpMsgBufs.insert(*vec_iter);
                }
            }
            wokeUpMsgVecs.push_back(buf_iter->second);
        }

        for (std::vector<MsgVecType*>::iterator wb_iter = wokeUpMsgVecs.begin();
             wb_iter != wokeUpMsgVecs.end();
             ++wb_iter) {
            delete (*wb_iter);
        }

        m_waiting_buffers.clear();
    }
}

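// Mark the address as blocked; messages for it are held on the given
// queue until unblock() is called.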
void
AbstractController::blockOnQueue(Addr addr, MessageBuffer* port)
{
    m_is_blocking = true;
    m_block_map[addr] = port;
}

void
AbstractController::unblock(Addr addr)
{
    m_block_map.erase(addr);
    if (m_block_map.size() == 0) {
        m_is_blocking = false;
    }
}

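// The memory port is this controller's only master port, so it is
// returned regardless of the requested interface name.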
BaseMasterPort &
AbstractController::getMasterPort(const std::string &if_name,
                                  PortID idx)
{
    return memoryPort;
}

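// Issue a cache-block-sized read to memory on behalf of the requestor
// in 'id'; the response arrives through recvTimingResp().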
void
AbstractController::queueMemoryRead(const MachineID &id, Addr addr,
                                    Cycles latency)
{
    RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createRead(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::queueMemoryWrite(const MachineID &id, Addr addr,
                                     Cycles latency, const DataBlock &block)
{
    RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(0, RubySystem::getBlockSizeBytes()),
           RubySystem::getBlockSizeBytes());

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

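// Issue a write of only 'size' bytes, taken from 'block' at the offset
// of 'addr', rather than a full cache block.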
void
AbstractController::queueMemoryWritePartial(const MachineID &id, Addr addr,
                                            Cycles latency,
                                            const DataBlock &block, int size)
{
    RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[size];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(getOffset(addr), size), size);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::functionalMemoryRead(PacketPtr pkt)
{
    memoryPort.sendFunctional(pkt);
}

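// Apply a functional write to any packet queued on the memory port,
// then update memory itself; returns the number of writes performed.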
int
AbstractController::functionalMemoryWrite(PacketPtr pkt)
{
    int num_functional_writes = 0;

    // Check the buffer from the controller to the memory.
    if (memoryPort.checkFunctional(pkt)) {
        num_functional_writes++;
    }

    // Update memory itself.
    memoryPort.sendFunctional(pkt);
    return num_functional_writes + 1;
}

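// Convert a memory response packet into a MemoryMsg and deliver it to
// the controller's memory queue.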
void
AbstractController::recvTimingResp(PacketPtr pkt)
{
    assert(getMemoryQueue());
    assert(pkt->isResponse());

    std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
    (*msg).m_addr = pkt->getAddr();
    (*msg).m_Sender = m_machineID;

    SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
    (*msg).m_OriginalRequestorMachId = s->id;
    delete s;

    if (pkt->isRead()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_READ;
        (*msg).m_MessageSize = MessageSizeType_Response_Data;

        // Copy data from the packet
        (*msg).m_DataBlk.setData(pkt->getPtr<uint8_t>(), 0,
                                 RubySystem::getBlockSizeBytes());
    } else if (pkt->isWrite()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_WB;
        (*msg).m_MessageSize = MessageSizeType_Writeback_Control;
    } else {
        panic("Incorrect packet type received from memory controller!");
    }

    getMemoryQueue()->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
    delete pkt;
}

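// Hand timing responses from the memory system over to the controller.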
bool
AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
{
    controller->recvTimingResp(pkt);
    return true;
}

AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                           AbstractController *_controller,
                                           const std::string &_label)
    : QueuedMasterPort(_name, _controller, reqQueue, snoopRespQueue),
      reqQueue(*_controller, *this, _label),
      snoopRespQueue(*_controller, *this, _label),
      controller(_controller)
{
}