AbstractController.cc (11793:ef606668d247 -> 12065:e3e51756dfef)
/*
 * Copyright (c) 2017 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/slicc_interface/AbstractController.hh"

#include "debug/RubyQueue.hh"
#include "mem/protocol/MemoryMsg.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/system/GPUCoalescer.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "sim/system.hh"

AbstractController::AbstractController(const Params *p)
    : MemObject(p), Consumer(this), m_version(p->version),
      m_clusterID(p->cluster_id),
      m_masterId(p->system->getMasterId(name())), m_is_blocking(false),
      m_number_of_TBEs(p->number_of_TBEs),
      m_transitions_per_cycle(p->transitions_per_cycle),
      m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
      memoryPort(csprintf("%s.memory", name()), this, ""),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end())
{
    if (m_version == 0) {
        // Combine the statistics from all controllers
        // of this particular type.
        Stats::registerDumpCallback(new StatsCallback(this));
    }
}

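// Called once at simulation start: register this controller with the Ruby
// system and allocate one message-delay histogram per virtual network.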
void
AbstractController::init()
{
    params()->ruby_system->registerAbstractController(this);
    m_delayHistogram.init(10);
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram.push_back(new Stats::Histogram());
        m_delayVCHistogram[i]->init(10);
    }
}

void
AbstractController::resetStats()
{
    m_delayHistogram.reset();
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram[i]->reset();
    }
}

void
AbstractController::regStats()
{
    MemObject::regStats();

    m_fully_busy_cycles
        .name(name() + ".fully_busy_cycles")
        .desc("cycles for which number of transitions == max transitions")
        .flags(Stats::nozero);
}

void
AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
{
    assert(virtualNetwork < m_delayVCHistogram.size());
    m_delayHistogram.sample(delay);
    m_delayVCHistogram[virtualNetwork]->sample(delay);
}

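// Park the buffer feeding the current input port in the per-address
// waiting list; its messages are reanalyzed when a wakeUp* call fires
// for that address.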
void
AbstractController::stallBuffer(MessageBuffer* buf, Addr addr)
{
    if (m_waiting_buffers.count(addr) == 0) {
        MsgVecType* msgVec = new MsgVecType;
        msgVec->resize(m_in_ports, NULL);
        m_waiting_buffers[addr] = msgVec;
    }
    DPRINTF(RubyQueue, "stalling %s port %d addr %#x\n", buf, m_cur_in_port,
            addr);
    assert(m_in_ports > m_cur_in_port);
    (*(m_waiting_buffers[addr]))[m_cur_in_port] = buf;
}

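// Wake only the buffers of strictly lower-ranked (lower priority) input
// ports that were stalled on this address.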
void
AbstractController::wakeUpBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all possible lower rank (i.e. lower priority) buffers
        // that could be waiting on this message.
        //
        for (int in_port_rank = m_cur_in_port - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all buffers, regardless of port rank, that could be
        // waiting on this message.
        //
        for (int in_port_rank = m_in_ports - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

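// Wake every stalled buffer for every address, taking care not to
// reanalyze the same MessageBuffer twice.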
void
AbstractController::wakeUpAllBuffers()
{
    //
    // Wake up all possible buffers that could be waiting on any message.
    //

    std::vector<MsgVecType*> wokeUpMsgVecs;
    MsgBufType wokeUpMsgBufs;

    if (m_waiting_buffers.size() > 0) {
        for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
             buf_iter != m_waiting_buffers.end();
             ++buf_iter) {
            for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
                 vec_iter != buf_iter->second->end();
                 ++vec_iter) {
                //
                // Make sure the MessageBuffer has not already been reanalyzed
                //
                if (*vec_iter != NULL &&
                    (wokeUpMsgBufs.count(*vec_iter) == 0)) {
                    (*vec_iter)->reanalyzeAllMessages(clockEdge());
                    wokeUpMsgBufs.insert(*vec_iter);
                }
            }
            wokeUpMsgVecs.push_back(buf_iter->second);
        }

        for (std::vector<MsgVecType*>::iterator wb_iter = wokeUpMsgVecs.begin();
             wb_iter != wokeUpMsgVecs.end();
             ++wb_iter) {
            delete (*wb_iter);
        }

        m_waiting_buffers.clear();
    }
}

void
AbstractController::blockOnQueue(Addr addr, MessageBuffer* port)
{
    m_is_blocking = true;
    m_block_map[addr] = port;
}

bool
AbstractController::isBlocked(Addr addr) const
{
    return m_is_blocking && (m_block_map.find(addr) != m_block_map.end());
}

void
AbstractController::unblock(Addr addr)
{
    m_block_map.erase(addr);
    if (m_block_map.size() == 0) {
        m_is_blocking = false;
    }
}

bool
AbstractController::isBlocked(Addr addr)
{
    return (m_block_map.count(addr) > 0);
}

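// The controller exposes a single master port (to memory), so if_name and
// idx are ignored.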
BaseMasterPort &
AbstractController::getMasterPort(const std::string &if_name,
                                  PortID idx)
{
    return memoryPort;
}

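// Issue a cache-line-sized read to memory. The requestor's MachineID is
// carried in the packet's SenderState so recvTimingResp() can route the
// response back.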
void
AbstractController::queueMemoryRead(const MachineID &id, Addr addr,
                                    Cycles latency)
{
    RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createRead(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::queueMemoryWrite(const MachineID &id, Addr addr,
                                     Cycles latency, const DataBlock &block)
{
    RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(0, RubySystem::getBlockSizeBytes()),
           RubySystem::getBlockSizeBytes());

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    // Schedule the write to issue at clockEdge(latency).
    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::queueMemoryWritePartial(const MachineID &id, Addr addr,
                                            Cycles latency,
                                            const DataBlock &block, int size)
{
    RequestPtr req = new Request(addr, size, 0, m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[size];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(getOffset(addr), size), size);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Schedule the partial write to issue at clockEdge(latency).
    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::functionalMemoryRead(PacketPtr pkt)
{
    memoryPort.sendFunctional(pkt);
}

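// Apply a functional write to any matching packet queued in the memory
// port, then to memory itself; the return value counts both.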
int
AbstractController::functionalMemoryWrite(PacketPtr pkt)
{
    int num_functional_writes = 0;

    // Check the buffer from the controller to the memory.
    if (memoryPort.checkFunctional(pkt)) {
        num_functional_writes++;
    }

    // Update memory itself.
    memoryPort.sendFunctional(pkt);
    return num_functional_writes + 1;
}

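// Turn a response packet from memory back into a Ruby MemoryMsg and
// enqueue it on this controller's memory queue.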
void
AbstractController::recvTimingResp(PacketPtr pkt)
{
    assert(getMemoryQueue());
    assert(pkt->isResponse());

    std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
    (*msg).m_addr = pkt->getAddr();
    (*msg).m_Sender = m_machineID;

    SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
    (*msg).m_OriginalRequestorMachId = s->id;
    delete s;

    if (pkt->isRead()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_READ;
        (*msg).m_MessageSize = MessageSizeType_Response_Data;

        // Copy data from the packet
        (*msg).m_DataBlk.setData(pkt->getPtr<uint8_t>(), 0,
                                 RubySystem::getBlockSizeBytes());
    } else if (pkt->isWrite()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_WB;
        (*msg).m_MessageSize = MessageSizeType_Writeback_Control;
    } else {
        panic("Incorrect packet type received from memory controller!");
    }

    getMemoryQueue()->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
    delete pkt->req;
    delete pkt;
}

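// Ask the network which node of machine type 'mtype' services 'addr' and
// return the corresponding MachineID.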
MachineID
AbstractController::mapAddressToMachine(Addr addr, MachineType mtype) const
{
    NodeID node = m_net_ptr->addressToNodeID(addr, mtype);
    MachineID mach = {mtype, node};
    return mach;
}

bool
AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
{
    controller->recvTimingResp(pkt);
    return true;
}

AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                           AbstractController *_controller,
                                           const std::string &_label)
    : QueuedMasterPort(_name, _controller, reqQueue, snoopRespQueue),
      reqQueue(*_controller, *this, _label),
      snoopRespQueue(*_controller, *this, _label),
      controller(_controller)
{
}