/*
 * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/slicc_interface/AbstractController.hh"

#include "debug/RubyQueue.hh"
#include "mem/protocol/MemoryMsg.hh"
#include "mem/ruby/system/GPUCoalescer.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "sim/system.hh"

AbstractController::AbstractController(const Params *p)
    : MemObject(p), Consumer(this), m_version(p->version),
      m_clusterID(p->cluster_id),
      m_masterId(p->system->getMasterId(name())), m_is_blocking(false),
      m_number_of_TBEs(p->number_of_TBEs),
      m_transitions_per_cycle(p->transitions_per_cycle),
      m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
      memoryPort(csprintf("%s.memory", name()), this, "")
{
    if (m_version == 0) {
        // Combine the statistics from all controllers
        // of this particular type.
        Stats::registerDumpCallback(new StatsCallback(this));
    }
}

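// Complete the setup that must wait until after construction: register
// this controller with the parent RubySystem and allocate one message
// delay histogram per virtual network.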
void
AbstractController::init()
{
    params()->ruby_system->registerAbstractController(this);
    m_delayHistogram.init(10);
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram.push_back(new Stats::Histogram());
        m_delayVCHistogram[i]->init(10);
    }
}

void
AbstractController::resetStats()
{
    m_delayHistogram.reset();
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram[i]->reset();
    }
}

void
AbstractController::regStats()
{
    MemObject::regStats();

    m_fully_busy_cycles
        .name(name() + ".fully_busy_cycles")
        .desc("cycles for which number of transitions == max transitions")
        .flags(Stats::nozero);
}

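// Record the network delay observed by a message, both in the overall
// delay histogram and in the histogram for its virtual network.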
void
AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
{
    assert(virtualNetwork < m_delayVCHistogram.size());
    m_delayHistogram.sample(delay);
    m_delayVCHistogram[virtualNetwork]->sample(delay);
}

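// Mark the buffer behind the current input port as stalled on addr. The
// buffer is parked in m_waiting_buffers, indexed by the port's rank,
// until one of the wakeUp* methods below reanalyzes its messages.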
void
AbstractController::stallBuffer(MessageBuffer* buf, Addr addr)
{
    if (m_waiting_buffers.count(addr) == 0) {
        MsgVecType* msgVec = new MsgVecType;
        msgVec->resize(m_in_ports, NULL);
        m_waiting_buffers[addr] = msgVec;
    }
    DPRINTF(RubyQueue, "stalling %s port %d addr %#x\n", buf, m_cur_in_port,
            addr);
    assert(m_in_ports > m_cur_in_port);
    (*(m_waiting_buffers[addr]))[m_cur_in_port] = buf;
}

void
AbstractController::wakeUpBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all possible lower rank (i.e. lower priority) buffers
        // that could be waiting on this message.
        //
        for (int in_port_rank = m_cur_in_port - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up the buffers at every rank that could be waiting on
        // this message.
        //
        for (int in_port_rank = m_in_ports - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers()
{
    //
    // Wake up all possible buffers that could be waiting on any message.
    //

    std::vector<MsgVecType*> wokeUpMsgVecs;
    MsgBufType wokeUpMsgBufs;

    if (m_waiting_buffers.size() > 0) {
        for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
             buf_iter != m_waiting_buffers.end();
             ++buf_iter) {
            for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
                 vec_iter != buf_iter->second->end();
                 ++vec_iter) {
                //
                // Make sure the MessageBuffer has not already been
                // reanalyzed.
                //
                if (*vec_iter != NULL &&
                    (wokeUpMsgBufs.count(*vec_iter) == 0)) {
                    (*vec_iter)->reanalyzeAllMessages(clockEdge());
                    wokeUpMsgBufs.insert(*vec_iter);
                }
            }
            wokeUpMsgVecs.push_back(buf_iter->second);
        }

        for (std::vector<MsgVecType*>::iterator wb_iter = wokeUpMsgVecs.begin();
             wb_iter != wokeUpMsgVecs.end();
             ++wb_iter) {
            delete (*wb_iter);
        }

        m_waiting_buffers.clear();
    }
}

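// Address-based blocking: while addr is blocked, incoming requests for
// it are diverted to the given buffer; unblock() releases the address.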
void
AbstractController::blockOnQueue(Addr addr, MessageBuffer* port)
{
    m_is_blocking = true;
    m_block_map[addr] = port;
}

bool
AbstractController::isBlocked(Addr addr) const
{
    return m_is_blocking && (m_block_map.find(addr) != m_block_map.end());
}

void
AbstractController::unblock(Addr addr)
{
    m_block_map.erase(addr);
    if (m_block_map.size() == 0) {
        m_is_blocking = false;
    }
}

bool
AbstractController::isBlocked(Addr addr)
{
    // Unlike the const overload above, this variant checks only the
    // block map and ignores the m_is_blocking flag.
    return (m_block_map.count(addr) > 0);
}

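// The controller exposes a single master port, so any if_name resolves
// to the memory port.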
BaseMasterPort &
AbstractController::getMasterPort(const std::string &if_name,
                                  PortID idx)
{
    return memoryPort;
}

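// Issue a full-cache-line read to memory on behalf of machine id. The
// requestor's MachineID rides along in the packet's SenderState so that
// recvTimingResp() can route the response back to it.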
void
AbstractController::queueMemoryRead(const MachineID &id, Addr addr,
                                    Cycles latency)
{
    RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createRead(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

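// Issue a full-cache-line writeback to memory: copy the data block into
// a freshly allocated buffer owned by the packet, then schedule the
// timing request (or short-circuit it functionally during warmup).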
void
AbstractController::queueMemoryWrite(const MachineID &id, Addr addr,
                                     Cycles latency, const DataBlock &block)
{
    RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(0, RubySystem::getBlockSizeBytes()),
           RubySystem::getBlockSizeBytes());

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

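// Partial-line variant of queueMemoryWrite: writes only size bytes,
// taken from the block at addr's offset within its cache line. Note
// that, unlike the full-line write above, this path has no functional
// warmup shortcut.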
void
AbstractController::queueMemoryWritePartial(const MachineID &id, Addr addr,
                                            Cycles latency,
                                            const DataBlock &block, int size)
{
    RequestPtr req = new Request(addr, size, 0, m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[size];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(getOffset(addr), size), size);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::functionalMemoryRead(PacketPtr pkt)
{
    memoryPort.sendFunctional(pkt);
}

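// Apply a functional write along this controller's path to memory.
// Returns the number of places the write landed: one if it hit a packet
// still queued in the memory port, plus one for memory itself.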
int
AbstractController::functionalMemoryWrite(PacketPtr pkt)
{
    int num_functional_writes = 0;

    // Check the buffer from the controller to the memory.
    if (memoryPort.checkFunctional(pkt)) {
        num_functional_writes++;
    }

    // Update memory itself.
    memoryPort.sendFunctional(pkt);
    return num_functional_writes + 1;
}

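// Convert a response packet from the memory system into a MemoryMsg and
// enqueue it on the controller's memory queue, recovering the original
// requestor from the SenderState pushed by the queueMemory* methods.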
void
AbstractController::recvTimingResp(PacketPtr pkt)
{
    assert(getMemoryQueue());
    assert(pkt->isResponse());

    std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
    (*msg).m_addr = pkt->getAddr();
    (*msg).m_Sender = m_machineID;

    SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
    (*msg).m_OriginalRequestorMachId = s->id;
    delete s;

    if (pkt->isRead()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_READ;
        (*msg).m_MessageSize = MessageSizeType_Response_Data;

        // Copy data from the packet
        (*msg).m_DataBlk.setData(pkt->getPtr<uint8_t>(), 0,
                                 RubySystem::getBlockSizeBytes());
    } else if (pkt->isWrite()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_WB;
        (*msg).m_MessageSize = MessageSizeType_Writeback_Control;
    } else {
        panic("Incorrect packet type received from memory controller!");
    }

    getMemoryQueue()->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
    delete pkt->req;
    delete pkt;
}

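// Timing responses arriving on the port are handed straight to the
// owning controller; returning true tells the peer the packet was
// accepted.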
bool
AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
{
    controller->recvTimingResp(pkt);
    return true;
}

AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                           AbstractController *_controller,
                                           const std::string &_label)
    : QueuedMasterPort(_name, _controller, reqQueue, snoopRespQueue),
      reqQueue(*_controller, *this, _label),
      snoopRespQueue(*_controller, *this, _label),
      controller(_controller)
{
}