AbstractController.cc revision 10783:631e736554c9
/*
 * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/protocol/MemoryMsg.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "sim/system.hh"

AbstractController::AbstractController(const Params *p)
    : MemObject(p), Consumer(this), m_version(p->version),
      m_clusterID(p->cluster_id),
      m_masterId(p->system->getMasterId(name())), m_is_blocking(false),
      m_number_of_TBEs(p->number_of_TBEs),
      m_transitions_per_cycle(p->transitions_per_cycle),
      m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
      memoryPort(csprintf("%s.memory", name()), this, ""),
      m_responseFromMemory_ptr(new MessageBuffer()),
      m_rubySystem(p->ruby_system)
{
    // Set the sender and receiver pointers of the response message buffer
    // coming from the memory controller. These pointers are used when
    // querying the current time.
    m_responseFromMemory_ptr->setSender(this);
    m_responseFromMemory_ptr->setReceiver(this);
    m_responseFromMemory_ptr->setOrdering(false);

    if (m_version == 0) {
        // Combine the statistics from all controllers
        // of this particular type.
        Stats::registerDumpCallback(new StatsCallback(this));
    }
}

void
AbstractController::init()
{
    params()->ruby_system->registerAbstractController(this);
    m_delayHistogram.init(10);
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram.push_back(new Stats::Histogram());
        m_delayVCHistogram[i]->init(10);
    }
}

void
AbstractController::resetStats()
{
    m_delayHistogram.reset();
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram[i]->reset();
    }
}

void
AbstractController::regStats()
{
    m_fully_busy_cycles
        .name(name() + ".fully_busy_cycles")
        .desc("cycles for which number of transitions == max transitions")
        .flags(Stats::nozero);
}

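// Record the network delay observed by a message, both in the overall
// delay histogram and in the histogram for its virtual network.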
void
AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
{
    assert(virtualNetwork < m_delayVCHistogram.size());
    m_delayHistogram.sample(delay);
    m_delayVCHistogram[virtualNetwork]->sample(delay);
}

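// Remember that the given buffer is stalled on addr, indexed by the rank of
// the in-port currently being processed, so it can be reanalyzed when the
// address is woken up.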
void
AbstractController::stallBuffer(MessageBuffer* buf, Address addr)
{
    if (m_waiting_buffers.count(addr) == 0) {
        MsgVecType* msgVec = new MsgVecType;
        msgVec->resize(m_in_ports, NULL);
        m_waiting_buffers[addr] = msgVec;
    }
    (*(m_waiting_buffers[addr]))[m_cur_in_port] = buf;
}

void
AbstractController::wakeUpBuffers(Address addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all possible lower rank (i.e. lower priority) buffers
        // that could be waiting on this message.
        //
        for (int in_port_rank = m_cur_in_port - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers(Address addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all buffers, regardless of port rank, that could be
        // waiting on this message.
        //
        for (int in_port_rank = m_in_ports - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers()
{
    //
    // Wake up all possible buffers that could be waiting on any message.
    //

    std::vector<MsgVecType*> wokeUpMsgVecs;

    if (m_waiting_buffers.size() > 0) {
        for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
             buf_iter != m_waiting_buffers.end();
             ++buf_iter) {
             for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
                  vec_iter != buf_iter->second->end();
                  ++vec_iter) {
                  if (*vec_iter != NULL) {
                      (*vec_iter)->reanalyzeAllMessages();
                  }
             }
             wokeUpMsgVecs.push_back(buf_iter->second);
        }

        for (std::vector<MsgVecType*>::iterator wb_iter = wokeUpMsgVecs.begin();
             wb_iter != wokeUpMsgVecs.end();
             ++wb_iter) {
             delete (*wb_iter);
        }

        m_waiting_buffers.clear();
    }
}

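// Mark the controller as blocking and remember which buffer holds the
// message blocked on addr.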
void
AbstractController::blockOnQueue(Address addr, MessageBuffer* port)
{
    m_is_blocking = true;
    m_block_map[addr] = port;
}

void
AbstractController::unblock(Address addr)
{
    m_block_map.erase(addr);
    if (m_block_map.size() == 0) {
        m_is_blocking = false;
    }
}

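// The memory port is the only master port, so it is returned regardless of
// if_name and idx.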
BaseMasterPort &
AbstractController::getMasterPort(const std::string &if_name,
                                  PortID idx)
{
    return memoryPort;
}

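// Issue a block-sized read to memory on behalf of the requesting machine.
// During warmup the access is performed functionally and the response is
// handed back immediately instead of being scheduled on the port.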
void
AbstractController::queueMemoryRead(const MachineID &id, Address addr,
                                    Cycles latency)
{
    RequestPtr req = new Request(addr.getAddress(),
                                 RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createRead(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (m_rubySystem->m_warmup_enabled) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

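// Issue a full-block write to memory, copying the data out of the given
// DataBlock. As with reads, warmup accesses bypass the timing path.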
void
AbstractController::queueMemoryWrite(const MachineID &id, Address addr,
                                     Cycles latency, const DataBlock &block)
{
    RequestPtr req = new Request(addr.getAddress(),
                                 RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(0, RubySystem::getBlockSizeBytes()),
           RubySystem::getBlockSizeBytes());

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (m_rubySystem->m_warmup_enabled) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    // The data has already been copied into the packet; schedule the
    // timing request to memory.
    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

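// Issue a partial write covering 'size' bytes, taken from the DataBlock at
// the block offset of addr.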
void
AbstractController::queueMemoryWritePartial(const MachineID &id, Address addr,
                                            Cycles latency,
                                            const DataBlock &block, int size)
{
    RequestPtr req = new Request(addr.getAddress(),
                                 RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[size];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(addr.getOffset(), size), size);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // The data has already been copied into the packet; schedule the
    // timing request to memory.
    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

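// Satisfy a functional read by forwarding it directly to memory.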
void
AbstractController::functionalMemoryRead(PacketPtr pkt)
{
    memoryPort.sendFunctional(pkt);
}

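// Apply a functional write to every buffered copy of the data and to memory
// itself; the return value counts the updated copies, with the final +1
// accounting for the update of memory.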
int
AbstractController::functionalMemoryWrite(PacketPtr pkt)
{
    int num_functional_writes = 0;

    // Check the message buffer that runs from the memory to the controller.
    num_functional_writes += m_responseFromMemory_ptr->functionalWrite(pkt);

    // Check the buffer from the controller to the memory.
    if (memoryPort.checkFunctional(pkt)) {
        num_functional_writes++;
    }

    // Update memory itself.
    memoryPort.sendFunctional(pkt);
    return num_functional_writes + 1;
}

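// Convert a response packet from memory into a Ruby MemoryMsg and enqueue it
// on the internal response-from-memory buffer.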
void
AbstractController::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
    (*msg).m_Addr.setAddress(pkt->getAddr());
    (*msg).m_Sender = m_machineID;

    SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
    (*msg).m_OriginalRequestorMachId = s->id;
    delete s;

    if (pkt->isRead()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_READ;
        (*msg).m_MessageSize = MessageSizeType_Response_Data;

        // Copy data from the packet
        (*msg).m_DataBlk.setData(pkt->getPtr<uint8_t>(), 0,
                                 RubySystem::getBlockSizeBytes());
    } else if (pkt->isWrite()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_WB;
        (*msg).m_MessageSize = MessageSizeType_Writeback_Control;
    } else {
        panic("Incorrect packet type received from memory controller!");
    }

    m_responseFromMemory_ptr->enqueue(msg);
    delete pkt;
}

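// Forward timing responses arriving on the memory port to the owning
// controller.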
bool
AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
{
    controller->recvTimingResp(pkt);
    return true;
}

AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                           AbstractController *_controller,
                                           const std::string &_label)
    : QueuedMasterPort(_name, _controller, reqQueue, snoopRespQueue),
      reqQueue(*_controller, *this, _label),
      snoopRespQueue(*_controller, *this, _label),
      controller(_controller)
{
}