AbstractController.cc revision 11111:6da33e720481
/*
 * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/slicc_interface/AbstractController.hh"

#include "debug/RubyQueue.hh"
#include "mem/protocol/MemoryMsg.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "sim/system.hh"

AbstractController::AbstractController(const Params *p)
    : MemObject(p), Consumer(this), m_version(p->version),
      m_clusterID(p->cluster_id),
      m_masterId(p->system->getMasterId(name())), m_is_blocking(false),
      m_number_of_TBEs(p->number_of_TBEs),
      m_transitions_per_cycle(p->transitions_per_cycle),
      m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
      memoryPort(csprintf("%s.memory", name()), this, "")
{
    if (m_version == 0) {
        // Combine the statistics from all controllers
        // of this particular type.
        Stats::registerDumpCallback(new StatsCallback(this));
    }
}

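// Register this controller with the RubySystem and initialize the message
// delay histograms: one overall histogram plus one per virtual network.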
void
AbstractController::init()
{
    params()->ruby_system->registerAbstractController(this);
    m_delayHistogram.init(10);
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram.push_back(new Stats::Histogram());
        m_delayVCHistogram[i]->init(10);
    }
}

void
AbstractController::resetStats()
{
    m_delayHistogram.reset();
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram[i]->reset();
    }
}

void
AbstractController::regStats()
{
    m_fully_busy_cycles
        .name(name() + ".fully_busy_cycles")
        .desc("cycles for which number of transitions == max transitions")
        .flags(Stats::nozero);
}

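// Record the network delay of an incoming message in the overall histogram
// and in the histogram of its virtual network.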
void
AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
{
    assert(virtualNetwork < m_delayVCHistogram.size());
    m_delayHistogram.sample(delay);
    m_delayVCHistogram[virtualNetwork]->sample(delay);
}

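// Remember that the buffer of the in port currently being processed is
// stalled on 'addr', so that wakeUpBuffers()/wakeUpAllBuffers() can
// reanalyze its messages later.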
void
AbstractController::stallBuffer(MessageBuffer* buf, Addr addr)
{
    if (m_waiting_buffers.count(addr) == 0) {
        MsgVecType* msgVec = new MsgVecType;
        msgVec->resize(m_in_ports, NULL);
        m_waiting_buffers[addr] = msgVec;
    }
    DPRINTF(RubyQueue, "stalling %s port %d addr %s\n", buf, m_cur_in_port,
            addr);
    assert(m_in_ports > m_cur_in_port);
    (*(m_waiting_buffers[addr]))[m_cur_in_port] = buf;
}

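// Wake up the buffers stalled on 'addr' that belong to in ports of lower
// rank than the one currently being processed.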
void
AbstractController::wakeUpBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all possible lower rank (i.e. lower priority) buffers
        // that could be waiting on this message.
        //
        for (int in_port_rank = m_cur_in_port - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

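// Wake up every buffer stalled on 'addr', regardless of in port rank.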
void
AbstractController::wakeUpAllBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all buffers, regardless of port rank, that could be
        // waiting on this message.
        //
        for (int in_port_rank = m_in_ports - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers()
{
    //
    // Wake up all possible buffers that could be waiting on any message.
    //

    std::vector<MsgVecType*> wokeUpMsgVecs;
    MsgBufType wokeUpMsgBufs;

    if (m_waiting_buffers.size() > 0) {
        for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
             buf_iter != m_waiting_buffers.end();
             ++buf_iter) {
             for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
                  vec_iter != buf_iter->second->end();
                  ++vec_iter) {
                  //
                  // Make sure the MessageBuffer has not already been
                  // reanalyzed.
                  //
                  if (*vec_iter != NULL &&
                      (wokeUpMsgBufs.count(*vec_iter) == 0)) {
                      (*vec_iter)->reanalyzeAllMessages(clockEdge());
                      wokeUpMsgBufs.insert(*vec_iter);
                  }
             }
             wokeUpMsgVecs.push_back(buf_iter->second);
        }

        for (std::vector<MsgVecType*>::iterator wb_iter = wokeUpMsgVecs.begin();
             wb_iter != wokeUpMsgVecs.end();
             ++wb_iter) {
             delete (*wb_iter);
        }

        m_waiting_buffers.clear();
    }
}

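// Mark 'addr' as blocked behind the given message buffer; the controller
// stays in blocking mode until unblock() removes the last blocked address.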
void
AbstractController::blockOnQueue(Addr addr, MessageBuffer* port)
{
    m_is_blocking = true;
    m_block_map[addr] = port;
}

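// Remove the block on 'addr' and leave blocking mode once no blocked
// addresses remain.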
void
AbstractController::unblock(Addr addr)
{
    m_block_map.erase(addr);
    if (m_block_map.size() == 0) {
        m_is_blocking = false;
    }
}

BaseMasterPort &
AbstractController::getMasterPort(const std::string &if_name,
                                  PortID idx)
{
    return memoryPort;
}

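// Issue a cache-block-sized read to memory on behalf of the requestor 'id'.
// During warmup the access is performed functionally and the response is
// handled immediately; otherwise a timing request is scheduled on the
// memory port after 'latency' cycles.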
void
AbstractController::queueMemoryRead(const MachineID &id, Addr addr,
                                    Cycles latency)
{
    RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createRead(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

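// Issue a full cache-block write to memory, copying the data out of the
// given DataBlock. Warmup accesses are functional, as in queueMemoryRead().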
void
AbstractController::queueMemoryWrite(const MachineID &id, Addr addr,
                                     Cycles latency, const DataBlock &block)
{
    RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(0, RubySystem::getBlockSizeBytes()),
           RubySystem::getBlockSizeBytes());

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

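// Issue a partial write of 'size' bytes starting at the block offset of
// 'addr', copying the data from the given DataBlock.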
void
AbstractController::queueMemoryWritePartial(const MachineID &id, Addr addr,
                                            Cycles latency,
                                            const DataBlock &block, int size)
{
    // The request covers only the bytes actually being written.
    RequestPtr req = new Request(addr, size, 0, m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[size];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(getOffset(addr), size), size);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::functionalMemoryRead(PacketPtr pkt)
{
    memoryPort.sendFunctional(pkt);
}

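// Apply a functional write to any request packets queued in the memory port
// and to memory itself; return the total number of writes performed.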
int
AbstractController::functionalMemoryWrite(PacketPtr pkt)
{
    int num_functional_writes = 0;

    // Check the buffer from the controller to the memory.
    if (memoryPort.checkFunctional(pkt)) {
        num_functional_writes++;
    }

    // Update memory itself.
    memoryPort.sendFunctional(pkt);
    return num_functional_writes + 1;
}

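// Convert a response packet from memory back into a Ruby MemoryMsg and
// enqueue it on the controller's memory queue for the protocol to consume.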
void
AbstractController::recvTimingResp(PacketPtr pkt)
{
    assert(getMemoryQueue());
    assert(pkt->isResponse());

    std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
    (*msg).m_addr = pkt->getAddr();
    (*msg).m_Sender = m_machineID;

    SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
    (*msg).m_OriginalRequestorMachId = s->id;
    delete s;

    if (pkt->isRead()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_READ;
        (*msg).m_MessageSize = MessageSizeType_Response_Data;

        // Copy data from the packet
        (*msg).m_DataBlk.setData(pkt->getPtr<uint8_t>(), 0,
                                 RubySystem::getBlockSizeBytes());
    } else if (pkt->isWrite()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_WB;
        (*msg).m_MessageSize = MessageSizeType_Writeback_Control;
    } else {
        panic("Incorrect packet type received from memory controller!");
    }

    getMemoryQueue()->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
    delete pkt;
}

bool
AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
{
    controller->recvTimingResp(pkt);
    return true;
}

AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                           AbstractController *_controller,
                                           const std::string &_label)
    : QueuedMasterPort(_name, _controller, reqQueue, snoopRespQueue),
      reqQueue(*_controller, *this, _label),
      snoopRespQueue(*_controller, *this, _label),
      controller(_controller)
{
}