AbstractController.cc revision 11448:8d94df4c9da4
/*
 * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/slicc_interface/AbstractController.hh"

#include "debug/RubyQueue.hh"
#include "mem/protocol/MemoryMsg.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/GPUCoalescer.hh"
#include "sim/system.hh"

AbstractController::AbstractController(const Params *p)
    : MemObject(p), Consumer(this), m_version(p->version),
      m_clusterID(p->cluster_id),
      m_masterId(p->system->getMasterId(name())), m_is_blocking(false),
      m_number_of_TBEs(p->number_of_TBEs),
      m_transitions_per_cycle(p->transitions_per_cycle),
      m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
      memoryPort(csprintf("%s.memory", name()), this, "")
{
    if (m_version == 0) {
        // Combine the statistics from all controllers
        // of this particular type.
        Stats::registerDumpCallback(new StatsCallback(this));
    }
}

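// init() runs after all SimObjects are constructed: it registers this
// controller with the RubySystem and sizes the message-delay histograms,
// one aggregate histogram plus one per virtual network.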
void
AbstractController::init()
{
    params()->ruby_system->registerAbstractController(this);
    m_delayHistogram.init(10);
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram.push_back(new Stats::Histogram());
        m_delayVCHistogram[i]->init(10);
    }
}

void
AbstractController::resetStats()
{
    m_delayHistogram.reset();
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram[i]->reset();
    }
}

void
AbstractController::regStats()
{
    m_fully_busy_cycles
        .name(name() + ".fully_busy_cycles")
        .desc("cycles for which number of transitions == max transitions")
        .flags(Stats::nozero);
}

void
AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
{
    assert(virtualNetwork < m_delayVCHistogram.size());
    m_delayHistogram.sample(delay);
    m_delayVCHistogram[virtualNetwork]->sample(delay);
}

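// Stall/wake-up bookkeeping: m_waiting_buffers maps a line address to a
// vector indexed by input-port rank, and each slot records the
// MessageBuffer stalled at that port for the address. A generated SLICC
// controller typically stalls the offending in_port and wakes it up once
// the blocking transition completes, e.g. (illustrative sketch only, with
// a hypothetical buffer name, not actual generated code):
//
//     stallBuffer(&requestQueue_in, addr);  // transition cannot proceed
//     ...
//     wakeUpBuffers(addr);                  // line is usable again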
void
AbstractController::stallBuffer(MessageBuffer* buf, Addr addr)
{
    if (m_waiting_buffers.count(addr) == 0) {
        MsgVecType* msgVec = new MsgVecType;
        msgVec->resize(m_in_ports, NULL);
        m_waiting_buffers[addr] = msgVec;
    }
    DPRINTF(RubyQueue, "stalling %s port %d addr %#x\n", buf, m_cur_in_port,
            addr);
    assert(m_in_ports > m_cur_in_port);
    (*(m_waiting_buffers[addr]))[m_cur_in_port] = buf;
}

void
AbstractController::wakeUpBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all possible lower rank (i.e. lower priority) buffers
        // that could be waiting on this message.
        //
        for (int in_port_rank = m_cur_in_port - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

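// Unlike wakeUpBuffers(), this variant reanalyzes stalled buffers at every
// input-port rank for the given address, not just the ranks below the
// currently active port.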
void
AbstractController::wakeUpAllBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all buffers, at any rank, that could be waiting on this
        // message.
        //
        for (int in_port_rank = m_in_ports - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers()
{
    //
    // Wake up all possible buffers that could be waiting on any message.
    //

    std::vector<MsgVecType*> wokeUpMsgVecs;
    MsgBufType wokeUpMsgBufs;

    if (m_waiting_buffers.size() > 0) {
        for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
             buf_iter != m_waiting_buffers.end();
             ++buf_iter) {
             for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
                  vec_iter != buf_iter->second->end();
                  ++vec_iter) {
                  //
                  // Make sure the MessageBuffer has not already been
                  // reanalyzed.
                  //
                  if (*vec_iter != NULL &&
                      (wokeUpMsgBufs.count(*vec_iter) == 0)) {
                      (*vec_iter)->reanalyzeAllMessages(clockEdge());
                      wokeUpMsgBufs.insert(*vec_iter);
                  }
             }
             wokeUpMsgVecs.push_back(buf_iter->second);
        }

        for (std::vector<MsgVecType*>::iterator wb_iter = wokeUpMsgVecs.begin();
             wb_iter != wokeUpMsgVecs.end();
             ++wb_iter) {
             delete (*wb_iter);
        }

        m_waiting_buffers.clear();
    }
}

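// Address blocking: blockOnQueue() records that an address is blocked on a
// particular MessageBuffer, isBlocked() queries that state, and unblock()
// clears the entry once the blocking transition has finished.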
void
AbstractController::blockOnQueue(Addr addr, MessageBuffer* port)
{
    m_is_blocking = true;
    m_block_map[addr] = port;
}

bool
AbstractController::isBlocked(Addr addr) const
{
    return m_is_blocking && (m_block_map.find(addr) != m_block_map.end());
}

void
AbstractController::unblock(Addr addr)
{
    m_block_map.erase(addr);
    if (m_block_map.size() == 0) {
        m_is_blocking = false;
    }
}

bool
AbstractController::isBlocked(Addr addr)
{
    return (m_block_map.count(addr) > 0);
}

BaseMasterPort &
AbstractController::getMasterPort(const std::string &if_name,
                                  PortID idx)
{
    return memoryPort;
}

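// Requests to memory: each helper below builds a Request/Packet pair, tags
// it with a SenderState carrying the requesting machine's MachineID, and
// hands it to memoryPort. queueMemoryRead() and queueMemoryWrite() fall
// back to functional accesses during warmup and feed the response straight
// into recvTimingResp(); otherwise the packet is scheduled as a timing
// request after 'latency' cycles. A directory controller might, for
// example, call queueMemoryRead(machineID, address, toMemLatency) when a
// fetch misses (illustrative call only; parameter names vary by protocol).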
void
AbstractController::queueMemoryRead(const MachineID &id, Addr addr,
                                    Cycles latency)
{
    RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createRead(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::queueMemoryWrite(const MachineID &id, Addr addr,
                                     Cycles latency, const DataBlock &block)
{
    RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(0, RubySystem::getBlockSizeBytes()),
           RubySystem::getBlockSizeBytes());

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    // The data was copied into the packet above; schedule the timing write.
    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::queueMemoryWritePartial(const MachineID &id, Addr addr,
                                            Cycles latency,
                                            const DataBlock &block, int size)
{
    RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[size];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(getOffset(addr), size), size);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // The partial data was copied into the packet above; schedule the
    // timing write.
    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

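// Functional accesses bypass timing: reads go straight to memory, while
// functional writes are also checked against packets still queued inside
// memoryPort so that in-flight data is updated as well.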
void
AbstractController::functionalMemoryRead(PacketPtr pkt)
{
    memoryPort.sendFunctional(pkt);
}

int
AbstractController::functionalMemoryWrite(PacketPtr pkt)
{
    int num_functional_writes = 0;

    // Check the buffer from the controller to the memory.
    if (memoryPort.checkFunctional(pkt)) {
        num_functional_writes++;
    }

    // Update memory itself.
    memoryPort.sendFunctional(pkt);

    // The +1 accounts for the functional write to memory just performed.
    return num_functional_writes + 1;
}

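// Convert a response from the memory system back into a Ruby MemoryMsg and
// enqueue it on this controller's memory queue one cycle later. The
// SenderState pushed when the request was issued identifies the original
// requestor.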
void
AbstractController::recvTimingResp(PacketPtr pkt)
{
    assert(getMemoryQueue());
    assert(pkt->isResponse());

    std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
    (*msg).m_addr = pkt->getAddr();
    (*msg).m_Sender = m_machineID;

    SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
    (*msg).m_OriginalRequestorMachId = s->id;
    delete s;

    if (pkt->isRead()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_READ;
        (*msg).m_MessageSize = MessageSizeType_Response_Data;

        // Copy data from the packet
        (*msg).m_DataBlk.setData(pkt->getPtr<uint8_t>(), 0,
                                 RubySystem::getBlockSizeBytes());
    } else if (pkt->isWrite()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_WB;
        (*msg).m_MessageSize = MessageSizeType_Writeback_Control;
    } else {
        panic("Incorrect packet type received from memory controller!");
    }

    getMemoryQueue()->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
    delete pkt->req;
    delete pkt;
}

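// MemoryPort forwards timing responses from the memory system back to the
// owning controller; the queued request and snoop-response queues required
// by QueuedMasterPort are members of the port itself.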
bool
AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
{
    controller->recvTimingResp(pkt);
    return true;
}

AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                           AbstractController *_controller,
                                           const std::string &_label)
    : QueuedMasterPort(_name, _controller, reqQueue, snoopRespQueue),
      reqQueue(*_controller, *this, _label),
      snoopRespQueue(*_controller, *this, _label),
      controller(_controller)
{
}