AbstractController.cc revision 10977:9b3b9be42dd9
/*
 * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/protocol/MemoryMsg.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "sim/system.hh"

AbstractController::AbstractController(const Params *p)
    : MemObject(p), Consumer(this), m_version(p->version),
      m_clusterID(p->cluster_id),
      m_masterId(p->system->getMasterId(name())), m_is_blocking(false),
      m_number_of_TBEs(p->number_of_TBEs),
      m_transitions_per_cycle(p->transitions_per_cycle),
      m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
      memoryPort(csprintf("%s.memory", name()), this, ""),
      m_responseFromMemory_ptr(new MessageBuffer())
{
    // Set the sender pointer of the response message buffer from the
    // memory controller.
    // This pointer is used for querying for the current time.
    m_responseFromMemory_ptr->setSender(this);
    m_responseFromMemory_ptr->setReceiver(this);
    m_responseFromMemory_ptr->setOrdering(false);

    if (m_version == 0) {
        // Combine the statistics from all controllers
        // of this particular type.
        Stats::registerDumpCallback(new StatsCallback(this));
    }
}

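// Register this controller with the Ruby system and size the delay
// histograms: one aggregate histogram plus one per virtual network.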
void
AbstractController::init()
{
    params()->ruby_system->registerAbstractController(this);
    m_delayHistogram.init(10);
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram.push_back(new Stats::Histogram());
        m_delayVCHistogram[i]->init(10);
    }
}

void
AbstractController::resetStats()
{
    m_delayHistogram.reset();
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram[i]->reset();
    }
}

void
AbstractController::regStats()
{
    m_fully_busy_cycles
        .name(name() + ".fully_busy_cycles")
        .desc("cycles for which number of transitions == max transitions")
        .flags(Stats::nozero);
}

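// Record the network delay of a delivered message, both overall and for the
// virtual network it travelled on.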
void
AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
{
    assert(virtualNetwork < m_delayVCHistogram.size());
    m_delayHistogram.sample(delay);
    m_delayVCHistogram[virtualNetwork]->sample(delay);
}

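// Remember that the in-port currently being processed is stalled on 'addr'.
// The buffer is stored by in-port rank so the wakeUp* methods below can
// reanalyze it once the address is free again.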
void
AbstractController::stallBuffer(MessageBuffer* buf, Address addr)
{
    if (m_waiting_buffers.count(addr) == 0) {
        MsgVecType* msgVec = new MsgVecType;
        msgVec->resize(m_in_ports, NULL);
        m_waiting_buffers[addr] = msgVec;
    }
    (*(m_waiting_buffers[addr]))[m_cur_in_port] = buf;
}

void
AbstractController::wakeUpBuffers(Address addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all possible lower rank (i.e. lower priority) buffers that could
        // be waiting on this message.
        //
        for (int in_port_rank = m_cur_in_port - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers(Address addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all buffers, regardless of in-port rank, that could be
        // waiting on this message.
        //
        for (int in_port_rank = m_in_ports - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers()
{
    //
    // Wake up all possible buffers that could be waiting on any message.
    //

    std::vector<MsgVecType*> wokeUpMsgVecs;
    MsgBufType wokeUpMsgBufs;

    if (m_waiting_buffers.size() > 0) {
        for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
             buf_iter != m_waiting_buffers.end();
             ++buf_iter) {
             for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
                  vec_iter != buf_iter->second->end();
                  ++vec_iter) {
                  //
                  // Make sure the MessageBuffer has not already been reanalyzed
                  //
                  if (*vec_iter != NULL &&
                      (wokeUpMsgBufs.count(*vec_iter) == 0)) {
                      (*vec_iter)->reanalyzeAllMessages();
                      wokeUpMsgBufs.insert(*vec_iter);
                  }
             }
             wokeUpMsgVecs.push_back(buf_iter->second);
        }

        for (std::vector<MsgVecType*>::iterator wb_iter = wokeUpMsgVecs.begin();
             wb_iter != wokeUpMsgVecs.end();
             ++wb_iter) {
             delete (*wb_iter);
        }

        m_waiting_buffers.clear();
    }
}

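// Mark 'addr' as blocked and remember the message buffer associated with it.
// The controller stays in blocking mode until every blocked address has been
// cleared via unblock().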
void
AbstractController::blockOnQueue(Address addr, MessageBuffer* port)
{
    m_is_blocking = true;
    m_block_map[addr] = port;
}

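// Remove the block on 'addr'; blocking mode ends once no blocked addresses
// remain.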
void
AbstractController::unblock(Address addr)
{
    m_block_map.erase(addr);
    if (m_block_map.size() == 0) {
        m_is_blocking = false;
    }
}

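// The memory port is the only master port this controller exposes, so it is
// returned regardless of if_name.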
BaseMasterPort &
AbstractController::getMasterPort(const std::string &if_name,
                                  PortID idx)
{
    return memoryPort;
}

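// Issue a cache-line-sized read to memory on behalf of machine 'id'. The
// response is delivered back through recvTimingResp() below.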
void
AbstractController::queueMemoryRead(const MachineID &id, Address addr,
                                    Cycles latency)
{
    RequestPtr req = new Request(addr.getAddress(),
                                 RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createRead(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

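// Issue a full-line write to memory, copying the data out of the supplied
// DataBlock into the packet.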
void
AbstractController::queueMemoryWrite(const MachineID &id, Address addr,
                                     Cycles latency, const DataBlock &block)
{
    RequestPtr req = new Request(addr.getAddress(),
                                 RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(0, RubySystem::getBlockSizeBytes()),
           RubySystem::getBlockSizeBytes());

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    // Send the write to memory as a timing request.
    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

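// Issue a partial write of 'size' bytes taken from the block offset of
// 'addr'.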
void
AbstractController::queueMemoryWritePartial(const MachineID &id, Address addr,
                                            Cycles latency,
                                            const DataBlock &block, int size)
{
    RequestPtr req = new Request(addr.getAddress(),
                                 RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[size];
    pkt->dataDynamic(newData);
    memcpy(newData, block.getData(addr.getOffset(), size), size);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Send the partial write to memory as a timing request.
    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

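// Satisfy a functional read by forwarding it directly to memory.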
void
AbstractController::functionalMemoryRead(PacketPtr pkt)
{
    memoryPort.sendFunctional(pkt);
}

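// Apply a functional write to any matching buffered packets (the response
// buffer from memory and the memory port's queues) and then to memory itself.
// Returns the total number of functional writes performed, including the one
// to memory.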
int
AbstractController::functionalMemoryWrite(PacketPtr pkt)
{
    int num_functional_writes = 0;

    // Check the message buffer that runs from the memory to the controller.
    num_functional_writes += m_responseFromMemory_ptr->functionalWrite(pkt);

    // Check the buffer from the controller to the memory.
    if (memoryPort.checkFunctional(pkt)) {
        num_functional_writes++;
    }

    // Update memory itself.
    memoryPort.sendFunctional(pkt);
    return num_functional_writes + 1;
}

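// Convert a response packet from memory back into a Ruby MemoryMsg and
// enqueue it on the response-from-memory buffer for the protocol state
// machine to consume.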
void
AbstractController::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
    (*msg).m_Addr.setAddress(pkt->getAddr());
    (*msg).m_Sender = m_machineID;

    SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
    (*msg).m_OriginalRequestorMachId = s->id;
    delete s;

    if (pkt->isRead()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_READ;
        (*msg).m_MessageSize = MessageSizeType_Response_Data;

        // Copy data from the packet
        (*msg).m_DataBlk.setData(pkt->getPtr<uint8_t>(), 0,
                                 RubySystem::getBlockSizeBytes());
    } else if (pkt->isWrite()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_WB;
        (*msg).m_MessageSize = MessageSizeType_Writeback_Control;
    } else {
        panic("Incorrect packet type received from memory controller!");
    }

    m_responseFromMemory_ptr->enqueue(msg);
    delete pkt;
}

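// Timing responses arriving at the memory port are handed straight to the
// owning controller.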
bool
AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
{
    controller->recvTimingResp(pkt);
    return true;
}

AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                           AbstractController *_controller,
                                           const std::string &_label)
    : QueuedMasterPort(_name, _controller, reqQueue, snoopRespQueue),
      reqQueue(*_controller, *this, _label),
      snoopRespQueue(*_controller, *this, _label),
      controller(_controller)
{
}