AbstractController.cc revision 10986:4fbe4b0adb4d
/*
 * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/slicc_interface/AbstractController.hh"

#include "debug/RubyQueue.hh"
#include "mem/protocol/MemoryMsg.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "sim/system.hh"

AbstractController::AbstractController(const Params *p)
    : MemObject(p), Consumer(this), m_version(p->version),
      m_clusterID(p->cluster_id),
      m_masterId(p->system->getMasterId(name())), m_is_blocking(false),
      m_number_of_TBEs(p->number_of_TBEs),
      m_transitions_per_cycle(p->transitions_per_cycle),
      m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
      memoryPort(csprintf("%s.memory", name()), this, ""),
      m_responseFromMemory_ptr(new MessageBuffer())
{
    // Set the sender pointer of the response message buffer from the
    // memory controller.
    // This pointer is used for querying for the current time.
    m_responseFromMemory_ptr->setSender(this);
    m_responseFromMemory_ptr->setReceiver(this);
    m_responseFromMemory_ptr->setOrdering(false);

    if (m_version == 0) {
        // Combine the statistics from all controllers
        // of this particular type.
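        // Only the version-0 controller registers the dump callback, so
        // the per-type aggregation runs exactly once; registerDumpCallback()
        // arranges for the StatsCallback to be invoked on every stats dump.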
        Stats::registerDumpCallback(new StatsCallback(this));
    }
}

void
AbstractController::init()
{
    params()->ruby_system->registerAbstractController(this);
    m_delayHistogram.init(10);
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram.push_back(new Stats::Histogram());
        m_delayVCHistogram[i]->init(10);
    }
}

void
AbstractController::resetStats()
{
    m_delayHistogram.reset();
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram[i]->reset();
    }
}

void
AbstractController::regStats()
{
    m_fully_busy_cycles
        .name(name() + ".fully_busy_cycles")
        .desc("cycles for which number of transitions == max transitions")
        .flags(Stats::nozero);
}

void
AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
{
    assert(virtualNetwork < m_delayVCHistogram.size());
    m_delayHistogram.sample(delay);
    m_delayVCHistogram[virtualNetwork]->sample(delay);
}

void
AbstractController::stallBuffer(MessageBuffer* buf, Address addr)
{
    if (m_waiting_buffers.count(addr) == 0) {
        MsgVecType* msgVec = new MsgVecType;
        msgVec->resize(m_in_ports, NULL);
        m_waiting_buffers[addr] = msgVec;
    }
    DPRINTF(RubyQueue, "stalling %s port %d addr %s\n", buf, m_cur_in_port,
            addr);
    assert(m_in_ports > m_cur_in_port);
    (*(m_waiting_buffers[addr]))[m_cur_in_port] = buf;
}

void
AbstractController::wakeUpBuffers(Address addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all possible lower rank (i.e. lower priority) buffers
        // that could be waiting on this message.
        //
        for (int in_port_rank = m_cur_in_port - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr);
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers(Address addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all buffers, regardless of port rank, that could be
        // waiting on this message.
        //
        for (int in_port_rank = m_in_ports - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr);
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers()
{
    //
    // Wake up all possible buffers that could be waiting on any message.
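    // Each buffer is reanalyzed at most once, and the per-address vectors
    // are collected and deleted only after the full sweep, presumably so
    // that reanalysis cannot mutate m_waiting_buffers mid-iteration.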
    //

    std::vector<MsgVecType*> wokeUpMsgVecs;
    MsgBufType wokeUpMsgBufs;

    if (m_waiting_buffers.size() > 0) {
        for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
             buf_iter != m_waiting_buffers.end();
             ++buf_iter) {
            for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
                 vec_iter != buf_iter->second->end();
                 ++vec_iter) {
                //
                // Make sure the MessageBuffer has not already been
                // reanalyzed.
                //
                if (*vec_iter != NULL &&
                    (wokeUpMsgBufs.count(*vec_iter) == 0)) {
                    (*vec_iter)->reanalyzeAllMessages();
                    wokeUpMsgBufs.insert(*vec_iter);
                }
            }
            wokeUpMsgVecs.push_back(buf_iter->second);
        }

        for (std::vector<MsgVecType*>::iterator wb_iter = wokeUpMsgVecs.begin();
             wb_iter != wokeUpMsgVecs.end();
             ++wb_iter) {
            delete (*wb_iter);
        }

        m_waiting_buffers.clear();
    }
}

void
AbstractController::blockOnQueue(Address addr, MessageBuffer* port)
{
    m_is_blocking = true;
    m_block_map[addr] = port;
}

void
AbstractController::unblock(Address addr)
{
    m_block_map.erase(addr);
    if (m_block_map.size() == 0) {
        m_is_blocking = false;
    }
}

BaseMasterPort &
AbstractController::getMasterPort(const std::string &if_name,
                                  PortID idx)
{
    // The memory port is this controller's only master port, so every
    // request for a master port resolves to it.
    return memoryPort;
}

void
AbstractController::queueMemoryRead(const MachineID &id, Address addr,
                                    Cycles latency)
{
    RequestPtr req = new Request(addr.getAddress(),
                                 RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createRead(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::queueMemoryWrite(const MachineID &id, Address addr,
                                     Cycles latency, const DataBlock &block)
{
    RequestPtr req = new Request(addr.getAddress(),
                                 RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);

    // Copy the write data from the data block into the packet's buffer.
    memcpy(newData, block.getData(0, RubySystem::getBlockSizeBytes()),
           RubySystem::getBlockSizeBytes());

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::queueMemoryWritePartial(const MachineID &id, Address addr,
                                            Cycles latency,
                                            const DataBlock &block, int size)
{
    RequestPtr req = new Request(addr.getAddress(),
                                 RubySystem::getBlockSizeBytes(), 0,
                                 m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    uint8_t *newData = new uint8_t[size];
    pkt->dataDynamic(newData);

    // Copy only the requested bytes, starting at the block offset of addr.
    memcpy(newData, block.getData(addr.getOffset(), size), size);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

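    // Note: unlike queueMemoryRead() and queueMemoryWrite() above, the
    // partial-write path has no functional fallback for the warmup phase;
    // the request is always issued as a timing access.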
    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::functionalMemoryRead(PacketPtr pkt)
{
    memoryPort.sendFunctional(pkt);
}

int
AbstractController::functionalMemoryWrite(PacketPtr pkt)
{
    int num_functional_writes = 0;

    // Check the message buffer that runs from the memory to the controller.
    num_functional_writes += m_responseFromMemory_ptr->functionalWrite(pkt);

    // Check the buffer from the controller to the memory.
    if (memoryPort.checkFunctional(pkt)) {
        num_functional_writes++;
    }

    // Update memory itself; the extra one in the return value accounts
    // for this access.
    memoryPort.sendFunctional(pkt);
    return num_functional_writes + 1;
}

void
AbstractController::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
    (*msg).m_Addr.setAddress(pkt->getAddr());
    (*msg).m_Sender = m_machineID;

    SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
    (*msg).m_OriginalRequestorMachId = s->id;
    delete s;

    if (pkt->isRead()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_READ;
        (*msg).m_MessageSize = MessageSizeType_Response_Data;

        // Copy data from the packet
        (*msg).m_DataBlk.setData(pkt->getPtr<uint8_t>(), 0,
                                 RubySystem::getBlockSizeBytes());
    } else if (pkt->isWrite()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_WB;
        (*msg).m_MessageSize = MessageSizeType_Writeback_Control;
    } else {
        panic("Incorrect packet type received from memory controller!");
    }

    m_responseFromMemory_ptr->enqueue(msg);
    delete pkt;
}

bool
AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
{
    controller->recvTimingResp(pkt);
    return true;
}

AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                           AbstractController *_controller,
                                           const std::string &_label)
    : QueuedMasterPort(_name, _controller, reqQueue, snoopRespQueue),
      reqQueue(*_controller, *this, _label),
      snoopRespQueue(*_controller, *this, _label),
      controller(_controller)
{
}
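// A minimal sketch of how the memory round trip above is typically driven.
// The caller below is hypothetical (real callers are SLICC-generated
// controller subclasses); only queueMemoryRead(), recvTimingResp(), and
// m_responseFromMemory_ptr are from this file:
//
//     void
//     HypotheticalDirectory::fetchLine(Address addr)
//     {
//         // Queue a timing read to memory. When the response returns,
//         // MemoryPort::recvTimingResp() hands the packet to
//         // AbstractController::recvTimingResp(), which wraps it in a
//         // MemoryMsg and enqueues it on m_responseFromMemory_ptr for
//         // the state machine to consume.
//         queueMemoryRead(m_machineID, addr, Cycles(1));
//     }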