/*
 * Copyright (c) 2017,2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2011-2014 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/slicc_interface/AbstractController.hh"

#include "debug/RubyQueue.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/protocol/MemoryMsg.hh"
#include "mem/ruby/system/GPUCoalescer.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "sim/system.hh"

AbstractController::AbstractController(const Params *p)
    : ClockedObject(p), Consumer(this), m_version(p->version),
      m_clusterID(p->cluster_id),
      m_masterId(p->system->getMasterId(this)), m_is_blocking(false),
      m_number_of_TBEs(p->number_of_TBEs),
      m_transitions_per_cycle(p->transitions_per_cycle),
      m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
      m_mandatory_queue_latency(p->mandatory_queue_latency),
      memoryPort(csprintf("%s.memory", name()), this, ""),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end())
{
    if (m_version == 0) {
        // Combine the statistics from all controllers
        // of this particular type.
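        // Only the version-0 (canonical) instance registers the dump
        // callback, so the per-type statistics are collated exactly once
        // per stats dump rather than once per controller.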
        Stats::registerDumpCallback(new StatsCallback(this));
    }
}

void
AbstractController::init()
{
    params()->ruby_system->registerAbstractController(this);
    m_delayHistogram.init(10);
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram.push_back(new Stats::Histogram());
        m_delayVCHistogram[i]->init(10);
    }
}

void
AbstractController::resetStats()
{
    m_delayHistogram.reset();
    uint32_t size = Network::getNumberOfVirtualNetworks();
    for (uint32_t i = 0; i < size; i++) {
        m_delayVCHistogram[i]->reset();
    }
}

void
AbstractController::regStats()
{
    ClockedObject::regStats();

    m_fully_busy_cycles
        .name(name() + ".fully_busy_cycles")
        .desc("cycles for which number of transitions == max transitions")
        .flags(Stats::nozero);
}

void
AbstractController::profileMsgDelay(uint32_t virtualNetwork, Cycles delay)
{
    assert(virtualNetwork < m_delayVCHistogram.size());
    m_delayHistogram.sample(delay);
    m_delayVCHistogram[virtualNetwork]->sample(delay);
}

void
AbstractController::stallBuffer(MessageBuffer* buf, Addr addr)
{
    if (m_waiting_buffers.count(addr) == 0) {
        MsgVecType* msgVec = new MsgVecType;
        msgVec->resize(m_in_ports, NULL);
        m_waiting_buffers[addr] = msgVec;
    }
    DPRINTF(RubyQueue, "stalling %s port %d addr %#x\n", buf, m_cur_in_port,
            addr);
    assert(m_in_ports > m_cur_in_port);
    (*(m_waiting_buffers[addr]))[m_cur_in_port] = buf;
}

void
AbstractController::wakeUpBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all possible lower rank (i.e. lower priority) buffers
        // that could be waiting on this message.
        //
        for (int in_port_rank = m_cur_in_port - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers(Addr addr)
{
    if (m_waiting_buffers.count(addr) > 0) {
        //
        // Wake up all buffers, regardless of rank, that could be
        // waiting on this message.
        //
        for (int in_port_rank = m_in_ports - 1;
             in_port_rank >= 0;
             in_port_rank--) {
            if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
                (*(m_waiting_buffers[addr]))[in_port_rank]->
                    reanalyzeMessages(addr, clockEdge());
            }
        }
        delete m_waiting_buffers[addr];
        m_waiting_buffers.erase(addr);
    }
}

void
AbstractController::wakeUpAllBuffers()
{
    //
    // Wake up all possible buffers that could be waiting on any message.
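    // A buffer may be registered under several addresses, so the set of
    // already-woken buffers below ensures each one is reanalyzed only once.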
    //

    std::vector<MsgVecType*> wokeUpMsgVecs;
    MsgBufType wokeUpMsgBufs;

    if (m_waiting_buffers.size() > 0) {
        for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
             buf_iter != m_waiting_buffers.end();
             ++buf_iter) {
            for (MsgVecType::iterator vec_iter = buf_iter->second->begin();
                 vec_iter != buf_iter->second->end();
                 ++vec_iter) {
                //
                // Make sure the MessageBuffer has not already been
                // reanalyzed.
                //
                if (*vec_iter != NULL &&
                    (wokeUpMsgBufs.count(*vec_iter) == 0)) {
                    (*vec_iter)->reanalyzeAllMessages(clockEdge());
                    wokeUpMsgBufs.insert(*vec_iter);
                }
            }
            wokeUpMsgVecs.push_back(buf_iter->second);
        }

        for (std::vector<MsgVecType*>::iterator wb_iter = wokeUpMsgVecs.begin();
             wb_iter != wokeUpMsgVecs.end();
             ++wb_iter) {
            delete (*wb_iter);
        }

        m_waiting_buffers.clear();
    }
}

void
AbstractController::blockOnQueue(Addr addr, MessageBuffer* port)
{
    m_is_blocking = true;
    m_block_map[addr] = port;
}

bool
AbstractController::isBlocked(Addr addr) const
{
    return m_is_blocking && (m_block_map.find(addr) != m_block_map.end());
}

void
AbstractController::unblock(Addr addr)
{
    m_block_map.erase(addr);
    if (m_block_map.size() == 0) {
        m_is_blocking = false;
    }
}

bool
AbstractController::isBlocked(Addr addr)
{
    return (m_block_map.count(addr) > 0);
}

Port &
AbstractController::getPort(const std::string &if_name, PortID idx)
{
    return memoryPort;
}

void
AbstractController::queueMemoryRead(const MachineID &id, Addr addr,
                                    Cycles latency)
{
    RequestPtr req = std::make_shared<Request>(
        addr, RubySystem::getBlockSizeBytes(), 0, m_masterId);

    PacketPtr pkt = Packet::createRead(req);
    uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
    pkt->dataDynamic(newData);

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::queueMemoryWrite(const MachineID &id, Addr addr,
                                     Cycles latency, const DataBlock &block)
{
    RequestPtr req = std::make_shared<Request>(
        addr, RubySystem::getBlockSizeBytes(), 0, m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    pkt->allocate();
    pkt->setData(block.getData(0, RubySystem::getBlockSizeBytes()));

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Use functional rather than timing accesses during warmup
    if (RubySystem::getWarmupEnabled()) {
        memoryPort.sendFunctional(pkt);
        recvTimingResp(pkt);
        return;
    }

    // Send the timing request to memory after the specified latency.
    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::queueMemoryWritePartial(const MachineID &id, Addr addr,
                                            Cycles latency,
                                            const DataBlock &block, int size)
{
    RequestPtr req = std::make_shared<Request>(addr, size, 0, m_masterId);

    PacketPtr pkt = Packet::createWrite(req);
    pkt->allocate();
    pkt->setData(block.getData(getOffset(addr), size));

    SenderState *s = new SenderState(id);
    pkt->pushSenderState(s);

    // Send the timing request to memory after the specified latency.
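    // Unlike the full-line read and write paths above, partial writes
    // have no functional fast path during warmup and are always issued
    // as timing requests.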
    memoryPort.schedTimingReq(pkt, clockEdge(latency));
}

void
AbstractController::functionalMemoryRead(PacketPtr pkt)
{
    memoryPort.sendFunctional(pkt);
}

int
AbstractController::functionalMemoryWrite(PacketPtr pkt)
{
    int num_functional_writes = 0;

    // Check the buffer from the controller to the memory.
    if (memoryPort.trySatisfyFunctional(pkt)) {
        num_functional_writes++;
    }

    // Update memory itself.
    memoryPort.sendFunctional(pkt);
    return num_functional_writes + 1;
}

void
AbstractController::recvTimingResp(PacketPtr pkt)
{
    assert(getMemoryQueue());
    assert(pkt->isResponse());

    std::shared_ptr<MemoryMsg> msg = std::make_shared<MemoryMsg>(clockEdge());
    (*msg).m_addr = pkt->getAddr();
    (*msg).m_Sender = m_machineID;

    SenderState *s = dynamic_cast<SenderState *>(pkt->senderState);
    (*msg).m_OriginalRequestorMachId = s->id;
    delete s;

    if (pkt->isRead()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_READ;
        (*msg).m_MessageSize = MessageSizeType_Response_Data;

        // Copy data from the packet
        (*msg).m_DataBlk.setData(pkt->getPtr<uint8_t>(), 0,
                                 RubySystem::getBlockSizeBytes());
    } else if (pkt->isWrite()) {
        (*msg).m_Type = MemoryRequestType_MEMORY_WB;
        (*msg).m_MessageSize = MessageSizeType_Writeback_Control;
    } else {
        panic("Incorrect packet type received from memory controller!");
    }

    getMemoryQueue()->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
    delete pkt;
}

Tick
AbstractController::recvAtomic(PacketPtr pkt)
{
    return ticksToCycles(memoryPort.sendAtomic(pkt));
}

MachineID
AbstractController::mapAddressToMachine(Addr addr, MachineType mtype) const
{
    NodeID node = m_net_ptr->addressToNodeID(addr, mtype);
    MachineID mach = {mtype, node};
    return mach;
}

bool
AbstractController::MemoryPort::recvTimingResp(PacketPtr pkt)
{
    controller->recvTimingResp(pkt);
    return true;
}

AbstractController::MemoryPort::MemoryPort(const std::string &_name,
                                           AbstractController *_controller,
                                           const std::string &_label)
    : QueuedMasterPort(_name, _controller, reqQueue, snoopRespQueue),
      reqQueue(*_controller, *this, _label),
      snoopRespQueue(*_controller, *this, false, _label),
      controller(_controller)
{
}