coherent_xbar.cc revision 12778:ca8c50112a66
/*
 * Copyright (c) 2011-2017 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Andreas Hansson
 *          William Wang
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of a crossbar object.
 */

#include "mem/coherent_xbar.hh"

#include "base/logging.hh"
#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/CoherentXBar.hh"
#include "sim/system.hh"

CoherentXBar::CoherentXBar(const CoherentXBarParams *p)
    : BaseXBar(p), system(p->system), snoopFilter(p->snoop_filter),
      snoopResponseLatency(p->snoop_response_latency),
      pointOfCoherency(p->point_of_coherency),
      pointOfUnification(p->point_of_unification)
{
    // create the ports based on the size of the master and slave
    // vector ports, and the presence of the default port, the ports
    // are enumerated starting from zero
    for (int i = 0; i < p->port_master_connection_count; ++i) {
        std::string portName = csprintf("%s.master[%d]", name(), i);
        MasterPort* bp = new CoherentXBarMasterPort(portName, *this, i);
        masterPorts.push_back(bp);
        reqLayers.push_back(new ReqLayer(*bp, *this,
                                         csprintf(".reqLayer%d", i)));
        snoopLayers.push_back(new SnoopRespLayer(*bp, *this,
                                                 csprintf(".snoopLayer%d", i)));
    }

    // see if we have a default slave device connected and if so add
    // our corresponding master port
    if (p->port_default_connection_count) {
        defaultPortID = masterPorts.size();
        std::string portName = name() + ".default";
        MasterPort* bp = new CoherentXBarMasterPort(portName, *this,
                                                    defaultPortID);
        masterPorts.push_back(bp);
        reqLayers.push_back(new ReqLayer(*bp, *this, csprintf(".reqLayer%d",
                                             defaultPortID)));
        snoopLayers.push_back(new SnoopRespLayer(*bp, *this,
                                                 csprintf(".snoopLayer%d",
                                                          defaultPortID)));
    }

    // create the slave ports, once again starting at zero
    for (int i = 0; i < p->port_slave_connection_count; ++i) {
        std::string portName = csprintf("%s.slave[%d]", name(), i);
        QueuedSlavePort* bp = new CoherentXBarSlavePort(portName, *this, i);
        slavePorts.push_back(bp);
        respLayers.push_back(new RespLayer(*bp, *this,
                                           csprintf(".respLayer%d", i)));
        snoopRespPorts.push_back(new SnoopRespPort(*bp, *this));
    }
}

CoherentXBar::~CoherentXBar()
{
    for (auto l: reqLayers)
        delete l;
    for (auto l: respLayers)
        delete l;
    for (auto l: snoopLayers)
        delete l;
    for (auto p: snoopRespPorts)
        delete p;
}

void
CoherentXBar::init()
{
    BaseXBar::init();

    // iterate over our slave ports and determine which of our
    // neighbouring master ports are snooping and add them as snoopers
    for (const auto& p: slavePorts) {
        // check if the connected master port is snooping
        if (p->isSnooping()) {
            DPRINTF(AddrRanges, "Adding snooping master %s\n",
                    p->getMasterPort().name());
            snoopPorts.push_back(p);
        }
    }

    if (snoopPorts.empty())
        warn("CoherentXBar %s has no snooping ports attached!\n", name());

    // inform the snoop filter about the slave ports so it can create
    // its own internal representation
    if (snoopFilter)
        snoopFilter->setSlavePorts(slavePorts);
}

bool
CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
{
    // determine the source port based on the id
    SlavePort *src_port = slavePorts[slave_port_id];

    // remember if the packet is an express snoop
    bool is_express_snoop = pkt->isExpressSnoop();
    bool cache_responding = pkt->cacheResponding();
    // for normal requests, going downstream, the express snoop flag
    // and the cache responding flag should always be the same
    assert(is_express_snoop == cache_responding);

    // determine the destination based on the address
    PortID master_port_id = findPort(pkt->getAddr());

    // test if the crossbar should be considered occupied for the current
    // port, and exclude express snoops from the check
    if (!is_express_snoop && !reqLayers[master_port_id]->tryTiming(src_port)) {
        DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                src_port->name(), pkt->print());
        return false;
    }

    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            src_port->name(), pkt->print());

    // store size and command as they might be modified when
    // forwarding the packet
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    // store the old header delay so we can restore it if needed
    Tick old_header_delay = pkt->headerDelay;

    // a request sees the frontend and forward latency
    Tick xbar_delay = (frontendLatency + forwardLatency) * clockPeriod();

    // set the packet header and payload delay
    calcPacketTiming(pkt, xbar_delay);

    // determine how long the crossbar layer is busy
    Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;

    // is this the destination point for this packet? (e.g. true if
    // this xbar is the PoC for a cache maintenance operation to the
    // PoC) otherwise the destination is any cache that can satisfy
    // the request
    const bool is_destination = isDestination(pkt);

    const bool snoop_caches = !system->bypassCaches() &&
        pkt->cmd != MemCmd::WriteClean;
    if (snoop_caches) {
        assert(pkt->snoopDelay == 0);

        if (pkt->isClean() && !is_destination) {
            // before snooping we need to make sure that the memory
            // below is not busy and the cache clean request can be
            // forwarded to it
            if (!masterPorts[master_port_id]->tryTiming(pkt)) {
                DPRINTF(CoherentXBar, "%s: src %s packet %s RETRY\n", __func__,
                        src_port->name(), pkt->print());

                // update the layer state and schedule an idle event
                reqLayers[master_port_id]->failedTiming(src_port,
                                                        clockEdge(Cycles(1)));
                return false;
            }
        }

        // the packet is a memory-mapped request and should be
        // broadcast to all our snoopers but the source
        if (snoopFilter) {
            // check with the snoop filter where to forward this packet
            auto sf_res = snoopFilter->lookupRequest(pkt, *src_port);
            // the time required by a packet to be delivered through
            // the xbar has to be charged also with the lookup latency
            // of the snoop filter
            pkt->headerDelay += sf_res.second * clockPeriod();
            DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                    __func__, src_port->name(), pkt->print(),
                    sf_res.first.size(), sf_res.second);

            if (pkt->isEviction()) {
                // for block-evicting packets, i.e. writebacks and
                // clean evictions, there is no need to snoop up, as
                // all we do is determine if the block is cached or
                // not, instead just set it here based on the snoop
                // filter result
                if (!sf_res.first.empty())
                    pkt->setBlockCached();
            } else {
                forwardTiming(pkt, slave_port_id, sf_res.first);
            }
        } else {
            forwardTiming(pkt, slave_port_id);
        }

        // add the snoop delay to our header delay, and then reset it
        pkt->headerDelay += pkt->snoopDelay;
        pkt->snoopDelay = 0;
    }

    // set up a sensible starting point
    bool success = true;

    // remember if the packet will generate a snoop response by
    // checking if a cache set the cacheResponding flag during the
    // snooping above
    const bool expect_snoop_resp = !cache_responding && pkt->cacheResponding();
    bool expect_response = pkt->needsResponse() && !pkt->cacheResponding();

    const bool sink_packet = sinkPacket(pkt);

    // in certain cases the crossbar is responsible for responding
    bool respond_directly = false;
    // store the original address as an address mapper could possibly
    // modify the address upon a sendTimingRequest
    const Addr addr(pkt->getAddr());
    if (sink_packet) {
        DPRINTF(CoherentXBar, "%s: Not forwarding %s\n", __func__,
                pkt->print());
    } else {
        // determine if we are forwarding the packet, or responding to
        // it
        if (forwardPacket(pkt)) {
            // if we are passing on, rather than sinking, a packet to
            // which an upstream cache has committed to responding,
            // the line needs to be writable and the responder only
            // had an Owned copy, so we need to immediately let the
            // downstream caches know, bypassing any flow control
            if (pkt->cacheResponding()) {
                pkt->setExpressSnoop();
            }

            // make sure that the write request (e.g., WriteClean)
            // will stop at the memory below if this crossbar is its
            // destination
            if (pkt->isWrite() && is_destination) {
                pkt->clearWriteThrough();
            }

            // since it is a normal request, attempt to send the packet
            success = masterPorts[master_port_id]->sendTimingReq(pkt);
        } else {
            // no need to forward, turn this packet around and respond
            // directly
            assert(pkt->needsResponse());

            respond_directly = true;
            assert(!expect_snoop_resp);
            expect_response = false;
        }
    }

    if (snoopFilter && snoop_caches) {
        // Let the snoop filter know about the success of the send operation
        snoopFilter->finishRequest(!success, addr, pkt->isSecure());
    }

    // check if we were successful in sending the packet onwards
    if (!success) {
        // express snoops should never be forced to retry
        assert(!is_express_snoop);

        // restore the header delay
        pkt->headerDelay = old_header_delay;

        DPRINTF(CoherentXBar, "%s: src %s packet %s RETRY\n", __func__,
                src_port->name(), pkt->print());

        // update the layer state and schedule an idle event
        reqLayers[master_port_id]->failedTiming(src_port,
                                                clockEdge(Cycles(1)));
    } else {
        // express snoops currently bypass the crossbar state entirely
        if (!is_express_snoop) {
            // if this particular request will generate a snoop
            // response
            if (expect_snoop_resp) {
                // we should never have an existing request outstanding
                assert(outstandingSnoop.find(pkt->req) ==
                       outstandingSnoop.end());
                outstandingSnoop.insert(pkt->req);

                // basic sanity check on the outstanding snoops
                panic_if(outstandingSnoop.size() > 512,
                         "Outstanding snoop requests exceeded 512\n");
            }

            // remember where to route the normal response to
            if (expect_response || expect_snoop_resp) {
                assert(routeTo.find(pkt->req) == routeTo.end());
                routeTo[pkt->req] = slave_port_id;

                panic_if(routeTo.size() > 512,
                         "Routing table exceeds 512 packets\n");
            }

            // update the layer state and schedule an idle event
            reqLayers[master_port_id]->succeededTiming(packetFinishTime);
        }

        // stats updates only consider packets that were successfully sent
        pktCount[slave_port_id][master_port_id]++;
        pktSize[slave_port_id][master_port_id] += pkt_size;
        transDist[pkt_cmd]++;

        if (is_express_snoop) {
            snoops++;
            snoopTraffic += pkt_size;
        }
    }

    if (sink_packet)
        // queue the packet for deletion
        pendingDelete.reset(pkt);

    // normally we respond to the packet we just received if we need to
    PacketPtr rsp_pkt = pkt;
    PortID rsp_port_id = slave_port_id;

    // If this is the destination of the cache clean operation the
    // crossbar is responsible for responding. This crossbar will
    // respond when the cache clean is complete. A cache clean
    // is complete either:
    // * directly, if no cache above had a dirty copy of the block
    //   as indicated by the satisfied flag of the packet, or
    // * when the crossbar has seen both the cache clean request
    //   (CleanSharedReq, CleanInvalidReq) and the corresponding
    //   write (WriteClean) which updates the block in the memory
    //   below.
    if (success &&
        ((pkt->isClean() && pkt->satisfied()) ||
         pkt->cmd == MemCmd::WriteClean) &&
        is_destination) {
        PacketPtr deferred_rsp = pkt->isWrite() ? nullptr : pkt;
        auto cmo_lookup = outstandingCMO.find(pkt->id);
        if (cmo_lookup != outstandingCMO.end()) {
            // the cache clean request has already reached this xbar
            respond_directly = true;
            if (pkt->isWrite()) {
                rsp_pkt = cmo_lookup->second;
                assert(rsp_pkt);

                // determine the destination
                const auto route_lookup = routeTo.find(rsp_pkt->req);
                assert(route_lookup != routeTo.end());
                rsp_port_id = route_lookup->second;
                assert(rsp_port_id != InvalidPortID);
                assert(rsp_port_id < respLayers.size());
                // remove the request from the routing table
                routeTo.erase(route_lookup);
            }
            outstandingCMO.erase(cmo_lookup);
        } else {
            respond_directly = false;
            outstandingCMO.emplace(pkt->id, deferred_rsp);
            if (!pkt->isWrite()) {
                assert(routeTo.find(pkt->req) == routeTo.end());
                routeTo[pkt->req] = slave_port_id;

                panic_if(routeTo.size() > 512,
                         "Routing table exceeds 512 packets\n");
            }
        }
    }

    if (respond_directly) {
        assert(rsp_pkt->needsResponse());
        assert(success);

        rsp_pkt->makeResponse();

        if (snoopFilter && !system->bypassCaches()) {
            // let the snoop filter inspect the response and update its state
            snoopFilter->updateResponse(rsp_pkt, *slavePorts[rsp_port_id]);
        }

        // we send the response after the current packet, even if the
        // response is not for this packet (e.g. cache clean operation
        // where both the request and the write packet have to cross
        // the destination xbar before the response is sent.)
        Tick response_time = clockEdge() + pkt->headerDelay;
        rsp_pkt->headerDelay = 0;

        slavePorts[rsp_port_id]->schedTimingResp(rsp_pkt, response_time);
    }

    return success;
}

bool
CoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id)
{
    // determine the source port based on the id
    MasterPort *src_port = masterPorts[master_port_id];

    // determine the destination
    const auto route_lookup = routeTo.find(pkt->req);
    assert(route_lookup != routeTo.end());
    const PortID slave_port_id = route_lookup->second;
    assert(slave_port_id != InvalidPortID);
    assert(slave_port_id < respLayers.size());

    // test if the crossbar should be considered occupied for the
    // current port
    if (!respLayers[slave_port_id]->tryTiming(src_port)) {
        DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                src_port->name(), pkt->print());
        return false;
    }

    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            src_port->name(), pkt->print());

    // store size and command as they might be modified when
    // forwarding the packet
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    // a response sees the response latency
    Tick xbar_delay = responseLatency * clockPeriod();

    // set the packet header and payload delay
    calcPacketTiming(pkt, xbar_delay);

    // determine how long the crossbar layer is busy
    Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;

    if (snoopFilter && !system->bypassCaches()) {
        // let the snoop filter inspect the response and update its state
        snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
    }

    // send the packet through the destination slave port and pay for
    // any outstanding header delay
    Tick latency = pkt->headerDelay;
    pkt->headerDelay = 0;
    slavePorts[slave_port_id]->schedTimingResp(pkt, curTick() + latency);

    // remove the request from the routing table
    routeTo.erase(route_lookup);

    respLayers[slave_port_id]->succeededTiming(packetFinishTime);

    // stats updates
    pktCount[slave_port_id][master_port_id]++;
    pktSize[slave_port_id][master_port_id] += pkt_size;
    transDist[pkt_cmd]++;

    return true;
}

void
CoherentXBar::recvTimingSnoopReq(PacketPtr pkt, PortID master_port_id)
{
    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            masterPorts[master_port_id]->name(), pkt->print());

    // update stats here as we know the forwarding will succeed
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    transDist[pkt->cmdToIndex()]++;
    snoops++;
    snoopTraffic += pkt_size;

    // we should only see express snoops from caches
    assert(pkt->isExpressSnoop());

    // set the packet header and payload delay, for now use forward latency
    // @todo Assess the choice of latency further
    calcPacketTiming(pkt, forwardLatency * clockPeriod());

    // remember if a cache has already committed to responding so we
    // can see if it changes during the snooping
    const bool cache_responding = pkt->cacheResponding();

    assert(pkt->snoopDelay == 0);

    if (snoopFilter) {
        // let the Snoop Filter work its magic and guide probing
        auto sf_res = snoopFilter->lookupSnoop(pkt);
        // the time required by a packet to be delivered through
        // the xbar has to be charged also with the lookup latency
        // of the snoop filter
        pkt->headerDelay += sf_res.second * clockPeriod();
        DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                __func__, masterPorts[master_port_id]->name(), pkt->print(),
                sf_res.first.size(), sf_res.second);

        // forward to all snoopers
        forwardTiming(pkt, InvalidPortID, sf_res.first);
    } else {
        forwardTiming(pkt, InvalidPortID);
    }

    // add the snoop delay to our header delay, and then reset it
    pkt->headerDelay += pkt->snoopDelay;
    pkt->snoopDelay = 0;

    // if we can expect a response, remember how to route it
    if (!cache_responding && pkt->cacheResponding()) {
        assert(routeTo.find(pkt->req) == routeTo.end());
        routeTo[pkt->req] = master_port_id;
    }

    // a snoop request came from a connected slave device (one of
    // our master ports), and if it is not coming from the slave
    // device responsible for the address range something is
    // wrong, hence there is nothing further to do as the packet
    // would be going back to where it came from
    assert(master_port_id == findPort(pkt->getAddr()));
}

bool
CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID slave_port_id)
{
    // determine the source port based on the id
    SlavePort* src_port = slavePorts[slave_port_id];

    // get the destination
    const auto route_lookup = routeTo.find(pkt->req);
    assert(route_lookup != routeTo.end());
    const PortID dest_port_id = route_lookup->second;
    assert(dest_port_id != InvalidPortID);

    // determine if the response is from a snoop request we
    // created as the result of a normal request (in which case it
    // should be in the outstandingSnoop), or if we merely forwarded
    // someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    // test if the crossbar should be considered occupied for the
    // current port, note that the check is bypassed if the response
    // is being passed on as a normal response since this is occupying
    // the response layer rather than the snoop response layer
    if (forwardAsSnoop) {
        assert(dest_port_id < snoopLayers.size());
        if (!snoopLayers[dest_port_id]->tryTiming(src_port)) {
            DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                    src_port->name(), pkt->print());
            return false;
        }
    } else {
        // get the master port that mirrors this slave port internally
        MasterPort* snoop_port = snoopRespPorts[slave_port_id];
        assert(dest_port_id < respLayers.size());
        if (!respLayers[dest_port_id]->tryTiming(snoop_port)) {
            DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                    snoop_port->name(), pkt->print());
            return false;
        }
    }

    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            src_port->name(), pkt->print());

    // store size and command as they might be modified when
    // forwarding the packet
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    // responses are never express snoops
    assert(!pkt->isExpressSnoop());

    // a snoop response sees the snoop response latency, and if it is
    // forwarded as a normal response, the response latency
    Tick xbar_delay =
        (forwardAsSnoop ? snoopResponseLatency : responseLatency) *
        clockPeriod();

    // set the packet header and payload delay
    calcPacketTiming(pkt, xbar_delay);

    // determine how long the crossbar layer is busy
    Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;

    // forward it either as a snoop response or a normal response
    if (forwardAsSnoop) {
        // this is a snoop response to a snoop request we forwarded,
        // e.g. coming from the L1 and going to the L2, and it should
        // be forwarded as a snoop response

        if (snoopFilter) {
            // update the probe filter so that it can properly track the line
            snoopFilter->updateSnoopForward(pkt, *slavePorts[slave_port_id],
                                            *masterPorts[dest_port_id]);
        }

        bool success M5_VAR_USED =
            masterPorts[dest_port_id]->sendTimingSnoopResp(pkt);
        pktCount[slave_port_id][dest_port_id]++;
        pktSize[slave_port_id][dest_port_id] += pkt_size;
        assert(success);

        snoopLayers[dest_port_id]->succeededTiming(packetFinishTime);
    } else {
        // we got a snoop response on one of our slave ports,
        // i.e. from a coherent master connected to the crossbar, and
        // since we created the snoop request as part of recvTiming,
        // this should now be a normal response again
        outstandingSnoop.erase(pkt->req);

        // this is a snoop response from a coherent master, hence it
        // should never go back to where the snoop response came from,
        // but instead to where the original request came from
        assert(slave_port_id != dest_port_id);

        if (snoopFilter) {
            // update the probe filter so that it can properly track the line
            snoopFilter->updateSnoopResponse(pkt, *slavePorts[slave_port_id],
                                             *slavePorts[dest_port_id]);
        }

        DPRINTF(CoherentXBar, "%s: src %s packet %s FWD RESP\n", __func__,
                src_port->name(), pkt->print());

        // as a normal response, it should go back to a master through
        // one of our slave ports, we also pay for any outstanding
        // header latency
        Tick latency = pkt->headerDelay;
        pkt->headerDelay = 0;
        slavePorts[dest_port_id]->schedTimingResp(pkt, curTick() + latency);

        respLayers[dest_port_id]->succeededTiming(packetFinishTime);
    }

    // remove the request from the routing table
    routeTo.erase(route_lookup);

    // stats updates
    transDist[pkt_cmd]++;
    snoops++;
    snoopTraffic += pkt_size;

    return true;
}

void
CoherentXBar::forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id,
                            const std::vector<QueuedSlavePort*>& dests)
{
    DPRINTF(CoherentXBar, "%s for %s\n", __func__, pkt->print());

    // snoops should only happen if the system isn't bypassing caches
    assert(!system->bypassCaches());

    unsigned fanout = 0;

    for (const auto& p: dests) {
        // we could have gotten this request from a snooping master
        // (corresponding to our own slave port that is also in
        // snoopPorts) and should not send it back to where it came
        // from
        if (exclude_slave_port_id == InvalidPortID ||
            p->getId() != exclude_slave_port_id) {
            // cache is not allowed to refuse snoop
            p->sendTimingSnoopReq(pkt);
            fanout++;
        }
    }

    // Stats for fanout of this forward operation
    snoopFanout.sample(fanout);
}

void
CoherentXBar::recvReqRetry(PortID master_port_id)
{
    // responses and snoop responses never block on forwarding them,
    // so the retry will always be coming from a port to which we
    // tried to forward a request
    reqLayers[master_port_id]->recvRetry();
}

Tick
CoherentXBar::recvAtomic(PacketPtr pkt, PortID slave_port_id)
{
    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            slavePorts[slave_port_id]->name(), pkt->print());

    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
    Tick snoop_response_latency = 0;

    // is this the destination point for this packet? (e.g. true if
    // this xbar is the PoC for a cache maintenance operation to the
    // PoC) otherwise the destination is any cache that can satisfy
    // the request
    const bool is_destination = isDestination(pkt);

    const bool snoop_caches = !system->bypassCaches() &&
        pkt->cmd != MemCmd::WriteClean;
    if (snoop_caches) {
        // forward to all snoopers but the source
        std::pair<MemCmd, Tick> snoop_result;
        if (snoopFilter) {
            // check with the snoop filter where to forward this packet
            auto sf_res =
                snoopFilter->lookupRequest(pkt, *slavePorts[slave_port_id]);
            snoop_response_latency += sf_res.second * clockPeriod();
            DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                    __func__, slavePorts[slave_port_id]->name(), pkt->print(),
                    sf_res.first.size(), sf_res.second);

            // let the snoop filter know about the success of the send
            // operation, and do it even before sending it onwards to
            // avoid situations where atomic upward snoops sneak in
            // between and change the filter state
            snoopFilter->finishRequest(false, pkt->getAddr(), pkt->isSecure());

            if (pkt->isEviction()) {
                // for block-evicting packets, i.e. writebacks and
                // clean evictions, there is no need to snoop up, as
                // all we do is determine if the block is cached or
                // not, instead just set it here based on the snoop
                // filter result
                if (!sf_res.first.empty())
                    pkt->setBlockCached();
            } else {
                snoop_result = forwardAtomic(pkt, slave_port_id, InvalidPortID,
                                             sf_res.first);
            }
        } else {
            snoop_result = forwardAtomic(pkt, slave_port_id);
        }
        snoop_response_cmd = snoop_result.first;
        snoop_response_latency += snoop_result.second;
    }

    // set up a sensible default value
    Tick response_latency = 0;

    const bool sink_packet = sinkPacket(pkt);

    // even if we had a snoop response, we must continue and also
    // perform the actual request at the destination
    PortID master_port_id = findPort(pkt->getAddr());

    if (sink_packet) {
        DPRINTF(CoherentXBar, "%s: Not forwarding %s\n", __func__,
                pkt->print());
    } else {
        if (forwardPacket(pkt)) {
            // make sure that the write request (e.g., WriteClean)
            // will stop at the memory below if this crossbar is its
            // destination
            if (pkt->isWrite() && is_destination) {
                pkt->clearWriteThrough();
            }

            // forward the request to the appropriate destination
            response_latency = masterPorts[master_port_id]->sendAtomic(pkt);
        } else {
            // if it does not need a response we sink the packet above
            assert(pkt->needsResponse());

            pkt->makeResponse();
        }
    }

    // stats updates for the request
    pktCount[slave_port_id][master_port_id]++;
    pktSize[slave_port_id][master_port_id] += pkt_size;
    transDist[pkt_cmd]++;

    // if lower levels have replied, tell the snoop filter
    if (!system->bypassCaches() && snoopFilter && pkt->isResponse()) {
        snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
    }

    // if we got a response from a snooper, restore it here
    if (snoop_response_cmd != MemCmd::InvalidCmd) {
        // no one else should have responded
        assert(!pkt->isResponse());
        pkt->cmd = snoop_response_cmd;
        response_latency = snoop_response_latency;
    }

    // If this is the destination of the cache clean operation the
    // crossbar is responsible for responding. This crossbar will
    // respond when the cache clean is complete. An atomic cache clean
    // is complete when the crossbar receives the cache clean
    // request (CleanSharedReq, CleanInvalidReq), as either:
    // * no cache above had a dirty copy of the block as indicated by
    //   the satisfied flag of the packet, or
    // * the crossbar has already seen the corresponding write
    //   (WriteClean) which updates the block in the memory below.
    if (pkt->isClean() && isDestination(pkt) && pkt->satisfied()) {
        auto it = outstandingCMO.find(pkt->id);
        assert(it != outstandingCMO.end());
        // we are responding right away
        outstandingCMO.erase(it);
    } else if (pkt->cmd == MemCmd::WriteClean && isDestination(pkt)) {
        // if this is the destination of the operation, the xbar
        // sends the response to the cache clean operation only
        // after having encountered the cache clean request
        auto M5_VAR_USED ret = outstandingCMO.emplace(pkt->id, nullptr);
        // in atomic mode we know that the WriteClean packet should
        // precede the clean request
        assert(ret.second);
    }

    // add the response data
    if (pkt->isResponse()) {
        pkt_size = pkt->hasData() ? pkt->getSize() : 0;
        pkt_cmd = pkt->cmdToIndex();

        // stats updates
        pktCount[slave_port_id][master_port_id]++;
        pktSize[slave_port_id][master_port_id] += pkt_size;
        transDist[pkt_cmd]++;
    }

    // @todo: Not setting header time
    pkt->payloadDelay = response_latency;
    return response_latency;
}

Tick
CoherentXBar::recvAtomicSnoop(PacketPtr pkt, PortID master_port_id)
{
    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            masterPorts[master_port_id]->name(), pkt->print());

    // add the request snoop data
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    snoops++;
    snoopTraffic += pkt_size;

    // forward to all snoopers
    std::pair<MemCmd, Tick> snoop_result;
    Tick snoop_response_latency = 0;
    if (snoopFilter) {
        auto sf_res = snoopFilter->lookupSnoop(pkt);
        snoop_response_latency += sf_res.second * clockPeriod();
        DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                __func__, masterPorts[master_port_id]->name(), pkt->print(),
                sf_res.first.size(), sf_res.second);
        snoop_result = forwardAtomic(pkt, InvalidPortID, master_port_id,
                                     sf_res.first);
    } else {
        snoop_result = forwardAtomic(pkt, InvalidPortID);
    }
    MemCmd snoop_response_cmd = snoop_result.first;
    snoop_response_latency += snoop_result.second;

    if (snoop_response_cmd != MemCmd::InvalidCmd)
        pkt->cmd = snoop_response_cmd;

    // add the response snoop data
    if (pkt->isResponse()) {
        snoops++;
    }

    // @todo: Not setting header time
    pkt->payloadDelay = snoop_response_latency;
    return snoop_response_latency;
}

std::pair<MemCmd, Tick>
CoherentXBar::forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id,
                            PortID source_master_port_id,
                            const std::vector<QueuedSlavePort*>& dests)
{
    // the packet may be changed on snoops, record the original
    // command to enable us to restore it between snoops so that
    // additional snoops can take place properly
    MemCmd orig_cmd = pkt->cmd;
    MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
    Tick snoop_response_latency = 0;

    // snoops should only happen if the system isn't bypassing caches
    assert(!system->bypassCaches());

    unsigned fanout = 0;

    for (const auto& p: dests) {
        // we could have gotten this request from a snooping master
        // (corresponding to our own slave port that is also in
        // snoopPorts) and should not send it back to where it came
        // from
        if (exclude_slave_port_id != InvalidPortID &&
            p->getId() == exclude_slave_port_id)
            continue;

        Tick latency = p->sendAtomicSnoop(pkt);
        fanout++;

        // in contrast to a functional access, we have to keep on
        // going as all snoopers must be updated even if we get a
        // response
        if (!pkt->isResponse())
            continue;

        // response from snoop agent
        assert(pkt->cmd != orig_cmd);
        assert(pkt->cacheResponding());
        // should only happen once
        assert(snoop_response_cmd == MemCmd::InvalidCmd);
        // save response state
        snoop_response_cmd = pkt->cmd;
        snoop_response_latency = latency;

        if (snoopFilter) {
            // Handle responses by the snoopers and differentiate between
            // responses to requests from above and snoops from below
            if (source_master_port_id != InvalidPortID) {
                // Getting a response for a snoop from below
                assert(exclude_slave_port_id == InvalidPortID);
                snoopFilter->updateSnoopForward(pkt, *p,
                                    *masterPorts[source_master_port_id]);
            } else {
                // Getting a response for a request from above
                assert(source_master_port_id == InvalidPortID);
                snoopFilter->updateSnoopResponse(pkt, *p,
                                    *slavePorts[exclude_slave_port_id]);
            }
        }
        // restore original packet state for remaining snoopers
        pkt->cmd = orig_cmd;
    }

    // Stats for fanout
    snoopFanout.sample(fanout);

    // the packet is restored as part of the loop and any potential
    // snoop response is part of the returned pair
    return std::make_pair(snoop_response_cmd, snoop_response_latency);
}

void
CoherentXBar::recvFunctional(PacketPtr pkt, PortID slave_port_id)
{
    if (!pkt->isPrint()) {
        // don't do DPRINTFs on PrintReq as it clutters up the output
        DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
                slavePorts[slave_port_id]->name(), pkt->print());
    }

    if (!system->bypassCaches()) {
        // forward to all snoopers but the source
        forwardFunctional(pkt, slave_port_id);
    }

    // there is no need to continue if the snooping has found what we
    // were looking for and the packet is already a response
    if (!pkt->isResponse()) {
        // since our slave ports are queued ports we need to check them as well
        for (const auto& p : slavePorts) {
            // if we find a response that has the data, then the
            // downstream caches/memories may be out of date, so simply stop
            // here
            if (p->checkFunctional(pkt)) {
                if (pkt->needsResponse())
                    pkt->makeResponse();
                return;
            }
        }

        PortID dest_id = findPort(pkt->getAddr());

        masterPorts[dest_id]->sendFunctional(pkt);
    }
}

void
CoherentXBar::recvFunctionalSnoop(PacketPtr pkt, PortID master_port_id)
{
    if (!pkt->isPrint()) {
        // don't do DPRINTFs on PrintReq as it clutters up the output
        DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
                masterPorts[master_port_id]->name(), pkt->print());
    }

    for (const auto& p : slavePorts) {
        if (p->checkFunctional(pkt)) {
            if (pkt->needsResponse())
                pkt->makeResponse();
            return;
        }
    }

    // forward to all snoopers
    forwardFunctional(pkt, InvalidPortID);
}

void
CoherentXBar::forwardFunctional(PacketPtr pkt, PortID exclude_slave_port_id)
{
    // snoops should only happen if the system isn't bypassing caches
    assert(!system->bypassCaches());

    for (const auto& p: snoopPorts) {
        // we could have gotten this request from a snooping master
        // (corresponding to our own slave port that is also in
        // snoopPorts) and should not send it back to where it came
        // from
        if (exclude_slave_port_id == InvalidPortID ||
            p->getId() != exclude_slave_port_id)
            p->sendFunctionalSnoop(pkt);

        // if we get a response we are done
        if (pkt->isResponse()) {
            break;
        }
    }
}

bool
CoherentXBar::sinkPacket(const PacketPtr pkt) const
{
    // we can sink the packet if:
    // 1) the crossbar is the point of coherency, and a cache is
    //    responding after being snooped
    // 2) the crossbar is the point of coherency, and the packet is a
    //    coherency packet (not a read or a write) that does not
    //    require a response
    // 3) this is a clean evict or clean writeback, but the block is
    //    already cached above this crossbar
    // 4) a cache is responding after being snooped, and the packet
    //    either does not need the block to be writable, or the cache
    //    that has promised to respond (setting the cache responding
    //    flag) is providing writable and thus had a Modified block,
    //    and no further action is needed
    return (pointOfCoherency && pkt->cacheResponding()) ||
        (pointOfCoherency && !(pkt->isRead() || pkt->isWrite()) &&
         !pkt->needsResponse()) ||
        (pkt->isCleanEviction() && pkt->isBlockCached()) ||
        (pkt->cacheResponding() &&
         (!pkt->needsWritable() || pkt->responderHadWritable()));
}

bool
CoherentXBar::forwardPacket(const PacketPtr pkt)
{
    // we are forwarding the packet if:
    // 1) this is a cache clean request to the PoU/PoC and this
    //    crossbar is above the PoU/PoC
    // 2) this is a read or a write
    // 3) this crossbar is above the point of coherency
    if (pkt->isClean()) {
        return !isDestination(pkt);
    }
    return pkt->isRead() || pkt->isWrite() || !pointOfCoherency;
}

void
CoherentXBar::regStats()
{
    // register the stats of the base class and our layers
    BaseXBar::regStats();
    for (auto l: reqLayers)
        l->regStats();
    for (auto l: respLayers)
        l->regStats();
    for (auto l: snoopLayers)
        l->regStats();

    snoops
        .name(name() + ".snoops")
        .desc("Total snoops (count)")
        ;

    snoopTraffic
        .name(name() + ".snoopTraffic")
        .desc("Total snoop traffic (bytes)")
        ;

    snoopFanout
        .init(0, snoopPorts.size(), 1)
        .name(name() + ".snoop_fanout")
        .desc("Request fanout histogram")
        ;
}

CoherentXBar *
CoherentXBarParams::create()
{
    return new CoherentXBar(this);
}