coherent_xbar.cc (13808:0a44fbc3a853 → 13847:c9b92a513019)
1/*
2 * Copyright (c) 2011-2018 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2006 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Ali Saidi
41 * Andreas Hansson
42 * William Wang
43 * Nikos Nikoleris
44 */
45
46/**
47 * @file
48 * Definition of a crossbar object.
49 */
50
51#include "mem/coherent_xbar.hh"
52
53#include "base/logging.hh"
54#include "base/trace.hh"
55#include "debug/AddrRanges.hh"
56#include "debug/CoherentXBar.hh"
57#include "sim/system.hh"
58
59CoherentXBar::CoherentXBar(const CoherentXBarParams *p)
60 : BaseXBar(p), system(p->system), snoopFilter(p->snoop_filter),
61 snoopResponseLatency(p->snoop_response_latency),
62 pointOfCoherency(p->point_of_coherency),
63 pointOfUnification(p->point_of_unification)
64{
65 // create the ports based on the size of the master and slave
 66    // vector ports, and the presence of the default port; the ports
67 // are enumerated starting from zero
68 for (int i = 0; i < p->port_master_connection_count; ++i) {
69 std::string portName = csprintf("%s.master[%d]", name(), i);
70 MasterPort* bp = new CoherentXBarMasterPort(portName, *this, i);
71 masterPorts.push_back(bp);
72 reqLayers.push_back(new ReqLayer(*bp, *this,
73 csprintf(".reqLayer%d", i)));
74 snoopLayers.push_back(
75 new SnoopRespLayer(*bp, *this, csprintf(".snoopLayer%d", i)));
76 }
77
78 // see if we have a default slave device connected and if so add
79 // our corresponding master port
80 if (p->port_default_connection_count) {
81 defaultPortID = masterPorts.size();
82 std::string portName = name() + ".default";
83 MasterPort* bp = new CoherentXBarMasterPort(portName, *this,
84 defaultPortID);
85 masterPorts.push_back(bp);
86 reqLayers.push_back(new ReqLayer(*bp, *this, csprintf(".reqLayer%d",
87 defaultPortID)));
88 snoopLayers.push_back(new SnoopRespLayer(*bp, *this,
89 csprintf(".snoopLayer%d",
90 defaultPortID)));
91 }
92
93 // create the slave ports, once again starting at zero
94 for (int i = 0; i < p->port_slave_connection_count; ++i) {
95 std::string portName = csprintf("%s.slave[%d]", name(), i);
96 QueuedSlavePort* bp = new CoherentXBarSlavePort(portName, *this, i);
97 slavePorts.push_back(bp);
98 respLayers.push_back(new RespLayer(*bp, *this,
99 csprintf(".respLayer%d", i)));
100 snoopRespPorts.push_back(new SnoopRespPort(*bp, *this));
101 }
102}
103
104CoherentXBar::~CoherentXBar()
105{
106 for (auto l: reqLayers)
107 delete l;
108 for (auto l: respLayers)
109 delete l;
110 for (auto l: snoopLayers)
111 delete l;
112 for (auto p: snoopRespPorts)
113 delete p;
114}
115
116void
117CoherentXBar::init()
118{
119 BaseXBar::init();
120
121 // iterate over our slave ports and determine which of our
122 // neighbouring master ports are snooping and add them as snoopers
123 for (const auto& p: slavePorts) {
124 // check if the connected master port is snooping
125 if (p->isSnooping()) {
126 DPRINTF(AddrRanges, "Adding snooping master %s\n",
127 p->getMasterPort().name());
128 snoopPorts.push_back(p);
129 }
130 }
131
132 if (snoopPorts.empty())
133 warn("CoherentXBar %s has no snooping ports attached!\n", name());
134
135 // inform the snoop filter about the slave ports so it can create
136 // its own internal representation
137 if (snoopFilter)
138 snoopFilter->setSlavePorts(slavePorts);
139}
140
141bool
142CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
143{
144 // determine the source port based on the id
145 SlavePort *src_port = slavePorts[slave_port_id];
146
147 // remember if the packet is an express snoop
148 bool is_express_snoop = pkt->isExpressSnoop();
149 bool cache_responding = pkt->cacheResponding();
150 // for normal requests, going downstream, the express snoop flag
151 // and the cache responding flag should always be the same
152 assert(is_express_snoop == cache_responding);
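    // a packet arriving here with cacheResponding set can only be the
    // express snoop that an upstream cache sent downstream when committing
    // to respond (see setExpressSnoop further down), hence the two flags
    // must agree on this path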
153
154 // determine the destination based on the destination address range
155 AddrRange addr_range = RangeSize(pkt->getAddr(), pkt->getSize());
156 PortID master_port_id = findPort(addr_range);
157
158 // test if the crossbar should be considered occupied for the current
159 // port, and exclude express snoops from the check
160 if (!is_express_snoop && !reqLayers[master_port_id]->tryTiming(src_port)) {
161 DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
162 src_port->name(), pkt->print());
163 return false;
164 }
165
166 DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
167 src_port->name(), pkt->print());
168
169 // store size and command as they might be modified when
170 // forwarding the packet
171 unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
172 unsigned int pkt_cmd = pkt->cmdToIndex();
173
174 // store the old header delay so we can restore it if needed
175 Tick old_header_delay = pkt->headerDelay;
176
177 // a request sees the frontend and forward latency
178 Tick xbar_delay = (frontendLatency + forwardLatency) * clockPeriod();
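    // as an illustration only (these numbers are not defaults): with a 1 ns
    // clock period and frontendLatency = 3, forwardLatency = 4 cycles, the
    // request is charged 7 cycles, i.e. xbar_delay = 7000 ticks at the usual
    // 1 ps tick resolution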
179
180 // set the packet header and payload delay
181 calcPacketTiming(pkt, xbar_delay);
182
 183    // determine how long the crossbar layer is busy
184 Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;
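    // the request layer is held busy from the next clock edge until the
    // payload has streamed across; this is the time handed to
    // succeededTiming further down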
185
186 // is this the destination point for this packet? (e.g. true if
187 // this xbar is the PoC for a cache maintenance operation to the
188 // PoC) otherwise the destination is any cache that can satisfy
189 // the request
190 const bool is_destination = isDestination(pkt);
191
192 const bool snoop_caches = !system->bypassCaches() &&
193 pkt->cmd != MemCmd::WriteClean;
194 if (snoop_caches) {
195 assert(pkt->snoopDelay == 0);
196
197 if (pkt->isClean() && !is_destination) {
198 // before snooping we need to make sure that the memory
199 // below is not busy and the cache clean request can be
200 // forwarded to it
201 if (!masterPorts[master_port_id]->tryTiming(pkt)) {
202 DPRINTF(CoherentXBar, "%s: src %s packet %s RETRY\n", __func__,
203 src_port->name(), pkt->print());
204
205 // update the layer state and schedule an idle event
206 reqLayers[master_port_id]->failedTiming(src_port,
207 clockEdge(Cycles(1)));
208 return false;
209 }
210 }
211
212
213 // the packet is a memory-mapped request and should be
 214    // broadcast to all our snoopers except the source
215 if (snoopFilter) {
216 // check with the snoop filter where to forward this packet
217 auto sf_res = snoopFilter->lookupRequest(pkt, *src_port);
218 // the time required by a packet to be delivered through
 219        // the xbar also has to be charged with the lookup latency
220 // of the snoop filter
221 pkt->headerDelay += sf_res.second * clockPeriod();
222 DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
223 __func__, src_port->name(), pkt->print(),
224 sf_res.first.size(), sf_res.second);
225
226 if (pkt->isEviction()) {
227 // for block-evicting packets, i.e. writebacks and
228 // clean evictions, there is no need to snoop up, as
229 // all we do is determine if the block is cached or
230 // not, instead just set it here based on the snoop
231 // filter result
232 if (!sf_res.first.empty())
233 pkt->setBlockCached();
234 } else {
235 forwardTiming(pkt, slave_port_id, sf_res.first);
236 }
237 } else {
238 forwardTiming(pkt, slave_port_id);
239 }
240
241 // add the snoop delay to our header delay, and then reset it
242 pkt->headerDelay += pkt->snoopDelay;
243 pkt->snoopDelay = 0;
244 }
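    // at this point any delay the snooped caches accumulated in
    // pkt->snoopDelay during forwardTiming has been folded into headerDelay,
    // so the downstream recipient also pays for the upward snooping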
245
246 // set up a sensible starting point
247 bool success = true;
248
249 // remember if the packet will generate a snoop response by
250 // checking if a cache set the cacheResponding flag during the
251 // snooping above
252 const bool expect_snoop_resp = !cache_responding && pkt->cacheResponding();
253 bool expect_response = pkt->needsResponse() && !pkt->cacheResponding();
254
255 const bool sink_packet = sinkPacket(pkt);
256
257 // in certain cases the crossbar is responsible for responding
258 bool respond_directly = false;
259 // store the original address as an address mapper could possibly
260 // modify the address upon a sendTimingRequest
261 const Addr addr(pkt->getAddr());
262 if (sink_packet) {
263 DPRINTF(CoherentXBar, "%s: Not forwarding %s\n", __func__,
264 pkt->print());
265 } else {
266 // determine if we are forwarding the packet, or responding to
267 // it
268 if (forwardPacket(pkt)) {
269 // if we are passing on, rather than sinking, a packet to
270 // which an upstream cache has committed to responding,
 271            // the line was needed in a writable state and the responder
 272            // only had an Owned copy, so we need to immediately let the
 273            // downstream caches know, bypassing any flow control
274 if (pkt->cacheResponding()) {
275 pkt->setExpressSnoop();
276 }
277
278 // make sure that the write request (e.g., WriteClean)
279 // will stop at the memory below if this crossbar is its
280 // destination
281 if (pkt->isWrite() && is_destination) {
282 pkt->clearWriteThrough();
283 }
284
285 // since it is a normal request, attempt to send the packet
286 success = masterPorts[master_port_id]->sendTimingReq(pkt);
287 } else {
288 // no need to forward, turn this packet around and respond
289 // directly
290 assert(pkt->needsResponse());
291
292 respond_directly = true;
293 assert(!expect_snoop_resp);
294 expect_response = false;
295 }
296 }
297
298 if (snoopFilter && snoop_caches) {
299 // Let the snoop filter know about the success of the send operation
300 snoopFilter->finishRequest(!success, addr, pkt->isSecure());
301 }
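    // finishRequest tells the snoop filter whether the send failed (and the
    // request will therefore be retried), so it can roll back the state it
    // speculatively updated in lookupRequest above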
302
303 // check if we were successful in sending the packet onwards
304 if (!success) {
305 // express snoops should never be forced to retry
306 assert(!is_express_snoop);
307
308 // restore the header delay
309 pkt->headerDelay = old_header_delay;
310
311 DPRINTF(CoherentXBar, "%s: src %s packet %s RETRY\n", __func__,
312 src_port->name(), pkt->print());
313
314 // update the layer state and schedule an idle event
315 reqLayers[master_port_id]->failedTiming(src_port,
316 clockEdge(Cycles(1)));
317 } else {
318 // express snoops currently bypass the crossbar state entirely
319 if (!is_express_snoop) {
320 // if this particular request will generate a snoop
321 // response
322 if (expect_snoop_resp) {
 323                // we should never have an existing request outstanding
324 assert(outstandingSnoop.find(pkt->req) ==
325 outstandingSnoop.end());
326 outstandingSnoop.insert(pkt->req);
327
328 // basic sanity check on the outstanding snoops
329 panic_if(outstandingSnoop.size() > 512,
330 "Outstanding snoop requests exceeded 512\n");
331 }
332
333 // remember where to route the normal response to
334 if (expect_response || expect_snoop_resp) {
335 assert(routeTo.find(pkt->req) == routeTo.end());
336 routeTo[pkt->req] = slave_port_id;
337
338 panic_if(routeTo.size() > 512,
339 "Routing table exceeds 512 packets\n");
340 }
341
342 // update the layer state and schedule an idle event
343 reqLayers[master_port_id]->succeededTiming(packetFinishTime);
344 }
345
346 // stats updates only consider packets that were successfully sent
347 pktCount[slave_port_id][master_port_id]++;
348 pktSize[slave_port_id][master_port_id] += pkt_size;
349 transDist[pkt_cmd]++;
350
351 if (is_express_snoop) {
352 snoops++;
353 snoopTraffic += pkt_size;
354 }
355 }
356
357 if (sink_packet)
358 // queue the packet for deletion
359 pendingDelete.reset(pkt);
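    // the sender may still hold a pointer to the packet further up the call
    // stack, so instead of deleting it here it is parked in pendingDelete
    // (releasing whatever was parked before) and freed later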
360
361 // normally we respond to the packet we just received if we need to
362 PacketPtr rsp_pkt = pkt;
363 PortID rsp_port_id = slave_port_id;
364
365 // If this is the destination of the cache clean operation the
366 // crossbar is responsible for responding. This crossbar will
367 // respond when the cache clean is complete. A cache clean
368 // is complete either:
 369    //  * directly, if no cache above had a dirty copy of the block
370 // as indicated by the satisfied flag of the packet, or
371 // * when the crossbar has seen both the cache clean request
372 // (CleanSharedReq, CleanInvalidReq) and the corresponding
373 // write (WriteClean) which updates the block in the memory
374 // below.
375 if (success &&
376 ((pkt->isClean() && pkt->satisfied()) ||
377 pkt->cmd == MemCmd::WriteClean) &&
378 is_destination) {
379 PacketPtr deferred_rsp = pkt->isWrite() ? nullptr : pkt;
380 auto cmo_lookup = outstandingCMO.find(pkt->id);
381 if (cmo_lookup != outstandingCMO.end()) {
382 // the cache clean request has already reached this xbar
383 respond_directly = true;
384 if (pkt->isWrite()) {
385 rsp_pkt = cmo_lookup->second;
386 assert(rsp_pkt);
387
388 // determine the destination
389 const auto route_lookup = routeTo.find(rsp_pkt->req);
390 assert(route_lookup != routeTo.end());
391 rsp_port_id = route_lookup->second;
392 assert(rsp_port_id != InvalidPortID);
393 assert(rsp_port_id < respLayers.size());
394 // remove the request from the routing table
395 routeTo.erase(route_lookup);
396 }
397 outstandingCMO.erase(cmo_lookup);
398 } else {
399 respond_directly = false;
400 outstandingCMO.emplace(pkt->id, deferred_rsp);
401 if (!pkt->isWrite()) {
402 assert(routeTo.find(pkt->req) == routeTo.end());
403 routeTo[pkt->req] = slave_port_id;
404
405 panic_if(routeTo.size() > 512,
406 "Routing table exceeds 512 packets\n");
407 }
408 }
409 }
410
411
412 if (respond_directly) {
413 assert(rsp_pkt->needsResponse());
414 assert(success);
415
416 rsp_pkt->makeResponse();
417
418 if (snoopFilter && !system->bypassCaches()) {
419 // let the snoop filter inspect the response and update its state
420 snoopFilter->updateResponse(rsp_pkt, *slavePorts[rsp_port_id]);
421 }
422
423 // we send the response after the current packet, even if the
424 // response is not for this packet (e.g. cache clean operation
425 // where both the request and the write packet have to cross
426 // the destination xbar before the response is sent.)
427 Tick response_time = clockEdge() + pkt->headerDelay;
428 rsp_pkt->headerDelay = 0;
429
430 slavePorts[rsp_port_id]->schedTimingResp(rsp_pkt, response_time);
431 }
432
433 return success;
434}
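// A sketch of the cache clean handshake handled above, assuming this xbar is
// the destination (e.g. the PoC) of the maintenance operation:
//  1. the clean request (CleanSharedReq/CleanInvalidReq) arrives; if a cache
//     above held a dirty copy the packet is marked satisfied and a WriteClean
//     carrying the same packet id will follow
//  2. whichever of the two packets reaches this xbar first only deposits an
//     entry in outstandingCMO (and, for the clean request, in routeTo)
//  3. the second arrival finds its partner in outstandingCMO and only then is
//     the response scheduled back to the original requester
//  4. if no cache above had a dirty copy, the clean request alone completes
//     the operation and the crossbar responds directly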
435
436bool
437CoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id)
438{
439 // determine the source port based on the id
440 MasterPort *src_port = masterPorts[master_port_id];
441
442 // determine the destination
443 const auto route_lookup = routeTo.find(pkt->req);
444 assert(route_lookup != routeTo.end());
445 const PortID slave_port_id = route_lookup->second;
446 assert(slave_port_id != InvalidPortID);
447 assert(slave_port_id < respLayers.size());
448
449 // test if the crossbar should be considered occupied for the
450 // current port
451 if (!respLayers[slave_port_id]->tryTiming(src_port)) {
452 DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
453 src_port->name(), pkt->print());
454 return false;
455 }
456
457 DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
458 src_port->name(), pkt->print());
459
460 // store size and command as they might be modified when
461 // forwarding the packet
462 unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
463 unsigned int pkt_cmd = pkt->cmdToIndex();
464
465 // a response sees the response latency
466 Tick xbar_delay = responseLatency * clockPeriod();
467
468 // set the packet header and payload delay
469 calcPacketTiming(pkt, xbar_delay);
470
 471    // determine how long the crossbar layer is busy
472 Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;
473
474 if (snoopFilter && !system->bypassCaches()) {
475 // let the snoop filter inspect the response and update its state
476 snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
477 }
478
479 // send the packet through the destination slave port and pay for
480 // any outstanding header delay
481 Tick latency = pkt->headerDelay;
482 pkt->headerDelay = 0;
483 slavePorts[slave_port_id]->schedTimingResp(pkt, curTick() + latency);
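    // the slave ports are QueuedSlavePorts (see the constructor), so the
    // response is buffered in the port's packet queue and sent at the
    // requested tick; the crossbar never has to handle retries for responses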
484
485 // remove the request from the routing table
486 routeTo.erase(route_lookup);
487
488 respLayers[slave_port_id]->succeededTiming(packetFinishTime);
489
490 // stats updates
491 pktCount[slave_port_id][master_port_id]++;
492 pktSize[slave_port_id][master_port_id] += pkt_size;
493 transDist[pkt_cmd]++;
494
495 return true;
496}
497
498void
499CoherentXBar::recvTimingSnoopReq(PacketPtr pkt, PortID master_port_id)
500{
501 DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
502 masterPorts[master_port_id]->name(), pkt->print());
503
504 // update stats here as we know the forwarding will succeed
505 unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
506 transDist[pkt->cmdToIndex()]++;
507 snoops++;
508 snoopTraffic += pkt_size;
509
510 // we should only see express snoops from caches
511 assert(pkt->isExpressSnoop());
512
513 // set the packet header and payload delay, for now use forward latency
514 // @todo Assess the choice of latency further
515 calcPacketTiming(pkt, forwardLatency * clockPeriod());
516
517 // remember if a cache has already committed to responding so we
518 // can see if it changes during the snooping
519 const bool cache_responding = pkt->cacheResponding();
520
521 assert(pkt->snoopDelay == 0);
522
523 if (snoopFilter) {
524 // let the Snoop Filter work its magic and guide probing
525 auto sf_res = snoopFilter->lookupSnoop(pkt);
526 // the time required by a packet to be delivered through
 527        // the xbar also has to be charged with the lookup latency
528 // of the snoop filter
529 pkt->headerDelay += sf_res.second * clockPeriod();
530 DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
531 __func__, masterPorts[master_port_id]->name(), pkt->print(),
532 sf_res.first.size(), sf_res.second);
533
534 // forward to all snoopers
535 forwardTiming(pkt, InvalidPortID, sf_res.first);
536 } else {
537 forwardTiming(pkt, InvalidPortID);
538 }
539
540 // add the snoop delay to our header delay, and then reset it
541 pkt->headerDelay += pkt->snoopDelay;
542 pkt->snoopDelay = 0;
543
544 // if we can expect a response, remember how to route it
545 if (!cache_responding && pkt->cacheResponding()) {
546 assert(routeTo.find(pkt->req) == routeTo.end());
547 routeTo[pkt->req] = master_port_id;
548 }
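    // the snoop response, if one is produced, will arrive on one of our
    // slave ports and recvTimingSnoopResp will use this routeTo entry to
    // forward it back to the master port the express snoop came in on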
549
550 // a snoop request came from a connected slave device (one of
551 // our master ports), and if it is not coming from the slave
552 // device responsible for the address range something is
553 // wrong, hence there is nothing further to do as the packet
554 // would be going back to where it came from
555 AddrRange addr_range M5_VAR_USED =
556 RangeSize(pkt->getAddr(), pkt->getSize());
557 assert(findPort(addr_range) == master_port_id);
558}
559
560bool
561CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID slave_port_id)
562{
563 // determine the source port based on the id
564 SlavePort* src_port = slavePorts[slave_port_id];
565
566 // get the destination
567 const auto route_lookup = routeTo.find(pkt->req);
568 assert(route_lookup != routeTo.end());
569 const PortID dest_port_id = route_lookup->second;
570 assert(dest_port_id != InvalidPortID);
571
572 // determine if the response is from a snoop request we
573 // created as the result of a normal request (in which case it
574 // should be in the outstandingSnoop), or if we merely forwarded
575 // someone else's snoop request
576 const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
577 outstandingSnoop.end();
578
579 // test if the crossbar should be considered occupied for the
580 // current port, note that the check is bypassed if the response
581 // is being passed on as a normal response since this is occupying
582 // the response layer rather than the snoop response layer
583 if (forwardAsSnoop) {
584 assert(dest_port_id < snoopLayers.size());
585 if (!snoopLayers[dest_port_id]->tryTiming(src_port)) {
586 DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
587 src_port->name(), pkt->print());
588 return false;
589 }
590 } else {
591 // get the master port that mirrors this slave port internally
592 MasterPort* snoop_port = snoopRespPorts[slave_port_id];
593 assert(dest_port_id < respLayers.size());
594 if (!respLayers[dest_port_id]->tryTiming(snoop_port)) {
595 DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
596 snoop_port->name(), pkt->print());
597 return false;
598 }
599 }
600
601 DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
602 src_port->name(), pkt->print());
603
604 // store size and command as they might be modified when
605 // forwarding the packet
606 unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
607 unsigned int pkt_cmd = pkt->cmdToIndex();
608
609 // responses are never express snoops
610 assert(!pkt->isExpressSnoop());
611
612 // a snoop response sees the snoop response latency, and if it is
613 // forwarded as a normal response, the response latency
614 Tick xbar_delay =
615 (forwardAsSnoop ? snoopResponseLatency : responseLatency) *
616 clockPeriod();
617
618 // set the packet header and payload delay
619 calcPacketTiming(pkt, xbar_delay);
620
 621    // determine how long the crossbar layer is busy
622 Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;
623
624 // forward it either as a snoop response or a normal response
625 if (forwardAsSnoop) {
626 // this is a snoop response to a snoop request we forwarded,
627 // e.g. coming from the L1 and going to the L2, and it should
628 // be forwarded as a snoop response
629
630 if (snoopFilter) {
631 // update the probe filter so that it can properly track the line
632 snoopFilter->updateSnoopForward(pkt, *slavePorts[slave_port_id],
633 *masterPorts[dest_port_id]);
634 }
635
636 bool success M5_VAR_USED =
637 masterPorts[dest_port_id]->sendTimingSnoopResp(pkt);
638 pktCount[slave_port_id][dest_port_id]++;
639 pktSize[slave_port_id][dest_port_id] += pkt_size;
640 assert(success);
641
642 snoopLayers[dest_port_id]->succeededTiming(packetFinishTime);
643 } else {
644 // we got a snoop response on one of our slave ports,
645 // i.e. from a coherent master connected to the crossbar, and
646 // since we created the snoop request as part of recvTiming,
647 // this should now be a normal response again
648 outstandingSnoop.erase(pkt->req);
649
650 // this is a snoop response from a coherent master, hence it
651 // should never go back to where the snoop response came from,
652 // but instead to where the original request came from
653 assert(slave_port_id != dest_port_id);
654
655 if (snoopFilter) {
656 // update the probe filter so that it can properly track the line
657 snoopFilter->updateSnoopResponse(pkt, *slavePorts[slave_port_id],
658 *slavePorts[dest_port_id]);
659 }
660
661 DPRINTF(CoherentXBar, "%s: src %s packet %s FWD RESP\n", __func__,
662 src_port->name(), pkt->print());
663
664 // as a normal response, it should go back to a master through
665 // one of our slave ports, we also pay for any outstanding
666 // header latency
667 Tick latency = pkt->headerDelay;
668 pkt->headerDelay = 0;
669 slavePorts[dest_port_id]->schedTimingResp(pkt, curTick() + latency);
670
671 respLayers[dest_port_id]->succeededTiming(packetFinishTime);
672 }
673
674 // remove the request from the routing table
675 routeTo.erase(route_lookup);
676
677 // stats updates
678 transDist[pkt_cmd]++;
679 snoops++;
680 snoopTraffic += pkt_size;
681
682 return true;
683}
684
685
686void
687CoherentXBar::forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id,
688 const std::vector<QueuedSlavePort*>& dests)
689{
690 DPRINTF(CoherentXBar, "%s for %s\n", __func__, pkt->print());
691
692 // snoops should only happen if the system isn't bypassing caches
693 assert(!system->bypassCaches());
694
695 unsigned fanout = 0;
696
697 for (const auto& p: dests) {
698 // we could have gotten this request from a snooping master
699 // (corresponding to our own slave port that is also in
700 // snoopPorts) and should not send it back to where it came
701 // from
702 if (exclude_slave_port_id == InvalidPortID ||
703 p->getId() != exclude_slave_port_id) {
704 // cache is not allowed to refuse snoop
705 p->sendTimingSnoopReq(pkt);
706 fanout++;
707 }
708 }
709
710 // Stats for fanout of this forward operation
711 snoopFanout.sample(fanout);
712}
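// the callers above pass either the destination list computed by the snoop
// filter or, via the two-argument form, what is presumably the crossbar's own
// snoopPorts list built in init(), broadcasting the snoop to every snooping
// port except the excluded source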
713
714void
715CoherentXBar::recvReqRetry(PortID master_port_id)
716{
717 // responses and snoop responses never block on forwarding them,
718 // so the retry will always be coming from a port to which we
719 // tried to forward a request
720 reqLayers[master_port_id]->recvRetry();
721}
722
723Tick
724CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id,
725 MemBackdoorPtr *backdoor)
803 response_latency = masterPorts[master_port_id]->sendAtomic(pkt);
804 auto master = masterPorts[master_port_id];
805 response_latency = backdoor ?
806 master->sendAtomicBackdoor(pkt, *backdoor) :
807 master->sendAtomic(pkt);
808 } else {
809 // a packet that does not need a response would have been sunk above
810 assert(pkt->needsResponse());
811
812 pkt->makeResponse();
813 }
814 }
815
816 // stats updates for the request
817 pktCount[slave_port_id][master_port_id]++;
818 pktSize[slave_port_id][master_port_id] += pkt_size;
819 transDist[pkt_cmd]++;
820
821
822 // if lower levels have replied, tell the snoop filter
823 if (!system->bypassCaches() && snoopFilter && pkt->isResponse()) {
824 snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
825 }
826
827 // if we got a response from a snooper, restore it here
828 if (snoop_response_cmd != MemCmd::InvalidCmd) {
829 // no one else should have responded
830 assert(!pkt->isResponse());
831 pkt->cmd = snoop_response_cmd;
832 response_latency = snoop_response_latency;
833 }
834
835 // If this is the destination of the cache clean operation the
836 // crossbar is responsible for responding. This crossbar will
837 // respond when the cache clean is complete. An atomic cache clean
838 // is complete when the crossbar receives the cache clean
839 // request (CleanSharedReq, CleanInvalidReq), because either:
840 // * no cache above had a dirty copy of the block as indicated by
841 // the satisfied flag of the packet, or
842 // * the crossbar has already seen the corresponding write
843 // (WriteClean) which updates the block in the memory below.
844 if (pkt->isClean() && isDestination(pkt) && pkt->satisfied()) {
845 auto it = outstandingCMO.find(pkt->id);
846 assert(it != outstandingCMO.end());
847 // we are responding right away
848 outstandingCMO.erase(it);
849 } else if (pkt->cmd == MemCmd::WriteClean && isDestination(pkt)) {
850 // if this is the destination of the operation, the xbar
851 // sends the response to the cache clean operation only
852 // after having encountered the cache clean request
853 auto M5_VAR_USED ret = outstandingCMO.emplace(pkt->id, nullptr);
854 // in atomic mode we know that the WriteClean packet should
855 // precede the clean request
856 assert(ret.second);
857 }
858
859 // add the response data
860 if (pkt->isResponse()) {
861 pkt_size = pkt->hasData() ? pkt->getSize() : 0;
862 pkt_cmd = pkt->cmdToIndex();
863
864 // stats updates
865 pktCount[slave_port_id][master_port_id]++;
866 pktSize[slave_port_id][master_port_id] += pkt_size;
867 transDist[pkt_cmd]++;
868 }
869
870 // @todo: Not setting header time
871 pkt->payloadDelay = response_latency;
872 return response_latency;
873}
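/*
 * A minimal standalone sketch, not part of coherent_xbar.cc: it only
 * illustrates how recvAtomic() above composes the latency it returns, with
 * made-up values, assuming gem5's default 1 ps tick (so a 1 GHz crossbar has
 * clockPeriod() == 1000 ticks). Note that the only functional difference
 * between the two revisions shown above is the downstream dispatch: the
 * newer revision issues the access through sendAtomicBackdoor() when the
 * caller passed a backdoor pointer, and through sendAtomic() otherwise.
 */
#include <cstdint>
#include <iostream>

int main()
{
    using Tick = std::uint64_t;

    const Tick clock_period = 1000;        // 1 GHz crossbar: 1000 ticks/cycle
    const Tick sf_lookup_cycles = 1;       // snoop filter lookup latency
    const Tick snooper_latency = 20000;    // returned by sendAtomicSnoop()
    const Tick downstream_latency = 50000; // returned by sendAtomic() or
                                           // sendAtomicBackdoor()

    // as in recvAtomic(): snoop filter cycles become ticks and are added to
    // whatever the snooped caches reported
    const Tick snoop_response_latency =
        sf_lookup_cycles * clock_period + snooper_latency;

    // if a snooper produced the response its latency is used, otherwise the
    // latency of the access to the memory below
    const bool snooper_responded = true;
    const Tick response_latency =
        snooper_responded ? snoop_response_latency : downstream_latency;

    std::cout << "response latency: " << response_latency << " ticks\n";
    return 0;
}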
874
875Tick
876CoherentXBar::recvAtomicSnoop(PacketPtr pkt, PortID master_port_id)
877{
878 DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
879 masterPorts[master_port_id]->name(), pkt->print());
880
881 // add the request snoop data
882 unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
883 snoops++;
884 snoopTraffic += pkt_size;
885
886 // forward to all snoopers
887 std::pair<MemCmd, Tick> snoop_result;
888 Tick snoop_response_latency = 0;
889 if (snoopFilter) {
890 auto sf_res = snoopFilter->lookupSnoop(pkt);
891 snoop_response_latency += sf_res.second * clockPeriod();
892 DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
893 __func__, masterPorts[master_port_id]->name(), pkt->print(),
894 sf_res.first.size(), sf_res.second);
895 snoop_result = forwardAtomic(pkt, InvalidPortID, master_port_id,
896 sf_res.first);
897 } else {
898 snoop_result = forwardAtomic(pkt, InvalidPortID);
899 }
900 MemCmd snoop_response_cmd = snoop_result.first;
901 snoop_response_latency += snoop_result.second;
902
903 if (snoop_response_cmd != MemCmd::InvalidCmd)
904 pkt->cmd = snoop_response_cmd;
905
906 // add the response snoop data
907 if (pkt->isResponse()) {
908 snoops++;
909 }
910
911 // @todo: Not setting header time
912 pkt->payloadDelay = snoop_response_latency;
913 return snoop_response_latency;
914}
915
916std::pair<MemCmd, Tick>
917CoherentXBar::forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id,
918 PortID source_master_port_id,
919 const std::vector<QueuedSlavePort*>& dests)
920{
921 // the packet may be changed on snoops, record the original
922 // command to enable us to restore it between snoops so that
923 // additional snoops can take place properly
924 MemCmd orig_cmd = pkt->cmd;
925 MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
926 Tick snoop_response_latency = 0;
927
928 // snoops should only happen if the system isn't bypassing caches
929 assert(!system->bypassCaches());
930
931 unsigned fanout = 0;
932
933 for (const auto& p: dests) {
934 // we could have gotten this request from a snooping master
935 // (corresponding to our own slave port that is also in
936 // snoopPorts) and should not send it back to where it came
937 // from
938 if (exclude_slave_port_id != InvalidPortID &&
939 p->getId() == exclude_slave_port_id)
940 continue;
941
942 Tick latency = p->sendAtomicSnoop(pkt);
943 fanout++;
944
945 // in contrast to a functional access, we have to keep on
946 // going as all snoopers must be updated even if we get a
947 // response
948 if (!pkt->isResponse())
949 continue;
950
951 // response from snoop agent
952 assert(pkt->cmd != orig_cmd);
953 assert(pkt->cacheResponding());
954 // should only happen once
955 assert(snoop_response_cmd == MemCmd::InvalidCmd);
956 // save response state
957 snoop_response_cmd = pkt->cmd;
958 snoop_response_latency = latency;
959
960 if (snoopFilter) {
961 // Handle responses by the snoopers and differentiate between
962 // responses to requests from above and snoops from below
963 if (source_master_port_id != InvalidPortID) {
964 // Getting a response for a snoop from below
965 assert(exclude_slave_port_id == InvalidPortID);
966 snoopFilter->updateSnoopForward(pkt, *p,
967 *masterPorts[source_master_port_id]);
968 } else {
969 // Getting a response for a request from above
970 assert(source_master_port_id == InvalidPortID);
971 snoopFilter->updateSnoopResponse(pkt, *p,
972 *slavePorts[exclude_slave_port_id]);
973 }
974 }
975 // restore original packet state for remaining snoopers
976 pkt->cmd = orig_cmd;
977 }
978
979 // Stats for fanout
980 snoopFanout.sample(fanout);
981
982 // the packet is restored as part of the loop and any potential
983 // snoop response is part of the returned pair
984 return std::make_pair(snoop_response_cmd, snoop_response_latency);
985}
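/*
 * A standalone sketch, not gem5 code (ToyPkt, forwardToAll and the snooper
 * lambdas are made-up names): it restates the save-and-restore pattern used
 * by forwardAtomic() above. Every snooper must observe the original command,
 * at most one snooper may turn the packet into a response, and that response
 * is handed back out-of-band as a separate value instead of being left in
 * the packet.
 */
#include <cassert>
#include <functional>
#include <string>
#include <vector>

struct ToyPkt { std::string cmd; bool is_response = false; };

std::string
forwardToAll(ToyPkt &pkt,
             const std::vector<std::function<bool(ToyPkt&)>> &snoopers)
{
    const std::string orig_cmd = pkt.cmd;
    std::string snoop_response_cmd;        // empty means no snooper responded

    for (const auto &snoop : snoopers) {
        if (!snoop(pkt))
            continue;                      // this snooper did not respond

        // save the response command out-of-band and restore the original
        // packet so the remaining snoopers still see the request
        assert(snoop_response_cmd.empty());
        snoop_response_cmd = pkt.cmd;
        pkt.cmd = orig_cmd;
        pkt.is_response = false;
    }
    return snoop_response_cmd;
}

int main()
{
    ToyPkt pkt;
    pkt.cmd = "ReadSharedReq";

    auto silent = [](ToyPkt &) { return false; };
    auto owner = [](ToyPkt &p) {
        p.cmd = "ReadResp";
        p.is_response = true;
        return true;
    };

    // every snooper sees "ReadSharedReq"; only the owner responds
    const std::string resp = forwardToAll(pkt, {silent, owner, silent});
    assert(resp == "ReadResp" && pkt.cmd == "ReadSharedReq");
    return 0;
}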
986
987void
988CoherentXBar::recvFunctional(PacketPtr pkt, PortID slave_port_id)
989{
990 if (!pkt->isPrint()) {
991 // don't do DPRINTFs on PrintReq as it clutters up the output
992 DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
993 slavePorts[slave_port_id]->name(), pkt->print());
994 }
995
996 if (!system->bypassCaches()) {
997 // forward to all snoopers but the source
998 forwardFunctional(pkt, slave_port_id);
999 }
1000
1001 // there is no need to continue if the snooping has found what we
1002 // were looking for and the packet is already a response
1003 if (!pkt->isResponse()) {
1004 // since our slave ports are queued ports we need to check them as well
1005 for (const auto& p : slavePorts) {
1006 // if we find a response that has the data, then the
1007 // downstream caches/memories may be out of date, so simply stop
1008 // here
1009 if (p->trySatisfyFunctional(pkt)) {
1010 if (pkt->needsResponse())
1011 pkt->makeResponse();
1012 return;
1013 }
1014 }
1015
1016 PortID dest_id = findPort(RangeSize(pkt->getAddr(), pkt->getSize()));
1017
1018 masterPorts[dest_id]->sendFunctional(pkt);
1019 }
1020}
1021
1022void
1023CoherentXBar::recvFunctionalSnoop(PacketPtr pkt, PortID master_port_id)
1024{
1025 if (!pkt->isPrint()) {
1026 // don't do DPRINTFs on PrintReq as it clutters up the output
1027 DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
1028 masterPorts[master_port_id]->name(), pkt->print());
1029 }
1030
1031 for (const auto& p : slavePorts) {
1032 if (p->trySatisfyFunctional(pkt)) {
1033 if (pkt->needsResponse())
1034 pkt->makeResponse();
1035 return;
1036 }
1037 }
1038
1039 // forward to all snoopers
1040 forwardFunctional(pkt, InvalidPortID);
1041}
1042
1043void
1044CoherentXBar::forwardFunctional(PacketPtr pkt, PortID exclude_slave_port_id)
1045{
1046 // snoops should only happen if the system isn't bypassing caches
1047 assert(!system->bypassCaches());
1048
1049 for (const auto& p: snoopPorts) {
1050 // we could have gotten this request from a snooping master
1051 // (corresponding to our own slave port that is also in
1052 // snoopPorts) and should not send it back to where it came
1053 // from
1054 if (exclude_slave_port_id == InvalidPortID ||
1055 p->getId() != exclude_slave_port_id)
1056 p->sendFunctionalSnoop(pkt);
1057
1058 // if we get a response we are done
1059 if (pkt->isResponse()) {
1060 break;
1061 }
1062 }
1063}
1064
1065bool
1066CoherentXBar::sinkPacket(const PacketPtr pkt) const
1067{
1068 // we can sink the packet if:
1069 // 1) the crossbar is the point of coherency, and a cache is
1070 // responding after being snooped
1071 // 2) the crossbar is the point of coherency, and the packet is a
1072 // coherency packet (not a read or a write) that does not
1073 // require a response
1074 // 3) this is a clean evict or clean writeback, but the block is
1075 // still cached above this crossbar
1076 // 4) a cache is responding after being snooped, and the packet
1077 // either does not need the block to be writable, or the cache
1078 // that has promised to respond (setting the cache responding
1079 // flag) is providing writable and thus had a Modified block,
1080 // and no further action is needed
1081 return (pointOfCoherency && pkt->cacheResponding()) ||
1082 (pointOfCoherency && !(pkt->isRead() || pkt->isWrite()) &&
1083 !pkt->needsResponse()) ||
1084 (pkt->isCleanEviction() && pkt->isBlockCached()) ||
1085 (pkt->cacheResponding() &&
1086 (!pkt->needsWritable() || pkt->responderHadWritable()));
1087}
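/*
 * A standalone sketch, not gem5 code (PktFlags and wouldSink are made-up
 * names): it restates the four sink conditions of sinkPacket() above on
 * plain bools so the individual cases can be exercised in isolation.
 */
#include <cassert>

struct PktFlags {
    bool cache_responding, is_read, is_write, needs_response,
         is_clean_eviction, is_block_cached, needs_writable,
         responder_had_writable;
};

bool
wouldSink(const PktFlags &p, bool point_of_coherency)
{
    return (point_of_coherency && p.cache_responding) ||
        (point_of_coherency && !(p.is_read || p.is_write) &&
         !p.needs_response) ||
        (p.is_clean_eviction && p.is_block_cached) ||
        (p.cache_responding &&
         (!p.needs_writable || p.responder_had_writable));
}

int main()
{
    // case 3: a CleanEvict whose block is still cached above is dropped here
    PktFlags clean_evict{};
    clean_evict.is_clean_eviction = true;
    clean_evict.is_block_cached = true;
    assert(wouldSink(clean_evict, false));

    // an ordinary read with no cache responding is never sunk
    PktFlags read_req{};
    read_req.is_read = true;
    read_req.needs_response = true;
    assert(!wouldSink(read_req, true));
    return 0;
}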
1088
1089bool
1090CoherentXBar::forwardPacket(const PacketPtr pkt)
1091{
1092 // we are forwarding the packet if:
1093 // 1) this is a cache clean request to the PoU/PoC and this
1094 // crossbar is above the PoU/PoC
1095 // 2) this is a read or a write
1096 // 3) this crossbar is above the point of coherency
1097 if (pkt->isClean()) {
1098 return !isDestination(pkt);
1099 }
1100 return pkt->isRead() || pkt->isWrite() || !pointOfCoherency;
1101}
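/*
 * A standalone sketch, not gem5 code (wouldForward is a made-up name), of the
 * forwarding decision in forwardPacket() above: cache clean requests stop at
 * their destination, and everything else is forwarded if it is a read or a
 * write, or if this crossbar is still above the point of coherency.
 */
#include <cassert>

bool
wouldForward(bool is_clean, bool is_destination, bool is_read, bool is_write,
             bool point_of_coherency)
{
    if (is_clean)
        return !is_destination;
    return is_read || is_write || !point_of_coherency;
}

int main()
{
    // a CleanShared that has reached its destination is not forwarded
    assert(!wouldForward(true, true, false, false, true));
    // an ordinary write is forwarded even by the point of coherency
    assert(wouldForward(false, false, false, true, true));
    // a non-read/write coherency packet is forwarded only above the PoC
    assert(wouldForward(false, false, false, false, false));
    assert(!wouldForward(false, false, false, false, true));
    return 0;
}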
1102
1103
1104void
1105CoherentXBar::regStats()
1106{
1107 // register the stats of the base class and our layers
1108 BaseXBar::regStats();
1109 for (auto l: reqLayers)
1110 l->regStats();
1111 for (auto l: respLayers)
1112 l->regStats();
1113 for (auto l: snoopLayers)
1114 l->regStats();
1115
1116 snoops
1117 .name(name() + ".snoops")
1118 .desc("Total snoops (count)")
1119 ;
1120
1121 snoopTraffic
1122 .name(name() + ".snoopTraffic")
1123 .desc("Total snoop traffic (bytes)")
1124 ;
1125
1126 snoopFanout
1127 .init(0, snoopPorts.size(), 1)
1128 .name(name() + ".snoop_fanout")
1129 .desc("Request fanout histogram")
1130 ;
1131}
1132
1133CoherentXBar *
1134CoherentXBarParams::create()
1135{
1136 return new CoherentXBar(this);
1137}