coherent_xbar.cc revision 14006
/*
 * Copyright (c) 2011-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Andreas Hansson
 *          William Wang
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of a coherent crossbar object.
 */

#include "mem/coherent_xbar.hh"

#include "base/logging.hh"
#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/CoherentXBar.hh"
#include "sim/system.hh"

CoherentXBar::CoherentXBar(const CoherentXBarParams *p)
    : BaseXBar(p), system(p->system), snoopFilter(p->snoop_filter),
      snoopResponseLatency(p->snoop_response_latency),
      maxOutstandingSnoopCheck(p->max_outstanding_snoops),
      maxRoutingTableSizeCheck(p->max_routing_table_size),
      pointOfCoherency(p->point_of_coherency),
      pointOfUnification(p->point_of_unification)
{
    // create the ports based on the size of the master and slave
    // vector ports, and the presence of the default port; the ports
    // are enumerated starting from zero
    for (int i = 0; i < p->port_master_connection_count; ++i) {
        std::string portName = csprintf("%s.master[%d]", name(), i);
        MasterPort* bp = new CoherentXBarMasterPort(portName, *this, i);
        masterPorts.push_back(bp);
        reqLayers.push_back(new ReqLayer(*bp, *this,
                                         csprintf(".reqLayer%d", i)));
        snoopLayers.push_back(
                new SnoopRespLayer(*bp, *this, csprintf(".snoopLayer%d", i)));
    }

    // see if we have a default slave device connected and if so add
    // our corresponding master port
    if (p->port_default_connection_count) {
        defaultPortID = masterPorts.size();
        std::string portName = name() + ".default";
        MasterPort* bp = new CoherentXBarMasterPort(portName, *this,
                                                    defaultPortID);
        masterPorts.push_back(bp);
        reqLayers.push_back(new ReqLayer(*bp, *this, csprintf(".reqLayer%d",
                                         defaultPortID)));
        snoopLayers.push_back(new SnoopRespLayer(*bp, *this,
                                                 csprintf(".snoopLayer%d",
                                                          defaultPortID)));
    }

    // create the slave ports, once again starting at zero
    for (int i = 0; i < p->port_slave_connection_count; ++i) {
        std::string portName = csprintf("%s.slave[%d]", name(), i);
        QueuedSlavePort* bp = new CoherentXBarSlavePort(portName, *this, i);
        slavePorts.push_back(bp);
        respLayers.push_back(new RespLayer(*bp, *this,
                                           csprintf(".respLayer%d", i)));
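        // internal master port paired with this slave port; it acts as
        // the sender on the response layer when a snoop response
        // received here is turned around and forwarded as a normal
        // response (see recvTimingSnoopResp)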
        snoopRespPorts.push_back(new SnoopRespPort(*bp, *this));
    }
}

CoherentXBar::~CoherentXBar()
{
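    // clean up the request, response and snoop layers, as well as the
    // internal snoop response ports, all created in the constructor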
    for (auto l: reqLayers)
        delete l;
    for (auto l: respLayers)
        delete l;
    for (auto l: snoopLayers)
        delete l;
    for (auto p: snoopRespPorts)
        delete p;
}

void
CoherentXBar::init()
{
    BaseXBar::init();

    // iterate over our slave ports and determine which of our
    // neighbouring master ports are snooping and add them as snoopers
    for (const auto& p: slavePorts) {
        // check if the connected master port is snooping
        if (p->isSnooping()) {
            DPRINTF(AddrRanges, "Adding snooping master %s\n",
                    p->getMasterPort().name());
            snoopPorts.push_back(p);
        }
    }

    if (snoopPorts.empty())
        warn("CoherentXBar %s has no snooping ports attached!\n", name());

    // inform the snoop filter about the slave ports so it can create
    // its own internal representation
    if (snoopFilter)
        snoopFilter->setSlavePorts(slavePorts);
}

bool
CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
{
    // determine the source port based on the id
    SlavePort *src_port = slavePorts[slave_port_id];

    // remember if the packet is an express snoop
    bool is_express_snoop = pkt->isExpressSnoop();
    bool cache_responding = pkt->cacheResponding();
    // for normal requests, going downstream, the express snoop flag
    // and the cache responding flag should always be the same
    assert(is_express_snoop == cache_responding);

    // determine the destination based on the destination address range
    PortID master_port_id = findPort(pkt->getAddrRange());

    // test if the crossbar should be considered occupied for the current
    // port, and exclude express snoops from the check
    if (!is_express_snoop && !reqLayers[master_port_id]->tryTiming(src_port)) {
        DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                src_port->name(), pkt->print());
        return false;
    }

    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            src_port->name(), pkt->print());

    // store size and command as they might be modified when
    // forwarding the packet
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    // store the old header delay so we can restore it if needed
    Tick old_header_delay = pkt->headerDelay;

    // a request sees the frontend and forward latency
    Tick xbar_delay = (frontendLatency + forwardLatency) * clockPeriod();

    // set the packet header and payload delay
    calcPacketTiming(pkt, xbar_delay);

    // determine how long the crossbar layer is busy
    Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;

    // is this the destination point for this packet? (e.g. true if
    // this xbar is the PoC for a cache maintenance operation to the
    // PoC) otherwise the destination is any cache that can satisfy
    // the request
    const bool is_destination = isDestination(pkt);

    const bool snoop_caches = !system->bypassCaches() &&
        pkt->cmd != MemCmd::WriteClean;
    if (snoop_caches) {
        assert(pkt->snoopDelay == 0);

        if (pkt->isClean() && !is_destination) {
            // before snooping we need to make sure that the memory
            // below is not busy and the cache clean request can be
            // forwarded to it
            if (!masterPorts[master_port_id]->tryTiming(pkt)) {
                DPRINTF(CoherentXBar, "%s: src %s packet %s RETRY\n", __func__,
                        src_port->name(), pkt->print());

                // update the layer state and schedule an idle event
                reqLayers[master_port_id]->failedTiming(src_port,
                                                        clockEdge(Cycles(1)));
                return false;
            }
        }

        // the packet is a memory-mapped request and should be
        // broadcast to all our snoopers but the source
        if (snoopFilter) {
            // check with the snoop filter where to forward this packet
            auto sf_res = snoopFilter->lookupRequest(pkt, *src_port);
            // the time required by a packet to be delivered through
            // the xbar also has to account for the lookup latency of
            // the snoop filter
            pkt->headerDelay += sf_res.second * clockPeriod();
            DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                    __func__, src_port->name(), pkt->print(),
                    sf_res.first.size(), sf_res.second);

            if (pkt->isEviction()) {
                // for block-evicting packets, i.e. writebacks and
                // clean evictions, there is no need to snoop up, as
                // all we do is determine if the block is cached or
                // not, instead just set it here based on the snoop
                // filter result
                if (!sf_res.first.empty())
                    pkt->setBlockCached();
            } else {
                forwardTiming(pkt, slave_port_id, sf_res.first);
            }
        } else {
            forwardTiming(pkt, slave_port_id);
        }

        // add the snoop delay to our header delay, and then reset it
        pkt->headerDelay += pkt->snoopDelay;
        pkt->snoopDelay = 0;
    }

    // set up a sensible starting point
    bool success = true;

    // remember if the packet will generate a snoop response by
    // checking if a cache set the cacheResponding flag during the
    // snooping above
    const bool expect_snoop_resp = !cache_responding && pkt->cacheResponding();
    bool expect_response = pkt->needsResponse() && !pkt->cacheResponding();

    const bool sink_packet = sinkPacket(pkt);

    // in certain cases the crossbar is responsible for responding
    bool respond_directly = false;
    // store the original address as an address mapper could possibly
    // modify the address upon a sendTimingRequest
    const Addr addr(pkt->getAddr());
    if (sink_packet) {
        DPRINTF(CoherentXBar, "%s: Not forwarding %s\n", __func__,
                pkt->print());
    } else {
        // determine if we are forwarding the packet, or responding to
        // it
        if (forwardPacket(pkt)) {
            // if we are passing on, rather than sinking, a packet to
            // which an upstream cache has committed to responding,
            // the line needed to be writable, and the responding cache
            // only had an Owned copy, so we need to immediately let
            // the downstream caches know, bypassing any flow control
            if (pkt->cacheResponding()) {
                pkt->setExpressSnoop();
            }

            // make sure that the write request (e.g., WriteClean)
            // will stop at the memory below if this crossbar is its
            // destination
            if (pkt->isWrite() && is_destination) {
                pkt->clearWriteThrough();
            }

            // since it is a normal request, attempt to send the packet
            success = masterPorts[master_port_id]->sendTimingReq(pkt);
        } else {
            // no need to forward, turn this packet around and respond
            // directly
            assert(pkt->needsResponse());

            respond_directly = true;
            assert(!expect_snoop_resp);
            expect_response = false;
        }
    }

    if (snoopFilter && snoop_caches) {
        // Let the snoop filter know about the success of the send operation
        snoopFilter->finishRequest(!success, addr, pkt->isSecure());
    }

    // check if we were successful in sending the packet onwards
    if (!success) {
        // express snoops should never be forced to retry
        assert(!is_express_snoop);

        // restore the header delay
        pkt->headerDelay = old_header_delay;

        DPRINTF(CoherentXBar, "%s: src %s packet %s RETRY\n", __func__,
                src_port->name(), pkt->print());

        // update the layer state and schedule an idle event
        reqLayers[master_port_id]->failedTiming(src_port,
                                                clockEdge(Cycles(1)));
    } else {
        // express snoops currently bypass the crossbar state entirely
        if (!is_express_snoop) {
            // if this particular request will generate a snoop
            // response
            if (expect_snoop_resp) {
                // we should never have an existing request outstanding
                assert(outstandingSnoop.find(pkt->req) ==
                       outstandingSnoop.end());
                outstandingSnoop.insert(pkt->req);

                // basic sanity check on the outstanding snoops
                panic_if(outstandingSnoop.size() > maxOutstandingSnoopCheck,
                         "%s: Outstanding snoop requests exceeded %d\n",
                         name(), maxOutstandingSnoopCheck);
            }

            // remember where to route the normal response to
            if (expect_response || expect_snoop_resp) {
                assert(routeTo.find(pkt->req) == routeTo.end());
                routeTo[pkt->req] = slave_port_id;

                panic_if(routeTo.size() > maxRoutingTableSizeCheck,
                         "%s: Routing table exceeds %d packets\n",
                         name(), maxRoutingTableSizeCheck);
            }

            // update the layer state and schedule an idle event
            reqLayers[master_port_id]->succeededTiming(packetFinishTime);
        }

        // stats updates only consider packets that were successfully sent
        pktCount[slave_port_id][master_port_id]++;
        pktSize[slave_port_id][master_port_id] += pkt_size;
        transDist[pkt_cmd]++;

        if (is_express_snoop) {
            snoops++;
            snoopTraffic += pkt_size;
        }
    }

    if (sink_packet)
        // queue the packet for deletion
        pendingDelete.reset(pkt);

    // normally we respond to the packet we just received if we need to
    PacketPtr rsp_pkt = pkt;
    PortID rsp_port_id = slave_port_id;

    // If this is the destination of the cache clean operation the
    // crossbar is responsible for responding. This crossbar will
    // respond when the cache clean is complete. A cache clean
    // is complete either:
    // * directly, if no cache above had a dirty copy of the block
    //   as indicated by the satisfied flag of the packet, or
    // * when the crossbar has seen both the cache clean request
    //   (CleanSharedReq, CleanInvalidReq) and the corresponding
    //   write (WriteClean) which updates the block in the memory
    //   below.
    if (success &&
        ((pkt->isClean() && pkt->satisfied()) ||
         pkt->cmd == MemCmd::WriteClean) &&
        is_destination) {
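        // for a clean request we store the packet itself so a response
        // can be generated once the corresponding WriteClean has also
        // been seen; a WriteClean carries no deferred response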
        PacketPtr deferred_rsp = pkt->isWrite() ? nullptr : pkt;
        auto cmo_lookup = outstandingCMO.find(pkt->id);
        if (cmo_lookup != outstandingCMO.end()) {
            // the cache clean request has already reached this xbar
            respond_directly = true;
            if (pkt->isWrite()) {
                rsp_pkt = cmo_lookup->second;
                assert(rsp_pkt);

                // determine the destination
                const auto route_lookup = routeTo.find(rsp_pkt->req);
                assert(route_lookup != routeTo.end());
                rsp_port_id = route_lookup->second;
                assert(rsp_port_id != InvalidPortID);
                assert(rsp_port_id < respLayers.size());
                // remove the request from the routing table
                routeTo.erase(route_lookup);
            }
            outstandingCMO.erase(cmo_lookup);
        } else {
            respond_directly = false;
            outstandingCMO.emplace(pkt->id, deferred_rsp);
            if (!pkt->isWrite()) {
                assert(routeTo.find(pkt->req) == routeTo.end());
                routeTo[pkt->req] = slave_port_id;

                panic_if(routeTo.size() > maxRoutingTableSizeCheck,
                         "%s: Routing table exceeds %d packets\n",
                         name(), maxRoutingTableSizeCheck);
            }
        }
    }

    if (respond_directly) {
        assert(rsp_pkt->needsResponse());
        assert(success);

        rsp_pkt->makeResponse();

        if (snoopFilter && !system->bypassCaches()) {
            // let the snoop filter inspect the response and update its state
            snoopFilter->updateResponse(rsp_pkt, *slavePorts[rsp_port_id]);
        }

        // we send the response after the current packet, even if the
        // response is not for this packet (e.g. cache clean operation
        // where both the request and the write packet have to cross
        // the destination xbar before the response is sent.)
        Tick response_time = clockEdge() + pkt->headerDelay;
        rsp_pkt->headerDelay = 0;

        slavePorts[rsp_port_id]->schedTimingResp(rsp_pkt, response_time);
    }

    return success;
}

bool
CoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id)
{
    // determine the source port based on the id
    MasterPort *src_port = masterPorts[master_port_id];

    // determine the destination
    const auto route_lookup = routeTo.find(pkt->req);
    assert(route_lookup != routeTo.end());
    const PortID slave_port_id = route_lookup->second;
    assert(slave_port_id != InvalidPortID);
    assert(slave_port_id < respLayers.size());

    // test if the crossbar should be considered occupied for the
    // current port
    if (!respLayers[slave_port_id]->tryTiming(src_port)) {
        DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                src_port->name(), pkt->print());
        return false;
    }

    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            src_port->name(), pkt->print());

    // store size and command as they might be modified when
    // forwarding the packet
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    // a response sees the response latency
    Tick xbar_delay = responseLatency * clockPeriod();

    // set the packet header and payload delay
    calcPacketTiming(pkt, xbar_delay);

    // determine how long the crossbar layer is busy
    Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;

    if (snoopFilter && !system->bypassCaches()) {
        // let the snoop filter inspect the response and update its state
        snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
    }

    // send the packet through the destination slave port and pay for
    // any outstanding header delay
    Tick latency = pkt->headerDelay;
    pkt->headerDelay = 0;
    slavePorts[slave_port_id]->schedTimingResp(pkt, curTick() + latency);

    // remove the request from the routing table
    routeTo.erase(route_lookup);

    respLayers[slave_port_id]->succeededTiming(packetFinishTime);

    // stats updates
    pktCount[slave_port_id][master_port_id]++;
    pktSize[slave_port_id][master_port_id] += pkt_size;
    transDist[pkt_cmd]++;

    return true;
}

void
CoherentXBar::recvTimingSnoopReq(PacketPtr pkt, PortID master_port_id)
{
    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            masterPorts[master_port_id]->name(), pkt->print());

    // update stats here as we know the forwarding will succeed
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    transDist[pkt->cmdToIndex()]++;
    snoops++;
    snoopTraffic += pkt_size;

    // we should only see express snoops from caches
    assert(pkt->isExpressSnoop());

    // set the packet header and payload delay, for now use forward latency
    // @todo Assess the choice of latency further
    calcPacketTiming(pkt, forwardLatency * clockPeriod());

    // remember if a cache has already committed to responding so we
    // can see if it changes during the snooping
    const bool cache_responding = pkt->cacheResponding();

    assert(pkt->snoopDelay == 0);

    if (snoopFilter) {
        // let the Snoop Filter work its magic and guide probing
        auto sf_res = snoopFilter->lookupSnoop(pkt);
        // the time required by a packet to be delivered through
        // the xbar also has to account for the lookup latency of
        // the snoop filter
        pkt->headerDelay += sf_res.second * clockPeriod();
        DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                __func__, masterPorts[master_port_id]->name(), pkt->print(),
                sf_res.first.size(), sf_res.second);

        // forward to all snoopers
        forwardTiming(pkt, InvalidPortID, sf_res.first);
    } else {
        forwardTiming(pkt, InvalidPortID);
    }

    // add the snoop delay to our header delay, and then reset it
    pkt->headerDelay += pkt->snoopDelay;
    pkt->snoopDelay = 0;

    // if we can expect a response, remember how to route it
    if (!cache_responding && pkt->cacheResponding()) {
        assert(routeTo.find(pkt->req) == routeTo.end());
        routeTo[pkt->req] = master_port_id;
    }

    // a snoop request came from a connected slave device (one of
    // our master ports); if it is not coming from the slave device
    // responsible for the address range, something is wrong, hence
    // there is nothing further to do as the packet would be going
    // back to where it came from
    assert(findPort(pkt->getAddrRange()) == master_port_id);
}

bool
CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID slave_port_id)
{
    // determine the source port based on the id
    SlavePort* src_port = slavePorts[slave_port_id];

    // get the destination
    const auto route_lookup = routeTo.find(pkt->req);
    assert(route_lookup != routeTo.end());
    const PortID dest_port_id = route_lookup->second;
    assert(dest_port_id != InvalidPortID);

    // determine if the response is from a snoop request we
    // created as the result of a normal request (in which case it
    // should be in the outstandingSnoop), or if we merely forwarded
    // someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    // test if the crossbar should be considered occupied for the
    // current port, note that the check is bypassed if the response
    // is being passed on as a normal response since this is occupying
    // the response layer rather than the snoop response layer
    if (forwardAsSnoop) {
        assert(dest_port_id < snoopLayers.size());
        if (!snoopLayers[dest_port_id]->tryTiming(src_port)) {
            DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                    src_port->name(), pkt->print());
            return false;
        }
    } else {
        // get the master port that mirrors this slave port internally
        MasterPort* snoop_port = snoopRespPorts[slave_port_id];
        assert(dest_port_id < respLayers.size());
        if (!respLayers[dest_port_id]->tryTiming(snoop_port)) {
            DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                    snoop_port->name(), pkt->print());
            return false;
        }
    }

    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            src_port->name(), pkt->print());

    // store size and command as they might be modified when
    // forwarding the packet
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    // responses are never express snoops
    assert(!pkt->isExpressSnoop());

    // a snoop response sees the snoop response latency, and if it is
    // forwarded as a normal response, the response latency
    Tick xbar_delay =
        (forwardAsSnoop ? snoopResponseLatency : responseLatency) *
        clockPeriod();

    // set the packet header and payload delay
    calcPacketTiming(pkt, xbar_delay);

    // determine how long the crossbar layer is busy
    Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;

    // forward it either as a snoop response or a normal response
    if (forwardAsSnoop) {
        // this is a snoop response to a snoop request we forwarded,
        // e.g. coming from the L1 and going to the L2, and it should
        // be forwarded as a snoop response

        if (snoopFilter) {
            // update the probe filter so that it can properly track the line
            snoopFilter->updateSnoopForward(pkt, *slavePorts[slave_port_id],
                                            *masterPorts[dest_port_id]);
        }

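        // the crossbar relies on the port below always accepting the
        // snoop response, hence the assert on success below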
        bool success M5_VAR_USED =
            masterPorts[dest_port_id]->sendTimingSnoopResp(pkt);
        pktCount[slave_port_id][dest_port_id]++;
        pktSize[slave_port_id][dest_port_id] += pkt_size;
        assert(success);

        snoopLayers[dest_port_id]->succeededTiming(packetFinishTime);
    } else {
        // we got a snoop response on one of our slave ports,
        // i.e. from a coherent master connected to the crossbar, and
        // since we created the snoop request as part of recvTiming,
        // this should now be a normal response again
        outstandingSnoop.erase(pkt->req);

        // this is a snoop response from a coherent master, hence it
        // should never go back to where the snoop response came from,
        // but instead to where the original request came from
        assert(slave_port_id != dest_port_id);

        if (snoopFilter) {
            // update the probe filter so that it can properly track the line
            snoopFilter->updateSnoopResponse(pkt, *slavePorts[slave_port_id],
                                    *slavePorts[dest_port_id]);
        }

        DPRINTF(CoherentXBar, "%s: src %s packet %s FWD RESP\n", __func__,
                src_port->name(), pkt->print());

        // as a normal response, it should go back to a master through
        // one of our slave ports, we also pay for any outstanding
        // header latency
        Tick latency = pkt->headerDelay;
        pkt->headerDelay = 0;
        slavePorts[dest_port_id]->schedTimingResp(pkt, curTick() + latency);

        respLayers[dest_port_id]->succeededTiming(packetFinishTime);
    }

    // remove the request from the routing table
    routeTo.erase(route_lookup);

    // stats updates
    transDist[pkt_cmd]++;
    snoops++;
    snoopTraffic += pkt_size;

    return true;
}

void
CoherentXBar::forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id,
                           const std::vector<QueuedSlavePort*>& dests)
{
    DPRINTF(CoherentXBar, "%s for %s\n", __func__, pkt->print());

    // snoops should only happen if the system isn't bypassing caches
    assert(!system->bypassCaches());

    unsigned fanout = 0;

    for (const auto& p: dests) {
        // we could have gotten this request from a snooping master
        // (corresponding to our own slave port that is also in
        // snoopPorts) and should not send it back to where it came
        // from
        if (exclude_slave_port_id == InvalidPortID ||
            p->getId() != exclude_slave_port_id) {
            // cache is not allowed to refuse snoop
            p->sendTimingSnoopReq(pkt);
            fanout++;
        }
    }

    // Stats for fanout of this forward operation
    snoopFanout.sample(fanout);
}

void
CoherentXBar::recvReqRetry(PortID master_port_id)
{
    // responses and snoop responses never block on forwarding them,
    // so the retry will always be coming from a port to which we
    // tried to forward a request
    reqLayers[master_port_id]->recvRetry();
}

Tick
CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id,
                                 MemBackdoorPtr *backdoor)
{
    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            slavePorts[slave_port_id]->name(), pkt->print());

    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
    Tick snoop_response_latency = 0;

    // is this the destination point for this packet? (e.g. true if
    // this xbar is the PoC for a cache maintenance operation to the
    // PoC) otherwise the destination is any cache that can satisfy
    // the request
    const bool is_destination = isDestination(pkt);

    const bool snoop_caches = !system->bypassCaches() &&
        pkt->cmd != MemCmd::WriteClean;
    if (snoop_caches) {
        // forward to all snoopers but the source
        std::pair<MemCmd, Tick> snoop_result;
        if (snoopFilter) {
            // check with the snoop filter where to forward this packet
            auto sf_res =
                snoopFilter->lookupRequest(pkt, *slavePorts[slave_port_id]);
            snoop_response_latency += sf_res.second * clockPeriod();
            DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                    __func__, slavePorts[slave_port_id]->name(), pkt->print(),
                    sf_res.first.size(), sf_res.second);

            // let the snoop filter know about the success of the send
            // operation, and do it even before sending it onwards to
            // avoid situations where atomic upward snoops sneak in
            // between and change the filter state
            snoopFilter->finishRequest(false, pkt->getAddr(), pkt->isSecure());

            if (pkt->isEviction()) {
                // for block-evicting packets, i.e. writebacks and
                // clean evictions, there is no need to snoop up, as
                // all we do is determine if the block is cached or
                // not, instead just set it here based on the snoop
                // filter result
                if (!sf_res.first.empty())
                    pkt->setBlockCached();
            } else {
                snoop_result = forwardAtomic(pkt, slave_port_id, InvalidPortID,
                                             sf_res.first);
            }
        } else {
            snoop_result = forwardAtomic(pkt, slave_port_id);
        }
        snoop_response_cmd = snoop_result.first;
        snoop_response_latency += snoop_result.second;
    }

    // set up a sensible default value
    Tick response_latency = 0;

    const bool sink_packet = sinkPacket(pkt);

    // even if we had a snoop response, we must continue and also
    // perform the actual request at the destination
    PortID master_port_id = findPort(pkt->getAddrRange());

    if (sink_packet) {
        DPRINTF(CoherentXBar, "%s: Not forwarding %s\n", __func__,
                pkt->print());
    } else {
        if (forwardPacket(pkt)) {
            // make sure that the write request (e.g., WriteClean)
            // will stop at the memory below if this crossbar is its
            // destination
            if (pkt->isWrite() && is_destination) {
                pkt->clearWriteThrough();
            }

            // forward the request to the appropriate destination
            auto master = masterPorts[master_port_id];
            response_latency = backdoor ?
                master->sendAtomicBackdoor(pkt, *backdoor) :
                master->sendAtomic(pkt);
        } else {
            // if it does not need a response we sink the packet above
            assert(pkt->needsResponse());

            pkt->makeResponse();
        }
    }

    // stats updates for the request
    pktCount[slave_port_id][master_port_id]++;
    pktSize[slave_port_id][master_port_id] += pkt_size;
    transDist[pkt_cmd]++;

    // if lower levels have replied, tell the snoop filter
    if (!system->bypassCaches() && snoopFilter && pkt->isResponse()) {
        snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
    }

    // if we got a response from a snooper, restore it here
    if (snoop_response_cmd != MemCmd::InvalidCmd) {
        // no one else should have responded
        assert(!pkt->isResponse());
        pkt->cmd = snoop_response_cmd;
        response_latency = snoop_response_latency;
    }

    // If this is the destination of the cache clean operation the
    // crossbar is responsible for responding. This crossbar will
    // respond when the cache clean is complete. An atomic cache clean
    // is complete when the crossbar receives the cache clean
    // request (CleanSharedReq, CleanInvalidReq), as either:
    // * no cache above had a dirty copy of the block as indicated by
    //   the satisfied flag of the packet, or
    // * the crossbar has already seen the corresponding write
    //   (WriteClean) which updates the block in the memory below.
    if (pkt->isClean() && isDestination(pkt) && pkt->satisfied()) {
        auto it = outstandingCMO.find(pkt->id);
        assert(it != outstandingCMO.end());
        // we are responding right away
        outstandingCMO.erase(it);
    } else if (pkt->cmd == MemCmd::WriteClean && isDestination(pkt)) {
        // if this is the destination of the operation, the xbar
        // sends the response to the cache clean operation only
        // after having encountered the cache clean request
        auto M5_VAR_USED ret = outstandingCMO.emplace(pkt->id, nullptr);
        // in atomic mode we know that the WriteClean packet should
        // precede the clean request
        assert(ret.second);
    }

    // add the response data
    if (pkt->isResponse()) {
        pkt_size = pkt->hasData() ? pkt->getSize() : 0;
        pkt_cmd = pkt->cmdToIndex();

        // stats updates
        pktCount[slave_port_id][master_port_id]++;
        pktSize[slave_port_id][master_port_id] += pkt_size;
        transDist[pkt_cmd]++;
    }

    // @todo: Not setting header time
    pkt->payloadDelay = response_latency;
    return response_latency;
}

Tick
CoherentXBar::recvAtomicSnoop(PacketPtr pkt, PortID master_port_id)
{
    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            masterPorts[master_port_id]->name(), pkt->print());

    // add the request snoop data
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    snoops++;
    snoopTraffic += pkt_size;

    // forward to all snoopers
    std::pair<MemCmd, Tick> snoop_result;
    Tick snoop_response_latency = 0;
    if (snoopFilter) {
        auto sf_res = snoopFilter->lookupSnoop(pkt);
        snoop_response_latency += sf_res.second * clockPeriod();
        DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                __func__, masterPorts[master_port_id]->name(), pkt->print(),
                sf_res.first.size(), sf_res.second);
        snoop_result = forwardAtomic(pkt, InvalidPortID, master_port_id,
                                     sf_res.first);
    } else {
        snoop_result = forwardAtomic(pkt, InvalidPortID);
    }
    MemCmd snoop_response_cmd = snoop_result.first;
    snoop_response_latency += snoop_result.second;

    if (snoop_response_cmd != MemCmd::InvalidCmd)
        pkt->cmd = snoop_response_cmd;

    // add the response snoop data
    if (pkt->isResponse()) {
        snoops++;
    }

    // @todo: Not setting header time
    pkt->payloadDelay = snoop_response_latency;
    return snoop_response_latency;
}

std::pair<MemCmd, Tick>
CoherentXBar::forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id,
                           PortID source_master_port_id,
                           const std::vector<QueuedSlavePort*>& dests)
{
    // the packet may be changed on snoops, record the original
    // command to enable us to restore it between snoops so that
    // additional snoops can take place properly
    MemCmd orig_cmd = pkt->cmd;
    MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
    Tick snoop_response_latency = 0;

    // snoops should only happen if the system isn't bypassing caches
    assert(!system->bypassCaches());

    unsigned fanout = 0;

    for (const auto& p: dests) {
        // we could have gotten this request from a snooping master
        // (corresponding to our own slave port that is also in
        // snoopPorts) and should not send it back to where it came
        // from
        if (exclude_slave_port_id != InvalidPortID &&
            p->getId() == exclude_slave_port_id)
            continue;

        Tick latency = p->sendAtomicSnoop(pkt);
        fanout++;

        // in contrast to a functional access, we have to keep on
        // going as all snoopers must be updated even if we get a
        // response
        if (!pkt->isResponse())
            continue;

        // response from snoop agent
        assert(pkt->cmd != orig_cmd);
        assert(pkt->cacheResponding());
        // should only happen once
        assert(snoop_response_cmd == MemCmd::InvalidCmd);
        // save response state
        snoop_response_cmd = pkt->cmd;
        snoop_response_latency = latency;

        if (snoopFilter) {
            // Handle responses by the snoopers and differentiate between
            // responses to requests from above and snoops from below
            if (source_master_port_id != InvalidPortID) {
                // Getting a response for a snoop from below
                assert(exclude_slave_port_id == InvalidPortID);
                snoopFilter->updateSnoopForward(pkt, *p,
                             *masterPorts[source_master_port_id]);
            } else {
                // Getting a response for a request from above
                assert(source_master_port_id == InvalidPortID);
                snoopFilter->updateSnoopResponse(pkt, *p,
                             *slavePorts[exclude_slave_port_id]);
            }
        }
        // restore original packet state for remaining snoopers
        pkt->cmd = orig_cmd;
    }

    // Stats for fanout
    snoopFanout.sample(fanout);

    // the packet is restored as part of the loop and any potential
    // snoop response is part of the returned pair
    return std::make_pair(snoop_response_cmd, snoop_response_latency);
}

void
CoherentXBar::recvFunctional(PacketPtr pkt, PortID slave_port_id)
{
    if (!pkt->isPrint()) {
        // don't do DPRINTFs on PrintReq as it clutters up the output
        DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
                slavePorts[slave_port_id]->name(), pkt->print());
    }

    if (!system->bypassCaches()) {
        // forward to all snoopers but the source
        forwardFunctional(pkt, slave_port_id);
    }

    // there is no need to continue if the snooping has found what we
    // were looking for and the packet is already a response
    if (!pkt->isResponse()) {
        // since our slave ports are queued ports we need to check them as well
        for (const auto& p : slavePorts) {
            // if we find a response that has the data, then the
            // downstream caches/memories may be out of date, so simply stop
            // here
            if (p->trySatisfyFunctional(pkt)) {
                if (pkt->needsResponse())
                    pkt->makeResponse();
                return;
            }
        }

        PortID dest_id = findPort(pkt->getAddrRange());

        masterPorts[dest_id]->sendFunctional(pkt);
    }
}

void
CoherentXBar::recvFunctionalSnoop(PacketPtr pkt, PortID master_port_id)
{
    if (!pkt->isPrint()) {
        // don't do DPRINTFs on PrintReq as it clutters up the output
        DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
                masterPorts[master_port_id]->name(), pkt->print());
    }

    for (const auto& p : slavePorts) {
        if (p->trySatisfyFunctional(pkt)) {
            if (pkt->needsResponse())
                pkt->makeResponse();
            return;
        }
    }

    // forward to all snoopers
    forwardFunctional(pkt, InvalidPortID);
}

void
CoherentXBar::forwardFunctional(PacketPtr pkt, PortID exclude_slave_port_id)
{
    // snoops should only happen if the system isn't bypassing caches
    assert(!system->bypassCaches());

    for (const auto& p: snoopPorts) {
        // we could have gotten this request from a snooping master
        // (corresponding to our own slave port that is also in
        // snoopPorts) and should not send it back to where it came
        // from
        if (exclude_slave_port_id == InvalidPortID ||
            p->getId() != exclude_slave_port_id)
            p->sendFunctionalSnoop(pkt);

        // if we get a response we are done
        if (pkt->isResponse()) {
            break;
        }
    }
}

bool
CoherentXBar::sinkPacket(const PacketPtr pkt) const
{
    // we can sink the packet if:
    // 1) the crossbar is the point of coherency, and a cache is
    //    responding after being snooped
    // 2) the crossbar is the point of coherency, and the packet is a
    //    coherency packet (not a read or a write) that does not
    //    require a response
    // 3) this is a clean evict or clean writeback, but the packet is
    //    found in a cache above this crossbar
    // 4) a cache is responding after being snooped, and the packet
    //    either does not need the block to be writable, or the cache
    //    that has promised to respond (setting the cache responding
    //    flag) is providing writable and thus had a Modified block,
    //    and no further action is needed
    return (pointOfCoherency && pkt->cacheResponding()) ||
        (pointOfCoherency && !(pkt->isRead() || pkt->isWrite()) &&
         !pkt->needsResponse()) ||
        (pkt->isCleanEviction() && pkt->isBlockCached()) ||
        (pkt->cacheResponding() &&
         (!pkt->needsWritable() || pkt->responderHadWritable()));
}

bool
CoherentXBar::forwardPacket(const PacketPtr pkt)
{
    // we are forwarding the packet if:
    // 1) this is a cache clean request to the PoU/PoC and this
    //    crossbar is above the PoU/PoC
    // 2) this is a read or a write
    // 3) this crossbar is above the point of coherency
    if (pkt->isClean()) {
        return !isDestination(pkt);
    }
    return pkt->isRead() || pkt->isWrite() || !pointOfCoherency;
}

void
CoherentXBar::regStats()
{
    // register the stats of the base class and our layers
    BaseXBar::regStats();
    for (auto l: reqLayers)
        l->regStats();
    for (auto l: respLayers)
        l->regStats();
    for (auto l: snoopLayers)
        l->regStats();

    snoops
        .name(name() + ".snoops")
        .desc("Total snoops (count)")
    ;

    snoopTraffic
        .name(name() + ".snoopTraffic")
        .desc("Total snoop traffic (bytes)")
    ;

    snoopFanout
        .init(0, snoopPorts.size(), 1)
        .name(name() + ".snoop_fanout")
        .desc("Request fanout histogram")
    ;
}

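// factory method invoked by the generated parameter class when the
// crossbar SimObject is instantiated from the Python configuration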
CoherentXBar *
CoherentXBarParams::create()
{
    return new CoherentXBar(this);
}
