/*
 * Copyright (c) 2011-2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Andreas Hansson
 *          William Wang
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of a crossbar object.
 */

#include "mem/coherent_xbar.hh"

#include "base/logging.hh"
#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/CoherentXBar.hh"
#include "sim/system.hh"

CoherentXBar::CoherentXBar(const CoherentXBarParams *p)
    : BaseXBar(p), system(p->system), snoopFilter(p->snoop_filter),
      snoopResponseLatency(p->snoop_response_latency),
      pointOfCoherency(p->point_of_coherency),
      pointOfUnification(p->point_of_unification)
{
    // create the ports based on the size of the master and slave
    // vector ports, and the presence of the default port; the ports
    // are enumerated starting from zero
    for (int i = 0; i < p->port_master_connection_count; ++i) {
        std::string portName = csprintf("%s.master[%d]", name(), i);
        MasterPort* bp = new CoherentXBarMasterPort(portName, *this, i);
        masterPorts.push_back(bp);
        reqLayers.push_back(new ReqLayer(*bp, *this,
                                         csprintf(".reqLayer%d", i)));
        snoopLayers.push_back(
                new SnoopRespLayer(*bp, *this, csprintf(".snoopLayer%d", i)));
    }

    // see if we have a default slave device connected and if so add
    // our corresponding master port
    if (p->port_default_connection_count) {
        defaultPortID = masterPorts.size();
        std::string portName = name() + ".default";
        MasterPort* bp = new CoherentXBarMasterPort(portName, *this,
                                                    defaultPortID);
        masterPorts.push_back(bp);
        reqLayers.push_back(new ReqLayer(*bp, *this, csprintf(".reqLayer%d",
                                         defaultPortID)));
        snoopLayers.push_back(new SnoopRespLayer(*bp, *this,
                                                 csprintf(".snoopLayer%d",
                                                          defaultPortID)));
    }

    // create the slave ports, once again starting at zero
    for (int i = 0; i < p->port_slave_connection_count; ++i) {
        std::string portName = csprintf("%s.slave[%d]", name(), i);
        QueuedSlavePort* bp = new CoherentXBarSlavePort(portName, *this, i);
        slavePorts.push_back(bp);
        respLayers.push_back(new RespLayer(*bp, *this,
                                           csprintf(".respLayer%d", i)));
        snoopRespPorts.push_back(new SnoopRespPort(*bp, *this));
    }
}

CoherentXBar::~CoherentXBar()
{
    for (auto l: reqLayers)
        delete l;
    for (auto l: respLayers)
        delete l;
    for (auto l: snoopLayers)
        delete l;
    for (auto p: snoopRespPorts)
        delete p;
}

void
CoherentXBar::init()
{
    BaseXBar::init();

    // iterate over our slave ports and determine which of our
    // neighbouring master ports are snooping and add them as snoopers
    for (const auto& p: slavePorts) {
        // check if the connected master port is snooping
        if (p->isSnooping()) {
            DPRINTF(AddrRanges, "Adding snooping master %s\n",
                    p->getMasterPort().name());
            snoopPorts.push_back(p);
        }
    }

    if (snoopPorts.empty())
        warn("CoherentXBar %s has no snooping ports attached!\n", name());

    // inform the snoop filter about the slave ports so it can create
    // its own internal representation
    if (snoopFilter)
        snoopFilter->setSlavePorts(slavePorts);
}

bool
CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
{
    // determine the source port based on the id
    SlavePort *src_port = slavePorts[slave_port_id];

    // remember if the packet is an express snoop
    bool is_express_snoop = pkt->isExpressSnoop();
    bool cache_responding = pkt->cacheResponding();
    // for normal requests, going downstream, the express snoop flag
    // and the cache responding flag should always be the same
    assert(is_express_snoop == cache_responding);

    // determine the destination based on the destination address range
    PortID master_port_id = findPort(pkt->getAddrRange());

    // test if the crossbar should be considered occupied for the current
    // port, and exclude express snoops from the check
    if (!is_express_snoop && !reqLayers[master_port_id]->tryTiming(src_port)) {
        DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                src_port->name(), pkt->print());
        return false;
    }

    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            src_port->name(), pkt->print());

    // store size and command as they might be modified when
    // forwarding the packet
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    // store the old header delay so we can restore it if needed
    Tick old_header_delay = pkt->headerDelay;

    // a request sees the frontend and forward latency
    Tick xbar_delay = (frontendLatency + forwardLatency) * clockPeriod();
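    // e.g., with frontendLatency = 3 cycles, forwardLatency = 4 cycles and a
    // 1 GHz crossbar clock, the request would be charged 7 ns here
    // (illustrative values only; the actual latencies come from the
    // crossbar parameters)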

    // set the packet header and payload delay
    calcPacketTiming(pkt, xbar_delay);

    // determine how long the crossbar layer is busy
    Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;

    // is this the destination point for this packet? (e.g. true if
    // this xbar is the PoC for a cache maintenance operation to the
    // PoC) otherwise the destination is any cache that can satisfy
    // the request
    const bool is_destination = isDestination(pkt);

    const bool snoop_caches = !system->bypassCaches() &&
        pkt->cmd != MemCmd::WriteClean;
    if (snoop_caches) {
        assert(pkt->snoopDelay == 0);

        if (pkt->isClean() && !is_destination) {
            // before snooping we need to make sure that the memory
            // below is not busy and the cache clean request can be
            // forwarded to it
            if (!masterPorts[master_port_id]->tryTiming(pkt)) {
                DPRINTF(CoherentXBar, "%s: src %s packet %s RETRY\n", __func__,
                        src_port->name(), pkt->print());

                // update the layer state and schedule an idle event
                reqLayers[master_port_id]->failedTiming(src_port,
                                                        clockEdge(Cycles(1)));
                return false;
            }
        }


        // the packet is a memory-mapped request and should be
        // broadcast to all our snoopers but the source
        if (snoopFilter) {
            // check with the snoop filter where to forward this packet
            auto sf_res = snoopFilter->lookupRequest(pkt, *src_port);
            // the time required by a packet to be delivered through
            // the xbar has to be charged also with the lookup latency
            // of the snoop filter
            pkt->headerDelay += sf_res.second * clockPeriod();
            DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                    __func__, src_port->name(), pkt->print(),
                    sf_res.first.size(), sf_res.second);

            if (pkt->isEviction()) {
                // for block-evicting packets, i.e. writebacks and
                // clean evictions, there is no need to snoop up, as
                // all we do is determine if the block is cached or
                // not; instead just set it here based on the snoop
                // filter result
                if (!sf_res.first.empty())
                    pkt->setBlockCached();
            } else {
                forwardTiming(pkt, slave_port_id, sf_res.first);
            }
        } else {
            forwardTiming(pkt, slave_port_id);
        }

        // add the snoop delay to our header delay, and then reset it
        pkt->headerDelay += pkt->snoopDelay;
        pkt->snoopDelay = 0;
    }

    // set up a sensible starting point
    bool success = true;

    // remember if the packet will generate a snoop response by
    // checking if a cache set the cacheResponding flag during the
    // snooping above
    const bool expect_snoop_resp = !cache_responding && pkt->cacheResponding();
    bool expect_response = pkt->needsResponse() && !pkt->cacheResponding();

    const bool sink_packet = sinkPacket(pkt);

    // in certain cases the crossbar is responsible for responding
    bool respond_directly = false;
    // store the original address as an address mapper could possibly
    // modify the address upon a sendTimingRequest
    const Addr addr(pkt->getAddr());
    if (sink_packet) {
        DPRINTF(CoherentXBar, "%s: Not forwarding %s\n", __func__,
                pkt->print());
    } else {
        // determine if we are forwarding the packet, or responding to
        // it
        if (forwardPacket(pkt)) {
            // if we are passing on, rather than sinking, a packet to
            // which an upstream cache has committed to responding,
            // the line needs to be writable and the responder only
            // had an Owned copy, so we need to immediately let the
            // downstream caches know, bypassing any flow control
            if (pkt->cacheResponding()) {
                pkt->setExpressSnoop();
            }

            // make sure that the write request (e.g., WriteClean)
            // will stop at the memory below if this crossbar is its
            // destination
            if (pkt->isWrite() && is_destination) {
                pkt->clearWriteThrough();
            }

            // since it is a normal request, attempt to send the packet
            success = masterPorts[master_port_id]->sendTimingReq(pkt);
        } else {
            // no need to forward, turn this packet around and respond
            // directly
            assert(pkt->needsResponse());

            respond_directly = true;
            assert(!expect_snoop_resp);
            expect_response = false;
        }
    }

    if (snoopFilter && snoop_caches) {
        // Let the snoop filter know about the success of the send operation
        snoopFilter->finishRequest(!success, addr, pkt->isSecure());
    }

    // check if we were successful in sending the packet onwards
    if (!success) {
        // express snoops should never be forced to retry
        assert(!is_express_snoop);

        // restore the header delay
        pkt->headerDelay = old_header_delay;

        DPRINTF(CoherentXBar, "%s: src %s packet %s RETRY\n", __func__,
                src_port->name(), pkt->print());

        // update the layer state and schedule an idle event
        reqLayers[master_port_id]->failedTiming(src_port,
                                                clockEdge(Cycles(1)));
    } else {
        // express snoops currently bypass the crossbar state entirely
        if (!is_express_snoop) {
            // if this particular request will generate a snoop
            // response
            if (expect_snoop_resp) {
                // we should never have an existing request outstanding
                assert(outstandingSnoop.find(pkt->req) ==
                       outstandingSnoop.end());
                outstandingSnoop.insert(pkt->req);

                // basic sanity check on the outstanding snoops
                panic_if(outstandingSnoop.size() > 512,
                         "Outstanding snoop requests exceeded 512\n");
            }

            // remember where to route the normal response to
            if (expect_response || expect_snoop_resp) {
                assert(routeTo.find(pkt->req) == routeTo.end());
                routeTo[pkt->req] = slave_port_id;

                panic_if(routeTo.size() > 512,
                         "Routing table exceeds 512 packets\n");
            }

            // update the layer state and schedule an idle event
            reqLayers[master_port_id]->succeededTiming(packetFinishTime);
        }

        // stats updates only consider packets that were successfully sent
        pktCount[slave_port_id][master_port_id]++;
        pktSize[slave_port_id][master_port_id] += pkt_size;
        transDist[pkt_cmd]++;

        if (is_express_snoop) {
            snoops++;
            snoopTraffic += pkt_size;
        }
    }

    if (sink_packet)
        // queue the packet for deletion
        pendingDelete.reset(pkt);

    // normally we respond to the packet we just received if we need to
    PacketPtr rsp_pkt = pkt;
    PortID rsp_port_id = slave_port_id;

    // If this is the destination of the cache clean operation the
    // crossbar is responsible for responding. This crossbar will
    // respond when the cache clean is complete. A cache clean
    // is complete either:
    // * directly, if no cache above had a dirty copy of the block
    //   as indicated by the satisfied flag of the packet, or
    // * when the crossbar has seen both the cache clean request
    //   (CleanSharedReq, CleanInvalidReq) and the corresponding
    //   write (WriteClean) which updates the block in the memory
    //   below.
    if (success &&
        ((pkt->isClean() && pkt->satisfied()) ||
         pkt->cmd == MemCmd::WriteClean) &&
        is_destination) {
        PacketPtr deferred_rsp = pkt->isWrite() ? nullptr : pkt;
        auto cmo_lookup = outstandingCMO.find(pkt->id);
        if (cmo_lookup != outstandingCMO.end()) {
            // the cache clean request has already reached this xbar
            respond_directly = true;
            if (pkt->isWrite()) {
                rsp_pkt = cmo_lookup->second;
                assert(rsp_pkt);

                // determine the destination
                const auto route_lookup = routeTo.find(rsp_pkt->req);
                assert(route_lookup != routeTo.end());
                rsp_port_id = route_lookup->second;
                assert(rsp_port_id != InvalidPortID);
                assert(rsp_port_id < respLayers.size());
                // remove the request from the routing table
                routeTo.erase(route_lookup);
            }
            outstandingCMO.erase(cmo_lookup);
        } else {
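            // this is the first of the two packets (the clean request or the
            // corresponding WriteClean) to reach this crossbar; remember it
            // and defer the response until its counterpart arrives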
            respond_directly = false;
            outstandingCMO.emplace(pkt->id, deferred_rsp);
            if (!pkt->isWrite()) {
                assert(routeTo.find(pkt->req) == routeTo.end());
                routeTo[pkt->req] = slave_port_id;

                panic_if(routeTo.size() > 512,
                         "Routing table exceeds 512 packets\n");
            }
        }
    }


    if (respond_directly) {
        assert(rsp_pkt->needsResponse());
        assert(success);

        rsp_pkt->makeResponse();

        if (snoopFilter && !system->bypassCaches()) {
            // let the snoop filter inspect the response and update its state
            snoopFilter->updateResponse(rsp_pkt, *slavePorts[rsp_port_id]);
        }

        // we send the response after the current packet, even if the
        // response is not for this packet (e.g. cache clean operation
        // where both the request and the write packet have to cross
        // the destination xbar before the response is sent.)
        Tick response_time = clockEdge() + pkt->headerDelay;
        rsp_pkt->headerDelay = 0;

        slavePorts[rsp_port_id]->schedTimingResp(rsp_pkt, response_time);
    }

    return success;
}

bool
CoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id)
{
    // determine the source port based on the id
    MasterPort *src_port = masterPorts[master_port_id];

    // determine the destination
    const auto route_lookup = routeTo.find(pkt->req);
    assert(route_lookup != routeTo.end());
    const PortID slave_port_id = route_lookup->second;
    assert(slave_port_id != InvalidPortID);
    assert(slave_port_id < respLayers.size());

    // test if the crossbar should be considered occupied for the
    // current port
    if (!respLayers[slave_port_id]->tryTiming(src_port)) {
        DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                src_port->name(), pkt->print());
        return false;
    }

    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            src_port->name(), pkt->print());

    // store size and command as they might be modified when
    // forwarding the packet
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    // a response sees the response latency
    Tick xbar_delay = responseLatency * clockPeriod();

    // set the packet header and payload delay
    calcPacketTiming(pkt, xbar_delay);

    // determine how long the crossbar layer is busy
    Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;

    if (snoopFilter && !system->bypassCaches()) {
        // let the snoop filter inspect the response and update its state
        snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
    }

    // send the packet through the destination slave port and pay for
    // any outstanding header delay
    Tick latency = pkt->headerDelay;
    pkt->headerDelay = 0;
    slavePorts[slave_port_id]->schedTimingResp(pkt, curTick() + latency);

    // remove the request from the routing table
    routeTo.erase(route_lookup);

    respLayers[slave_port_id]->succeededTiming(packetFinishTime);

    // stats updates
    pktCount[slave_port_id][master_port_id]++;
    pktSize[slave_port_id][master_port_id] += pkt_size;
    transDist[pkt_cmd]++;

    return true;
}

void
CoherentXBar::recvTimingSnoopReq(PacketPtr pkt, PortID master_port_id)
{
    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            masterPorts[master_port_id]->name(), pkt->print());

    // update stats here as we know the forwarding will succeed
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    transDist[pkt->cmdToIndex()]++;
    snoops++;
    snoopTraffic += pkt_size;

    // we should only see express snoops from caches
    assert(pkt->isExpressSnoop());

    // set the packet header and payload delay, for now use forward latency
    // @todo Assess the choice of latency further
    calcPacketTiming(pkt, forwardLatency * clockPeriod());

    // remember if a cache has already committed to responding so we
    // can see if it changes during the snooping
    const bool cache_responding = pkt->cacheResponding();

    assert(pkt->snoopDelay == 0);

    if (snoopFilter) {
        // let the Snoop Filter work its magic and guide probing
        auto sf_res = snoopFilter->lookupSnoop(pkt);
        // the time required by a packet to be delivered through
        // the xbar has to be charged also with the lookup latency
        // of the snoop filter
        pkt->headerDelay += sf_res.second * clockPeriod();
        DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                __func__, masterPorts[master_port_id]->name(), pkt->print(),
                sf_res.first.size(), sf_res.second);

        // forward to all snoopers
        forwardTiming(pkt, InvalidPortID, sf_res.first);
    } else {
        forwardTiming(pkt, InvalidPortID);
    }

    // add the snoop delay to our header delay, and then reset it
    pkt->headerDelay += pkt->snoopDelay;
    pkt->snoopDelay = 0;

    // if we can expect a response, remember how to route it
    if (!cache_responding && pkt->cacheResponding()) {
        assert(routeTo.find(pkt->req) == routeTo.end());
        routeTo[pkt->req] = master_port_id;
    }

    // a snoop request came from a connected slave device (one of
    // our master ports), and if it is not coming from the slave
    // device responsible for the address range something is
    // wrong, hence there is nothing further to do as the packet
    // would be going back to where it came from
    assert(findPort(pkt->getAddrRange()) == master_port_id);
}

bool
CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID slave_port_id)
{
    // determine the source port based on the id
    SlavePort* src_port = slavePorts[slave_port_id];

    // get the destination
    const auto route_lookup = routeTo.find(pkt->req);
    assert(route_lookup != routeTo.end());
    const PortID dest_port_id = route_lookup->second;
    assert(dest_port_id != InvalidPortID);

    // determine if the response is from a snoop request we
    // created as the result of a normal request (in which case it
    // should be in the outstandingSnoop), or if we merely forwarded
    // someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    // test if the crossbar should be considered occupied for the
    // current port, note that the check is bypassed if the response
    // is being passed on as a normal response since this is occupying
    // the response layer rather than the snoop response layer
    if (forwardAsSnoop) {
        assert(dest_port_id < snoopLayers.size());
        if (!snoopLayers[dest_port_id]->tryTiming(src_port)) {
            DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                    src_port->name(), pkt->print());
            return false;
        }
    } else {
        // get the master port that mirrors this slave port internally
        MasterPort* snoop_port = snoopRespPorts[slave_port_id];
        assert(dest_port_id < respLayers.size());
        if (!respLayers[dest_port_id]->tryTiming(snoop_port)) {
            DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                    snoop_port->name(), pkt->print());
            return false;
        }
    }

    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            src_port->name(), pkt->print());

    // store size and command as they might be modified when
    // forwarding the packet
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    // responses are never express snoops
    assert(!pkt->isExpressSnoop());

    // a snoop response sees the snoop response latency, and if it is
    // forwarded as a normal response, the response latency
    Tick xbar_delay =
        (forwardAsSnoop ? snoopResponseLatency : responseLatency) *
        clockPeriod();

    // set the packet header and payload delay
    calcPacketTiming(pkt, xbar_delay);

    // determine how long the crossbar layer is busy
    Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;

    // forward it either as a snoop response or a normal response
    if (forwardAsSnoop) {
        // this is a snoop response to a snoop request we forwarded,
        // e.g. coming from the L1 and going to the L2, and it should
        // be forwarded as a snoop response

        if (snoopFilter) {
            // update the snoop filter so that it can properly track the line
            snoopFilter->updateSnoopForward(pkt, *slavePorts[slave_port_id],
                                            *masterPorts[dest_port_id]);
        }

        bool success M5_VAR_USED =
            masterPorts[dest_port_id]->sendTimingSnoopResp(pkt);
        pktCount[slave_port_id][dest_port_id]++;
        pktSize[slave_port_id][dest_port_id] += pkt_size;
        assert(success);

        snoopLayers[dest_port_id]->succeededTiming(packetFinishTime);
    } else {
        // we got a snoop response on one of our slave ports,
        // i.e. from a coherent master connected to the crossbar, and
        // since we created the snoop request as part of recvTiming,
        // this should now be a normal response again
        outstandingSnoop.erase(pkt->req);

        // this is a snoop response from a coherent master, hence it
        // should never go back to where the snoop response came from,
        // but instead to where the original request came from
        assert(slave_port_id != dest_port_id);

        if (snoopFilter) {
            // update the snoop filter so that it can properly track the line
            snoopFilter->updateSnoopResponse(pkt, *slavePorts[slave_port_id],
                                    *slavePorts[dest_port_id]);
        }

        DPRINTF(CoherentXBar, "%s: src %s packet %s FWD RESP\n", __func__,
                src_port->name(), pkt->print());

        // as a normal response, it should go back to a master through
        // one of our slave ports; we also pay for any outstanding
        // header latency
        Tick latency = pkt->headerDelay;
        pkt->headerDelay = 0;
        slavePorts[dest_port_id]->schedTimingResp(pkt, curTick() + latency);

        respLayers[dest_port_id]->succeededTiming(packetFinishTime);
    }

    // remove the request from the routing table
    routeTo.erase(route_lookup);

    // stats updates
    transDist[pkt_cmd]++;
    snoops++;
    snoopTraffic += pkt_size;

    return true;
}


void
CoherentXBar::forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id,
                           const std::vector<QueuedSlavePort*>& dests)
{
    DPRINTF(CoherentXBar, "%s for %s\n", __func__, pkt->print());

    // snoops should only happen if the system isn't bypassing caches
    assert(!system->bypassCaches());

    unsigned fanout = 0;

    for (const auto& p: dests) {
        // we could have gotten this request from a snooping master
        // (corresponding to our own slave port that is also in
        // snoopPorts) and should not send it back to where it came
        // from
        if (exclude_slave_port_id == InvalidPortID ||
            p->getId() != exclude_slave_port_id) {
            // cache is not allowed to refuse snoop
            p->sendTimingSnoopReq(pkt);
            fanout++;
        }
    }

    // Stats for fanout of this forward operation
    snoopFanout.sample(fanout);
}

void
CoherentXBar::recvReqRetry(PortID master_port_id)
{
    // responses and snoop responses never block on forwarding them,
    // so the retry will always be coming from a port to which we
    // tried to forward a request
    reqLayers[master_port_id]->recvRetry();
}

Tick
CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id,
                                 MemBackdoorPtr *backdoor)
{
    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            slavePorts[slave_port_id]->name(), pkt->print());

    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
    Tick snoop_response_latency = 0;
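    // defaults: no snooper has responded and no snoop-filter or snoop
    // latency has been accrued yet; both are updated below if the upward
    // snoop produces a response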

    // is this the destination point for this packet? (e.g. true if
    // this xbar is the PoC for a cache maintenance operation to the
    // PoC) otherwise the destination is any cache that can satisfy
    // the request
    const bool is_destination = isDestination(pkt);

    const bool snoop_caches = !system->bypassCaches() &&
        pkt->cmd != MemCmd::WriteClean;
    if (snoop_caches) {
        // forward to all snoopers but the source
        std::pair<MemCmd, Tick> snoop_result;
        if (snoopFilter) {
            // check with the snoop filter where to forward this packet
            auto sf_res =
                snoopFilter->lookupRequest(pkt, *slavePorts[slave_port_id]);
            snoop_response_latency += sf_res.second * clockPeriod();
            DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                    __func__, slavePorts[slave_port_id]->name(), pkt->print(),
                    sf_res.first.size(), sf_res.second);

            // let the snoop filter know about the success of the send
            // operation, and do it even before sending it onwards to
            // avoid situations where atomic upward snoops sneak in
            // between and change the filter state
            snoopFilter->finishRequest(false, pkt->getAddr(), pkt->isSecure());

            if (pkt->isEviction()) {
                // for block-evicting packets, i.e. writebacks and
                // clean evictions, there is no need to snoop up, as
                // all we do is determine if the block is cached or
                // not; instead just set it here based on the snoop
                // filter result
                if (!sf_res.first.empty())
                    pkt->setBlockCached();
            } else {
                snoop_result = forwardAtomic(pkt, slave_port_id, InvalidPortID,
                                             sf_res.first);
            }
        } else {
            snoop_result = forwardAtomic(pkt, slave_port_id);
        }
        snoop_response_cmd = snoop_result.first;
        snoop_response_latency += snoop_result.second;
    }

    // set up a sensible default value
    Tick response_latency = 0;

    const bool sink_packet = sinkPacket(pkt);

    // even if we had a snoop response, we must continue and also
    // perform the actual request at the destination
    PortID master_port_id = findPort(pkt->getAddrRange());

    if (sink_packet) {
        DPRINTF(CoherentXBar, "%s: Not forwarding %s\n", __func__,
                pkt->print());
    } else {
        if (forwardPacket(pkt)) {
            // make sure that the write request (e.g., WriteClean)
            // will stop at the memory below if this crossbar is its
            // destination
            if (pkt->isWrite() && is_destination) {
                pkt->clearWriteThrough();
            }

            // forward the request to the appropriate destination
            auto master = masterPorts[master_port_id];
            response_latency = backdoor ?
                master->sendAtomicBackdoor(pkt, *backdoor) :
                master->sendAtomic(pkt);
        } else {
            // if it did not need a response we would have sunk the
            // packet above
            assert(pkt->needsResponse());

            pkt->makeResponse();
        }
    }

    // stats updates for the request
    pktCount[slave_port_id][master_port_id]++;
    pktSize[slave_port_id][master_port_id] += pkt_size;
    transDist[pkt_cmd]++;


    // if lower levels have replied, tell the snoop filter
    if (!system->bypassCaches() && snoopFilter && pkt->isResponse()) {
        snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
    }

    // if we got a response from a snooper, restore it here
    if (snoop_response_cmd != MemCmd::InvalidCmd) {
        // no one else should have responded
        assert(!pkt->isResponse());
        pkt->cmd = snoop_response_cmd;
        response_latency = snoop_response_latency;
    }

    // If this is the destination of the cache clean operation the
    // crossbar is responsible for responding. This crossbar will
    // respond when the cache clean is complete. An atomic cache clean
    // is complete when the crossbar receives the cache clean
    // request (CleanSharedReq, CleanInvalidReq), as either:
    // * no cache above had a dirty copy of the block as indicated by
    //   the satisfied flag of the packet, or
    // * the crossbar has already seen the corresponding write
    //   (WriteClean) which updates the block in the memory below.
    if (pkt->isClean() && isDestination(pkt) && pkt->satisfied()) {
        auto it = outstandingCMO.find(pkt->id);
        assert(it != outstandingCMO.end());
        // we are responding right away
        outstandingCMO.erase(it);
    } else if (pkt->cmd == MemCmd::WriteClean && isDestination(pkt)) {
        // if this is the destination of the operation, the xbar
        // sends the response to the cache clean operation only
        // after having encountered the cache clean request
        auto M5_VAR_USED ret = outstandingCMO.emplace(pkt->id, nullptr);
        // in atomic mode we know that the WriteClean packet should
        // precede the clean request
        assert(ret.second);
    }

    // add the response data
    if (pkt->isResponse()) {
        pkt_size = pkt->hasData() ? pkt->getSize() : 0;
        pkt_cmd = pkt->cmdToIndex();

        // stats updates
        pktCount[slave_port_id][master_port_id]++;
        pktSize[slave_port_id][master_port_id] += pkt_size;
        transDist[pkt_cmd]++;
    }

    // @todo: Not setting header time
    pkt->payloadDelay = response_latency;
    return response_latency;
}

Tick
CoherentXBar::recvAtomicSnoop(PacketPtr pkt, PortID master_port_id)
{
    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            masterPorts[master_port_id]->name(), pkt->print());

    // add the request snoop data
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    snoops++;
    snoopTraffic += pkt_size;

    // forward to all snoopers
    std::pair<MemCmd, Tick> snoop_result;
    Tick snoop_response_latency = 0;
    if (snoopFilter) {
        auto sf_res = snoopFilter->lookupSnoop(pkt);
        snoop_response_latency += sf_res.second * clockPeriod();
        DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                __func__, masterPorts[master_port_id]->name(), pkt->print(),
                sf_res.first.size(), sf_res.second);
        snoop_result = forwardAtomic(pkt, InvalidPortID, master_port_id,
                                     sf_res.first);
    } else {
        snoop_result = forwardAtomic(pkt, InvalidPortID);
    }
    MemCmd snoop_response_cmd = snoop_result.first;
    snoop_response_latency += snoop_result.second;

    if (snoop_response_cmd != MemCmd::InvalidCmd)
        pkt->cmd = snoop_response_cmd;

    // add the response snoop data
    if (pkt->isResponse()) {
        snoops++;
    }

    // @todo: Not setting header time
    pkt->payloadDelay = snoop_response_latency;
    return snoop_response_latency;
}

std::pair<MemCmd, Tick>
CoherentXBar::forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id,
                           PortID source_master_port_id,
                           const std::vector<QueuedSlavePort*>& dests)
{
    // the packet may be changed on snoops, record the original
    // command to enable us to restore it between snoops so that
    // additional snoops can take place properly
    MemCmd orig_cmd = pkt->cmd;
    MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
    Tick snoop_response_latency = 0;

    // snoops should only happen if the system isn't bypassing caches
    assert(!system->bypassCaches());

    unsigned fanout = 0;

    for (const auto& p: dests) {
        // we could have gotten this request from a snooping master
        // (corresponding to our own slave port that is also in
        // snoopPorts) and should not send it back to where it came
        // from
        if (exclude_slave_port_id != InvalidPortID &&
            p->getId() == exclude_slave_port_id)
            continue;

        Tick latency = p->sendAtomicSnoop(pkt);
        fanout++;

        // in contrast to a functional access, we have to keep on
        // going as all snoopers must be updated even if we get a
        // response
        if (!pkt->isResponse())
            continue;

        // response from snoop agent
        assert(pkt->cmd != orig_cmd);
        assert(pkt->cacheResponding());
        // should only happen once
        assert(snoop_response_cmd == MemCmd::InvalidCmd);
        // save response state
        snoop_response_cmd = pkt->cmd;
        snoop_response_latency = latency;

        if (snoopFilter) {
            // Handle responses by the snoopers and differentiate between
            // responses to requests from above and snoops from below
            if (source_master_port_id != InvalidPortID) {
                // Getting a response for a snoop from below
                assert(exclude_slave_port_id == InvalidPortID);
                snoopFilter->updateSnoopForward(pkt, *p,
                             *masterPorts[source_master_port_id]);
            } else {
                // Getting a response for a request from above
                assert(source_master_port_id == InvalidPortID);
                snoopFilter->updateSnoopResponse(pkt, *p,
                             *slavePorts[exclude_slave_port_id]);
            }
        }
        // restore original packet state for remaining snoopers
        pkt->cmd = orig_cmd;
    }

    // Stats for fanout
    snoopFanout.sample(fanout);

    // the packet is restored as part of the loop and any potential
    // snoop response is part of the returned pair
    return std::make_pair(snoop_response_cmd, snoop_response_latency);
}

void
CoherentXBar::recvFunctional(PacketPtr pkt, PortID slave_port_id)
{
    if (!pkt->isPrint()) {
        // don't do DPRINTFs on PrintReq as it clutters up the output
        DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
                slavePorts[slave_port_id]->name(), pkt->print());
    }

    if (!system->bypassCaches()) {
        // forward to all snoopers but the source
        forwardFunctional(pkt, slave_port_id);
    }

    // there is no need to continue if the snooping has found what we
    // were looking for and the packet is already a response
    if (!pkt->isResponse()) {
        // since our slave ports are queued ports we need to check them as well
        for (const auto& p : slavePorts) {
            // if we find a response that has the data, then the
            // downstream caches/memories may be out of date, so simply stop
            // here
            if (p->trySatisfyFunctional(pkt)) {
                if (pkt->needsResponse())
                    pkt->makeResponse();
                return;
            }
        }

        PortID dest_id = findPort(pkt->getAddrRange());

        masterPorts[dest_id]->sendFunctional(pkt);
    }
}

void
CoherentXBar::recvFunctionalSnoop(PacketPtr pkt, PortID master_port_id)
{
    if (!pkt->isPrint()) {
        // don't do DPRINTFs on PrintReq as it clutters up the output
        DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
                masterPorts[master_port_id]->name(), pkt->print());
    }

    for (const auto& p : slavePorts) {
        if (p->trySatisfyFunctional(pkt)) {
            if (pkt->needsResponse())
                pkt->makeResponse();
            return;
        }
    }

    // forward to all snoopers
    forwardFunctional(pkt, InvalidPortID);
}

void
CoherentXBar::forwardFunctional(PacketPtr pkt, PortID exclude_slave_port_id)
{
    // snoops should only happen if the system isn't bypassing caches
    assert(!system->bypassCaches());

    for (const auto& p: snoopPorts) {
        // we could have gotten this request from a snooping master
        // (corresponding to our own slave port that is also in
        // snoopPorts) and should not send it back to where it came
        // from
        if (exclude_slave_port_id == InvalidPortID ||
            p->getId() != exclude_slave_port_id)
            p->sendFunctionalSnoop(pkt);

        // if we get a response we are done
        if (pkt->isResponse()) {
            break;
        }
    }
}

bool
CoherentXBar::sinkPacket(const PacketPtr pkt) const
{
    // we can sink the packet if:
    // 1) the crossbar is the point of coherency, and a cache is
    //    responding after being snooped
    // 2) the crossbar is the point of coherency, and the packet is a
    //    coherency packet (not a read or a write) that does not
    //    require a response
    // 3) this is a clean evict or clean writeback, but the packet is
    //    found in a cache above this crossbar
    // 4) a cache is responding after being snooped, and the packet
    //    either does not need the block to be writable, or the cache
    //    that has promised to respond (setting the cache responding
    //    flag) is providing writable and thus had a Modified block,
    //    and no further action is needed
    return (pointOfCoherency && pkt->cacheResponding()) ||
        (pointOfCoherency && !(pkt->isRead() || pkt->isWrite()) &&
         !pkt->needsResponse()) ||
        (pkt->isCleanEviction() && pkt->isBlockCached()) ||
        (pkt->cacheResponding() &&
         (!pkt->needsWritable() || pkt->responderHadWritable()));
}

bool
CoherentXBar::forwardPacket(const PacketPtr pkt)
{
    // we are forwarding the packet if:
    // 1) this is a cache clean request to the PoU/PoC and this
    //    crossbar is above the PoU/PoC
    // 2) this is a read or a write
    // 3) this crossbar is above the point of coherency
    if (pkt->isClean()) {
        return !isDestination(pkt);
    }
    return pkt->isRead() || pkt->isWrite() || !pointOfCoherency;
}


void
CoherentXBar::regStats()
{
    // register the stats of the base class and our layers
    BaseXBar::regStats();
    for (auto l: reqLayers)
        l->regStats();
    for (auto l: respLayers)
        l->regStats();
    for (auto l: snoopLayers)
        l->regStats();

    snoops
        .name(name() + ".snoops")
        .desc("Total snoops (count)")
    ;

    snoopTraffic
        .name(name() + ".snoopTraffic")
        .desc("Total snoop traffic (bytes)")
    ;

    snoopFanout
        .init(0, snoopPorts.size(), 1)
        .name(name() + ".snoop_fanout")
        .desc("Request fanout histogram")
    ;
}

CoherentXBar *
CoherentXBarParams::create()
{
    return new CoherentXBar(this);
}