/*
 * Copyright (c) 2011-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Andreas Hansson
 *          William Wang
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of a coherent crossbar object.
 */

#include "mem/coherent_xbar.hh"

#include "base/logging.hh"
#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/CoherentXBar.hh"
#include "sim/system.hh"

CoherentXBar::CoherentXBar(const CoherentXBarParams *p)
    : BaseXBar(p), system(p->system), snoopFilter(p->snoop_filter),
      snoopResponseLatency(p->snoop_response_latency),
      maxOutstandingSnoopCheck(p->max_outstanding_snoops),
      maxRoutingTableSizeCheck(p->max_routing_table_size),
      pointOfCoherency(p->point_of_coherency),
      pointOfUnification(p->point_of_unification)
{
    // create the ports based on the size of the master and slave
    // vector ports, and the presence of the default port; the ports
    // are enumerated starting from zero
    for (int i = 0; i < p->port_master_connection_count; ++i) {
        std::string portName = csprintf("%s.master[%d]", name(), i);
        MasterPort* bp = new CoherentXBarMasterPort(portName, *this, i);
        masterPorts.push_back(bp);
        reqLayers.push_back(new ReqLayer(*bp, *this,
                                         csprintf(".reqLayer%d", i)));
        snoopLayers.push_back(
                new SnoopRespLayer(*bp, *this, csprintf(".snoopLayer%d", i)));
    }

    // see if we have a default slave device connected and if so add
    // our corresponding master port
    if (p->port_default_connection_count) {
        defaultPortID = masterPorts.size();
        std::string portName = name() + ".default";
        MasterPort* bp = new CoherentXBarMasterPort(portName, *this,
                                                    defaultPortID);
        masterPorts.push_back(bp);
        reqLayers.push_back(new ReqLayer(*bp, *this, csprintf(".reqLayer%d",
                                         defaultPortID)));
        snoopLayers.push_back(new SnoopRespLayer(*bp, *this,
                                                 csprintf(".snoopLayer%d",
                                                          defaultPortID)));
    }

    // create the slave ports, once again starting at zero
    for (int i = 0; i < p->port_slave_connection_count; ++i) {
        std::string portName = csprintf("%s.slave[%d]", name(), i);
        QueuedSlavePort* bp = new CoherentXBarSlavePort(portName, *this, i);
        slavePorts.push_back(bp);
        respLayers.push_back(new RespLayer(*bp, *this,
                                           csprintf(".respLayer%d", i)));
        snoopRespPorts.push_back(new SnoopRespPort(*bp, *this));
    }
}

CoherentXBar::~CoherentXBar()
{
    for (auto l: reqLayers)
        delete l;
    for (auto l: respLayers)
        delete l;
    for (auto l: snoopLayers)
        delete l;
    for (auto p: snoopRespPorts)
        delete p;
}

void
CoherentXBar::init()
{
    BaseXBar::init();

    // iterate over our slave ports and determine which of our
    // neighbouring master ports are snooping and add them as snoopers
    for (const auto& p: slavePorts) {
        // check if the connected master port is snooping
        if (p->isSnooping()) {
            DPRINTF(AddrRanges, "Adding snooping master %s\n", p->getPeer());
            snoopPorts.push_back(p);
        }
    }

    if (snoopPorts.empty())
        warn("CoherentXBar %s has no snooping ports attached!\n", name());

    // inform the snoop filter about the slave ports so it can create
    // its own internal representation
    if (snoopFilter)
        snoopFilter->setSlavePorts(slavePorts);
}

bool
CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
{
    // determine the source port based on the id
    SlavePort *src_port = slavePorts[slave_port_id];

    // remember if the packet is an express snoop
    bool is_express_snoop = pkt->isExpressSnoop();
    bool cache_responding = pkt->cacheResponding();
    // for normal requests, going downstream, the express snoop flag
    // and the cache responding flag should always be the same
    assert(is_express_snoop == cache_responding);

    // determine the destination based on the destination address range
    PortID master_port_id = findPort(pkt->getAddrRange());

    // test if the crossbar should be considered occupied for the current
    // port, and exclude express snoops from the check
    if (!is_express_snoop && !reqLayers[master_port_id]->tryTiming(src_port)) {
        DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                src_port->name(), pkt->print());
        return false;
    }

    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            src_port->name(), pkt->print());

    // store size and command as they might be modified when
    // forwarding the packet
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    // store the old header delay so we can restore it if needed
    Tick old_header_delay = pkt->headerDelay;

    // a request sees the frontend and forward latency
    Tick xbar_delay = (frontendLatency + forwardLatency) * clockPeriod();

    // set the packet header and payload delay
    calcPacketTiming(pkt, xbar_delay);

    // determine how long the crossbar layer is busy
    Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;

    // is this the destination point for this packet? (e.g. true if
    // this xbar is the PoC for a cache maintenance operation to the
    // PoC) otherwise the destination is any cache that can satisfy
    // the request
    const bool is_destination = isDestination(pkt);

    const bool snoop_caches = !system->bypassCaches() &&
        pkt->cmd != MemCmd::WriteClean;
    if (snoop_caches) {
        assert(pkt->snoopDelay == 0);

        if (pkt->isClean() && !is_destination) {
            // before snooping we need to make sure that the memory
            // below is not busy and the cache clean request can be
            // forwarded to it
            if (!masterPorts[master_port_id]->tryTiming(pkt)) {
                DPRINTF(CoherentXBar, "%s: src %s packet %s RETRY\n", __func__,
                        src_port->name(), pkt->print());

                // update the layer state and schedule an idle event
                reqLayers[master_port_id]->failedTiming(src_port,
                                                        clockEdge(Cycles(1)));
                return false;
            }
        }


        // the packet is a memory-mapped request and should be
        // broadcast to all our snoopers except the source
        if (snoopFilter) {
            // check with the snoop filter where to forward this packet
            auto sf_res = snoopFilter->lookupRequest(pkt, *src_port);
            // the time required by a packet to be delivered through
            // the xbar also has to be charged with the lookup latency
            // of the snoop filter
            pkt->headerDelay += sf_res.second * clockPeriod();
            DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                    __func__, src_port->name(), pkt->print(),
                    sf_res.first.size(), sf_res.second);

            if (pkt->isEviction()) {
                // for block-evicting packets, i.e. writebacks and
                // clean evictions, there is no need to snoop up, as
                // all we do is determine if the block is cached or
                // not, instead just set it here based on the snoop
                // filter result
                if (!sf_res.first.empty())
                    pkt->setBlockCached();
            } else {
                forwardTiming(pkt, slave_port_id, sf_res.first);
            }
        } else {
            forwardTiming(pkt, slave_port_id);
        }

        // add the snoop delay to our header delay, and then reset it
        pkt->headerDelay += pkt->snoopDelay;
        pkt->snoopDelay = 0;
    }

    // set up a sensible starting point
    bool success = true;

    // remember if the packet will generate a snoop response by
    // checking if a cache set the cacheResponding flag during the
    // snooping above
    const bool expect_snoop_resp = !cache_responding && pkt->cacheResponding();
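    // a normal response from below is only expected if the packet
    // needs a response and no cache has already committed to respond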
    bool expect_response = pkt->needsResponse() && !pkt->cacheResponding();

    const bool sink_packet = sinkPacket(pkt);

    // in certain cases the crossbar is responsible for responding
    bool respond_directly = false;
    // store the original address as an address mapper could possibly
    // modify the address upon a sendTimingRequest
    const Addr addr(pkt->getAddr());
    if (sink_packet) {
        DPRINTF(CoherentXBar, "%s: Not forwarding %s\n", __func__,
                pkt->print());
    } else {
        // determine if we are forwarding the packet, or responding to
        // it
        if (forwardPacket(pkt)) {
            // if we are passing on, rather than sinking, a packet to
            // which an upstream cache has committed to responding,
            // the line needs to be writable and the responder only
            // had an Owned copy, so we need to immediately let the
            // downstream caches know, bypassing any flow control
            if (pkt->cacheResponding()) {
                pkt->setExpressSnoop();
            }

            // make sure that the write request (e.g., WriteClean)
            // will stop at the memory below if this crossbar is its
            // destination
            if (pkt->isWrite() && is_destination) {
                pkt->clearWriteThrough();
            }

            // since it is a normal request, attempt to send the packet
            success = masterPorts[master_port_id]->sendTimingReq(pkt);
        } else {
            // no need to forward, turn this packet around and respond
            // directly
            assert(pkt->needsResponse());

            respond_directly = true;
            assert(!expect_snoop_resp);
            expect_response = false;
        }
    }

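    // the send above may have failed, in which case the source will
    // retry the request later and the snoop filter has to roll back
    // the changes it made in lookupRequest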
    if (snoopFilter && snoop_caches) {
        // Let the snoop filter know about the success of the send operation
        snoopFilter->finishRequest(!success, addr, pkt->isSecure());
    }

    // check if we were successful in sending the packet onwards
    if (!success)  {
        // express snoops should never be forced to retry
        assert(!is_express_snoop);

        // restore the header delay
        pkt->headerDelay = old_header_delay;

        DPRINTF(CoherentXBar, "%s: src %s packet %s RETRY\n", __func__,
                src_port->name(), pkt->print());

        // update the layer state and schedule an idle event
        reqLayers[master_port_id]->failedTiming(src_port,
                                                clockEdge(Cycles(1)));
    } else {
        // express snoops currently bypass the crossbar state entirely
        if (!is_express_snoop) {
            // if this particular request will generate a snoop
            // response
            if (expect_snoop_resp) {
                // we should never have an existing request outstanding
                assert(outstandingSnoop.find(pkt->req) ==
                       outstandingSnoop.end());
                outstandingSnoop.insert(pkt->req);

                // basic sanity check on the outstanding snoops
                panic_if(outstandingSnoop.size() > maxOutstandingSnoopCheck,
                         "%s: Outstanding snoop requests exceeded %d\n",
                         name(), maxOutstandingSnoopCheck);
            }

            // remember where to route the normal response to
            if (expect_response || expect_snoop_resp) {
                assert(routeTo.find(pkt->req) == routeTo.end());
                routeTo[pkt->req] = slave_port_id;

                panic_if(routeTo.size() > maxRoutingTableSizeCheck,
                         "%s: Routing table exceeds %d packets\n",
                         name(), maxRoutingTableSizeCheck);
            }

            // update the layer state and schedule an idle event
            reqLayers[master_port_id]->succeededTiming(packetFinishTime);
        }

        // stats updates only consider packets that were successfully sent
        pktCount[slave_port_id][master_port_id]++;
        pktSize[slave_port_id][master_port_id] += pkt_size;
        transDist[pkt_cmd]++;

        if (is_express_snoop) {
            snoops++;
            snoopTraffic += pkt_size;
        }
    }

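    // a sunk packet cannot be deleted straight away, as the sender
    // still needs it until this call returns; the unique_ptr holds it
    // for deletion on a subsequent call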
    if (sink_packet)
        // queue the packet for deletion
        pendingDelete.reset(pkt);

    // normally we respond to the packet we just received if we need to
    PacketPtr rsp_pkt = pkt;
    PortID rsp_port_id = slave_port_id;

    // If this is the destination of the cache clean operation the
    // crossbar is responsible for responding. This crossbar will
    // respond when the cache clean is complete. A cache clean
    // is complete either:
    // * directly, if no cache above had a dirty copy of the block
    //   as indicated by the satisfied flag of the packet, or
    // * when the crossbar has seen both the cache clean request
    //   (CleanSharedReq, CleanInvalidReq) and the corresponding
    //   write (WriteClean) which updates the block in the memory
    //   below.
    if (success &&
        ((pkt->isClean() && pkt->satisfied()) ||
         pkt->cmd == MemCmd::WriteClean) &&
        is_destination) {
        PacketPtr deferred_rsp = pkt->isWrite() ? nullptr : pkt;
        auto cmo_lookup = outstandingCMO.find(pkt->id);
        if (cmo_lookup != outstandingCMO.end()) {
            // the cache clean request has already reached this xbar
            respond_directly = true;
            if (pkt->isWrite()) {
                rsp_pkt = cmo_lookup->second;
                assert(rsp_pkt);

                // determine the destination
                const auto route_lookup = routeTo.find(rsp_pkt->req);
                assert(route_lookup != routeTo.end());
                rsp_port_id = route_lookup->second;
                assert(rsp_port_id != InvalidPortID);
                assert(rsp_port_id < respLayers.size());
                // remove the request from the routing table
                routeTo.erase(route_lookup);
            }
            outstandingCMO.erase(cmo_lookup);
        } else {
            respond_directly = false;
            outstandingCMO.emplace(pkt->id, deferred_rsp);
            if (!pkt->isWrite()) {
                assert(routeTo.find(pkt->req) == routeTo.end());
                routeTo[pkt->req] = slave_port_id;

                panic_if(routeTo.size() > maxRoutingTableSizeCheck,
                         "%s: Routing table exceeds %d packets\n",
                         name(), maxRoutingTableSizeCheck);
            }
        }
    }


    if (respond_directly) {
        assert(rsp_pkt->needsResponse());
        assert(success);

        rsp_pkt->makeResponse();

        if (snoopFilter && !system->bypassCaches()) {
            // let the snoop filter inspect the response and update its state
            snoopFilter->updateResponse(rsp_pkt, *slavePorts[rsp_port_id]);
        }

        // we send the response after the current packet, even if the
        // response is not for this packet (e.g. cache clean operation
        // where both the request and the write packet have to cross
        // the destination xbar before the response is sent.)
        Tick response_time = clockEdge() + pkt->headerDelay;
        rsp_pkt->headerDelay = 0;

        slavePorts[rsp_port_id]->schedTimingResp(rsp_pkt, response_time);
    }

    return success;
}

bool
CoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id)
{
    // determine the source port based on the id
    MasterPort *src_port = masterPorts[master_port_id];

    // determine the destination
    const auto route_lookup = routeTo.find(pkt->req);
    assert(route_lookup != routeTo.end());
    const PortID slave_port_id = route_lookup->second;
    assert(slave_port_id != InvalidPortID);
    assert(slave_port_id < respLayers.size());

    // test if the crossbar should be considered occupied for the
    // current port
    if (!respLayers[slave_port_id]->tryTiming(src_port)) {
        DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                src_port->name(), pkt->print());
        return false;
    }

    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            src_port->name(), pkt->print());

    // store size and command as they might be modified when
    // forwarding the packet
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    // a response sees the response latency
    Tick xbar_delay = responseLatency * clockPeriod();

    // set the packet header and payload delay
    calcPacketTiming(pkt, xbar_delay);

    // determine how long the crossbar layer is busy
    Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;

    if (snoopFilter && !system->bypassCaches()) {
        // let the snoop filter inspect the response and update its state
        snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
    }

    // send the packet through the destination slave port and pay for
    // any outstanding header delay
    Tick latency = pkt->headerDelay;
    pkt->headerDelay = 0;
    slavePorts[slave_port_id]->schedTimingResp(pkt, curTick() + latency);

    // remove the request from the routing table
    routeTo.erase(route_lookup);

    respLayers[slave_port_id]->succeededTiming(packetFinishTime);

    // stats updates
    pktCount[slave_port_id][master_port_id]++;
    pktSize[slave_port_id][master_port_id] += pkt_size;
    transDist[pkt_cmd]++;

    return true;
}

void
CoherentXBar::recvTimingSnoopReq(PacketPtr pkt, PortID master_port_id)
{
    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            masterPorts[master_port_id]->name(), pkt->print());

    // update stats here as we know the forwarding will succeed
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    transDist[pkt->cmdToIndex()]++;
    snoops++;
    snoopTraffic += pkt_size;

    // we should only see express snoops from caches
    assert(pkt->isExpressSnoop());

    // set the packet header and payload delay, for now use forward latency
    // @todo Assess the choice of latency further
    calcPacketTiming(pkt, forwardLatency * clockPeriod());

    // remember if a cache has already committed to responding so we
    // can see if it changes during the snooping
    const bool cache_responding = pkt->cacheResponding();

    assert(pkt->snoopDelay == 0);

    if (snoopFilter) {
        // let the Snoop Filter work its magic and guide probing
        auto sf_res = snoopFilter->lookupSnoop(pkt);
        // the time required by a packet to be delivered through
        // the xbar also has to be charged with the lookup latency
        // of the snoop filter
        pkt->headerDelay += sf_res.second * clockPeriod();
        DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                __func__, masterPorts[master_port_id]->name(), pkt->print(),
                sf_res.first.size(), sf_res.second);

        // forward to all snoopers
        forwardTiming(pkt, InvalidPortID, sf_res.first);
    } else {
        forwardTiming(pkt, InvalidPortID);
    }

    // add the snoop delay to our header delay, and then reset it
    pkt->headerDelay += pkt->snoopDelay;
    pkt->snoopDelay = 0;

    // if we can expect a response, remember how to route it
    if (!cache_responding && pkt->cacheResponding()) {
        assert(routeTo.find(pkt->req) == routeTo.end());
        routeTo[pkt->req] = master_port_id;
    }

    // a snoop request came from a connected slave device (one of
    // our master ports), and if it is not coming from the slave
    // device responsible for the address range something is
    // wrong, hence there is nothing further to do as the packet
    // would be going back to where it came from
    assert(findPort(pkt->getAddrRange()) == master_port_id);
}

bool
CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID slave_port_id)
{
    // determine the source port based on the id
    SlavePort* src_port = slavePorts[slave_port_id];

    // get the destination
    const auto route_lookup = routeTo.find(pkt->req);
    assert(route_lookup != routeTo.end());
    const PortID dest_port_id = route_lookup->second;
    assert(dest_port_id != InvalidPortID);

    // determine if the response is from a snoop request we
    // created as the result of a normal request (in which case it
    // should be in the outstandingSnoop), or if we merely forwarded
    // someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    // test if the crossbar should be considered occupied for the
    // current port, note that the check is bypassed if the response
    // is being passed on as a normal response since this is occupying
    // the response layer rather than the snoop response layer
    if (forwardAsSnoop) {
        assert(dest_port_id < snoopLayers.size());
        if (!snoopLayers[dest_port_id]->tryTiming(src_port)) {
            DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                    src_port->name(), pkt->print());
            return false;
        }
    } else {
        // get the master port that mirrors this slave port internally
        MasterPort* snoop_port = snoopRespPorts[slave_port_id];
        assert(dest_port_id < respLayers.size());
        if (!respLayers[dest_port_id]->tryTiming(snoop_port)) {
            DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                    snoop_port->name(), pkt->print());
            return false;
        }
    }

    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            src_port->name(), pkt->print());

    // store size and command as they might be modified when
    // forwarding the packet
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    // responses are never express snoops
    assert(!pkt->isExpressSnoop());

    // a snoop response sees the snoop response latency, and if it is
    // forwarded as a normal response, the response latency
    Tick xbar_delay =
        (forwardAsSnoop ? snoopResponseLatency : responseLatency) *
        clockPeriod();

    // set the packet header and payload delay
    calcPacketTiming(pkt, xbar_delay);

    // determine how long the crossbar layer is busy
    Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;

    // forward it either as a snoop response or a normal response
    if (forwardAsSnoop) {
        // this is a snoop response to a snoop request we forwarded,
        // e.g. coming from the L1 and going to the L2, and it should
        // be forwarded as a snoop response

        if (snoopFilter) {
            // update the probe filter so that it can properly track the line
            snoopFilter->updateSnoopForward(pkt, *slavePorts[slave_port_id],
                                            *masterPorts[dest_port_id]);
        }

        bool success M5_VAR_USED =
            masterPorts[dest_port_id]->sendTimingSnoopResp(pkt);
        pktCount[slave_port_id][dest_port_id]++;
        pktSize[slave_port_id][dest_port_id] += pkt_size;
        assert(success);

        snoopLayers[dest_port_id]->succeededTiming(packetFinishTime);
    } else {
        // we got a snoop response on one of our slave ports,
        // i.e. from a coherent master connected to the crossbar, and
        // since we created the snoop request as part of recvTiming,
        // this should now be a normal response again
        outstandingSnoop.erase(pkt->req);

        // this is a snoop response from a coherent master, hence it
        // should never go back to where the snoop response came from,
        // but instead to where the original request came from
        assert(slave_port_id != dest_port_id);

        if (snoopFilter) {
            // update the probe filter so that it can properly track the line
            snoopFilter->updateSnoopResponse(pkt, *slavePorts[slave_port_id],
                                    *slavePorts[dest_port_id]);
        }

        DPRINTF(CoherentXBar, "%s: src %s packet %s FWD RESP\n", __func__,
                src_port->name(), pkt->print());

        // as a normal response, it should go back to a master through
        // one of our slave ports, we also pay for any outstanding
        // header latency
        Tick latency = pkt->headerDelay;
        pkt->headerDelay = 0;
        slavePorts[dest_port_id]->schedTimingResp(pkt, curTick() + latency);

        respLayers[dest_port_id]->succeededTiming(packetFinishTime);
    }

    // remove the request from the routing table
    routeTo.erase(route_lookup);

    // stats updates
    transDist[pkt_cmd]++;
    snoops++;
    snoopTraffic += pkt_size;

    return true;
}


void
CoherentXBar::forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id,
                           const std::vector<QueuedSlavePort*>& dests)
{
    DPRINTF(CoherentXBar, "%s for %s\n", __func__, pkt->print());

    // snoops should only happen if the system isn't bypassing caches
    assert(!system->bypassCaches());

    unsigned fanout = 0;

    for (const auto& p: dests) {
        // we could have gotten this request from a snooping master
        // (corresponding to our own slave port that is also in
        // snoopPorts) and should not send it back to where it came
        // from
        if (exclude_slave_port_id == InvalidPortID ||
            p->getId() != exclude_slave_port_id) {
            // cache is not allowed to refuse snoop
            p->sendTimingSnoopReq(pkt);
            fanout++;
        }
    }

    // Stats for fanout of this forward operation
    snoopFanout.sample(fanout);
}

void
CoherentXBar::recvReqRetry(PortID master_port_id)
{
    // responses and snoop responses never block on forwarding them,
    // so the retry will always be coming from a port to which we
    // tried to forward a request
    reqLayers[master_port_id]->recvRetry();
}

Tick
CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id,
                                 MemBackdoorPtr *backdoor)
{
    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            slavePorts[slave_port_id]->name(), pkt->print());

    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    unsigned int pkt_cmd = pkt->cmdToIndex();

    MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
    Tick snoop_response_latency = 0;

    // is this the destination point for this packet? (e.g. true if
    // this xbar is the PoC for a cache maintenance operation to the
    // PoC) otherwise the destination is any cache that can satisfy
    // the request
    const bool is_destination = isDestination(pkt);

    const bool snoop_caches = !system->bypassCaches() &&
        pkt->cmd != MemCmd::WriteClean;
    if (snoop_caches) {
        // forward to all snoopers but the source
        std::pair<MemCmd, Tick> snoop_result;
        if (snoopFilter) {
            // check with the snoop filter where to forward this packet
            auto sf_res =
                snoopFilter->lookupRequest(pkt, *slavePorts[slave_port_id]);
            snoop_response_latency += sf_res.second * clockPeriod();
            DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                    __func__, slavePorts[slave_port_id]->name(), pkt->print(),
                    sf_res.first.size(), sf_res.second);

            // let the snoop filter know about the success of the send
            // operation, and do it even before sending it onwards to
            // avoid situations where atomic upward snoops sneak in
            // between and change the filter state
            snoopFilter->finishRequest(false, pkt->getAddr(), pkt->isSecure());

            if (pkt->isEviction()) {
                // for block-evicting packets, i.e. writebacks and
                // clean evictions, there is no need to snoop up, as
                // all we do is determine if the block is cached or
                // not, instead just set it here based on the snoop
                // filter result
                if (!sf_res.first.empty())
                    pkt->setBlockCached();
            } else {
                snoop_result = forwardAtomic(pkt, slave_port_id, InvalidPortID,
                                             sf_res.first);
            }
        } else {
            snoop_result = forwardAtomic(pkt, slave_port_id);
        }
        snoop_response_cmd = snoop_result.first;
        snoop_response_latency += snoop_result.second;
    }

    // set up a sensible default value
    Tick response_latency = 0;

    const bool sink_packet = sinkPacket(pkt);

    // even if we had a snoop response, we must continue and also
    // perform the actual request at the destination
    PortID master_port_id = findPort(pkt->getAddrRange());

    if (sink_packet) {
        DPRINTF(CoherentXBar, "%s: Not forwarding %s\n", __func__,
                pkt->print());
    } else {
        if (forwardPacket(pkt)) {
            // make sure that the write request (e.g., WriteClean)
            // will stop at the memory below if this crossbar is its
            // destination
            if (pkt->isWrite() && is_destination) {
                pkt->clearWriteThrough();
            }

            // forward the request to the appropriate destination
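            // if the caller asked for a backdoor, use the backdoor
            // variant of the atomic access so the target can also
            // hand back a backdoor for future direct accesses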
            auto master = masterPorts[master_port_id];
            response_latency = backdoor ?
                master->sendAtomicBackdoor(pkt, *backdoor) :
                master->sendAtomic(pkt);
        } else {
            // packets that do not need a response are sunk above, so
            // this one must need a response
            assert(pkt->needsResponse());

            pkt->makeResponse();
        }
    }

    // stats updates for the request
    pktCount[slave_port_id][master_port_id]++;
    pktSize[slave_port_id][master_port_id] += pkt_size;
    transDist[pkt_cmd]++;


    // if lower levels have replied, tell the snoop filter
    if (!system->bypassCaches() && snoopFilter && pkt->isResponse()) {
        snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
    }

    // if we got a response from a snooper, restore it here
    if (snoop_response_cmd != MemCmd::InvalidCmd) {
        // no one else should have responded
        assert(!pkt->isResponse());
        pkt->cmd = snoop_response_cmd;
        response_latency = snoop_response_latency;
    }

    // If this is the destination of the cache clean operation the
    // crossbar is responsible for responding. This crossbar will
    // respond when the cache clean is complete. An atomic cache clean
    // is complete when the crossbar receives the cache clean
    // request (CleanSharedReq, CleanInvalidReq), as either:
    // * no cache above had a dirty copy of the block as indicated by
    //   the satisfied flag of the packet, or
    // * the crossbar has already seen the corresponding write
    //   (WriteClean) which updates the block in the memory below.
    if (pkt->isClean() && isDestination(pkt) && pkt->satisfied()) {
        auto it = outstandingCMO.find(pkt->id);
        assert(it != outstandingCMO.end());
        // we are responding right away
        outstandingCMO.erase(it);
    } else if (pkt->cmd == MemCmd::WriteClean && isDestination(pkt)) {
        // if this is the destination of the operation, the xbar
        // sends the response to the cache clean operation only
        // after having encountered the cache clean request
        auto M5_VAR_USED ret = outstandingCMO.emplace(pkt->id, nullptr);
        // in atomic mode we know that the WriteClean packet should
        // precede the clean request
        assert(ret.second);
    }

    // add the response data
    if (pkt->isResponse()) {
        pkt_size = pkt->hasData() ? pkt->getSize() : 0;
        pkt_cmd = pkt->cmdToIndex();

        // stats updates
        pktCount[slave_port_id][master_port_id]++;
        pktSize[slave_port_id][master_port_id] += pkt_size;
        transDist[pkt_cmd]++;
    }

    // @todo: Not setting header time
    pkt->payloadDelay = response_latency;
    return response_latency;
}

Tick
CoherentXBar::recvAtomicSnoop(PacketPtr pkt, PortID master_port_id)
{
    DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
            masterPorts[master_port_id]->name(), pkt->print());

    // add the request snoop data
    unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
    snoops++;
    snoopTraffic += pkt_size;

    // forward to all snoopers
    std::pair<MemCmd, Tick> snoop_result;
    Tick snoop_response_latency = 0;
    if (snoopFilter) {
        auto sf_res = snoopFilter->lookupSnoop(pkt);
        snoop_response_latency += sf_res.second * clockPeriod();
        DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
                __func__, masterPorts[master_port_id]->name(), pkt->print(),
                sf_res.first.size(), sf_res.second);
        snoop_result = forwardAtomic(pkt, InvalidPortID, master_port_id,
                                     sf_res.first);
    } else {
        snoop_result = forwardAtomic(pkt, InvalidPortID);
    }
    MemCmd snoop_response_cmd = snoop_result.first;
    snoop_response_latency += snoop_result.second;

    if (snoop_response_cmd != MemCmd::InvalidCmd)
        pkt->cmd = snoop_response_cmd;

    // add the response snoop data
    if (pkt->isResponse()) {
        snoops++;
    }

    // @todo: Not setting header time
    pkt->payloadDelay = snoop_response_latency;
    return snoop_response_latency;
}

std::pair<MemCmd, Tick>
CoherentXBar::forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id,
                           PortID source_master_port_id,
                           const std::vector<QueuedSlavePort*>& dests)
{
    // the packet may be changed on snoops, record the original
    // command to enable us to restore it between snoops so that
    // additional snoops can take place properly
    MemCmd orig_cmd = pkt->cmd;
    MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
    Tick snoop_response_latency = 0;

    // snoops should only happen if the system isn't bypassing caches
    assert(!system->bypassCaches());

    unsigned fanout = 0;

    for (const auto& p: dests) {
        // we could have gotten this request from a snooping master
        // (corresponding to our own slave port that is also in
        // snoopPorts) and should not send it back to where it came
        // from
        if (exclude_slave_port_id != InvalidPortID &&
            p->getId() == exclude_slave_port_id)
            continue;

        Tick latency = p->sendAtomicSnoop(pkt);
        fanout++;

        // in contrast to a functional access, we have to keep on
        // going as all snoopers must be updated even if we get a
        // response
        if (!pkt->isResponse())
            continue;

        // response from snoop agent
        assert(pkt->cmd != orig_cmd);
        assert(pkt->cacheResponding());
        // should only happen once
        assert(snoop_response_cmd == MemCmd::InvalidCmd);
        // save response state
        snoop_response_cmd = pkt->cmd;
        snoop_response_latency = latency;

        if (snoopFilter) {
            // Handle responses by the snoopers and differentiate between
            // responses to requests from above and snoops from below
            if (source_master_port_id != InvalidPortID) {
                // Getting a response for a snoop from below
                assert(exclude_slave_port_id == InvalidPortID);
                snoopFilter->updateSnoopForward(pkt, *p,
                             *masterPorts[source_master_port_id]);
            } else {
                // Getting a response for a request from above
                assert(source_master_port_id == InvalidPortID);
                snoopFilter->updateSnoopResponse(pkt, *p,
                             *slavePorts[exclude_slave_port_id]);
            }
        }
        // restore original packet state for remaining snoopers
        pkt->cmd = orig_cmd;
    }

    // Stats for fanout
    snoopFanout.sample(fanout);

    // the packet is restored as part of the loop and any potential
    // snoop response is part of the returned pair
    return std::make_pair(snoop_response_cmd, snoop_response_latency);
}

void
CoherentXBar::recvFunctional(PacketPtr pkt, PortID slave_port_id)
{
    if (!pkt->isPrint()) {
        // don't do DPRINTFs on PrintReq as it clutters up the output
        DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
                slavePorts[slave_port_id]->name(), pkt->print());
    }

    if (!system->bypassCaches()) {
        // forward to all snoopers but the source
        forwardFunctional(pkt, slave_port_id);
    }

    // there is no need to continue if the snooping has found what we
    // were looking for and the packet is already a response
    if (!pkt->isResponse()) {
        // since our slave ports are queued ports we need to check them as well
        for (const auto& p : slavePorts) {
            // if we find a response that has the data, then the
            // downstream caches/memories may be out of date, so simply stop
            // here
            if (p->trySatisfyFunctional(pkt)) {
                if (pkt->needsResponse())
                    pkt->makeResponse();
                return;
            }
        }

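        // the snoop upwards did not turn the packet into a response,
        // and neither did our queued slave ports, so forward it to
        // the port responsible for the address range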
        PortID dest_id = findPort(pkt->getAddrRange());

        masterPorts[dest_id]->sendFunctional(pkt);
    }
}

void
CoherentXBar::recvFunctionalSnoop(PacketPtr pkt, PortID master_port_id)
{
    if (!pkt->isPrint()) {
        // don't do DPRINTFs on PrintReq as it clutters up the output
        DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
                masterPorts[master_port_id]->name(), pkt->print());
    }

    for (const auto& p : slavePorts) {
        if (p->trySatisfyFunctional(pkt)) {
            if (pkt->needsResponse())
                pkt->makeResponse();
            return;
        }
    }

    // forward to all snoopers
    forwardFunctional(pkt, InvalidPortID);
}

void
CoherentXBar::forwardFunctional(PacketPtr pkt, PortID exclude_slave_port_id)
{
    // snoops should only happen if the system isn't bypassing caches
    assert(!system->bypassCaches());

    for (const auto& p: snoopPorts) {
        // we could have gotten this request from a snooping master
        // (corresponding to our own slave port that is also in
        // snoopPorts) and should not send it back to where it came
        // from
        if (exclude_slave_port_id == InvalidPortID ||
            p->getId() != exclude_slave_port_id)
            p->sendFunctionalSnoop(pkt);

        // if we get a response we are done
        if (pkt->isResponse()) {
            break;
        }
    }
}

bool
CoherentXBar::sinkPacket(const PacketPtr pkt) const
{
    // we can sink the packet if:
    // 1) the crossbar is the point of coherency, and a cache is
    //    responding after being snooped
    // 2) the crossbar is the point of coherency, and the packet is a
    //    coherency packet (not a read or a write) that does not
    //    require a response
    // 3) this is a clean evict or clean writeback, but the packet is
    //    found in a cache above this crossbar
    // 4) a cache is responding after being snooped, and the packet
    //    either does not need the block to be writable, or the cache
    //    that has promised to respond (setting the cache responding
    //    flag) is providing writable and thus had a Modified block,
    //    and no further action is needed
    return (pointOfCoherency && pkt->cacheResponding()) ||
        (pointOfCoherency && !(pkt->isRead() || pkt->isWrite()) &&
         !pkt->needsResponse()) ||
        (pkt->isCleanEviction() && pkt->isBlockCached()) ||
        (pkt->cacheResponding() &&
         (!pkt->needsWritable() || pkt->responderHadWritable()));
}

bool
CoherentXBar::forwardPacket(const PacketPtr pkt)
{
    // we are forwarding the packet if:
    // 1) this is a cache clean request to the PoU/PoC and this
    //    crossbar is above the PoU/PoC
    // 2) this is a read or a write
    // 3) this crossbar is above the point of coherency
    if (pkt->isClean()) {
        return !isDestination(pkt);
    }
    return pkt->isRead() || pkt->isWrite() || !pointOfCoherency;
}


void
CoherentXBar::regStats()
{
    // register the stats of the base class and our layers
    BaseXBar::regStats();
    for (auto l: reqLayers)
        l->regStats();
    for (auto l: respLayers)
        l->regStats();
    for (auto l: snoopLayers)
        l->regStats();

    snoops
        .name(name() + ".snoops")
        .desc("Total snoops (count)")
    ;

    snoopTraffic
        .name(name() + ".snoopTraffic")
        .desc("Total snoop traffic (bytes)")
    ;

    snoopFanout
        .init(0, snoopPorts.size(), 1)
        .name(name() + ".snoop_fanout")
        .desc("Request fanout histogram")
    ;
}

CoherentXBar *
CoherentXBarParams::create()
{
    return new CoherentXBar(this);
}
