/*
 * Copyright (c) 2011-2013, 2015 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Steve Reinhardt
 *          Andreas Hansson
 */

/**
 * @file
 * Implementation of a memory-mapped bridge that connects a master
 * and a slave through a request and response queue.
 */

#include "mem/bridge.hh"

#include "base/trace.hh"
#include "debug/Bridge.hh"
#include "params/Bridge.hh"

Bridge::BridgeSlavePort::BridgeSlavePort(const std::string& _name,
                                         Bridge& _bridge,
                                         BridgeMasterPort& _masterPort,
                                         Cycles _delay, int _resp_limit,
                                         std::vector<AddrRange> _ranges)
    : SlavePort(_name, &_bridge), bridge(_bridge), masterPort(_masterPort),
      delay(_delay), ranges(_ranges.begin(), _ranges.end()),
      outstandingResponses(0), retryReq(false), respQueueLimit(_resp_limit),
      sendEvent([this]{ trySendTiming(); }, _name)
{
}

Bridge::BridgeMasterPort::BridgeMasterPort(const std::string& _name,
                                           Bridge& _bridge,
                                           BridgeSlavePort& _slavePort,
                                           Cycles _delay, int _req_limit)
    : MasterPort(_name, &_bridge), bridge(_bridge), slavePort(_slavePort),
      delay(_delay), reqQueueLimit(_req_limit),
      sendEvent([this]{ trySendTiming(); }, _name)
{
}

Bridge::Bridge(Params *p)
    : ClockedObject(p),
      slavePort(p->name + ".slave", *this, masterPort,
                ticksToCycles(p->delay), p->resp_size, p->ranges),
      masterPort(p->name + ".master", *this, slavePort,
                 ticksToCycles(p->delay), p->req_size)
{
}

Port &
Bridge::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "master")
        return masterPort;
    else if (if_name == "slave")
        return slavePort;
    else
        // pass it along to our super class
        return ClockedObject::getPort(if_name, idx);
}

void
Bridge::init()
{
    // make sure both sides of the bridge are connected
    if (!slavePort.isConnected() || !masterPort.isConnected())
        fatal("Both ports of a bridge must be connected.\n");

    // notify the master side of our address ranges
    slavePort.sendRangeChange();
}

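// the response queue is full once every slot has been reserved for an
// outstanding response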
bool
Bridge::BridgeSlavePort::respQueueFull() const
{
    return outstandingResponses == respQueueLimit;
}

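// the request queue is full once the number of deferred packets reaches
// the configured limit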
bool
Bridge::BridgeMasterPort::reqQueueFull() const
{
    return transmitList.size() == reqQueueLimit;
}

bool
Bridge::BridgeMasterPort::recvTimingResp(PacketPtr pkt)
{
    // all checks are done when the request is accepted on the slave
    // side, so we are guaranteed to have space for the response
    DPRINTF(Bridge, "recvTimingResp: %s addr 0x%x\n",
            pkt->cmdString(), pkt->getAddr());

    DPRINTF(Bridge, "Request queue size: %d\n", transmitList.size());

    // technically the packet only reaches us after the header delay,
    // and typically we also need to deserialise any payload (unless
    // the two sides of the bridge are synchronous)
    Tick receive_delay = pkt->headerDelay + pkt->payloadDelay;
    pkt->headerDelay = pkt->payloadDelay = 0;

    slavePort.schedTimingResp(pkt, bridge.clockEdge(delay) +
                              receive_delay);

    return true;
}

bool
Bridge::BridgeSlavePort::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(Bridge, "recvTimingReq: %s addr 0x%x\n",
            pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    // we should not get a new request after committing to retry the
    // current one, but unfortunately the CPU violates this rule, so
    // simply ignore it for now
    if (retryReq)
        return false;

    DPRINTF(Bridge, "Response queue size: %d outresp: %d\n",
            transmitList.size(), outstandingResponses);

    // if the request queue is full then there is no hope
    if (masterPort.reqQueueFull()) {
        DPRINTF(Bridge, "Request queue full\n");
        retryReq = true;
    } else {
        // look at the response queue if we expect to see a response
        bool expects_response = pkt->needsResponse();
        if (expects_response) {
            if (respQueueFull()) {
                DPRINTF(Bridge, "Response queue full\n");
                retryReq = true;
            } else {
                // ok to send the request with space for the response
                DPRINTF(Bridge, "Reserving space for response\n");
                assert(outstandingResponses != respQueueLimit);
                ++outstandingResponses;

                // no need to set retryReq to false as this is already the
                // case
            }
        }

        if (!retryReq) {
            // technically the packet only reaches us after the header
            // delay, and typically we also need to deserialise any
            // payload (unless the two sides of the bridge are
            // synchronous)
            Tick receive_delay = pkt->headerDelay + pkt->payloadDelay;
            pkt->headerDelay = pkt->payloadDelay = 0;

            masterPort.schedTimingReq(pkt, bridge.clockEdge(delay) +
                                      receive_delay);
        }
    }

    // remember that we are now stalling a packet and that we have to
    // tell the sending master to retry once space becomes available;
    // we make no distinction as to whether the stall is due to the
    // request queue or the response queue being full
    return !retryReq;
}

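// called by the master port when request-queue space has freed up, so
// that a request stalled on the slave side can be retried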
void
Bridge::BridgeSlavePort::retryStalledReq()
{
    if (retryReq) {
        DPRINTF(Bridge, "Request waiting for retry, now retrying\n");
        retryReq = false;
        sendRetryReq();
    }
}

void
Bridge::BridgeMasterPort::schedTimingReq(PacketPtr pkt, Tick when)
{
    // If we're about to put this packet at the head of the queue, we
    // need to schedule an event to do the transmit.  Otherwise there
    // should already be an event scheduled for sending the head
    // packet.
    if (transmitList.empty()) {
        bridge.schedule(sendEvent, when);
    }

    assert(transmitList.size() != reqQueueLimit);

    transmitList.emplace_back(pkt, when);
}


void
Bridge::BridgeSlavePort::schedTimingResp(PacketPtr pkt, Tick when)
{
    // If we're about to put this packet at the head of the queue, we
    // need to schedule an event to do the transmit.  Otherwise there
    // should already be an event scheduled for sending the head
    // packet.
    if (transmitList.empty()) {
        bridge.schedule(sendEvent, when);
    }

    transmitList.emplace_back(pkt, when);
}

void
Bridge::BridgeMasterPort::trySendTiming()
{
    assert(!transmitList.empty());

    DeferredPacket req = transmitList.front();

    assert(req.tick <= curTick());

    PacketPtr pkt = req.pkt;

    DPRINTF(Bridge, "trySend request addr 0x%x, queue size %d\n",
            pkt->getAddr(), transmitList.size());

    if (sendTimingReq(pkt)) {
        // send successful
        transmitList.pop_front();
        DPRINTF(Bridge, "trySend request successful\n");

        // If there are more packets to send, schedule event to try again.
        if (!transmitList.empty()) {
            DeferredPacket next_req = transmitList.front();
            DPRINTF(Bridge, "Scheduling next send\n");
            bridge.schedule(sendEvent, std::max(next_req.tick,
                                                bridge.clockEdge()));
        }

        // if we have stalled a request due to a full request queue,
        // then send a retry at this point; note that if the stalled
        // request was waiting for the response queue rather than the
        // request queue, it may be stalled again
        slavePort.retryStalledReq();
    }

    // if the send failed, then we try again once we receive a retry,
    // and therefore there is no need to take any action
}

void
Bridge::BridgeSlavePort::trySendTiming()
{
    assert(!transmitList.empty());

    DeferredPacket resp = transmitList.front();

    assert(resp.tick <= curTick());

    PacketPtr pkt = resp.pkt;

    DPRINTF(Bridge, "trySend response addr 0x%x, outstanding %d\n",
            pkt->getAddr(), outstandingResponses);

    if (sendTimingResp(pkt)) {
        // send successful
        transmitList.pop_front();
        DPRINTF(Bridge, "trySend response successful\n");

        assert(outstandingResponses != 0);
        --outstandingResponses;

        // If there are more packets to send, schedule event to try again.
        if (!transmitList.empty()) {
            DeferredPacket next_resp = transmitList.front();
            DPRINTF(Bridge, "Scheduling next send\n");
            bridge.schedule(sendEvent, std::max(next_resp.tick,
                                                bridge.clockEdge()));
        }

        // if there is space in the request queue and we were stalling
        // a request, it will definitely be possible to accept it now
        // since there is guaranteed space in the response queue
        if (!masterPort.reqQueueFull() && retryReq) {
            DPRINTF(Bridge, "Request waiting for retry, now retrying\n");
            retryReq = false;
            sendRetryReq();
        }
    }

    // if the send failed, then we try again once we receive a retry,
    // and therefore there is no need to take any action
}

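// the peer slave port can now accept another request, so try again to
// send the packet at the head of the request queue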
void
Bridge::BridgeMasterPort::recvReqRetry()
{
    trySendTiming();
}

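// the peer master port can now accept another response, so try again to
// send the packet at the head of the response queue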
void
Bridge::BridgeSlavePort::recvRespRetry()
{
    trySendTiming();
}

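// atomic requests pass straight through the bridge, with the configured
// bridge delay added to the latency returned by the other side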
Tick
Bridge::BridgeSlavePort::recvAtomic(PacketPtr pkt)
{
    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    return delay * bridge.clockPeriod() + masterPort.sendAtomic(pkt);
}

void
Bridge::BridgeSlavePort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(name());

    // check the response queue
    for (auto i = transmitList.begin();  i != transmitList.end(); ++i) {
        if (pkt->trySatisfyFunctional((*i).pkt)) {
            pkt->makeResponse();
            return;
        }
    }

    // also check the master port's request queue
    if (masterPort.trySatisfyFunctional(pkt)) {
        return;
    }

    pkt->popLabel();

    // fall through if pkt still not satisfied
    masterPort.sendFunctional(pkt);
}

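// check the deferred request packets in the request queue for one that
// satisfies the functional access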
bool
Bridge::BridgeMasterPort::trySatisfyFunctional(PacketPtr pkt)
{
    bool found = false;
    auto i = transmitList.begin();

    while (i != transmitList.end() && !found) {
        if (pkt->trySatisfyFunctional((*i).pkt)) {
            pkt->makeResponse();
            found = true;
        }
        ++i;
    }

    return found;
}

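// the address ranges served by the bridge are those configured on its
// slave port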
AddrRangeList
Bridge::BridgeSlavePort::getAddrRanges() const
{
    return ranges;
}

Bridge *
BridgeParams::create()
{
    return new Bridge(this);
}