RubyPort.cc revision 6893:9cdf9b65d946

/*
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/physical.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"

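// Running count of RubyPort objects constructed so far; each new port takes
// the next id (m_port_id) from this counter in the constructor below.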
uint16_t RubyPort::m_num_ports = 0;

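// Requests that have been handed to Ruby but have not yet completed, keyed
// by the request id returned from makeRequest().  ruby_hit_callback() uses
// this map to recover the original packet and the M5 port that issued it.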
RubyPort::RequestMap RubyPort::pending_cpu_requests;

RubyPort::RubyPort(const Params *p)
    : MemObject(p)
{
    m_version = p->version;
    assert(m_version != -1);

    physmem = p->physmem;

    m_controller = NULL;
    m_mandatory_q_ptr = NULL;

    m_port_id = m_num_ports++;
    m_request_cnt = 0;
    m_hit_callback = ruby_hit_callback;
    pio_port = NULL;
    physMemPort = NULL;
    assert(m_num_ports <= 2048); // see below for reason
}

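// The controller is not known at construction time; by the time init() runs
// it must have been set, so the pointer to its mandatory request queue can
// be cached here.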
void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
}

Port *
RubyPort::getPort(const std::string &if_name, int idx)
{
    if (if_name == "port") {
        return new M5Port(csprintf("%s-port%d", name(), idx), this);
    } else if (if_name == "pio_port") {
        //
        // ensure there is only one pio port
        //
        assert(pio_port == NULL);

        pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx),
                               this);

        return pio_port;
    } else if (if_name == "physMemPort") {
        //
        // RubyPort should only have one port to physical memory
        //
        assert(physMemPort == NULL);

        physMemPort = new M5Port(csprintf("%s-physMemPort", name()),
                                 this);

        return physMemPort;
    } else if (if_name == "functional") {
        //
        // Calls on the functional port only want to access functional
        // memory, so pass these calls directly through to physmem.
        //
        assert(physmem != NULL);
        return physmem->getPort(if_name, idx);
    }
    return NULL;
}

RubyPort::PioPort::PioPort(const std::string &_name,
                           RubyPort *_port)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(Ruby, "creating pio port %s on ruby sequencer\n", _name);
    ruby_port = _port;
}

RubyPort::M5Port::M5Port(const std::string &_name,
                         RubyPort *_port)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(Ruby, "creating port from ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
}

Tick
RubyPort::PioPort::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::PioPort::recvAtomic() not implemented!\n");
    return 0;
}

Tick
RubyPort::M5Port::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::M5Port::recvAtomic() not implemented!\n");
    return 0;
}

bool
RubyPort::PioPort::recvTiming(PacketPtr pkt)
{
    //
    // In FS mode, ruby memory will receive pio responses from devices and
    // it must forward these responses back to the particular CPU.
    //
    DPRINTF(MemoryAccess,
            "Pio response for address %#x\n",
            pkt->getAddr());

    assert(pkt->isResponse());

    //
    // First we must retrieve the request port from the packet's SenderState
    //
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->sendTiming(pkt);

    return true;
}

bool
RubyPort::M5Port::recvTiming(PacketPtr pkt)
{
    DPRINTF(MemoryAccess,
            "Timing access caught for address %#x\n",
            pkt->getAddr());

    //dsm: based on SimpleTimingPort::recvTiming(pkt);

    //
    // After checking for pio responses, the remainder of packets
    // received by ruby should only be M5 requests, which should never
    // get nacked.  There used to be code to handle nacks here, but
    // I'm pretty sure it didn't work correctly with the drain code,
    // so that would need to be fixed if we ever added it back.
    //
    assert(pkt->isRequest());

    if (pkt->memInhibitAsserted()) {
        warn("memInhibitAsserted???");
        // snooper will supply based on copy of packet
        // still target's responsibility to delete packet
        delete pkt;
        return true;
    }

    //
    // Check for pio requests and directly send them to the dedicated
    // pio port.
    //
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->pio_port != NULL);

        //
        // Save the port in the sender state object to be used later to
        // route the response
        //
        pkt->senderState = new SenderState(this, pkt->senderState);

        return ruby_port->pio_port->sendTiming(pkt);
    }

    //
    // For DMA and CPU requests, translate them to ruby requests before
    // sending them to our assigned ruby port.
    //
    RubyRequestType type = RubyRequestType_NULL;
    Addr pc = 0;
    if (pkt->isRead()) {
        if (pkt->req->isInstFetch()) {
            type = RubyRequestType_IFETCH;
            pc = pkt->req->getPC();
        } else {
            type = RubyRequestType_LD;
        }
    } else if (pkt->isWrite()) {
        type = RubyRequestType_ST;
    } else if (pkt->isReadWrite()) {
        type = RubyRequestType_RMW_Write;
    }

    RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(),
                             pkt->getSize(), pc, type,
                             RubyAccessMode_Supervisor);

    // Submit the ruby request
    int64_t req_id = ruby_port->makeRequest(ruby_request);
    if (req_id == -1) {
        return false;
    }

    // Save the request for the callback
    RubyPort::pending_cpu_requests[req_id] = new RequestCookie(pkt, this);

    return true;
}

void
RubyPort::ruby_hit_callback(int64_t req_id)
{
    //
    // Note: This single function can be called by cpu and dma ports,
    // as well as the functional port.
    //
    RequestMap::iterator i = pending_cpu_requests.find(req_id);
    if (i == pending_cpu_requests.end())
        panic("could not find pending request %d\n", req_id);

    RequestCookie *cookie = i->second;
    pending_cpu_requests.erase(i);

    Packet *pkt = cookie->pkt;
    M5Port *port = cookie->m5Port;
    delete cookie;

    port->hitCallback(pkt);
}

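// Called once Ruby has finished the timing portion of the access.  The data
// itself is read or written here with an atomic access to the physical
// memory port, and the packet is then turned around and sent back to the
// requester if a response is expected.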
void
RubyPort::M5Port::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    DPRINTF(MemoryAccess, "Hit callback needs response %d\n",
            needsResponse);

    ruby_port->physMemPort->sendAtomic(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // sendAtomic() should already have turned packet into
        // atomic response
        assert(pkt->isResponse());
        DPRINTF(MemoryAccess, "Sending packet back over port\n");
        sendTiming(pkt);
    } else {
        delete pkt;
    }
    DPRINTF(MemoryAccess, "Hit callback done!\n");
}

bool
RubyPort::M5Port::sendTiming(PacketPtr pkt)
{
    schedSendTiming(pkt, curTick + 1); //minimum latency, must be > 0
    return true;
}

bool
RubyPort::PioPort::sendTiming(PacketPtr pkt)
{
    schedSendTiming(pkt, curTick + 1); //minimum latency, must be > 0
    return true;
}

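// Returns true if addr falls inside one of the address ranges exported by
// the peer of physMemPort, i.e. the address is backed by physical memory
// rather than a pio device.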
bool
RubyPort::M5Port::isPhysMemAddress(Addr addr)
{
    AddrRangeList physMemAddrList;
    bool snoop = false;
    ruby_port->physMemPort->getPeerAddressRanges(physMemAddrList, snoop);
    for (AddrRangeIter iter = physMemAddrList.begin();
         iter != physMemAddrList.end();
         iter++) {
        if (addr >= iter->start && addr <= iter->end) {
            DPRINTF(MemoryAccess, "Request found in %#llx - %#llx range\n",
                    iter->start, iter->end);
            return true;
        }
    }
    return false;
}