RubyPort.cc revision 6899:f8057af86bf7

/*
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/physical.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "cpu/rubytest/RubyTester.hh"

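// m_num_ports hands out a unique id to each RubyPort instance, and
// pending_cpu_requests tracks every outstanding request passed to ruby via
// makeRequest(), keyed by the returned request id, so that
// ruby_hit_callback() can recover the original packet and the M5 port it
// arrived on.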
uint16_t RubyPort::m_num_ports = 0;

RubyPort::RequestMap RubyPort::pending_cpu_requests;

RubyPort::RubyPort(const Params *p)
    : MemObject(p)
{
    m_version = p->version;
    assert(m_version != -1);

    physmem = p->physmem;

    m_controller = NULL;
    m_mandatory_q_ptr = NULL;

    m_port_id = m_num_ports++;
    m_request_cnt = 0;
    m_hit_callback = ruby_hit_callback;
    pio_port = NULL;
    physMemPort = NULL;
    assert(m_num_ports <= 2048); // see below for reason
}

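// The controller is not known when the RubyPort is constructed; it is set
// afterwards, so the mandatory queue pointer is looked up here in init()
// rather than in the constructor.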
void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
}

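// Hand out ports by name: any number of CPU/DMA facing "port"s, at most one
// "pio_port", at most one "physMemPort", and a "functional" port that is
// passed straight through to physmem.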
Port *
RubyPort::getPort(const std::string &if_name, int idx)
{
    if (if_name == "port") {
        return new M5Port(csprintf("%s-port%d", name(), idx), this);
    } else if (if_name == "pio_port") {
        //
        // ensure there is only one pio port
        //
        assert(pio_port == NULL);

        pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx),
                               this);

        return pio_port;
    } else if (if_name == "physMemPort") {
        //
        // RubyPort should only have one port to physical memory
        //
        assert(physMemPort == NULL);

        physMemPort = new M5Port(csprintf("%s-physMemPort", name()),
                                 this);

        return physMemPort;
    } else if (if_name == "functional") {
        //
        // Calls to the functional port only want to access functional
        // memory, so pass them directly through to physmem.
        //
        assert(physmem != NULL);
        return physmem->getPort(if_name, idx);
    }
    return NULL;
}

RubyPort::PioPort::PioPort(const std::string &_name,
                           RubyPort *_port)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(Ruby, "creating port from ruby sequencer to pio devices %s\n", _name);
    ruby_port = _port;
}

RubyPort::M5Port::M5Port(const std::string &_name,
                         RubyPort *_port)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(Ruby, "creating port from ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
}

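// Atomic accesses are not supported by the ruby ports; both recvAtomic()
// implementations simply panic.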
Tick
RubyPort::PioPort::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::PioPort::recvAtomic() not implemented!\n");
    return 0;
}


Tick
RubyPort::M5Port::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::M5Port::recvAtomic() not implemented!\n");
    return 0;
}


bool
RubyPort::PioPort::recvTiming(PacketPtr pkt)
{
    //
    // In FS mode, ruby memory will receive pio responses from devices and
    // it must forward these responses back to the particular CPU.
    //
    DPRINTF(MemoryAccess,
            "Pio response for address %#x\n",
            pkt->getAddr());

    assert(pkt->isResponse());

    //
    // First, retrieve the request port from the SenderState object.
    //
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->sendTiming(pkt);

    return true;
}

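// Main entry point for CPU and DMA requests. Pio requests are forwarded to
// the dedicated pio port; everything else is translated into a RubyRequest
// and handed to ruby via makeRequest().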
bool
RubyPort::M5Port::recvTiming(PacketPtr pkt)
{
    DPRINTF(MemoryAccess,
            "Timing access caught for address %#x\n",
            pkt->getAddr());

    //dsm: based on SimpleTimingPort::recvTiming(pkt);

    //
    // After checking for pio responses, the remaining packets received
    // by ruby should only be M5 requests, which should never get nacked.
    // There used to be code to handle nacks here, but it did not interact
    // correctly with the drain code, so it would need to be fixed before
    // being added back.
    //
    assert(pkt->isRequest());

    if (pkt->memInhibitAsserted()) {
        warn("memInhibitAsserted???");
        // snooper will supply based on copy of packet
        // still target's responsibility to delete packet
        delete pkt;
        return true;
    }

    //
    // Check for pio requests and directly send them to the dedicated
    // pio port.
    //
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->pio_port != NULL);

        //
        // Save the port in the sender state object so it can be used later
        // to route the response.
        //
        pkt->senderState = new SenderState(this, pkt->senderState);

        return ruby_port->pio_port->sendTiming(pkt);
    }

    //
    // For DMA and CPU requests, translate them to ruby requests before
    // sending them to our assigned ruby port.
    //
    RubyRequestType type = RubyRequestType_NULL;

    //
    // If valid, copy the pc to the ruby request.
    //
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    if (pkt->isRead()) {
        if (pkt->req->isInstFetch()) {
            type = RubyRequestType_IFETCH;
        } else {
            type = RubyRequestType_LD;
        }
    } else if (pkt->isWrite()) {
        type = RubyRequestType_ST;
    } else if (pkt->isReadWrite()) {
        type = RubyRequestType_RMW_Write;
    }

    RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(),
                             pkt->getSize(), pc, type,
                             RubyAccessMode_Supervisor);

    // Submit the ruby request; makeRequest() returns -1 if the sequencer
    // cannot accept the request, in which case the packet is not accepted
    // here either.
    int64_t req_id = ruby_port->makeRequest(ruby_request);
    if (req_id == -1) {
        return false;
    }

    // Save the request for the callback
    RubyPort::pending_cpu_requests[req_id] = new RequestCookie(pkt, this);

    return true;
}

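// Static hit callback installed in the constructor
// (m_hit_callback = ruby_hit_callback); called when the request identified
// by req_id has completed.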
void
RubyPort::ruby_hit_callback(int64_t req_id)
{
    //
    // Note: this single function can be called by cpu and dma ports,
    // as well as the functional port.
    //
    RequestMap::iterator i = pending_cpu_requests.find(req_id);
    if (i == pending_cpu_requests.end())
        panic("could not find pending request %d\n", req_id);

    RequestCookie *cookie = i->second;
    pending_cpu_requests.erase(i);

    Packet *pkt = cookie->pkt;
    M5Port *port = cookie->m5Port;
    delete cookie;

    port->hitCallback(pkt);
}

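// When ruby signals a hit, the actual data access is performed by an atomic
// access to physical memory; the packet is then turned into a response and
// sent back if one is expected.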
void
RubyPort::M5Port::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    DPRINTF(MemoryAccess, "Hit callback needs response %d\n",
            needsResponse);

    ruby_port->physMemPort->sendAtomic(pkt);

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        // sendAtomic() should already have turned packet into
        // atomic response
        assert(pkt->isResponse());
        DPRINTF(MemoryAccess, "Sending packet back over port\n");
        sendTiming(pkt);
    } else {
        delete pkt;
    }
    DPRINTF(MemoryAccess, "Hit callback done!\n");
}

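// Timing sends from ruby back to M5 are scheduled one tick in the future,
// since schedSendTiming() requires a delay greater than zero.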
bool
RubyPort::M5Port::sendTiming(PacketPtr pkt)
{
    schedSendTiming(pkt, curTick + 1); // minimum latency, must be > 0
    return true;
}

bool
RubyPort::PioPort::sendTiming(PacketPtr pkt)
{
    schedSendTiming(pkt, curTick + 1); // minimum latency, must be > 0
    return true;
}

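// An address is considered a physical memory access if it falls within one
// of the address ranges advertised by the peer of physMemPort; anything
// else is treated as pio.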
bool
RubyPort::M5Port::isPhysMemAddress(Addr addr)
{
    AddrRangeList physMemAddrList;
    bool snoop = false;
    ruby_port->physMemPort->getPeerAddressRanges(physMemAddrList, snoop);
    for (AddrRangeIter iter = physMemAddrList.begin();
         iter != physMemAddrList.end();
         iter++) {
        if (addr >= iter->start && addr <= iter->end) {
            DPRINTF(MemoryAccess, "Request found in %#llx - %#llx range\n",
                    iter->start, iter->end);
            return true;
        }
    }
    return false;
}
322