physical.cc revision 3170:37fd1e73f836
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Ali Saidi
 */
31
32#include <sys/types.h>
33#include <sys/mman.h>
34#include <errno.h>
35#include <fcntl.h>
36#include <unistd.h>
37#include <zlib.h>
38
39#include <iostream>
40#include <string>
41
42
43#include "base/misc.hh"
44#include "config/full_system.hh"
45#include "mem/packet_impl.hh"
46#include "mem/physical.hh"
47#include "sim/host.hh"
48#include "sim/builder.hh"
49#include "sim/eventq.hh"
50#include "arch/isa_traits.hh"
51
52
53using namespace std;
54using namespace TheISA;
55
56
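// Back the simulated physical memory with a zero-filled anonymous
// mapping; the configured address range must be a whole number of
// ISA pages.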
PhysicalMemory::PhysicalMemory(Params *p)
    : MemObject(p->name), pmemAddr(NULL), port(NULL), lat(p->latency),
      _params(p)
{
    if (params()->addrRange.size() % TheISA::PageBytes != 0)
        panic("Memory Size not divisible by page size\n");

    int map_flags = MAP_ANON | MAP_PRIVATE;
    pmemAddr = (uint8_t *)mmap(NULL, params()->addrRange.size(),
                               PROT_READ | PROT_WRITE, map_flags, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap!\n");
    }

    pagePtr = 0;
}

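// Sanity-check that something is connected to the memory port and
// announce our address range to it.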
void
PhysicalMemory::init()
{
    if (!port)
        panic("PhysicalMemory not connected to anything!");
    port->sendStatusChange(Port::RangeChange);
}

PhysicalMemory::~PhysicalMemory()
{
    if (pmemAddr)
        munmap(pmemAddr, params()->addrRange.size());
    // Remove memPorts?
}

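// Hand out the next unallocated page of simulated memory and advance
// the page pointer.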
Addr
PhysicalMemory::new_page()
{
    Addr return_addr = pagePtr << LogVMPageSize;
    return_addr += params()->addrRange.start;

    ++pagePtr;
    return return_addr;
}

int
PhysicalMemory::deviceBlockSize()
{
    // Can accept a request of any size
    return 0;
}

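// Every access sees the same fixed latency, taken from the 'latency'
// parameter; the packet itself is not examined.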
Tick
PhysicalMemory::calculateLatency(Packet *pkt)
{
    return lat;
}


// Add load-locked to tracking list.  Should only be called if the
// operation is a load and the LOCKED flag is set.
void
PhysicalMemory::trackLoadLocked(Request *req)
{
    Addr paddr = LockedAddr::mask(req->getPaddr());

    // First we check if we already have a locked addr for this
    // xc.  Since each xc only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: cpu %d thread %d addr %#x\n",
                    req->getCpuNum(), req->getThreadNum(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // no record for this xc: need to allocate a new one
    DPRINTF(LLSC, "Adding lock record: cpu %d thread %d addr %#x\n",
            req->getCpuNum(), req->getThreadNum(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}


// Called on *writes* only... both regular stores and
// store-conditional operations.  Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
PhysicalMemory::checkLockedAddrList(Request *req)
{
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLocked = req->isLocked();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool success = !isLocked;

    // Iterate over list.  Note that there could be multiple matching
    // records, as more than one context could have done a load locked
    // to this location.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    while (i != lockedAddrList.end()) {

        if (i->addr == paddr) {
            // we have a matching address

            if (isLocked && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory
                // system can tell, the requesting context's lock is
                // still valid.
                DPRINTF(LLSC, "StCond success: cpu %d thread %d addr %#x\n",
                        req->getCpuNum(), req->getThreadNum(), paddr);
                success = true;
            }

            // Get rid of our record of this lock and advance to next
            DPRINTF(LLSC, "Erasing lock record: cpu %d thread %d addr %#x\n",
                    i->cpuNum, i->threadNum, paddr);
            i = lockedAddrList.erase(i);
        }
        else {
            // no match: advance to next record
            ++i;
        }
    }

    if (isLocked) {
        req->setScResult(success ? 1 : 0);
    }

    return success;
}

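// Perform a read or write directly on the host backing store.  Locked
// (LL) reads are recorded via trackLoadLocked(); writes only modify
// memory if writeOK() approves them (writeOK() lives in the header;
// see the locked-address handling above).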
void
PhysicalMemory::doFunctionalAccess(Packet *pkt)
{
    assert(pkt->getAddr() + pkt->getSize() < params()->addrRange.size());

    switch (pkt->cmd) {
      case Packet::ReadReq:
        if (pkt->req->isLocked()) {
            trackLoadLocked(pkt->req);
        }
        memcpy(pkt->getPtr<uint8_t>(),
               pmemAddr + pkt->getAddr() - params()->addrRange.start,
               pkt->getSize());
        break;
      case Packet::WriteReq:
        if (writeOK(pkt->req)) {
            memcpy(pmemAddr + pkt->getAddr() - params()->addrRange.start,
                   pkt->getPtr<uint8_t>(), pkt->getSize());
        }
        break;
      default:
        panic("unimplemented");
    }

    pkt->result = Packet::Success;
}

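// Only one connection to the main "port" is allowed; additional
// "functional" ports can be created on demand for startup writes.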
Port *
PhysicalMemory::getPort(const std::string &if_name, int idx)
{
    if (if_name == "port" && idx == -1) {
        if (port != NULL)
            panic("PhysicalMemory::getPort: additional port requested to memory!");
        port = new MemoryPort(name() + "-port", this);
        return port;
    } else if (if_name == "functional") {
        /* special port for functional writes at startup. */
        return new MemoryPort(name() + "-funcport", this);
    } else {
        panic("PhysicalMemory::getPort: unknown port %s requested", if_name);
    }
}

void
PhysicalMemory::recvStatusChange(Port::Status status)
{
}

PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
                                       PhysicalMemory *_memory)
    : SimpleTimingPort(_name), memory(_memory)
{ }

void
PhysicalMemory::MemoryPort::recvStatusChange(Port::Status status)
{
    memory->recvStatusChange(status);
}

void
PhysicalMemory::MemoryPort::getDeviceAddressRanges(AddrRangeList &resp,
                                                   AddrRangeList &snoop)
{
    memory->getAddressRanges(resp, snoop);
}

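// Memory does not snoop anything; it responds to exactly the one
// address range it was configured with.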
void
PhysicalMemory::getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop)
{
    snoop.clear();
    resp.clear();
    resp.push_back(RangeSize(params()->addrRange.start,
                             params()->addrRange.size()));
}

int
PhysicalMemory::MemoryPort::deviceBlockSize()
{
    return memory->deviceBlockSize();
}

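// Atomic accesses are serviced immediately against the backing store;
// the fixed memory latency is returned to the requester.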
Tick
PhysicalMemory::MemoryPort::recvAtomic(Packet *pkt)
{
    memory->doFunctionalAccess(pkt);
    return memory->calculateLatency(pkt);
}

void
PhysicalMemory::MemoryPort::recvFunctional(Packet *pkt)
{
    // Default implementation of SimpleTimingPort::recvFunctional()
    // calls recvAtomic() and throws away the latency; we can save a
    // little here by just not calculating the latency.
    memory->doFunctionalAccess(pkt);
}

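// Drain the port: a non-zero count of outstanding activity leaves us
// in the Draining state, otherwise we go straight to Drained.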
unsigned int
PhysicalMemory::drain(Event *de)
{
    int count = port->drain(de);
    if (count)
        changeState(Draining);
    else
        changeState(Drained);
    return count;
}

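// Checkpointing: dump the entire memory image, gzip-compressed, into
// "<name>.physmem" inside the checkpoint directory and record the
// filename in the checkpoint.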
void
PhysicalMemory::serialize(ostream &os)
{
    gzFile compressedMem;
    string filename = name() + ".physmem";

    SERIALIZE_SCALAR(filename);

    // write memory file
    string thefile = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n", filename);
    }

    compressedMem = gzdopen(fd, "wb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    if (gzwrite(compressedMem, pmemAddr, params()->addrRange.size()) !=
        params()->addrRange.size()) {
        fatal("Write failed on physical memory checkpoint file '%s'\n",
              filename);
    }

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

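// Restore from a checkpoint: re-create the anonymous backing store and
// repopulate it from the compressed image, writing only the non-zero
// words so untouched pages stay unbacked by the host VM system.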
void
PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
{
    gzFile compressedMem;
    long *tempPage;
    long *pmem_current;
    uint64_t curSize;
    uint32_t bytesRead;
    const int chunkSize = 16384;

    string filename;

    UNSERIALIZE_SCALAR(filename);

    filename = cp->cptDir + "/" + filename;

    // open the compressed memory image file
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    compressedMem = gzdopen(fd, "rb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // Unmap the memory that was mmapped in the constructor.  This is
    // done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it.
    munmap(pmemAddr, params()->addrRange.size());

    pmemAddr = (uint8_t *)mmap(NULL, params()->addrRange.size(),
                               PROT_READ | PROT_WRITE,
                               MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

    curSize = 0;
    tempPage = (long*)malloc(chunkSize);
    if (tempPage == NULL)
        fatal("Unable to malloc memory to read file %s\n", filename);

    // Only copy words that are non-zero, so we don't give the VM system hell.
    while (curSize < params()->addrRange.size()) {
        bytesRead = gzread(compressedMem, tempPage, chunkSize);
        if (bytesRead != chunkSize &&
            bytesRead != params()->addrRange.size() - curSize)
            fatal("Read failed on physical memory checkpoint file '%s'"
                  " got %d bytes, expected %d or %d bytes\n",
                  filename, bytesRead, chunkSize,
                  params()->addrRange.size() - curSize);

        assert(bytesRead % sizeof(long) == 0);

        for (int x = 0; x < bytesRead / sizeof(long); x++) {
            if (*(tempPage + x) != 0) {
                pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
                *pmem_current = *(tempPage + x);
            }
        }
        curSize += bytesRead;
    }

    free(tempPage);

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}


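// Standard SimObject parameter declaration/initialization and creation
// boilerplate.  Note that the 'file' parameter is declared and given a
// default below but is not referenced by CREATE_SIM_OBJECT here.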
BEGIN_DECLARE_SIM_OBJECT_PARAMS(PhysicalMemory)

    Param<string> file;
    Param<Range<Addr> > range;
    Param<Tick> latency;

END_DECLARE_SIM_OBJECT_PARAMS(PhysicalMemory)

BEGIN_INIT_SIM_OBJECT_PARAMS(PhysicalMemory)

    INIT_PARAM_DFLT(file, "memory mapped file", ""),
    INIT_PARAM(range, "Device Address Range"),
    INIT_PARAM(latency, "Memory access latency")

END_INIT_SIM_OBJECT_PARAMS(PhysicalMemory)

CREATE_SIM_OBJECT(PhysicalMemory)
{
    PhysicalMemory::Params *p = new PhysicalMemory::Params;
    p->name = getInstanceName();
    p->addrRange = range;
    p->latency = latency;
    return new PhysicalMemory(p);
}

REGISTER_SIM_OBJECT("PhysicalMemory", PhysicalMemory)