// physical.cc revision 8712:7f762428a9f5
/*
 * Copyright (c) 2010 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Ali Saidi
 */

#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <string>

#include "arch/isa_traits.hh"
#include "arch/registers.hh"
#include "base/intmath.hh"
#include "base/misc.hh"
#include "base/random.hh"
#include "base/types.hh"
#include "config/full_system.hh"
#include "config/the_isa.hh"
#include "debug/LLSC.hh"
#include "debug/MemoryAccess.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "sim/eventq.hh"

using namespace std;
using namespace TheISA;

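// Construct the memory object: check that the size is a multiple of the
// page size and, unless this is a "null" (data-less) memory, mmap the
// backing store, either anonymously or from the optional backing file,
// zeroing it if requested.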
PhysicalMemory::PhysicalMemory(const Params *p)
    : MemObject(p), pmemAddr(NULL), lat(p->latency), lat_var(p->latency_var),
      _size(params()->range.size()), _start(params()->range.start)
{
    if (size() % TheISA::PageBytes != 0)
        panic("Memory Size not divisible by page size\n");

    if (params()->null)
        return;


    if (params()->file == "") {
        int map_flags = MAP_ANON | MAP_PRIVATE;
        pmemAddr = (uint8_t *)mmap(NULL, size(),
                                   PROT_READ | PROT_WRITE, map_flags, -1, 0);
    } else {
        int map_flags = MAP_PRIVATE;
        int fd = open(params()->file.c_str(), O_RDONLY);
        _size = lseek(fd, 0, SEEK_END);
        lseek(fd, 0, SEEK_SET);
        pmemAddr = (uint8_t *)mmap(NULL, roundUp(size(), sysconf(_SC_PAGESIZE)),
                                   PROT_READ | PROT_WRITE, map_flags, fd, 0);
    }

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        if (params()->file == "")
            fatal("Could not mmap!\n");
        else
            fatal("Could not find file: %s\n", params()->file);
    }

    // If requested, initialize all the memory to 0
    if (p->zero)
        memset(pmemAddr, 0, size());
}

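// Check that at least one port is connected and tell our neighbours what
// address range we respond to.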
void
PhysicalMemory::init()
{
    if (ports.size() == 0) {
        fatal("PhysicalMemory object %s is unconnected!", name());
    }

    for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
        if (*pi)
            (*pi)->sendRangeChange();
    }
}

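// Release the mmapped backing store, if any.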
PhysicalMemory::~PhysicalMemory()
{
    if (pmemAddr)
        munmap((char*)pmemAddr, size());
}

unsigned
PhysicalMemory::deviceBlockSize() const
{
    // Can accept any size request
    return 0;
}

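// The access latency is the configured fixed latency plus, if latency_var
// is non-zero, a random component of up to latency_var ticks.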
Tick
PhysicalMemory::calculateLatency(PacketPtr pkt)
{
    Tick latency = lat;
    if (lat_var != 0)
        latency += random_mt.random<Tick>(0, lat_var);
    return latency;
}



// Add load-locked to tracking list.  Should only be called if the
// operation is a load and the LLSC flag is set.
void
PhysicalMemory::trackLoadLocked(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());

    // first we check if we already have a locked addr for this
    // xc.  Since each xc only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
                    req->contextId(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // no record for this xc: need to allocate a new one
    DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
            req->contextId(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}


// Called on *writes* only... both regular stores and
// store-conditional operations.  Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
PhysicalMemory::checkLockedAddrList(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLLSC = pkt->isLLSC();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool success = !isLLSC;

    // Iterate over list.  Note that there could be multiple matching
    // records, as more than one context could have done a load locked
    // to this location.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    while (i != lockedAddrList.end()) {

        if (i->addr == paddr) {
            // we have a matching address

            if (isLLSC && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory
                // system can tell, the requesting context's lock is
                // still valid.
                DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
                        req->contextId(), paddr);
                success = true;
            }

            // Get rid of our record of this lock and advance to next
            DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
                    i->contextId, paddr);
            i = lockedAddrList.erase(i);
        } else {
            // no match: advance to next record
            ++i;
        }
    }

    if (isLLSC) {
        req->setExtraData(success ? 1 : 0);
    }

    return success;
}


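// Packet tracing helpers: TRACE_PACKET logs the access type, size, address
// and data under the MemoryAccess debug flag (dumping raw bytes for sizes
// other than 1, 2, 4 or 8); it compiles to nothing when tracing is off.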
#if TRACING_ON

#define CASE(A, T)                                                      \
  case sizeof(T):                                                       \
    DPRINTF(MemoryAccess,"%s of size %i on address 0x%x data 0x%x\n",   \
            A, pkt->getSize(), pkt->getAddr(), pkt->get<T>());          \
  break


#define TRACE_PACKET(A)                                                 \
    do {                                                                \
        switch (pkt->getSize()) {                                       \
          CASE(A, uint64_t);                                            \
          CASE(A, uint32_t);                                            \
          CASE(A, uint16_t);                                            \
          CASE(A, uint8_t);                                             \
          default:                                                      \
            DPRINTF(MemoryAccess, "%s of size %i on address 0x%x\n",    \
                    A, pkt->getSize(), pkt->getAddr());                 \
            DDUMP(MemoryAccess, pkt->getPtr<uint8_t>(), pkt->getSize());\
        }                                                               \
    } while (0)

#else

#define TRACE_PACKET(A)

#endif

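// Satisfy a request immediately and return the access latency.  Handles
// swaps (optionally conditional), reads (tracking load-locked requests),
// writes (checked against the locked-address list), and upgrades or
// invalidates.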
Tick
PhysicalMemory::doAtomicAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());

    if (pkt->memInhibitAsserted()) {
        DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
                pkt->getAddr());
        return 0;
    }

    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->cmd == MemCmd::SwapReq) {
        IntReg overwrite_val;
        bool overwrite_mem;
        uint64_t condition_val64;
        uint32_t condition_val32;

        if (!pmemAddr)
            panic("Swap only works if there is real memory (i.e. null=False)");
        assert(sizeof(IntReg) >= pkt->getSize());

        overwrite_mem = true;
        // keep a copy of our possible write value, and copy what is at the
        // memory address into the packet
        std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
        std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());

        if (pkt->req->isCondSwap()) {
            if (pkt->getSize() == sizeof(uint64_t)) {
                condition_val64 = pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
                                             sizeof(uint64_t));
            } else if (pkt->getSize() == sizeof(uint32_t)) {
                condition_val32 = (uint32_t)pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
                                             sizeof(uint32_t));
            } else
                panic("Invalid size for conditional read/write\n");
        }

        if (overwrite_mem)
            std::memcpy(hostAddr, &overwrite_val, pkt->getSize());

        assert(!pkt->req->isInstFetch());
        TRACE_PACKET("Read/Write");
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLLSC()) {
            trackLoadLocked(pkt);
        }
        if (pmemAddr)
            memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
    } else if (pkt->isWrite()) {
        if (writeOK(pkt)) {
            if (pmemAddr)
                memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Write");
        }
    } else if (pkt->isInvalidate()) {
        // upgrade or invalidate
        if (pkt->needsResponse()) {
            pkt->makeAtomicResponse();
        }
    } else {
        panic("unimplemented");
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }
    return calculateLatency(pkt);
}


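// Service a functional (debug) access against the backing store without
// affecting timing state; print requests dump the byte at the target
// address.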
void
PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());


    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->isRead()) {
        if (pmemAddr)
            memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET("Read");
        pkt->makeAtomicResponse();
    } else if (pkt->isWrite()) {
        if (pmemAddr)
            memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
        TRACE_PACKET("Write");
        pkt->makeAtomicResponse();
    } else if (pkt->isPrint()) {
        Packet::PrintReqState *prs =
            dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
        // Need to call printLabels() explicitly since we're not going
        // through printObj().
        prs->printLabels();
        // Right now we just print the single byte at the specified address.
        ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
    } else {
        panic("PhysicalMemory: unimplemented functional command %s",
              pkt->cmdString());
    }
}


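// Return (creating on demand) the memory port with the given index; each
// index may only be assigned once.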
Port *
PhysicalMemory::getPort(const std::string &if_name, int idx)
{
    if (if_name != "port") {
        panic("PhysicalMemory::getPort: unknown port %s requested", if_name);
    }

    if (idx >= (int)ports.size()) {
        ports.resize(idx + 1);
    }

    if (ports[idx] != NULL) {
        panic("PhysicalMemory::getPort: port %d already assigned", idx);
    }

    MemoryPort *port =
        new MemoryPort(csprintf("%s-port%d", name(), idx), this);

    ports[idx] = port;
    return port;
}

PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
                                       PhysicalMemory *_memory)
    : SimpleTimingPort(_name, _memory), memory(_memory)
{ }

void
PhysicalMemory::MemoryPort::recvRangeChange()
{
    // memory is a slave and thus should never have to worry about its
    // neighbours' address ranges
}

AddrRangeList
PhysicalMemory::MemoryPort::getAddrRanges()
{
    return memory->getAddrRanges();
}

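// The memory responds to a single contiguous range of addresses starting
// at start().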
AddrRangeList
PhysicalMemory::getAddrRanges()
{
    AddrRangeList ranges;
    ranges.push_back(RangeSize(start(), size()));
    return ranges;
}

unsigned
PhysicalMemory::MemoryPort::deviceBlockSize() const
{
    return memory->deviceBlockSize();
}

Tick
PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory->doAtomicAccess(pkt);
}

void
PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory->name());

    if (!checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory->doFunctionalAccess(pkt);
    }

    pkt->popLabel();
}

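// Drain all ports, report how many are still busy, and update the drain
// state accordingly.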
unsigned int
PhysicalMemory::drain(Event *de)
{
    int count = 0;
    for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
        count += (*pi)->drain(de);
    }

    if (count)
        changeState(Draining);
    else
        changeState(Drained);
    return count;
}

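// Write the memory image to a gzip-compressed file in the checkpoint
// directory and record any outstanding load-locked addresses.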
void
PhysicalMemory::serialize(ostream &os)
{
    if (!pmemAddr)
        return;

    gzFile compressedMem;
    string filename = name() + ".physmem";

    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(_size);

    // write memory file
    string thefile = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n", filename);
    }

    compressedMem = gzdopen(fd, "wb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
                filename);

    if (gzwrite(compressedMem, pmemAddr, size()) != (int)size()) {
        fatal("Write failed on physical memory checkpoint file '%s'\n",
              filename);
    }

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);

    list<LockedAddr>::iterator i = lockedAddrList.begin();

    vector<Addr> lal_addr;
    vector<int> lal_cid;
    while (i != lockedAddrList.end()) {
        lal_addr.push_back(i->addr);
        lal_cid.push_back(i->contextId);
        i++;
    }
    arrayParamOut(os, "lal_addr", lal_addr);
    arrayParamOut(os, "lal_cid", lal_cid);
}

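// Restore memory from a checkpoint: re-map the backing store, decompress
// the saved image (only writing non-zero words so untouched pages stay
// lazily allocated), and rebuild the locked-address list.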
void
PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
{
    if (!pmemAddr)
        return;

    gzFile compressedMem;
    long *tempPage;
    long *pmem_current;
    uint64_t curSize;
    uint32_t bytesRead;
    const uint32_t chunkSize = 16384;

    string filename;

    UNSERIALIZE_SCALAR(filename);

    filename = cp->cptDir + "/" + filename;

    // open the compressed memory file
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    compressedMem = gzdopen(fd, "rb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
                filename);

    // unmap file that was mmapped in the constructor
    // This is done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it
    munmap((char*)pmemAddr, size());

    UNSERIALIZE_SCALAR(_size);
    if (size() > params()->range.size())
        fatal("Memory size has changed! size %lld, param size %lld\n",
              size(), params()->range.size());

    pmemAddr = (uint8_t *)mmap(NULL, size(),
        PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

    curSize = 0;
    tempPage = (long*)malloc(chunkSize);
    if (tempPage == NULL)
        fatal("Unable to malloc memory to read file %s\n", filename);

    // Only copy words that are non-zero, so we don't give the VM system hell
    while (curSize < size()) {
        bytesRead = gzread(compressedMem, tempPage, chunkSize);
        if (bytesRead == 0)
            break;

        assert(bytesRead % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytesRead / sizeof(long); x++) {
            if (*(tempPage + x) != 0) {
                pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
                *pmem_current = *(tempPage + x);
            }
        }
        curSize += bytesRead;
    }

    free(tempPage);

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);

    vector<Addr> lal_addr;
    vector<int> lal_cid;
    arrayParamIn(cp, section, "lal_addr", lal_addr);
    arrayParamIn(cp, section, "lal_cid", lal_cid);
    for (size_t i = 0; i < lal_addr.size(); i++)
        lockedAddrList.push_front(LockedAddr(lal_addr[i], lal_cid[i]));
}

PhysicalMemory *
PhysicalMemoryParams::create()
{
    return new PhysicalMemory(this);
}
