physical.cc revision 8799:dac1e33e07b0
/*
 * Copyright (c) 2010-2011 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Ali Saidi
 */

#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <cstdio>
#include <iostream>
#include <string>

#include "arch/isa_traits.hh"
#include "arch/registers.hh"
#include "base/intmath.hh"
#include "base/misc.hh"
#include "base/random.hh"
#include "base/types.hh"
#include "config/the_isa.hh"
#include "debug/LLSC.hh"
#include "debug/MemoryAccess.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "sim/eventq.hh"

using namespace std;
using namespace TheISA;

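// Build the backing store for this memory. With no backing file the
// store is an anonymous private mapping of size() bytes; otherwise the
// file named by the "file" parameter is opened read-only and mapped
// copy-on-write, with _size taken from the file length. If the "null"
// parameter is set, no host memory is allocated at all, and if "zero"
// is set the store is cleared after mapping.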
PhysicalMemory::PhysicalMemory(const Params *p)
    : MemObject(p), pmemAddr(NULL), lat(p->latency), lat_var(p->latency_var),
      _size(params()->range.size()), _start(params()->range.start)
{
    if (size() % TheISA::PageBytes != 0)
        panic("Memory Size not divisible by page size\n");

    if (params()->null)
        return;


    if (params()->file == "") {
        int map_flags = MAP_ANON | MAP_PRIVATE;
        pmemAddr = (uint8_t *)mmap(NULL, size(),
                                   PROT_READ | PROT_WRITE, map_flags, -1, 0);
    } else {
        int map_flags = MAP_PRIVATE;
        int fd = open(params()->file.c_str(), O_RDONLY);
        _size = lseek(fd, 0, SEEK_END);
        lseek(fd, 0, SEEK_SET);
        pmemAddr = (uint8_t *)mmap(NULL, roundUp(size(), sysconf(_SC_PAGESIZE)),
                                   PROT_READ | PROT_WRITE, map_flags, fd, 0);
    }

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        if (params()->file == "")
            fatal("Could not mmap!\n");
        else
            fatal("Could not find file: %s\n", params()->file);
    }

    // If requested, initialize all the memory to 0
    if (p->zero)
        memset(pmemAddr, 0, size());
}

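// Sanity-check connectivity and tell every connected peer that this
// memory's address range is now known.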
void
PhysicalMemory::init()
{
    if (ports.size() == 0) {
        fatal("PhysicalMemory object %s is unconnected!", name());
    }

    for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
        if (*pi)
            (*pi)->sendRangeChange();
    }
}

PhysicalMemory::~PhysicalMemory()
{
    if (pmemAddr)
        munmap((char*)pmemAddr, size());
}

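// Register statistics. The byte and access counts are accumulated
// directly in the access paths below; the bandwidth values are formulas
// derived from those counts and the simulated time.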
void
PhysicalMemory::regStats()
{
    using namespace Stats;

    bytesRead
        .name(name() + ".bytes_read")
        .desc("Number of bytes read from this memory")
        ;
    bytesInstRead
        .name(name() + ".bytes_inst_read")
        .desc("Number of instruction bytes read from this memory")
        ;
    bytesWritten
        .name(name() + ".bytes_written")
        .desc("Number of bytes written to this memory")
        ;
    numReads
        .name(name() + ".num_reads")
        .desc("Number of read requests responded to by this memory")
        ;
    numWrites
        .name(name() + ".num_writes")
        .desc("Number of write requests responded to by this memory")
        ;
    numOther
        .name(name() + ".num_other")
        .desc("Number of other requests responded to by this memory")
        ;
    bwRead
        .name(name() + ".bw_read")
        .desc("Total read bandwidth from this memory (bytes/s)")
        .precision(0)
        .prereq(bytesRead)
        ;
    bwInstRead
        .name(name() + ".bw_inst_read")
        .desc("Instruction read bandwidth from this memory (bytes/s)")
        .precision(0)
        .prereq(bytesInstRead)
        ;
    bwWrite
        .name(name() + ".bw_write")
        .desc("Write bandwidth from this memory (bytes/s)")
        .precision(0)
        .prereq(bytesWritten)
        ;
    bwTotal
        .name(name() + ".bw_total")
        .desc("Total bandwidth to/from this memory (bytes/s)")
        .precision(0)
        .prereq(bwTotal)
        ;
    bwRead = bytesRead / simSeconds;
    bwInstRead = bytesInstRead / simSeconds;
    bwWrite = bytesWritten / simSeconds;
    bwTotal = (bytesRead + bytesWritten) / simSeconds;
}

unsigned
PhysicalMemory::deviceBlockSize() const
{
    // Can accept a request of any size
    return 0;
}

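// Return the access latency for a packet: the fixed latency plus, if a
// variance is configured, a pseudo-random component of at most lat_var
// ticks.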
Tick
PhysicalMemory::calculateLatency(PacketPtr pkt)
{
    Tick latency = lat;
    if (lat_var != 0)
        latency += random_mt.random<Tick>(0, lat_var);
    return latency;
}



// Add load-locked to tracking list.  Should only be called if the
// operation is a load and the LLSC flag is set.
void
PhysicalMemory::trackLoadLocked(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());

    // first we check if we already have a locked addr for this
    // xc.  Since each xc only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
                    req->contextId(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // no record for this xc: need to allocate a new one
    DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
            req->contextId(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}


// Called on *writes* only... both regular stores and
// store-conditional operations.  Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
PhysicalMemory::checkLockedAddrList(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLLSC = pkt->isLLSC();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool success = !isLLSC;

    // Iterate over list.  Note that there could be multiple matching
    // records, as more than one context could have done a load locked
    // to this location.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    while (i != lockedAddrList.end()) {

        if (i->addr == paddr) {
            // we have a matching address

            if (isLLSC && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory
                // system can tell, the requesting context's lock is
                // still valid.
                DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
                        req->contextId(), paddr);
                success = true;
            }

            // Get rid of our record of this lock and advance to next
            DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
                    i->contextId, paddr);
            i = lockedAddrList.erase(i);
        }
        else {
            // no match: advance to next record
            ++i;
        }
    }

    if (isLLSC) {
        req->setExtraData(success ? 1 : 0);
    }

    return success;
}


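// Packet tracing helpers: when tracing is compiled in, TRACE_PACKET
// prints the address, size and (for 1, 2, 4 or 8 byte accesses) the data
// value of each access to the MemoryAccess debug stream, falling back to
// a hex dump for other sizes; otherwise it compiles away to nothing.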
#if TRACING_ON

#define CASE(A, T)                                                      \
  case sizeof(T):                                                       \
    DPRINTF(MemoryAccess,"%s of size %i on address 0x%x data 0x%x\n",   \
            A, pkt->getSize(), pkt->getAddr(), pkt->get<T>());          \
  break


#define TRACE_PACKET(A)                                                 \
    do {                                                                \
        switch (pkt->getSize()) {                                       \
          CASE(A, uint64_t);                                            \
          CASE(A, uint32_t);                                            \
          CASE(A, uint16_t);                                            \
          CASE(A, uint8_t);                                             \
          default:                                                      \
            DPRINTF(MemoryAccess, "%s of size %i on address 0x%x\n",    \
                    A, pkt->getSize(), pkt->getAddr());                 \
            DDUMP(MemoryAccess, pkt->getPtr<uint8_t>(), pkt->getSize());\
        }                                                               \
    } while (0)

#else

#define TRACE_PACKET(A)

#endif

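// Atomic-mode access path: perform the access directly on the backing
// store and return the latency from calculateLatency(). Handles
// (conditional) swaps, reads (recording load-locked requests), writes
// (gated by writeOK()), and invalidations, updating the statistics as
// it goes. Packets another memory has already claimed
// (memInhibitAsserted) are not responded to.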
Tick
PhysicalMemory::doAtomicAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());

    if (pkt->memInhibitAsserted()) {
        DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
                pkt->getAddr());
        return 0;
    }

    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->cmd == MemCmd::SwapReq) {
        IntReg overwrite_val;
        bool overwrite_mem;
        uint64_t condition_val64;
        uint32_t condition_val32;

        if (!pmemAddr)
            panic("Swap only works if there is real memory (i.e. null=False)");
        assert(sizeof(IntReg) >= pkt->getSize());

        overwrite_mem = true;
        // keep a copy of our possible write value, and copy what is at the
        // memory address into the packet
        std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
        std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());

        if (pkt->req->isCondSwap()) {
            if (pkt->getSize() == sizeof(uint64_t)) {
                condition_val64 = pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
                                             sizeof(uint64_t));
            } else if (pkt->getSize() == sizeof(uint32_t)) {
                condition_val32 = (uint32_t)pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
                                             sizeof(uint32_t));
            } else
                panic("Invalid size for conditional read/write\n");
        }

        if (overwrite_mem)
            std::memcpy(hostAddr, &overwrite_val, pkt->getSize());

        assert(!pkt->req->isInstFetch());
        TRACE_PACKET("Read/Write");
        numOther++;
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLLSC()) {
            trackLoadLocked(pkt);
        }
        if (pmemAddr)
            memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
        numReads++;
        bytesRead += pkt->getSize();
        if (pkt->req->isInstFetch())
            bytesInstRead += pkt->getSize();
    } else if (pkt->isWrite()) {
        if (writeOK(pkt)) {
            if (pmemAddr)
                memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Write");
            numWrites++;
            bytesWritten += pkt->getSize();
        }
    } else if (pkt->isInvalidate()) {
        // upgrade or invalidate
        if (pkt->needsResponse()) {
            pkt->makeAtomicResponse();
        }
    } else {
        panic("unimplemented");
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }
    return calculateLatency(pkt);
}


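// Debugger/functional path: read or write the backing store immediately,
// with no timing, statistics, or LLSC side effects. Also services print
// requests used by the memory-system tracing code.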
void
PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());


    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->isRead()) {
        if (pmemAddr)
            memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET("Read");
        pkt->makeAtomicResponse();
    } else if (pkt->isWrite()) {
        if (pmemAddr)
            memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
        TRACE_PACKET("Write");
        pkt->makeAtomicResponse();
    } else if (pkt->isPrint()) {
        Packet::PrintReqState *prs =
            dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
        // Need to call printLabels() explicitly since we're not going
        // through printObj().
        prs->printLabels();
        // Right now we just print the single byte at the specified address.
        ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
    } else {
        panic("PhysicalMemory: unimplemented functional command %s",
              pkt->cmdString());
    }
}


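// Hand out the slave port with the given index on the vectored "port"
// interface, growing the port vector on demand and refusing to hand the
// same index out twice.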
Port *
PhysicalMemory::getPort(const std::string &if_name, int idx)
{
    if (if_name != "port") {
        panic("PhysicalMemory::getPort: unknown port %s requested", if_name);
    }

    if (idx >= (int)ports.size()) {
        ports.resize(idx + 1);
    }

    if (ports[idx] != NULL) {
        panic("PhysicalMemory::getPort: port %d already assigned", idx);
    }

    MemoryPort *port =
        new MemoryPort(csprintf("%s-port%d", name(), idx), this);

    ports[idx] = port;
    return port;
}

PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
                                       PhysicalMemory *_memory)
    : SimpleTimingPort(_name, _memory), memory(_memory)
{ }

void
PhysicalMemory::MemoryPort::recvRangeChange()
{
    // memory is a slave and thus should never have to worry about its
    // neighbours' address ranges
}

AddrRangeList
PhysicalMemory::MemoryPort::getAddrRanges()
{
    return memory->getAddrRanges();
}

AddrRangeList
PhysicalMemory::getAddrRanges()
{
    AddrRangeList ranges;
    ranges.push_back(RangeSize(start(), size()));
    return ranges;
}

unsigned
PhysicalMemory::MemoryPort::deviceBlockSize() const
{
    return memory->deviceBlockSize();
}

Tick
PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory->doAtomicAccess(pkt);
}

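// Functional accesses are first offered to checkFunctional(), which can
// satisfy them from responses still queued in this port; only if that
// fails is the backing store accessed directly.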
void
PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory->name());

    if (!checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory->doFunctionalAccess(pkt);
    }

    pkt->popLabel();
}

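// Drain simply forwards the request to each port and reports how many of
// them still have packets in flight; the memory itself has no other
// in-flight state to quiesce.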
unsigned int
PhysicalMemory::drain(Event *de)
{
    int count = 0;
    for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
        count += (*pi)->drain(de);
    }

    if (count)
        changeState(Draining);
    else
        changeState(Drained);
    return count;
}

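// Checkpointing: the raw contents of the backing store are written,
// gzip-compressed, to <name>.physmem in the checkpoint directory, and
// the outstanding load-locked records are saved as parallel
// address/context arrays.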
void
PhysicalMemory::serialize(ostream &os)
{
    if (!pmemAddr)
        return;

    gzFile compressedMem;
    string filename = name() + ".physmem";

    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(_size);

    // write memory file
    string thefile = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n", filename);
    }

    compressedMem = gzdopen(fd, "wb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
                filename);

    if (gzwrite(compressedMem, pmemAddr, size()) != (int)size()) {
        fatal("Write failed on physical memory checkpoint file '%s'\n",
              filename);
    }

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);

    list<LockedAddr>::iterator i = lockedAddrList.begin();

    vector<Addr> lal_addr;
    vector<int> lal_cid;
    while (i != lockedAddrList.end()) {
        lal_addr.push_back(i->addr);
        lal_cid.push_back(i->contextId);
        i++;
    }
    arrayParamOut(os, "lal_addr", lal_addr);
    arrayParamOut(os, "lal_cid", lal_cid);
}

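// Restore from a checkpoint: re-create the anonymous mapping, stream the
// gzip-compressed image back in while copying only non-zero words (so
// untouched pages are never dirtied on the host), and rebuild the
// load-locked list.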
void
PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
{
    if (!pmemAddr)
        return;

    gzFile compressedMem;
    long *tempPage;
    long *pmem_current;
    uint64_t curSize;
    uint32_t bytesRead;
    const uint32_t chunkSize = 16384;

    string filename;

    UNSERIALIZE_SCALAR(filename);

    filename = cp->cptDir + "/" + filename;

    // mmap memoryfile
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    compressedMem = gzdopen(fd, "rb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
                filename);

    // unmap file that was mmapped in the constructor
    // This is done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it
    munmap((char*)pmemAddr, size());

    UNSERIALIZE_SCALAR(_size);
    if (size() > params()->range.size())
        fatal("Memory size has changed! size %lld, param size %lld\n",
              size(), params()->range.size());

    pmemAddr = (uint8_t *)mmap(NULL, size(),
        PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

    curSize = 0;
    tempPage = (long*)malloc(chunkSize);
    if (tempPage == NULL)
        fatal("Unable to malloc memory to read file %s\n", filename);

    /* Only copy bytes that are non-zero, so we don't give the VM system hell */
    while (curSize < size()) {
        bytesRead = gzread(compressedMem, tempPage, chunkSize);
        if (bytesRead == 0)
            break;

        assert(bytesRead % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytesRead / sizeof(long); x++)
        {
             if (*(tempPage+x) != 0) {
                 pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
                 *pmem_current = *(tempPage+x);
             }
        }
        curSize += bytesRead;
    }

    free(tempPage);

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);

    vector<Addr> lal_addr;
    vector<int> lal_cid;
    arrayParamIn(cp, section, "lal_addr", lal_addr);
    arrayParamIn(cp, section, "lal_cid", lal_cid);
    for (int i = 0; i < lal_addr.size(); i++)
        lockedAddrList.push_front(LockedAddr(lal_addr[i], lal_cid[i]));
}

PhysicalMemory *
PhysicalMemoryParams::create()
{
    return new PhysicalMemory(this);
}
