// abstract_mem.cc revision 8719
/*
 * Copyright (c) 2010-2011 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Ali Saidi
 */

#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <string>

#include "arch/isa_traits.hh"
#include "arch/registers.hh"
#include "base/intmath.hh"
#include "base/misc.hh"
#include "base/random.hh"
#include "base/types.hh"
#include "config/full_system.hh"
#include "config/the_isa.hh"
#include "debug/LLSC.hh"
#include "debug/MemoryAccess.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "sim/eventq.hh"

using namespace std;
using namespace TheISA;

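// Construct the memory object.  Unless the null parameter is set, the
// simulated memory is backed by an anonymous host mmap, or by a private
// mapping of the given file.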
PhysicalMemory::PhysicalMemory(const Params *p)
    : MemObject(p), pmemAddr(NULL), lat(p->latency), lat_var(p->latency_var),
      _size(params()->range.size()), _start(params()->range.start)
{
    if (size() % TheISA::PageBytes != 0)
        panic("Memory Size not divisible by page size\n");

    if (params()->null)
        return;


    if (params()->file == "") {
        int map_flags = MAP_ANON | MAP_PRIVATE;
        pmemAddr = (uint8_t *)mmap(NULL, size(),
                                   PROT_READ | PROT_WRITE, map_flags, -1, 0);
    } else {
        int map_flags = MAP_PRIVATE;
        int fd = open(params()->file.c_str(), O_RDONLY);
        _size = lseek(fd, 0, SEEK_END);
        lseek(fd, 0, SEEK_SET);
        pmemAddr = (uint8_t *)mmap(NULL, roundUp(size(), sysconf(_SC_PAGESIZE)),
                                   PROT_READ | PROT_WRITE, map_flags, fd, 0);
    }

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        if (params()->file == "")
            fatal("Could not mmap!\n");
        else
            fatal("Could not find file: %s\n", params()->file);
    }

    //If requested, initialize all the memory to 0
    if (p->zero)
        memset(pmemAddr, 0, size());
}

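// Check that at least one port is connected, then notify all connected
// peers that our address range is available.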
void
PhysicalMemory::init()
{
    if (ports.size() == 0) {
        fatal("PhysicalMemory object %s is unconnected!", name());
    }

    for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
        if (*pi)
            (*pi)->sendRangeChange();
    }
}

PhysicalMemory::~PhysicalMemory()
{
    if (pmemAddr)
        munmap((char*)pmemAddr, size());
}

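// Register the byte and request counters along with the derived
// bandwidth formulas (bytes per simulated second).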
void
PhysicalMemory::regStats()
{
    using namespace Stats;

    bytesRead
        .name(name() + ".bytes_read")
        .desc("Number of bytes read from this memory")
        ;
    bytesInstRead
        .name(name() + ".bytes_inst_read")
        .desc("Number of instruction bytes read from this memory")
        ;
    bytesWritten
        .name(name() + ".bytes_written")
        .desc("Number of bytes written to this memory")
        ;
    numReads
        .name(name() + ".num_reads")
        .desc("Number of read requests responded to by this memory")
        ;
    numWrites
        .name(name() + ".num_writes")
        .desc("Number of write requests responded to by this memory")
        ;
    numOther
        .name(name() + ".num_other")
        .desc("Number of other requests responded to by this memory")
        ;
    bwRead
        .name(name() + ".bw_read")
        .desc("Total read bandwidth from this memory (bytes/s)")
        .precision(0)
        .prereq(bytesRead)
        ;
    bwInstRead
        .name(name() + ".bw_inst_read")
        .desc("Instruction read bandwidth from this memory (bytes/s)")
        .precision(0)
        .prereq(bytesInstRead)
        ;
    bwWrite
        .name(name() + ".bw_write")
        .desc("Write bandwidth from this memory (bytes/s)")
        .precision(0)
        .prereq(bytesWritten)
        ;
    bwTotal
        .name(name() + ".bw_total")
        .desc("Total bandwidth to/from this memory (bytes/s)")
        .precision(0)
        .prereq(bwTotal)
        ;
    bwRead = bytesRead / simSeconds;
    bwInstRead = bytesInstRead / simSeconds;
    bwWrite = bytesWritten / simSeconds;
    bwTotal = (bytesRead + bytesWritten) / simSeconds;
}

unsigned
PhysicalMemory::deviceBlockSize() const
{
    // Can accept any size request
    return 0;
}

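// Return the fixed access latency, plus a uniformly distributed random
// component when a non-zero latency variance is configured.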
Tick
PhysicalMemory::calculateLatency(PacketPtr pkt)
{
    Tick latency = lat;
    if (lat_var != 0)
        latency += random_mt.random<Tick>(0, lat_var);
    return latency;
}



// Add load-locked to tracking list.  Should only be called if the
// operation is a load and the LLSC flag is set.
void
PhysicalMemory::trackLoadLocked(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());

    // first we check if we already have a locked addr for this
    // xc.  Since each xc only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
                    req->contextId(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // no record for this xc: need to allocate a new one
    DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
            req->contextId(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}


// Called on *writes* only... both regular stores and
// store-conditional operations.  Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
PhysicalMemory::checkLockedAddrList(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLLSC = pkt->isLLSC();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool success = !isLLSC;

    // Iterate over list.  Note that there could be multiple matching
    // records, as more than one context could have done a load locked
    // to this location.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    while (i != lockedAddrList.end()) {

        if (i->addr == paddr) {
            // we have a matching address

            if (isLLSC && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory
                // system can tell, the requesting context's lock is
                // still valid.
                DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
                        req->contextId(), paddr);
                success = true;
            }

            // Get rid of our record of this lock and advance to next
            DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
                    i->contextId, paddr);
            i = lockedAddrList.erase(i);
        }
        else {
            // no match: advance to next record
            ++i;
        }
    }

    if (isLLSC) {
        req->setExtraData(success ? 1 : 0);
    }

    return success;
}


#if TRACING_ON

#define CASE(A, T)                                                      \
  case sizeof(T):                                                       \
    DPRINTF(MemoryAccess,"%s of size %i on address 0x%x data 0x%x\n",   \
            A, pkt->getSize(), pkt->getAddr(), pkt->get<T>());          \
  break


#define TRACE_PACKET(A)                                                 \
    do {                                                                \
        switch (pkt->getSize()) {                                       \
          CASE(A, uint64_t);                                            \
          CASE(A, uint32_t);                                            \
          CASE(A, uint16_t);                                            \
          CASE(A, uint8_t);                                             \
          default:                                                      \
            DPRINTF(MemoryAccess, "%s of size %i on address 0x%x\n",    \
                    A, pkt->getSize(), pkt->getAddr());                 \
            DDUMP(MemoryAccess, pkt->getPtr<uint8_t>(), pkt->getSize());\
        }                                                               \
    } while (0)

#else

#define TRACE_PACKET(A)

#endif

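// Perform an atomic access directly on the backing store: swaps
// (including conditional swaps), reads (with load-locked tracking),
// writes (checked against outstanding lock records) and invalidates.
// Packets with memInhibit asserted are not responded to.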
Tick
PhysicalMemory::doAtomicAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());

    if (pkt->memInhibitAsserted()) {
        DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
                pkt->getAddr());
        return 0;
    }

    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->cmd == MemCmd::SwapReq) {
        IntReg overwrite_val;
        bool overwrite_mem;
        uint64_t condition_val64;
        uint32_t condition_val32;

        if (!pmemAddr)
            panic("Swap only works if there is real memory (i.e. null=False)");
        assert(sizeof(IntReg) >= pkt->getSize());

        overwrite_mem = true;
        // keep a copy of our possible write value, and copy what is at the
        // memory address into the packet
        std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
        std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());

        if (pkt->req->isCondSwap()) {
            if (pkt->getSize() == sizeof(uint64_t)) {
                condition_val64 = pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
                                             sizeof(uint64_t));
            } else if (pkt->getSize() == sizeof(uint32_t)) {
                condition_val32 = (uint32_t)pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
                                             sizeof(uint32_t));
            } else
                panic("Invalid size for conditional read/write\n");
        }

        if (overwrite_mem)
            std::memcpy(hostAddr, &overwrite_val, pkt->getSize());

        assert(!pkt->req->isInstFetch());
        TRACE_PACKET("Read/Write");
        numOther++;
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLLSC()) {
            trackLoadLocked(pkt);
        }
        if (pmemAddr)
            memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
        numReads++;
        bytesRead += pkt->getSize();
        if (pkt->req->isInstFetch())
            bytesInstRead += pkt->getSize();
    } else if (pkt->isWrite()) {
        if (writeOK(pkt)) {
            if (pmemAddr)
                memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Write");
            numWrites++;
            bytesWritten += pkt->getSize();
        }
    } else if (pkt->isInvalidate()) {
        //upgrade or invalidate
        if (pkt->needsResponse()) {
            pkt->makeAtomicResponse();
        }
    } else {
        panic("unimplemented");
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }
    return calculateLatency(pkt);
}


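// Perform a functional (debug) access directly on the backing store,
// without updating statistics or consuming simulated time.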
void
PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());


    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->isRead()) {
        if (pmemAddr)
            memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET("Read");
        pkt->makeAtomicResponse();
    } else if (pkt->isWrite()) {
        if (pmemAddr)
            memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
        TRACE_PACKET("Write");
        pkt->makeAtomicResponse();
    } else if (pkt->isPrint()) {
        Packet::PrintReqState *prs =
            dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
        // Need to call printLabels() explicitly since we're not going
        // through printObj().
        prs->printLabels();
        // Right now we just print the single byte at the specified address.
        ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
    } else {
        panic("PhysicalMemory: unimplemented functional command %s",
              pkt->cmdString());
    }
}


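// Hand out memory ports by name and index, growing the port vector as
// needed; each index may only be assigned once.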
Port *
PhysicalMemory::getPort(const std::string &if_name, int idx)
{
    if (if_name != "port") {
        panic("PhysicalMemory::getPort: unknown port %s requested", if_name);
    }

    if (idx >= (int)ports.size()) {
        ports.resize(idx + 1);
    }

    if (ports[idx] != NULL) {
        panic("PhysicalMemory::getPort: port %d already assigned", idx);
    }

    MemoryPort *port =
        new MemoryPort(csprintf("%s-port%d", name(), idx), this);

    ports[idx] = port;
    return port;
}

PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
                                       PhysicalMemory *_memory)
    : SimpleTimingPort(_name, _memory), memory(_memory)
{ }

void
PhysicalMemory::MemoryPort::recvRangeChange()
{
    // memory is a slave and thus should never have to worry about its
    // neighbours' address ranges
}

AddrRangeList
PhysicalMemory::MemoryPort::getAddrRanges()
{
    return memory->getAddrRanges();
}

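// The memory responds to a single contiguous range starting at start()
// and covering size() bytes.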
AddrRangeList
PhysicalMemory::getAddrRanges()
{
    AddrRangeList ranges;
    ranges.push_back(RangeSize(start(), size()));
    return ranges;
}

unsigned
PhysicalMemory::MemoryPort::deviceBlockSize() const
{
    return memory->deviceBlockSize();
}

Tick
PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory->doAtomicAccess(pkt);
}

void
PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory->name());

    if (!checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory->doFunctionalAccess(pkt);
    }

    pkt->popLabel();
}

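// Drain all ports and report how many are still busy, so the caller
// knows whether we end up in the Draining or Drained state.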
unsigned int
PhysicalMemory::drain(Event *de)
{
    int count = 0;
    for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
        count += (*pi)->drain(de);
    }

    if (count)
        changeState(Draining);
    else
        changeState(Drained);
    return count;
}

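// Write the memory contents to a gzip-compressed file in the checkpoint
// directory and record any outstanding load-locked addresses.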
void
PhysicalMemory::serialize(ostream &os)
{
    if (!pmemAddr)
        return;

    gzFile compressedMem;
    string filename = name() + ".physmem";

    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(_size);

    // write memory file
    string thefile = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n", filename);
    }

    compressedMem = gzdopen(fd, "wb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
                filename);

    if (gzwrite(compressedMem, pmemAddr, size()) != (int)size()) {
        fatal("Write failed on physical memory checkpoint file '%s'\n",
              filename);
    }

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);

    list<LockedAddr>::iterator i = lockedAddrList.begin();

    vector<Addr> lal_addr;
    vector<int> lal_cid;
    while (i != lockedAddrList.end()) {
        lal_addr.push_back(i->addr);
        lal_cid.push_back(i->contextId);
        i++;
    }
    arrayParamOut(os, "lal_addr", lal_addr);
    arrayParamOut(os, "lal_cid", lal_cid);
}

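// Restore memory contents from the compressed checkpoint file, remapping
// the backing store first, and rebuild the load-locked address list.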
void
PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
{
    if (!pmemAddr)
        return;

    gzFile compressedMem;
    long *tempPage;
    long *pmem_current;
    uint64_t curSize;
    uint32_t bytesRead;
    const uint32_t chunkSize = 16384;

    string filename;

    UNSERIALIZE_SCALAR(filename);

    filename = cp->cptDir + "/" + filename;

    // mmap memory file
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    compressedMem = gzdopen(fd, "rb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
                filename);

    // unmap file that was mmapped in the constructor
    // This is done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it
    munmap((char*)pmemAddr, size());

    UNSERIALIZE_SCALAR(_size);
    if (size() > params()->range.size())
        fatal("Memory size has changed! size %lld, param size %lld\n",
              size(), params()->range.size());

    pmemAddr = (uint8_t *)mmap(NULL, size(),
        PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

    curSize = 0;
    tempPage = (long*)malloc(chunkSize);
    if (tempPage == NULL)
        fatal("Unable to malloc memory to read file %s\n", filename);

    /* Only copy bytes that are non-zero, so we don't give the VM system hell */
    while (curSize < size()) {
        bytesRead = gzread(compressedMem, tempPage, chunkSize);
        if (bytesRead == 0)
            break;

        assert(bytesRead % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytesRead / sizeof(long); x++)
        {
             if (*(tempPage+x) != 0) {
                 pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
                 *pmem_current = *(tempPage+x);
             }
        }
        curSize += bytesRead;
    }

    free(tempPage);

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);

    vector<Addr> lal_addr;
    vector<int> lal_cid;
    arrayParamIn(cp, section, "lal_addr", lal_addr);
    arrayParamIn(cp, section, "lal_cid", lal_cid);
    for (int i = 0; i < lal_addr.size(); i++)
        lockedAddrList.push_front(LockedAddr(lal_addr[i], lal_cid[i]));
}

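// Instantiate a PhysicalMemory from its parameter object.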
PhysicalMemory *
PhysicalMemoryParams::create()
{
    return new PhysicalMemory(this);
}