/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */

#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/BusAddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"
#include "mem/physical.hh"

using namespace std;

PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories) :
    _name(_name), size(0)
{
    // add the memories from the system to the address map as
    // appropriate
    for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
         m != _memories.end(); ++m) {
        // only add the memory if it is part of the global address map
        if ((*m)->isInAddrMap()) {
            memories.push_back(*m);

            // calculate the total size once and for all
            size += (*m)->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            if (addrMap.insert((*m)->getAddrRange(), *m) == addrMap.end())
                fatal("Memory address range for %s is overlapping\n",
                      (*m)->name());
        } else {
            DPRINTF(BusAddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    (*m)->name());
            // this type of memory is used e.g. as reference memory by
            // Ruby; it also needs a backing store, but should not be
            // part of the global address map

            // simply create the backing store independently; note that
            // memories of this kind are allowed to overlap in the
            // logical address map
            vector<AbstractMemory*> unmapped_mems;
            unmapped_mems.push_back(*m);
            createBackingStore((*m)->getAddrRange(), unmapped_mems);
        }
    }

    // iterate over the increasing addresses and create as large
    // chunks as possible of contiguous space to be mapped to backing
    // store, also remember what memories constitute the range so we
    // can go and find out if we have to init their parts to zero
    AddrRange curr_range;
    vector<AbstractMemory*> curr_memories;
    for (AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.begin();
         r != addrMap.end(); ++r) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r->second->isNull()) {
            // if the current range is valid, decide if we split or
            // not
            if (curr_range.valid()) {
                // if the ranges are neighbours, then append; this
                // will eventually be extended to support address
                // striping and merging of interleaved ranges
                if (curr_range.end + 1 == r->first.start) {
                    DPRINTF(BusAddrRanges,
                            "Merging neighbouring ranges %x:%x and %x:%x\n",
                            curr_range.start, curr_range.end, r->first.start,
                            r->first.end);
                    // update the end of the range and add the current
                    // memory to the list of memories
                    curr_range.end = r->first.end;
                    curr_memories.push_back(r->second);
                } else {
                    // what we already have is valid, and this is not
                    // contiguous, so create the backing store and
                    // then start over
                    createBackingStore(curr_range, curr_memories);

                    // remember the current range and reset the current
                    // set of memories to contain this one
                    curr_range = r->first;
                    curr_memories.clear();
                    curr_memories.push_back(r->second);
                }
            } else {
                // we haven't seen any valid ranges yet, so remember
                // the current range and reset the current set of
                // memories to contain this one
                curr_range = r->first;
                curr_memories.clear();
                curr_memories.push_back(r->second);
            }
        }
    }

    // if we have a valid range upon finishing the iteration, then
    // create the backing store
    if (curr_range.valid())
        createBackingStore(curr_range, curr_memories);
}

void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories)
{
    // perform the actual mmap
    DPRINTF(BusAddrRanges, "Creating backing store for range %x:%x\n",
            range.start, range.end);
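    // MAP_ANON | MAP_PRIVATE gives us an anonymous, private mapping
    // that the kernel hands back zero-filled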
    int map_flags = MAP_ANON | MAP_PRIVATE;
    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %x:%x!\n", range.size(),
              range.start, range.end);
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.push_back(make_pair(range, pmem));

    // point the memories to their backing store, and if requested,
    // initialize the memory range to 0
    for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
         m != _memories.end(); ++m) {
        DPRINTF(BusAddrRanges, "Mapping memory %s to backing store\n",
                (*m)->name());
        (*m)->setBackingStore(pmem);

        // if it should be zero, then go and make it so
        if ((*m)->initToZero())
            memset(pmem, 0, (*m)->size());

        // advance the pointer for the next memory in line
        pmem += (*m)->size();
    }
}

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
         s != backingStore.end(); ++s)
        munmap((char*)s->second, s->first.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
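    // note: comparing a plain address against the cached AddrRange is
    // a containment test (is the address inside the range?), not an
    // equality test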
    // see if the address is within the last matched range
    if (addr != rangeCache) {
        // lookup in the interval tree
        AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.find(addr);
        if (r == addrMap.end()) {
            // not in the cache, and not in the tree
            return false;
        }
        // the range is in the tree, update the cache
        rangeCache = r->first;
    }

    assert(addrMap.find(addr) != addrMap.end());

    // either matched the cache or found in the tree
    return true;
}

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely to
    // be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    for (vector<AbstractMemory*>::const_iterator m = memories.begin();
         m != memories.end(); ++m) {
        if ((*m)->isConfReported()) {
            ranges.push_back((*m)->getAddrRange());
        }
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
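    // route the access to the memory that owns the packet's address;
    // the asserts below require a request packet targeting a valid
    // memory address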
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    AddrRangeMap<AbstractMemory*>::const_iterator m = addrMap.find(addr);
    assert(m != addrMap.end());
    m->second->access(pkt);
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
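    // same lookup as in access(), but the packet is serviced as a
    // functional access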
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    AddrRangeMap<AbstractMemory*>::const_iterator m = addrMap.find(addr);
    assert(m != addrMap.end());
    m->second->functionalAccess(pkt);
}

void
PhysicalMemory::serialize(ostream& os)
{
    // serialize all the locked addresses and their context ids
    vector<Addr> lal_addr;
    vector<int> lal_cid;

    for (vector<AbstractMemory*>::iterator m = memories.begin();
         m != memories.end(); ++m) {
        const list<LockedAddr>& locked_addrs = (*m)->getLockedAddrList();
        for (list<LockedAddr>::const_iterator l = locked_addrs.begin();
             l != locked_addrs.end(); ++l) {
            lal_addr.push_back(l->addr);
            lal_cid.push_back(l->contextId);
        }
    }

    arrayParamOut(os, "lal_addr", lal_addr);
    arrayParamOut(os, "lal_cid", lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
         s != backingStore.end(); ++s) {
        nameOut(os, csprintf("%s.store%d", name(), store_id));
        serializeStore(os, store_id++, s->first, s->second);
    }
}

void
PhysicalMemory::serializeStore(ostream& os, unsigned int store_id,
                               AddrRange range, uint8_t* pmem)
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = "store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    string filepath = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(filepath.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);
    }

    gzFile compressed_mem = gzdopen(fd, "wb");
    if (compressed_mem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite fails if (int)len < 0 (gzwrite returns int)
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
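        // cap each write at INT_MAX bytes, since gzwrite takes (and
        // returns) an int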
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);

}

void
PhysicalMemory::unserialize(Checkpoint* cp, const string& section)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<int> lal_cid;
    arrayParamIn(cp, section, "lal_addr", lal_addr);
    arrayParamIn(cp, section, "lal_cid", lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        AddrRangeMap<AbstractMemory*>::iterator m = addrMap.find(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        unserializeStore(cp, csprintf("%s.store%d", section, i));
    }

}

void
PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
{
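    // number of bytes to hand to gzread per call when reading the
    // compressed image back in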
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp->cptDir + "/" + filename;

    // open the compressed checkpoint file holding this store's image
    int fd = open(filepath.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    gzFile compressed_mem = gzdopen(fd, "rb");
    if (compressed_mem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    uint8_t* pmem = backingStore[store_id].second;
    AddrRange range = backingStore[store_id].first;

    // unmap the backing store that was mmapped in the constructor; this
    // is done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it
    munmap((char*) pmem, range.size());

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

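    // allocate a fresh anonymous, zero-filled mapping of the same size
    // in place of the one we just unmapped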
    pmem = (uint8_t*) mmap(NULL, range.size(), PROT_READ | PROT_WRITE,
                           MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmem == (void*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
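    // read the compressed image in chunk_size byte pieces and copy it
    // into the backing store; the anonymous mapping above starts out
    // zero-filled, so only non-zero words need to be written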
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // Only copy bytes that are non-zero, so we don't give
            // the VM system hell
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}