/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */

#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/BusAddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"
#include "mem/physical.hh"

using namespace std;

PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories) :
    _name(_name), size(0)
{
    // add the memories from the system to the address map as
    // appropriate
    for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
         m != _memories.end(); ++m) {
        // only add the memory if it is part of the global address map
        if ((*m)->isInAddrMap()) {
            memories.push_back(*m);

            // calculate the total size once and for all
            size += (*m)->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            if (addrMap.insert((*m)->getAddrRange(), *m) == addrMap.end())
                fatal("Memory address range for %s is overlapping\n",
                      (*m)->name());
        } else {
            DPRINTF(BusAddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    (*m)->name());
            // this type of memory is used e.g. as reference memory by
            // Ruby, and it also needs a backing store, but it should
            // not be part of the global address map

            // simply do it independently, also note that memories of
            // this kind are allowed to overlap in the logical address
            // map
            vector<AbstractMemory*> unmapped_mems;
            unmapped_mems.push_back(*m);
            createBackingStore((*m)->getAddrRange(), unmapped_mems);
        }
    }

    // iterate over the increasing addresses and create as large
    // chunks as possible of contiguous space to be mapped to backing
    // store, also remember what memories constitute the range so we
    // can go and find out if we have to init their parts to zero
    AddrRange curr_range;
    vector<AbstractMemory*> curr_memories;
    for (AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.begin();
         r != addrMap.end(); ++r) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r->second->isNull()) {
            // if the current range is valid, decide if we split or
            // not
            if (curr_range.valid()) {
                // if the ranges are neighbours, then append, this
                // will eventually be extended to include support for
                // address striping and merge the interleaved ranges
                if (curr_range.end + 1 == r->first.start) {
                    DPRINTF(BusAddrRanges,
                            "Merging neighbouring ranges %x:%x and %x:%x\n",
                            curr_range.start, curr_range.end, r->first.start,
                            r->first.end);
                    // update the end of the range and add the current
                    // memory to the list of memories
                    curr_range.end = r->first.end;
                    curr_memories.push_back(r->second);
                } else {
                    // what we already have is valid, and this is not
                    // contiguous, so create the backing store and
                    // then start over
                    createBackingStore(curr_range, curr_memories);

                    // remember the current range and reset the current
                    // set of memories to contain this one
                    curr_range = r->first;
                    curr_memories.clear();
                    curr_memories.push_back(r->second);
                }
            } else {
                // we haven't seen any valid ranges yet, so remember
                // the current range and reset the current set of
                // memories to contain this one
                curr_range = r->first;
                curr_memories.clear();
                curr_memories.push_back(r->second);
            }
        }
    }

    // if we have a valid range upon finishing the iteration, then
    // create the backing store
    if (curr_range.valid())
        createBackingStore(curr_range, curr_memories);
}

void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories)
{
    // perform the actual mmap
    DPRINTF(BusAddrRanges, "Creating backing store for range %x:%x\n",
            range.start, range.end);
    int map_flags = MAP_ANON | MAP_PRIVATE;
    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %x:%x!\n", range.size(),
              range.start, range.end);
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.push_back(make_pair(range, pmem));

    // point the memories to their backing store, and if requested,
    // initialize the memory range to 0
    for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
         m != _memories.end(); ++m) {
        DPRINTF(BusAddrRanges, "Mapping memory %s to backing store\n",
                (*m)->name());
        (*m)->setBackingStore(pmem);

        // if it should be zero, then go and make it so
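        // (a fresh anonymous mapping is already zero-filled by the
        // kernel, so this memset is arguably redundant here; note that
        // it also touches every page and thus forfeits the sparseness
        // of the mapping)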
        if ((*m)->initToZero())
            memset(pmem, 0, (*m)->size());

        // advance the pointer for the next memory in line
        pmem += (*m)->size();
    }
}

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
         s != backingStore.end(); ++s)
        munmap((char*)s->second, s->first.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    // see if the address is within the last matched range
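    // (the comparison below relies on Range's point/range comparison
    // operators, under which addr != rangeCache holds when addr lies
    // outside the cached range)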
    if (addr != rangeCache) {
        // lookup in the interval tree
        AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.find(addr);
        if (r == addrMap.end()) {
            // not in the cache, and not in the tree
            return false;
        }
        // the range is in the tree, update the cache
        rangeCache = r->first;
    }

    assert(addrMap.find(addr) != addrMap.end());

    // either matched the cache or found in the tree
    return true;
}

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely to
    // be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    for (vector<AbstractMemory*>::const_iterator m = memories.begin();
         m != memories.end(); ++m) {
        if ((*m)->isConfReported()) {
            ranges.push_back((*m)->getAddrRange());
        }
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    AddrRangeMap<AbstractMemory*>::const_iterator m = addrMap.find(addr);
    assert(m != addrMap.end());
    m->second->access(pkt);
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    AddrRangeMap<AbstractMemory*>::const_iterator m = addrMap.find(addr);
    assert(m != addrMap.end());
    m->second->functionalAccess(pkt);
}

void
PhysicalMemory::serialize(ostream& os)
{
    // serialize all the locked addresses and their context ids
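    // (the locked addresses record outstanding load-locked/store-
    // conditional reservations, which must survive a checkpoint)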
    vector<Addr> lal_addr;
    vector<int> lal_cid;

    for (vector<AbstractMemory*>::iterator m = memories.begin();
         m != memories.end(); ++m) {
        const list<LockedAddr>& locked_addrs = (*m)->getLockedAddrList();
        for (list<LockedAddr>::const_iterator l = locked_addrs.begin();
             l != locked_addrs.end(); ++l) {
            lal_addr.push_back(l->addr);
            lal_cid.push_back(l->contextId);
        }
    }

    arrayParamOut(os, "lal_addr", lal_addr);
    arrayParamOut(os, "lal_cid", lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
         s != backingStore.end(); ++s) {
        nameOut(os, csprintf("%s.store%d", name(), store_id));
        serializeStore(os, store_id++, s->first, s->second);
    }
}

void
PhysicalMemory::serializeStore(ostream& os, unsigned int store_id,
                               AddrRange range, uint8_t* pmem)
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = name() + ".store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    string filepath = Checkpoint::dir() + "/" + filename;
    int fd = creat(filepath.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);
    }

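    // gzdopen attaches the compressed stream to the already-open file
    // descriptor, and the gzclose below closes that descriptor as
    // well, so no separate close(fd) is needed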
    gzFile compressed_mem = gzdopen(fd, "wb");
    if (compressed_mem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite fails if (int)len < 0 (gzwrite returns int)
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

void
PhysicalMemory::unserialize(Checkpoint* cp, const string& section)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<int> lal_cid;
    arrayParamIn(cp, section, "lal_addr", lal_addr);
    arrayParamIn(cp, section, "lal_cid", lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        AddrRangeMap<AbstractMemory*>::iterator m = addrMap.find(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        unserializeStore(cp, csprintf("%s.store%d", section, i));
    }
}

void
PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp->cptDir + "/" + filename;

    // open the compressed memory file
    int fd = open(filepath.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    gzFile compressed_mem = gzdopen(fd, "rb");
    if (compressed_mem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    uint8_t* pmem = backingStore[store_id].second;
    AddrRange range = backingStore[store_id].first;

    // unmap the region that was mmapped in the constructor, this is
    // done here to make sure that gzip and open don't muck with
    // our nice large space of memory before we reallocate it
    munmap((char*) pmem, range.size());

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    pmem = (uint8_t*) mmap(NULL, range.size(), PROT_READ | PROT_WRITE,
                           MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmem == (void*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

    // the fresh anonymous mapping is not guaranteed to land at the
    // address that was just unmapped, so record the pointer we
    // actually got; note that the backing-store pointers handed to
    // the memories in the constructor are not re-established here
    backingStore[store_id].second = pmem;

    uint64_t curr_size = 0;
    // staging buffer of chunk_size bytes for the decompressed data
    long* temp_page = new long[chunk_size / sizeof(long)];
    long* pmem_current;
    int bytes_read;
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        // gzread returns 0 at end-of-file and -1 on error
        if (bytes_read <= 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // Only copy bytes that are non-zero, so we don't give
            // the VM system hell
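            // (words that stay zero never touch their page, so the
            // fresh anonymous mapping stays sparse)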
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}