/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */

#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"
#include "mem/physical.hh"

using namespace std;

PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories) :
    _name(_name), size(0)
{
    // add the memories from the system to the address map as
    // appropriate
    for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
         m != _memories.end(); ++m) {
        // only add the memory if it is part of the global address map
        if ((*m)->isInAddrMap()) {
            memories.push_back(*m);

            // calculate the total size once and for all
            size += (*m)->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            if (addrMap.insert((*m)->getAddrRange(), *m) == addrMap.end())
                fatal("Memory address range for %s is overlapping\n",
                      (*m)->name());
        } else {
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    (*m)->name());
            // this type of memory is used e.g. as reference memory by
            // Ruby; it also needs a backing store, but it should not
            // be part of the global address map

            // simply create the store independently; also note that
            // memories of this kind are allowed to overlap in the
            // logical address map
            vector<AbstractMemory*> unmapped_mems;
            unmapped_mems.push_back(*m);
            createBackingStore((*m)->getAddrRange(), unmapped_mems);
        }
    }

    // iterate over the increasing addresses and chunks of contiguous
    // space to be mapped to backing store, create it and inform the
    // memories
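    // as a hypothetical example: four channel ranges interleaved on
    // some address bits each cover a quarter of the address space;
    // they are collected one by one below, and once the last has been
    // seen the AddrRange list constructor merges them into a single
    // contiguous range that is given one backing store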
    vector<AddrRange> intlv_ranges;
    vector<AbstractMemory*> curr_memories;
    for (AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.begin();
         r != addrMap.end(); ++r) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r->second->isNull()) {
            // if the range is interleaved then save it for now
            if (r->first.interleaved()) {
                // if we have already collected interleaved ranges
                // that are not part of the same range, merge them
                // before adding the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r->first)) {
                    AddrRange merged_range(intlv_ranges);
                    createBackingStore(merged_range, curr_memories);
                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r->first);
                curr_memories.push_back(r->second);
            } else {
                vector<AbstractMemory*> single_memory;
                single_memory.push_back(r->second);
                createBackingStore(r->first, single_memory);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);
        createBackingStore(merged_range, curr_memories);
    }
}

void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories)
{
    if (range.interleaved())
        panic("Cannot create backing store for interleaved range %s\n",
              range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());
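    // an anonymous, private mapping is zero-filled and demand-paged
    // by the host OS, so even a very large simulated memory only
    // consumes host RAM for the pages that are actually touched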
    int map_flags = MAP_ANON | MAP_PRIVATE;
    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.push_back(make_pair(range, pmem));

    // point the memories to their backing store
    for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
         m != _memories.end(); ++m) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                (*m)->name());
        (*m)->setBackingStore(pmem);
    }
}

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
         s != backingStore.end(); ++s)
        munmap((char*)s->second, s->first.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
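    // a single-entry cache of the last matched range exploits the
    // spatial locality of memory accesses and avoids the tree lookup
    // in the common case; rangeCache must be declared mutable for
    // this const member function to be able to update it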
    // see if the address is within the last matched range
    if (!rangeCache.contains(addr)) {
        // lookup in the interval tree
        AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.find(addr);
        if (r == addrMap.end()) {
            // not in the cache, and not in the tree
            return false;
        }
        // the range is in the tree, update the cache
        rangeCache = r->first;
    }

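    // consistency check (debug builds only): whatever matched, the
    // address must also be present in the interval tree itself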
    assert(addrMap.find(addr) != addrMap.end());

    // either matched the cache or found in the tree
    return true;
}

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely to
    // be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    vector<AddrRange> intlv_ranges;
    for (AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.begin();
         r != addrMap.end(); ++r) {
        if (r->second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r->first.interleaved()) {
                // if we have already collected interleaved ranges
                // that are not part of the same range, merge them
                // before adding the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r->first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r->first);
            } else {
                // keep the current range
                ranges.push_back(r->first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    AddrRangeMap<AbstractMemory*>::const_iterator m = addrMap.find(addr);
    assert(m != addrMap.end());
    m->second->access(pkt);
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    AddrRangeMap<AbstractMemory*>::const_iterator m = addrMap.find(addr);
    assert(m != addrMap.end());
    m->second->functionalAccess(pkt);
}

void
PhysicalMemory::serialize(ostream& os)
{
    // serialize all the locked addresses and their context ids
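    // (the locked-address list tracks outstanding load-locked /
    // store-conditional reservations, which must survive a
    // checkpoint and restore)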
    vector<Addr> lal_addr;
    vector<int> lal_cid;

    for (vector<AbstractMemory*>::iterator m = memories.begin();
         m != memories.end(); ++m) {
        const list<LockedAddr>& locked_addrs = (*m)->getLockedAddrList();
        for (list<LockedAddr>::const_iterator l = locked_addrs.begin();
             l != locked_addrs.end(); ++l) {
            lal_addr.push_back(l->addr);
            lal_cid.push_back(l->contextId);
        }
    }

    arrayParamOut(os, "lal_addr", lal_addr);
    arrayParamOut(os, "lal_cid", lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
         s != backingStore.end(); ++s) {
        nameOut(os, csprintf("%s.store%d", name(), store_id));
        serializeStore(os, store_id++, s->first, s->second);
    }
}

void
PhysicalMemory::serializeStore(ostream& os, unsigned int store_id,
                               AddrRange range, uint8_t* pmem)
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = name() + ".store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);
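    // SERIALIZE_SCALAR emits the parameter under the variable's own
    // name, so the matching UNSERIALIZE_SCALAR calls in
    // unserializeStore must use identically named variables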

    // write memory file
    string filepath = Checkpoint::dir() + "/" + filename;
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite fails if (int)len < 0 (gzwrite returns int)
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

void
PhysicalMemory::unserialize(Checkpoint* cp, const string& section)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<int> lal_cid;
    arrayParamIn(cp, section, "lal_addr", lal_addr);
    arrayParamIn(cp, section, "lal_cid", lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        AddrRangeMap<AbstractMemory*>::const_iterator m =
            addrMap.find(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        unserializeStore(cp, csprintf("%s.store%d", section, i));
    }
}

void
PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
{
    const uint32_t chunk_size = 16384; // bytes requested per gzread call

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp->cptDir + "/" + filename;

    // open the compressed memory file
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // we've already got the actual backing store mapped
    uint8_t* pmem = backingStore[store_id].second;
    AddrRange range = backingStore[store_id].first;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    // note that the buffer is allocated as chunk_size longs although
    // gzread below is only asked for chunk_size bytes, so it is
    // comfortably over-sized
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    // gzread returns an int: the number of bytes read, 0 at end of
    // file, or a negative value on error
    int bytes_read;
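    // copy only non-zero words into the backing store: the anonymous
    // mmap in createBackingStore already provides zero-filled pages,
    // and skipping zero words avoids faulting in host pages that the
    // checkpointed workload never wrote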
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read <= 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // only copy words that are non-zero, so we don't give
            // the VM system hell
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}