physical.cc revision 10482
/*
 * Copyright (c) 2012, 2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */

#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"
#include "mem/physical.hh"

using namespace std;

PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories) :
    _name(_name), size(0)
{
    // add the memories from the system to the address map as
    // appropriate
    for (const auto& m : _memories) {
        // only add the memory if it is part of the global address map
        if (m->isInAddrMap()) {
            memories.push_back(m);

            // calculate the total size once and for all
            size += m->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
                     "Memory address range for %s is overlapping\n",
                     m->name());
        } else {
            // this type of memory is used e.g. as reference memory by
            // Ruby; it also needs a backing store, but should not be
            // part of the global address map
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    m->name());

            // sanity check
            fatal_if(m->getAddrRange().interleaved(),
                     "Memory %s that is not in the global address map cannot "
                     "be interleaved\n", m->name());

            // simply create the backing store independently; note that
            // these memories are allowed to overlap in the logical
            // address map
            vector<AbstractMemory*> unmapped_mems{m};
            createBackingStore(m->getAddrRange(), unmapped_mems);
        }
    }

    // iterate over the addresses in increasing order, create backing
    // store for each chunk of contiguous space, and inform the
    // memories
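    // interleaved ranges are accumulated and merged first, so that
    // all memories in an interleaved set end up sharing one
    // contiguous backing store; e.g. two ranges that interleave on a
    // single bit across the same [start, end) interval merge into
    // one plain range covering that interval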
    vector<AddrRange> intlv_ranges;
    vector<AbstractMemory*> curr_memories;
    for (const auto& r : addrMap) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r.second->isNull()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we have already collected interleaved ranges
                // that are not part of the same range, then first do
                // a merge before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    AddrRange merged_range(intlv_ranges);
                    createBackingStore(merged_range, curr_memories);
                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r.first);
                curr_memories.push_back(r.second);
            } else {
                vector<AbstractMemory*> single_memory{r.second};
                createBackingStore(r.first, single_memory);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);
        createBackingStore(merged_range, curr_memories);
    }
}

void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories)
{
    panic_if(range.interleaved(),
             "Cannot create backing store for interleaved range %s\n",
             range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());
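    // note that an anonymous, private mapping is zero-initialized,
    // and the host OS allocates physical pages lazily on first
    // touch, so even a large simulated memory does not immediately
    // consume host RAM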
    int map_flags = MAP_ANON | MAP_PRIVATE;
    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.push_back(make_pair(range, pmem));

    // point the memories to their backing store
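    // note that every memory in an interleaved set is handed the
    // same base pointer; their merged sub-ranges all span the same
    // address interval, and the interleaving bits only determine
    // which memory responds to a given address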
    for (const auto& m : _memories) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                m->name());
        m->setBackingStore(pmem);
    }
}

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (auto& s : backingStore)
        munmap((char*)s.second, s.first.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    // see if the address is within the last matched range
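    // (the single-entry cache exploits locality in the address
    // stream and avoids an interval-tree lookup in the common case)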
    if (!rangeCache.contains(addr)) {
        // lookup in the interval tree
        const auto& r = addrMap.find(addr);
        if (r == addrMap.end()) {
            // not in the cache, and not in the tree
            return false;
        }
        // the range is in the tree, update the cache
        rangeCache = r->first;
    }

    assert(addrMap.find(addr) != addrMap.end());

    // either matched the cache or found in the tree
    return true;
}

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is
    // unlikely to be called more than once the iteration should not
    // be a problem
    AddrRangeList ranges;
    vector<AddrRange> intlv_ranges;
    for (const auto& r : addrMap) {
        if (r.second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we have already collected interleaved ranges
                // that are not part of the same range, then first do
                // a merge before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r.first);
            } else {
                // keep the current range
                ranges.push_back(r.first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    const auto& m = addrMap.find(addr);
    assert(m != addrMap.end());
    m->second->access(pkt);
}

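// functional accesses are used e.g. for loading binaries and for
// debugger reads and writes; they update the memory contents
// immediately without affecting simulated timing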
void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    const auto& m = addrMap.find(addr);
    assert(m != addrMap.end());
    m->second->functionalAccess(pkt);
}

void
PhysicalMemory::serialize(ostream& os)
{
    // serialize all the locked addresses and their context ids
    vector<Addr> lal_addr;
    vector<int> lal_cid;

    for (auto& m : memories) {
        const list<LockedAddr>& locked_addrs = m->getLockedAddrList();
        for (const auto& l : locked_addrs) {
            lal_addr.push_back(l.addr);
            lal_cid.push_back(l.contextId);
        }
    }

    arrayParamOut(os, "lal_addr", lal_addr);
    arrayParamOut(os, "lal_cid", lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (auto& s : backingStore) {
        nameOut(os, csprintf("%s.store%d", name(), store_id));
        serializeStore(os, store_id++, s.first, s.second);
    }
}

void
PhysicalMemory::serializeStore(ostream& os, unsigned int store_id,
                               AddrRange range, uint8_t* pmem)
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = name() + ".store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    string filepath = Checkpoint::dir() + "/" + filename;
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite fails if (int)len < 0 (gzwrite returns int), so split
    // the write into passes of at most INT_MAX bytes
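    // e.g. a 4 GiB store is written in three passes: two passes of
    // INT_MAX bytes each and a final pass of the remaining 2 bytes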
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

void
PhysicalMemory::unserialize(Checkpoint* cp, const string& section)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<int> lal_cid;
    arrayParamIn(cp, section, "lal_addr", lal_addr);
    arrayParamIn(cp, section, "lal_cid", lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        const auto& m = addrMap.find(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        unserializeStore(cp, csprintf("%s.store%d", section, i));
    }
}

void
PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
{
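    // read the compressed image back into the backing store in
    // pieces of chunk_size bytes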
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp->cptDir + "/" + filename;

    // open the compressed memory file
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // we've already got the actual backing store mapped
    uint8_t* pmem = backingStore[store_id].second;
    AddrRange range = backingStore[store_id].first;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    // the buffer needs to hold chunk_size bytes
    long* temp_page = new long[chunk_size / sizeof(long)];
    long* pmem_current;
    // gzread returns an int, negative on error
    int bytes_read;
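    // copy only the non-zero words: pages that were never written
    // decompress as zeros, and skipping them avoids touching (and
    // thus allocating) the corresponding host pages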
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read <= 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // Only copy bytes that are non-zero, so we don't give
            // the VM system hell
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}