/*
 * Copyright (c) 2012, 2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */

#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"
#include "mem/physical.hh"

using namespace std;

PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories) :
    _name(_name), rangeCache(addrMap.end()), size(0)
{
    // add the memories from the system to the address map as
    // appropriate
    for (const auto& m : _memories) {
        // only add the memory if it is part of the global address map
        if (m->isInAddrMap()) {
            memories.push_back(m);

            // calculate the total size once and for all
            size += m->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
                     "Memory address range for %s is overlapping\n",
                     m->name());
        } else {
            // memories of this type are used e.g. as reference memory
            // by Ruby; they still need a backing store, but should not
            // be part of the global address map
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    m->name());

            // sanity check
            fatal_if(m->getAddrRange().interleaved(),
                     "Memory %s that is not in the global address map cannot "
                     "be interleaved\n", m->name());

            // simply do it independently; note that memories of this
            // kind are allowed to overlap in the logical address map
            vector<AbstractMemory*> unmapped_mems{m};
            createBackingStore(m->getAddrRange(), unmapped_mems);
        }
    }

    // iterate over the increasing addresses, merging interleaved
    // ranges into contiguous chunks, create the backing store for
    // each chunk and inform the memories
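    // for example, four channel ranges that each cover a quarter of
    // [0, 1GB) through address-bit interleaving merge back into a
    // single contiguous [0, 1GB) chunk, and all four memories then
    // share one backing store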
    vector<AddrRange> intlv_ranges;
    vector<AbstractMemory*> curr_memories;
    for (const auto& r : addrMap) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r.second->isNull()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    AddrRange merged_range(intlv_ranges);
                    createBackingStore(merged_range, curr_memories);
                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r.first);
                curr_memories.push_back(r.second);
            } else {
                vector<AbstractMemory*> single_memory{r.second};
                createBackingStore(r.first, single_memory);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);
        createBackingStore(merged_range, curr_memories);
    }
}

void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories)
{
    panic_if(range.interleaved(),
             "Cannot create backing store for interleaved range %s\n",
             range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());
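    // MAP_ANON | MAP_PRIVATE requests anonymous copy-on-write pages
    // that are not backed by a file (hence the -1 fd below); the
    // pages are zero-filled and only consume host memory once touched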
    int map_flags = MAP_ANON | MAP_PRIVATE;
    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.push_back(make_pair(range, pmem));

    // point the memories to their backing store
    for (const auto& m : _memories) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                m->name());
        m->setBackingStore(pmem);
    }
}

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (auto& s : backingStore)
        munmap((char*)s.second, s.first.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    // see if the address is within the last matched range
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        return true;
    } else {
        // lookup in the interval tree
        const auto& r = addrMap.find(addr);
        if (r == addrMap.end()) {
            // not in the cache, and not in the tree
            return false;
        }
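        // note: rangeCache must be declared mutable (as it is in
        // physical.hh) for this const member function to be able to
        // update it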
        // the range is in the tree, update the cache
        rangeCache = r;
        return true;
    }
}

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely
    // to be called more than once the iteration should not be a problem
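    // note that interleaved ranges are merged before being reported,
    // so a group of channel ranges appears to the configuration as a
    // single contiguous range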
    AddrRangeList ranges;
    vector<AddrRange> intlv_ranges;
    for (const auto& r : addrMap) {
        if (r.second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r.first);
            } else {
                // keep the current range
                ranges.push_back(r.first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        rangeCache->second->access(pkt);
    } else {
        // do not update the cache here, as we typically call
        // isMemAddr before calling access
        const auto& m = addrMap.find(addr);
        assert(m != addrMap.end());
        m->second->access(pkt);
    }
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        rangeCache->second->functionalAccess(pkt);
    } else {
        // do not update the cache here, as we typically call
        // isMemAddr before calling functionalAccess
        const auto& m = addrMap.find(addr);
        assert(m != addrMap.end());
        m->second->functionalAccess(pkt);
    }
}

void
PhysicalMemory::serialize(ostream& os)
{
    // serialize all the locked addresses and their context ids
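    // (these are the outstanding load-locked reservations, which must
    // survive a checkpoint for subsequent conditional stores to
    // behave correctly after a restore)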
    vector<Addr> lal_addr;
    vector<int> lal_cid;

    for (auto& m : memories) {
        const list<LockedAddr>& locked_addrs = m->getLockedAddrList();
        for (const auto& l : locked_addrs) {
            lal_addr.push_back(l.addr);
            lal_cid.push_back(l.contextId);
        }
    }

    arrayParamOut(os, "lal_addr", lal_addr);
    arrayParamOut(os, "lal_cid", lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (auto& s : backingStore) {
        nameOut(os, csprintf("%s.store%d", name(), store_id));
        serializeStore(os, store_id++, s.first, s.second);
    }
}

void
PhysicalMemory::serializeStore(ostream& os, unsigned int store_id,
                               AddrRange range, uint8_t* pmem)
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = name() + ".store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    string filepath = Checkpoint::dir() + "/" + filename;
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);
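    // the image is written through zlib, so the long runs of zeros
    // from untouched pages compress to almost nothing and the
    // checkpoint stays small even for large memories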

    uint64_t pass_size = 0;

    // gzwrite takes an unsigned int length but returns an int, so
    // cap each pass at INT_MAX bytes and loop until the whole range
    // has been written
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

void
PhysicalMemory::unserialize(Checkpoint* cp, const string& section)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<int> lal_cid;
    arrayParamIn(cp, section, "lal_addr", lal_addr);
    arrayParamIn(cp, section, "lal_cid", lal_cid);
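    // each (address, context id) pair re-establishes a load-locked
    // reservation on the memory that owns that address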
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        const auto& m = addrMap.find(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        unserializeStore(cp, csprintf("%s.store%d", section, i));
    }
}

void
PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp->cptDir + "/" + filename;

    // open the compressed memory file
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // we've already got the actual backing store mapped
    uint8_t* pmem = backingStore[store_id].second;
    AddrRange range = backingStore[store_id].first;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

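    // read the image back in chunk_size pieces; only non-zero words
    // are copied into the backing store, so pages that were never
    // written remain untouched anonymous pages (which already read as
    // zero) and cost no host memory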
    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    int bytes_read;
    while (curr_size < range.size()) {
        // gzread returns the number of uncompressed bytes, 0 at the
        // end of the file, or -1 on error
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read <= 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // only copy the non-zero words, so we don't give the
            // host VM system a hard time
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}