/*
 * Copyright (c) 2012, 2014, 2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */

#include "mem/physical.hh"

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"

/**
 * On Linux, MAP_NORESERVE allows us to simulate a very large memory
 * without committing to actually providing the swap space on the
 * host. On FreeBSD or OSX the MAP_NORESERVE flag does not exist,
 * so simply make it 0.
 */
#if defined(__APPLE__) || defined(__FreeBSD__)
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#endif

using namespace std;

PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories,
                               bool mmap_using_noreserve) :
    _name(_name), size(0), mmapUsingNoReserve(mmap_using_noreserve)
{
    if (mmap_using_noreserve)
        warn("Not reserving swap space. May cause SIGSEGV on actual usage\n");

    // add the memories from the system to the address map as
    // appropriate
    for (const auto& m : _memories) {
        // only add the memory if it is part of the global address map
        if (m->isInAddrMap()) {
            memories.push_back(m);

            // calculate the total size once and for all
            size += m->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
                     "Memory address range for %s is overlapping\n",
                     m->name());
        } else {
            // this type of memory is used e.g. as reference memory by
            // Ruby, and it also needs a backing store, but should
            // not be part of the global address map
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    m->name());

            // sanity check
            fatal_if(m->getAddrRange().interleaved(),
                     "Memory %s that is not in the global address map cannot "
                     "be interleaved\n", m->name());

            // simply do it independently, and note that memories of
            // this kind are allowed to overlap in the logical address
            // map
            vector<AbstractMemory*> unmapped_mems{m};
            createBackingStore(m->getAddrRange(), unmapped_mems,
                               m->isConfReported(), m->isInAddrMap(),
                               m->isKvmMap());
        }
    }

    // iterate over the increasing addresses and chunks of contiguous
    // space to be mapped to backing store, create it and inform the
    // memories
    vector<AddrRange> intlv_ranges;
    vector<AbstractMemory*> curr_memories;
    for (const auto& r : addrMap) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r.second->isNull()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    AddrRange merged_range(intlv_ranges);

                    AbstractMemory *f = curr_memories.front();
                    for (const auto& c : curr_memories)
                        if (f->isConfReported() != c->isConfReported() ||
                            f->isInAddrMap() != c->isInAddrMap() ||
                            f->isKvmMap() != c->isKvmMap())
                            fatal("Inconsistent flags in an interleaved "
                                  "range\n");

                    createBackingStore(merged_range, curr_memories,
                                       f->isConfReported(), f->isInAddrMap(),
                                       f->isKvmMap());

                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r.first);
                curr_memories.push_back(r.second);
            } else {
                vector<AbstractMemory*> single_memory{r.second};
                createBackingStore(r.first, single_memory,
                                   r.second->isConfReported(),
                                   r.second->isInAddrMap(),
                                   r.second->isKvmMap());
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);

        AbstractMemory *f = curr_memories.front();
        for (const auto& c : curr_memories)
            if (f->isConfReported() != c->isConfReported() ||
                f->isInAddrMap() != c->isInAddrMap() ||
                f->isKvmMap() != c->isKvmMap())
                fatal("Inconsistent flags in an interleaved "
                      "range\n");

        createBackingStore(merged_range, curr_memories,
                           f->isConfReported(), f->isInAddrMap(),
                           f->isKvmMap());
    }
}

void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories,
                                   bool conf_table_reported,
                                   bool in_addr_map, bool kvm_map)
{
    panic_if(range.interleaved(),
             "Cannot create backing store for interleaved range %s\n",
             range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());
    int map_flags = MAP_ANON | MAP_PRIVATE;

    // to be able to simulate very large memories, the user can opt to
    // pass noreserve to mmap
    if (mmapUsingNoReserve) {
        map_flags |= MAP_NORESERVE;
    }

    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.emplace_back(range, pmem,
                              conf_table_reported, in_addr_map, kvm_map);

    // point the memories to their backing store
    for (const auto& m : _memories) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                m->name());
        m->setBackingStore(pmem);
    }
}

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (auto& s : backingStore)
        munmap((char*)s.pmem, s.range.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    return addrMap.contains(addr) != addrMap.end();
}

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely to
    // be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    vector<AddrRange> intlv_ranges;
    for (const auto& r : addrMap) {
        if (r.second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r.first);
            } else {
                // keep the current range
                ranges.push_back(r.first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    const auto& m = addrMap.contains(pkt->getAddrRange());
    assert(m != addrMap.end());
    m->second->access(pkt);
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    const auto& m = addrMap.contains(pkt->getAddrRange());
    assert(m != addrMap.end());
    m->second->functionalAccess(pkt);
}

void
PhysicalMemory::serialize(CheckpointOut &cp) const
{
    // serialize all the locked addresses and their context ids
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;

    for (auto& m : memories) {
        const list<LockedAddr>& locked_addrs = m->getLockedAddrList();
        for (const auto& l : locked_addrs) {
            lal_addr.push_back(l.addr);
            lal_cid.push_back(l.contextId);
        }
    }

    SERIALIZE_CONTAINER(lal_addr);
    SERIALIZE_CONTAINER(lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (auto& s : backingStore) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
        serializeStore(cp, store_id++, s.range, s.pmem);
    }
}

void
PhysicalMemory::serializeStore(CheckpointOut &cp, unsigned int store_id,
                               AddrRange range, uint8_t* pmem) const
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = name() + ".store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    string filepath = CheckpointIn::dir() + "/" + filename.c_str();
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite fails if (int)len < 0 (gzwrite returns int)
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

void
PhysicalMemory::unserialize(CheckpointIn &cp)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;
    UNSERIALIZE_CONTAINER(lal_addr);
    UNSERIALIZE_CONTAINER(lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        const auto& m = addrMap.contains(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", i));
        unserializeStore(cp);
    }
}

void
PhysicalMemory::unserializeStore(CheckpointIn &cp)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp.cptDir + "/" + filename;

    // open the compressed memory file
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // we've already got the actual backing store mapped
    uint8_t* pmem = backingStore[store_id].pmem;
    AddrRange range = backingStore[store_id].range;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // Only copy bytes that are non-zero, so we don't give
            // the VM system hell
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}