physical.cc revision 10405
/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */

#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"
#include "mem/physical.hh"

using namespace std;

PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories) :
    _name(_name), size(0)
{
    // add the memories from the system to the address map as
    // appropriate
    for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
         m != _memories.end(); ++m) {
        // only add the memory if it is part of the global address map
        if ((*m)->isInAddrMap()) {
            memories.push_back(*m);

            // calculate the total size once and for all
            size += (*m)->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            if (addrMap.insert((*m)->getAddrRange(), *m) == addrMap.end())
                fatal("Memory address range for %s is overlapping\n",
                      (*m)->name());
        } else {
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    (*m)->name());
            // this type of memory is used e.g. as reference memory by
            // Ruby; it also needs a backing store, but should not be
            // part of the global address map

            // create the backing store independently, and note that
            // memories of this kind are allowed to overlap in the
            // logical address map
            vector<AbstractMemory*> unmapped_mems;
            unmapped_mems.push_back(*m);
            createBackingStore((*m)->getAddrRange(), unmapped_mems);
        }
    }

    // iterate over the increasing addresses and chunks of contiguous
    // space to be mapped to backing store, create it and inform the
    // memories
    vector<AddrRange> intlv_ranges;
    vector<AbstractMemory*> curr_memories;
    for (AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.begin();
         r != addrMap.end(); ++r) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r->second->isNull()) {
            // if the range is interleaved then save it for now
            if (r->first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r->first)) {
                    AddrRange merged_range(intlv_ranges);
                    createBackingStore(merged_range, curr_memories);
                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r->first);
                curr_memories.push_back(r->second);
            } else {
                vector<AbstractMemory*> single_memory;
                single_memory.push_back(r->second);
                createBackingStore(r->first, single_memory);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);
        createBackingStore(merged_range, curr_memories);
    }
}
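
// Create the backing store for a single contiguous (i.e. not
// interleaved) range and hand it to every memory in _memories. The
// constructor above merges interleaved ranges before calling this, so
// (as an illustrative example) four controllers interleaving over one
// 2 GB range end up sharing a single 2 GB mmap'ed region, each
// responding only to the addresses its interleaving bits select.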
void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories)
{
    if (range.interleaved())
        panic("Cannot create backing store for interleaved range %s\n",
              range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());
    int map_flags = MAP_ANON | MAP_PRIVATE;
    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.push_back(make_pair(range, pmem));

    // point the memories to their backing store
    for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
         m != _memories.end(); ++m) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                (*m)->name());
        (*m)->setBackingStore(pmem);
    }
}

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
         s != backingStore.end(); ++s)
        munmap((char*)s->second, s->first.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    // see if the address is within the last matched range
    if (!rangeCache.contains(addr)) {
        // lookup in the interval tree
        AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.find(addr);
        if (r == addrMap.end()) {
            // not in the cache, and not in the tree
            return false;
        }
        // the range is in the tree, update the cache
        rangeCache = r->first;
    }

    assert(addrMap.find(addr) != addrMap.end());

    // either matched the cache or found in the tree
    return true;
}
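
// Ranges are reported for configuration purposes in merged form: the
// slices of an interleaved range are folded back into the single
// contiguous range they were created from, mirroring the merging done
// for the backing store in the constructor.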
AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely to
    // be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    vector<AddrRange> intlv_ranges;
    for (AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.begin();
         r != addrMap.end(); ++r) {
        if (r->second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r->first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r->first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r->first);
            } else {
                // keep the current range
                ranges.push_back(r->first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    AddrRangeMap<AbstractMemory*>::const_iterator m = addrMap.find(addr);
    assert(m != addrMap.end());
    m->second->access(pkt);
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    AddrRangeMap<AbstractMemory*>::const_iterator m = addrMap.find(addr);
    assert(m != addrMap.end());
    m->second->functionalAccess(pkt);
}

void
PhysicalMemory::serialize(ostream& os)
{
    // serialize all the locked addresses and their context ids
    vector<Addr> lal_addr;
    vector<int> lal_cid;

    for (vector<AbstractMemory*>::iterator m = memories.begin();
         m != memories.end(); ++m) {
        const list<LockedAddr>& locked_addrs = (*m)->getLockedAddrList();
        for (list<LockedAddr>::const_iterator l = locked_addrs.begin();
             l != locked_addrs.end(); ++l) {
            lal_addr.push_back(l->addr);
            lal_cid.push_back(l->contextId);
        }
    }

    arrayParamOut(os, "lal_addr", lal_addr);
    arrayParamOut(os, "lal_cid", lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
         s != backingStore.end(); ++s) {
        nameOut(os, csprintf("%s.store%d", name(), store_id));
        serializeStore(os, store_id++, s->first, s->second);
    }
}
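
// Write a single backing store segment to a gzip-compressed file in
// the checkpoint directory. Note that gzwrite takes an int length, so
// stores larger than INT_MAX bytes must be written in several passes.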
void
PhysicalMemory::serializeStore(ostream& os, unsigned int store_id,
                               AddrRange range, uint8_t* pmem)
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = name() + ".store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    string filepath = Checkpoint::dir() + "/" + filename;
    int fd = creat(filepath.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);
    }

    gzFile compressed_mem = gzdopen(fd, "wb");
    if (compressed_mem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite takes an int length and fails if (int)len < 0, so
    // write the store in passes of at most INT_MAX bytes
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

void
PhysicalMemory::unserialize(Checkpoint* cp, const string& section)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<int> lal_cid;
    arrayParamIn(cp, section, "lal_addr", lal_addr);
    arrayParamIn(cp, section, "lal_cid", lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        AddrRangeMap<AbstractMemory*>::const_iterator m =
            addrMap.find(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        unserializeStore(cp, csprintf("%s.store%d", section, i));
    }
}
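
// Read a gzip-compressed segment back into the already mmap'ed
// backing store. Only non-zero words are written, so pages of the
// store that were all-zero at checkpoint time are never touched and
// the host VM system does not have to back them with real memory.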
void
PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp->cptDir + "/" + filename;

    // open the compressed memory file
    int fd = open(filepath.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    gzFile compressed_mem = gzdopen(fd, "rb");
    if (compressed_mem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // we've already got the actual backing store mapped
    uint8_t* pmem = backingStore[store_id].second;
    AddrRange range = backingStore[store_id].first;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // only copy words that are non-zero, so we don't give
            // the VM system hell
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}