--- physical.cc (9235:5aa4896ed55a)
+++ physical.cc (9293:df7c3f99ebca)
 /*
  * Copyright (c) 2012 ARM Limited
  * All rights reserved
  *
  * The license below extends only to copyright in the software and shall
  * not be construed as granting a license to any other intellectual
  * property including but not limited to intellectual property relating
  * to a hardware implementation of the functionality of the software
--- 23 unchanged lines hidden ---
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Authors: Andreas Hansson
  */
 
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/user.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <zlib.h>
+
+#include <cerrno>
+#include <climits>
+#include <cstdio>
+#include <iostream>
+#include <string>
+
 #include "debug/BusAddrRanges.hh"
+#include "debug/Checkpoint.hh"
+#include "mem/abstract_mem.hh"
 #include "mem/physical.hh"
 
 using namespace std;
 
-PhysicalMemory::PhysicalMemory(const vector<AbstractMemory*>& _memories) :
-    size(0)
+PhysicalMemory::PhysicalMemory(const string& _name,
+                               const vector<AbstractMemory*>& _memories) :
+    _name(_name), size(0)
 {
+    // add the memories from the system to the address map as
+    // appropriate
     for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
          m != _memories.end(); ++m) {
         // only add the memory if it is part of the global address map
         if ((*m)->isInAddrMap()) {
             memories.push_back(*m);
 
             // calculate the total size once and for all
             size += (*m)->size();
 
             // add the range to our interval tree and make sure it does not
             // intersect an existing range
             if (addrMap.insert((*m)->getAddrRange(), *m) == addrMap.end())
                 fatal("Memory address range for %s is overlapping\n",
                       (*m)->name());
-        } else
+        } else {
             DPRINTF(BusAddrRanges,
                     "Skipping memory %s that is not in global address map\n",
                     (*m)->name());
+            // this type of memory is used e.g. as reference memory by
+            // Ruby, and it also needs a backing store, but should
+            // not be part of the global address map
+
+            // simply do it independently; note that this kind of
+            // memory is allowed to overlap in the logical address map
+            vector<AbstractMemory*> unmapped_mems;
+            unmapped_mems.push_back(*m);
+            createBackingStore((*m)->getAddrRange(), unmapped_mems);
+        }
     }
+
+    // iterate over the increasing addresses and create as large
+    // chunks as possible of contiguous space to be mapped to backing
+    // store; also remember what memories constitute the range so we
+    // can go and find out if we have to init their parts to zero
+    AddrRange curr_range;
+    vector<AbstractMemory*> curr_memories;
+    for (AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.begin();
+         r != addrMap.end(); ++r) {
+        // simply skip past all memories that are null and hence do
+        // not need any backing store
+        if (!r->second->isNull()) {
+            // if the current range is valid, decide if we split or
+            // not
+            if (curr_range.valid()) {
+                // if the ranges are neighbours, then append; this
+                // will eventually be extended to include support for
+                // address striping and merge the interleaved ranges
+                if (curr_range.end + 1 == r->first.start) {
+                    DPRINTF(BusAddrRanges,
+                            "Merging neighbouring ranges %x:%x and %x:%x\n",
+                            curr_range.start, curr_range.end, r->first.start,
+                            r->first.end);
+                    // update the end of the range and add the current
+                    // memory to the list of memories
+                    curr_range.end = r->first.end;
+                    curr_memories.push_back(r->second);
+                } else {
+                    // what we already have is valid, and this is not
+                    // contiguous, so create the backing store and
+                    // then start over
+                    createBackingStore(curr_range, curr_memories);
+
+                    // remember the current range and reset the current
+                    // set of memories to contain this one
+                    curr_range = r->first;
+                    curr_memories.clear();
+                    curr_memories.push_back(r->second);
+                }
+            } else {
+                // we haven't seen any valid ranges yet, so remember
+                // the current range and reset the current set of
+                // memories to contain this one
+                curr_range = r->first;
+                curr_memories.clear();
+                curr_memories.push_back(r->second);
+            }
+        }
+    }
+
+    // if we have a valid range upon finishing the iteration, then
+    // create the backing store
+    if (curr_range.valid())
+        createBackingStore(curr_range, curr_memories);
 }
 
+void
+PhysicalMemory::createBackingStore(AddrRange range,
+                                   const vector<AbstractMemory*>& _memories)
+{
+    // perform the actual mmap
+    DPRINTF(BusAddrRanges, "Creating backing store for range %x:%x\n",
+            range.start, range.end);
+    int map_flags = MAP_ANON | MAP_PRIVATE;
+    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
+                                    PROT_READ | PROT_WRITE,
+                                    map_flags, -1, 0);
+
+    if (pmem == (uint8_t*) MAP_FAILED) {
+        perror("mmap");
+        fatal("Could not mmap %d bytes for range %x:%x!\n", range.size(),
+              range.start, range.end);
+    }
+
+    // remember this backing store so we can checkpoint it and unmap
+    // it appropriately
+    backingStore.push_back(make_pair(range, pmem));
+
+    // point the memories to their backing store, and if requested,
+    // initialize the memory range to 0
+    for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
+         m != _memories.end(); ++m) {
+        DPRINTF(BusAddrRanges, "Mapping memory %s to backing store\n",
+                (*m)->name());
+        (*m)->setBackingStore(pmem);
+
+        // if it should be zero, then go and make it so
+        if ((*m)->initToZero())
+            memset(pmem, 0, (*m)->size());
+
+        // advance the pointer for the next memory in line
+        pmem += (*m)->size();
+    }
+}
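
The second loop in the new constructor walks the address map in increasing order and coalesces neighbouring ranges, so one backing store can serve several adjacent memories. Below is a standalone sketch of just that merging logic, using a made-up Range struct and example addresses in place of gem5's AddrRange and AddrRangeMap:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

// Hypothetical stand-in for gem5's AddrRange: inclusive [start, end].
struct Range { uint64_t start; uint64_t end; bool valid; };

int main()
{
    // an ordered address map, as the interval-tree iteration yields
    std::map<uint64_t, Range> addr_map;
    addr_map[0x0]        = {0x0,        0x0fffffff, true};
    addr_map[0x10000000] = {0x10000000, 0x1fffffff, true}; // neighbour
    addr_map[0x40000000] = {0x40000000, 0x4fffffff, true}; // gap before this

    std::vector<Range> chunks;
    Range curr = {0, 0, false};
    for (std::map<uint64_t, Range>::const_iterator r = addr_map.begin();
         r != addr_map.end(); ++r) {
        if (curr.valid && curr.end + 1 == r->second.start) {
            // neighbouring ranges: extend the current chunk
            curr.end = r->second.end;
        } else {
            // not contiguous: emit what we have and start over
            if (curr.valid)
                chunks.push_back(curr);
            curr = r->second;
        }
    }
    if (curr.valid)
        chunks.push_back(curr);

    // prints two chunks: 0x0:0x1fffffff and 0x40000000:0x4fffffff
    for (size_t i = 0; i < chunks.size(); ++i)
        printf("chunk %zu: %#llx:%#llx\n", i,
               (unsigned long long)chunks[i].start,
               (unsigned long long)chunks[i].end);
    return 0;
}

With interleaved (striped) ranges, which the comment flags as future work, the end + 1 adjacency test alone would no longer be sufficient.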
+PhysicalMemory::~PhysicalMemory()
+{
+    // unmap the backing store
+    for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
+         s != backingStore.end(); ++s)
+        munmap((char*)s->second, s->first.size());
+}
 
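createBackingStore and the new destructor form a matched pair around one POSIX idiom: an anonymous private mapping handed out to the memories, and unmapped again on teardown. A minimal self-contained sketch of that pattern (the size and names are illustrative only, not gem5 code):

#include <sys/mman.h>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

int main()
{
    size_t bytes = 1 << 20; // 1 MiB backing store (example size)

    // anonymous private mapping, as in createBackingStore; the kernel
    // hands out lazily allocated, zero-filled pages
    uint8_t* pmem = (uint8_t*) mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                                    MAP_ANON | MAP_PRIVATE, -1, 0);
    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }

    // explicit zeroing, as done for memories that request initToZero()
    memset(pmem, 0, bytes);

    // mirror of the destructor: release the mapping when done
    if (munmap(pmem, bytes) != 0)
        perror("munmap");
    return EXIT_SUCCESS;
}

A fresh anonymous mapping is already zero-filled by the kernel, so the memset for initToZero is belt-and-braces; its main side effect is forcing the pages to be materialised.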
 bool
 PhysicalMemory::isMemAddr(Addr addr) const
 {
     // see if the address is within the last matched range
     if (addr != rangeCache) {
         // lookup in the interval tree
         AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.find(addr);
         if (r == addrMap.end()) {
--- 40 unchanged lines hidden ---
 PhysicalMemory::functionalAccess(PacketPtr pkt)
 {
     assert(pkt->isRequest());
     Addr addr = pkt->getAddr();
     AddrRangeMap<AbstractMemory*>::const_iterator m = addrMap.find(addr);
     assert(m != addrMap.end());
     m->second->functionalAccess(pkt);
 }
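
isMemAddr first checks a one-entry cache of the last matched range and only then searches the interval tree. The containment test itself can be expressed on any ordered map keyed by range start, assuming non-overlapping ranges; a sketch with made-up types (gem5's AddrRangeMap wraps the same idea):

#include <cstdint>
#include <map>

// Hypothetical miniature of an AddrRangeMap lookup: ranges are keyed
// by their start address and assumed non-overlapping.
struct Range { uint64_t start; uint64_t end; };

// returns true if some range in 'm' contains 'addr'
bool contains(const std::map<uint64_t, Range>& m, uint64_t addr)
{
    // first range starting strictly after addr, then step back one
    std::map<uint64_t, Range>::const_iterator it = m.upper_bound(addr);
    if (it == m.begin())
        return false;
    --it;
    return it->second.start <= addr && addr <= it->second.end;
}

Because the ranges do not overlap, upper_bound plus one step back lands on the only candidate range, in O(log n).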
+
+void
+PhysicalMemory::serialize(ostream& os)
+{
+    // serialize all the locked addresses and their context ids
+    vector<Addr> lal_addr;
+    vector<int> lal_cid;
+
+    for (vector<AbstractMemory*>::iterator m = memories.begin();
+         m != memories.end(); ++m) {
+        const list<LockedAddr>& locked_addrs = (*m)->getLockedAddrList();
+        for (list<LockedAddr>::const_iterator l = locked_addrs.begin();
+             l != locked_addrs.end(); ++l) {
+            lal_addr.push_back(l->addr);
+            lal_cid.push_back(l->contextId);
+        }
+    }
+
+    arrayParamOut(os, "lal_addr", lal_addr);
+    arrayParamOut(os, "lal_cid", lal_cid);
+
+    // serialize the backing stores
+    unsigned int nbr_of_stores = backingStore.size();
+    SERIALIZE_SCALAR(nbr_of_stores);
+
+    unsigned int store_id = 0;
+    // store each backing store memory segment in a file
+    for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
+         s != backingStore.end(); ++s) {
+        nameOut(os, csprintf("%s.store%d", name(), store_id));
+        serializeStore(os, store_id++, s->first, s->second);
+    }
+}
+
+void
+PhysicalMemory::serializeStore(ostream& os, unsigned int store_id,
+                               AddrRange range, uint8_t* pmem)
+{
+    // we cannot use the address range for the name as the
+    // memories that are not part of the address map can overlap
+    string filename = "store" + to_string(store_id) + ".pmem";
+    long range_size = range.size();
+
+    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
+            filename, range_size);
+
+    SERIALIZE_SCALAR(store_id);
+    SERIALIZE_SCALAR(filename);
+    SERIALIZE_SCALAR(range_size);
+
+    // write memory file
+    string filepath = Checkpoint::dir() + "/" + filename.c_str();
+    int fd = creat(filepath.c_str(), 0664);
+    if (fd < 0) {
+        perror("creat");
+        fatal("Can't open physical memory checkpoint file '%s'\n",
+              filename);
+    }
+
+    gzFile compressed_mem = gzdopen(fd, "wb");
+    if (compressed_mem == NULL)
+        fatal("Insufficient memory to allocate compression state for %s\n",
+              filename);
+
+    uint64_t pass_size = 0;
+
+    // gzwrite fails if (int)len < 0 (gzwrite returns int)
+    for (uint64_t written = 0; written < range.size();
+         written += pass_size) {
+        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
+            (uint64_t)INT_MAX : (range.size() - written);
+
+        if (gzwrite(compressed_mem, pmem + written,
+                    (unsigned int) pass_size) != (int) pass_size) {
+            fatal("Write failed on physical memory checkpoint file '%s'\n",
+                  filename);
+        }
+    }
+
+    // close the compressed stream and check that the exit status
+    // is zero
+    if (gzclose(compressed_mem))
+        fatal("Close failed on physical memory checkpoint file '%s'\n",
+              filename);
+}
+
+void
+PhysicalMemory::unserialize(Checkpoint* cp, const string& section)
+{
+    // unserialize the locked addresses and map them to the
+    // appropriate memory controller
+    vector<Addr> lal_addr;
+    vector<int> lal_cid;
+    arrayParamIn(cp, section, "lal_addr", lal_addr);
+    arrayParamIn(cp, section, "lal_cid", lal_cid);
+    for (size_t i = 0; i < lal_addr.size(); ++i) {
+        AddrRangeMap<AbstractMemory*>::iterator m = addrMap.find(lal_addr[i]);
+        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
+    }
+
+    // unserialize the backing stores
+    unsigned int nbr_of_stores;
+    UNSERIALIZE_SCALAR(nbr_of_stores);
+
+    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
+        unserializeStore(cp, csprintf("%s.store%d", section, i));
+    }
+}
+
+void
+PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
+{
+    const uint32_t chunk_size = 16384;
+
+    unsigned int store_id;
+    UNSERIALIZE_SCALAR(store_id);
+
+    string filename;
+    UNSERIALIZE_SCALAR(filename);
+    string filepath = cp->cptDir + "/" + filename;
+
+    // open the memory file for reading
+    int fd = open(filepath.c_str(), O_RDONLY);
+    if (fd < 0) {
+        perror("open");
+        fatal("Can't open physical memory checkpoint file '%s'", filename);
+    }
+
+    gzFile compressed_mem = gzdopen(fd, "rb");
+    if (compressed_mem == NULL)
+        fatal("Insufficient memory to allocate compression state for %s\n",
+              filename);
+
+    uint8_t* pmem = backingStore[store_id].second;
+    AddrRange range = backingStore[store_id].first;
+
+    // unmap file that was mmapped in the constructor; this is
+    // done here to make sure that gzip and open don't muck with
+    // our nice large space of memory before we reallocate it
+    munmap((char*) pmem, range.size());
+
+    long range_size;
+    UNSERIALIZE_SCALAR(range_size);
+
+    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
+            filename, range_size);
+
+    if (range_size != range.size())
+        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
+              range_size, range.size());
+
+    pmem = (uint8_t*) mmap(NULL, range.size(), PROT_READ | PROT_WRITE,
+                           MAP_ANON | MAP_PRIVATE, -1, 0);
+
+    if (pmem == (void*) MAP_FAILED) {
+        perror("mmap");
+        fatal("Could not mmap physical memory!\n");
+    }
+
+    uint64_t curr_size = 0;
+    long* temp_page = new long[chunk_size];
+    long* pmem_current;
+    uint32_t bytes_read;
+    while (curr_size < range.size()) {
+        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
+        if (bytes_read == 0)
+            break;
+
+        assert(bytes_read % sizeof(long) == 0);
+
+        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
+            // Only copy bytes that are non-zero, so we don't give
+            // the VM system hell
+            if (*(temp_page + x) != 0) {
+                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
+                *pmem_current = *(temp_page + x);
+            }
+        }
+        curr_size += bytes_read;
+    }
+
+    delete[] temp_page;
+
+    if (gzclose(compressed_mem))
+        fatal("Close failed on physical memory checkpoint file '%s'\n",
+              filename);
+}
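
serializeStore writes the dump in passes because gzwrite takes and returns int, so a single call cannot move more than INT_MAX bytes. The same guard, extracted into a self-contained helper (gzwrite_all is a hypothetical name, not zlib API):

#include <zlib.h>
#include <climits>
#include <cstdint>

// write 'len' bytes to an open gzFile in passes no larger than
// INT_MAX, mirroring the loop in serializeStore above
bool gzwrite_all(gzFile gz, const uint8_t* buf, uint64_t len)
{
    uint64_t written = 0;
    while (written < len) {
        uint64_t remaining = len - written;
        unsigned int pass =
            remaining > (uint64_t) INT_MAX ? (unsigned int) INT_MAX
                                           : (unsigned int) remaining;
        if (gzwrite(gz, buf + written, pass) != (int) pass)
            return false;
        written += pass;
    }
    return true;
}

The read side in unserializeStore needs no such guard, since it pulls the file back in 16 KiB chunks and only writes non-zero words into the fresh mapping, leaving untouched pages to the kernel's zero-fill.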