physical.cc (9404:c194718a592c) physical.cc (9405:c0a0593510db)
/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */

#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/BusAddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"
#include "mem/physical.hh"

using namespace std;

PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories) :
    _name(_name), size(0)
{
    // add the memories from the system to the address map as
    // appropriate
    for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
         m != _memories.end(); ++m) {
        // only add the memory if it is part of the global address map
        if ((*m)->isInAddrMap()) {
            memories.push_back(*m);

            // calculate the total size once and for all
            size += (*m)->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            if (addrMap.insert((*m)->getAddrRange(), *m) == addrMap.end())
                fatal("Memory address range for %s is overlapping\n",
                      (*m)->name());
        } else {
            DPRINTF(BusAddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    (*m)->name());
            // this type of memory is used e.g. as reference memory by
            // Ruby, and it also needs a backing store, but should
            // not be part of the global address map

            // simply do it independently; also note that memories of
            // this kind are allowed to overlap in the logical address
            // map
            vector<AbstractMemory*> unmapped_mems;
            unmapped_mems.push_back(*m);
            createBackingStore((*m)->getAddrRange(), unmapped_mems);
        }
    }

    // iterate over the increasing addresses and chunks of contiguous
    // space to be mapped to backing store, and also remember what
    // memories constitute the range so we can go and find out if we
    // have to init their parts to zero
    vector<AbstractMemory*> curr_memories;
    for (AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.begin();
         r != addrMap.end(); ++r) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r->second->isNull()) {
            // this will eventually be extended to support merging of
            // interleaved address ranges, and although it might seem
            // overly complicated at this point it will all be used
            curr_memories.push_back(r->second);
            createBackingStore(r->first, curr_memories);
            curr_memories.clear();
        }
    }
}

void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories)
{
    // perform the actual mmap
    DPRINTF(BusAddrRanges, "Creating backing store for range %s\n",
            range.to_string());
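    // note that an anonymous, private mapping is demand-zeroed and
    // copy-on-write, so host pages are only allocated once the simulated
    // memory is actually touched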
    int map_flags = MAP_ANON | MAP_PRIVATE;
    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
132 fatal("Could not mmap %d bytes for range %s!\n", range.size(),
133 range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.push_back(make_pair(range, pmem));

    // count how many of the memories are to be zero initialized so we
    // can see if some but not all have this parameter set
    uint32_t init_to_zero = 0;

    // point the memories to their backing store, and if requested,
    // initialize the memory range to 0
    for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
         m != _memories.end(); ++m) {
        DPRINTF(BusAddrRanges, "Mapping memory %s to backing store\n",
                (*m)->name());
        (*m)->setBackingStore(pmem);

        // if it should be zero, then go and make it so
        if ((*m)->initToZero()) {
            ++init_to_zero;
        }
    }

    if (init_to_zero != 0) {
        if (init_to_zero != _memories.size())
160 fatal("Some, but not all memories in range %s are set zero\n",
161 range.to_string());
162
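        // the memset touches every byte of the range, so the whole
        // backing store is committed by the host up front rather than
        // lazily on first access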
        memset(pmem, 0, range.size());
    }
}

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
         s != backingStore.end(); ++s)
        munmap((char*)s->second, s->first.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    // see if the address is within the last matched range
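    // (rangeCache remembers the most recently matched range, so bursts
    // of accesses to the same memory skip the interval-tree lookup)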
    if (!rangeCache.contains(addr)) {
        // lookup in the interval tree
        AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.find(addr);
        if (r == addrMap.end()) {
            // not in the cache, and not in the tree
            return false;
        }
        // the range is in the tree, update the cache
        rangeCache = r->first;
    }

    assert(addrMap.find(addr) != addrMap.end());

    // either matched the cache or found in the tree
    return true;
}

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely to
    // be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    for (vector<AbstractMemory*>::const_iterator m = memories.begin();
         m != memories.end(); ++m) {
        if ((*m)->isConfReported()) {
            ranges.push_back((*m)->getAddrRange());
        }
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
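    // the packet's address must map to one of the memories in addrMap;
    // an unmapped address trips the assert below (functionalAccess()
    // makes the same assumption)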
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    AddrRangeMap<AbstractMemory*>::const_iterator m = addrMap.find(addr);
    assert(m != addrMap.end());
    m->second->access(pkt);
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    AddrRangeMap<AbstractMemory*>::const_iterator m = addrMap.find(addr);
    assert(m != addrMap.end());
    m->second->functionalAccess(pkt);
}

void
PhysicalMemory::serialize(ostream& os)
{
    // serialize all the locked addresses and their context ids
    vector<Addr> lal_addr;
    vector<int> lal_cid;

    for (vector<AbstractMemory*>::iterator m = memories.begin();
         m != memories.end(); ++m) {
        const list<LockedAddr>& locked_addrs = (*m)->getLockedAddrList();
        for (list<LockedAddr>::const_iterator l = locked_addrs.begin();
             l != locked_addrs.end(); ++l) {
            lal_addr.push_back(l->addr);
            lal_cid.push_back(l->contextId);
        }
    }

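    // lal_addr and lal_cid are parallel arrays; unserialize() pairs the
    // entries up again and hands each locked address back to the memory
    // that owns the matching range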
    arrayParamOut(os, "lal_addr", lal_addr);
    arrayParamOut(os, "lal_cid", lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
         s != backingStore.end(); ++s) {
        nameOut(os, csprintf("%s.store%d", name(), store_id));
        serializeStore(os, store_id++, s->first, s->second);
    }
}

void
PhysicalMemory::serializeStore(ostream& os, unsigned int store_id,
                               AddrRange range, uint8_t* pmem)
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = name() + ".store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    string filepath = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(filepath.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);
    }

    gzFile compressed_mem = gzdopen(fd, "wb");
    if (compressed_mem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite takes an int length, so write at most INT_MAX bytes per pass
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

void
PhysicalMemory::unserialize(Checkpoint* cp, const string& section)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<int> lal_cid;
    arrayParamIn(cp, section, "lal_addr", lal_addr);
    arrayParamIn(cp, section, "lal_cid", lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        AddrRangeMap<AbstractMemory*>::iterator m = addrMap.find(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        unserializeStore(cp, csprintf("%s.store%d", section, i));
    }
}

void
PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp->cptDir + "/" + filename;

    // open the compressed memory file
    int fd = open(filepath.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    gzFile compressed_mem = gzdopen(fd, "rb");
    if (compressed_mem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    uint8_t* pmem = backingStore[store_id].second;
    AddrRange range = backingStore[store_id].first;

    // unmap the backing store that was mmapped in the constructor;
    // this is done here to make sure that gzip and open don't muck
    // with our nice large space of memory before we reallocate it
    munmap((char*) pmem, range.size());

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    pmem = (uint8_t*) mmap(NULL, range.size(), PROT_READ | PROT_WRITE,
                           MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmem == (void*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

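    // the fresh anonymous mapping above is zero-filled by the host, so
    // only non-zero words have to be copied out of the checkpoint and
    // untouched pages are never committed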
    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // Only copy bytes that are non-zero, so we don't give
            // the VM system hell
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}