physical.cc: changes between revisions 12776:410b60d8a397 and 12779:c1dc175bb9be
/*
 * Copyright (c) 2012, 2014, 2018 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */

#include "mem/physical.hh"

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"

/**
 * On Linux, MAP_NORESERVE allows us to simulate a very large memory
 * without committing to actually providing the swap space on the
 * host. On FreeBSD or OSX the MAP_NORESERVE flag does not exist,
 * so simply make it 0.
 */
#if defined(__APPLE__) || defined(__FreeBSD__)
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#endif

using namespace std;

PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories,
                               bool mmap_using_noreserve) :
    // 12776:410b60d8a397:
    _name(_name), rangeCache(addrMap.end()), size(0),
    mmapUsingNoReserve(mmap_using_noreserve)
    // 12779:c1dc175bb9be:
    _name(_name), size(0), mmapUsingNoReserve(mmap_using_noreserve)
{
    if (mmap_using_noreserve)
        warn("Not reserving swap space. May cause SIGSEGV on actual usage\n");

    // add the memories from the system to the address map as
    // appropriate
    for (const auto& m : _memories) {
        // only add the memory if it is part of the global address map
        if (m->isInAddrMap()) {
            memories.push_back(m);

            // calculate the total size once and for all
            size += m->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
                     "Memory address range for %s is overlapping\n",
                     m->name());
        } else {
            // this type of memory is used e.g. as reference memory by
            // Ruby, and it also needs a backing store, but should
            // not be part of the global address map
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    m->name());

            // sanity check
            fatal_if(m->getAddrRange().interleaved(),
                     "Memory %s that is not in the global address map cannot "
                     "be interleaved\n", m->name());

            // simply do it independently, also note that this kind of
            // memory is allowed to overlap in the logical address
            // map
            vector<AbstractMemory*> unmapped_mems{m};
            createBackingStore(m->getAddrRange(), unmapped_mems,
                               m->isConfReported(), m->isInAddrMap(),
                               m->isKvmMap());
        }
    }

    // iterate over the increasing addresses and chunks of contiguous
    // space to be mapped to backing store, create it and inform the
    // memories
    vector<AddrRange> intlv_ranges;
    vector<AbstractMemory*> curr_memories;
    for (const auto& r : addrMap) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r.second->isNull()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    AddrRange merged_range(intlv_ranges);

                    AbstractMemory *f = curr_memories.front();
                    for (const auto& c : curr_memories)
                        if (f->isConfReported() != c->isConfReported() ||
                            f->isInAddrMap() != c->isInAddrMap() ||
                            f->isKvmMap() != c->isKvmMap())
                            fatal("Inconsistent flags in an interleaved "
                                  "range\n");

                    createBackingStore(merged_range, curr_memories,
                                       f->isConfReported(), f->isInAddrMap(),
                                       f->isKvmMap());

                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r.first);
                curr_memories.push_back(r.second);
            } else {
                vector<AbstractMemory*> single_memory{r.second};
                createBackingStore(r.first, single_memory,
                                   r.second->isConfReported(),
                                   r.second->isInAddrMap(),
                                   r.second->isKvmMap());
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);

        AbstractMemory *f = curr_memories.front();
        for (const auto& c : curr_memories)
            if (f->isConfReported() != c->isConfReported() ||
                f->isInAddrMap() != c->isInAddrMap() ||
                f->isKvmMap() != c->isKvmMap())
                fatal("Inconsistent flags in an interleaved "
                      "range\n");

        createBackingStore(merged_range, curr_memories,
                           f->isConfReported(), f->isInAddrMap(),
                           f->isKvmMap());
    }
}

void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories,
                                   bool conf_table_reported,
                                   bool in_addr_map, bool kvm_map)
{
    panic_if(range.interleaved(),
             "Cannot create backing store for interleaved range %s\n",
             range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());
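    // map an anonymous, private region: the backing store is not tied to
    // any file on the host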
    int map_flags = MAP_ANON | MAP_PRIVATE;

    // to be able to simulate very large memories, the user can opt to
    // pass noreserve to mmap
    if (mmapUsingNoReserve) {
        map_flags |= MAP_NORESERVE;
    }

    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.emplace_back(range, pmem,
                              conf_table_reported, in_addr_map, kvm_map);

    // point the memories to their backing store
    for (const auto& m : _memories) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                m->name());
        m->setBackingStore(pmem);
    }
}

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (auto& s : backingStore)
        munmap((char*)s.pmem, s.range.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    // 12776:410b60d8a397:
    // see if the address is within the last matched range
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        return true;
    } else {
        // lookup in the interval tree
        const auto& r = addrMap.contains(addr);
        if (r == addrMap.end()) {
            // not in the cache, and not in the tree
            return false;
        }
        // the range is in the tree, update the cache
        rangeCache = r;
        return true;
    }
    // 12779:c1dc175bb9be:
    return addrMap.contains(addr) != addrMap.end();
}

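// Return the address ranges that are reported in the configuration table,
// merging any interleaved ranges into single contiguous ranges.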
AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely to
    // be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    vector<AddrRange> intlv_ranges;
    for (const auto& r : addrMap) {
        if (r.second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we already got interleaved ranges that are not
                // part of the same range, then first do a merge
                // before we add the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r.first);
            } else {
                // keep the current range
                ranges.push_back(r.first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    // 12776:410b60d8a397:
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        rangeCache->second->access(pkt);
    } else {
        // do not update the cache here, as we typically call
        // isMemAddr before calling access
        const auto& m = addrMap.contains(addr);
        assert(m != addrMap.end());
        m->second->access(pkt);
    }
    // 12779:c1dc175bb9be:
    const auto& m = addrMap.contains(addr);
    assert(m != addrMap.end());
    m->second->access(pkt);
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    // 12776:410b60d8a397:
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        rangeCache->second->functionalAccess(pkt);
    } else {
        // do not update the cache here, as we typically call
        // isMemAddr before calling functionalAccess
        const auto& m = addrMap.contains(addr);
        assert(m != addrMap.end());
        m->second->functionalAccess(pkt);
    }
    // 12779:c1dc175bb9be:
    const auto& m = addrMap.contains(addr);
    assert(m != addrMap.end());
    m->second->functionalAccess(pkt);
}

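// Checkpoint the physical memory: save the list of locked addresses and
// their context ids, then write each backing-store segment to its own
// compressed file.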
void
PhysicalMemory::serialize(CheckpointOut &cp) const
{
    // serialize all the locked addresses and their context ids
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;

    for (auto& m : memories) {
        const list<LockedAddr>& locked_addrs = m->getLockedAddrList();
        for (const auto& l : locked_addrs) {
            lal_addr.push_back(l.addr);
            lal_cid.push_back(l.contextId);
        }
    }

    SERIALIZE_CONTAINER(lal_addr);
    SERIALIZE_CONTAINER(lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (auto& s : backingStore) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
        serializeStore(cp, store_id++, s.range, s.pmem);
    }
}

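// Write a single backing-store segment to a gzip-compressed file in the
// checkpoint directory, recording its id, file name and size in the
// checkpoint itself.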
void
PhysicalMemory::serializeStore(CheckpointOut &cp, unsigned int store_id,
                               AddrRange range, uint8_t* pmem) const
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = name() + ".store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    string filepath = CheckpointIn::dir() + "/" + filename.c_str();
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite fails if (int)len < 0 (gzwrite returns int)
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);

}

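// Restore a checkpoint: re-register the locked addresses with their
// memories and then repopulate every backing-store segment.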
void
PhysicalMemory::unserialize(CheckpointIn &cp)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;
    UNSERIALIZE_CONTAINER(lal_addr);
    UNSERIALIZE_CONTAINER(lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        const auto& m = addrMap.contains(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", i));
        unserializeStore(cp);
    }

}

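// Read one gzip-compressed segment back into the already-mmapped backing
// store, copying only non-zero words so the mapping stays sparse.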
void
PhysicalMemory::unserializeStore(CheckpointIn &cp)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp.cptDir + "/" + filename;

    // open the compressed memory file
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // we've already got the actual backing store mapped
    uint8_t* pmem = backingStore[store_id].pmem;
    AddrRange range = backingStore[store_id].range;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // Only copy bytes that are non-zero, so we don't give
            // the VM system hell
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}