physical.cc (11321:02e930db812d → 11446:ae6e3dd1c32c)
/*
 * Copyright (c) 2012, 2014 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */

#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/AddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"
#include "mem/physical.hh"

/**
 * On Linux, MAP_NORESERVE allows us to simulate a very large memory
 * without committing to actually providing the swap space on the
 * host. On FreeBSD or OSX the MAP_NORESERVE flag does not exist,
 * so simply make it 0.
 */
#if defined(__APPLE__) || defined(__FreeBSD__)
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#endif

using namespace std;

PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories,
                               bool mmap_using_noreserve) :
    _name(_name), rangeCache(addrMap.end()), size(0),
    mmapUsingNoReserve(mmap_using_noreserve)
{
    if (mmap_using_noreserve)
        warn("Not reserving swap space. May cause SIGSEGV on actual usage\n");

    // add the memories from the system to the address map as
    // appropriate
    for (const auto& m : _memories) {
        // only add the memory if it is part of the global address map
        if (m->isInAddrMap()) {
            memories.push_back(m);

            // calculate the total size once and for all
            size += m->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            fatal_if(addrMap.insert(m->getAddrRange(), m) == addrMap.end(),
                     "Memory address range for %s is overlapping\n",
                     m->name());
        } else {
            // this type of memory is used e.g. as reference memory by
            // Ruby; it also needs a backing store, but should not be
            // part of the global address map
            DPRINTF(AddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    m->name());

            // sanity check
            fatal_if(m->getAddrRange().interleaved(),
                     "Memory %s that is not in the global address map cannot "
                     "be interleaved\n", m->name());

            // simply do it independently; also note that such memories
            // are allowed to overlap in the logical address map
            vector<AbstractMemory*> unmapped_mems{m};
            createBackingStore(m->getAddrRange(), unmapped_mems);
        }
    }

    // iterate over the increasing addresses and chunks of contiguous
    // space to be mapped to backing store, create it and inform the
    // memories
    vector<AddrRange> intlv_ranges;
    vector<AbstractMemory*> curr_memories;
    for (const auto& r : addrMap) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r.second->isNull()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we have already collected interleaved ranges that
                // are not part of the same range, merge them before
                // adding the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    AddrRange merged_range(intlv_ranges);
                    createBackingStore(merged_range, curr_memories);
                    intlv_ranges.clear();
                    curr_memories.clear();
                }
                intlv_ranges.push_back(r.first);
                curr_memories.push_back(r.second);
            } else {
                vector<AbstractMemory*> single_memory{r.second};
                createBackingStore(r.first, single_memory);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged, go
    // ahead and do it
    if (!intlv_ranges.empty()) {
        AddrRange merged_range(intlv_ranges);
        createBackingStore(merged_range, curr_memories);
    }
}

void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories)
{
    panic_if(range.interleaved(),
             "Cannot create backing store for interleaved range %s\n",
             range.to_string());

    // perform the actual mmap
    DPRINTF(AddrRanges, "Creating backing store for range %s with size %d\n",
            range.to_string(), range.size());
    int map_flags = MAP_ANON | MAP_PRIVATE;

    // to be able to simulate very large memories, the user can opt to
    // pass noreserve to mmap
    if (mmapUsingNoReserve) {
        map_flags |= MAP_NORESERVE;
    }

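    // request an anonymous, private mapping: no file descriptor (-1) or
    // offset is involved, and the pages start out zero-filled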
    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %s!\n", range.size(),
              range.to_string());
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.push_back(make_pair(range, pmem));

    // point the memories to their backing store
    for (const auto& m : _memories) {
        DPRINTF(AddrRanges, "Mapping memory %s to backing store\n",
                m->name());
        m->setBackingStore(pmem);
    }
}

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (auto& s : backingStore)
        munmap((char*)s.second, s.first.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    // see if the address is within the last matched range
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        return true;
    } else {
        // lookup in the interval tree
        const auto& r = addrMap.find(addr);
        if (r == addrMap.end()) {
            // not in the cache, and not in the tree
            return false;
        }
        // the range is in the tree, update the cache
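        // caching the iterator is safe, as addrMap is only populated in
        // the constructor and never modified afterwards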
        rangeCache = r;
        return true;
    }
}

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely to
    // be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    vector<AddrRange> intlv_ranges;
    for (const auto& r : addrMap) {
        if (r.second->isConfReported()) {
            // if the range is interleaved then save it for now
            if (r.first.interleaved()) {
                // if we have already collected interleaved ranges that
                // are not part of the same range, merge them before
                // adding the new one
                if (!intlv_ranges.empty() &&
                    !intlv_ranges.back().mergesWith(r.first)) {
                    ranges.push_back(AddrRange(intlv_ranges));
                    intlv_ranges.clear();
                }
                intlv_ranges.push_back(r.first);
            } else {
                // keep the current range
                ranges.push_back(r.first);
            }
        }
    }

    // if there are still interleaved ranges waiting to be merged,
    // go ahead and do it
    if (!intlv_ranges.empty()) {
        ranges.push_back(AddrRange(intlv_ranges));
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        rangeCache->second->access(pkt);
    } else {
        // do not update the cache here, as we typically call
        // isMemAddr before calling access
        const auto& m = addrMap.find(addr);
        assert(m != addrMap.end());
        m->second->access(pkt);
    }
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    if (rangeCache != addrMap.end() && rangeCache->first.contains(addr)) {
        rangeCache->second->functionalAccess(pkt);
    } else {
        // do not update the cache here, as we typically call
        // isMemAddr before calling functionalAccess
        const auto& m = addrMap.find(addr);
        assert(m != addrMap.end());
        m->second->functionalAccess(pkt);
    }
}

void
PhysicalMemory::serialize(CheckpointOut &cp) const
{
    // serialize all the locked addresses and their context ids
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;

    for (auto& m : memories) {
        const list<LockedAddr>& locked_addrs = m->getLockedAddrList();
        for (const auto& l : locked_addrs) {
            lal_addr.push_back(l.addr);
            lal_cid.push_back(l.contextId);
        }
    }

    SERIALIZE_CONTAINER(lal_addr);
    SERIALIZE_CONTAINER(lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (auto& s : backingStore) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
        serializeStore(cp, store_id++, s.first, s.second);
    }
}

void
PhysicalMemory::serializeStore(CheckpointOut &cp, unsigned int store_id,
                               AddrRange range, uint8_t* pmem) const
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = name() + ".store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    string filepath = CheckpointIn::dir() + "/" + filename.c_str();
    gzFile compressed_mem = gzopen(filepath.c_str(), "wb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite returns an int and fails if (int)len < 0, so write the
    // data in passes of at most INT_MAX bytes
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);

}

void
PhysicalMemory::unserialize(CheckpointIn &cp)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<ContextID> lal_cid;
    UNSERIALIZE_CONTAINER(lal_addr);
    UNSERIALIZE_CONTAINER(lal_cid);
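    // the two vectors are index-aligned: entry i pairs a locked address
    // with the context id that owns it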
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        const auto& m = addrMap.find(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("store%d", i));
        unserializeStore(cp);
    }

}

void
PhysicalMemory::unserializeStore(CheckpointIn &cp)
{
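    // number of bytes requested from gzread in each pass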
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp.cptDir + "/" + filename;

    // open the compressed memory file
    gzFile compressed_mem = gzopen(filepath.c_str(), "rb");
    if (compressed_mem == NULL)
        fatal("Can't open physical memory checkpoint file '%s'", filename);

    // we've already got the actual backing store mapped
    uint8_t* pmem = backingStore[store_id].second;
    AddrRange range = backingStore[store_id].first;

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // only copy words that are non-zero, so we don't needlessly
            // dirty pages in the host VM system
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}