/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Andreas Hansson
 */

#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>

#include "base/trace.hh"
#include "debug/BusAddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"
#include "mem/physical.hh"

using namespace std;

PhysicalMemory::PhysicalMemory(const string& _name,
                               const vector<AbstractMemory*>& _memories) :
    _name(_name), size(0)
{
    // add the memories from the system to the address map as
    // appropriate
    for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
         m != _memories.end(); ++m) {
        // only add the memory if it is part of the global address map
        if ((*m)->isInAddrMap()) {
            memories.push_back(*m);

            // calculate the total size once and for all
            size += (*m)->size();

            // add the range to our interval tree and make sure it does not
            // intersect an existing range
            if (addrMap.insert((*m)->getAddrRange(), *m) == addrMap.end())
                fatal("Memory address range for %s is overlapping\n",
                      (*m)->name());
        } else {
            DPRINTF(BusAddrRanges,
                    "Skipping memory %s that is not in global address map\n",
                    (*m)->name());
            // this type of memory is used e.g. as reference memory by
            // Ruby, and it also needs a backing store, but should
            // not be part of the global address map

            // simply do it independently, also note that this kind of
            // memory is allowed to overlap in the logical address
            // map
            vector<AbstractMemory*> unmapped_mems;
            unmapped_mems.push_back(*m);
            createBackingStore((*m)->getAddrRange(), unmapped_mems);
        }
    }

    // iterate over the increasing addresses and create as large
    // chunks as possible of contiguous space to be mapped to backing
    // store, also remember what memories constitute the range so we
    // can go and find out if we have to init their parts to zero
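    // (for example, neighbouring ranges 0x0:0xfffffff and
    // 0x10000000:0x1fffffff end up as one backing store, whereas a
    // hole between two ranges yields two separate mmap'ed regions)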
    AddrRange curr_range;
    vector<AbstractMemory*> curr_memories;
    for (AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.begin();
         r != addrMap.end(); ++r) {
        // simply skip past all memories that are null and hence do
        // not need any backing store
        if (!r->second->isNull()) {
            // if the current range is valid, decide if we split or
            // not
            if (curr_range.valid()) {
                // if the ranges are neighbours, then append; this
                // will eventually be extended to include support for
                // address striping and merging of interleaved ranges
                if (curr_range.end + 1 == r->first.start) {
                    DPRINTF(BusAddrRanges,
                            "Merging neighbouring ranges %x:%x and %x:%x\n",
                            curr_range.start, curr_range.end, r->first.start,
                            r->first.end);
                    // update the end of the range and add the current
                    // memory to the list of memories
                    curr_range.end = r->first.end;
                    curr_memories.push_back(r->second);
                } else {
                    // what we already have is valid, and this is not
                    // contiguous, so create the backing store and
                    // then start over
                    createBackingStore(curr_range, curr_memories);

                    // remember the current range and reset the current
                    // set of memories to contain this one
                    curr_range = r->first;
                    curr_memories.clear();
                    curr_memories.push_back(r->second);
                }
            } else {
                // we haven't seen any valid ranges yet, so remember
                // the current range and reset the current set of
                // memories to contain this one
                curr_range = r->first;
                curr_memories.clear();
                curr_memories.push_back(r->second);
            }
        }
    }

    // if we have a valid range upon finishing the iteration, then
    // create the backing store
    if (curr_range.valid())
        createBackingStore(curr_range, curr_memories);
}

void
PhysicalMemory::createBackingStore(AddrRange range,
                                   const vector<AbstractMemory*>& _memories)
{
    // perform the actual mmap
    DPRINTF(BusAddrRanges, "Creating backing store for range %x:%x\n",
            range.start, range.end);
    int map_flags = MAP_ANON | MAP_PRIVATE;
    uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
                                    PROT_READ | PROT_WRITE,
                                    map_flags, -1, 0);

    if (pmem == (uint8_t*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap %d bytes for range %x:%x!\n", range.size(),
              range.start, range.end);
    }

    // remember this backing store so we can checkpoint it and unmap
    // it appropriately
    backingStore.push_back(make_pair(range, pmem));

    // point the memories to their backing store, and if requested,
    // initialize the memory range to 0
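    // note that the memories are assumed to be sorted by start
    // address and to exactly tile the range (the constructor
    // guarantees this), so advancing pmem by each memory's size
    // hands every memory its own slice of the mapping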
    for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
         m != _memories.end(); ++m) {
        DPRINTF(BusAddrRanges, "Mapping memory %s to backing store\n",
                (*m)->name());
        (*m)->setBackingStore(pmem);

        // if it should be zero, then go and make it so
        if ((*m)->initToZero())
            memset(pmem, 0, (*m)->size());

        // advance the pointer for the next memory in line
        pmem += (*m)->size();
    }
}

PhysicalMemory::~PhysicalMemory()
{
    // unmap the backing store
    for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
         s != backingStore.end(); ++s)
        munmap((char*)s->second, s->first.size());
}

bool
PhysicalMemory::isMemAddr(Addr addr) const
{
    // see if the address is within the last matched range
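    // (the comparison operators between a bare Addr and an AddrRange
    // test containment, so this checks whether addr falls outside
    // the cached range)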
    if (addr != rangeCache) {
        // lookup in the interval tree
        AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.find(addr);
        if (r == addrMap.end()) {
            // not in the cache, and not in the tree
            return false;
        }
        // the range is in the tree, update the cache
        rangeCache = r->first;
    }

    assert(addrMap.find(addr) != addrMap.end());

    // either matched the cache or found in the tree
    return true;
}

AddrRangeList
PhysicalMemory::getConfAddrRanges() const
{
    // this could be done once in the constructor, but since it is unlikely to
    // be called more than once the iteration should not be a problem
    AddrRangeList ranges;
    for (vector<AbstractMemory*>::const_iterator m = memories.begin();
         m != memories.end(); ++m) {
        if ((*m)->isConfReported()) {
            ranges.push_back((*m)->getAddrRange());
        }
    }

    return ranges;
}

void
PhysicalMemory::access(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    AddrRangeMap<AbstractMemory*>::const_iterator m = addrMap.find(addr);
    assert(m != addrMap.end());
    m->second->access(pkt);
}

void
PhysicalMemory::functionalAccess(PacketPtr pkt)
{
    assert(pkt->isRequest());
    Addr addr = pkt->getAddr();
    AddrRangeMap<AbstractMemory*>::const_iterator m = addrMap.find(addr);
    assert(m != addrMap.end());
    m->second->functionalAccess(pkt);
}

void
PhysicalMemory::serialize(ostream& os)
{
    // serialize all the locked addresses and their context ids
    vector<Addr> lal_addr;
    vector<int> lal_cid;

    for (vector<AbstractMemory*>::iterator m = memories.begin();
         m != memories.end(); ++m) {
        const list<LockedAddr>& locked_addrs = (*m)->getLockedAddrList();
        for (list<LockedAddr>::const_iterator l = locked_addrs.begin();
             l != locked_addrs.end(); ++l) {
            lal_addr.push_back(l->addr);
            lal_cid.push_back(l->contextId);
        }
    }

    arrayParamOut(os, "lal_addr", lal_addr);
    arrayParamOut(os, "lal_cid", lal_cid);

    // serialize the backing stores
    unsigned int nbr_of_stores = backingStore.size();
    SERIALIZE_SCALAR(nbr_of_stores);

    unsigned int store_id = 0;
    // store each backing store memory segment in a file
    for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
         s != backingStore.end(); ++s) {
        nameOut(os, csprintf("%s.store%d", name(), store_id));
        serializeStore(os, store_id++, s->first, s->second);
    }
}

void
PhysicalMemory::serializeStore(ostream& os, unsigned int store_id,
                               AddrRange range, uint8_t* pmem)
{
    // we cannot use the address range for the name as the
    // memories that are not part of the address map can overlap
    string filename = "store" + to_string(store_id) + ".pmem";
    long range_size = range.size();

    DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
            filename, range_size);

    SERIALIZE_SCALAR(store_id);
    SERIALIZE_SCALAR(filename);
    SERIALIZE_SCALAR(range_size);

    // write memory file
    string filepath = Checkpoint::dir() + "/" + filename;
    int fd = creat(filepath.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n",
              filename);
    }

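    // wrap the file descriptor in a gzip stream; gzdopen takes
    // ownership of fd, so the gzclose below is what closes it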
    gzFile compressed_mem = gzdopen(fd, "wb");
    if (compressed_mem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    uint64_t pass_size = 0;

    // gzwrite fails if (int)len < 0 (gzwrite returns int)
    for (uint64_t written = 0; written < range.size();
         written += pass_size) {
        pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
            (uint64_t)INT_MAX : (range.size() - written);

        if (gzwrite(compressed_mem, pmem + written,
                    (unsigned int) pass_size) != (int) pass_size) {
            fatal("Write failed on physical memory checkpoint file '%s'\n",
                  filename);
        }
    }

    // close the compressed stream and check that the exit status
    // is zero
    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

void
PhysicalMemory::unserialize(Checkpoint* cp, const string& section)
{
    // unserialize the locked addresses and map them to the
    // appropriate memory controller
    vector<Addr> lal_addr;
    vector<int> lal_cid;
    arrayParamIn(cp, section, "lal_addr", lal_addr);
    arrayParamIn(cp, section, "lal_cid", lal_cid);
    for (size_t i = 0; i < lal_addr.size(); ++i) {
        AddrRangeMap<AbstractMemory*>::iterator m = addrMap.find(lal_addr[i]);
        m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
    }

    // unserialize the backing stores
    unsigned int nbr_of_stores;
    UNSERIALIZE_SCALAR(nbr_of_stores);

    for (unsigned int i = 0; i < nbr_of_stores; ++i) {
        unserializeStore(cp, csprintf("%s.store%d", section, i));
    }
}

void
PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
{
    const uint32_t chunk_size = 16384;

    unsigned int store_id;
    UNSERIALIZE_SCALAR(store_id);

    string filename;
    UNSERIALIZE_SCALAR(filename);
    string filepath = cp->cptDir + "/" + filename;

    // open the compressed memory file
    int fd = open(filepath.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    gzFile compressed_mem = gzdopen(fd, "rb");
    if (compressed_mem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    uint8_t* pmem = backingStore[store_id].second;
    AddrRange range = backingStore[store_id].first;

    // unmap the store that was mmapped in the constructor; this is
    // done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it
    munmap((char*) pmem, range.size());

    long range_size;
    UNSERIALIZE_SCALAR(range_size);

    DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
            filename, range_size);

    if (range_size != range.size())
        fatal("Memory range size has changed! Saw %lld, expected %lld\n",
              range_size, range.size());

    pmem = (uint8_t*) mmap(NULL, range.size(), PROT_READ | PROT_WRITE,
                           MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmem == (void*) MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

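    // note that neither backingStore nor the memories pointing into
    // it are updated with the fresh mapping, so this implicitly
    // relies on the kernel handing back the same virtual address
    // that was just unmapped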
    uint64_t curr_size = 0;
    long* temp_page = new long[chunk_size];
    long* pmem_current;
    uint32_t bytes_read;
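    // temp_page is chunk_size longs (i.e. larger than chunk_size
    // bytes), although each gzread call below fills at most
    // chunk_size bytes of it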
    while (curr_size < range.size()) {
        bytes_read = gzread(compressed_mem, temp_page, chunk_size);
        if (bytes_read == 0)
            break;

        assert(bytes_read % sizeof(long) == 0);

        for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
            // Only copy bytes that are non-zero, so we don't give
            // the VM system hell
            if (*(temp_page + x) != 0) {
                pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
                *pmem_current = *(temp_page + x);
            }
        }
        curr_size += bytes_read;
    }

    delete[] temp_page;

    if (gzclose(compressed_mem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}