physical.cc — comparison of revisions 3349:fec4a86fa212 and 3584:8c3cdb2c001c (the only difference between the two is the bounds assertion at the top of PhysicalMemory::doFunctionalAccess()).
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Ali Saidi
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <iostream>
#include <string>

#include "arch/isa_traits.hh"
#include "base/misc.hh"
#include "config/full_system.hh"
#include "mem/physical.hh"
#include "sim/builder.hh"
#include "sim/eventq.hh"
#include "sim/host.hh"

using namespace std;
using namespace TheISA;

PhysicalMemory::PhysicalMemory(Params *p)
    : MemObject(p->name), pmemAddr(NULL), port(NULL), lat(p->latency), _params(p)
{
    if (params()->addrRange.size() % TheISA::PageBytes != 0)
        panic("Memory Size not divisible by page size\n");

    int map_flags = MAP_ANON | MAP_PRIVATE;
    pmemAddr = (uint8_t *)mmap(NULL, params()->addrRange.size(),
                               PROT_READ | PROT_WRITE, map_flags, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap!\n");
    }

    pagePtr = 0;
}
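
/*
 * Illustrative sketch (not part of the simulator): the constructor above
 * backs the simulated physical memory with an anonymous, private mapping,
 * so the host OS only commits pages lazily as the simulated memory is
 * actually touched.  A minimal standalone program using the same technique
 * might look like this (sizes and names here are hypothetical):
 *
 *     #include <sys/mman.h>
 *     #include <stdint.h>
 *     #include <stdio.h>
 *
 *     int main()
 *     {
 *         size_t size = 128 * 1024 * 1024;   // e.g. 128 MB of "physical" memory
 *         void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                        MAP_ANON | MAP_PRIVATE, -1, 0);
 *         if (p == MAP_FAILED) {
 *             perror("mmap");
 *             return 1;
 *         }
 *         uint8_t *mem = (uint8_t *)p;
 *         mem[0x2000] = 0xab;                // first touch faults in a single page
 *         munmap(mem, size);
 *         return 0;
 *     }
 */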

void
PhysicalMemory::init()
{
    if (!port)
        panic("PhysicalMemory not connected to anything!");
    port->sendStatusChange(Port::RangeChange);
}

PhysicalMemory::~PhysicalMemory()
{
    if (pmemAddr)
        munmap(pmemAddr, params()->addrRange.size());
    //Remove memPorts?
}

Addr
PhysicalMemory::new_page()
{
    Addr return_addr = pagePtr << LogVMPageSize;
    return_addr += params()->addrRange.start;

    ++pagePtr;
    return return_addr;
}
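
// Example (illustrative values): with 8 KB pages (LogVMPageSize == 13) and
// addrRange.start == 0x0, successive calls to new_page() return 0x0000,
// 0x2000, 0x4000, ... -- pagePtr acts as a simple bump allocator of
// page-aligned physical addresses within this memory's range.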

int
PhysicalMemory::deviceBlockSize()
{
    // Can accept any size request
    return 0;
}

Tick
PhysicalMemory::calculateLatency(PacketPtr pkt)
{
    return lat;
}


// Add load-locked to tracking list.  Should only be called if the
// operation is a load and the LOCKED flag is set.
void
PhysicalMemory::trackLoadLocked(Request *req)
{
    Addr paddr = LockedAddr::mask(req->getPaddr());

    // first we check if we already have a locked addr for this
    // xc.  Since each xc only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: cpu %d thread %d addr %#x\n",
                    req->getCpuNum(), req->getThreadNum(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // no record for this xc: need to allocate a new one
    DPRINTF(LLSC, "Adding lock record: cpu %d thread %d addr %#x\n",
            req->getCpuNum(), req->getThreadNum(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}


// Called on *writes* only... both regular stores and
// store-conditional operations.  Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
PhysicalMemory::checkLockedAddrList(Request *req)
{
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLocked = req->isLocked();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool success = !isLocked;

    // Iterate over list.  Note that there could be multiple matching
    // records, as more than one context could have done a load locked
    // to this location.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    while (i != lockedAddrList.end()) {

        if (i->addr == paddr) {
            // we have a matching address

            if (isLocked && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory
                // system can tell, the requesting context's lock is
                // still valid.
                DPRINTF(LLSC, "StCond success: cpu %d thread %d addr %#x\n",
                        req->getCpuNum(), req->getThreadNum(), paddr);
                success = true;
            }

            // Get rid of our record of this lock and advance to next
            DPRINTF(LLSC, "Erasing lock record: cpu %d thread %d addr %#x\n",
                    i->cpuNum, i->threadNum, paddr);
            i = lockedAddrList.erase(i);
        }
        else {
            // no match: advance to next record
            ++i;
        }
    }

    if (isLocked) {
        req->setScResult(success ? 1 : 0);
    }

    return success;
}
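
/*
 * Rough usage sketch (illustrative, not part of the simulator): these two
 * hooks implement the memory-side half of load-locked / store-conditional.
 * The access path is expected to drive them roughly as follows, using only
 * calls that appear in this file (writeOK() is presumably the physical.hh
 * helper that consults checkLockedAddrList()):
 *
 *     // on a load with the LOCKED flag set:
 *     trackLoadLocked(req);        // remember (context, locked block)
 *
 *     // on any write, whether an ordinary store or a store-conditional:
 *     if (writeOK(req)) {
 *         // perform the store; matching lock records have been erased,
 *         // and a store-conditional has had its 0/1 outcome recorded via
 *         // req->setScResult() inside checkLockedAddrList().
 *     }
 *
 * LockedAddr::mask() presumably aligns the paddr to the lock granularity,
 * so any store that lands in the same block as an outstanding load-locked
 * breaks that lock.
 */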

void
PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
{
    // Revision 3349 asserted only that the access fits within the memory's size:
    //     assert(pkt->getAddr() + pkt->getSize() <= params()->addrRange.size());
    // Revision 3584 checks the access against the configured address range instead:
    assert(pkt->getAddr() + pkt->getSize() > params()->addrRange.start &&
           pkt->getAddr() + pkt->getSize() <= params()->addrRange.start +
                                              params()->addrRange.size());
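
    // Example (illustrative numbers): with addrRange.start == 0x10000 and
    // addrRange.size() == 0x10000, a 4-byte access at 0x1fffc passes
    // (0x20000 > 0x10000 and 0x20000 <= 0x20000), while one at 0x1fffd
    // fails the upper bound (0x20001 > 0x20000).
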
    if (pkt->isRead()) {
        if (pkt->req->isLocked()) {
            trackLoadLocked(pkt->req);
        }
        DPRINTF(MemoryAccess, "Performing Read of size %i on address 0x%x\n",
                pkt->getSize(), pkt->getAddr());
        memcpy(pkt->getPtr<uint8_t>(),
               pmemAddr + pkt->getAddr() - params()->addrRange.start,
               pkt->getSize());
    }
    else if (pkt->isWrite()) {
        if (writeOK(pkt->req)) {
            DPRINTF(MemoryAccess, "Performing Write of size %i on address 0x%x\n",
                    pkt->getSize(), pkt->getAddr());
            memcpy(pmemAddr + pkt->getAddr() - params()->addrRange.start,
                   pkt->getPtr<uint8_t>(), pkt->getSize());
        }
    }
    else if (pkt->isInvalidate()) {
        // upgrade or invalidate
        pkt->flags |= SATISFIED;
    }
    else {
        panic("unimplemented");
    }

    pkt->result = Packet::Success;
}
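
// Example (illustrative numbers): with addrRange.start == 0x10000, a packet
// addressed to 0x12340 is serviced at host address pmemAddr + 0x2340 -- the
// read memcpy copies out of, and the write memcpy copies into, that offset
// in the anonymous mapping set up by the constructor.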

Port *
PhysicalMemory::getPort(const std::string &if_name, int idx)
{
    if (if_name == "port" && idx == -1) {
        if (port != NULL)
            panic("PhysicalMemory::getPort: additional port requested to memory!");
        port = new MemoryPort(name() + "-port", this);
        return port;
    } else if (if_name == "functional") {
        /* special port for functional writes at startup and for the memtester */
        return new MemoryPort(name() + "-funcport", this);
    } else {
        panic("PhysicalMemory::getPort: unknown port %s requested", if_name);
    }
}

void
PhysicalMemory::recvStatusChange(Port::Status status)
{
}

PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
                                       PhysicalMemory *_memory)
    : SimpleTimingPort(_name), memory(_memory)
{ }

void
PhysicalMemory::MemoryPort::recvStatusChange(Port::Status status)
{
    memory->recvStatusChange(status);
}

void
PhysicalMemory::MemoryPort::getDeviceAddressRanges(AddrRangeList &resp,
                                                   AddrRangeList &snoop)
{
    memory->getAddressRanges(resp, snoop);
}

void
PhysicalMemory::getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop)
{
    snoop.clear();
    resp.clear();
    resp.push_back(RangeSize(params()->addrRange.start,
                             params()->addrRange.size()));
}

int
PhysicalMemory::MemoryPort::deviceBlockSize()
{
    return memory->deviceBlockSize();
}

Tick
PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
{
    memory->doFunctionalAccess(pkt);
    return memory->calculateLatency(pkt);
}

void
PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
{
    // Default implementation of SimpleTimingPort::recvFunctional()
    // calls recvAtomic() and throws away the latency; we can save a
    // little here by just not calculating the latency.
    memory->doFunctionalAccess(pkt);
}

unsigned int
PhysicalMemory::drain(Event *de)
{
    int count = port->drain(de);
    if (count)
        changeState(Draining);
    else
        changeState(Drained);
    return count;
}

void
PhysicalMemory::serialize(ostream &os)
{
    gzFile compressedMem;
    string filename = name() + ".physmem";

    SERIALIZE_SCALAR(filename);

    // write memory file
    string thefile = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n", filename);
    }

    compressedMem = gzdopen(fd, "wb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    if (gzwrite(compressedMem, pmemAddr, params()->addrRange.size()) !=
        params()->addrRange.size()) {
        fatal("Write failed on physical memory checkpoint file '%s'\n",
              filename);
    }

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
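
/*
 * Illustrative sketch (not part of the simulator): serialize() above streams
 * the raw memory image through zlib, which keeps checkpoints small when most
 * of simulated memory is still zero, and unserialize() below reads it back
 * in chunks.  A minimal standalone round trip with the same zlib calls
 * (hypothetical file name and size):
 *
 *     #include <zlib.h>
 *     #include <string.h>
 *
 *     int main()
 *     {
 *         const unsigned size = 1 << 16;
 *         static char image[size], restore[size];
 *         strcpy(image, "checkpoint me");
 *
 *         gzFile out = gzopen("mem.physmem", "wb");       // compress and write
 *         if (!out || gzwrite(out, image, size) != (int)size || gzclose(out) != Z_OK)
 *             return 1;
 *
 *         gzFile in = gzopen("mem.physmem", "rb");        // read and decompress
 *         if (!in || gzread(in, restore, size) != (int)size || gzclose(in) != Z_OK)
 *             return 1;
 *
 *         return memcmp(image, restore, size) != 0;       // 0 on a faithful round trip
 *     }
 */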

void
PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
{
    gzFile compressedMem;
    long *tempPage;
    long *pmem_current;
    uint64_t curSize;
    uint32_t bytesRead;
    const int chunkSize = 16384;

    string filename;

    UNSERIALIZE_SCALAR(filename);

    filename = cp->cptDir + "/" + filename;

    // open the memory checkpoint file
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    compressedMem = gzdopen(fd, "rb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // unmap the region that was mmapped in the constructor.
    // This is done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it.
    munmap(pmemAddr, params()->addrRange.size());

    pmemAddr = (uint8_t *)mmap(NULL, params()->addrRange.size(),
                               PROT_READ | PROT_WRITE,
                               MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

    curSize = 0;
    tempPage = (long *)malloc(chunkSize);
    if (tempPage == NULL)
        fatal("Unable to malloc memory to read file %s\n", filename);

    /* Only copy bytes that are non-zero, so we don't give the VM system hell */
    while (curSize < params()->addrRange.size()) {
        bytesRead = gzread(compressedMem, tempPage, chunkSize);
        if (bytesRead != chunkSize &&
            bytesRead != params()->addrRange.size() - curSize)
            fatal("Read failed on physical memory checkpoint file '%s'"
                  " got %d bytes, expected %d or %d bytes\n",
                  filename, bytesRead, chunkSize,
                  params()->addrRange.size() - curSize);

        assert(bytesRead % sizeof(long) == 0);

        for (int x = 0; x < bytesRead / sizeof(long); x++) {
            if (*(tempPage + x) != 0) {
                pmem_current = (long *)(pmemAddr + curSize + x * sizeof(long));
                *pmem_current = *(tempPage + x);
            }
        }
        curSize += bytesRead;
    }

    free(tempPage);

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}


BEGIN_DECLARE_SIM_OBJECT_PARAMS(PhysicalMemory)

    Param<string> file;
    Param<Range<Addr> > range;
    Param<Tick> latency;

END_DECLARE_SIM_OBJECT_PARAMS(PhysicalMemory)

BEGIN_INIT_SIM_OBJECT_PARAMS(PhysicalMemory)

    INIT_PARAM_DFLT(file, "memory mapped file", ""),
    INIT_PARAM(range, "Device Address Range"),
    INIT_PARAM(latency, "Memory access latency")

END_INIT_SIM_OBJECT_PARAMS(PhysicalMemory)

CREATE_SIM_OBJECT(PhysicalMemory)
{
    PhysicalMemory::Params *p = new PhysicalMemory::Params;
    p->name = getInstanceName();
    p->addrRange = range;
    p->latency = latency;
    return new PhysicalMemory(p);
}

REGISTER_SIM_OBJECT("PhysicalMemory", PhysicalMemory)