physical.cc: revisions 3612:936dcb3f3e2d and 3749:89fb514175fe compared. The listing below follows the newer revision; the only difference between the two is the bounds assert in doFunctionalAccess(), noted inline.
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Ali Saidi
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <iostream>
#include <string>

#include "arch/isa_traits.hh"
#include "base/misc.hh"
#include "config/full_system.hh"
#include "mem/physical.hh"
#include "sim/builder.hh"
#include "sim/eventq.hh"
#include "sim/host.hh"

using namespace std;
using namespace TheISA;

PhysicalMemory::PhysicalMemory(Params *p)
    : MemObject(p->name), pmemAddr(NULL), port(NULL), lat(p->latency), _params(p)
{
    if (params()->addrRange.size() % TheISA::PageBytes != 0)
        panic("Memory Size not divisible by page size\n");

    int map_flags = MAP_ANON | MAP_PRIVATE;
    pmemAddr = (uint8_t *)mmap(NULL, params()->addrRange.size(),
                               PROT_READ | PROT_WRITE, map_flags, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap!\n");
    }

    pagePtr = 0;
}

void
PhysicalMemory::init()
{
    if (!port)
        panic("PhysicalMemory not connected to anything!");
    port->sendStatusChange(Port::RangeChange);
}

PhysicalMemory::~PhysicalMemory()
{
    if (pmemAddr)
        munmap(pmemAddr, params()->addrRange.size());
    // Remove memPorts?
}

Addr
PhysicalMemory::new_page()
{
    Addr return_addr = pagePtr << LogVMPageSize;
    return_addr += params()->addrRange.start;

    ++pagePtr;
    return return_addr;
}
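
// new_page() hands out page-aligned addresses sequentially.  For illustration,
// assuming LogVMPageSize == 13 (8 KB pages) and addrRange.start == 0, successive
// calls return 0x0000, 0x2000, 0x4000, ...  Nothing here checks that pagePtr
// stays inside addrRange, so callers are trusted not to outrun the mapped region.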

int
PhysicalMemory::deviceBlockSize()
{
    // Can accept any size request
    return 0;
}

Tick
PhysicalMemory::calculateLatency(PacketPtr pkt)
{
    return lat;
}


// Add load-locked to tracking list.  Should only be called if the
// operation is a load and the LOCKED flag is set.
void
PhysicalMemory::trackLoadLocked(Request *req)
{
    Addr paddr = LockedAddr::mask(req->getPaddr());

    // first we check if we already have a locked addr for this
    // xc.  Since each xc only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: cpu %d thread %d addr %#x\n",
                    req->getCpuNum(), req->getThreadNum(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // no record for this xc: need to allocate a new one
    DPRINTF(LLSC, "Adding lock record: cpu %d thread %d addr %#x\n",
            req->getCpuNum(), req->getThreadNum(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}


// Called on *writes* only... both regular stores and
// store-conditional operations.  Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
PhysicalMemory::checkLockedAddrList(Request *req)
{
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLocked = req->isLocked();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool success = !isLocked;

    // Iterate over list.  Note that there could be multiple matching
    // records, as more than one context could have done a load locked
    // to this location.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    while (i != lockedAddrList.end()) {

        if (i->addr == paddr) {
            // we have a matching address

            if (isLocked && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory
                // system can tell, the requesting context's lock is
                // still valid.
                DPRINTF(LLSC, "StCond success: cpu %d thread %d addr %#x\n",
                        req->getCpuNum(), req->getThreadNum(), paddr);
                success = true;
            }

            // Get rid of our record of this lock and advance to next
            DPRINTF(LLSC, "Erasing lock record: cpu %d thread %d addr %#x\n",
                    i->cpuNum, i->threadNum, paddr);
            i = lockedAddrList.erase(i);
        }
        else {
            // no match: advance to next record
            ++i;
        }
    }

    if (isLocked) {
        req->setScResult(success ? 1 : 0);
    }

    return success;
}
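
// Taken together, trackLoadLocked() and checkLockedAddrList() implement the
// memory side of LL/SC.  A sketch of the expected sequence (illustrative, not
// code from this file):
//
//   load-locked   A  -> trackLoadLocked() records (cpu, thread, A)
//   ordinary store A -> checkLockedAddrList() erases every record for A,
//                       breaking any outstanding lock on that address
//   store-cond.   A  -> succeeds only if this context's record for A is still
//                       present; the outcome is reported via req->setScResult()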

void
PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
{
    // The only change between revisions 3612:936dcb3f3e2d and 3749:89fb514175fe
    // is this assert: the older revision only required
    //     pkt->getAddr() + pkt->getSize() > params()->addrRange.start
    // while the newer one requires the access to start inside the range:
    assert(pkt->getAddr() >= params()->addrRange.start &&
           pkt->getAddr() + pkt->getSize() <= params()->addrRange.start +
           params()->addrRange.size());

    if (pkt->isRead()) {
        if (pkt->req->isLocked()) {
            trackLoadLocked(pkt->req);
        }
        DPRINTF(MemoryAccess, "Performing Read of size %i on address 0x%x\n",
                pkt->getSize(), pkt->getAddr());
        memcpy(pkt->getPtr<uint8_t>(),
               pmemAddr + pkt->getAddr() - params()->addrRange.start,
               pkt->getSize());
    }
    else if (pkt->isWrite()) {
        if (writeOK(pkt->req)) {
            DPRINTF(MemoryAccess, "Performing Write of size %i on address 0x%x\n",
                    pkt->getSize(), pkt->getAddr());
            memcpy(pmemAddr + pkt->getAddr() - params()->addrRange.start,
                   pkt->getPtr<uint8_t>(), pkt->getSize());
        }
    }
    else if (pkt->isInvalidate()) {
        // upgrade or invalidate
        pkt->flags |= SATISFIED;
    }
    else {
        panic("unimplemented");
    }

    pkt->result = Packet::Success;
}
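
// Both MemoryPort::recvAtomic() and MemoryPort::recvFunctional() below funnel
// into doFunctionalAccess().  Reads feed trackLoadLocked() when the request has
// the LOCKED flag set; writes are gated by writeOK(), which is declared in
// physical.hh (not shown here) and is presumably where checkLockedAddrList()
// gets consulted for conventional stores and store-conditionals.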

Port *
PhysicalMemory::getPort(const std::string &if_name, int idx)
{
    if (if_name == "port" && idx == -1) {
        if (port != NULL)
            panic("PhysicalMemory::getPort: additional port requested to memory!");
        port = new MemoryPort(name() + "-port", this);
        return port;
    } else if (if_name == "functional") {
        /* special port for functional writes at startup and for the memtester */
        return new MemoryPort(name() + "-funcport", this);
    } else {
        panic("PhysicalMemory::getPort: unknown port %s requested", if_name);
    }
}

void
PhysicalMemory::recvStatusChange(Port::Status status)
{
}

PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
                                       PhysicalMemory *_memory)
    : SimpleTimingPort(_name), memory(_memory)
{ }

void
PhysicalMemory::MemoryPort::recvStatusChange(Port::Status status)
{
    memory->recvStatusChange(status);
}

void
PhysicalMemory::MemoryPort::getDeviceAddressRanges(AddrRangeList &resp,
                                                   AddrRangeList &snoop)
{
    memory->getAddressRanges(resp, snoop);
}

void
PhysicalMemory::getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop)
{
    snoop.clear();
    resp.clear();
    resp.push_back(RangeSize(params()->addrRange.start,
                             params()->addrRange.size()));
}

int
PhysicalMemory::MemoryPort::deviceBlockSize()
{
    return memory->deviceBlockSize();
}

Tick
PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
{
    memory->doFunctionalAccess(pkt);
    return memory->calculateLatency(pkt);
}

void
PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
{
    // Since we are overriding SimpleTimingPort's implementation, we have to
    // check the transmit list for packets that overlap this functional access
    // here ourselves.
    std::list<std::pair<Tick,PacketPtr> >::iterator i = transmitList.begin();
    std::list<std::pair<Tick,PacketPtr> >::iterator end = transmitList.end();
    bool notDone = true;

    while (i != end && notDone) {
        PacketPtr target = i->second;
        // If the target contains data, and it overlaps the
        // probed request, need to update data
        if (target->intersect(pkt))
            notDone = fixPacket(pkt, target);
        i++;
    }

    // Default implementation of SimpleTimingPort::recvFunctional()
    // calls recvAtomic() and throws away the latency; we can save a
    // little here by just not calculating the latency.
    memory->doFunctionalAccess(pkt);
}

unsigned int
PhysicalMemory::drain(Event *de)
{
    int count = port->drain(de);
    if (count)
        changeState(Draining);
    else
        changeState(Drained);
    return count;
}

void
PhysicalMemory::serialize(ostream &os)
{
    gzFile compressedMem;
    string filename = name() + ".physmem";

    SERIALIZE_SCALAR(filename);

    // write memory file
    string thefile = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n", filename);
    }

    compressedMem = gzdopen(fd, "wb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    if (gzwrite(compressedMem, pmemAddr, params()->addrRange.size()) !=
        params()->addrRange.size()) {
        fatal("Write failed on physical memory checkpoint file '%s'\n",
              filename);
    }

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}
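
// Checkpoint layout implied by serialize()/unserialize(): the raw memory image
// is written gzip-compressed to "<name>.physmem" in the checkpoint directory,
// and that filename is recorded in the checkpoint via SERIALIZE_SCALAR so
// unserialize() can locate and decompress it on restore.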

void
PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
{
    gzFile compressedMem;
    long *tempPage;
    long *pmem_current;
    uint64_t curSize;
    uint32_t bytesRead;
    const int chunkSize = 16384;

    string filename;

    UNSERIALIZE_SCALAR(filename);

    filename = cp->cptDir + "/" + filename;

    // open the compressed memory image file
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    compressedMem = gzdopen(fd, "rb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // unmap the anonymous memory that was mmapped in the constructor.
    // This is done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it
    munmap(pmemAddr, params()->addrRange.size());

    pmemAddr = (uint8_t *)mmap(NULL, params()->addrRange.size(),
                               PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE,
                               -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

    curSize = 0;
    tempPage = (long*)malloc(chunkSize);
    if (tempPage == NULL)
        fatal("Unable to malloc memory to read file %s\n", filename);

    /* Only copy bytes that are non-zero, so we don't give the VM system hell */
    while (curSize < params()->addrRange.size()) {
        bytesRead = gzread(compressedMem, tempPage, chunkSize);
        if (bytesRead != chunkSize &&
            bytesRead != params()->addrRange.size() - curSize)
            fatal("Read failed on physical memory checkpoint file '%s'"
                  " got %d bytes, expected %d or %d bytes\n",
                  filename, bytesRead, chunkSize,
                  params()->addrRange.size() - curSize);

        assert(bytesRead % sizeof(long) == 0);

        for (int x = 0; x < bytesRead / sizeof(long); x++)
        {
            if (*(tempPage + x) != 0) {
                pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
                *pmem_current = *(tempPage + x);
            }
        }
        curSize += bytesRead;
    }

    free(tempPage);

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);

}


BEGIN_DECLARE_SIM_OBJECT_PARAMS(PhysicalMemory)

    Param<string> file;
    Param<Range<Addr> > range;
    Param<Tick> latency;

END_DECLARE_SIM_OBJECT_PARAMS(PhysicalMemory)

BEGIN_INIT_SIM_OBJECT_PARAMS(PhysicalMemory)

    INIT_PARAM_DFLT(file, "memory mapped file", ""),
    INIT_PARAM(range, "Device Address Range"),
    INIT_PARAM(latency, "Memory access latency")

END_INIT_SIM_OBJECT_PARAMS(PhysicalMemory)

CREATE_SIM_OBJECT(PhysicalMemory)
{
    PhysicalMemory::Params *p = new PhysicalMemory::Params;
    p->name = getInstanceName();
    p->addrRange = range;
    p->latency = latency;
    return new PhysicalMemory(p);
}

REGISTER_SIM_OBJECT("PhysicalMemory", PhysicalMemory)