1/* 2 * Copyright (c) 2010 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2001-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Ron Dreslinski 41 * Ali Saidi 42 */ 43 44#include <sys/types.h> 45#include <sys/mman.h> 46#include <sys/user.h> 47#include <errno.h> 48#include <fcntl.h> 49#include <unistd.h> 50#include <zlib.h> 51 52#include <cstdio> 53#include <iostream> 54#include <string> 55 56#include "arch/registers.hh" 57#include "base/intmath.hh" 58#include "base/misc.hh" 59#include "base/random.hh" 60#include "base/types.hh" 61#include "config/full_system.hh" 62#include "config/the_isa.hh" 63#include "mem/packet_access.hh" 64#include "mem/physical.hh" 65#include "sim/eventq.hh" 66 67using namespace std; 68using namespace TheISA; 69 70PhysicalMemory::PhysicalMemory(const Params *p)
| 1/* 2 * Copyright (c) 2010 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2001-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Ron Dreslinski 41 * Ali Saidi 42 */ 43 44#include <sys/types.h> 45#include <sys/mman.h> 46#include <sys/user.h> 47#include <errno.h> 48#include <fcntl.h> 49#include <unistd.h> 50#include <zlib.h> 51 52#include <cstdio> 53#include <iostream> 54#include <string> 55 56#include "arch/registers.hh" 57#include "base/intmath.hh" 58#include "base/misc.hh" 59#include "base/random.hh" 60#include "base/types.hh" 61#include "config/full_system.hh" 62#include "config/the_isa.hh" 63#include "mem/packet_access.hh" 64#include "mem/physical.hh" 65#include "sim/eventq.hh" 66 67using namespace std; 68using namespace TheISA; 69 70PhysicalMemory::PhysicalMemory(const Params *p)
|
73 _size(params()->range.size()), _start(params()->range.start) 74{ 75 if (size() % TheISA::PageBytes != 0) 76 panic("Memory Size not divisible by page size\n"); 77 78 if (params()->null) 79 return; 80 81 82 if (params()->file == "") { 83 int map_flags = MAP_ANON | MAP_PRIVATE; 84 pmemAddr = (uint8_t *)mmap(NULL, size(), 85 PROT_READ | PROT_WRITE, map_flags, -1, 0); 86 } else { 87 int map_flags = MAP_PRIVATE; 88 int fd = open(params()->file.c_str(), O_RDONLY); 89 _size = lseek(fd, 0, SEEK_END); 90 lseek(fd, 0, SEEK_SET); 91 pmemAddr = (uint8_t *)mmap(NULL, roundUp(size(), PAGE_SIZE), 92 PROT_READ | PROT_WRITE, map_flags, fd, 0); 93 } 94 95 if (pmemAddr == (void *)MAP_FAILED) { 96 perror("mmap"); 97 if (params()->file == "") 98 fatal("Could not mmap!\n"); 99 else 100 fatal("Could not find file: %s\n", params()->file); 101 } 102 103 //If requested, initialize all the memory to 0 104 if (p->zero) 105 memset(pmemAddr, 0, size()); 106} 107 108void 109PhysicalMemory::init() 110{ 111 if (ports.size() == 0) { 112 fatal("PhysicalMemory object %s is unconnected!", name()); 113 } 114 115 for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) { 116 if (*pi) 117 (*pi)->sendStatusChange(Port::RangeChange); 118 } 119} 120 121PhysicalMemory::~PhysicalMemory() 122{ 123 if (pmemAddr) 124 munmap((char*)pmemAddr, size()); 125} 126
| 72 _size(params()->range.size()), _start(params()->range.start) 73{ 74 if (size() % TheISA::PageBytes != 0) 75 panic("Memory Size not divisible by page size\n"); 76 77 if (params()->null) 78 return; 79 80 81 if (params()->file == "") { 82 int map_flags = MAP_ANON | MAP_PRIVATE; 83 pmemAddr = (uint8_t *)mmap(NULL, size(), 84 PROT_READ | PROT_WRITE, map_flags, -1, 0); 85 } else { 86 int map_flags = MAP_PRIVATE; 87 int fd = open(params()->file.c_str(), O_RDONLY); 88 _size = lseek(fd, 0, SEEK_END); 89 lseek(fd, 0, SEEK_SET); 90 pmemAddr = (uint8_t *)mmap(NULL, roundUp(size(), PAGE_SIZE), 91 PROT_READ | PROT_WRITE, map_flags, fd, 0); 92 } 93 94 if (pmemAddr == (void *)MAP_FAILED) { 95 perror("mmap"); 96 if (params()->file == "") 97 fatal("Could not mmap!\n"); 98 else 99 fatal("Could not find file: %s\n", params()->file); 100 } 101 102 //If requested, initialize all the memory to 0 103 if (p->zero) 104 memset(pmemAddr, 0, size()); 105} 106 107void 108PhysicalMemory::init() 109{ 110 if (ports.size() == 0) { 111 fatal("PhysicalMemory object %s is unconnected!", name()); 112 } 113 114 for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) { 115 if (*pi) 116 (*pi)->sendStatusChange(Port::RangeChange); 117 } 118} 119 120PhysicalMemory::~PhysicalMemory() 121{ 122 if (pmemAddr) 123 munmap((char*)pmemAddr, size()); 124} 125
|
137unsigned 138PhysicalMemory::deviceBlockSize() const 139{ 140 //Can accept anysize request 141 return 0; 142} 143 144Tick 145PhysicalMemory::calculateLatency(PacketPtr pkt) 146{ 147 Tick latency = lat; 148 if (lat_var != 0) 149 latency += random_mt.random<Tick>(0, lat_var); 150 return latency; 151} 152 153 154 155// Add load-locked to tracking list. Should only be called if the 156// operation is a load and the LLSC flag is set. 157void 158PhysicalMemory::trackLoadLocked(PacketPtr pkt) 159{ 160 Request *req = pkt->req; 161 Addr paddr = LockedAddr::mask(req->getPaddr()); 162 163 // first we check if we already have a locked addr for this 164 // xc. Since each xc only gets one, we just update the 165 // existing record with the new address. 166 list<LockedAddr>::iterator i; 167 168 for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) { 169 if (i->matchesContext(req)) { 170 DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n", 171 req->contextId(), paddr); 172 i->addr = paddr; 173 return; 174 } 175 } 176 177 // no record for this xc: need to allocate a new one 178 DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n", 179 req->contextId(), paddr); 180 lockedAddrList.push_front(LockedAddr(req)); 181} 182 183 184// Called on *writes* only... both regular stores and 185// store-conditional operations. Check for conventional stores which 186// conflict with locked addresses, and for success/failure of store 187// conditionals. 188bool 189PhysicalMemory::checkLockedAddrList(PacketPtr pkt) 190{ 191 Request *req = pkt->req; 192 Addr paddr = LockedAddr::mask(req->getPaddr()); 193 bool isLLSC = pkt->isLLSC(); 194 195 // Initialize return value. Non-conditional stores always 196 // succeed. Assume conditional stores will fail until proven 197 // otherwise. 198 bool success = !isLLSC; 199 200 // Iterate over list. Note that there could be multiple matching 201 // records, as more than one context could have done a load locked 202 // to this location. 203 list<LockedAddr>::iterator i = lockedAddrList.begin(); 204 205 while (i != lockedAddrList.end()) { 206 207 if (i->addr == paddr) { 208 // we have a matching address 209 210 if (isLLSC && i->matchesContext(req)) { 211 // it's a store conditional, and as far as the memory 212 // system can tell, the requesting context's lock is 213 // still valid. 214 DPRINTF(LLSC, "StCond success: context %d addr %#x\n", 215 req->contextId(), paddr); 216 success = true; 217 } 218 219 // Get rid of our record of this lock and advance to next 220 DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n", 221 i->contextId, paddr); 222 i = lockedAddrList.erase(i); 223 } 224 else { 225 // no match: advance to next record 226 ++i; 227 } 228 } 229 230 if (isLLSC) { 231 req->setExtraData(success ? 1 : 0); 232 } 233 234 return success; 235} 236 237 238#if TRACING_ON 239 240#define CASE(A, T) \ 241 case sizeof(T): \ 242 DPRINTF(MemoryAccess,"%s of size %i on address 0x%x data 0x%x\n", \ 243 A, pkt->getSize(), pkt->getAddr(), pkt->get<T>()); \ 244 break 245 246 247#define TRACE_PACKET(A) \ 248 do { \ 249 switch (pkt->getSize()) { \ 250 CASE(A, uint64_t); \ 251 CASE(A, uint32_t); \ 252 CASE(A, uint16_t); \ 253 CASE(A, uint8_t); \ 254 default: \ 255 DPRINTF(MemoryAccess, "%s of size %i on address 0x%x\n", \ 256 A, pkt->getSize(), pkt->getAddr()); \ 257 } \ 258 } while (0) 259 260#else 261 262#define TRACE_PACKET(A) 263 264#endif 265 266Tick 267PhysicalMemory::doAtomicAccess(PacketPtr pkt) 268{ 269 assert(pkt->getAddr() >= start() && 270 pkt->getAddr() + pkt->getSize() <= start() + size()); 271 272 if (pkt->memInhibitAsserted()) { 273 DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n", 274 pkt->getAddr()); 275 return 0; 276 } 277 278 uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start(); 279 280 if (pkt->cmd == MemCmd::SwapReq) { 281 IntReg overwrite_val; 282 bool overwrite_mem; 283 uint64_t condition_val64; 284 uint32_t condition_val32; 285 286 if (!pmemAddr) 287 panic("Swap only works if there is real memory (i.e. null=False)"); 288 assert(sizeof(IntReg) >= pkt->getSize()); 289 290 overwrite_mem = true; 291 // keep a copy of our possible write value, and copy what is at the 292 // memory address into the packet 293 std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize()); 294 std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize()); 295 296 if (pkt->req->isCondSwap()) { 297 if (pkt->getSize() == sizeof(uint64_t)) { 298 condition_val64 = pkt->req->getExtraData(); 299 overwrite_mem = !std::memcmp(&condition_val64, hostAddr, 300 sizeof(uint64_t)); 301 } else if (pkt->getSize() == sizeof(uint32_t)) { 302 condition_val32 = (uint32_t)pkt->req->getExtraData(); 303 overwrite_mem = !std::memcmp(&condition_val32, hostAddr, 304 sizeof(uint32_t)); 305 } else 306 panic("Invalid size for conditional read/write\n"); 307 } 308 309 if (overwrite_mem) 310 std::memcpy(hostAddr, &overwrite_val, pkt->getSize()); 311 312 assert(!pkt->req->isInstFetch()); 313 TRACE_PACKET("Read/Write"); 314 } else if (pkt->isRead()) { 315 assert(!pkt->isWrite()); 316 if (pkt->isLLSC()) { 317 trackLoadLocked(pkt); 318 } 319 if (pmemAddr) 320 memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize()); 321 TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read"); 322 } else if (pkt->isWrite()) { 323 if (writeOK(pkt)) { 324 if (pmemAddr) 325 memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize()); 326 assert(!pkt->req->isInstFetch()); 327 TRACE_PACKET("Write"); 328 } 329 } else if (pkt->isInvalidate()) { 330 //upgrade or invalidate 331 if (pkt->needsResponse()) { 332 pkt->makeAtomicResponse(); 333 } 334 } else { 335 panic("unimplemented"); 336 } 337 338 if (pkt->needsResponse()) { 339 pkt->makeAtomicResponse(); 340 } 341 return calculateLatency(pkt); 342} 343 344 345void 346PhysicalMemory::doFunctionalAccess(PacketPtr pkt) 347{ 348 assert(pkt->getAddr() >= start() && 349 pkt->getAddr() + pkt->getSize() <= start() + size()); 350 351 352 uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start(); 353 354 if (pkt->isRead()) { 355 if (pmemAddr) 356 memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize()); 357 TRACE_PACKET("Read"); 358 pkt->makeAtomicResponse(); 359 } else if (pkt->isWrite()) { 360 if (pmemAddr) 361 memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize()); 362 TRACE_PACKET("Write"); 363 pkt->makeAtomicResponse(); 364 } else if (pkt->isPrint()) { 365 Packet::PrintReqState *prs = 366 dynamic_cast<Packet::PrintReqState*>(pkt->senderState); 367 // Need to call printLabels() explicitly since we're not going 368 // through printObj(). 369 prs->printLabels(); 370 // Right now we just print the single byte at the specified address. 371 ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr); 372 } else { 373 panic("PhysicalMemory: unimplemented functional command %s", 374 pkt->cmdString()); 375 } 376} 377 378 379Port * 380PhysicalMemory::getPort(const std::string &if_name, int idx) 381{ 382 // Accept request for "functional" port for backwards compatibility 383 // with places where this function is called from C++. I'd prefer 384 // to move all these into Python someday. 385 if (if_name == "functional") { 386 return new MemoryPort(csprintf("%s-functional", name()), this); 387 } 388 389 if (if_name != "port") { 390 panic("PhysicalMemory::getPort: unknown port %s requested", if_name); 391 } 392 393 if (idx >= (int)ports.size()) { 394 ports.resize(idx + 1); 395 } 396 397 if (ports[idx] != NULL) { 398 panic("PhysicalMemory::getPort: port %d already assigned", idx); 399 } 400 401 MemoryPort *port = 402 new MemoryPort(csprintf("%s-port%d", name(), idx), this); 403 404 ports[idx] = port; 405 return port; 406} 407 408 409void 410PhysicalMemory::recvStatusChange(Port::Status status) 411{ 412} 413 414PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name, 415 PhysicalMemory *_memory) 416 : SimpleTimingPort(_name, _memory), memory(_memory) 417{ } 418 419void 420PhysicalMemory::MemoryPort::recvStatusChange(Port::Status status) 421{ 422 memory->recvStatusChange(status); 423} 424 425void 426PhysicalMemory::MemoryPort::getDeviceAddressRanges(AddrRangeList &resp, 427 bool &snoop) 428{ 429 memory->getAddressRanges(resp, snoop); 430} 431 432void 433PhysicalMemory::getAddressRanges(AddrRangeList &resp, bool &snoop) 434{ 435 snoop = false; 436 resp.clear(); 437 resp.push_back(RangeSize(start(), size())); 438} 439 440unsigned 441PhysicalMemory::MemoryPort::deviceBlockSize() const 442{ 443 return memory->deviceBlockSize(); 444} 445 446Tick 447PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt) 448{ 449 return memory->doAtomicAccess(pkt); 450} 451 452void 453PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt) 454{ 455 pkt->pushLabel(memory->name()); 456 457 if (!checkFunctional(pkt)) { 458 // Default implementation of SimpleTimingPort::recvFunctional() 459 // calls recvAtomic() and throws away the latency; we can save a 460 // little here by just not calculating the latency. 461 memory->doFunctionalAccess(pkt); 462 } 463 464 pkt->popLabel(); 465} 466 467unsigned int 468PhysicalMemory::drain(Event *de) 469{ 470 int count = 0; 471 for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) { 472 count += (*pi)->drain(de); 473 } 474 475 if (count) 476 changeState(Draining); 477 else 478 changeState(Drained); 479 return count; 480} 481 482void 483PhysicalMemory::serialize(ostream &os) 484{ 485 if (!pmemAddr) 486 return; 487 488 gzFile compressedMem; 489 string filename = name() + ".physmem"; 490 491 SERIALIZE_SCALAR(filename); 492 SERIALIZE_SCALAR(_size); 493 494 // write memory file 495 string thefile = Checkpoint::dir() + "/" + filename.c_str(); 496 int fd = creat(thefile.c_str(), 0664); 497 if (fd < 0) { 498 perror("creat"); 499 fatal("Can't open physical memory checkpoint file '%s'\n", filename); 500 } 501 502 compressedMem = gzdopen(fd, "wb"); 503 if (compressedMem == NULL) 504 fatal("Insufficient memory to allocate compression state for %s\n", 505 filename); 506 507 if (gzwrite(compressedMem, pmemAddr, size()) != (int)size()) { 508 fatal("Write failed on physical memory checkpoint file '%s'\n", 509 filename); 510 } 511 512 if (gzclose(compressedMem)) 513 fatal("Close failed on physical memory checkpoint file '%s'\n", 514 filename); 515 516 list<LockedAddr>::iterator i = lockedAddrList.begin(); 517 518 vector<Addr> lal_addr; 519 vector<int> lal_cid; 520 while (i != lockedAddrList.end()) { 521 lal_addr.push_back(i->addr); 522 lal_cid.push_back(i->contextId); 523 i++; 524 } 525 arrayParamOut(os, "lal_addr", lal_addr); 526 arrayParamOut(os, "lal_cid", lal_cid); 527} 528 529void 530PhysicalMemory::unserialize(Checkpoint *cp, const string §ion) 531{ 532 if (!pmemAddr) 533 return; 534 535 gzFile compressedMem; 536 long *tempPage; 537 long *pmem_current; 538 uint64_t curSize; 539 uint32_t bytesRead; 540 const uint32_t chunkSize = 16384; 541 542 string filename; 543 544 UNSERIALIZE_SCALAR(filename); 545 546 filename = cp->cptDir + "/" + filename; 547 548 // mmap memoryfile 549 int fd = open(filename.c_str(), O_RDONLY); 550 if (fd < 0) { 551 perror("open"); 552 fatal("Can't open physical memory checkpoint file '%s'", filename); 553 } 554 555 compressedMem = gzdopen(fd, "rb"); 556 if (compressedMem == NULL) 557 fatal("Insufficient memory to allocate compression state for %s\n", 558 filename); 559 560 // unmap file that was mmaped in the constructor 561 // This is done here to make sure that gzip and open don't muck with our 562 // nice large space of memory before we reallocate it 563 munmap((char*)pmemAddr, size()); 564 565 UNSERIALIZE_SCALAR(_size); 566 if (size() > params()->range.size()) 567 fatal("Memory size has changed!\n"); 568 569 pmemAddr = (uint8_t *)mmap(NULL, size(), 570 PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); 571 572 if (pmemAddr == (void *)MAP_FAILED) { 573 perror("mmap"); 574 fatal("Could not mmap physical memory!\n"); 575 } 576 577 curSize = 0; 578 tempPage = (long*)malloc(chunkSize); 579 if (tempPage == NULL) 580 fatal("Unable to malloc memory to read file %s\n", filename); 581 582 /* Only copy bytes that are non-zero, so we don't give the VM system hell */ 583 while (curSize < size()) { 584 bytesRead = gzread(compressedMem, tempPage, chunkSize); 585 if (bytesRead == 0) 586 break; 587 588 assert(bytesRead % sizeof(long) == 0); 589 590 for (uint32_t x = 0; x < bytesRead / sizeof(long); x++) 591 { 592 if (*(tempPage+x) != 0) { 593 pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long)); 594 *pmem_current = *(tempPage+x); 595 } 596 } 597 curSize += bytesRead; 598 } 599 600 free(tempPage); 601 602 if (gzclose(compressedMem)) 603 fatal("Close failed on physical memory checkpoint file '%s'\n", 604 filename); 605 606 vector<Addr> lal_addr; 607 vector<int> lal_cid; 608 arrayParamIn(cp, section, "lal_addr", lal_addr); 609 arrayParamIn(cp, section, "lal_cid", lal_cid); 610 for(int i = 0; i < lal_addr.size(); i++) 611 lockedAddrList.push_front(LockedAddr(lal_addr[i], lal_cid[i])); 612} 613 614PhysicalMemory * 615PhysicalMemoryParams::create() 616{ 617 return new PhysicalMemory(this); 618}
| 126unsigned 127PhysicalMemory::deviceBlockSize() const 128{ 129 //Can accept anysize request 130 return 0; 131} 132 133Tick 134PhysicalMemory::calculateLatency(PacketPtr pkt) 135{ 136 Tick latency = lat; 137 if (lat_var != 0) 138 latency += random_mt.random<Tick>(0, lat_var); 139 return latency; 140} 141 142 143 144// Add load-locked to tracking list. Should only be called if the 145// operation is a load and the LLSC flag is set. 146void 147PhysicalMemory::trackLoadLocked(PacketPtr pkt) 148{ 149 Request *req = pkt->req; 150 Addr paddr = LockedAddr::mask(req->getPaddr()); 151 152 // first we check if we already have a locked addr for this 153 // xc. Since each xc only gets one, we just update the 154 // existing record with the new address. 155 list<LockedAddr>::iterator i; 156 157 for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) { 158 if (i->matchesContext(req)) { 159 DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n", 160 req->contextId(), paddr); 161 i->addr = paddr; 162 return; 163 } 164 } 165 166 // no record for this xc: need to allocate a new one 167 DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n", 168 req->contextId(), paddr); 169 lockedAddrList.push_front(LockedAddr(req)); 170} 171 172 173// Called on *writes* only... both regular stores and 174// store-conditional operations. Check for conventional stores which 175// conflict with locked addresses, and for success/failure of store 176// conditionals. 177bool 178PhysicalMemory::checkLockedAddrList(PacketPtr pkt) 179{ 180 Request *req = pkt->req; 181 Addr paddr = LockedAddr::mask(req->getPaddr()); 182 bool isLLSC = pkt->isLLSC(); 183 184 // Initialize return value. Non-conditional stores always 185 // succeed. Assume conditional stores will fail until proven 186 // otherwise. 187 bool success = !isLLSC; 188 189 // Iterate over list. Note that there could be multiple matching 190 // records, as more than one context could have done a load locked 191 // to this location. 192 list<LockedAddr>::iterator i = lockedAddrList.begin(); 193 194 while (i != lockedAddrList.end()) { 195 196 if (i->addr == paddr) { 197 // we have a matching address 198 199 if (isLLSC && i->matchesContext(req)) { 200 // it's a store conditional, and as far as the memory 201 // system can tell, the requesting context's lock is 202 // still valid. 203 DPRINTF(LLSC, "StCond success: context %d addr %#x\n", 204 req->contextId(), paddr); 205 success = true; 206 } 207 208 // Get rid of our record of this lock and advance to next 209 DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n", 210 i->contextId, paddr); 211 i = lockedAddrList.erase(i); 212 } 213 else { 214 // no match: advance to next record 215 ++i; 216 } 217 } 218 219 if (isLLSC) { 220 req->setExtraData(success ? 1 : 0); 221 } 222 223 return success; 224} 225 226 227#if TRACING_ON 228 229#define CASE(A, T) \ 230 case sizeof(T): \ 231 DPRINTF(MemoryAccess,"%s of size %i on address 0x%x data 0x%x\n", \ 232 A, pkt->getSize(), pkt->getAddr(), pkt->get<T>()); \ 233 break 234 235 236#define TRACE_PACKET(A) \ 237 do { \ 238 switch (pkt->getSize()) { \ 239 CASE(A, uint64_t); \ 240 CASE(A, uint32_t); \ 241 CASE(A, uint16_t); \ 242 CASE(A, uint8_t); \ 243 default: \ 244 DPRINTF(MemoryAccess, "%s of size %i on address 0x%x\n", \ 245 A, pkt->getSize(), pkt->getAddr()); \ 246 } \ 247 } while (0) 248 249#else 250 251#define TRACE_PACKET(A) 252 253#endif 254 255Tick 256PhysicalMemory::doAtomicAccess(PacketPtr pkt) 257{ 258 assert(pkt->getAddr() >= start() && 259 pkt->getAddr() + pkt->getSize() <= start() + size()); 260 261 if (pkt->memInhibitAsserted()) { 262 DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n", 263 pkt->getAddr()); 264 return 0; 265 } 266 267 uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start(); 268 269 if (pkt->cmd == MemCmd::SwapReq) { 270 IntReg overwrite_val; 271 bool overwrite_mem; 272 uint64_t condition_val64; 273 uint32_t condition_val32; 274 275 if (!pmemAddr) 276 panic("Swap only works if there is real memory (i.e. null=False)"); 277 assert(sizeof(IntReg) >= pkt->getSize()); 278 279 overwrite_mem = true; 280 // keep a copy of our possible write value, and copy what is at the 281 // memory address into the packet 282 std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize()); 283 std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize()); 284 285 if (pkt->req->isCondSwap()) { 286 if (pkt->getSize() == sizeof(uint64_t)) { 287 condition_val64 = pkt->req->getExtraData(); 288 overwrite_mem = !std::memcmp(&condition_val64, hostAddr, 289 sizeof(uint64_t)); 290 } else if (pkt->getSize() == sizeof(uint32_t)) { 291 condition_val32 = (uint32_t)pkt->req->getExtraData(); 292 overwrite_mem = !std::memcmp(&condition_val32, hostAddr, 293 sizeof(uint32_t)); 294 } else 295 panic("Invalid size for conditional read/write\n"); 296 } 297 298 if (overwrite_mem) 299 std::memcpy(hostAddr, &overwrite_val, pkt->getSize()); 300 301 assert(!pkt->req->isInstFetch()); 302 TRACE_PACKET("Read/Write"); 303 } else if (pkt->isRead()) { 304 assert(!pkt->isWrite()); 305 if (pkt->isLLSC()) { 306 trackLoadLocked(pkt); 307 } 308 if (pmemAddr) 309 memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize()); 310 TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read"); 311 } else if (pkt->isWrite()) { 312 if (writeOK(pkt)) { 313 if (pmemAddr) 314 memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize()); 315 assert(!pkt->req->isInstFetch()); 316 TRACE_PACKET("Write"); 317 } 318 } else if (pkt->isInvalidate()) { 319 //upgrade or invalidate 320 if (pkt->needsResponse()) { 321 pkt->makeAtomicResponse(); 322 } 323 } else { 324 panic("unimplemented"); 325 } 326 327 if (pkt->needsResponse()) { 328 pkt->makeAtomicResponse(); 329 } 330 return calculateLatency(pkt); 331} 332 333 334void 335PhysicalMemory::doFunctionalAccess(PacketPtr pkt) 336{ 337 assert(pkt->getAddr() >= start() && 338 pkt->getAddr() + pkt->getSize() <= start() + size()); 339 340 341 uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start(); 342 343 if (pkt->isRead()) { 344 if (pmemAddr) 345 memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize()); 346 TRACE_PACKET("Read"); 347 pkt->makeAtomicResponse(); 348 } else if (pkt->isWrite()) { 349 if (pmemAddr) 350 memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize()); 351 TRACE_PACKET("Write"); 352 pkt->makeAtomicResponse(); 353 } else if (pkt->isPrint()) { 354 Packet::PrintReqState *prs = 355 dynamic_cast<Packet::PrintReqState*>(pkt->senderState); 356 // Need to call printLabels() explicitly since we're not going 357 // through printObj(). 358 prs->printLabels(); 359 // Right now we just print the single byte at the specified address. 360 ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr); 361 } else { 362 panic("PhysicalMemory: unimplemented functional command %s", 363 pkt->cmdString()); 364 } 365} 366 367 368Port * 369PhysicalMemory::getPort(const std::string &if_name, int idx) 370{ 371 // Accept request for "functional" port for backwards compatibility 372 // with places where this function is called from C++. I'd prefer 373 // to move all these into Python someday. 374 if (if_name == "functional") { 375 return new MemoryPort(csprintf("%s-functional", name()), this); 376 } 377 378 if (if_name != "port") { 379 panic("PhysicalMemory::getPort: unknown port %s requested", if_name); 380 } 381 382 if (idx >= (int)ports.size()) { 383 ports.resize(idx + 1); 384 } 385 386 if (ports[idx] != NULL) { 387 panic("PhysicalMemory::getPort: port %d already assigned", idx); 388 } 389 390 MemoryPort *port = 391 new MemoryPort(csprintf("%s-port%d", name(), idx), this); 392 393 ports[idx] = port; 394 return port; 395} 396 397 398void 399PhysicalMemory::recvStatusChange(Port::Status status) 400{ 401} 402 403PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name, 404 PhysicalMemory *_memory) 405 : SimpleTimingPort(_name, _memory), memory(_memory) 406{ } 407 408void 409PhysicalMemory::MemoryPort::recvStatusChange(Port::Status status) 410{ 411 memory->recvStatusChange(status); 412} 413 414void 415PhysicalMemory::MemoryPort::getDeviceAddressRanges(AddrRangeList &resp, 416 bool &snoop) 417{ 418 memory->getAddressRanges(resp, snoop); 419} 420 421void 422PhysicalMemory::getAddressRanges(AddrRangeList &resp, bool &snoop) 423{ 424 snoop = false; 425 resp.clear(); 426 resp.push_back(RangeSize(start(), size())); 427} 428 429unsigned 430PhysicalMemory::MemoryPort::deviceBlockSize() const 431{ 432 return memory->deviceBlockSize(); 433} 434 435Tick 436PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt) 437{ 438 return memory->doAtomicAccess(pkt); 439} 440 441void 442PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt) 443{ 444 pkt->pushLabel(memory->name()); 445 446 if (!checkFunctional(pkt)) { 447 // Default implementation of SimpleTimingPort::recvFunctional() 448 // calls recvAtomic() and throws away the latency; we can save a 449 // little here by just not calculating the latency. 450 memory->doFunctionalAccess(pkt); 451 } 452 453 pkt->popLabel(); 454} 455 456unsigned int 457PhysicalMemory::drain(Event *de) 458{ 459 int count = 0; 460 for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) { 461 count += (*pi)->drain(de); 462 } 463 464 if (count) 465 changeState(Draining); 466 else 467 changeState(Drained); 468 return count; 469} 470 471void 472PhysicalMemory::serialize(ostream &os) 473{ 474 if (!pmemAddr) 475 return; 476 477 gzFile compressedMem; 478 string filename = name() + ".physmem"; 479 480 SERIALIZE_SCALAR(filename); 481 SERIALIZE_SCALAR(_size); 482 483 // write memory file 484 string thefile = Checkpoint::dir() + "/" + filename.c_str(); 485 int fd = creat(thefile.c_str(), 0664); 486 if (fd < 0) { 487 perror("creat"); 488 fatal("Can't open physical memory checkpoint file '%s'\n", filename); 489 } 490 491 compressedMem = gzdopen(fd, "wb"); 492 if (compressedMem == NULL) 493 fatal("Insufficient memory to allocate compression state for %s\n", 494 filename); 495 496 if (gzwrite(compressedMem, pmemAddr, size()) != (int)size()) { 497 fatal("Write failed on physical memory checkpoint file '%s'\n", 498 filename); 499 } 500 501 if (gzclose(compressedMem)) 502 fatal("Close failed on physical memory checkpoint file '%s'\n", 503 filename); 504 505 list<LockedAddr>::iterator i = lockedAddrList.begin(); 506 507 vector<Addr> lal_addr; 508 vector<int> lal_cid; 509 while (i != lockedAddrList.end()) { 510 lal_addr.push_back(i->addr); 511 lal_cid.push_back(i->contextId); 512 i++; 513 } 514 arrayParamOut(os, "lal_addr", lal_addr); 515 arrayParamOut(os, "lal_cid", lal_cid); 516} 517 518void 519PhysicalMemory::unserialize(Checkpoint *cp, const string §ion) 520{ 521 if (!pmemAddr) 522 return; 523 524 gzFile compressedMem; 525 long *tempPage; 526 long *pmem_current; 527 uint64_t curSize; 528 uint32_t bytesRead; 529 const uint32_t chunkSize = 16384; 530 531 string filename; 532 533 UNSERIALIZE_SCALAR(filename); 534 535 filename = cp->cptDir + "/" + filename; 536 537 // mmap memoryfile 538 int fd = open(filename.c_str(), O_RDONLY); 539 if (fd < 0) { 540 perror("open"); 541 fatal("Can't open physical memory checkpoint file '%s'", filename); 542 } 543 544 compressedMem = gzdopen(fd, "rb"); 545 if (compressedMem == NULL) 546 fatal("Insufficient memory to allocate compression state for %s\n", 547 filename); 548 549 // unmap file that was mmaped in the constructor 550 // This is done here to make sure that gzip and open don't muck with our 551 // nice large space of memory before we reallocate it 552 munmap((char*)pmemAddr, size()); 553 554 UNSERIALIZE_SCALAR(_size); 555 if (size() > params()->range.size()) 556 fatal("Memory size has changed!\n"); 557 558 pmemAddr = (uint8_t *)mmap(NULL, size(), 559 PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); 560 561 if (pmemAddr == (void *)MAP_FAILED) { 562 perror("mmap"); 563 fatal("Could not mmap physical memory!\n"); 564 } 565 566 curSize = 0; 567 tempPage = (long*)malloc(chunkSize); 568 if (tempPage == NULL) 569 fatal("Unable to malloc memory to read file %s\n", filename); 570 571 /* Only copy bytes that are non-zero, so we don't give the VM system hell */ 572 while (curSize < size()) { 573 bytesRead = gzread(compressedMem, tempPage, chunkSize); 574 if (bytesRead == 0) 575 break; 576 577 assert(bytesRead % sizeof(long) == 0); 578 579 for (uint32_t x = 0; x < bytesRead / sizeof(long); x++) 580 { 581 if (*(tempPage+x) != 0) { 582 pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long)); 583 *pmem_current = *(tempPage+x); 584 } 585 } 586 curSize += bytesRead; 587 } 588 589 free(tempPage); 590 591 if (gzclose(compressedMem)) 592 fatal("Close failed on physical memory checkpoint file '%s'\n", 593 filename); 594 595 vector<Addr> lal_addr; 596 vector<int> lal_cid; 597 arrayParamIn(cp, section, "lal_addr", lal_addr); 598 arrayParamIn(cp, section, "lal_cid", lal_cid); 599 for(int i = 0; i < lal_addr.size(); i++) 600 lockedAddrList.push_front(LockedAddr(lal_addr[i], lal_cid[i])); 601} 602 603PhysicalMemory * 604PhysicalMemoryParams::create() 605{ 606 return new PhysicalMemory(this); 607}
|