abstract_mem.cc revision 6658
1/* 2 * Copyright (c) 2001-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 * 28 * Authors: Ron Dreslinski 29 * Ali Saidi 30 */ 31 32#include <sys/types.h> 33#include <sys/mman.h> 34#include <errno.h> 35#include <fcntl.h> 36#include <unistd.h> 37#include <zlib.h> 38 39#include <iostream> 40#include <string> 41 42#include "arch/registers.hh" 43#include "base/misc.hh" 44#include "base/random.hh" 45#include "base/types.hh" 46#include "config/full_system.hh" 47#include "config/the_isa.hh" 48#include "mem/packet_access.hh" 49#include "mem/physical.hh" 50#include "sim/eventq.hh" 51 52using namespace std; 53using namespace TheISA; 54 55PhysicalMemory::PhysicalMemory(const Params *p) 56 : MemObject(p), pmemAddr(NULL), pagePtr(0), 57 lat(p->latency), lat_var(p->latency_var), 58 cachedSize(params()->range.size()), cachedStart(params()->range.start) 59{ 60 if (params()->range.size() % TheISA::PageBytes != 0) 61 panic("Memory Size not divisible by page size\n"); 62 63 if (params()->null) 64 return; 65 66 int map_flags = MAP_ANON | MAP_PRIVATE; 67 pmemAddr = (uint8_t *)mmap(NULL, params()->range.size(), 68 PROT_READ | PROT_WRITE, map_flags, -1, 0); 69 70 if (pmemAddr == (void *)MAP_FAILED) { 71 perror("mmap"); 72 fatal("Could not mmap!\n"); 73 } 74 75 //If requested, initialize all the memory to 0 76 if (p->zero) 77 memset(pmemAddr, 0, p->range.size()); 78} 79 80void 81PhysicalMemory::init() 82{ 83 if (ports.size() == 0) { 84 fatal("PhysicalMemory object %s is unconnected!", name()); 85 } 86 87 for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) { 88 if (*pi) 89 (*pi)->sendStatusChange(Port::RangeChange); 90 } 91} 92 93PhysicalMemory::~PhysicalMemory() 94{ 95 if (pmemAddr) 96 munmap((char*)pmemAddr, params()->range.size()); 97 //Remove memPorts? 98} 99 100Addr 101PhysicalMemory::new_page() 102{ 103 Addr return_addr = pagePtr << LogVMPageSize; 104 return_addr += start(); 105 106 ++pagePtr; 107 return return_addr; 108} 109 110unsigned 111PhysicalMemory::deviceBlockSize() const 112{ 113 //Can accept anysize request 114 return 0; 115} 116 117Tick 118PhysicalMemory::calculateLatency(PacketPtr pkt) 119{ 120 Tick latency = lat; 121 if (lat_var != 0) 122 latency += random_mt.random<Tick>(0, lat_var); 123 return latency; 124} 125 126 127 128// Add load-locked to tracking list. Should only be called if the 129// operation is a load and the LLSC flag is set. 130void 131PhysicalMemory::trackLoadLocked(PacketPtr pkt) 132{ 133 Request *req = pkt->req; 134 Addr paddr = LockedAddr::mask(req->getPaddr()); 135 136 // first we check if we already have a locked addr for this 137 // xc. Since each xc only gets one, we just update the 138 // existing record with the new address. 139 list<LockedAddr>::iterator i; 140 141 for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) { 142 if (i->matchesContext(req)) { 143 DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n", 144 req->contextId(), paddr); 145 i->addr = paddr; 146 return; 147 } 148 } 149 150 // no record for this xc: need to allocate a new one 151 DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n", 152 req->contextId(), paddr); 153 lockedAddrList.push_front(LockedAddr(req)); 154} 155 156 157// Called on *writes* only... both regular stores and 158// store-conditional operations. Check for conventional stores which 159// conflict with locked addresses, and for success/failure of store 160// conditionals. 161bool 162PhysicalMemory::checkLockedAddrList(PacketPtr pkt) 163{ 164 Request *req = pkt->req; 165 Addr paddr = LockedAddr::mask(req->getPaddr()); 166 bool isLLSC = pkt->isLLSC(); 167 168 // Initialize return value. Non-conditional stores always 169 // succeed. Assume conditional stores will fail until proven 170 // otherwise. 171 bool success = !isLLSC; 172 173 // Iterate over list. Note that there could be multiple matching 174 // records, as more than one context could have done a load locked 175 // to this location. 176 list<LockedAddr>::iterator i = lockedAddrList.begin(); 177 178 while (i != lockedAddrList.end()) { 179 180 if (i->addr == paddr) { 181 // we have a matching address 182 183 if (isLLSC && i->matchesContext(req)) { 184 // it's a store conditional, and as far as the memory 185 // system can tell, the requesting context's lock is 186 // still valid. 187 DPRINTF(LLSC, "StCond success: context %d addr %#x\n", 188 req->contextId(), paddr); 189 success = true; 190 } 191 192 // Get rid of our record of this lock and advance to next 193 DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n", 194 i->contextId, paddr); 195 i = lockedAddrList.erase(i); 196 } 197 else { 198 // no match: advance to next record 199 ++i; 200 } 201 } 202 203 if (isLLSC) { 204 req->setExtraData(success ? 1 : 0); 205 } 206 207 return success; 208} 209 210 211#if TRACING_ON 212 213#define CASE(A, T) \ 214 case sizeof(T): \ 215 DPRINTF(MemoryAccess,"%s of size %i on address 0x%x data 0x%x\n", \ 216 A, pkt->getSize(), pkt->getAddr(), pkt->get<T>()); \ 217 break 218 219 220#define TRACE_PACKET(A) \ 221 do { \ 222 switch (pkt->getSize()) { \ 223 CASE(A, uint64_t); \ 224 CASE(A, uint32_t); \ 225 CASE(A, uint16_t); \ 226 CASE(A, uint8_t); \ 227 default: \ 228 DPRINTF(MemoryAccess, "%s of size %i on address 0x%x\n", \ 229 A, pkt->getSize(), pkt->getAddr()); \ 230 } \ 231 } while (0) 232 233#else 234 235#define TRACE_PACKET(A) 236 237#endif 238 239Tick 240PhysicalMemory::doAtomicAccess(PacketPtr pkt) 241{ 242 assert(pkt->getAddr() >= start() && 243 pkt->getAddr() + pkt->getSize() <= start() + size()); 244 245 if (pkt->memInhibitAsserted()) { 246 DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n", 247 pkt->getAddr()); 248 return 0; 249 } 250 251 uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start(); 252 253 if (pkt->cmd == MemCmd::SwapReq) { 254 IntReg overwrite_val; 255 bool overwrite_mem; 256 uint64_t condition_val64; 257 uint32_t condition_val32; 258 259 if (!pmemAddr) 260 panic("Swap only works if there is real memory (i.e. null=False)"); 261 assert(sizeof(IntReg) >= pkt->getSize()); 262 263 overwrite_mem = true; 264 // keep a copy of our possible write value, and copy what is at the 265 // memory address into the packet 266 std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize()); 267 std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize()); 268 269 if (pkt->req->isCondSwap()) { 270 if (pkt->getSize() == sizeof(uint64_t)) { 271 condition_val64 = pkt->req->getExtraData(); 272 overwrite_mem = !std::memcmp(&condition_val64, hostAddr, 273 sizeof(uint64_t)); 274 } else if (pkt->getSize() == sizeof(uint32_t)) { 275 condition_val32 = (uint32_t)pkt->req->getExtraData(); 276 overwrite_mem = !std::memcmp(&condition_val32, hostAddr, 277 sizeof(uint32_t)); 278 } else 279 panic("Invalid size for conditional read/write\n"); 280 } 281 282 if (overwrite_mem) 283 std::memcpy(hostAddr, &overwrite_val, pkt->getSize()); 284 285 assert(!pkt->req->isInstFetch()); 286 TRACE_PACKET("Read/Write"); 287 } else if (pkt->isRead()) { 288 assert(!pkt->isWrite()); 289 if (pkt->isLLSC()) { 290 trackLoadLocked(pkt); 291 } 292 if (pmemAddr) 293 memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize()); 294 TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read"); 295 } else if (pkt->isWrite()) { 296 if (writeOK(pkt)) { 297 if (pmemAddr) 298 memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize()); 299 assert(!pkt->req->isInstFetch()); 300 TRACE_PACKET("Write"); 301 } 302 } else if (pkt->isInvalidate()) { 303 //upgrade or invalidate 304 if (pkt->needsResponse()) { 305 pkt->makeAtomicResponse(); 306 } 307 } else { 308 panic("unimplemented"); 309 } 310 311 if (pkt->needsResponse()) { 312 pkt->makeAtomicResponse(); 313 } 314 return calculateLatency(pkt); 315} 316 317 318void 319PhysicalMemory::doFunctionalAccess(PacketPtr pkt) 320{ 321 assert(pkt->getAddr() >= start() && 322 pkt->getAddr() + pkt->getSize() <= start() + size()); 323 324 325 uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start(); 326 327 if (pkt->isRead()) { 328 if (pmemAddr) 329 memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize()); 330 TRACE_PACKET("Read"); 331 pkt->makeAtomicResponse(); 332 } else if (pkt->isWrite()) { 333 if (pmemAddr) 334 memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize()); 335 TRACE_PACKET("Write"); 336 pkt->makeAtomicResponse(); 337 } else if (pkt->isPrint()) { 338 Packet::PrintReqState *prs = 339 dynamic_cast<Packet::PrintReqState*>(pkt->senderState); 340 // Need to call printLabels() explicitly since we're not going 341 // through printObj(). 342 prs->printLabels(); 343 // Right now we just print the single byte at the specified address. 344 ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr); 345 } else { 346 panic("PhysicalMemory: unimplemented functional command %s", 347 pkt->cmdString()); 348 } 349} 350 351 352Port * 353PhysicalMemory::getPort(const std::string &if_name, int idx) 354{ 355 // Accept request for "functional" port for backwards compatibility 356 // with places where this function is called from C++. I'd prefer 357 // to move all these into Python someday. 358 if (if_name == "functional") { 359 return new MemoryPort(csprintf("%s-functional", name()), this); 360 } 361 362 if (if_name != "port") { 363 panic("PhysicalMemory::getPort: unknown port %s requested", if_name); 364 } 365 366 if (idx >= (int)ports.size()) { 367 ports.resize(idx + 1); 368 } 369 370 if (ports[idx] != NULL) { 371 panic("PhysicalMemory::getPort: port %d already assigned", idx); 372 } 373 374 MemoryPort *port = 375 new MemoryPort(csprintf("%s-port%d", name(), idx), this); 376 377 ports[idx] = port; 378 return port; 379} 380 381 382void 383PhysicalMemory::recvStatusChange(Port::Status status) 384{ 385} 386 387PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name, 388 PhysicalMemory *_memory) 389 : SimpleTimingPort(_name, _memory), memory(_memory) 390{ } 391 392void 393PhysicalMemory::MemoryPort::recvStatusChange(Port::Status status) 394{ 395 memory->recvStatusChange(status); 396} 397 398void 399PhysicalMemory::MemoryPort::getDeviceAddressRanges(AddrRangeList &resp, 400 bool &snoop) 401{ 402 memory->getAddressRanges(resp, snoop); 403} 404 405void 406PhysicalMemory::getAddressRanges(AddrRangeList &resp, bool &snoop) 407{ 408 snoop = false; 409 resp.clear(); 410 resp.push_back(RangeSize(start(), params()->range.size())); 411} 412 413unsigned 414PhysicalMemory::MemoryPort::deviceBlockSize() const 415{ 416 return memory->deviceBlockSize(); 417} 418 419Tick 420PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt) 421{ 422 return memory->doAtomicAccess(pkt); 423} 424 425void 426PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt) 427{ 428 pkt->pushLabel(memory->name()); 429 430 if (!checkFunctional(pkt)) { 431 // Default implementation of SimpleTimingPort::recvFunctional() 432 // calls recvAtomic() and throws away the latency; we can save a 433 // little here by just not calculating the latency. 434 memory->doFunctionalAccess(pkt); 435 } 436 437 pkt->popLabel(); 438} 439 440unsigned int 441PhysicalMemory::drain(Event *de) 442{ 443 int count = 0; 444 for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) { 445 count += (*pi)->drain(de); 446 } 447 448 if (count) 449 changeState(Draining); 450 else 451 changeState(Drained); 452 return count; 453} 454 455void 456PhysicalMemory::serialize(ostream &os) 457{ 458 if (!pmemAddr) 459 return; 460 461 gzFile compressedMem; 462 string filename = name() + ".physmem"; 463 464 SERIALIZE_SCALAR(filename); 465 466 // write memory file 467 string thefile = Checkpoint::dir() + "/" + filename.c_str(); 468 int fd = creat(thefile.c_str(), 0664); 469 if (fd < 0) { 470 perror("creat"); 471 fatal("Can't open physical memory checkpoint file '%s'\n", filename); 472 } 473 474 compressedMem = gzdopen(fd, "wb"); 475 if (compressedMem == NULL) 476 fatal("Insufficient memory to allocate compression state for %s\n", 477 filename); 478 479 if (gzwrite(compressedMem, pmemAddr, params()->range.size()) != 480 (int)params()->range.size()) { 481 fatal("Write failed on physical memory checkpoint file '%s'\n", 482 filename); 483 } 484 485 if (gzclose(compressedMem)) 486 fatal("Close failed on physical memory checkpoint file '%s'\n", 487 filename); 488} 489 490void 491PhysicalMemory::unserialize(Checkpoint *cp, const string §ion) 492{ 493 if (!pmemAddr) 494 return; 495 496 gzFile compressedMem; 497 long *tempPage; 498 long *pmem_current; 499 uint64_t curSize; 500 uint32_t bytesRead; 501 const uint32_t chunkSize = 16384; 502 503 string filename; 504 505 UNSERIALIZE_SCALAR(filename); 506 507 filename = cp->cptDir + "/" + filename; 508 509 // mmap memoryfile 510 int fd = open(filename.c_str(), O_RDONLY); 511 if (fd < 0) { 512 perror("open"); 513 fatal("Can't open physical memory checkpoint file '%s'", filename); 514 } 515 516 compressedMem = gzdopen(fd, "rb"); 517 if (compressedMem == NULL) 518 fatal("Insufficient memory to allocate compression state for %s\n", 519 filename); 520 521 // unmap file that was mmaped in the constructor 522 // This is done here to make sure that gzip and open don't muck with our 523 // nice large space of memory before we reallocate it 524 munmap((char*)pmemAddr, params()->range.size()); 525 526 pmemAddr = (uint8_t *)mmap(NULL, params()->range.size(), 527 PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); 528 529 if (pmemAddr == (void *)MAP_FAILED) { 530 perror("mmap"); 531 fatal("Could not mmap physical memory!\n"); 532 } 533 534 curSize = 0; 535 tempPage = (long*)malloc(chunkSize); 536 if (tempPage == NULL) 537 fatal("Unable to malloc memory to read file %s\n", filename); 538 539 /* Only copy bytes that are non-zero, so we don't give the VM system hell */ 540 while (curSize < params()->range.size()) { 541 bytesRead = gzread(compressedMem, tempPage, chunkSize); 542 if (bytesRead != chunkSize && 543 bytesRead != params()->range.size() - curSize) 544 fatal("Read failed on physical memory checkpoint file '%s'" 545 " got %d bytes, expected %d or %d bytes\n", 546 filename, bytesRead, chunkSize, 547 params()->range.size() - curSize); 548 549 assert(bytesRead % sizeof(long) == 0); 550 551 for (uint32_t x = 0; x < bytesRead / sizeof(long); x++) 552 { 553 if (*(tempPage+x) != 0) { 554 pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long)); 555 *pmem_current = *(tempPage+x); 556 } 557 } 558 curSize += bytesRead; 559 } 560 561 free(tempPage); 562 563 if (gzclose(compressedMem)) 564 fatal("Close failed on physical memory checkpoint file '%s'\n", 565 filename); 566 567} 568 569PhysicalMemory * 570PhysicalMemoryParams::create() 571{ 572 return new PhysicalMemory(this); 573} 574