abstract_mem.cc revision 3751
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Ali Saidi
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <iostream>
#include <string>

#include "arch/isa_traits.hh"
#include "base/misc.hh"
#include "config/full_system.hh"
#include "mem/physical.hh"
#include "sim/builder.hh"
#include "sim/eventq.hh"
#include "sim/host.hh"

using namespace std;
using namespace TheISA;

PhysicalMemory::PhysicalMemory(Params *p)
    : MemObject(p->name), pmemAddr(NULL), port(NULL), lat(p->latency),
      _params(p)
{
    if (params()->addrRange.size() % TheISA::PageBytes != 0)
        panic("Memory Size not divisible by page size\n");

    int map_flags = MAP_ANON | MAP_PRIVATE;
    pmemAddr = (uint8_t *)mmap(NULL, params()->addrRange.size(),
                               PROT_READ | PROT_WRITE, map_flags, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap!\n");
    }

    // If requested, initialize all the memory to 0
    if (params()->zero)
        memset(pmemAddr, 0, params()->addrRange.size());

    pagePtr = 0;
}

void
PhysicalMemory::init()
{
    if (!port)
        panic("PhysicalMemory not connected to anything!");
    port->sendStatusChange(Port::RangeChange);
}

PhysicalMemory::~PhysicalMemory()
{
    if (pmemAddr)
        munmap(pmemAddr, params()->addrRange.size());
    // Remove memPorts?
}

Addr
PhysicalMemory::new_page()
{
    Addr return_addr = pagePtr << LogVMPageSize;
    return_addr += params()->addrRange.start;

    ++pagePtr;
    return return_addr;
}

int
PhysicalMemory::deviceBlockSize()
{
    // Can accept any size request
    return 0;
}

Tick
PhysicalMemory::calculateLatency(PacketPtr pkt)
{
    return lat;
}
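//
// Load-locked/store-conditional (LL/SC) bookkeeping.  The two helpers
// below maintain lockedAddrList, which holds at most one LockedAddr
// record per execution context: trackLoadLocked() records (or updates)
// the masked address of a load-locked request, and checkLockedAddrList()
// is consulted on every write to clear any records the write invalidates
// and to decide whether a store-conditional succeeds.
//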
// Add load-locked to tracking list.  Should only be called if the
// operation is a load and the LOCKED flag is set.
void
PhysicalMemory::trackLoadLocked(Request *req)
{
    Addr paddr = LockedAddr::mask(req->getPaddr());

    // first we check if we already have a locked addr for this
    // xc.  Since each xc only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: cpu %d thread %d addr %#x\n",
                    req->getCpuNum(), req->getThreadNum(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // no record for this xc: need to allocate a new one
    DPRINTF(LLSC, "Adding lock record: cpu %d thread %d addr %#x\n",
            req->getCpuNum(), req->getThreadNum(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}


// Called on *writes* only... both regular stores and
// store-conditional operations.  Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
PhysicalMemory::checkLockedAddrList(Request *req)
{
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLocked = req->isLocked();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool success = !isLocked;

    // Iterate over list.  Note that there could be multiple matching
    // records, as more than one context could have done a load locked
    // to this location.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    while (i != lockedAddrList.end()) {

        if (i->addr == paddr) {
            // we have a matching address

            if (isLocked && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory
                // system can tell, the requesting context's lock is
                // still valid.
                DPRINTF(LLSC, "StCond success: cpu %d thread %d addr %#x\n",
                        req->getCpuNum(), req->getThreadNum(), paddr);
                success = true;
            }

            // Get rid of our record of this lock and advance to next
            DPRINTF(LLSC, "Erasing lock record: cpu %d thread %d addr %#x\n",
                    i->cpuNum, i->threadNum, paddr);
            i = lockedAddrList.erase(i);
        }
        else {
            // no match: advance to next record
            ++i;
        }
    }

    if (isLocked) {
        req->setScResult(success ? 1 : 0);
    }

    return success;
}
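//
// Actual access to the backing store, shared by the atomic and
// functional paths below.  Locked reads are first recorded via
// trackLoadLocked(); writes touch memory only if writeOK() approves
// them; invalidate/upgrade requests carry no data and are simply
// marked SATISFIED.
//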
void
PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= params()->addrRange.start &&
           pkt->getAddr() + pkt->getSize() <= params()->addrRange.start +
           params()->addrRange.size());

    if (pkt->isRead()) {
        if (pkt->req->isLocked()) {
            trackLoadLocked(pkt->req);
        }
        DPRINTF(MemoryAccess, "Performing Read of size %i on address 0x%x\n",
                pkt->getSize(), pkt->getAddr());
        memcpy(pkt->getPtr<uint8_t>(),
               pmemAddr + pkt->getAddr() - params()->addrRange.start,
               pkt->getSize());
    }
    else if (pkt->isWrite()) {
        if (writeOK(pkt->req)) {
            DPRINTF(MemoryAccess, "Performing Write of size %i on address 0x%x\n",
                    pkt->getSize(), pkt->getAddr());
            memcpy(pmemAddr + pkt->getAddr() - params()->addrRange.start,
                   pkt->getPtr<uint8_t>(), pkt->getSize());
        }
    }
    else if (pkt->isInvalidate()) {
        // upgrade or invalidate
        pkt->flags |= SATISFIED;
    }
    else {
        panic("unimplemented");
    }

    pkt->result = Packet::Success;
}

Port *
PhysicalMemory::getPort(const std::string &if_name, int idx)
{
    if (if_name == "port" && idx == -1) {
        if (port != NULL)
            panic("PhysicalMemory::getPort: additional port requested to memory!");
        port = new MemoryPort(name() + "-port", this);
        return port;
    } else if (if_name == "functional") {
        /* Special port for functional writes at startup and for the memtester. */
        return new MemoryPort(name() + "-funcport", this);
    } else {
        panic("PhysicalMemory::getPort: unknown port %s requested", if_name);
    }
}

void
PhysicalMemory::recvStatusChange(Port::Status status)
{
}

PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
                                       PhysicalMemory *_memory)
    : SimpleTimingPort(_name), memory(_memory)
{ }

void
PhysicalMemory::MemoryPort::recvStatusChange(Port::Status status)
{
    memory->recvStatusChange(status);
}

void
PhysicalMemory::MemoryPort::getDeviceAddressRanges(AddrRangeList &resp,
                                                   AddrRangeList &snoop)
{
    memory->getAddressRanges(resp, snoop);
}

void
PhysicalMemory::getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop)
{
    snoop.clear();
    resp.clear();
    resp.push_back(RangeSize(params()->addrRange.start,
                             params()->addrRange.size()));
}

int
PhysicalMemory::MemoryPort::deviceBlockSize()
{
    return memory->deviceBlockSize();
}

Tick
PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
{
    memory->doFunctionalAccess(pkt);
    return memory->calculateLatency(pkt);
}
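//
// Functional accesses must also observe data still sitting in this
// port's transmitList (packets queued for timing delivery), so the
// queue is walked and any overlapping packet is reconciled via
// fixPacket() before the backing store itself is accessed.
//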
void
PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
{
    // Since we are overriding the function, make sure to have the
    // implementation of the check for functional accesses here.
    std::list<std::pair<Tick,PacketPtr> >::iterator i = transmitList.begin();
    std::list<std::pair<Tick,PacketPtr> >::iterator end = transmitList.end();
    bool notDone = true;

    while (i != end && notDone) {
        PacketPtr target = i->second;
        // If the target contains data, and it overlaps the
        // probed request, need to update data
        if (target->intersect(pkt))
            notDone = fixPacket(pkt, target);
        i++;
    }

    // Default implementation of SimpleTimingPort::recvFunctional()
    // calls recvAtomic() and throws away the latency; we can save a
    // little here by just not calculating the latency.
    memory->doFunctionalAccess(pkt);
}

unsigned int
PhysicalMemory::drain(Event *de)
{
    int count = port->drain(de);
    if (count)
        changeState(Draining);
    else
        changeState(Drained);
    return count;
}
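//
// Checkpointing: serialize() streams the whole backing store through
// zlib into "<name>.physmem" in the checkpoint directory; unserialize()
// remaps a fresh anonymous region and copies back only the non-zero
// words of each 16KB chunk, so pages that were never written stay
// untouched on the host.
//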
void
PhysicalMemory::serialize(ostream &os)
{
    gzFile compressedMem;
    string filename = name() + ".physmem";

    SERIALIZE_SCALAR(filename);

    // write memory file
    string thefile = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n", filename);
    }

    compressedMem = gzdopen(fd, "wb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    if (gzwrite(compressedMem, pmemAddr, params()->addrRange.size()) !=
        params()->addrRange.size()) {
        fatal("Write failed on physical memory checkpoint file '%s'\n",
              filename);
    }

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

void
PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
{
    gzFile compressedMem;
    long *tempPage;
    long *pmem_current;
    uint64_t curSize;
    uint32_t bytesRead;
    const int chunkSize = 16384;

    string filename;

    UNSERIALIZE_SCALAR(filename);

    filename = cp->cptDir + "/" + filename;

    // mmap memory file
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    compressedMem = gzdopen(fd, "rb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // unmap file that was mmapped in the constructor
    // This is done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it
    munmap(pmemAddr, params()->addrRange.size());

    pmemAddr = (uint8_t *)mmap(NULL, params()->addrRange.size(),
                               PROT_READ | PROT_WRITE,
                               MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

    curSize = 0;
    tempPage = (long*)malloc(chunkSize);
    if (tempPage == NULL)
        fatal("Unable to malloc memory to read file %s\n", filename);

    /* Only copy bytes that are non-zero, so we don't give the VM system hell */
    while (curSize < params()->addrRange.size()) {
        bytesRead = gzread(compressedMem, tempPage, chunkSize);
        if (bytesRead != chunkSize &&
            bytesRead != params()->addrRange.size() - curSize)
            fatal("Read failed on physical memory checkpoint file '%s'"
                  " got %d bytes, expected %d or %d bytes\n",
                  filename, bytesRead, chunkSize,
                  params()->addrRange.size() - curSize);

        assert(bytesRead % sizeof(long) == 0);

        for (int x = 0; x < bytesRead / sizeof(long); x++)
        {
            if (*(tempPage + x) != 0) {
                pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
                *pmem_current = *(tempPage + x);
            }
        }
        curSize += bytesRead;
    }

    free(tempPage);

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}


BEGIN_DECLARE_SIM_OBJECT_PARAMS(PhysicalMemory)

    Param<string> file;
    Param<Range<Addr> > range;
    Param<Tick> latency;
    Param<bool> zero;

END_DECLARE_SIM_OBJECT_PARAMS(PhysicalMemory)

BEGIN_INIT_SIM_OBJECT_PARAMS(PhysicalMemory)

    INIT_PARAM_DFLT(file, "memory mapped file", ""),
    INIT_PARAM(range, "Device Address Range"),
    INIT_PARAM(latency, "Memory access latency"),
    INIT_PARAM(zero, "Zero initialize memory")

END_INIT_SIM_OBJECT_PARAMS(PhysicalMemory)

CREATE_SIM_OBJECT(PhysicalMemory)
{
    PhysicalMemory::Params *p = new PhysicalMemory::Params;
    p->name = getInstanceName();
    p->addrRange = range;
    p->latency = latency;
    p->zero = zero;
    return new PhysicalMemory(p);
}

REGISTER_SIM_OBJECT("PhysicalMemory", PhysicalMemory)