physical.cc revision 5222:bb733a878f85
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Ali Saidi
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>

#include <iostream>
#include <string>

#include "arch/isa_traits.hh"
#include "base/misc.hh"
#include "config/full_system.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "sim/eventq.hh"
#include "sim/host.hh"

using namespace std;
using namespace TheISA;

// Back the simulated physical memory with an anonymous, private,
// demand-paged host mapping so untouched simulated memory costs no
// host RAM.
PhysicalMemory::PhysicalMemory(const Params *p)
    : MemObject(p), pmemAddr(NULL), lat(p->latency)
{
    if (params()->range.size() % TheISA::PageBytes != 0)
        panic("Memory Size not divisible by page size\n");

    int map_flags = MAP_ANON | MAP_PRIVATE;
    pmemAddr = (uint8_t *)mmap(NULL, params()->range.size(),
                               PROT_READ | PROT_WRITE, map_flags, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap!\n");
    }

    // If requested, initialize all the memory to 0
    if (p->zero)
        memset(pmemAddr, 0, p->range.size());

    // Next free page handed out by new_page(), counted from start().
    pagePtr = 0;
}

// Sanity-check connectivity and announce our address range to every
// connected port.
void
PhysicalMemory::init()
{
    if (ports.size() == 0) {
        fatal("PhysicalMemory object %s is unconnected!", name());
    }

    for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
        if (*pi)
            (*pi)->sendStatusChange(Port::RangeChange);
    }
}

PhysicalMemory::~PhysicalMemory()
{
    if (pmemAddr)
        munmap((char*)pmemAddr, params()->range.size());
    // Remove memPorts?
}

// Hand out the physical address of the next unused page (simple bump
// allocator; pages are never returned).
Addr
PhysicalMemory::new_page()
{
    Addr return_addr = pagePtr << LogVMPageSize;
    return_addr += start();

    ++pagePtr;
    return return_addr;
}

int
PhysicalMemory::deviceBlockSize()
{
    // Can accept any size request
    return 0;
}

// Fixed latency regardless of packet type or size.
Tick
PhysicalMemory::calculateLatency(PacketPtr pkt)
{
    return lat;
}



// Add load-locked to tracking list.  Should only be called if the
// operation is a load and the LOCKED flag is set.
void
PhysicalMemory::trackLoadLocked(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());

    // first we check if we already have a locked addr for this
    // xc.  Since each xc only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: cpu %d thread %d addr %#x\n",
                    req->getCpuNum(), req->getThreadNum(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // no record for this xc: need to allocate a new one
    DPRINTF(LLSC, "Adding lock record: cpu %d thread %d addr %#x\n",
            req->getCpuNum(), req->getThreadNum(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}


// Called on *writes* only... both regular stores and
// store-conditional operations.  Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.  Returns whether the write should be allowed to
// proceed (always true for plain stores; for store-conditionals, true
// only if this context's lock is still intact).
bool
PhysicalMemory::checkLockedAddrList(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLocked = pkt->isLocked();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool success = !isLocked;

    // Iterate over list.  Note that there could be multiple matching
    // records, as more than one context could have done a load locked
    // to this location.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    while (i != lockedAddrList.end()) {

        if (i->addr == paddr) {
            // we have a matching address

            if (isLocked && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory
                // system can tell, the requesting context's lock is
                // still valid.
                DPRINTF(LLSC, "StCond success: cpu %d thread %d addr %#x\n",
                        req->getCpuNum(), req->getThreadNum(), paddr);
                success = true;
            }

            // Get rid of our record of this lock and advance to next.
            // Any write to a locked address invalidates all locks on
            // it, including other contexts' locks.
            DPRINTF(LLSC, "Erasing lock record: cpu %d thread %d addr %#x\n",
                    i->cpuNum, i->threadNum, paddr);
            i = lockedAddrList.erase(i);
        }
        else {
            // no match: advance to next record
            ++i;
        }
    }

    if (isLocked) {
        // Report store-conditional outcome back through the request.
        req->setExtraData(success ? 1 : 0);
    }

    return success;
}


#if TRACING_ON

// Trace one access including its data payload when the size matches a
// standard integer width.
#define CASE(A, T)                                                      \
  case sizeof(T):                                                       \
    DPRINTF(MemoryAccess, A " of size %i on address 0x%x data 0x%x\n",  \
            pkt->getSize(), pkt->getAddr(), pkt->get<T>());             \
    break


#define TRACE_PACKET(A)                                                 \
    do {                                                                \
        switch (pkt->getSize()) {                                       \
          CASE(A, uint64_t);                                            \
          CASE(A, uint32_t);                                            \
          CASE(A, uint16_t);                                            \
          CASE(A, uint8_t);                                             \
          default:                                                      \
            DPRINTF(MemoryAccess, A " of size %i on address 0x%x\n",    \
                    pkt->getSize(), pkt->getAddr());                    \
        }                                                               \
    } while (0)

#else

#define TRACE_PACKET(A)

#endif

// Perform an atomic-mode access: swaps (conditional and
// unconditional), reads (with LL tracking), writes (with SC
// checking), and invalidates.  Returns the access latency.
Tick
PhysicalMemory::doAtomicAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());

    if (pkt->memInhibitAsserted()) {
        // Another device (e.g. a cache) has claimed this request.
        DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
                pkt->getAddr());
        return 0;
    }

    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->cmd == MemCmd::SwapReq) {
        IntReg overwrite_val;
        bool overwrite_mem;
        uint64_t condition_val64;
        uint32_t condition_val32;

        assert(sizeof(IntReg) >= pkt->getSize());

        overwrite_mem = true;
        // keep a copy of our possible write value, and copy what is at the
        // memory address into the packet
        std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
        std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());

        if (pkt->req->isCondSwap()) {
            // Only write if memory matches the supplied comparison value.
            if (pkt->getSize() == sizeof(uint64_t)) {
                condition_val64 = pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
                                             sizeof(uint64_t));
            } else if (pkt->getSize() == sizeof(uint32_t)) {
                condition_val32 = (uint32_t)pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
                                             sizeof(uint32_t));
            } else
                panic("Invalid size for conditional read/write\n");
        }

        if (overwrite_mem)
            std::memcpy(hostAddr, &overwrite_val, pkt->getSize());

        TRACE_PACKET("Read/Write");
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLocked()) {
            trackLoadLocked(pkt);
        }
        memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET("Read");
    } else if (pkt->isWrite()) {
        // writeOK() consults the locked-address list; a failed
        // store-conditional is silently dropped here.
        if (writeOK(pkt)) {
            memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
            TRACE_PACKET("Write");
        }
    } else if (pkt->isInvalidate()) {
        // upgrade or invalidate
        if (pkt->needsResponse()) {
            pkt->makeAtomicResponse();
        }
    } else {
        panic("unimplemented");
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }
    return calculateLatency(pkt);
}


// Perform a functional (debug) access: plain reads and writes with no
// timing or coherence side effects.
void
PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());


    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->cmd == MemCmd::ReadReq) {
        memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET("Read");
    } else if (pkt->cmd == MemCmd::WriteReq) {
        memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
        TRACE_PACKET("Write");
    } else {
        panic("PhysicalMemory: unimplemented functional command %s",
              pkt->cmdString());
    }

    pkt->makeAtomicResponse();
}


// Return the port with the given name/index, creating it on first
// request.  "functional" ports are created fresh on every call.
Port *
PhysicalMemory::getPort(const std::string &if_name, int idx)
{
    // Accept request for "functional" port for backwards compatibility
    // with places where this function is called from C++.  I'd prefer
    // to move all these into Python someday.
    if (if_name == "functional") {
        return new MemoryPort(csprintf("%s-functional", name()), this);
    }

    if (if_name != "port") {
        panic("PhysicalMemory::getPort: unknown port %s requested", if_name);
    }

    if (idx >= ports.size()) {
        ports.resize(idx+1);
    }

    if (ports[idx] != NULL) {
        panic("PhysicalMemory::getPort: port %d already assigned", idx);
    }

    MemoryPort *port =
        new MemoryPort(csprintf("%s-port%d", name(), idx), this);

    ports[idx] = port;
    return port;
}


// Memory itself has no reaction to peer status changes.
void
PhysicalMemory::recvStatusChange(Port::Status status)
{
}

PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
                                       PhysicalMemory *_memory)
    : SimpleTimingPort(_name), memory(_memory)
{ }

// Port callbacks below simply forward to the owning PhysicalMemory.

void
PhysicalMemory::MemoryPort::recvStatusChange(Port::Status status)
{
    memory->recvStatusChange(status);
}

void
PhysicalMemory::MemoryPort::getDeviceAddressRanges(AddrRangeList &resp,
                                                   bool &snoop)
{
    memory->getAddressRanges(resp, snoop);
}

// Report the single contiguous range this memory covers; memory never
// snoops.
void
PhysicalMemory::getAddressRanges(AddrRangeList &resp, bool &snoop)
{
    snoop = false;
    resp.clear();
    resp.push_back(RangeSize(start(), params()->range.size()));
}

int
PhysicalMemory::MemoryPort::deviceBlockSize()
{
    return memory->deviceBlockSize();
}

Tick
PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory->doAtomicAccess(pkt);
}

void
PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
{
    if (!checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory->doFunctionalAccess(pkt);
    }
}

// Drain all ports; returns the number of ports still busy (0 means we
// are fully drained).
unsigned int
PhysicalMemory::drain(Event *de)
{
    int count = 0;
    for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
        count += (*pi)->drain(de);
    }

    if (count)
        changeState(Draining);
    else
        changeState(Drained);
    return count;
}

// Checkpoint the memory image as a gzip-compressed sidecar file and
// record its name in the checkpoint.
void
PhysicalMemory::serialize(ostream &os)
{
    gzFile compressedMem;
    string filename = name() + ".physmem";

    SERIALIZE_SCALAR(filename);

    // write memory file
    string thefile = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n", filename);
    }

    compressedMem = gzdopen(fd, "wb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    if (gzwrite(compressedMem, pmemAddr, params()->range.size()) !=
        params()->range.size()) {
        fatal("Write failed on physical memory checkpoint file '%s'\n",
              filename);
    }

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

// Restore the memory image from a checkpoint: remap a zero-filled
// region and copy in only the non-zero words so untouched pages stay
// unbacked on the host.
void
PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
{
    gzFile compressedMem;
    long *tempPage;
    long *pmem_current;
    uint64_t curSize;
    uint32_t bytesRead;
    const int chunkSize = 16384;


    string filename;

    UNSERIALIZE_SCALAR(filename);

    filename = cp->cptDir + "/" + filename;

    // mmap memoryfile
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    compressedMem = gzdopen(fd, "rb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // unmap file that was mmaped in the constructor
    // This is done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it
    munmap((char*)pmemAddr, params()->range.size());

    pmemAddr = (uint8_t *)mmap(NULL, params()->range.size(),
        PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

    curSize = 0;
    tempPage = (long*)malloc(chunkSize);
    if (tempPage == NULL)
        fatal("Unable to malloc memory to read file %s\n", filename);

    /* Only copy bytes that are non-zero, so we don't give the VM system hell */
    while (curSize < params()->range.size()) {
        bytesRead = gzread(compressedMem, tempPage, chunkSize);
        // A short read is only legal on the final chunk.
        if (bytesRead != chunkSize &&
            bytesRead != params()->range.size() - curSize)
            fatal("Read failed on physical memory checkpoint file '%s'"
                  " got %d bytes, expected %d or %d bytes\n",
                  filename, bytesRead, chunkSize,
                  params()->range.size() - curSize);

        assert(bytesRead % sizeof(long) == 0);

        for (int x = 0; x < bytesRead/sizeof(long); x++)
        {
            if (*(tempPage+x) != 0) {
                pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
                *pmem_current = *(tempPage+x);
            }
        }
        curSize += bytesRead;
    }

    free(tempPage);

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);

}

PhysicalMemory *
PhysicalMemoryParams::create()
{
    return new PhysicalMemory(this);
}