physical.cc revision 5314
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
272665Ssaidi@eecs.umich.edu * 282665Ssaidi@eecs.umich.edu * Authors: Ron Dreslinski 292914Ssaidi@eecs.umich.edu * Ali Saidi 302391SN/A */ 312391SN/A 322391SN/A#include <sys/types.h> 332391SN/A#include <sys/mman.h> 342391SN/A#include <errno.h> 352391SN/A#include <fcntl.h> 362391SN/A#include <unistd.h> 372391SN/A#include <zlib.h> 382391SN/A 392391SN/A#include <iostream> 402391SN/A#include <string> 412391SN/A 423348Sbinkertn@umich.edu#include "arch/isa_traits.hh" 432391SN/A#include "base/misc.hh" 442391SN/A#include "config/full_system.hh" 453879Ssaidi@eecs.umich.edu#include "mem/packet_access.hh" 462394SN/A#include "mem/physical.hh" 472415SN/A#include "sim/eventq.hh" 483348Sbinkertn@umich.edu#include "sim/host.hh" 492394SN/A 502391SN/Ausing namespace std; 512423SN/Ausing namespace TheISA; 522391SN/A 534762Snate@binkert.orgPhysicalMemory::PhysicalMemory(const Params *p) 544762Snate@binkert.org : MemObject(p), pmemAddr(NULL), lat(p->latency) 552391SN/A{ 564762Snate@binkert.org if (params()->range.size() % TheISA::PageBytes != 0) 572391SN/A panic("Memory Size not divisible by page size\n"); 582391SN/A 592391SN/A int map_flags = MAP_ANON | MAP_PRIVATE; 604918Snate@binkert.org pmemAddr = (uint8_t *)mmap(NULL, params()->range.size(), 614918Snate@binkert.org PROT_READ | PROT_WRITE, map_flags, -1, 0); 622391SN/A 633012Ssaidi@eecs.umich.edu if (pmemAddr == (void *)MAP_FAILED) { 642391SN/A perror("mmap"); 652391SN/A fatal("Could not mmap!\n"); 662391SN/A } 672391SN/A 683751Sgblack@eecs.umich.edu //If requested, initialize all the memory to 0 694762Snate@binkert.org if (p->zero) 704762Snate@binkert.org memset(pmemAddr, 0, p->range.size()); 713751Sgblack@eecs.umich.edu 723012Ssaidi@eecs.umich.edu pagePtr = 0; 735275Ssaidi@eecs.umich.edu 745275Ssaidi@eecs.umich.edu cachedSize = params()->range.size(); 755275Ssaidi@eecs.umich.edu cachedStart = params()->range.start; 765275Ssaidi@eecs.umich.edu 772391SN/A} 782391SN/A 792541SN/Avoid 802541SN/APhysicalMemory::init() 812541SN/A{ 
824470Sstever@eecs.umich.edu if (ports.size() == 0) { 834470Sstever@eecs.umich.edu fatal("PhysicalMemory object %s is unconnected!", name()); 844470Sstever@eecs.umich.edu } 854470Sstever@eecs.umich.edu 864467Sstever@eecs.umich.edu for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) { 874467Sstever@eecs.umich.edu if (*pi) 884467Sstever@eecs.umich.edu (*pi)->sendStatusChange(Port::RangeChange); 894467Sstever@eecs.umich.edu } 902541SN/A} 912541SN/A 922391SN/APhysicalMemory::~PhysicalMemory() 932391SN/A{ 943012Ssaidi@eecs.umich.edu if (pmemAddr) 954762Snate@binkert.org munmap((char*)pmemAddr, params()->range.size()); 962416SN/A //Remove memPorts? 972391SN/A} 982391SN/A 992391SN/AAddr 1002391SN/APhysicalMemory::new_page() 1012391SN/A{ 1023012Ssaidi@eecs.umich.edu Addr return_addr = pagePtr << LogVMPageSize; 1034040Ssaidi@eecs.umich.edu return_addr += start(); 1042391SN/A 1053012Ssaidi@eecs.umich.edu ++pagePtr; 1062391SN/A return return_addr; 1072391SN/A} 1082391SN/A 1092408SN/Aint 1102408SN/APhysicalMemory::deviceBlockSize() 1112408SN/A{ 1122409SN/A //Can accept anysize request 1132409SN/A return 0; 1142408SN/A} 1152408SN/A 1163012Ssaidi@eecs.umich.eduTick 1173349Sbinkertn@umich.eduPhysicalMemory::calculateLatency(PacketPtr pkt) 1183012Ssaidi@eecs.umich.edu{ 1193012Ssaidi@eecs.umich.edu return lat; 1203012Ssaidi@eecs.umich.edu} 1212413SN/A 1223170Sstever@eecs.umich.edu 1233170Sstever@eecs.umich.edu 1243170Sstever@eecs.umich.edu// Add load-locked to tracking list. Should only be called if the 1253170Sstever@eecs.umich.edu// operation is a load and the LOCKED flag is set. 
1263170Sstever@eecs.umich.eduvoid 1274626Sstever@eecs.umich.eduPhysicalMemory::trackLoadLocked(PacketPtr pkt) 1283170Sstever@eecs.umich.edu{ 1294626Sstever@eecs.umich.edu Request *req = pkt->req; 1303170Sstever@eecs.umich.edu Addr paddr = LockedAddr::mask(req->getPaddr()); 1313170Sstever@eecs.umich.edu 1323170Sstever@eecs.umich.edu // first we check if we already have a locked addr for this 1333170Sstever@eecs.umich.edu // xc. Since each xc only gets one, we just update the 1343170Sstever@eecs.umich.edu // existing record with the new address. 1353170Sstever@eecs.umich.edu list<LockedAddr>::iterator i; 1363170Sstever@eecs.umich.edu 1373170Sstever@eecs.umich.edu for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) { 1383170Sstever@eecs.umich.edu if (i->matchesContext(req)) { 1393170Sstever@eecs.umich.edu DPRINTF(LLSC, "Modifying lock record: cpu %d thread %d addr %#x\n", 1403170Sstever@eecs.umich.edu req->getCpuNum(), req->getThreadNum(), paddr); 1413170Sstever@eecs.umich.edu i->addr = paddr; 1423170Sstever@eecs.umich.edu return; 1433170Sstever@eecs.umich.edu } 1443170Sstever@eecs.umich.edu } 1453170Sstever@eecs.umich.edu 1463170Sstever@eecs.umich.edu // no record for this xc: need to allocate a new one 1473170Sstever@eecs.umich.edu DPRINTF(LLSC, "Adding lock record: cpu %d thread %d addr %#x\n", 1483170Sstever@eecs.umich.edu req->getCpuNum(), req->getThreadNum(), paddr); 1493170Sstever@eecs.umich.edu lockedAddrList.push_front(LockedAddr(req)); 1503170Sstever@eecs.umich.edu} 1513170Sstever@eecs.umich.edu 1523170Sstever@eecs.umich.edu 1533170Sstever@eecs.umich.edu// Called on *writes* only... both regular stores and 1543170Sstever@eecs.umich.edu// store-conditional operations. Check for conventional stores which 1553170Sstever@eecs.umich.edu// conflict with locked addresses, and for success/failure of store 1563170Sstever@eecs.umich.edu// conditionals. 
// Validate a write against the outstanding load-locked records.
// A plain store always succeeds but invalidates every matching lock;
// a store-conditional succeeds only if the writing context still holds
// a valid lock on the address.  The result of a conditional store is
// also written into the request's extra data (1 = success, 0 = fail).
bool
PhysicalMemory::checkLockedAddrList(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLocked = pkt->isLocked();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool success = !isLocked;

    // Iterate over list.  Note that there could be multiple matching
    // records, as more than one context could have done a load locked
    // to this location.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    while (i != lockedAddrList.end()) {

        if (i->addr == paddr) {
            // we have a matching address

            if (isLocked && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory
                // system can tell, the requesting context's lock is
                // still valid.
                DPRINTF(LLSC, "StCond success: cpu %d thread %d addr %#x\n",
                        req->getCpuNum(), req->getThreadNum(), paddr);
                success = true;
            }

            // Any write to a locked address clears the lock: get rid of
            // our record and advance to the next one.
            DPRINTF(LLSC, "Erasing lock record: cpu %d thread %d addr %#x\n",
                    i->cpuNum, i->threadNum, paddr);
            i = lockedAddrList.erase(i);
        }
        else {
            // no match: advance to next record
            ++i;
        }
    }

    // Report conditional-store outcome back through the request.
    if (isLocked) {
        req->setExtraData(success ? 1 : 0);
    }

    return success;
}


#if TRACING_ON

// Trace one access whose size matches an integer type, including the data.
#define CASE(A, T)                                                      \
  case sizeof(T):                                                       \
    DPRINTF(MemoryAccess, A " of size %i on address 0x%x data 0x%x\n",  \
            pkt->getSize(), pkt->getAddr(), pkt->get<T>());             \
    break


// Trace an access; falls back to an address-only message for sizes that
// don't match a standard integer width.
#define TRACE_PACKET(A)                                                 \
    do {                                                                \
        switch (pkt->getSize()) {                                       \
          CASE(A, uint64_t);                                            \
          CASE(A, uint32_t);                                            \
          CASE(A, uint16_t);                                            \
          CASE(A, uint8_t);                                             \
          default:                                                      \
            DPRINTF(MemoryAccess, A " of size %i on address 0x%x\n",    \
                    pkt->getSize(), pkt->getAddr());                    \
        }                                                               \
    } while (0)

#else

#define TRACE_PACKET(A)

#endif

// Perform an atomic-mode access: carry out the packet's command against
// the backing store, convert the packet into a response if one is
// needed, and return the access latency.
Tick
PhysicalMemory::doAtomicAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());

    // Another device (e.g. a cache with a dirty copy) has claimed this
    // request; memory must not respond.
    if (pkt->memInhibitAsserted()) {
        DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
                pkt->getAddr());
        return 0;
    }

    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->cmd == MemCmd::SwapReq) {
        // Atomic (possibly conditional) swap: exchange packet data with
        // memory contents.
        IntReg overwrite_val;
        bool overwrite_mem;
        uint64_t condition_val64;
        uint32_t condition_val32;

        assert(sizeof(IntReg) >= pkt->getSize());

        overwrite_mem = true;
        // keep a copy of our possible write value, and copy what is at the
        // memory address into the packet
        std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
        std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());

        if (pkt->req->isCondSwap()) {
            // Compare-and-swap: only write if memory matches the
            // condition value carried in the request's extra data.
            if (pkt->getSize() == sizeof(uint64_t)) {
                condition_val64 = pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
                                             sizeof(uint64_t));
            } else if (pkt->getSize() == sizeof(uint32_t)) {
                condition_val32 = (uint32_t)pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
                                             sizeof(uint32_t));
            } else
                panic("Invalid size for conditional read/write\n");
        }

        if (overwrite_mem)
            std::memcpy(hostAddr, &overwrite_val, pkt->getSize());

        TRACE_PACKET("Read/Write");
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLocked()) {
            // Load-locked: remember the lock for a later store-conditional.
            trackLoadLocked(pkt);
        }
        memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET("Read");
    } else if (pkt->isWrite()) {
        // writeOK() arbitrates store-conditional success and clears
        // conflicting lock records; failed conditionals don't touch memory.
        if (writeOK(pkt)) {
            memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
            TRACE_PACKET("Write");
        }
    } else if (pkt->isInvalidate()) {
        //upgrade or invalidate
        if (pkt->needsResponse()) {
            pkt->makeAtomicResponse();
        }
    } else {
        panic("unimplemented");
    }

    // makeAtomicResponse() flips the command to a response, so this is a
    // no-op for the invalidate path that already responded above.
    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }
    return calculateLatency(pkt);
}


// Perform a functional (debug, zero-time) access against the backing
// store.  Reads and writes respond immediately; print requests dump the
// addressed byte to the print state's output stream.
void
PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());


    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->isRead()) {
        memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET("Read");
        pkt->makeAtomicResponse();
    } else if (pkt->isWrite()) {
        memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
        TRACE_PACKET("Write");
        pkt->makeAtomicResponse();
    } else if (pkt->isPrint()) {
        Packet::PrintReqState *prs =
            dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
        // A print request must carry a PrintReqState; catch a mismatched
        // senderState here instead of dereferencing a null pointer.
        assert(prs != NULL);
        prs->printLabels();
        ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
    } else {
        panic("PhysicalMemory: unimplemented functional command %s",
              pkt->cmdString());
    }
}


// Hand out (and lazily create) ports.  "functional" ports are unnamed
// extras used from C++; "port" requests come from the Python config with
// an explicit index.
Port *
PhysicalMemory::getPort(const std::string &if_name, int idx)
{
    // Accept request for "functional" port for backwards compatibility
    // with places where this function is called from C++.  I'd prefer
    // to move all these into Python someday.
    if (if_name == "functional") {
        return new MemoryPort(csprintf("%s-functional", name()), this);
    }

    if (if_name != "port") {
        panic("PhysicalMemory::getPort: unknown port %s requested", if_name);
    }

    if (idx >= ports.size()) {
        ports.resize(idx+1);
    }

    if (ports[idx] != NULL) {
        panic("PhysicalMemory::getPort: port %d already assigned", idx);
    }

    MemoryPort *port =
        new MemoryPort(csprintf("%s-port%d", name(), idx), this);

    ports[idx] = port;
    return port;
}


// Status changes from peers require no action from memory.
void
PhysicalMemory::recvStatusChange(Port::Status status)
{
}

PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
                                       PhysicalMemory *_memory)
    : SimpleTimingPort(_name), memory(_memory)
{ }

// Forward peer status changes to the owning memory object.
void
PhysicalMemory::MemoryPort::recvStatusChange(Port::Status status)
{
    memory->recvStatusChange(status);
}

void
PhysicalMemory::MemoryPort::getDeviceAddressRanges(AddrRangeList &resp,
                                                   bool &snoop)
{
    memory->getAddressRanges(resp, snoop);
}

// Memory covers exactly its configured range and never snoops.
void
PhysicalMemory::getAddressRanges(AddrRangeList &resp, bool &snoop)
{
    snoop = false;
    resp.clear();
    resp.push_back(RangeSize(start(), params()->range.size()));
}

int
PhysicalMemory::MemoryPort::deviceBlockSize()
{
    return memory->deviceBlockSize();
}

Tick
PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory->doAtomicAccess(pkt);
}

void
PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
{
    // Label this memory in any print-request output while we hold the pkt.
    pkt->pushLabel(memory->name());

    // checkFunctional() satisfies the request from queued packets if
    // possible; only touch the backing store when it doesn't.
    if (!checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory->doFunctionalAccess(pkt);
    }

    pkt->popLabel();
}

// Drain all ports; report how many are still busy and move to the
// Draining (work outstanding) or Drained (quiescent) state accordingly.
unsigned int
PhysicalMemory::drain(Event *de)
{
    int count = 0;
    for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
        count += (*pi)->drain(de);
    }

    if (count)
        changeState(Draining);
    else
        changeState(Drained);
    return count;
}

// Checkpoint the memory image: gzip-compress the entire backing store
// into <checkpoint dir>/<name>.physmem and record the filename.
void
PhysicalMemory::serialize(ostream &os)
{
    gzFile compressedMem;
    string filename = name() + ".physmem";

    SERIALIZE_SCALAR(filename);

    // write memory file
    string thefile = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n", filename);
    }

    compressedMem = gzdopen(fd, "wb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    if (gzwrite(compressedMem, pmemAddr, params()->range.size()) !=
        params()->range.size()) {
        fatal("Write failed on physical memory checkpoint file '%s'\n",
              filename);
    }

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

// Restore the memory image from a checkpoint: re-mmap the backing store
// and decompress the saved gzip image into it, copying only non-zero
// words so untouched pages stay unbacked by the host VM system.
void
PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
{
    const int chunkSize = 16384;

    // Locate the compressed image inside the checkpoint directory.
    string filename;
    UNSERIALIZE_SCALAR(filename);
    filename = cp->cptDir + "/" + filename;

    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    gzFile compressedMem = gzdopen(fd, "rb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // unmap file that was mmaped in the constructor
    // This is done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it
    munmap((char*)pmemAddr, params()->range.size());

    pmemAddr = (uint8_t *)mmap(NULL, params()->range.size(),
                               PROT_READ | PROT_WRITE,
                               MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

    long *tempPage = (long*)malloc(chunkSize);
    if (tempPage == NULL)
        fatal("Unable to malloc memory to read file %s\n", filename);

    /* Only copy bytes that are non-zero, so we don't give the VM system hell */
    uint64_t curSize = 0;
    while (curSize < params()->range.size()) {
        uint32_t bytesRead = gzread(compressedMem, tempPage, chunkSize);

        // Every read must be either a full chunk or the final partial one.
        if (bytesRead != chunkSize &&
            bytesRead != params()->range.size() - curSize)
            fatal("Read failed on physical memory checkpoint file '%s'"
                  " got %d bytes, expected %d or %d bytes\n",
                  filename, bytesRead, chunkSize,
                  params()->range.size() - curSize);

        assert(bytesRead % sizeof(long) == 0);

        for (int x = 0; x < bytesRead / sizeof(long); x++) {
            if (*(tempPage + x) != 0) {
                long *pmem_current =
                    (long*)(pmemAddr + curSize + x * sizeof(long));
                *pmem_current = *(tempPage + x);
            }
        }

        curSize += bytesRead;
    }

    free(tempPage);

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);

}

// Python-side factory hook: build a PhysicalMemory from its params object.
PhysicalMemory *
PhysicalMemoryParams::create()
{
    return new PhysicalMemory(this);
}