// physical.cc revision 6216
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
272665Ssaidi@eecs.umich.edu * 282665Ssaidi@eecs.umich.edu * Authors: Ron Dreslinski 292914Ssaidi@eecs.umich.edu * Ali Saidi 302391SN/A */ 312391SN/A 322391SN/A#include <sys/types.h> 332391SN/A#include <sys/mman.h> 342391SN/A#include <errno.h> 352391SN/A#include <fcntl.h> 362391SN/A#include <unistd.h> 372391SN/A#include <zlib.h> 382391SN/A 392391SN/A#include <iostream> 402391SN/A#include <string> 412391SN/A 423348Sbinkertn@umich.edu#include "arch/isa_traits.hh" 432391SN/A#include "base/misc.hh" 445399Ssaidi@eecs.umich.edu#include "base/random.hh" 456216Snate@binkert.org#include "base/types.hh" 462391SN/A#include "config/full_system.hh" 473879Ssaidi@eecs.umich.edu#include "mem/packet_access.hh" 482394SN/A#include "mem/physical.hh" 492415SN/A#include "sim/eventq.hh" 502394SN/A 512391SN/Ausing namespace std; 522423SN/Ausing namespace TheISA; 532391SN/A 544762Snate@binkert.orgPhysicalMemory::PhysicalMemory(const Params *p) 555477Snate@binkert.org : MemObject(p), pmemAddr(NULL), pagePtr(0), 565477Snate@binkert.org lat(p->latency), lat_var(p->latency_var), 575477Snate@binkert.org cachedSize(params()->range.size()), cachedStart(params()->range.start) 582391SN/A{ 594762Snate@binkert.org if (params()->range.size() % TheISA::PageBytes != 0) 602391SN/A panic("Memory Size not divisible by page size\n"); 612391SN/A 625477Snate@binkert.org if (params()->null) 635477Snate@binkert.org return; 645477Snate@binkert.org 652391SN/A int map_flags = MAP_ANON | MAP_PRIVATE; 664918Snate@binkert.org pmemAddr = (uint8_t *)mmap(NULL, params()->range.size(), 674918Snate@binkert.org PROT_READ | PROT_WRITE, map_flags, -1, 0); 682391SN/A 693012Ssaidi@eecs.umich.edu if (pmemAddr == (void *)MAP_FAILED) { 702391SN/A perror("mmap"); 712391SN/A fatal("Could not mmap!\n"); 722391SN/A } 732391SN/A 743751Sgblack@eecs.umich.edu //If requested, initialize all the memory to 0 754762Snate@binkert.org if (p->zero) 764762Snate@binkert.org memset(pmemAddr, 0, p->range.size()); 772391SN/A} 782391SN/A 
792541SN/Avoid 802541SN/APhysicalMemory::init() 812541SN/A{ 824470Sstever@eecs.umich.edu if (ports.size() == 0) { 834470Sstever@eecs.umich.edu fatal("PhysicalMemory object %s is unconnected!", name()); 844470Sstever@eecs.umich.edu } 854470Sstever@eecs.umich.edu 864467Sstever@eecs.umich.edu for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) { 874467Sstever@eecs.umich.edu if (*pi) 884467Sstever@eecs.umich.edu (*pi)->sendStatusChange(Port::RangeChange); 894467Sstever@eecs.umich.edu } 902541SN/A} 912541SN/A 922391SN/APhysicalMemory::~PhysicalMemory() 932391SN/A{ 943012Ssaidi@eecs.umich.edu if (pmemAddr) 954762Snate@binkert.org munmap((char*)pmemAddr, params()->range.size()); 962416SN/A //Remove memPorts? 972391SN/A} 982391SN/A 992391SN/AAddr 1002391SN/APhysicalMemory::new_page() 1012391SN/A{ 1023012Ssaidi@eecs.umich.edu Addr return_addr = pagePtr << LogVMPageSize; 1034040Ssaidi@eecs.umich.edu return_addr += start(); 1042391SN/A 1053012Ssaidi@eecs.umich.edu ++pagePtr; 1062391SN/A return return_addr; 1072391SN/A} 1082391SN/A 1092408SN/Aint 1102408SN/APhysicalMemory::deviceBlockSize() 1112408SN/A{ 1122409SN/A //Can accept anysize request 1132409SN/A return 0; 1142408SN/A} 1152408SN/A 1163012Ssaidi@eecs.umich.eduTick 1173349Sbinkertn@umich.eduPhysicalMemory::calculateLatency(PacketPtr pkt) 1183012Ssaidi@eecs.umich.edu{ 1195399Ssaidi@eecs.umich.edu Tick latency = lat; 1205399Ssaidi@eecs.umich.edu if (lat_var != 0) 1215399Ssaidi@eecs.umich.edu latency += random_mt.random<Tick>(0, lat_var); 1225399Ssaidi@eecs.umich.edu return latency; 1233012Ssaidi@eecs.umich.edu} 1242413SN/A 1253170Sstever@eecs.umich.edu 1263170Sstever@eecs.umich.edu 1273170Sstever@eecs.umich.edu// Add load-locked to tracking list. Should only be called if the 1286076Sgblack@eecs.umich.edu// operation is a load and the LLSC flag is set. 
1293170Sstever@eecs.umich.eduvoid 1304626Sstever@eecs.umich.eduPhysicalMemory::trackLoadLocked(PacketPtr pkt) 1313170Sstever@eecs.umich.edu{ 1324626Sstever@eecs.umich.edu Request *req = pkt->req; 1333170Sstever@eecs.umich.edu Addr paddr = LockedAddr::mask(req->getPaddr()); 1343170Sstever@eecs.umich.edu 1353170Sstever@eecs.umich.edu // first we check if we already have a locked addr for this 1363170Sstever@eecs.umich.edu // xc. Since each xc only gets one, we just update the 1373170Sstever@eecs.umich.edu // existing record with the new address. 1383170Sstever@eecs.umich.edu list<LockedAddr>::iterator i; 1393170Sstever@eecs.umich.edu 1403170Sstever@eecs.umich.edu for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) { 1413170Sstever@eecs.umich.edu if (i->matchesContext(req)) { 1425714Shsul@eecs.umich.edu DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n", 1435714Shsul@eecs.umich.edu req->contextId(), paddr); 1443170Sstever@eecs.umich.edu i->addr = paddr; 1453170Sstever@eecs.umich.edu return; 1463170Sstever@eecs.umich.edu } 1473170Sstever@eecs.umich.edu } 1483170Sstever@eecs.umich.edu 1493170Sstever@eecs.umich.edu // no record for this xc: need to allocate a new one 1505714Shsul@eecs.umich.edu DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n", 1515714Shsul@eecs.umich.edu req->contextId(), paddr); 1523170Sstever@eecs.umich.edu lockedAddrList.push_front(LockedAddr(req)); 1533170Sstever@eecs.umich.edu} 1543170Sstever@eecs.umich.edu 1553170Sstever@eecs.umich.edu 1563170Sstever@eecs.umich.edu// Called on *writes* only... both regular stores and 1573170Sstever@eecs.umich.edu// store-conditional operations. Check for conventional stores which 1583170Sstever@eecs.umich.edu// conflict with locked addresses, and for success/failure of store 1593170Sstever@eecs.umich.edu// conditionals. 
bool
PhysicalMemory::checkLockedAddrList(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLLSC = pkt->isLLSC();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool success = !isLLSC;

    // Iterate over list.  Note that there could be multiple matching
    // records, as more than one context could have done a load locked
    // to this location.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    while (i != lockedAddrList.end()) {

        if (i->addr == paddr) {
            // we have a matching address: any store invalidates the
            // lock, but only the owning context's store-conditional
            // may succeed.

            if (isLLSC && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory
                // system can tell, the requesting context's lock is
                // still valid.
                DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
                        req->contextId(), paddr);
                success = true;
            }

            // Get rid of our record of this lock and advance to next
            DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
                    i->contextId, paddr);
            i = lockedAddrList.erase(i);
        }
        else {
            // no match: advance to next record
            ++i;
        }
    }

    // Report SC success/failure back to the CPU via the request.
    if (isLLSC) {
        req->setExtraData(success ? 1 : 0);
    }

    return success;
}


#if TRACING_ON

// Trace one access whose size matches a native integer type; prints
// the data payload along with size and address.
#define CASE(A, T) \
  case sizeof(T): \
    DPRINTF(MemoryAccess, A " of size %i on address 0x%x data 0x%x\n", \
            pkt->getSize(), pkt->getAddr(), pkt->get<T>()); \
  break


// Trace any access: dispatch on size so 1/2/4/8-byte accesses show
// their data; other sizes print only size and address.
#define TRACE_PACKET(A) \
    do { \
        switch (pkt->getSize()) { \
          CASE(A, uint64_t); \
          CASE(A, uint32_t); \
          CASE(A, uint16_t); \
          CASE(A, uint8_t); \
          default: \
            DPRINTF(MemoryAccess, A " of size %i on address 0x%x\n", \
                    pkt->getSize(), pkt->getAddr()); \
        } \
    } while (0)

#else

#define TRACE_PACKET(A)

#endif

/**
 * Perform an atomic-mode access: read/write/swap the backing store
 * (when one exists) and return the access latency.  Also implements
 * LL/SC tracking and (conditional) swap semantics.
 */
Tick
PhysicalMemory::doAtomicAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());

    // Another agent (e.g. a cache) has claimed this request; the
    // memory must not respond.
    if (pkt->memInhibitAsserted()) {
        DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
                pkt->getAddr());
        return 0;
    }

    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->cmd == MemCmd::SwapReq) {
        IntReg overwrite_val;
        bool overwrite_mem;
        uint64_t condition_val64;
        uint32_t condition_val32;

        if (!pmemAddr)
            panic("Swap only works if there is real memory (i.e. null=False)");
        assert(sizeof(IntReg) >= pkt->getSize());

        overwrite_mem = true;
        // keep a copy of our possible write value, and copy what is at the
        // memory address into the packet
        std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
        std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());

        if (pkt->req->isCondSwap()) {
            // Conditional swap: only overwrite memory if the current
            // contents match the supplied comparison value.
            if (pkt->getSize() == sizeof(uint64_t)) {
                condition_val64 = pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
                                             sizeof(uint64_t));
            } else if (pkt->getSize() == sizeof(uint32_t)) {
                condition_val32 = (uint32_t)pkt->req->getExtraData();
                overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
                                             sizeof(uint32_t));
            } else
                panic("Invalid size for conditional read/write\n");
        }

        if (overwrite_mem)
            std::memcpy(hostAddr, &overwrite_val, pkt->getSize());

        TRACE_PACKET("Read/Write");
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLLSC()) {
            trackLoadLocked(pkt);
        }
        if (pmemAddr)
            memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET("Read");
    } else if (pkt->isWrite()) {
        // writeOK() checks the locked-address list (SC success) and
        // invalidates any conflicting lock records.
        if (writeOK(pkt)) {
            if (pmemAddr)
                memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
            TRACE_PACKET("Write");
        }
    } else if (pkt->isInvalidate()) {
        //upgrade or invalidate
        if (pkt->needsResponse()) {
            pkt->makeAtomicResponse();
        }
    } else {
        panic("unimplemented");
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }
    return calculateLatency(pkt);
}


/**
 * Perform a functional (debug/backdoor) access: moves data without
 * affecting timing or coherence state.  Also services Print requests.
 */
void
PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
{
    assert(pkt->getAddr() >= start() &&
           pkt->getAddr() + pkt->getSize() <= start() + size());


    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();

    if (pkt->isRead()) {
        if (pmemAddr)
            memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET("Read");
        pkt->makeAtomicResponse();
    } else if (pkt->isWrite()) {
        if (pmemAddr)
            memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
        TRACE_PACKET("Write");
        pkt->makeAtomicResponse();
    } else if (pkt->isPrint()) {
        Packet::PrintReqState *prs =
            dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
        // Need to call printLabels() explicitly since we're not going
        // through printObj().
        prs->printLabels();
        // Right now we just print the single byte at the specified address.
        // NOTE(review): unlike the read/write paths, this dereferences
        // hostAddr without a pmemAddr guard — looks unsafe when
        // null=True; confirm Print requests never reach a null memory.
        ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
    } else {
        panic("PhysicalMemory: unimplemented functional command %s",
              pkt->cmdString());
    }
}


/**
 * Return (creating on demand) the port with the given index.  The
 * special name "functional" always creates a fresh backdoor port.
 */
Port *
PhysicalMemory::getPort(const std::string &if_name, int idx)
{
    // Accept request for "functional" port for backwards compatibility
    // with places where this function is called from C++.  I'd prefer
    // to move all these into Python someday.
    if (if_name == "functional") {
        return new MemoryPort(csprintf("%s-functional", name()), this);
    }

    if (if_name != "port") {
        panic("PhysicalMemory::getPort: unknown port %s requested", if_name);
    }

    // A negative index would be converted to a huge unsigned value by
    // the comparison below and then index the vector out of bounds;
    // fail loudly instead.
    if (idx < 0) {
        panic("PhysicalMemory::getPort: invalid port index %d", idx);
    }

    if ((size_t)idx >= ports.size()) {
        ports.resize(idx+1);
    }

    if (ports[idx] != NULL) {
        panic("PhysicalMemory::getPort: port %d already assigned", idx);
    }

    MemoryPort *port =
        new MemoryPort(csprintf("%s-port%d", name(), idx), this);

    ports[idx] = port;
    return port;
}


// Status changes from peers are ignored; memory has no state to update.
void
PhysicalMemory::recvStatusChange(Port::Status status)
{
}

PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
                                       PhysicalMemory *_memory)
    : SimpleTimingPort(_name, _memory), memory(_memory)
{ }

void
PhysicalMemory::MemoryPort::recvStatusChange(Port::Status status)
{
    memory->recvStatusChange(status);
}

void
PhysicalMemory::MemoryPort::getDeviceAddressRanges(AddrRangeList &resp,
                                                   bool &snoop)
{
    memory->getAddressRanges(resp, snoop);
}

/**
 * Report the single address range this memory responds to.  Memory
 * never snoops.
 */
void
PhysicalMemory::getAddressRanges(AddrRangeList &resp, bool &snoop)
{
    snoop = false;
    resp.clear();
    resp.push_back(RangeSize(start(), params()->range.size()));
}

int
PhysicalMemory::MemoryPort::deviceBlockSize()
{
    return memory->deviceBlockSize();
}

Tick
PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory->doAtomicAccess(pkt);
}

void
PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(memory->name());

    if (!checkFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        memory->doFunctionalAccess(pkt);
    }

    pkt->popLabel();
}

/**
 * Drain all ports; returns the number of ports still busy (0 means
 * fully drained immediately).
 */
unsigned int
PhysicalMemory::drain(Event *de)
{
    int count = 0;
    for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
        count += (*pi)->drain(de);
    }

    if (count)
        changeState(Draining);
    else
        changeState(Drained);
    return count;
}

/**
 * Checkpoint the memory contents as a gzip-compressed image next to
 * the checkpoint.  Null memories serialize nothing.
 */
void
PhysicalMemory::serialize(ostream &os)
{
    if (!pmemAddr)
        return;

    gzFile compressedMem;
    string filename = name() + ".physmem";

    SERIALIZE_SCALAR(filename);

    // write memory file
    string thefile = Checkpoint::dir() + "/" + filename.c_str();
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open physical memory checkpoint file '%s'\n", filename);
    }

    compressedMem = gzdopen(fd, "wb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // NOTE(review): gzwrite takes/returns int, so a memory larger than
    // INT_MAX bytes would need chunked writes — confirm configured
    // sizes stay below that.
    if (gzwrite(compressedMem, pmemAddr, params()->range.size()) !=
        params()->range.size()) {
        fatal("Write failed on physical memory checkpoint file '%s'\n",
              filename);
    }

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);
}

/**
 * Restore memory contents from a checkpoint image.  The backing store
 * is re-mmap()ed first, and only non-zero words are copied in so the
 * host VM system is not forced to materialize untouched pages.
 */
void
PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
{
    if (!pmemAddr)
        return;

    gzFile compressedMem;
    long *tempPage;
    long *pmem_current;
    uint64_t curSize;
    int bytesRead;      // int, not unsigned: gzread returns -1 on error
    const int chunkSize = 16384;

    string filename;

    UNSERIALIZE_SCALAR(filename);

    filename = cp->cptDir + "/" + filename;

    // mmap memoryfile
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Can't open physical memory checkpoint file '%s'", filename);
    }

    compressedMem = gzdopen(fd, "rb");
    if (compressedMem == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // unmap file that was mmaped in the constructor
    // This is done here to make sure that gzip and open don't muck with our
    // nice large space of memory before we reallocate it
    munmap((char*)pmemAddr, params()->range.size());

    pmemAddr = (uint8_t *)mmap(NULL, params()->range.size(),
        PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pmemAddr == (void *)MAP_FAILED) {
        perror("mmap");
        fatal("Could not mmap physical memory!\n");
    }

    curSize = 0;
    tempPage = (long*)malloc(chunkSize);
    if (tempPage == NULL)
        fatal("Unable to malloc memory to read file %s\n", filename);

    /* Only copy bytes that are non-zero, so we don't give the VM system hell */
    while (curSize < params()->range.size()) {
        bytesRead = gzread(compressedMem, tempPage, chunkSize);
        // gzread returns -1 on a read/decompression error; the old
        // unsigned bytesRead silently wrapped this into a huge count.
        if (bytesRead < 0)
            fatal("Read failed on physical memory checkpoint file '%s'\n",
                  filename);
        if (bytesRead != chunkSize &&
            (uint64_t)bytesRead != params()->range.size() - curSize)
            fatal("Read failed on physical memory checkpoint file '%s'"
                  " got %d bytes, expected %d or %d bytes\n",
                  filename, bytesRead, chunkSize,
                  params()->range.size() - curSize);

        assert(bytesRead % sizeof(long) == 0);

        for (size_t x = 0; x < bytesRead / sizeof(long); x++)
        {
            if (*(tempPage+x) != 0) {
                pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
                *pmem_current = *(tempPage+x);
            }
        }
        curSize += bytesRead;
    }

    free(tempPage);

    if (gzclose(compressedMem))
        fatal("Close failed on physical memory checkpoint file '%s'\n",
              filename);

}

PhysicalMemory *
PhysicalMemoryParams::create()
{
    return new PhysicalMemory(this);
}