// memtest.cc revision 8853
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
272665SN/A * 282665SN/A * Authors: Erik Hallnor 292665SN/A * Steve Reinhardt 302SN/A */ 312SN/A 322SN/A// FIX ME: make trackBlkAddr use blocksize from actual cache, not hard coded 332SN/A 341298SN/A#include <iomanip> 351298SN/A#include <set> 361259SN/A#include <string> 372SN/A#include <vector> 382SN/A 39146SN/A#include "base/misc.hh" 40146SN/A#include "base/statistics.hh" 417632SBrad.Beckmann@amd.com#include "cpu/testers/memtest/memtest.hh" 428232Snate@binkert.org#include "debug/MemTest.hh" 433348SN/A#include "mem/mem_object.hh" 448229Snate@binkert.org#include "mem/packet.hh" 453348SN/A#include "mem/port.hh" 463348SN/A#include "mem/request.hh" 4756SN/A#include "sim/sim_events.hh" 48695SN/A#include "sim/stats.hh" 498832SAli.Saidi@ARM.com#include "sim/system.hh" 502SN/A 512SN/Ausing namespace std; 522SN/A 531298SN/Aint TESTER_ALLOCATOR=0; 541298SN/A 553187SN/Abool 563349SN/AMemTest::CpuPort::recvTiming(PacketPtr pkt) 573187SN/A{ 584898SN/A if (pkt->isResponse()) { 594898SN/A memtest->completeRequest(pkt); 604898SN/A } else { 614898SN/A // must be snoop upcall 624898SN/A assert(pkt->isRequest()); 634898SN/A assert(pkt->getDest() == Packet::Broadcast); 644898SN/A } 653187SN/A return true; 663187SN/A} 673187SN/A 683187SN/ATick 693349SN/AMemTest::CpuPort::recvAtomic(PacketPtr pkt) 703187SN/A{ 714895SN/A // must be snoop upcall 724895SN/A assert(pkt->isRequest()); 734895SN/A assert(pkt->getDest() == Packet::Broadcast); 747823Ssteve.reinhardt@amd.com return curTick(); 753187SN/A} 763187SN/A 773187SN/Avoid 783349SN/AMemTest::CpuPort::recvFunctional(PacketPtr pkt) 793187SN/A{ 803204SN/A //Do nothing if we see one come through 817823Ssteve.reinhardt@amd.com// if (curTick() != 0)//Supress warning durring initialization 823340SN/A// warn("Functional Writes not implemented in MemTester\n"); 833262SN/A //Need to find any response values that intersect and update 843204SN/A return; 853187SN/A} 863187SN/A 873187SN/Avoid 
888711Sandreas.hansson@arm.comMemTest::CpuPort::recvRangeChange() 893187SN/A{ 903187SN/A} 913187SN/A 923187SN/Avoid 933187SN/AMemTest::CpuPort::recvRetry() 943187SN/A{ 953187SN/A memtest->doRetry(); 963187SN/A} 973187SN/A 983262SN/Avoid 993349SN/AMemTest::sendPkt(PacketPtr pkt) { 1003262SN/A if (atomic) { 1013262SN/A cachePort.sendAtomic(pkt); 1023262SN/A completeRequest(pkt); 1033262SN/A } 1043262SN/A else if (!cachePort.sendTiming(pkt)) { 1057544SN/A DPRINTF(MemTest, "accessRetry setting to true\n"); 1067544SN/A 1077544SN/A // 1087544SN/A // dma requests should never be retried 1097544SN/A // 1107544SN/A if (issueDmas) { 1117544SN/A panic("Nacked DMA requests are not supported\n"); 1127544SN/A } 1133262SN/A accessRetry = true; 1143262SN/A retryPkt = pkt; 1157544SN/A } else { 1167544SN/A if (issueDmas) { 1177544SN/A dmaOutstanding = true; 1187544SN/A } 1193262SN/A } 1203262SN/A 1213262SN/A} 1223262SN/A 1235034SN/AMemTest::MemTest(const Params *p) 1245034SN/A : MemObject(p), 1252SN/A tickEvent(this), 1263187SN/A cachePort("test", this), 1273187SN/A funcPort("functional", this), 1288853Sandreas.hansson@arm.com funcProxy(funcPort), 1293187SN/A retryPkt(NULL), 1303187SN/A// mainMem(main_mem), 1313187SN/A// checkMem(check_mem), 1325034SN/A size(p->memory_size), 1335034SN/A percentReads(p->percent_reads), 1345034SN/A percentFunctional(p->percent_functional), 1355034SN/A percentUncacheable(p->percent_uncacheable), 1367544SN/A issueDmas(p->issue_dmas), 1378832SAli.Saidi@ARM.com masterId(p->sys->getMasterId(name())), 1385034SN/A progressInterval(p->progress_interval), 1395034SN/A nextProgressMessage(p->progress_interval), 1405034SN/A percentSourceUnaligned(p->percent_source_unaligned), 1415034SN/A percentDestUnaligned(p->percent_dest_unaligned), 1425034SN/A maxLoads(p->max_loads), 1438436SBrad.Beckmann@amd.com atomic(p->atomic), 1448436SBrad.Beckmann@amd.com suppress_func_warnings(p->suppress_func_warnings) 1452SN/A{ 1467544SN/A id = TESTER_ALLOCATOR++; 1477544SN/A 
1483187SN/A // Needs to be masked off once we know the block size. 1495034SN/A traceBlockAddr = p->trace_addr; 1502SN/A baseAddr1 = 0x100000; 1512SN/A baseAddr2 = 0x400000; 1522SN/A uncacheAddr = 0x800000; 1532SN/A 1542SN/A // set up counters 1552SN/A noResponseCycles = 0; 1562SN/A numReads = 0; 1578436SBrad.Beckmann@amd.com numWrites = 0; 1585606SN/A schedule(tickEvent, 0); 1591298SN/A 1603187SN/A accessRetry = false; 1617544SN/A dmaOutstanding = false; 1623187SN/A} 1633187SN/A 1643187SN/APort * 1653187SN/AMemTest::getPort(const std::string &if_name, int idx) 1663187SN/A{ 1673187SN/A if (if_name == "functional") 1683187SN/A return &funcPort; 1693187SN/A else if (if_name == "test") 1703187SN/A return &cachePort; 1713187SN/A else 1723187SN/A panic("No Such Port\n"); 1733187SN/A} 1743187SN/A 1753187SN/Avoid 1763187SN/AMemTest::init() 1773187SN/A{ 1783187SN/A // By the time init() is called, the ports should be hooked up. 1793187SN/A blockSize = cachePort.peerBlockSize(); 1803187SN/A blockAddrMask = blockSize - 1; 1813187SN/A traceBlockAddr = blockAddr(traceBlockAddr); 1823187SN/A 1834579SN/A // initial memory contents for both physical memory and functional 1844579SN/A // memory should be 0; no need to initialize them. 1852SN/A} 1862SN/A 1872SN/A 1882SN/Avoid 1893349SN/AMemTest::completeRequest(PacketPtr pkt) 1902SN/A{ 1914628SN/A Request *req = pkt->req; 1924628SN/A 1937544SN/A if (issueDmas) { 1947544SN/A dmaOutstanding = false; 1957544SN/A } 1967544SN/A 1978436SBrad.Beckmann@amd.com DPRINTF(MemTest, "completing %s at address %x (blk %x) %s\n", 1984628SN/A pkt->isWrite() ? "write" : "read", 1998436SBrad.Beckmann@amd.com req->getPaddr(), blockAddr(req->getPaddr()), 2008436SBrad.Beckmann@amd.com pkt->isError() ? 
"error" : "success"); 2014628SN/A 2023187SN/A MemTestSenderState *state = 2033187SN/A dynamic_cast<MemTestSenderState *>(pkt->senderState); 2043187SN/A 2053187SN/A uint8_t *data = state->data; 2063187SN/A uint8_t *pkt_data = pkt->getPtr<uint8_t>(); 2073187SN/A 2081298SN/A //Remove the address from the list of outstanding 2094628SN/A std::set<unsigned>::iterator removeAddr = 2104628SN/A outstandingAddrs.find(req->getPaddr()); 2111298SN/A assert(removeAddr != outstandingAddrs.end()); 2121298SN/A outstandingAddrs.erase(removeAddr); 2131298SN/A 2148436SBrad.Beckmann@amd.com if (pkt->isError()) { 2158436SBrad.Beckmann@amd.com if (!suppress_func_warnings) { 2168436SBrad.Beckmann@amd.com warn("Functional Access failed for %x at %x\n", 2178436SBrad.Beckmann@amd.com pkt->isWrite() ? "write" : "read", req->getPaddr()); 2182SN/A } 2198436SBrad.Beckmann@amd.com } else { 2208436SBrad.Beckmann@amd.com if (pkt->isRead()) { 2218436SBrad.Beckmann@amd.com if (memcmp(pkt_data, data, pkt->getSize()) != 0) { 2228436SBrad.Beckmann@amd.com panic("%s: read of %x (blk %x) @ cycle %d " 2238436SBrad.Beckmann@amd.com "returns %x, expected %x\n", name(), 2248436SBrad.Beckmann@amd.com req->getPaddr(), blockAddr(req->getPaddr()), curTick(), 2258436SBrad.Beckmann@amd.com *pkt_data, *data); 2268436SBrad.Beckmann@amd.com } 2272SN/A 2288436SBrad.Beckmann@amd.com numReads++; 2298436SBrad.Beckmann@amd.com numReadsStat++; 2302SN/A 2318436SBrad.Beckmann@amd.com if (numReads == (uint64_t)nextProgressMessage) { 2328436SBrad.Beckmann@amd.com ccprintf(cerr, "%s: completed %d read, %d write accesses @%d\n", 2338436SBrad.Beckmann@amd.com name(), numReads, numWrites, curTick()); 2348436SBrad.Beckmann@amd.com nextProgressMessage += progressInterval; 2358436SBrad.Beckmann@amd.com } 2368436SBrad.Beckmann@amd.com 2378436SBrad.Beckmann@amd.com if (maxLoads != 0 && numReads >= maxLoads) 2388436SBrad.Beckmann@amd.com exitSimLoop("maximum number of loads reached"); 2398436SBrad.Beckmann@amd.com } else { 
2408436SBrad.Beckmann@amd.com assert(pkt->isWrite()); 2418853Sandreas.hansson@arm.com funcProxy.writeBlob(req->getPaddr(), pkt_data, req->getSize()); 2428436SBrad.Beckmann@amd.com numWrites++; 2438436SBrad.Beckmann@amd.com numWritesStat++; 2442SN/A } 2452SN/A } 2462SN/A 2472SN/A noResponseCycles = 0; 2483187SN/A delete state; 2492SN/A delete [] data; 2503187SN/A delete pkt->req; 2513187SN/A delete pkt; 2522SN/A} 2532SN/A 2542SN/Avoid 2552SN/AMemTest::regStats() 2562SN/A{ 257729SN/A using namespace Stats; 2582SN/A 259695SN/A numReadsStat 2602SN/A .name(name() + ".num_reads") 2612SN/A .desc("number of read accesses completed") 2622SN/A ; 2632SN/A 264695SN/A numWritesStat 2652SN/A .name(name() + ".num_writes") 2662SN/A .desc("number of write accesses completed") 2672SN/A ; 2682SN/A 269695SN/A numCopiesStat 2702SN/A .name(name() + ".num_copies") 2712SN/A .desc("number of copy accesses completed") 2722SN/A ; 2732SN/A} 2742SN/A 2752SN/Avoid 2762SN/AMemTest::tick() 2772SN/A{ 2782SN/A if (!tickEvent.scheduled()) 2797823Ssteve.reinhardt@amd.com schedule(tickEvent, curTick() + ticks(1)); 2802SN/A 2811298SN/A if (++noResponseCycles >= 500000) { 2827544SN/A if (issueDmas) { 2837544SN/A cerr << "DMA tester "; 2847544SN/A } 2857823Ssteve.reinhardt@amd.com cerr << name() << ": deadlocked at cycle " << curTick() << endl; 2862SN/A fatal(""); 2872SN/A } 2882SN/A 2897544SN/A if (accessRetry || (issueDmas && dmaOutstanding)) { 2907544SN/A DPRINTF(MemTest, "MemTester waiting on accessRetry or DMA response\n"); 2912SN/A return; 2922SN/A } 2932SN/A 2942SN/A //make new request 2951899SN/A unsigned cmd = random() % 100; 2961899SN/A unsigned offset = random() % size; 2972SN/A unsigned base = random() % 2; 2982SN/A uint64_t data = random(); 2992SN/A unsigned access_size = random() % 4; 3005736SN/A bool uncacheable = (random() % 100) < percentUncacheable; 3012SN/A 3027544SN/A unsigned dma_access_size = random() % 4; 3037544SN/A 3041298SN/A //If we aren't doing copies, use id as offset, and do 
a false sharing 3051298SN/A //mem tester 3063187SN/A //We can eliminate the lower bits of the offset, and then use the id 3073187SN/A //to offset within the blks 3084628SN/A offset = blockAddr(offset); 3093187SN/A offset += id; 3103187SN/A access_size = 0; 3117544SN/A dma_access_size = 0; 3121298SN/A 3133187SN/A Request *req = new Request(); 3145736SN/A Request::Flags flags; 3153187SN/A Addr paddr; 3162SN/A 3175736SN/A if (uncacheable) { 3185736SN/A flags.set(Request::UNCACHEABLE); 3193187SN/A paddr = uncacheAddr + offset; 3207544SN/A } else { 3213187SN/A paddr = ((base) ? baseAddr1 : baseAddr2) + offset; 3222SN/A } 3237657Ssteve.reinhardt@amd.com bool do_functional = (random() % 100 < percentFunctional) && !uncacheable; 3242SN/A 3257544SN/A if (issueDmas) { 3267544SN/A paddr &= ~((1 << dma_access_size) - 1); 3278832SAli.Saidi@ARM.com req->setPhys(paddr, 1 << dma_access_size, flags, masterId); 3287544SN/A req->setThreadContext(id,0); 3297544SN/A } else { 3307544SN/A paddr &= ~((1 << access_size) - 1); 3318832SAli.Saidi@ARM.com req->setPhys(paddr, 1 << access_size, flags, masterId); 3327544SN/A req->setThreadContext(id,0); 3337544SN/A } 3347544SN/A assert(req->getSize() == 1); 3353187SN/A 3363187SN/A uint8_t *result = new uint8_t[8]; 3372SN/A 3382SN/A if (cmd < percentReads) { 3392SN/A // read 3401298SN/A 3414628SN/A // For now we only allow one outstanding request per address 3424628SN/A // per tester This means we assume CPU does write forwarding 3434628SN/A // to reads that alias something in the cpu store buffer. 3443282SN/A if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) { 3454203SN/A delete [] result; 3463282SN/A delete req; 3473282SN/A return; 3483282SN/A } 3494628SN/A 3504628SN/A outstandingAddrs.insert(paddr); 3511298SN/A 3523187SN/A // ***** NOTE FOR RON: I'm not sure how to access checkMem. 
- Kevin 3538853Sandreas.hansson@arm.com funcProxy.readBlob(req->getPaddr(), result, req->getSize()); 3543187SN/A 3554628SN/A DPRINTF(MemTest, 3567657Ssteve.reinhardt@amd.com "id %d initiating %sread at addr %x (blk %x) expecting %x\n", 3577657Ssteve.reinhardt@amd.com id, do_functional ? "functional " : "", req->getPaddr(), 3587657Ssteve.reinhardt@amd.com blockAddr(req->getPaddr()), *result); 3593187SN/A 3604022SN/A PacketPtr pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast); 3614660SN/A pkt->setSrc(0); 3623187SN/A pkt->dataDynamicArray(new uint8_t[req->getSize()]); 3633187SN/A MemTestSenderState *state = new MemTestSenderState(result); 3643187SN/A pkt->senderState = state; 3653187SN/A 3667657Ssteve.reinhardt@amd.com if (do_functional) { 3678436SBrad.Beckmann@amd.com assert(pkt->needsResponse()); 3688436SBrad.Beckmann@amd.com pkt->setSuppressFuncError(); 3693187SN/A cachePort.sendFunctional(pkt); 3703204SN/A completeRequest(pkt); 371145SN/A } else { 3723262SN/A sendPkt(pkt); 373145SN/A } 3743187SN/A } else { 3752SN/A // write 3761298SN/A 3774628SN/A // For now we only allow one outstanding request per addreess 3784628SN/A // per tester. This means we assume CPU does write forwarding 3794628SN/A // to reads that alias something in the cpu store buffer. 3803282SN/A if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) { 3813283SN/A delete [] result; 3823282SN/A delete req; 3833282SN/A return; 3843282SN/A } 3853282SN/A 3864628SN/A outstandingAddrs.insert(paddr); 3871298SN/A 3887657Ssteve.reinhardt@amd.com DPRINTF(MemTest, "initiating %swrite at addr %x (blk %x) value %x\n", 3897657Ssteve.reinhardt@amd.com do_functional ? 
"functional " : "", req->getPaddr(), 3907657Ssteve.reinhardt@amd.com blockAddr(req->getPaddr()), data & 0xff); 3914628SN/A 3924022SN/A PacketPtr pkt = new Packet(req, MemCmd::WriteReq, Packet::Broadcast); 3934660SN/A pkt->setSrc(0); 3943187SN/A uint8_t *pkt_data = new uint8_t[req->getSize()]; 3953187SN/A pkt->dataDynamicArray(pkt_data); 3963187SN/A memcpy(pkt_data, &data, req->getSize()); 3973187SN/A MemTestSenderState *state = new MemTestSenderState(result); 3983187SN/A pkt->senderState = state; 3993187SN/A 4007657Ssteve.reinhardt@amd.com if (do_functional) { 4018436SBrad.Beckmann@amd.com pkt->setSuppressFuncError(); 4023187SN/A cachePort.sendFunctional(pkt); 4033262SN/A completeRequest(pkt); 404145SN/A } else { 4053262SN/A sendPkt(pkt); 406145SN/A } 4073187SN/A } 4082SN/A} 4092SN/A 4102SN/Avoid 4113187SN/AMemTest::doRetry() 4122SN/A{ 4133187SN/A if (cachePort.sendTiming(retryPkt)) { 4147544SN/A DPRINTF(MemTest, "accessRetry setting to false\n"); 4153187SN/A accessRetry = false; 4163187SN/A retryPkt = NULL; 4173187SN/A } 4182SN/A} 4192SN/A 4205314SN/A 4215314SN/Avoid 4225314SN/AMemTest::printAddr(Addr a) 4235314SN/A{ 4245314SN/A cachePort.printAddr(a); 4255314SN/A} 4265315SN/A 4275315SN/A 4285315SN/AMemTest * 4295315SN/AMemTestParams::create() 4305315SN/A{ 4315315SN/A return new MemTest(this); 4325315SN/A} 433