memtest.cc revision 8832:247fee427324
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Erik Hallnor
29 *          Steve Reinhardt
30 */
31
32// FIX ME: make trackBlkAddr use blocksize from actual cache, not hard coded
33
34#include <iomanip>
35#include <set>
36#include <string>
37#include <vector>
38
39#include "base/misc.hh"
40#include "base/statistics.hh"
41#include "cpu/testers/memtest/memtest.hh"
42#include "debug/MemTest.hh"
43#include "mem/mem_object.hh"
44#include "mem/packet.hh"
45#include "mem/port.hh"
46#include "mem/request.hh"
47#include "sim/sim_events.hh"
48#include "sim/stats.hh"
49#include "sim/system.hh"
50
51using namespace std;
52
// Global counter used to hand each MemTest instance a unique id; the id
// selects a distinct byte offset within a cache block (see tick()).
int TESTER_ALLOCATOR=0;
54
55bool
56MemTest::CpuPort::recvTiming(PacketPtr pkt)
57{
58    if (pkt->isResponse()) {
59        memtest->completeRequest(pkt);
60    } else {
61        // must be snoop upcall
62        assert(pkt->isRequest());
63        assert(pkt->getDest() == Packet::Broadcast);
64    }
65    return true;
66}
67
68Tick
69MemTest::CpuPort::recvAtomic(PacketPtr pkt)
70{
71    // must be snoop upcall
72    assert(pkt->isRequest());
73    assert(pkt->getDest() == Packet::Broadcast);
74    return curTick();
75}
76
77void
78MemTest::CpuPort::recvFunctional(PacketPtr pkt)
79{
80    //Do nothing if we see one come through
81//    if (curTick() != 0)//Supress warning durring initialization
82//        warn("Functional Writes not implemented in MemTester\n");
83    //Need to find any response values that intersect and update
84    return;
85}
86
void
MemTest::CpuPort::recvRangeChange()
{
    // The tester generates fixed physical addresses, so address range
    // changes from the peer are intentionally ignored.
}
91
void
MemTest::CpuPort::recvRetry()
{
    // The peer is ready again: have the tester resend the nacked packet.
    memtest->doRetry();
}
97
98void
99MemTest::sendPkt(PacketPtr pkt) {
100    if (atomic) {
101        cachePort.sendAtomic(pkt);
102        completeRequest(pkt);
103    }
104    else if (!cachePort.sendTiming(pkt)) {
105        DPRINTF(MemTest, "accessRetry setting to true\n");
106
107        //
108        // dma requests should never be retried
109        //
110        if (issueDmas) {
111            panic("Nacked DMA requests are not supported\n");
112        }
113        accessRetry = true;
114        retryPkt = pkt;
115    } else {
116        if (issueDmas) {
117            dmaOutstanding = true;
118        }
119    }
120
121}
122
// Construct a tester from its generated parameter struct.  Most members
// are copied straight from the params; the rest are fixed test-pattern
// constants and counters initialized in the body.
MemTest::MemTest(const Params *p)
    : MemObject(p),
      tickEvent(this),
      cachePort("test", this),
      funcPort("functional", this),
      retryPkt(NULL),
//      mainMem(main_mem),
//      checkMem(check_mem),
      size(p->memory_size),
      percentReads(p->percent_reads),
      percentFunctional(p->percent_functional),
      percentUncacheable(p->percent_uncacheable),
      issueDmas(p->issue_dmas),
      masterId(p->sys->getMasterId(name())),
      progressInterval(p->progress_interval),
      nextProgressMessage(p->progress_interval),
      percentSourceUnaligned(p->percent_source_unaligned),
      percentDestUnaligned(p->percent_dest_unaligned),
      maxLoads(p->max_loads),
      atomic(p->atomic),
      suppress_func_warnings(p->suppress_func_warnings)
{
    // Unique per-tester id; used as a byte offset within a block so
    // multiple testers exercise false sharing (see tick()).
    id = TESTER_ALLOCATOR++;

    // Needs to be masked off once we know the block size.
    traceBlockAddr = p->trace_addr;

    // Fixed base addresses: two cacheable regions plus one uncacheable.
    baseAddr1 = 0x100000;
    baseAddr2 = 0x400000;
    uncacheAddr = 0x800000;

    // set up counters
    noResponseCycles = 0;
    numReads = 0;
    numWrites = 0;

    // Kick off the first access at tick 0.
    schedule(tickEvent, 0);

    accessRetry = false;
    dmaOutstanding = false;
}
162
163Port *
164MemTest::getPort(const std::string &if_name, int idx)
165{
166    if (if_name == "functional")
167        return &funcPort;
168    else if (if_name == "test")
169        return &cachePort;
170    else
171        panic("No Such Port\n");
172}
173
void
MemTest::init()
{
    // By the time init() is called, the ports should be hooked up.
    blockSize = cachePort.peerBlockSize();
    blockAddrMask = blockSize - 1;
    // Now that the block size is known, align the traced address to a
    // block boundary (the ctor could not do this yet).
    traceBlockAddr = blockAddr(traceBlockAddr);

    // initial memory contents for both physical memory and functional
    // memory should be 0; no need to initialize them.
}
185
186
187void
188MemTest::completeRequest(PacketPtr pkt)
189{
190    Request *req = pkt->req;
191
192    if (issueDmas) {
193        dmaOutstanding = false;
194    }
195
196    DPRINTF(MemTest, "completing %s at address %x (blk %x) %s\n",
197            pkt->isWrite() ? "write" : "read",
198            req->getPaddr(), blockAddr(req->getPaddr()),
199            pkt->isError() ? "error" : "success");
200
201    MemTestSenderState *state =
202        dynamic_cast<MemTestSenderState *>(pkt->senderState);
203
204    uint8_t *data = state->data;
205    uint8_t *pkt_data = pkt->getPtr<uint8_t>();
206
207    //Remove the address from the list of outstanding
208    std::set<unsigned>::iterator removeAddr =
209        outstandingAddrs.find(req->getPaddr());
210    assert(removeAddr != outstandingAddrs.end());
211    outstandingAddrs.erase(removeAddr);
212
213    if (pkt->isError()) {
214        if (!suppress_func_warnings) {
215          warn("Functional Access failed for %x at %x\n",
216               pkt->isWrite() ? "write" : "read", req->getPaddr());
217        }
218    } else {
219        if (pkt->isRead()) {
220            if (memcmp(pkt_data, data, pkt->getSize()) != 0) {
221                panic("%s: read of %x (blk %x) @ cycle %d "
222                      "returns %x, expected %x\n", name(),
223                      req->getPaddr(), blockAddr(req->getPaddr()), curTick(),
224                      *pkt_data, *data);
225            }
226
227            numReads++;
228            numReadsStat++;
229
230            if (numReads == (uint64_t)nextProgressMessage) {
231                ccprintf(cerr, "%s: completed %d read, %d write accesses @%d\n",
232                         name(), numReads, numWrites, curTick());
233                nextProgressMessage += progressInterval;
234            }
235
236            if (maxLoads != 0 && numReads >= maxLoads)
237                exitSimLoop("maximum number of loads reached");
238        } else {
239            assert(pkt->isWrite());
240            funcPort.writeBlob(req->getPaddr(), pkt_data, req->getSize());
241            numWrites++;
242            numWritesStat++;
243        }
244    }
245
246    noResponseCycles = 0;
247    delete state;
248    delete [] data;
249    delete pkt->req;
250    delete pkt;
251}
252
253void
254MemTest::regStats()
255{
256    using namespace Stats;
257
258    numReadsStat
259        .name(name() + ".num_reads")
260        .desc("number of read accesses completed")
261        ;
262
263    numWritesStat
264        .name(name() + ".num_writes")
265        .desc("number of write accesses completed")
266        ;
267
268    numCopiesStat
269        .name(name() + ".num_copies")
270        .desc("number of copy accesses completed")
271        ;
272}
273
// Issue one randomly generated access per cycle: pick a command, address
// and data at random, build the Request/Packet pair, and send it either
// functionally, atomically, or via timing (see sendPkt()).
void
MemTest::tick()
{
    // Keep ourselves scheduled so a new access is attempted every tick.
    if (!tickEvent.scheduled())
        schedule(tickEvent, curTick() + ticks(1));

    // Deadlock watchdog: completeRequest() resets this counter, so half
    // a million silent cycles means the memory system wedged.
    if (++noResponseCycles >= 500000) {
        if (issueDmas) {
            cerr << "DMA tester ";
        }
        cerr << name() << ": deadlocked at cycle " << curTick() << endl;
        fatal("");
    }

    // Stall while a nacked packet awaits retry, or (in DMA mode) while
    // the single allowed DMA request is still in flight.
    if (accessRetry || (issueDmas && dmaOutstanding)) {
        DPRINTF(MemTest, "MemTester waiting on accessRetry or DMA response\n");
        return;
    }

    //make new request
    unsigned cmd = random() % 100;
    unsigned offset = random() % size;
    unsigned base = random() % 2;
    uint64_t data = random();
    unsigned access_size = random() % 4;
    bool uncacheable = (random() % 100) < percentUncacheable;

    unsigned dma_access_size = random() % 4;

    //If we aren't doing copies, use id as offset, and do a false sharing
    //mem tester
    //We can eliminate the lower bits of the offset, and then use the id
    //to offset within the blks
    offset = blockAddr(offset);
    offset += id;
    // Sizes are forced to 0, i.e. every access is a single byte.
    access_size = 0;
    dma_access_size = 0;

    Request *req = new Request();
    Request::Flags flags;
    Addr paddr;

    // Uncacheable accesses go to a dedicated address region; cacheable
    // ones alternate randomly between the two base regions.
    if (uncacheable) {
        flags.set(Request::UNCACHEABLE);
        paddr = uncacheAddr + offset;
    } else  {
        paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
    }
    // Functional accesses are only generated for cacheable addresses.
    bool do_functional = (random() % 100 < percentFunctional) && !uncacheable;

    if (issueDmas) {
        paddr &= ~((1 << dma_access_size) - 1);
        req->setPhys(paddr, 1 << dma_access_size, flags, masterId);
        req->setThreadContext(id,0);
    } else {
        paddr &= ~((1 << access_size) - 1);
        req->setPhys(paddr, 1 << access_size, flags, masterId);
        req->setThreadContext(id,0);
    }
    assert(req->getSize() == 1);

    // Expected-data buffer; ownership passes to the MemTestSenderState
    // and is released in completeRequest().
    uint8_t *result = new uint8_t[8];

    if (cmd < percentReads) {
        // read

        // For now we only allow one outstanding request per address
        // per tester This means we assume CPU does write forwarding
        // to reads that alias something in the cpu store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        // ***** NOTE FOR RON: I'm not sure how to access checkMem. - Kevin
        // Fetch the expected value from the reference (functional) memory.
        funcPort.readBlob(req->getPaddr(), result, req->getSize());

        DPRINTF(MemTest,
                "id %d initiating %sread at addr %x (blk %x) expecting %x\n",
                id, do_functional ? "functional " : "", req->getPaddr(),
                blockAddr(req->getPaddr()), *result);

        PacketPtr pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        pkt->setSrc(0);
        pkt->dataDynamicArray(new uint8_t[req->getSize()]);
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        if (do_functional) {
            assert(pkt->needsResponse());
            pkt->setSuppressFuncError();
            cachePort.sendFunctional(pkt);
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    } else {
        // write

        // For now we only allow one outstanding request per address
        // per tester.  This means we assume CPU does write forwarding
        // to reads that alias something in the cpu store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        DPRINTF(MemTest, "initiating %swrite at addr %x (blk %x) value %x\n",
                do_functional ? "functional " : "", req->getPaddr(),
                blockAddr(req->getPaddr()), data & 0xff);

        PacketPtr pkt = new Packet(req, MemCmd::WriteReq, Packet::Broadcast);
        pkt->setSrc(0);
        uint8_t *pkt_data = new uint8_t[req->getSize()];
        pkt->dataDynamicArray(pkt_data);
        memcpy(pkt_data, &data, req->getSize());
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        if (do_functional) {
            pkt->setSuppressFuncError();
            cachePort.sendFunctional(pkt);
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    }
}
408
409void
410MemTest::doRetry()
411{
412    if (cachePort.sendTiming(retryPkt)) {
413        DPRINTF(MemTest, "accessRetry setting to false\n");
414        accessRetry = false;
415        retryPkt = NULL;
416    }
417}
418
419
void
MemTest::printAddr(Addr a)
{
    // Delegate address printing to the cache port.
    cachePort.printAddr(a);
}
425
426
// Factory hook invoked by the generated parameter code to instantiate
// the SimObject from its params.
MemTest *
MemTestParams::create()
{
    return new MemTest(this);
}
432