memtest.cc revision 8853:0216ed80991b
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Erik Hallnor
29 *          Steve Reinhardt
30 */
31
32// FIX ME: make trackBlkAddr use blocksize from actual cache, not hard coded
33
34#include <iomanip>
35#include <set>
36#include <string>
37#include <vector>
38
39#include "base/misc.hh"
40#include "base/statistics.hh"
41#include "cpu/testers/memtest/memtest.hh"
42#include "debug/MemTest.hh"
43#include "mem/mem_object.hh"
44#include "mem/packet.hh"
45#include "mem/port.hh"
46#include "mem/request.hh"
47#include "sim/sim_events.hh"
48#include "sim/stats.hh"
49#include "sim/system.hh"
50
51using namespace std;
52
// Global counter used to hand out a unique id to each MemTest instance
// (consumed in the constructor via TESTER_ALLOCATOR++).
int TESTER_ALLOCATOR=0;
54
55bool
56MemTest::CpuPort::recvTiming(PacketPtr pkt)
57{
58    if (pkt->isResponse()) {
59        memtest->completeRequest(pkt);
60    } else {
61        // must be snoop upcall
62        assert(pkt->isRequest());
63        assert(pkt->getDest() == Packet::Broadcast);
64    }
65    return true;
66}
67
68Tick
69MemTest::CpuPort::recvAtomic(PacketPtr pkt)
70{
71    // must be snoop upcall
72    assert(pkt->isRequest());
73    assert(pkt->getDest() == Packet::Broadcast);
74    return curTick();
75}
76
77void
78MemTest::CpuPort::recvFunctional(PacketPtr pkt)
79{
80    //Do nothing if we see one come through
81//    if (curTick() != 0)//Supress warning durring initialization
82//        warn("Functional Writes not implemented in MemTester\n");
83    //Need to find any response values that intersect and update
84    return;
85}
86
void
MemTest::CpuPort::recvRangeChange()
{
    // Address range changes are irrelevant to the tester; ignore them.
}
91
void
MemTest::CpuPort::recvRetry()
{
    // The peer can accept a packet again; forward to the tester so it
    // can re-send the packet that was previously refused.
    memtest->doRetry();
}
97
98void
99MemTest::sendPkt(PacketPtr pkt) {
100    if (atomic) {
101        cachePort.sendAtomic(pkt);
102        completeRequest(pkt);
103    }
104    else if (!cachePort.sendTiming(pkt)) {
105        DPRINTF(MemTest, "accessRetry setting to true\n");
106
107        //
108        // dma requests should never be retried
109        //
110        if (issueDmas) {
111            panic("Nacked DMA requests are not supported\n");
112        }
113        accessRetry = true;
114        retryPkt = pkt;
115    } else {
116        if (issueDmas) {
117            dmaOutstanding = true;
118        }
119    }
120
121}
122
// Construct a tester instance from its configuration params.  All test
// parameters (access mix, alignment percentages, load limit, atomic vs.
// timing mode) come straight from the Python config object.
MemTest::MemTest(const Params *p)
    : MemObject(p),
      tickEvent(this),
      cachePort("test", this),
      funcPort("functional", this),
      funcProxy(funcPort),
      retryPkt(NULL),
//      mainMem(main_mem),
//      checkMem(check_mem),
      size(p->memory_size),
      percentReads(p->percent_reads),
      percentFunctional(p->percent_functional),
      percentUncacheable(p->percent_uncacheable),
      issueDmas(p->issue_dmas),
      masterId(p->sys->getMasterId(name())),
      progressInterval(p->progress_interval),
      nextProgressMessage(p->progress_interval),
      percentSourceUnaligned(p->percent_source_unaligned),
      percentDestUnaligned(p->percent_dest_unaligned),
      maxLoads(p->max_loads),
      atomic(p->atomic),
      suppress_func_warnings(p->suppress_func_warnings)
{
    // Each tester gets a unique id, used to offset accesses within a
    // block so multiple testers exercise false sharing.
    id = TESTER_ALLOCATOR++;

    // Needs to be masked off once we know the block size.
    traceBlockAddr = p->trace_addr;
    // Fixed base addresses for the cacheable regions and the
    // uncacheable region used by generated accesses.
    baseAddr1 = 0x100000;
    baseAddr2 = 0x400000;
    uncacheAddr = 0x800000;

    // set up counters
    noResponseCycles = 0;
    numReads = 0;
    numWrites = 0;
    // Kick off the first tick at time 0.
    schedule(tickEvent, 0);

    accessRetry = false;
    dmaOutstanding = false;
}
163
164Port *
165MemTest::getPort(const std::string &if_name, int idx)
166{
167    if (if_name == "functional")
168        return &funcPort;
169    else if (if_name == "test")
170        return &cachePort;
171    else
172        panic("No Such Port\n");
173}
174
void
MemTest::init()
{
    // By the time init() is called, the ports should be hooked up.
    // Query the peer's block size and derive the mask used by
    // blockAddr() to strip block offsets from addresses.
    blockSize = cachePort.peerBlockSize();
    blockAddrMask = blockSize - 1;
    traceBlockAddr = blockAddr(traceBlockAddr);

    // initial memory contents for both physical memory and functional
    // memory should be 0; no need to initialize them.
}
186
187
188void
189MemTest::completeRequest(PacketPtr pkt)
190{
191    Request *req = pkt->req;
192
193    if (issueDmas) {
194        dmaOutstanding = false;
195    }
196
197    DPRINTF(MemTest, "completing %s at address %x (blk %x) %s\n",
198            pkt->isWrite() ? "write" : "read",
199            req->getPaddr(), blockAddr(req->getPaddr()),
200            pkt->isError() ? "error" : "success");
201
202    MemTestSenderState *state =
203        dynamic_cast<MemTestSenderState *>(pkt->senderState);
204
205    uint8_t *data = state->data;
206    uint8_t *pkt_data = pkt->getPtr<uint8_t>();
207
208    //Remove the address from the list of outstanding
209    std::set<unsigned>::iterator removeAddr =
210        outstandingAddrs.find(req->getPaddr());
211    assert(removeAddr != outstandingAddrs.end());
212    outstandingAddrs.erase(removeAddr);
213
214    if (pkt->isError()) {
215        if (!suppress_func_warnings) {
216          warn("Functional Access failed for %x at %x\n",
217               pkt->isWrite() ? "write" : "read", req->getPaddr());
218        }
219    } else {
220        if (pkt->isRead()) {
221            if (memcmp(pkt_data, data, pkt->getSize()) != 0) {
222                panic("%s: read of %x (blk %x) @ cycle %d "
223                      "returns %x, expected %x\n", name(),
224                      req->getPaddr(), blockAddr(req->getPaddr()), curTick(),
225                      *pkt_data, *data);
226            }
227
228            numReads++;
229            numReadsStat++;
230
231            if (numReads == (uint64_t)nextProgressMessage) {
232                ccprintf(cerr, "%s: completed %d read, %d write accesses @%d\n",
233                         name(), numReads, numWrites, curTick());
234                nextProgressMessage += progressInterval;
235            }
236
237            if (maxLoads != 0 && numReads >= maxLoads)
238                exitSimLoop("maximum number of loads reached");
239        } else {
240            assert(pkt->isWrite());
241            funcProxy.writeBlob(req->getPaddr(), pkt_data, req->getSize());
242            numWrites++;
243            numWritesStat++;
244        }
245    }
246
247    noResponseCycles = 0;
248    delete state;
249    delete [] data;
250    delete pkt->req;
251    delete pkt;
252}
253
254void
255MemTest::regStats()
256{
257    using namespace Stats;
258
259    numReadsStat
260        .name(name() + ".num_reads")
261        .desc("number of read accesses completed")
262        ;
263
264    numWritesStat
265        .name(name() + ".num_writes")
266        .desc("number of write accesses completed")
267        ;
268
269    numCopiesStat
270        .name(name() + ".num_copies")
271        .desc("number of copy accesses completed")
272        ;
273}
274
// Main tester loop: called every tick to (possibly) generate one new
// random read or write request.  NOTE: the exact ordering of random()
// calls below determines the generated request stream, so it must not
// be rearranged.
void
MemTest::tick()
{
    // Keep ourselves scheduled for the next cycle.
    if (!tickEvent.scheduled())
        schedule(tickEvent, curTick() + ticks(1));

    // Deadlock detector: abort if no response has arrived for a long
    // stretch of cycles (reset in completeRequest()).
    if (++noResponseCycles >= 500000) {
        if (issueDmas) {
            cerr << "DMA tester ";
        }
        cerr << name() << ": deadlocked at cycle " << curTick() << endl;
        fatal("");
    }

    // Don't issue anything new while waiting for a retry or an
    // outstanding DMA.
    if (accessRetry || (issueDmas && dmaOutstanding)) {
        DPRINTF(MemTest, "MemTester waiting on accessRetry or DMA response\n");
        return;
    }

    //make new request
    unsigned cmd = random() % 100;
    unsigned offset = random() % size;
    unsigned base = random() % 2;
    uint64_t data = random();
    unsigned access_size = random() % 4;
    bool uncacheable = (random() % 100) < percentUncacheable;

    unsigned dma_access_size = random() % 4;

    //If we aren't doing copies, use id as offset, and do a false sharing
    //mem tester
    //We can eliminate the lower bits of the offset, and then use the id
    //to offset within the blks
    offset = blockAddr(offset);
    offset += id;
    // Force single-byte accesses (the random sizes above are currently
    // unused; see the assert on the request size below).
    access_size = 0;
    dma_access_size = 0;

    Request *req = new Request();
    Request::Flags flags;
    Addr paddr;

    // Pick the target region: the uncacheable range, or one of the two
    // cacheable base regions chosen at random.
    if (uncacheable) {
        flags.set(Request::UNCACHEABLE);
        paddr = uncacheAddr + offset;
    } else  {
        paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
    }
    // Functional accesses are only done to cacheable addresses.
    bool do_functional = (random() % 100 < percentFunctional) && !uncacheable;

    if (issueDmas) {
        paddr &= ~((1 << dma_access_size) - 1);
        req->setPhys(paddr, 1 << dma_access_size, flags, masterId);
        req->setThreadContext(id,0);
    } else {
        paddr &= ~((1 << access_size) - 1);
        req->setPhys(paddr, 1 << access_size, flags, masterId);
        req->setThreadContext(id,0);
    }
    assert(req->getSize() == 1);

    // Buffer holding the expected value; ownership passes to the
    // MemTestSenderState and is freed in completeRequest().
    uint8_t *result = new uint8_t[8];

    if (cmd < percentReads) {
        // read

        // For now we only allow one outstanding request per address
        // per tester This means we assume CPU does write forwarding
        // to reads that alias something in the cpu store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        // ***** NOTE FOR RON: I'm not sure how to access checkMem. - Kevin
        // Fetch the expected value from the functional reference memory.
        funcProxy.readBlob(req->getPaddr(), result, req->getSize());

        DPRINTF(MemTest,
                "id %d initiating %sread at addr %x (blk %x) expecting %x\n",
                id, do_functional ? "functional " : "", req->getPaddr(),
                blockAddr(req->getPaddr()), *result);

        PacketPtr pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        pkt->setSrc(0);
        pkt->dataDynamicArray(new uint8_t[req->getSize()]);
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        if (do_functional) {
            assert(pkt->needsResponse());
            pkt->setSuppressFuncError();
            cachePort.sendFunctional(pkt);
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    } else {
        // write

        // For now we only allow one outstanding request per addreess
        // per tester.  This means we assume CPU does write forwarding
        // to reads that alias something in the cpu store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        DPRINTF(MemTest, "initiating %swrite at addr %x (blk %x) value %x\n",
                do_functional ? "functional " : "", req->getPaddr(),
                blockAddr(req->getPaddr()), data & 0xff);

        PacketPtr pkt = new Packet(req, MemCmd::WriteReq, Packet::Broadcast);
        pkt->setSrc(0);
        uint8_t *pkt_data = new uint8_t[req->getSize()];
        pkt->dataDynamicArray(pkt_data);
        memcpy(pkt_data, &data, req->getSize());
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        if (do_functional) {
            pkt->setSuppressFuncError();
            cachePort.sendFunctional(pkt);
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    }
}
409
410void
411MemTest::doRetry()
412{
413    if (cachePort.sendTiming(retryPkt)) {
414        DPRINTF(MemTest, "accessRetry setting to false\n");
415        accessRetry = false;
416        retryPkt = NULL;
417    }
418}
419
420
void
MemTest::printAddr(Addr a)
{
    // Delegate address printing (debug aid) to the cache port.
    cachePort.printAddr(a);
}
426
427
// Factory hook invoked by the generated params class to build the
// simulation object from its Python-side configuration.
MemTest *
MemTestParams::create()
{
    return new MemTest(this);
}
433