memtest.cc revision 5606
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 */

// FIX ME: make trackBlkAddr use blocksize from actual cache, not hard coded

#include <iomanip>
#include <set>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "cpu/memtest/memtest.hh"
#include "mem/mem_object.hh"
#include "mem/port.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
#include "sim/sim_events.hh"
#include "sim/stats.hh"

using namespace std;

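// Global counter used to hand out a unique id to each tester instance
// (see the constructor, where id = TESTER_ALLOCATOR++).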
int TESTER_ALLOCATOR = 0;

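// Timing-mode callback from the cache port: a response completes one of
// our outstanding requests; anything else must be a broadcast snoop
// request, which the tester simply ignores.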
bool
MemTest::CpuPort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse()) {
        memtest->completeRequest(pkt);
    } else {
        // must be snoop upcall
        assert(pkt->isRequest());
        assert(pkt->getDest() == Packet::Broadcast);
    }
    return true;
}

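// Atomic-mode callback: only broadcast snoop requests should arrive here,
// and they require no action.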
Tick
MemTest::CpuPort::recvAtomic(PacketPtr pkt)
{
    // must be snoop upcall
    assert(pkt->isRequest());
    assert(pkt->getDest() == Packet::Broadcast);
    return curTick;
}

void
MemTest::CpuPort::recvFunctional(PacketPtr pkt)
{
    // Do nothing if we see one come through.
//    if (curTick != 0) // Suppress warning during initialization
//        warn("Functional Writes not implemented in MemTester\n");
    // Ideally we would find any outstanding response values that
    // intersect this access and update them.
    return;
}

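// Forward the first RangeChange notification on to our peer so that
// snoop ranges propagate; any other status change is unexpected.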
void
MemTest::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("MemTest doesn't expect recvStatusChange callback!");
}

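// The peer can accept packets again; re-send the one that was rejected.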
void
MemTest::CpuPort::recvRetry()
{
    memtest->doRetry();
}

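// Issue a packet: in atomic mode the access completes immediately; in
// timing mode a rejected send is remembered so it can be retried later.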
void
MemTest::sendPkt(PacketPtr pkt)
{
    if (atomic) {
        cachePort.sendAtomic(pkt);
        completeRequest(pkt);
    } else if (!cachePort.sendTiming(pkt)) {
        accessRetry = true;
        retryPkt = pkt;
    }
}

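// Construct the tester: record the test parameters, pick the fixed base
// addresses, initialize the counters, and schedule the first tick.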
MemTest::MemTest(const Params *p)
    : MemObject(p),
      tickEvent(this),
      cachePort("test", this),
      funcPort("functional", this),
      retryPkt(NULL),
//      mainMem(main_mem),
//      checkMem(check_mem),
      size(p->memory_size),
      percentReads(p->percent_reads),
      percentFunctional(p->percent_functional),
      percentUncacheable(p->percent_uncacheable),
      progressInterval(p->progress_interval),
      nextProgressMessage(p->progress_interval),
      percentSourceUnaligned(p->percent_source_unaligned),
      percentDestUnaligned(p->percent_dest_unaligned),
      maxLoads(p->max_loads),
      atomic(p->atomic)
{
    vector<string> cmd;
    cmd.push_back("/bin/ls");
    vector<string> null_vec;
    //  thread = new SimpleThread(NULL, 0, NULL, 0, mainMem);
    curTick = 0;

    cachePort.snoopRangeSent = false;
    funcPort.snoopRangeSent = true;

    // Needs to be masked off once we know the block size.
    traceBlockAddr = p->trace_addr;
    baseAddr1 = 0x100000;
    baseAddr2 = 0x400000;
    uncacheAddr = 0x800000;

    // set up counters
    noResponseCycles = 0;
    numReads = 0;
    schedule(tickEvent, 0);

    id = TESTER_ALLOCATOR++;

    accessRetry = false;
}

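// Return the requested port by name: the tester exposes a "test" port
// (to the memory system under test) and a "functional" port (to the
// reference copy of memory).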
Port *
MemTest::getPort(const std::string &if_name, int idx)
{
    if (if_name == "functional")
        return &funcPort;
    else if (if_name == "test")
        return &cachePort;
    else
        panic("No Such Port\n");
}

void
MemTest::init()
{
    // By the time init() is called, the ports should be hooked up.
    blockSize = cachePort.peerBlockSize();
    blockAddrMask = blockSize - 1;
    traceBlockAddr = blockAddr(traceBlockAddr);

    // initial memory contents for both physical memory and functional
    // memory should be 0; no need to initialize them.
}


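// Handle a completed access: verify read data against the functional
// (reference) copy of memory, update statistics, and release the
// request, packet, and sender state.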
void
MemTest::completeRequest(PacketPtr pkt)
{
    Request *req = pkt->req;

    DPRINTF(MemTest, "completing %s at address %x (blk %x)\n",
            pkt->isWrite() ? "write" : "read",
            req->getPaddr(), blockAddr(req->getPaddr()));

    MemTestSenderState *state =
        dynamic_cast<MemTestSenderState *>(pkt->senderState);

    uint8_t *data = state->data;
    uint8_t *pkt_data = pkt->getPtr<uint8_t>();

    // Remove the address from the set of outstanding addresses
    std::set<unsigned>::iterator removeAddr =
        outstandingAddrs.find(req->getPaddr());
    assert(removeAddr != outstandingAddrs.end());
    outstandingAddrs.erase(removeAddr);

    assert(pkt->isResponse());

    if (pkt->isRead()) {
        if (memcmp(pkt_data, data, pkt->getSize()) != 0) {
            panic("%s: read of %x (blk %x) @ cycle %d "
                  "returns %x, expected %x\n", name(),
                  req->getPaddr(), blockAddr(req->getPaddr()), curTick,
                  *pkt_data, *data);
        }

        numReads++;
        numReadsStat++;

        if (numReads == nextProgressMessage) {
            ccprintf(cerr, "%s: completed %d read accesses @%d\n",
                     name(), numReads, curTick);
            nextProgressMessage += progressInterval;
        }

        if (maxLoads != 0 && numReads >= maxLoads)
            exitSimLoop("maximum number of loads reached");
    } else {
        assert(pkt->isWrite());
        numWritesStat++;
    }

    noResponseCycles = 0;
    delete state;
    delete [] data;
    delete pkt->req;
    delete pkt;
}

void
MemTest::regStats()
{
    using namespace Stats;

    numReadsStat
        .name(name() + ".num_reads")
        .desc("number of read accesses completed")
        ;

    numWritesStat
        .name(name() + ".num_writes")
        .desc("number of write accesses completed")
        ;

    numCopiesStat
        .name(name() + ".num_copies")
        .desc("number of copy accesses completed")
        ;
}

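// Generate at most one new random access per cycle: choose a command,
// address, size, and cacheability at random, keep the functional copy of
// memory in sync, and send the packet, unless a retry is pending or the
// chosen address already has a request in flight.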
void
MemTest::tick()
{
    if (!tickEvent.scheduled())
        schedule(tickEvent, curTick + ticks(1));

    if (++noResponseCycles >= 500000) {
        cerr << name() << ": deadlocked at cycle " << curTick << endl;
        fatal("");
    }

    if (accessRetry) {
        return;
    }

    // make new request
    unsigned cmd = random() % 100;
    unsigned offset = random() % size;
    unsigned base = random() % 2;
    uint64_t data = random();
    unsigned access_size = random() % 4;
    unsigned cacheable = random() % 100;

    // Since we aren't doing copies, use the tester id as an offset within
    // the block to get false sharing between testers: mask off the low
    // bits of the random offset to get a block address, then add the id
    // so each tester touches a distinct byte of the same block.
    offset = blockAddr(offset);
    offset += id;
    access_size = 0;

    Request *req = new Request();
    uint32_t flags = 0;
    Addr paddr;

    if (cacheable < percentUncacheable) {
        flags |= UNCACHEABLE;
        paddr = uncacheAddr + offset;
    } else {
        paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
    }
    bool probe = (random() % 100 < percentFunctional) && !(flags & UNCACHEABLE);
    //bool probe = false;

    paddr &= ~((1 << access_size) - 1);
    req->setPhys(paddr, 1 << access_size, flags);
    req->setThreadContext(id, 0);

    uint8_t *result = new uint8_t[8];

    if (cmd < percentReads) {
        // read

        // For now we only allow one outstanding request per address
        // per tester.  This means we assume the CPU does write forwarding
        // to reads that alias something in the CPU store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        // ***** NOTE FOR RON: I'm not sure how to access checkMem. - Kevin
        funcPort.readBlob(req->getPaddr(), result, req->getSize());

        DPRINTF(MemTest,
                "initiating read at address %x (blk %x) expecting %x\n",
                req->getPaddr(), blockAddr(req->getPaddr()), *result);

        PacketPtr pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        pkt->setSrc(0);
        pkt->dataDynamicArray(new uint8_t[req->getSize()]);
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        if (probe) {
            cachePort.sendFunctional(pkt);
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    } else {
        // write

        // For now we only allow one outstanding request per address
        // per tester.  This means we assume the CPU does write forwarding
        // to reads that alias something in the CPU store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        DPRINTF(MemTest, "initiating write at address %x (blk %x) value %x\n",
                req->getPaddr(), blockAddr(req->getPaddr()), data & 0xff);

        PacketPtr pkt = new Packet(req, MemCmd::WriteReq, Packet::Broadcast);
        pkt->setSrc(0);
        uint8_t *pkt_data = new uint8_t[req->getSize()];
        pkt->dataDynamicArray(pkt_data);
        memcpy(pkt_data, &data, req->getSize());
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        funcPort.writeBlob(req->getPaddr(), pkt_data, req->getSize());

        if (probe) {
            cachePort.sendFunctional(pkt);
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    }
}

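// Re-send the packet that was previously rejected by the cache port, and
// clear the retry state if it is accepted this time.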
void
MemTest::doRetry()
{
    if (cachePort.sendTiming(retryPkt)) {
        accessRetry = false;
        retryPkt = NULL;
    }
}


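// Forward an address to the cache port so the memory system can print its
// state for that address (debugging aid).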
void
MemTest::printAddr(Addr a)
{
    cachePort.printAddr(a);
}


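// Factory method invoked by the simulator's Python configuration layer to
// construct the C++ MemTest object from its generated Params class.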
MemTest *
MemTestParams::create()
{
    return new MemTest(this);
}