memtest.cc revision 4895
1/*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Erik Hallnor
29 *          Steve Reinhardt
30 */
31
32// FIX ME: make trackBlkAddr use blocksize from actual cache, not hard coded
33
34#include <iomanip>
35#include <set>
36#include <string>
37#include <vector>
38
39#include "base/misc.hh"
40#include "base/statistics.hh"
41#include "cpu/memtest/memtest.hh"
42//#include "cpu/simple_thread.hh"
43//#include "mem/cache/base_cache.hh"
44#include "mem/mem_object.hh"
45#include "mem/port.hh"
46#include "mem/packet.hh"
47//#include "mem/physical.hh"
48#include "mem/request.hh"
49#include "sim/builder.hh"
50#include "sim/sim_events.hh"
51#include "sim/stats.hh"
52
53using namespace std;
54
// Global counter that hands each MemTest instance a unique id; the id is
// later used as a per-tester byte offset within a cache block.
int TESTER_ALLOCATOR=0;
56
bool
MemTest::CpuPort::recvTiming(PacketPtr pkt)
{
    // Timing-mode response from the memory system: hand the packet to the
    // owning tester for data checking and cleanup.  Always accepted.
    memtest->completeRequest(pkt);
    return true;
}
63
Tick
MemTest::CpuPort::recvAtomic(PacketPtr pkt)
{
    // must be snoop upcall
    assert(pkt->isRequest());
    assert(pkt->getDest() == Packet::Broadcast);
    // Snoops are ignored by the tester.
    // NOTE(review): returning curTick as the latency looks odd (0 would be
    // the usual "no extra latency" value) -- confirm against the atomic
    // snoop protocol's expectations.
    return curTick;
}
72
void
MemTest::CpuPort::recvFunctional(PacketPtr pkt)
{
    // Functional accesses arriving at the tester are deliberately ignored;
    // the warning below was silenced because it fired during initialization.
    //Do nothing if we see one come through
//    if (curTick != 0)//Supress warning durring initialization
//        warn("Functional Writes not implemented in MemTester\n");
    //Need to find any response values that intersect and update
    return;
}
82
83void
84MemTest::CpuPort::recvStatusChange(Status status)
85{
86    if (status == RangeChange) {
87        if (!snoopRangeSent) {
88            snoopRangeSent = true;
89            sendStatusChange(Port::RangeChange);
90        }
91        return;
92    }
93
94    panic("MemTest doesn't expect recvStatusChange callback!");
95}
96
void
MemTest::CpuPort::recvRetry()
{
    // The port can accept traffic again after rejecting a timing send;
    // let the tester resend its stashed packet.
    memtest->doRetry();
}
102
103void
104MemTest::sendPkt(PacketPtr pkt) {
105    if (atomic) {
106        cachePort.sendAtomic(pkt);
107        completeRequest(pkt);
108    }
109    else if (!cachePort.sendTiming(pkt)) {
110        accessRetry = true;
111        retryPkt = pkt;
112    }
113
114}
115
116MemTest::MemTest(const string &name,
117//		 MemInterface *_cache_interface,
118//		 PhysicalMemory *main_mem,
119//		 PhysicalMemory *check_mem,
120                 unsigned _memorySize,
121                 unsigned _percentReads,
122                 unsigned _percentFunctional,
123                 unsigned _percentUncacheable,
124                 unsigned _progressInterval,
125                 unsigned _percentSourceUnaligned,
126                 unsigned _percentDestUnaligned,
127                 Addr _traceAddr,
128                 Counter _max_loads,
129                 bool _atomic)
130    : MemObject(name),
131      tickEvent(this),
132      cachePort("test", this),
133      funcPort("functional", this),
134      retryPkt(NULL),
135//      mainMem(main_mem),
136//      checkMem(check_mem),
137      size(_memorySize),
138      percentReads(_percentReads),
139      percentFunctional(_percentFunctional),
140      percentUncacheable(_percentUncacheable),
141      progressInterval(_progressInterval),
142      nextProgressMessage(_progressInterval),
143      percentSourceUnaligned(_percentSourceUnaligned),
144      percentDestUnaligned(percentDestUnaligned),
145      maxLoads(_max_loads),
146      atomic(_atomic)
147{
148    vector<string> cmd;
149    cmd.push_back("/bin/ls");
150    vector<string> null_vec;
151    //  thread = new SimpleThread(NULL, 0, NULL, 0, mainMem);
152    curTick = 0;
153
154    cachePort.snoopRangeSent = false;
155    funcPort.snoopRangeSent = true;
156
157    // Needs to be masked off once we know the block size.
158    traceBlockAddr = _traceAddr;
159    baseAddr1 = 0x100000;
160    baseAddr2 = 0x400000;
161    uncacheAddr = 0x800000;
162
163    // set up counters
164    noResponseCycles = 0;
165    numReads = 0;
166    tickEvent.schedule(0);
167
168    id = TESTER_ALLOCATOR++;
169
170    accessRetry = false;
171}
172
173Port *
174MemTest::getPort(const std::string &if_name, int idx)
175{
176    if (if_name == "functional")
177        return &funcPort;
178    else if (if_name == "test")
179        return &cachePort;
180    else
181        panic("No Such Port\n");
182}
183
void
MemTest::init()
{
    // By the time init() is called, the ports should be hooked up.
    blockSize = cachePort.peerBlockSize();
    blockAddrMask = blockSize - 1;  // assumes blockSize is a power of two
    traceBlockAddr = blockAddr(traceBlockAddr);  // mask trace addr to a block

    // initial memory contents for both physical memory and functional
    // memory should be 0; no need to initialize them.
}
195
196
197void
198MemTest::completeRequest(PacketPtr pkt)
199{
200    Request *req = pkt->req;
201
202    DPRINTF(MemTest, "completing %s at address %x (blk %x)\n",
203            pkt->isWrite() ? "write" : "read",
204            req->getPaddr(), blockAddr(req->getPaddr()));
205
206    MemTestSenderState *state =
207        dynamic_cast<MemTestSenderState *>(pkt->senderState);
208
209    uint8_t *data = state->data;
210    uint8_t *pkt_data = pkt->getPtr<uint8_t>();
211
212    //Remove the address from the list of outstanding
213    std::set<unsigned>::iterator removeAddr =
214        outstandingAddrs.find(req->getPaddr());
215    assert(removeAddr != outstandingAddrs.end());
216    outstandingAddrs.erase(removeAddr);
217
218    switch (pkt->cmd.toInt()) {
219      case MemCmd::ReadResp:
220
221        if (memcmp(pkt_data, data, pkt->getSize()) != 0) {
222            panic("%s: read of %x (blk %x) @ cycle %d "
223                  "returns %x, expected %x\n", name(),
224                  req->getPaddr(), blockAddr(req->getPaddr()), curTick,
225                  *pkt_data, *data);
226        }
227
228        numReads++;
229        numReadsStat++;
230
231        if (numReads == nextProgressMessage) {
232            ccprintf(cerr, "%s: completed %d read accesses @%d\n",
233                     name(), numReads, curTick);
234            nextProgressMessage += progressInterval;
235        }
236
237        if (maxLoads != 0 && numReads >= maxLoads)
238            exitSimLoop("maximum number of loads reached");
239        break;
240
241      case MemCmd::WriteResp:
242        numWritesStat++;
243        break;
244
245      default:
246        panic("invalid command %s (%d)", pkt->cmdString(), pkt->cmd.toInt());
247    }
248
249    noResponseCycles = 0;
250    delete state;
251    delete [] data;
252    delete pkt->req;
253    delete pkt;
254}
255
// Register this tester's statistics with the stats framework.
void
MemTest::regStats()
{
    using namespace Stats;

    numReadsStat
        .name(name() + ".num_reads")
        .desc("number of read accesses completed")
        ;

    numWritesStat
        .name(name() + ".num_writes")
        .desc("number of write accesses completed")
        ;

    // NOTE(review): copies are not generated anywhere in this file, so this
    // stat always reads zero -- confirm whether it is still needed.
    numCopiesStat
        .name(name() + ".num_copies")
        .desc("number of copy accesses completed")
        ;
}
276
// Per-cycle event: generate one random read or write request (unless a
// previous send is awaiting retry), checking progress and deadlock.
void
MemTest::tick()
{
    // Reschedule ourselves so the tester runs every cycle.
    if (!tickEvent.scheduled())
        tickEvent.schedule(curTick + cycles(1));

    // Deadlock watchdog: completeRequest() zeroes this counter, so a long
    // stretch with no responses means the memory system is stuck.
    if (++noResponseCycles >= 500000) {
        cerr << name() << ": deadlocked at cycle " << curTick << endl;
        fatal("");
    }

    // A timing send was rejected; wait for recvRetry() before issuing more.
    if (accessRetry) {
        return;
    }

    //make new request
    unsigned cmd = random() % 100;        // selects read vs. write below
    unsigned offset = random() % size;
    unsigned base = random() % 2;         // picks one of two cacheable regions
    uint64_t data = random();
    unsigned access_size = random() % 4;
    unsigned cacheable = random() % 100;

    //If we aren't doing copies, use id as offset, and do a false sharing
    //mem tester
    //We can eliminate the lower bits of the offset, and then use the id
    //to offset within the blks
    offset = blockAddr(offset);
    offset += id;
    access_size = 0;  // NOTE: forces 1-byte accesses, overriding the random size

    Request *req = new Request();
    uint32_t flags = 0;
    Addr paddr;

    // Direct a fraction of accesses at the dedicated uncacheable region.
    if (cacheable < percentUncacheable) {
        flags |= UNCACHEABLE;
        paddr = uncacheAddr + offset;
    } else {
        paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
    }
    // Functional ("probe") accesses are only made to cacheable addresses.
    bool probe = (random() % 100 < percentFunctional) && !(flags & UNCACHEABLE);
    //bool probe = false;

    paddr &= ~((1 << access_size) - 1);  // align address to the access size
    req->setPhys(paddr, 1 << access_size, flags);
    req->setThreadContext(id,0);

    uint8_t *result = new uint8_t[8];  // expected data; freed by completeRequest()

    if (cmd < percentReads) {
        // read

        // For now we only allow one outstanding request per address
        // per tester This means we assume CPU does write forwarding
        // to reads that alias something in the cpu store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        // ***** NOTE FOR RON: I'm not sure how to access checkMem. - Kevin
        // Capture the reference value so completeRequest() can verify it.
        funcPort.readBlob(req->getPaddr(), result, req->getSize());

        DPRINTF(MemTest,
                "initiating read at address %x (blk %x) expecting %x\n",
                req->getPaddr(), blockAddr(req->getPaddr()), *result);

        PacketPtr pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        pkt->setSrc(0);
        pkt->dataDynamicArray(new uint8_t[req->getSize()]);
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        if (probe) {
            // Functional access completes synchronously.
            cachePort.sendFunctional(pkt);
            pkt->makeAtomicResponse();
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    } else {
        // write

        // For now we only allow one outstanding request per addreess
        // per tester.  This means we assume CPU does write forwarding
        // to reads that alias something in the cpu store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        DPRINTF(MemTest, "initiating write at address %x (blk %x) value %x\n",
                req->getPaddr(), blockAddr(req->getPaddr()), data & 0xff);

        PacketPtr pkt = new Packet(req, MemCmd::WriteReq, Packet::Broadcast);
        pkt->setSrc(0);
        uint8_t *pkt_data = new uint8_t[req->getSize()];
        pkt->dataDynamicArray(pkt_data);
        memcpy(pkt_data, &data, req->getSize());
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        // Mirror the write into the reference (functional) memory so later
        // reads can be checked against it.
        funcPort.writeBlob(req->getPaddr(), pkt_data, req->getSize());

        if (probe) {
            cachePort.sendFunctional(pkt);
            pkt->makeAtomicResponse();
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    }
}
397
void
MemTest::doRetry()
{
    // Resend the packet rejected earlier; only clear the retry state if the
    // port accepts it this time (otherwise another recvRetry() will follow).
    if (cachePort.sendTiming(retryPkt)) {
        accessRetry = false;
        retryPkt = NULL;
    }
}
406
// Configuration parameters exposed to the simulator's config system; each
// entry here must have a matching INIT_PARAM below and a constructor arg.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(MemTest)

//    SimObjectParam<BaseCache *> cache;
//    SimObjectParam<PhysicalMemory *> main_mem;
//    SimObjectParam<PhysicalMemory *> check_mem;
    Param<unsigned> memory_size;
    Param<unsigned> percent_reads;
    Param<unsigned> percent_functional;
    Param<unsigned> percent_uncacheable;
    Param<unsigned> progress_interval;
    Param<unsigned> percent_source_unaligned;
    Param<unsigned> percent_dest_unaligned;
    Param<Addr> trace_addr;
    Param<Counter> max_loads;
    Param<bool> atomic;

END_DECLARE_SIM_OBJECT_PARAMS(MemTest)
424
425
426BEGIN_INIT_SIM_OBJECT_PARAMS(MemTest)
427
428//    INIT_PARAM(cache, "L1 cache"),
429//    INIT_PARAM(main_mem, "hierarchical memory"),
430//    INIT_PARAM(check_mem, "check memory"),
431    INIT_PARAM(memory_size, "memory size"),
432    INIT_PARAM(percent_reads, "target read percentage"),
433    INIT_PARAM(percent_functional, "percentage of access that are functional"),
434    INIT_PARAM(percent_uncacheable, "target uncacheable percentage"),
435    INIT_PARAM(progress_interval, "progress report interval (in accesses)"),
436    INIT_PARAM(percent_source_unaligned,
437               "percent of copy source address that are unaligned"),
438    INIT_PARAM(percent_dest_unaligned,
439               "percent of copy dest address that are unaligned"),
440    INIT_PARAM(trace_addr, "address to trace"),
441                              INIT_PARAM(max_loads, "terminate when we have reached this load count"),
442    INIT_PARAM(atomic, "Is the tester testing atomic mode (or timing)")
443
444END_INIT_SIM_OBJECT_PARAMS(MemTest)
445
446
// Factory invoked by the config system: builds a MemTest from the declared
// parameters (commented-out arguments match the disabled params).
CREATE_SIM_OBJECT(MemTest)
{
    return new MemTest(getInstanceName(), /*cache->getInterface(),*/ /*main_mem,*/
                       /*check_mem,*/ memory_size, percent_reads, percent_functional,
                       percent_uncacheable, progress_interval,
                       percent_source_unaligned, percent_dest_unaligned,
                       trace_addr, max_loads, atomic);
}

// Registers the "MemTest" type name with the simulator's object factory.
REGISTER_SIM_OBJECT("MemTest", MemTest)
457