memtest.cc revision 4893
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 */

// FIXME: make trackBlkAddr use the block size from the actual cache,
// not a hard-coded value.

#include <iomanip>
#include <set>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "cpu/memtest/memtest.hh"
//#include "cpu/simple_thread.hh"
//#include "mem/cache/base_cache.hh"
#include "mem/mem_object.hh"
#include "mem/port.hh"
#include "mem/packet.hh"
//#include "mem/physical.hh"
#include "mem/request.hh"
#include "sim/builder.hh"
#include "sim/sim_events.hh"
#include "sim/stats.hh"

using namespace std;

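// Global counter used to hand out a unique id to each MemTest instance.
// The id is later added to block-aligned offsets so that every tester
// touches a distinct byte within a shared cache block (false sharing).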
int TESTER_ALLOCATOR = 0;

bool
MemTest::CpuPort::recvTiming(PacketPtr pkt)
{
    memtest->completeRequest(pkt);
    return true;
}

Tick
MemTest::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("MemTest doesn't expect recvAtomic callback!");
    return curTick;
}

void
MemTest::CpuPort::recvFunctional(PacketPtr pkt)
{
    // Do nothing if we see one come through.
//    if (curTick != 0) // Suppress warning during initialization
//        warn("Functional Writes not implemented in MemTester\n");
    // Need to find any response values that intersect and update.
    return;
}

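// On the first address-range change, send a single RangeChange back so the
// peer learns this port's ranges; later range changes are ignored, and any
// other status change is unexpected for this model.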
void
MemTest::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("MemTest doesn't expect recvStatusChange callback!");
}

void
MemTest::CpuPort::recvRetry()
{
    memtest->doRetry();
}

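// Issue a packet on the cache port.  In atomic mode the access completes
// immediately and completeRequest() is called right away; in timing mode a
// failed sendTiming() means the port is busy, so the packet is stashed in
// retryPkt until recvRetry() triggers doRetry().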
void
MemTest::sendPkt(PacketPtr pkt)
{
    if (atomic) {
        cachePort.sendAtomic(pkt);
        completeRequest(pkt);
    } else if (!cachePort.sendTiming(pkt)) {
        accessRetry = true;
        retryPkt = pkt;
    }
}

MemTest::MemTest(const string &name,
//               MemInterface *_cache_interface,
//               PhysicalMemory *main_mem,
//               PhysicalMemory *check_mem,
                 unsigned _memorySize,
                 unsigned _percentReads,
                 unsigned _percentFunctional,
                 unsigned _percentUncacheable,
                 unsigned _progressInterval,
                 unsigned _percentSourceUnaligned,
                 unsigned _percentDestUnaligned,
                 Addr _traceAddr,
                 Counter _max_loads,
                 bool _atomic)
    : MemObject(name),
      tickEvent(this),
      cachePort("test", this),
      funcPort("functional", this),
      retryPkt(NULL),
//      mainMem(main_mem),
//      checkMem(check_mem),
      size(_memorySize),
      percentReads(_percentReads),
      percentFunctional(_percentFunctional),
      percentUncacheable(_percentUncacheable),
      progressInterval(_progressInterval),
      nextProgressMessage(_progressInterval),
      percentSourceUnaligned(_percentSourceUnaligned),
      percentDestUnaligned(_percentDestUnaligned),
      maxLoads(_max_loads),
      atomic(_atomic)
{
    vector<string> cmd;
    cmd.push_back("/bin/ls");
    vector<string> null_vec;
    //  thread = new SimpleThread(NULL, 0, NULL, 0, mainMem);
    curTick = 0;

    cachePort.snoopRangeSent = false;
    funcPort.snoopRangeSent = true;

    // Needs to be masked off once we know the block size.
    traceBlockAddr = _traceAddr;
    baseAddr1 = 0x100000;
    baseAddr2 = 0x400000;
    uncacheAddr = 0x800000;

    // set up counters
    noResponseCycles = 0;
    numReads = 0;
    tickEvent.schedule(0);

    id = TESTER_ALLOCATOR++;

    accessRetry = false;
}

Port *
MemTest::getPort(const std::string &if_name, int idx)
{
    if (if_name == "functional")
        return &funcPort;
    else if (if_name == "test")
        return &cachePort;
    else
        panic("No Such Port\n");
}

void
MemTest::init()
{
    // By the time init() is called, the ports should be hooked up.
    blockSize = cachePort.peerBlockSize();
    blockAddrMask = blockSize - 1;
    traceBlockAddr = blockAddr(traceBlockAddr);

    // Initial memory contents for both physical memory and functional
    // memory should be 0; no need to initialize them.
}


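// completeRequest() is called both for timing-mode responses arriving via
// recvTiming() and for accesses completed locally (atomic or functional).
// For reads it checks the returned data against the expected value captured
// from the functional port when the request was issued.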
void
MemTest::completeRequest(PacketPtr pkt)
{
    Request *req = pkt->req;

    DPRINTF(MemTest, "completing %s at address %x (blk %x)\n",
            pkt->isWrite() ? "write" : "read",
            req->getPaddr(), blockAddr(req->getPaddr()));

    MemTestSenderState *state =
        dynamic_cast<MemTestSenderState *>(pkt->senderState);

    uint8_t *data = state->data;
    uint8_t *pkt_data = pkt->getPtr<uint8_t>();

    // Remove the address from the list of outstanding requests.
    std::set<unsigned>::iterator removeAddr =
        outstandingAddrs.find(req->getPaddr());
    assert(removeAddr != outstandingAddrs.end());
    outstandingAddrs.erase(removeAddr);

    switch (pkt->cmd.toInt()) {
      case MemCmd::ReadResp:

        if (memcmp(pkt_data, data, pkt->getSize()) != 0) {
            panic("%s: read of %x (blk %x) @ cycle %d "
                  "returns %x, expected %x\n", name(),
                  req->getPaddr(), blockAddr(req->getPaddr()), curTick,
                  *pkt_data, *data);
        }

        numReads++;
        numReadsStat++;

        if (numReads == nextProgressMessage) {
            ccprintf(cerr, "%s: completed %d read accesses @%d\n",
                     name(), numReads, curTick);
            nextProgressMessage += progressInterval;
        }

        if (maxLoads != 0 && numReads >= maxLoads)
            exitSimLoop("maximum number of loads reached");
        break;

      case MemCmd::WriteResp:
        numWritesStat++;
        break;

      default:
        panic("invalid command %s (%d)", pkt->cmdString(), pkt->cmd.toInt());
    }

    noResponseCycles = 0;
    delete state;
    delete [] data;
    delete pkt->req;
    delete pkt;
}

void
MemTest::regStats()
{
    using namespace Stats;

    numReadsStat
        .name(name() + ".num_reads")
        .desc("number of read accesses completed")
        ;

    numWritesStat
        .name(name() + ".num_writes")
        .desc("number of write accesses completed")
        ;

    numCopiesStat
        .name(name() + ".num_copies")
        .desc("number of copy accesses completed")
        ;
}

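// tick() runs once per cycle: it reschedules itself, bails out with a fatal
// error if no response has been seen for 500000 cycles, and otherwise
// generates one random read or write.  Offsets are block-aligned and then
// biased by the tester id, so multiple testers deliberately false-share the
// same blocks; a configurable fraction of accesses targets the uncacheable
// region or is issued functionally as a "probe".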
void
MemTest::tick()
{
    if (!tickEvent.scheduled())
        tickEvent.schedule(curTick + cycles(1));

    if (++noResponseCycles >= 500000) {
        cerr << name() << ": deadlocked at cycle " << curTick << endl;
        fatal("");
    }

    if (accessRetry) {
        return;
    }

    // make new request
    unsigned cmd = random() % 100;
    unsigned offset = random() % size;
    unsigned base = random() % 2;
    uint64_t data = random();
    unsigned access_size = random() % 4;
    unsigned cacheable = random() % 100;

    // Since we aren't doing copies, strip the low-order bits of the offset
    // and then add the tester id, so each tester touches its own byte
    // within the block (deliberate false sharing between testers).
    offset = blockAddr(offset);
    offset += id;
    access_size = 0;

    Request *req = new Request();
    uint32_t flags = 0;
    Addr paddr;

    if (cacheable < percentUncacheable) {
        flags |= UNCACHEABLE;
        paddr = uncacheAddr + offset;
    } else {
        paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
    }
    bool probe = (random() % 100 < percentFunctional) && !(flags & UNCACHEABLE);
    //bool probe = false;

    paddr &= ~((1 << access_size) - 1);
    req->setPhys(paddr, 1 << access_size, flags);
    req->setThreadContext(id, 0);

    uint8_t *result = new uint8_t[8];

    if (cmd < percentReads) {
        // read

        // For now we only allow one outstanding request per address per
        // tester.  This means we assume the CPU does write forwarding to
        // reads that alias something in the CPU store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        // ***** NOTE FOR RON: I'm not sure how to access checkMem. - Kevin
        funcPort.readBlob(req->getPaddr(), result, req->getSize());

        DPRINTF(MemTest,
                "initiating read at address %x (blk %x) expecting %x\n",
                req->getPaddr(), blockAddr(req->getPaddr()), *result);

        PacketPtr pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        pkt->setSrc(0);
        pkt->dataDynamicArray(new uint8_t[req->getSize()]);
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        if (probe) {
            cachePort.sendFunctional(pkt);
            pkt->makeAtomicResponse();
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    } else {
        // write

        // For now we only allow one outstanding request per address per
        // tester.  This means we assume the CPU does write forwarding to
        // reads that alias something in the CPU store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        DPRINTF(MemTest, "initiating write at address %x (blk %x) value %x\n",
                req->getPaddr(), blockAddr(req->getPaddr()), data & 0xff);

        PacketPtr pkt = new Packet(req, MemCmd::WriteReq, Packet::Broadcast);
        pkt->setSrc(0);
        uint8_t *pkt_data = new uint8_t[req->getSize()];
        pkt->dataDynamicArray(pkt_data);
        memcpy(pkt_data, &data, req->getSize());
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        funcPort.writeBlob(req->getPaddr(), pkt_data, req->getSize());

        if (probe) {
            cachePort.sendFunctional(pkt);
            pkt->makeAtomicResponse();
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    }
}

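// doRetry() is invoked from CpuPort::recvRetry() once the peer can accept a
// new packet; re-send the stashed packet and clear the retry state on
// success.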
void
MemTest::doRetry()
{
    if (cachePort.sendTiming(retryPkt)) {
        accessRetry = false;
        retryPkt = NULL;
    }
}

BEGIN_DECLARE_SIM_OBJECT_PARAMS(MemTest)

//    SimObjectParam<BaseCache *> cache;
//    SimObjectParam<PhysicalMemory *> main_mem;
//    SimObjectParam<PhysicalMemory *> check_mem;
    Param<unsigned> memory_size;
    Param<unsigned> percent_reads;
    Param<unsigned> percent_functional;
    Param<unsigned> percent_uncacheable;
    Param<unsigned> progress_interval;
    Param<unsigned> percent_source_unaligned;
    Param<unsigned> percent_dest_unaligned;
    Param<Addr> trace_addr;
    Param<Counter> max_loads;
    Param<bool> atomic;

END_DECLARE_SIM_OBJECT_PARAMS(MemTest)


BEGIN_INIT_SIM_OBJECT_PARAMS(MemTest)

//    INIT_PARAM(cache, "L1 cache"),
//    INIT_PARAM(main_mem, "hierarchical memory"),
//    INIT_PARAM(check_mem, "check memory"),
    INIT_PARAM(memory_size, "memory size"),
    INIT_PARAM(percent_reads, "target read percentage"),
    INIT_PARAM(percent_functional, "percentage of accesses that are functional"),
    INIT_PARAM(percent_uncacheable, "target uncacheable percentage"),
    INIT_PARAM(progress_interval, "progress report interval (in accesses)"),
    INIT_PARAM(percent_source_unaligned,
               "percent of copy source addresses that are unaligned"),
    INIT_PARAM(percent_dest_unaligned,
               "percent of copy dest addresses that are unaligned"),
    INIT_PARAM(trace_addr, "address to trace"),
    INIT_PARAM(max_loads, "terminate when we have reached this load count"),
    INIT_PARAM(atomic, "Is the tester testing atomic mode (or timing)")

END_INIT_SIM_OBJECT_PARAMS(MemTest)


CREATE_SIM_OBJECT(MemTest)
{
    return new MemTest(getInstanceName(),
                       /*cache->getInterface(),*/ /*main_mem,*/ /*check_mem,*/
                       memory_size, percent_reads, percent_functional,
                       percent_uncacheable, progress_interval,
                       percent_source_unaligned, percent_dest_unaligned,
                       trace_addr, max_loads, atomic);
}

REGISTER_SIM_OBJECT("MemTest", MemTest)
