// memtest.cc, revision 5319
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 */

// FIX ME: make trackBlkAddr use blocksize from actual cache, not hard coded

#include <iomanip>
#include <set>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "cpu/memtest/memtest.hh"
//#include "cpu/simple_thread.hh"
//#include "mem/cache/base_cache.hh"
#include "mem/mem_object.hh"
#include "mem/port.hh"
#include "mem/packet.hh"
//#include "mem/physical.hh"
#include "mem/request.hh"
#include "sim/sim_events.hh"
#include "sim/stats.hh"

using namespace std;

int TESTER_ALLOCATOR = 0;

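// Timing-mode callback from the cache port.  Responses to our own requests
// are handed to completeRequest(); anything else must be a broadcast snoop
// request, which the tester simply accepts and ignores.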
bool
MemTest::CpuPort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse()) {
        memtest->completeRequest(pkt);
    } else {
        // must be snoop upcall
        assert(pkt->isRequest());
        assert(pkt->getDest() == Packet::Broadcast);
    }
    return true;
}

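// Atomic-mode callback.  Only broadcast snoop requests are expected here;
// they are checked with asserts and otherwise ignored.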
Tick
MemTest::CpuPort::recvAtomic(PacketPtr pkt)
{
    // must be snoop upcall
    assert(pkt->isRequest());
    assert(pkt->getDest() == Packet::Broadcast);
    return curTick;
}

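// Functional accesses arriving at the test port are ignored; the warning
// that used to flag this is commented out below.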
void
MemTest::CpuPort::recvFunctional(PacketPtr pkt)
{
    // Do nothing if we see one come through
//    if (curTick != 0) // Suppress warning during initialization
//        warn("Functional Writes not implemented in MemTester\n");
    // Need to find any response values that intersect and update
    return;
}

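// Forward the first RangeChange notification up through this port (guarded
// by snoopRangeSent so it only happens once); any other status change is
// unexpected and triggers a panic.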
void
MemTest::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("MemTest doesn't expect recvStatusChange callback!");
}

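// The peer is ready for the previously rejected packet; ask the tester to
// resend it.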
void
MemTest::CpuPort::recvRetry()
{
    memtest->doRetry();
}

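// Issue a packet on the cache port.  In atomic mode the access completes
// immediately; in timing mode a rejected send is remembered in retryPkt and
// resent from doRetry() when recvRetry() fires.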
void
MemTest::sendPkt(PacketPtr pkt)
{
    if (atomic) {
        cachePort.sendAtomic(pkt);
        completeRequest(pkt);
    } else if (!cachePort.sendTiming(pkt)) {
        accessRetry = true;
        retryPkt = pkt;
    }
}

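// Construct the tester from its Params: wire up the test and functional
// ports, copy the test parameters, pick fixed base addresses for the
// cacheable and uncacheable regions, grab a unique tester id from
// TESTER_ALLOCATOR, and schedule the first tick() at time 0.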
MemTest::MemTest(const Params *p)
    : MemObject(p),
      tickEvent(this),
      cachePort("test", this),
      funcPort("functional", this),
      retryPkt(NULL),
//      mainMem(main_mem),
//      checkMem(check_mem),
      size(p->memory_size),
      percentReads(p->percent_reads),
      percentFunctional(p->percent_functional),
      percentUncacheable(p->percent_uncacheable),
      progressInterval(p->progress_interval),
      nextProgressMessage(p->progress_interval),
      percentSourceUnaligned(p->percent_source_unaligned),
      percentDestUnaligned(p->percent_dest_unaligned),
      maxLoads(p->max_loads),
      atomic(p->atomic)
{
    vector<string> cmd;
    cmd.push_back("/bin/ls");
    vector<string> null_vec;
    //  thread = new SimpleThread(NULL, 0, NULL, 0, mainMem);
    curTick = 0;

    cachePort.snoopRangeSent = false;
    funcPort.snoopRangeSent = true;

    // Needs to be masked off once we know the block size.
    traceBlockAddr = p->trace_addr;
    baseAddr1 = 0x100000;
    baseAddr2 = 0x400000;
    uncacheAddr = 0x800000;

    // set up counters
    noResponseCycles = 0;
    numReads = 0;
    tickEvent.schedule(0);

    id = TESTER_ALLOCATOR++;

    accessRetry = false;
}

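// Export the two ports by name: "test" is the port exercised by the tester,
// "functional" is used to keep a reference copy of memory for checking.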
Port *
MemTest::getPort(const std::string &if_name, int idx)
{
    if (if_name == "functional")
        return &funcPort;
    else if (if_name == "test")
        return &cachePort;
    else
        panic("No Such Port\n");
}

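// Called once the ports are connected: query the peer's block size, derive
// the block address mask, and block-align the trace address.  blockAddr()
// (presumably defined in memtest.hh as addr & ~blockAddrMask) performs that
// alignment.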
void
MemTest::init()
{
    // By the time init() is called, the ports should be hooked up.
    blockSize = cachePort.peerBlockSize();
    blockAddrMask = blockSize - 1;
    traceBlockAddr = blockAddr(traceBlockAddr);

    // initial memory contents for both physical memory and functional
    // memory should be 0; no need to initialize them.
}


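// Handle a completed access.  The expected data was stashed in the packet's
// MemTestSenderState when the request was issued; reads are compared against
// that reference copy (a mismatch is fatal), progress is reported every
// progressInterval reads, and the simulation ends once maxLoads reads have
// completed.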
void
MemTest::completeRequest(PacketPtr pkt)
{
    Request *req = pkt->req;

    DPRINTF(MemTest, "completing %s at address %x (blk %x)\n",
            pkt->isWrite() ? "write" : "read",
            req->getPaddr(), blockAddr(req->getPaddr()));

    MemTestSenderState *state =
        dynamic_cast<MemTestSenderState *>(pkt->senderState);

    uint8_t *data = state->data;
    uint8_t *pkt_data = pkt->getPtr<uint8_t>();

    // Remove the address from the list of outstanding addresses
    std::set<unsigned>::iterator removeAddr =
        outstandingAddrs.find(req->getPaddr());
    assert(removeAddr != outstandingAddrs.end());
    outstandingAddrs.erase(removeAddr);

    assert(pkt->isResponse());

    if (pkt->isRead()) {
        if (memcmp(pkt_data, data, pkt->getSize()) != 0) {
            panic("%s: read of %x (blk %x) @ cycle %d "
                  "returns %x, expected %x\n", name(),
                  req->getPaddr(), blockAddr(req->getPaddr()), curTick,
                  *pkt_data, *data);
        }

        numReads++;
        numReadsStat++;

        if (numReads == nextProgressMessage) {
            ccprintf(cerr, "%s: completed %d read accesses @%d\n",
                     name(), numReads, curTick);
            nextProgressMessage += progressInterval;
        }

        if (maxLoads != 0 && numReads >= maxLoads)
            exitSimLoop("maximum number of loads reached");
    } else {
        assert(pkt->isWrite());
        numWritesStat++;
    }

    noResponseCycles = 0;
    delete state;
    delete [] data;
    delete pkt->req;
    delete pkt;
}

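// Register the read/write/copy counters with the statistics package.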
void
MemTest::regStats()
{
    using namespace Stats;

    numReadsStat
        .name(name() + ".num_reads")
        .desc("number of read accesses completed")
        ;

    numWritesStat
        .name(name() + ".num_writes")
        .desc("number of write accesses completed")
        ;

    numCopiesStat
        .name(name() + ".num_copies")
        .desc("number of copy accesses completed")
        ;
}

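// Main loop, invoked every cycle.  Reschedules itself, bails out if no
// response has arrived for 500000 cycles, and unless a retry is pending
// generates one random single-byte read or write to the cacheable or
// uncacheable region, using the functional port to maintain the reference
// copy of memory that completed reads are checked against.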
void
MemTest::tick()
{
    if (!tickEvent.scheduled())
        tickEvent.schedule(curTick + ticks(1));

    if (++noResponseCycles >= 500000) {
        cerr << name() << ": deadlocked at cycle " << curTick << endl;
        fatal("");
    }

    if (accessRetry) {
        return;
    }

    // make new request
    unsigned cmd = random() % 100;
    unsigned offset = random() % size;
    unsigned base = random() % 2;
    uint64_t data = random();
    unsigned access_size = random() % 4;
    unsigned cacheable = random() % 100;

    // We aren't doing copies, so use the tester id as an offset within the
    // block: mask off the low-order bits of the random offset to get a
    // block-aligned address, then add the id so that each tester touches a
    // different byte of the same block (deliberate false sharing).
    offset = blockAddr(offset);
    offset += id;
    access_size = 0;

    Request *req = new Request();
    uint32_t flags = 0;
    Addr paddr;

    if (cacheable < percentUncacheable) {
        flags |= UNCACHEABLE;
        paddr = uncacheAddr + offset;
    } else {
        paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
    }
    bool probe = (random() % 100 < percentFunctional) && !(flags & UNCACHEABLE);
    //bool probe = false;

    paddr &= ~((1 << access_size) - 1);
    req->setPhys(paddr, 1 << access_size, flags);
    req->setThreadContext(id, 0);

    uint8_t *result = new uint8_t[8];

    if (cmd < percentReads) {
        // read

        // For now we only allow one outstanding request per address
        // per tester.  This means we assume the CPU does write forwarding
        // to reads that alias something in the cpu store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        // ***** NOTE FOR RON: I'm not sure how to access checkMem. - Kevin
        funcPort.readBlob(req->getPaddr(), result, req->getSize());

        DPRINTF(MemTest,
                "initiating read at address %x (blk %x) expecting %x\n",
                req->getPaddr(), blockAddr(req->getPaddr()), *result);

        PacketPtr pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        pkt->setSrc(0);
        pkt->dataDynamicArray(new uint8_t[req->getSize()]);
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        if (probe) {
            cachePort.sendFunctional(pkt);
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    } else {
        // write

        // For now we only allow one outstanding request per address
        // per tester.  This means we assume the CPU does write forwarding
        // to reads that alias something in the cpu store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        DPRINTF(MemTest, "initiating write at address %x (blk %x) value %x\n",
                req->getPaddr(), blockAddr(req->getPaddr()), data & 0xff);

        PacketPtr pkt = new Packet(req, MemCmd::WriteReq, Packet::Broadcast);
        pkt->setSrc(0);
        uint8_t *pkt_data = new uint8_t[req->getSize()];
        pkt->dataDynamicArray(pkt_data);
        memcpy(pkt_data, &data, req->getSize());
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        funcPort.writeBlob(req->getPaddr(), pkt_data, req->getSize());

        if (probe) {
            cachePort.sendFunctional(pkt);
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    }
}

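// Retry the packet that was rejected earlier; only clear the retry state
// once the resend is accepted.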
void
MemTest::doRetry()
{
    if (cachePort.sendTiming(retryPkt)) {
        accessRetry = false;
        retryPkt = NULL;
    }
}


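// Forward an address to the cache port's printAddr(), presumably a
// debugging aid for dumping the state associated with that address.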
void
MemTest::printAddr(Addr a)
{
    cachePort.printAddr(a);
}


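// Factory used by the parameter system to build a MemTest from its Params.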
MemTest *
MemTestParams::create()
{
    return new MemTest(this);
}