memtest.cc revision 7804:42f343470ee3
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 */

// FIX ME: make trackBlkAddr use blocksize from actual cache, not hard coded

#include <iomanip>
#include <set>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "cpu/testers/memtest/memtest.hh"
#include "mem/mem_object.hh"
#include "mem/port.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
#include "sim/sim_events.hh"
#include "sim/stats.hh"

using namespace std;

int TESTER_ALLOCATOR = 0;

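// Timing-mode receive handler: completed responses are handed back to the
// tester for checking; incoming broadcast snoop requests are acknowledged
// and otherwise ignored.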
bool
MemTest::CpuPort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse()) {
        memtest->completeRequest(pkt);
    } else {
        // must be snoop upcall
        assert(pkt->isRequest());
        assert(pkt->getDest() == Packet::Broadcast);
    }
    return true;
}

Tick
MemTest::CpuPort::recvAtomic(PacketPtr pkt)
{
    // must be snoop upcall
    assert(pkt->isRequest());
    assert(pkt->getDest() == Packet::Broadcast);
    return curTick;
}

void
MemTest::CpuPort::recvFunctional(PacketPtr pkt)
{
    // Do nothing if we see one come through
//    if (curTick != 0) // Suppress warning during initialization
//        warn("Functional Writes not implemented in MemTester\n");
    // Need to find any response values that intersect and update
    return;
}

void
MemTest::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("MemTest doesn't expect recvStatusChange callback!");
}

void
MemTest::CpuPort::recvRetry()
{
    memtest->doRetry();
}

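// Issue a packet on the cache port.  In atomic mode the access completes
// immediately; in timing mode a rejected packet is stashed in retryPkt and
// resent from doRetry() when the port signals a retry.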
void
MemTest::sendPkt(PacketPtr pkt)
{
    if (atomic) {
        cachePort.sendAtomic(pkt);
        completeRequest(pkt);
    } else if (!cachePort.sendTiming(pkt)) {
        DPRINTF(MemTest, "accessRetry setting to true\n");

        // DMA requests should never be retried
        if (issueDmas) {
            panic("Nacked DMA requests are not supported\n");
        }
        accessRetry = true;
        retryPkt = pkt;
    } else {
        if (issueDmas) {
            dmaOutstanding = true;
        }
    }
}

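// Construct the tester: grab a unique tester id, set up the fixed test
// address regions, zero the counters, and schedule the first tick.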
MemTest::MemTest(const Params *p)
    : MemObject(p),
      tickEvent(this),
      cachePort("test", this),
      funcPort("functional", this),
      retryPkt(NULL),
//      mainMem(main_mem),
//      checkMem(check_mem),
      size(p->memory_size),
      percentReads(p->percent_reads),
      percentFunctional(p->percent_functional),
      percentUncacheable(p->percent_uncacheable),
      issueDmas(p->issue_dmas),
      progressInterval(p->progress_interval),
      nextProgressMessage(p->progress_interval),
      percentSourceUnaligned(p->percent_source_unaligned),
      percentDestUnaligned(p->percent_dest_unaligned),
      maxLoads(p->max_loads),
      atomic(p->atomic)
{
    cachePort.snoopRangeSent = false;
    funcPort.snoopRangeSent = true;

    id = TESTER_ALLOCATOR++;

    // Needs to be masked off once we know the block size.
    traceBlockAddr = p->trace_addr;
    baseAddr1 = 0x100000;
    baseAddr2 = 0x400000;
    uncacheAddr = 0x800000;

    // set up counters
    noResponseCycles = 0;
    numReads = 0;
    schedule(tickEvent, 0);

    accessRetry = false;
    dmaOutstanding = false;
}

Port *
MemTest::getPort(const std::string &if_name, int idx)
{
    if (if_name == "functional")
        return &funcPort;
    else if (if_name == "test")
        return &cachePort;
    else
        panic("No Such Port\n");
}

void
MemTest::init()
{
    // By the time init() is called, the ports should be hooked up.
    blockSize = cachePort.peerBlockSize();
    blockAddrMask = blockSize - 1;
    traceBlockAddr = blockAddr(traceBlockAddr);

    // initial memory contents for both physical memory and functional
    // memory should be 0; no need to initialize them.
}


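// Handle a completed access.  Read responses are checked against the
// reference value captured from the functional port when the request was
// issued; any mismatch is a fatal error.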
void
MemTest::completeRequest(PacketPtr pkt)
{
    Request *req = pkt->req;

    if (issueDmas) {
        dmaOutstanding = false;
    }

    DPRINTF(MemTest, "completing %s at address %x (blk %x)\n",
            pkt->isWrite() ? "write" : "read",
            req->getPaddr(), blockAddr(req->getPaddr()));

    MemTestSenderState *state =
        dynamic_cast<MemTestSenderState *>(pkt->senderState);

    uint8_t *data = state->data;
    uint8_t *pkt_data = pkt->getPtr<uint8_t>();

    // Remove the address from the list of outstanding addresses
    std::set<unsigned>::iterator removeAddr =
        outstandingAddrs.find(req->getPaddr());
    assert(removeAddr != outstandingAddrs.end());
    outstandingAddrs.erase(removeAddr);

    if (pkt->isRead()) {
        if (memcmp(pkt_data, data, pkt->getSize()) != 0) {
            panic("%s: read of %x (blk %x) @ cycle %d "
                  "returns %x, expected %x\n", name(),
                  req->getPaddr(), blockAddr(req->getPaddr()), curTick,
                  *pkt_data, *data);
        }

        numReads++;
        numReadsStat++;

        if (numReads == (uint64_t)nextProgressMessage) {
            ccprintf(cerr, "%s: completed %d read accesses @%d\n",
                     name(), numReads, curTick);
            nextProgressMessage += progressInterval;
        }

        if (maxLoads != 0 && numReads >= maxLoads)
            exitSimLoop("maximum number of loads reached");
    } else {
        assert(pkt->isWrite());
        numWritesStat++;
    }

    noResponseCycles = 0;
    delete state;
    delete [] data;
    delete pkt->req;
    delete pkt;
}

void
MemTest::regStats()
{
    using namespace Stats;

    numReadsStat
        .name(name() + ".num_reads")
        .desc("number of read accesses completed")
        ;

    numWritesStat
        .name(name() + ".num_writes")
        .desc("number of write accesses completed")
        ;

    numCopiesStat
        .name(name() + ".num_copies")
        .desc("number of copy accesses completed")
        ;
}

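// Main loop: every cycle, unless we are blocked waiting for a retry or an
// outstanding DMA, generate one random read or write (possibly uncacheable
// or functional) to one of the test regions and issue it.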
void
MemTest::tick()
{
    if (!tickEvent.scheduled())
        schedule(tickEvent, curTick + ticks(1));

    if (++noResponseCycles >= 500000) {
        if (issueDmas) {
            cerr << "DMA tester ";
        }
        cerr << name() << ": deadlocked at cycle " << curTick << endl;
        fatal("");
    }

    if (accessRetry || (issueDmas && dmaOutstanding)) {
        DPRINTF(MemTest, "MemTester waiting on accessRetry or DMA response\n");
        return;
    }

    // make a new request
    unsigned cmd = random() % 100;
    unsigned offset = random() % size;
    unsigned base = random() % 2;
    uint64_t data = random();
    unsigned access_size = random() % 4;
    bool uncacheable = (random() % 100) < percentUncacheable;

    unsigned dma_access_size = random() % 4;

    // Since we aren't doing copies, use the tester id as a byte offset
    // within the block to create false sharing: mask off the low-order
    // bits of the random offset, then offset within the block by id.
    offset = blockAddr(offset);
    offset += id;
    access_size = 0;
    dma_access_size = 0;

    Request *req = new Request();
    Request::Flags flags;
    Addr paddr;

    if (uncacheable) {
        flags.set(Request::UNCACHEABLE);
        paddr = uncacheAddr + offset;
    } else {
        paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
    }
    bool do_functional = (random() % 100 < percentFunctional) && !uncacheable;

    if (issueDmas) {
        paddr &= ~((1 << dma_access_size) - 1);
        req->setPhys(paddr, 1 << dma_access_size, flags);
        req->setThreadContext(id, 0);
    } else {
        paddr &= ~((1 << access_size) - 1);
        req->setPhys(paddr, 1 << access_size, flags);
        req->setThreadContext(id, 0);
    }
    assert(req->getSize() == 1);

    uint8_t *result = new uint8_t[8];

    if (cmd < percentReads) {
        // read

        // For now we only allow one outstanding request per address
        // per tester.  This means we assume the CPU does write forwarding
        // to reads that alias something in the CPU store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        // ***** NOTE FOR RON: I'm not sure how to access checkMem. - Kevin
        funcPort.readBlob(req->getPaddr(), result, req->getSize());

        DPRINTF(MemTest,
                "id %d initiating %sread at addr %x (blk %x) expecting %x\n",
                id, do_functional ? "functional " : "", req->getPaddr(),
                blockAddr(req->getPaddr()), *result);

        PacketPtr pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        pkt->setSrc(0);
        pkt->dataDynamicArray(new uint8_t[req->getSize()]);
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        if (do_functional) {
            cachePort.sendFunctional(pkt);
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    } else {
        // write

        // For now we only allow one outstanding request per address
        // per tester.  This means we assume the CPU does write forwarding
        // to reads that alias something in the CPU store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        DPRINTF(MemTest, "initiating %swrite at addr %x (blk %x) value %x\n",
                do_functional ? "functional " : "", req->getPaddr(),
                blockAddr(req->getPaddr()), data & 0xff);

        PacketPtr pkt = new Packet(req, MemCmd::WriteReq, Packet::Broadcast);
        pkt->setSrc(0);
        uint8_t *pkt_data = new uint8_t[req->getSize()];
        pkt->dataDynamicArray(pkt_data);
        memcpy(pkt_data, &data, req->getSize());
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        funcPort.writeBlob(req->getPaddr(), pkt_data, req->getSize());

        if (do_functional) {
            cachePort.sendFunctional(pkt);
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    }
}

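// Retry callback from the cache port: try to resend the packet that was
// previously rejected and, on success, clear the retry state.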
void
MemTest::doRetry()
{
    if (cachePort.sendTiming(retryPkt)) {
        DPRINTF(MemTest, "accessRetry setting to false\n");
        accessRetry = false;
        retryPkt = NULL;
    }
}


void
MemTest::printAddr(Addr a)
{
    cachePort.printAddr(a);
}


MemTest *
MemTestParams::create()
{
    return new MemTest(this);
}