/*
 * Copyright (c) 2015, 2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Andreas Hansson
 */

#include "cpu/testers/memtest/memtest.hh"

#include "base/random.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "debug/MemTest.hh"
#include "sim/sim_exit.hh"
#include "sim/stats.hh"
#include "sim/system.hh"

using namespace std;

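// Global allocator that hands each MemTest instance a unique id; the id is
// also used as the byte offset within a cache block (see tick()) so that
// multiple testers touch different bytes of the same block (false sharing).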
unsigned int TESTER_ALLOCATOR = 0;

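// Port callbacks: timing responses are always accepted and handed straight
// to the owning tester, and a retry simply pokes the tester to resend the
// packet that previously failed.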
bool
MemTest::CpuPort::recvTimingResp(PacketPtr pkt)
{
    memtest.completeRequest(pkt);
    return true;
}

void
MemTest::CpuPort::recvReqRetry()
{
    memtest.recvRetry();
}

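// Send a packet either atomically (the access completes within the call) or
// as a timing request; if the timing send is rejected, stash the packet for
// a later retry and return false so the caller stops ticking.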
bool
MemTest::sendPkt(PacketPtr pkt)
{
    if (atomic) {
        port.sendAtomic(pkt);
        completeRequest(pkt);
    } else {
        if (!port.sendTimingReq(pkt)) {
            retryPkt = pkt;
            return false;
        }
    }
    return true;
}

MemTest::MemTest(const Params *p)
    : ClockedObject(p),
      tickEvent([this]{ tick(); }, name()),
      noRequestEvent([this]{ noRequest(); }, name()),
      noResponseEvent([this]{ noResponse(); }, name()),
      port("port", *this),
      retryPkt(nullptr),
      size(p->size),
      interval(p->interval),
      percentReads(p->percent_reads),
      percentFunctional(p->percent_functional),
      percentUncacheable(p->percent_uncacheable),
      masterId(p->system->getMasterId(this)),
      blockSize(p->system->cacheLineSize()),
      blockAddrMask(blockSize - 1),
      progressInterval(p->progress_interval),
      progressCheck(p->progress_check),
      nextProgressMessage(p->progress_interval),
      maxLoads(p->max_loads),
      atomic(p->system->isAtomicMode()),
      suppressFuncWarnings(p->suppress_func_warnings)
{
    id = TESTER_ALLOCATOR++;
    fatal_if(id >= blockSize, "Too many testers, only %d allowed\n",
             blockSize);

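    // fixed base addresses for the two cacheable regions and the single
    // uncacheable region the tester generates accesses to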
    baseAddr1 = 0x100000;
    baseAddr2 = 0x400000;
    uncacheAddr = 0x800000;

    // set up counters
    numReads = 0;
    numWrites = 0;

    // kick things into action
    schedule(tickEvent, curTick());
    schedule(noRequestEvent, clockEdge(progressCheck));
}
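
// A minimal configuration sketch: wiring one tester into a Python config
// script might look like the lines below (hedged: parameter and port names
// are inferred from the p-> accesses above and from getPort(), and membus
// is an assumed bus in the host script):
//
//     system.tester = MemTest(percent_reads=65, percent_functional=10,
//                             percent_uncacheable=10, max_loads=100000)
//     system.tester.port = system.membus.slave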

Port &
MemTest::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "port")
        return port;
    else
        return ClockedObject::getPort(if_name, idx);
}

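// Complete a request: retire the address from the outstanding set, check
// read data against the reference copy kept in referenceData, record write
// data as the new reference value, and push the response watchdog forward.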
void
MemTest::completeRequest(PacketPtr pkt, bool functional)
{
    const RequestPtr &req = pkt->req;
    assert(req->getSize() == 1);

    // this address is no longer outstanding
    auto remove_addr = outstandingAddrs.find(req->getPaddr());
    assert(remove_addr != outstandingAddrs.end());
    outstandingAddrs.erase(remove_addr);

    DPRINTF(MemTest, "Completing %s at address %x (blk %x) %s\n",
            pkt->isWrite() ? "write" : "read",
            req->getPaddr(), blockAlign(req->getPaddr()),
            pkt->isError() ? "error" : "success");

    const uint8_t *pkt_data = pkt->getConstPtr<uint8_t>();

    if (pkt->isError()) {
        if (!functional || !suppressFuncWarnings) {
            warn("%s access failed at %#x\n",
                 pkt->isWrite() ? "Write" : "Read", req->getPaddr());
        }
    } else {
        if (pkt->isRead()) {
            uint8_t ref_data = referenceData[req->getPaddr()];
            if (pkt_data[0] != ref_data) {
                panic("%s: read of %x (blk %x) @ cycle %d "
                      "returns %x, expected %x\n", name(),
                      req->getPaddr(), blockAlign(req->getPaddr()), curTick(),
                      pkt_data[0], ref_data);
            }

            numReads++;
            numReadsStat++;

            if (numReads == (uint64_t)nextProgressMessage) {
                ccprintf(cerr, "%s: completed %d read, %d write accesses @%d\n",
                         name(), numReads, numWrites, curTick());
                nextProgressMessage += progressInterval;
            }

            if (maxLoads != 0 && numReads >= maxLoads)
                exitSimLoop("maximum number of loads reached");
        } else {
            assert(pkt->isWrite());

            // update the reference data
            referenceData[req->getPaddr()] = pkt_data[0];
            numWrites++;
            numWritesStat++;
        }
    }

    // the packet will delete the data
    delete pkt;

    // finally shift the response timeout forward if we are still
    // expecting responses; deschedule it otherwise
    if (outstandingAddrs.size() != 0)
        reschedule(noResponseEvent, clockEdge(progressCheck));
    else if (noResponseEvent.scheduled())
        deschedule(noResponseEvent);
}

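// Register the two completion counters with the statistics framework; the
// plain numReads/numWrites members drive the progress messages and the
// max_loads exit condition instead.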
void
MemTest::regStats()
{
    ClockedObject::regStats();

    using namespace Stats;

    numReadsStat
        .name(name() + ".num_reads")
        .desc("number of read accesses completed")
        ;

    numWritesStat
        .name(name() + ".num_writes")
        .desc("number of write accesses completed")
        ;
}

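// Main loop: on every tick, generate one random single-byte access (read,
// write, or functional variant of either) to a unique address that is not
// already outstanding, then reschedule unless we are waiting for a retry.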
void
MemTest::tick()
{
    // we should never tick if we are waiting for a retry
    assert(!retryPkt);

    // create a new request
    unsigned cmd = random_mt.random(0, 100);
    uint8_t data = random_mt.random<uint8_t>();
    bool uncacheable = random_mt.random(0, 100) < percentUncacheable;
    unsigned base = random_mt.random(0, 1);
    Request::Flags flags;
    Addr paddr;

    // generate a unique address
    do {
        unsigned offset = random_mt.random<unsigned>(0, size - 1);

        // use the tester id as offset within the block for false sharing
        offset = blockAlign(offset);
        offset += id;

        if (uncacheable) {
            flags.set(Request::UNCACHEABLE);
            paddr = uncacheAddr + offset;
        } else {
            paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
        }
    } while (outstandingAddrs.find(paddr) != outstandingAddrs.end());

    bool do_functional = (random_mt.random(0, 100) < percentFunctional) &&
        !uncacheable;
    RequestPtr req = std::make_shared<Request>(paddr, 1, flags, masterId);
    req->setContext(id);

    outstandingAddrs.insert(paddr);

    // sanity check
    panic_if(outstandingAddrs.size() > 100,
             "Tester %s has more than 100 outstanding requests\n", name());

    PacketPtr pkt = nullptr;
    uint8_t *pkt_data = new uint8_t[1];

    if (cmd < percentReads) {
        // start by ensuring there is a reference value if we have not
        // seen this address before
        uint8_t M5_VAR_USED ref_data = 0;
        auto ref = referenceData.find(req->getPaddr());
        if (ref == referenceData.end()) {
            referenceData[req->getPaddr()] = 0;
        } else {
            ref_data = ref->second;
        }

        DPRINTF(MemTest,
                "Initiating %sread at addr %x (blk %x) expecting %x\n",
                do_functional ? "functional " : "", req->getPaddr(),
                blockAlign(req->getPaddr()), ref_data);

        pkt = new Packet(req, MemCmd::ReadReq);
        pkt->dataDynamic(pkt_data);
    } else {
        DPRINTF(MemTest, "Initiating %swrite at addr %x (blk %x) value %x\n",
                do_functional ? "functional " : "", req->getPaddr(),
                blockAlign(req->getPaddr()), data);

        pkt = new Packet(req, MemCmd::WriteReq);
        pkt->dataDynamic(pkt_data);
        pkt_data[0] = data;
    }

    // there is no point in ticking if we are waiting for a retry
    bool keep_ticking = true;
    if (do_functional) {
        pkt->setSuppressFuncError();
        port.sendFunctional(pkt);
        completeRequest(pkt, true);
    } else {
        keep_ticking = sendPkt(pkt);
    }

    if (keep_ticking) {
        // schedule the next tick
        schedule(tickEvent, clockEdge(interval));

        // finally shift the timeout for sending of requests forwards
        // as we have successfully sent a packet
        reschedule(noRequestEvent, clockEdge(progressCheck), true);
    } else {
        DPRINTF(MemTest, "Waiting for retry\n");
    }

    // Schedule noResponseEvent now if we are expecting a response
    if (!noResponseEvent.scheduled() && (outstandingAddrs.size() != 0))
        schedule(noResponseEvent, clockEdge(progressCheck));
}

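// Watchdogs: either of these firing means the tester has made no forward
// progress for progressCheck cycles, which points at a deadlock in the
// memory system under test.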
void
MemTest::noRequest()
{
    panic("%s did not send a request for %d cycles", name(), progressCheck);
}

void
MemTest::noResponse()
{
    panic("%s did not see a response for %d cycles", name(), progressCheck);
}

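// Retry handler: resend the stalled packet and, if the port now accepts it,
// resume ticking and restart the request watchdog.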
void
MemTest::recvRetry()
{
    assert(retryPkt);
    if (port.sendTimingReq(retryPkt)) {
        DPRINTF(MemTest, "Proceeding after successful retry\n");

        retryPkt = nullptr;
        // kick things into action again
        schedule(tickEvent, clockEdge(interval));
        reschedule(noRequestEvent, clockEdge(progressCheck), true);
    }
}

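// Factory hook: MemTestParams is the auto-generated parameter struct for
// the MemTest SimObject described in Python (MemTest.py).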
MemTest *
MemTestParams::create()
{
    return new MemTest(this);
}