memtest.cc (13892:0182a0601f66)
/*
 * Copyright (c) 2015 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 *          Andreas Hansson
 */

#include "cpu/testers/memtest/memtest.hh"

#include "base/random.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "debug/MemTest.hh"
#include "sim/sim_exit.hh"
#include "sim/stats.hh"
#include "sim/system.hh"

using namespace std;

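// Global id allocator: each MemTest instance grabs the next value in its
// constructor and later uses it as its byte offset within a cache block,
// so several testers can share the same blocks (false sharing) while still
// reading and writing disjoint bytes and keeping their own reference data.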
unsigned int TESTER_ALLOCATOR = 0;

bool
MemTest::CpuPort::recvTimingResp(PacketPtr pkt)
{
    memtest.completeRequest(pkt);
    return true;
}

void
MemTest::CpuPort::recvReqRetry()
{
    memtest.recvRetry();
}

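// Issue a packet on the port. In atomic mode the access completes
// immediately and the response is handled in-line; in timing mode a
// rejected sendTimingReq stashes the packet in retryPkt and returns false
// so the caller stops ticking until recvReqRetry() resends it.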
bool
MemTest::sendPkt(PacketPtr pkt) {
    if (atomic) {
        port.sendAtomic(pkt);
        completeRequest(pkt);
    } else {
        if (!port.sendTimingReq(pkt)) {
            retryPkt = pkt;
            return false;
        }
    }
    return true;
}

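// The constructor wires up three events: tickEvent drives the generation of
// new requests, while noRequestEvent and noResponseEvent are watchdogs that
// panic if no request is sent or no response is seen within progressCheck
// cycles. The two cacheable regions (baseAddr1, baseAddr2) and the
// uncacheable region (uncacheAddr) are fixed physical bases that tick()
// chooses between, and the fatal_if below caps the number of testers at the
// cache line size since each tester claims one byte per block.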
MemTest::MemTest(const Params *p)
    : ClockedObject(p),
      tickEvent([this]{ tick(); }, name()),
      noRequestEvent([this]{ noRequest(); }, name()),
      noResponseEvent([this]{ noResponse(); }, name()),
      port("port", *this),
      retryPkt(nullptr),
      size(p->size),
      interval(p->interval),
      percentReads(p->percent_reads),
      percentFunctional(p->percent_functional),
      percentUncacheable(p->percent_uncacheable),
      masterId(p->system->getMasterId(this)),
      blockSize(p->system->cacheLineSize()),
      blockAddrMask(blockSize - 1),
      progressInterval(p->progress_interval),
      progressCheck(p->progress_check),
      nextProgressMessage(p->progress_interval),
      maxLoads(p->max_loads),
      atomic(p->system->isAtomicMode()),
      suppressFuncWarnings(p->suppress_func_warnings)
{
    id = TESTER_ALLOCATOR++;
    fatal_if(id >= blockSize, "Too many testers, only %d allowed\n",
             blockSize - 1);

    baseAddr1 = 0x100000;
    baseAddr2 = 0x400000;
    uncacheAddr = 0x800000;

    // set up counters
    numReads = 0;
    numWrites = 0;

    // kick things into action
    schedule(tickEvent, curTick());
    schedule(noRequestEvent, clockEdge(progressCheck));
    schedule(noResponseEvent, clockEdge(progressCheck));
}

Port &
MemTest::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "port")
        return port;
    else
        return ClockedObject::getPort(if_name, idx);
}

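// Called when a response arrives (or directly for atomic and functional
// accesses): remove the address from the outstanding set, check read data
// against the referenceData map, update the map on writes, print the
// periodic progress message, and exit the simulation once maxLoads reads
// have completed.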
void
MemTest::completeRequest(PacketPtr pkt, bool functional)
{
    const RequestPtr &req = pkt->req;
    assert(req->getSize() == 1);

    // this address is no longer outstanding
    auto remove_addr = outstandingAddrs.find(req->getPaddr());
    assert(remove_addr != outstandingAddrs.end());
    outstandingAddrs.erase(remove_addr);

    DPRINTF(MemTest, "Completing %s at address %x (blk %x) %s\n",
            pkt->isWrite() ? "write" : "read",
            req->getPaddr(), blockAlign(req->getPaddr()),
            pkt->isError() ? "error" : "success");

    const uint8_t *pkt_data = pkt->getConstPtr<uint8_t>();

    if (pkt->isError()) {
        if (!functional || !suppressFuncWarnings) {
            warn("%s access failed at %#x\n",
                 pkt->isWrite() ? "Write" : "Read", req->getPaddr());
        }
    } else {
        if (pkt->isRead()) {
            uint8_t ref_data = referenceData[req->getPaddr()];
            if (pkt_data[0] != ref_data) {
                panic("%s: read of %x (blk %x) @ cycle %d "
                      "returns %x, expected %x\n", name(),
                      req->getPaddr(), blockAlign(req->getPaddr()), curTick(),
                      pkt_data[0], ref_data);
            }

            numReads++;
            numReadsStat++;

            if (numReads == (uint64_t)nextProgressMessage) {
                ccprintf(cerr, "%s: completed %d read, %d write accesses @%d\n",
                         name(), numReads, numWrites, curTick());
                nextProgressMessage += progressInterval;
            }

            if (maxLoads != 0 && numReads >= maxLoads)
                exitSimLoop("maximum number of loads reached");
        } else {
            assert(pkt->isWrite());

            // update the reference data
            referenceData[req->getPaddr()] = pkt_data[0];
            numWrites++;
            numWritesStat++;
        }
    }

    // the packet will delete the data
    delete pkt;

    // finally shift the response timeout forward
    reschedule(noResponseEvent, clockEdge(progressCheck), true);
}

void
MemTest::regStats()
{
    ClockedObject::regStats();

    using namespace Stats;

    numReadsStat
        .name(name() + ".num_reads")
        .desc("number of read accesses completed")
        ;

    numWritesStat
        .name(name() + ".num_writes")
        .desc("number of write accesses completed")
        ;
}

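// One tick per request: pick a random command, data value, and target
// region, generate a block-aligned address (plus this tester's id as the
// byte offset) that is not already outstanding, and issue it as a
// functional, atomic, or timing access. Ticking continues as long as the
// port accepts the request; otherwise it resumes from recvRetry().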
void
MemTest::tick()
{
    // we should never tick if we are waiting for a retry
    assert(!retryPkt);

    // create a new request
    unsigned cmd = random_mt.random(0, 100);
    uint8_t data = random_mt.random<uint8_t>();
    bool uncacheable = random_mt.random(0, 100) < percentUncacheable;
    unsigned base = random_mt.random(0, 1);
    Request::Flags flags;
    Addr paddr;

    // generate a unique address
    do {
        unsigned offset = random_mt.random<unsigned>(0, size - 1);

        // use the tester id as offset within the block for false sharing
        offset = blockAlign(offset);
        offset += id;

        if (uncacheable) {
            flags.set(Request::UNCACHEABLE);
            paddr = uncacheAddr + offset;
        } else {
            paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
        }
    } while (outstandingAddrs.find(paddr) != outstandingAddrs.end());

    bool do_functional = (random_mt.random(0, 100) < percentFunctional) &&
        !uncacheable;
    RequestPtr req = std::make_shared<Request>(paddr, 1, flags, masterId);
    req->setContext(id);

    outstandingAddrs.insert(paddr);

    // sanity check
    panic_if(outstandingAddrs.size() > 100,
             "Tester %s has more than 100 outstanding requests\n", name());

    PacketPtr pkt = nullptr;
    uint8_t *pkt_data = new uint8_t[1];

    if (cmd < percentReads) {
        // start by ensuring there is a reference value if we have not
        // seen this address before
        uint8_t M5_VAR_USED ref_data = 0;
        auto ref = referenceData.find(req->getPaddr());
        if (ref == referenceData.end()) {
            referenceData[req->getPaddr()] = 0;
        } else {
            ref_data = ref->second;
        }

        DPRINTF(MemTest,
                "Initiating %sread at addr %x (blk %x) expecting %x\n",
                do_functional ? "functional " : "", req->getPaddr(),
                blockAlign(req->getPaddr()), ref_data);

        pkt = new Packet(req, MemCmd::ReadReq);
        pkt->dataDynamic(pkt_data);
    } else {
        DPRINTF(MemTest, "Initiating %swrite at addr %x (blk %x) value %x\n",
                do_functional ? "functional " : "", req->getPaddr(),
                blockAlign(req->getPaddr()), data);

        pkt = new Packet(req, MemCmd::WriteReq);
        pkt->dataDynamic(pkt_data);
        pkt_data[0] = data;
    }

    // there is no point in ticking if we are waiting for a retry
    bool keep_ticking = true;
    if (do_functional) {
        pkt->setSuppressFuncError();
        port.sendFunctional(pkt);
        completeRequest(pkt, true);
    } else {
        keep_ticking = sendPkt(pkt);
    }

    if (keep_ticking) {
        // schedule the next tick
        schedule(tickEvent, clockEdge(interval));

        // finally shift the timeout for sending of requests forwards
        // as we have successfully sent a packet
        reschedule(noRequestEvent, clockEdge(progressCheck), true);
    } else {
        DPRINTF(MemTest, "Waiting for retry\n");
    }
}

void
MemTest::noRequest()
{
    panic("%s did not send a request for %d cycles", name(), progressCheck);
}

void
MemTest::noResponse()
{
    panic("%s did not see a response for %d cycles", name(), progressCheck);
}

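// Upcall from the port once the peer is ready again after rejecting a
// timing request: resend the stashed packet and, if it is accepted,
// restart the tick loop.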
void
MemTest::recvRetry()
{
    assert(retryPkt);
    if (port.sendTimingReq(retryPkt)) {
        DPRINTF(MemTest, "Proceeding after successful retry\n");

        retryPkt = nullptr;
        // kick things into action again
        schedule(tickEvent, clockEdge(interval));
    }
}

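// Factory hook: gem5's generated MemTestParams class calls create() when the
// Python configuration instantiates a MemTest SimObject.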
MemTest *
MemTestParams::create()
{
    return new MemTest(this);
}