memtest.cc (8832:247fee427324)
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 */

// FIX ME: make trackBlkAddr use blocksize from actual cache, not hard coded

#include <iomanip>
#include <set>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "cpu/testers/memtest/memtest.hh"
#include "debug/MemTest.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
#include "mem/request.hh"
#include "sim/sim_events.hh"
#include "sim/stats.hh"
#include "sim/system.hh"

using namespace std;

// Bumped once per constructed tester so that every instance gets a unique
// id; the id is used below to give each tester its own byte offset within
// a block.
int TESTER_ALLOCATOR = 0;

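// Timing-mode callback from the cache-side port: a response completes one
// of this tester's outstanding requests; any request seen here must be a
// broadcast snoop upcall.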
bool
MemTest::CpuPort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse()) {
        memtest->completeRequest(pkt);
    } else {
        // must be snoop upcall
        assert(pkt->isRequest());
        assert(pkt->getDest() == Packet::Broadcast);
    }
    return true;
}

Tick
MemTest::CpuPort::recvAtomic(PacketPtr pkt)
{
    // must be snoop upcall
    assert(pkt->isRequest());
    assert(pkt->getDest() == Packet::Broadcast);
    return curTick();
}

void
MemTest::CpuPort::recvFunctional(PacketPtr pkt)
{
    // Do nothing if we see one come through.
//    if (curTick() != 0) // Suppress warning during initialization
//        warn("Functional Writes not implemented in MemTester\n");
    // We would need to find any response values that intersect and update.
    return;
}

void
MemTest::CpuPort::recvRangeChange()
{
}

void
MemTest::CpuPort::recvRetry()
{
    memtest->doRetry();
}

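// Send a packet out the cache port.  In atomic mode the access completes
// immediately; in timing mode a rejected send is stashed in retryPkt and
// re-issued from doRetry() when the port signals a retry.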
void
MemTest::sendPkt(PacketPtr pkt)
{
    if (atomic) {
        cachePort.sendAtomic(pkt);
        completeRequest(pkt);
    } else if (!cachePort.sendTiming(pkt)) {
        DPRINTF(MemTest, "accessRetry setting to true\n");

        // DMA requests should never be retried
        if (issueDmas) {
            panic("Nacked DMA requests are not supported\n");
        }
        accessRetry = true;
        retryPkt = pkt;
    } else {
        if (issueDmas) {
            dmaOutstanding = true;
        }
    }
}

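// All configuration comes in through the Params struct generated from the
// Python SimObject description.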
MemTest::MemTest(const Params *p)
    : MemObject(p),
      tickEvent(this),
      cachePort("test", this),
      funcPort("functional", this),
      retryPkt(NULL),
//      mainMem(main_mem),
//      checkMem(check_mem),
      size(p->memory_size),
      percentReads(p->percent_reads),
      percentFunctional(p->percent_functional),
      percentUncacheable(p->percent_uncacheable),
      issueDmas(p->issue_dmas),
      masterId(p->sys->getMasterId(name())),
      progressInterval(p->progress_interval),
      nextProgressMessage(p->progress_interval),
      percentSourceUnaligned(p->percent_source_unaligned),
      percentDestUnaligned(p->percent_dest_unaligned),
      maxLoads(p->max_loads),
      atomic(p->atomic),
      suppress_func_warnings(p->suppress_func_warnings)
{
    id = TESTER_ALLOCATOR++;

    // Needs to be masked off once we know the block size.
    traceBlockAddr = p->trace_addr;
    baseAddr1 = 0x100000;
    baseAddr2 = 0x400000;
    uncacheAddr = 0x800000;

    // set up counters
    noResponseCycles = 0;
    numReads = 0;
    numWrites = 0;
    schedule(tickEvent, 0);

    accessRetry = false;
    dmaOutstanding = false;
}

Port *
MemTest::getPort(const std::string &if_name, int idx)
{
    if (if_name == "functional")
        return &funcPort;
    else if (if_name == "test")
        return &cachePort;
    else
        panic("No Such Port\n");
}

void
MemTest::init()
{
    // By the time init() is called, the ports should be hooked up.
    blockSize = cachePort.peerBlockSize();
    blockAddrMask = blockSize - 1;
    traceBlockAddr = blockAddr(traceBlockAddr);

    // Initial memory contents for both physical memory and functional
    // memory should be 0; no need to initialize them.
}

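// Handle a completed access: check read data against the reference copy
// kept behind the functional port, and mirror completed writes into it.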
void
MemTest::completeRequest(PacketPtr pkt)
{
    Request *req = pkt->req;

    if (issueDmas) {
        dmaOutstanding = false;
    }

    DPRINTF(MemTest, "completing %s at address %x (blk %x) %s\n",
            pkt->isWrite() ? "write" : "read",
            req->getPaddr(), blockAddr(req->getPaddr()),
            pkt->isError() ? "error" : "success");

    MemTestSenderState *state =
        dynamic_cast<MemTestSenderState *>(pkt->senderState);

    uint8_t *data = state->data;
    uint8_t *pkt_data = pkt->getPtr<uint8_t>();

    // Remove the address from the list of outstanding requests.
    std::set<unsigned>::iterator removeAddr =
        outstandingAddrs.find(req->getPaddr());
    assert(removeAddr != outstandingAddrs.end());
    outstandingAddrs.erase(removeAddr);

    if (pkt->isError()) {
        if (!suppress_func_warnings) {
            warn("Functional access failed for %s at %x\n",
                 pkt->isWrite() ? "write" : "read", req->getPaddr());
        }
    } else {
        if (pkt->isRead()) {
            if (memcmp(pkt_data, data, pkt->getSize()) != 0) {
                panic("%s: read of %x (blk %x) @ cycle %d "
                      "returns %x, expected %x\n", name(),
                      req->getPaddr(), blockAddr(req->getPaddr()), curTick(),
                      *pkt_data, *data);
            }

            numReads++;
            numReadsStat++;

            if (numReads == (uint64_t)nextProgressMessage) {
                ccprintf(cerr, "%s: completed %d read, %d write accesses @%d\n",
                         name(), numReads, numWrites, curTick());
                nextProgressMessage += progressInterval;
            }

            if (maxLoads != 0 && numReads >= maxLoads)
                exitSimLoop("maximum number of loads reached");
        } else {
            assert(pkt->isWrite());
            funcPort.writeBlob(req->getPaddr(), pkt_data, req->getSize());
            numWrites++;
            numWritesStat++;
        }
    }

    noResponseCycles = 0;
    delete state;
    delete [] data;
    delete pkt->req;
    delete pkt;
}

void
MemTest::regStats()
{
    using namespace Stats;

    numReadsStat
        .name(name() + ".num_reads")
        .desc("number of read accesses completed")
        ;

    numWritesStat
        .name(name() + ".num_writes")
        .desc("number of write accesses completed")
        ;

    numCopiesStat
        .name(name() + ".num_copies")
        .desc("number of copy accesses completed")
        ;
}

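// Called every cycle: watch for deadlock, then generate one new random
// access unless we are blocked waiting for a retry or a DMA response.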
void
MemTest::tick()
{
    if (!tickEvent.scheduled())
        schedule(tickEvent, curTick() + ticks(1));

    if (++noResponseCycles >= 500000) {
        if (issueDmas) {
            cerr << "DMA tester ";
        }
        cerr << name() << ": deadlocked at cycle " << curTick() << endl;
        fatal("");
    }

    if (accessRetry || (issueDmas && dmaOutstanding)) {
        DPRINTF(MemTest, "MemTester waiting on accessRetry or DMA response\n");
        return;
    }

    // Make a new request.
    unsigned cmd = random() % 100;
    unsigned offset = random() % size;
    unsigned base = random() % 2;
    uint64_t data = random();
    unsigned access_size = random() % 4;
    bool uncacheable = (random() % 100) < percentUncacheable;

    unsigned dma_access_size = random() % 4;

    // Since we aren't doing copies, use the tester id as a byte offset
    // within the block so the testers exercise false sharing: mask off the
    // low-order bits of the random offset, then add the id back in.
    offset = blockAddr(offset);
    offset += id;
    access_size = 0;
    dma_access_size = 0;

    Request *req = new Request();
    Request::Flags flags;
    Addr paddr;

    if (uncacheable) {
        flags.set(Request::UNCACHEABLE);
        paddr = uncacheAddr + offset;
    } else {
        paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
    }
    bool do_functional = (random() % 100 < percentFunctional) && !uncacheable;

    if (issueDmas) {
        paddr &= ~((1 << dma_access_size) - 1);
        req->setPhys(paddr, 1 << dma_access_size, flags, masterId);
        req->setThreadContext(id, 0);
    } else {
        paddr &= ~((1 << access_size) - 1);
        req->setPhys(paddr, 1 << access_size, flags, masterId);
        req->setThreadContext(id, 0);
    }
    assert(req->getSize() == 1);

    uint8_t *result = new uint8_t[8];

    if (cmd < percentReads) {
        // read

        // For now we only allow one outstanding request per address
        // per tester.  This means we assume the CPU does write forwarding
        // to reads that alias something in the cpu store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        // ***** NOTE FOR RON: I'm not sure how to access checkMem. - Kevin
        funcPort.readBlob(req->getPaddr(), result, req->getSize());

        DPRINTF(MemTest,
                "id %d initiating %sread at addr %x (blk %x) expecting %x\n",
                id, do_functional ? "functional " : "", req->getPaddr(),
                blockAddr(req->getPaddr()), *result);

        PacketPtr pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        pkt->setSrc(0);
        pkt->dataDynamicArray(new uint8_t[req->getSize()]);
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        if (do_functional) {
            assert(pkt->needsResponse());
            pkt->setSuppressFuncError();
            cachePort.sendFunctional(pkt);
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    } else {
        // write

        // For now we only allow one outstanding request per address
        // per tester.  This means we assume the CPU does write forwarding
        // to reads that alias something in the cpu store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        DPRINTF(MemTest, "initiating %swrite at addr %x (blk %x) value %x\n",
                do_functional ? "functional " : "", req->getPaddr(),
                blockAddr(req->getPaddr()), data & 0xff);

        PacketPtr pkt = new Packet(req, MemCmd::WriteReq, Packet::Broadcast);
        pkt->setSrc(0);
        uint8_t *pkt_data = new uint8_t[req->getSize()];
        pkt->dataDynamicArray(pkt_data);
        memcpy(pkt_data, &data, req->getSize());
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        if (do_functional) {
            pkt->setSuppressFuncError();
            cachePort.sendFunctional(pkt);
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    }
}

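// Re-issue the packet that was previously rejected; called from the port's
// recvRetry() callback once the peer can accept it again.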
void
MemTest::doRetry()
{
    if (cachePort.sendTiming(retryPkt)) {
        DPRINTF(MemTest, "accessRetry setting to false\n");
        accessRetry = false;
        retryPkt = NULL;
    }
}


void
MemTest::printAddr(Addr a)
{
    cachePort.printAddr(a);
}


MemTest *
MemTestParams::create()
{
    return new MemTest(this);
}