/*
 * Copyright (c) 2010-2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ron Dreslinski
 *          Ali Saidi
 *          Andreas Hansson
 */

#include "mem/abstract_mem.hh"

#include <cstring>
#include <vector>

#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/LLSC.hh"
#include "debug/MemoryAccess.hh"
#include "mem/packet_access.hh"
#include "sim/system.hh"

using namespace std;

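// Note that no backing store is allocated here; pmemAddr starts out NULL
// and is attached later through setBackingStore().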
AbstractMemory::AbstractMemory(const Params *p) :
    MemObject(p), range(params()->range), pmemAddr(NULL),
    confTableReported(p->conf_table_reported), inAddrMap(p->in_addr_map),
    kvmMap(p->kvm_map), _system(NULL)
{
}

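// Basic sanity checks at init time: the memory must be associated with a
// system, and its size must be a multiple of the system page size.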
void
AbstractMemory::init()
{
    assert(system());

    if (size() % _system->getPageBytes() != 0)
        panic("Memory Size not divisible by page size\n");
}

void
AbstractMemory::setBackingStore(uint8_t* pmem_addr)
{
    pmemAddr = pmem_addr;
}

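// Register the per-master statistics (byte and access counters) and the
// derived bandwidth formulas with the statistics framework.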
void
AbstractMemory::regStats()
{
    MemObject::regStats();

    using namespace Stats;

    assert(system());

    bytesRead
        .init(system()->maxMasters())
        .name(name() + ".bytes_read")
        .desc("Number of bytes read from this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bytesRead.subname(i, system()->getMasterName(i));
    }
    bytesInstRead
        .init(system()->maxMasters())
        .name(name() + ".bytes_inst_read")
        .desc("Number of instruction bytes read from this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bytesInstRead.subname(i, system()->getMasterName(i));
    }
    bytesWritten
        .init(system()->maxMasters())
        .name(name() + ".bytes_written")
        .desc("Number of bytes written to this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bytesWritten.subname(i, system()->getMasterName(i));
    }
    numReads
        .init(system()->maxMasters())
        .name(name() + ".num_reads")
        .desc("Number of read requests responded to by this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        numReads.subname(i, system()->getMasterName(i));
    }
    numWrites
        .init(system()->maxMasters())
        .name(name() + ".num_writes")
        .desc("Number of write requests responded to by this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        numWrites.subname(i, system()->getMasterName(i));
    }
    numOther
        .init(system()->maxMasters())
        .name(name() + ".num_other")
        .desc("Number of other requests responded to by this memory")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        numOther.subname(i, system()->getMasterName(i));
    }
    bwRead
        .name(name() + ".bw_read")
        .desc("Total read bandwidth from this memory (bytes/s)")
        .precision(0)
        .prereq(bytesRead)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bwRead.subname(i, system()->getMasterName(i));
    }

    bwInstRead
        .name(name() + ".bw_inst_read")
        .desc("Instruction read bandwidth from this memory (bytes/s)")
        .precision(0)
        .prereq(bytesInstRead)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bwInstRead.subname(i, system()->getMasterName(i));
    }
    bwWrite
        .name(name() + ".bw_write")
        .desc("Write bandwidth from this memory (bytes/s)")
        .precision(0)
        .prereq(bytesWritten)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bwWrite.subname(i, system()->getMasterName(i));
    }
    bwTotal
        .name(name() + ".bw_total")
        .desc("Total bandwidth to/from this memory (bytes/s)")
        .precision(0)
        .prereq(bwTotal)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system()->maxMasters(); i++) {
        bwTotal.subname(i, system()->getMasterName(i));
    }
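    // The bandwidth stats are formulas evaluated over the byte counters
    // above and the simulated time in seconds.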
    bwRead = bytesRead / simSeconds;
    bwInstRead = bytesInstRead / simSeconds;
    bwWrite = bytesWritten / simSeconds;
    bwTotal = (bytesRead + bytesWritten) / simSeconds;
}

AddrRange
AbstractMemory::getAddrRange() const
{
    return range;
}

// Add load-locked to tracking list. Should only be called if the
// operation is a load and the LLSC flag is set.
void
AbstractMemory::trackLoadLocked(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());

    // first we check if we already have a locked addr for this
    // xc. Since each xc only gets one, we just update the
    // existing record with the new address.
    list<LockedAddr>::iterator i;

    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
        if (i->matchesContext(req)) {
            DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
                    req->contextId(), paddr);
            i->addr = paddr;
            return;
        }
    }

    // no record for this xc: need to allocate a new one
    DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
            req->contextId(), paddr);
    lockedAddrList.push_front(LockedAddr(req));
}


// Called on *writes* only... both regular stores and
// store-conditional operations. Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
AbstractMemory::checkLockedAddrList(PacketPtr pkt)
{
    Request *req = pkt->req;
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLLSC = pkt->isLLSC();

    // Initialize return value. Non-conditional stores always
    // succeed. Assume conditional stores will fail until proven
    // otherwise.
    bool allowStore = !isLLSC;

    // Iterate over the list. Note that there could be multiple matching
    // records, as more than one context could have done a load locked to
    // this location. Only remove records once we have found a record for
    // this (xc, addr) pair; at that point, remove all records with this
    // address. A failed store-conditional must not clear unrelated
    // reservations.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    if (isLLSC) {
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory system
                // can tell, the requesting context's lock is still valid.
                DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
                        req->contextId(), paddr);
                allowStore = true;
                break;
            }
            // If we didn't find a match, keep searching! Someone else may
            // well have a reservation on this line here but we may find
            // ours in just a little while.
            i++;
        }
        req->setExtraData(allowStore ? 1 : 0);
    }
    // LLSCs that succeeded AND non-LLSC stores both fall into here:
    if (allowStore) {
        // We write address paddr. However, there may be several entries
        // with a reservation on this address (for other contextIds) and
        // they must all be removed.
        i = lockedAddrList.begin();
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr) {
                DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
                        i->contextId, paddr);
                // For ARM, a spinlock would typically include a Wait
                // For Event (WFE) to conserve energy. The ARMv8
                // architecture specifies that an event is
                // automatically generated when clearing the exclusive
                // monitor to wake up the processor in WFE.
                ThreadContext* ctx = system()->getThreadContext(i->contextId);
                ctx->getCpuPtr()->wakeup(ctx->threadId());
                i = lockedAddrList.erase(i);
            } else {
                i++;
            }
        }
    }

    return allowStore;
}


#if TRACING_ON

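// Helper macros for tracing memory accesses when the MemoryAccess debug
// flag is enabled. CASE() prints the data value for accesses of a known
// integer size; TRACE_PACKET() dispatches on the packet size and falls
// back to a hex dump of the payload for other sizes.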
#define CASE(A, T)                                                        \
  case sizeof(T):                                                         \
    DPRINTF(MemoryAccess,"%s from %s of size %i on address 0x%x data "    \
            "0x%x %c\n", A, system()->getMasterName(pkt->req->masterId()),\
            pkt->getSize(), pkt->getAddr(), pkt->get<T>(),                \
            pkt->req->isUncacheable() ? 'U' : 'C');                       \
    break


#define TRACE_PACKET(A)                                                   \
    do {                                                                  \
        switch (pkt->getSize()) {                                         \
          CASE(A, uint64_t);                                              \
          CASE(A, uint32_t);                                              \
          CASE(A, uint16_t);                                              \
          CASE(A, uint8_t);                                               \
          default:                                                        \
            DPRINTF(MemoryAccess, "%s from %s of size %i on address "     \
                    "0x%x %c\n",                                          \
                    A, system()->getMasterName(pkt->req->masterId()),     \
                    pkt->getSize(), pkt->getAddr(),                       \
                    pkt->req->isUncacheable() ? 'U' : 'C');               \
            DDUMP(MemoryAccess, pkt->getConstPtr<uint8_t>(), pkt->getSize()); \
        }                                                                 \
    } while (0)

#else

#define TRACE_PACKET(A)

#endif

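// Perform the access against the backing store: update statistics, handle
// LL/SC tracking, (conditional) swaps, reads and writes, and turn the
// packet into a response if one is needed.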
void
AbstractMemory::access(PacketPtr pkt)
{
    if (pkt->cacheResponding()) {
        DPRINTF(MemoryAccess, "Cache responding to %#llx: not responding\n",
                pkt->getAddr());
        return;
    }

    if (pkt->cmd == MemCmd::CleanEvict || pkt->cmd == MemCmd::WritebackClean) {
        DPRINTF(MemoryAccess, "CleanEvict on 0x%x: not responding\n",
                pkt->getAddr());
        return;
    }

    assert(AddrRange(pkt->getAddr(),
                     pkt->getAddr() + (pkt->getSize() - 1)).isSubset(range));

    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - range.start();

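    // A SwapReq either carries an atomic operation that is applied in place
    // on the backing store, or performs a (possibly conditional) swap: the
    // old memory value is returned in the packet and, unless a conditional
    // swap fails its comparison, the new value is written to memory.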
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            if (pmemAddr) {
                memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
                (*(pkt->getAtomicOp()))(hostAddr);
            }
        } else {
            std::vector<uint8_t> overwrite_val(pkt->getSize());
            uint64_t condition_val64;
            uint32_t condition_val32;

            if (!pmemAddr)
                panic("Swap only works if there is real memory "
                      "(i.e. null=False)");

            bool overwrite_mem = true;
            // keep a copy of our possible write value, and copy what is at
            // the memory address into the packet
            std::memcpy(&overwrite_val[0], pkt->getConstPtr<uint8_t>(),
                        pkt->getSize());
            std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());

            if (pkt->req->isCondSwap()) {
                if (pkt->getSize() == sizeof(uint64_t)) {
                    condition_val64 = pkt->req->getExtraData();
                    overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
                                                 sizeof(uint64_t));
                } else if (pkt->getSize() == sizeof(uint32_t)) {
                    condition_val32 = (uint32_t)pkt->req->getExtraData();
                    overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
                                                 sizeof(uint32_t));
                } else
                    panic("Invalid size for conditional read/write\n");
            }

            if (overwrite_mem)
                std::memcpy(hostAddr, &overwrite_val[0], pkt->getSize());

            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Read/Write");
            numOther[pkt->req->masterId()]++;
        }
    } else if (pkt->isRead()) {
        assert(!pkt->isWrite());
        if (pkt->isLLSC()) {
            trackLoadLocked(pkt);
        }
        if (pmemAddr)
            memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
        numReads[pkt->req->masterId()]++;
        bytesRead[pkt->req->masterId()] += pkt->getSize();
        if (pkt->req->isInstFetch())
            bytesInstRead[pkt->req->masterId()] += pkt->getSize();
    } else if (pkt->isInvalidate()) {
        // No need to do anything.
        // This clause is intentionally placed before the write clause: the
        // only transaction that is both a write and an invalidate is
        // WriteInvalidate, and, for the sake of consistency, it does not
        // write to memory. In a cacheless system there are no
        // WriteInvalidates, because the Write -> WriteInvalidate rewrite
        // happens in the cache.
    } else if (pkt->isWrite()) {
        if (writeOK(pkt)) {
            if (pmemAddr) {
                memcpy(hostAddr, pkt->getConstPtr<uint8_t>(), pkt->getSize());
                DPRINTF(MemoryAccess, "%s wrote %i bytes to address %x\n",
                        __func__, pkt->getSize(), pkt->getAddr());
            }
            assert(!pkt->req->isInstFetch());
            TRACE_PACKET("Write");
            numWrites[pkt->req->masterId()]++;
            bytesWritten[pkt->req->masterId()] += pkt->getSize();
        }
    } else {
        panic("unimplemented");
    }

    if (pkt->needsResponse()) {
        pkt->makeResponse();
    }
}

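// Perform a functional (debug) access that reads or writes the backing
// store without touching the statistics, and handle print requests used
// for debugging.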
void
AbstractMemory::functionalAccess(PacketPtr pkt)
{
    assert(AddrRange(pkt->getAddr(),
                     pkt->getAddr() + pkt->getSize() - 1).isSubset(range));

    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - range.start();

    if (pkt->isRead()) {
        if (pmemAddr)
            memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
        TRACE_PACKET("Read");
        pkt->makeResponse();
    } else if (pkt->isWrite()) {
        if (pmemAddr)
            memcpy(hostAddr, pkt->getConstPtr<uint8_t>(), pkt->getSize());
        TRACE_PACKET("Write");
        pkt->makeResponse();
    } else if (pkt->isPrint()) {
        Packet::PrintReqState *prs =
            dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
        assert(prs);
        // Need to call printLabels() explicitly since we're not going
        // through printObj().
        prs->printLabels();
        // Right now we just print the single byte at the specified address.
        ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
    } else {
        panic("AbstractMemory: unimplemented functional command %s",
              pkt->cmdString());
    }
}