abstract_mem.cc: revision 11306:a5340a2a24f9 (previously 11284:b3926db25371)
1/*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Ron Dreslinski
41 * Ali Saidi
42 * Andreas Hansson
43 */
44
45#include <vector>
46
47#include "cpu/base.hh"
48#include "cpu/thread_context.hh"
49#include "debug/LLSC.hh"
50#include "debug/MemoryAccess.hh"
51#include "mem/abstract_mem.hh"
52#include "mem/packet_access.hh"
53#include "sim/system.hh"
54
55using namespace std;
56
57AbstractMemory::AbstractMemory(const Params *p) :
58 MemObject(p), range(params()->range), pmemAddr(NULL),
59 confTableReported(p->conf_table_reported), inAddrMap(p->in_addr_map),
60 _system(NULL)
61{
62}
63
64void
65AbstractMemory::init()
66{
67 assert(system());
68
69 if (size() % _system->getPageBytes() != 0)
70 panic("Memory Size not divisible by page size\n");
71}
72
73void
74AbstractMemory::setBackingStore(uint8_t* pmem_addr)
75{
76 pmemAddr = pmem_addr;
77}
78
79void
80AbstractMemory::regStats()
81{
82 using namespace Stats;
83
84 assert(system());
85
86 bytesRead
87 .init(system()->maxMasters())
88 .name(name() + ".bytes_read")
89 .desc("Number of bytes read from this memory")
90 .flags(total | nozero | nonan)
91 ;
92 for (int i = 0; i < system()->maxMasters(); i++) {
93 bytesRead.subname(i, system()->getMasterName(i));
94 }
95 bytesInstRead
96 .init(system()->maxMasters())
97 .name(name() + ".bytes_inst_read")
98        .desc("Number of instruction bytes read from this memory")
99 .flags(total | nozero | nonan)
100 ;
101 for (int i = 0; i < system()->maxMasters(); i++) {
102 bytesInstRead.subname(i, system()->getMasterName(i));
103 }
104 bytesWritten
105 .init(system()->maxMasters())
106 .name(name() + ".bytes_written")
107 .desc("Number of bytes written to this memory")
108 .flags(total | nozero | nonan)
109 ;
110 for (int i = 0; i < system()->maxMasters(); i++) {
111 bytesWritten.subname(i, system()->getMasterName(i));
112 }
113 numReads
114 .init(system()->maxMasters())
115 .name(name() + ".num_reads")
116 .desc("Number of read requests responded to by this memory")
117 .flags(total | nozero | nonan)
118 ;
119 for (int i = 0; i < system()->maxMasters(); i++) {
120 numReads.subname(i, system()->getMasterName(i));
121 }
122 numWrites
123 .init(system()->maxMasters())
124 .name(name() + ".num_writes")
125 .desc("Number of write requests responded to by this memory")
126 .flags(total | nozero | nonan)
127 ;
128 for (int i = 0; i < system()->maxMasters(); i++) {
129 numWrites.subname(i, system()->getMasterName(i));
130 }
131 numOther
132 .init(system()->maxMasters())
133 .name(name() + ".num_other")
134 .desc("Number of other requests responded to by this memory")
135 .flags(total | nozero | nonan)
136 ;
137 for (int i = 0; i < system()->maxMasters(); i++) {
138 numOther.subname(i, system()->getMasterName(i));
139 }
140 bwRead
141 .name(name() + ".bw_read")
142 .desc("Total read bandwidth from this memory (bytes/s)")
143 .precision(0)
144 .prereq(bytesRead)
145 .flags(total | nozero | nonan)
146 ;
147 for (int i = 0; i < system()->maxMasters(); i++) {
148 bwRead.subname(i, system()->getMasterName(i));
149 }
150
151 bwInstRead
152 .name(name() + ".bw_inst_read")
153 .desc("Instruction read bandwidth from this memory (bytes/s)")
154 .precision(0)
155 .prereq(bytesInstRead)
156 .flags(total | nozero | nonan)
157 ;
158 for (int i = 0; i < system()->maxMasters(); i++) {
159 bwInstRead.subname(i, system()->getMasterName(i));
160 }
161 bwWrite
162 .name(name() + ".bw_write")
163 .desc("Write bandwidth from this memory (bytes/s)")
164 .precision(0)
165 .prereq(bytesWritten)
166 .flags(total | nozero | nonan)
167 ;
168 for (int i = 0; i < system()->maxMasters(); i++) {
169 bwWrite.subname(i, system()->getMasterName(i));
170 }
171 bwTotal
172 .name(name() + ".bw_total")
173 .desc("Total bandwidth to/from this memory (bytes/s)")
174 .precision(0)
175 .prereq(bwTotal)
176 .flags(total | nozero | nonan)
177 ;
178 for (int i = 0; i < system()->maxMasters(); i++) {
179 bwTotal.subname(i, system()->getMasterName(i));
180 }
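    // the bandwidth stats below are formulas: they are evaluated from the
    // byte counters and the elapsed simulated seconds whenever stats are
    // dumped, rather than being updated on every access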
181 bwRead = bytesRead / simSeconds;
182 bwInstRead = bytesInstRead / simSeconds;
183 bwWrite = bytesWritten / simSeconds;
184 bwTotal = (bytesRead + bytesWritten) / simSeconds;
185}
186
187AddrRange
188AbstractMemory::getAddrRange() const
189{
190 return range;
191}
192
193// Add load-locked to tracking list. Should only be called if the
194// operation is a load and the LLSC flag is set.
195void
196AbstractMemory::trackLoadLocked(PacketPtr pkt)
197{
198 Request *req = pkt->req;
199 Addr paddr = LockedAddr::mask(req->getPaddr());
200
201 // first we check if we already have a locked addr for this
202 // xc. Since each xc only gets one, we just update the
203 // existing record with the new address.
204 list<LockedAddr>::iterator i;
205
206 for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
207 if (i->matchesContext(req)) {
208 DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
209 req->contextId(), paddr);
210 i->addr = paddr;
211 return;
212 }
213 }
214
215 // no record for this xc: need to allocate a new one
216 DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
217 req->contextId(), paddr);
218 lockedAddrList.push_front(LockedAddr(req));
219}
220
221
222// Called on *writes* only... both regular stores and
223// store-conditional operations. Check for conventional stores which
224// conflict with locked addresses, and for success/failure of store
225// conditionals.
226bool
227AbstractMemory::checkLockedAddrList(PacketPtr pkt)
228{
229 Request *req = pkt->req;
230 Addr paddr = LockedAddr::mask(req->getPaddr());
231 bool isLLSC = pkt->isLLSC();
232
233 // Initialize return value. Non-conditional stores always
234 // succeed. Assume conditional stores will fail until proven
235 // otherwise.
236 bool allowStore = !isLLSC;
237
238 // Iterate over list. Note that there could be multiple matching records,
239 // as more than one context could have done a load locked to this location.
240 // Only remove records when we succeed in finding a record for (xc, addr);
241 // then, remove all records with this address. Failed store-conditionals do
242 // not blow unrelated reservations.
243 list<LockedAddr>::iterator i = lockedAddrList.begin();
244
245 if (isLLSC) {
246 while (i != lockedAddrList.end()) {
247 if (i->addr == paddr && i->matchesContext(req)) {
248 // it's a store conditional, and as far as the memory system can
249 // tell, the requesting context's lock is still valid.
250 DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
251 req->contextId(), paddr);
252 allowStore = true;
253 break;
254 }
255 // If we didn't find a match, keep searching! Someone else may well
256 // have a reservation on this line here but we may find ours in just
257 // a little while.
258 i++;
259 }
260 req->setExtraData(allowStore ? 1 : 0);
261 }
262 // LLSCs that succeeded AND non-LLSC stores both fall into here:
263 if (allowStore) {
264 // We write address paddr. However, there may be several entries with a
265 // reservation on this address (for other contextIds) and they must all
266 // be removed.
267 i = lockedAddrList.begin();
268 while (i != lockedAddrList.end()) {
269 if (i->addr == paddr) {
270 DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
271 i->contextId, paddr);
272 // For ARM, a spinlock would typically include a Wait
273 // For Event (WFE) to conserve energy. The ARMv8
274 // architecture specifies that an event is
275 // automatically generated when clearing the exclusive
276 // monitor to wake up the processor in WFE.
277 ThreadContext* ctx = system()->getThreadContext(i->contextId);
278 ctx->getCpuPtr()->wakeup(ctx->threadId());
279 i = lockedAddrList.erase(i);
280 } else {
281 i++;
282 }
283 }
284 }
285
286 return allowStore;
287}
288
289
290#if TRACING_ON
291
292#define CASE(A, T) \
293 case sizeof(T): \
294 DPRINTF(MemoryAccess,"%s from %s of size %i on address 0x%x data " \
295 "0x%x %c\n", A, system()->getMasterName(pkt->req->masterId()),\
296 pkt->getSize(), pkt->getAddr(), pkt->get<T>(), \
297 pkt->req->isUncacheable() ? 'U' : 'C'); \
298 break
299
300
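// TRACE_PACKET picks a CASE arm based on the access size (1, 2, 4 or 8
// bytes) and falls back to printing a raw byte dump for any other size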
301#define TRACE_PACKET(A) \
302 do { \
303 switch (pkt->getSize()) { \
304 CASE(A, uint64_t); \
305 CASE(A, uint32_t); \
306 CASE(A, uint16_t); \
307 CASE(A, uint8_t); \
308 default: \
309 DPRINTF(MemoryAccess, "%s from %s of size %i on address 0x%x %c\n",\
310 A, system()->getMasterName(pkt->req->masterId()), \
311 pkt->getSize(), pkt->getAddr(), \
312 pkt->req->isUncacheable() ? 'U' : 'C'); \
313 DDUMP(MemoryAccess, pkt->getConstPtr<uint8_t>(), pkt->getSize()); \
314 } \
315 } while (0)
316
317#else
318
319#define TRACE_PACKET(A)
320
321#endif
322
323void
324AbstractMemory::access(PacketPtr pkt)
325{
326 if (pkt->cacheResponding()) {
327 DPRINTF(MemoryAccess, "Cache responding to %#llx: not responding\n",
328 pkt->getAddr());
329 return;
330 }
331
332 if (pkt->cmd == MemCmd::CleanEvict || pkt->cmd == MemCmd::WritebackClean) {
333 DPRINTF(MemoryAccess, "CleanEvict on 0x%x: not responding\n",
334 pkt->getAddr());
335 return;
336 }
337
338 assert(AddrRange(pkt->getAddr(),
339 pkt->getAddr() + (pkt->getSize() - 1)).isSubset(range));
340
341 uint8_t *hostAddr = pmemAddr + pkt->getAddr() - range.start();
342
343 if (pkt->cmd == MemCmd::SwapReq) {
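        // an atomic-op swap applies the packet's atomic operation functor
        // directly to the backing store; all other swaps take the
        // (optionally conditional) read-modify-write path in the else branch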
344        if (pkt->isAtomicOp()) {
345            if (pmemAddr) {
346                memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
347                (*(pkt->getAtomicOp()))(hostAddr);
348            }
349        } else {
350            std::vector<uint8_t> overwrite_val(pkt->getSize());
351            uint64_t condition_val64;
352            uint32_t condition_val32;
353
354            if (!pmemAddr)
355                panic("Swap only works if there is real memory (i.e. null=False)");
356
357            bool overwrite_mem = true;
358            // keep a copy of our possible write value, and copy what is at the
359            // memory address into the packet
360            std::memcpy(&overwrite_val[0], pkt->getConstPtr<uint8_t>(),
361                        pkt->getSize());
362            std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
363
364            if (pkt->req->isCondSwap()) {
365                if (pkt->getSize() == sizeof(uint64_t)) {
366                    condition_val64 = pkt->req->getExtraData();
367                    overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
368                                                 sizeof(uint64_t));
369                } else if (pkt->getSize() == sizeof(uint32_t)) {
370                    condition_val32 = (uint32_t)pkt->req->getExtraData();
371                    overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
372                                                 sizeof(uint32_t));
373                } else
374                    panic("Invalid size for conditional read/write\n");
375            }
376
377            if (overwrite_mem)
378                std::memcpy(hostAddr, &overwrite_val[0], pkt->getSize());
379
380            assert(!pkt->req->isInstFetch());
381            TRACE_PACKET("Read/Write");
382            numOther[pkt->req->masterId()]++;
383        }
384 } else if (pkt->isRead()) {
385 assert(!pkt->isWrite());
386 if (pkt->isLLSC()) {
387 trackLoadLocked(pkt);
388 }
389 if (pmemAddr)
390 memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
391 TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
392 numReads[pkt->req->masterId()]++;
393 bytesRead[pkt->req->masterId()] += pkt->getSize();
394 if (pkt->req->isInstFetch())
395 bytesInstRead[pkt->req->masterId()] += pkt->getSize();
396 } else if (pkt->isInvalidate()) {
397 // no need to do anything
398 // this clause is intentionally before the write clause: the only
399 // transaction that is both a write and an invalidate is
400 // WriteInvalidate, and for the sake of consistency, it does not
401 // write to memory. in a cacheless system, there are no WriteInv's
402 // because the Write -> WriteInvalidate rewrite happens in the cache.
403 } else if (pkt->isWrite()) {
404 if (writeOK(pkt)) {
405 if (pmemAddr) {
406 memcpy(hostAddr, pkt->getConstPtr<uint8_t>(), pkt->getSize());
407 DPRINTF(MemoryAccess, "%s wrote %x bytes to address %x\n",
408 __func__, pkt->getSize(), pkt->getAddr());
409 }
410 assert(!pkt->req->isInstFetch());
411 TRACE_PACKET("Write");
412 numWrites[pkt->req->masterId()]++;
413 bytesWritten[pkt->req->masterId()] += pkt->getSize();
414 }
415 } else {
416 panic("unimplemented");
417 }
418
419 if (pkt->needsResponse()) {
420 pkt->makeResponse();
421 }
422}
423
424void
425AbstractMemory::functionalAccess(PacketPtr pkt)
426{
427 assert(AddrRange(pkt->getAddr(),
428 pkt->getAddr() + pkt->getSize() - 1).isSubset(range));
429
430 uint8_t *hostAddr = pmemAddr + pkt->getAddr() - range.start();
431
432 if (pkt->isRead()) {
433 if (pmemAddr)
434 memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
435 TRACE_PACKET("Read");
436 pkt->makeResponse();
437 } else if (pkt->isWrite()) {
438 if (pmemAddr)
439 memcpy(hostAddr, pkt->getConstPtr<uint8_t>(), pkt->getSize());
440 TRACE_PACKET("Write");
441 pkt->makeResponse();
442 } else if (pkt->isPrint()) {
443 Packet::PrintReqState *prs =
444 dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
445 assert(prs);
446 // Need to call printLabels() explicitly since we're not going
447 // through printObj().
448 prs->printLabels();
449 // Right now we just print the single byte at the specified address.
450 ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
451 } else {
452 panic("AbstractMemory: unimplemented functional command %s",
453 pkt->cmdString());
454 }
455}