1/*
2 * Copyright (c) 2010-2012,2017-2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder.  You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Ron Dreslinski
41 *          Ali Saidi
42 *          Andreas Hansson
43 */
44
45#include "mem/abstract_mem.hh"
46
47#include <vector>
48
49#include "arch/locked_mem.hh"
50#include "cpu/base.hh"
51#include "cpu/thread_context.hh"
52#include "debug/LLSC.hh"
53#include "debug/MemoryAccess.hh"
54#include "mem/packet_access.hh"
55#include "sim/system.hh"
56
57using namespace std;
58
59AbstractMemory::AbstractMemory(const Params *p) :
60    ClockedObject(p), range(params()->range), pmemAddr(NULL),
61    backdoor(params()->range, nullptr,
62             (MemBackdoor::Flags)(MemBackdoor::Readable |
63                                  MemBackdoor::Writeable)),
64    confTableReported(p->conf_table_reported), inAddrMap(p->in_addr_map),
65    kvmMap(p->kvm_map), _system(NULL)
66{
67}
68
69void
70AbstractMemory::init()
71{
72    assert(system());
73
74    if (size() % _system->getPageBytes() != 0)
75        panic("Memory Size not divisible by page size\n");
76}
77
78void
79AbstractMemory::setBackingStore(uint8_t* pmem_addr)
80{
81    // If there was an existing backdoor, let everybody know it's going away.
82    if (backdoor.ptr())
83        backdoor.invalidate();
84
85    // The back door can't handle interleaved memory.
86    backdoor.ptr(range.interleaved() ? nullptr : pmem_addr);
87
88    pmemAddr = pmem_addr;
89}
90
91void
92AbstractMemory::regStats()
93{
94    ClockedObject::regStats();
95
96    using namespace Stats;
97
98    assert(system());
99
100    bytesRead
101        .init(system()->maxMasters())
102        .name(name() + ".bytes_read")
103        .desc("Number of bytes read from this memory")
104        .flags(total | nozero | nonan)
105        ;
106    for (int i = 0; i < system()->maxMasters(); i++) {
107        bytesRead.subname(i, system()->getMasterName(i));
108    }
109    bytesInstRead
110        .init(system()->maxMasters())
111        .name(name() + ".bytes_inst_read")
112        .desc("Number of instructions bytes read from this memory")
113        .flags(total | nozero | nonan)
114        ;
115    for (int i = 0; i < system()->maxMasters(); i++) {
116        bytesInstRead.subname(i, system()->getMasterName(i));
117    }
118    bytesWritten
119        .init(system()->maxMasters())
120        .name(name() + ".bytes_written")
121        .desc("Number of bytes written to this memory")
122        .flags(total | nozero | nonan)
123        ;
124    for (int i = 0; i < system()->maxMasters(); i++) {
125        bytesWritten.subname(i, system()->getMasterName(i));
126    }
127    numReads
128        .init(system()->maxMasters())
129        .name(name() + ".num_reads")
130        .desc("Number of read requests responded to by this memory")
131        .flags(total | nozero | nonan)
132        ;
133    for (int i = 0; i < system()->maxMasters(); i++) {
134        numReads.subname(i, system()->getMasterName(i));
135    }
136    numWrites
137        .init(system()->maxMasters())
138        .name(name() + ".num_writes")
139        .desc("Number of write requests responded to by this memory")
140        .flags(total | nozero | nonan)
141        ;
142    for (int i = 0; i < system()->maxMasters(); i++) {
143        numWrites.subname(i, system()->getMasterName(i));
144    }
145    numOther
146        .init(system()->maxMasters())
147        .name(name() + ".num_other")
148        .desc("Number of other requests responded to by this memory")
149        .flags(total | nozero | nonan)
150        ;
151    for (int i = 0; i < system()->maxMasters(); i++) {
152        numOther.subname(i, system()->getMasterName(i));
153    }
154    bwRead
155        .name(name() + ".bw_read")
156        .desc("Total read bandwidth from this memory (bytes/s)")
157        .precision(0)
158        .prereq(bytesRead)
159        .flags(total | nozero | nonan)
160        ;
161    for (int i = 0; i < system()->maxMasters(); i++) {
162        bwRead.subname(i, system()->getMasterName(i));
163    }
164
165    bwInstRead
166        .name(name() + ".bw_inst_read")
167        .desc("Instruction read bandwidth from this memory (bytes/s)")
168        .precision(0)
169        .prereq(bytesInstRead)
170        .flags(total | nozero | nonan)
171        ;
172    for (int i = 0; i < system()->maxMasters(); i++) {
173        bwInstRead.subname(i, system()->getMasterName(i));
174    }
175    bwWrite
176        .name(name() + ".bw_write")
177        .desc("Write bandwidth from this memory (bytes/s)")
178        .precision(0)
179        .prereq(bytesWritten)
180        .flags(total | nozero | nonan)
181        ;
182    for (int i = 0; i < system()->maxMasters(); i++) {
183        bwWrite.subname(i, system()->getMasterName(i));
184    }
185    bwTotal
186        .name(name() + ".bw_total")
187        .desc("Total bandwidth to/from this memory (bytes/s)")
188        .precision(0)
189        .prereq(bwTotal)
190        .flags(total | nozero | nonan)
191        ;
192    for (int i = 0; i < system()->maxMasters(); i++) {
193        bwTotal.subname(i, system()->getMasterName(i));
194    }
195    bwRead = bytesRead / simSeconds;
196    bwInstRead = bytesInstRead / simSeconds;
197    bwWrite = bytesWritten / simSeconds;
198    bwTotal = (bytesRead + bytesWritten) / simSeconds;
199}
200
// Return the address range served by this memory (the range may be
// interleaved; see setBackingStore()).
AddrRange
AbstractMemory::getAddrRange() const
{
    return range;
}
206
207// Add load-locked to tracking list.  Should only be called if the
208// operation is a load and the LLSC flag is set.
209void
210AbstractMemory::trackLoadLocked(PacketPtr pkt)
211{
212    const RequestPtr &req = pkt->req;
213    Addr paddr = LockedAddr::mask(req->getPaddr());
214
215    // first we check if we already have a locked addr for this
216    // xc.  Since each xc only gets one, we just update the
217    // existing record with the new address.
218    list<LockedAddr>::iterator i;
219
220    for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
221        if (i->matchesContext(req)) {
222            DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
223                    req->contextId(), paddr);
224            i->addr = paddr;
225            return;
226        }
227    }
228
229    // no record for this xc: need to allocate a new one
230    DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
231            req->contextId(), paddr);
232    lockedAddrList.push_front(LockedAddr(req));
233}
234
235
// Called on *writes* only... both regular stores and
// store-conditional operations.  Check for conventional stores which
// conflict with locked addresses, and for success/failure of store
// conditionals.
bool
AbstractMemory::checkLockedAddrList(PacketPtr pkt)
{
    const RequestPtr &req = pkt->req;
    // Reservations are tracked at LockedAddr::mask() granularity, so
    // compare masked addresses.
    Addr paddr = LockedAddr::mask(req->getPaddr());
    bool isLLSC = pkt->isLLSC();

    // Initialize return value.  Non-conditional stores always
    // succeed.  Assume conditional stores will fail until proven
    // otherwise.
    bool allowStore = !isLLSC;

    // Iterate over list.  Note that there could be multiple matching records,
    // as more than one context could have done a load locked to this location.
    // Only remove records when we succeed in finding a record for (xc, addr);
    // then, remove all records with this address.  Failed store-conditionals do
    // not blow unrelated reservations.
    list<LockedAddr>::iterator i = lockedAddrList.begin();

    if (isLLSC) {
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr && i->matchesContext(req)) {
                // it's a store conditional, and as far as the memory system can
                // tell, the requesting context's lock is still valid.
                DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
                        req->contextId(), paddr);
                allowStore = true;
                break;
            }
            // If we didn't find a match, keep searching!  Someone else may well
            // have a reservation on this line here but we may find ours in just
            // a little while.
            i++;
        }
        // Report the SC outcome to the requester through the request's
        // extra data: 1 = success, 0 = failure.
        req->setExtraData(allowStore ? 1 : 0);
    }
    // LLSCs that succeeded AND non-LLSC stores both fall into here:
    if (allowStore) {
        // We write address paddr.  However, there may be several entries with a
        // reservation on this address (for other contextIds) and they must all
        // be removed.
        i = lockedAddrList.begin();
        while (i != lockedAddrList.end()) {
            if (i->addr == paddr) {
                DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
                        i->contextId, paddr);
                ContextID owner_cid = i->contextId;
                assert(owner_cid != InvalidContextID);
                // Requests without a context id (e.g. from devices) can
                // never own a reservation themselves.
                ContextID requester_cid = req->hasContextId() ?
                                           req->contextId() :
                                           InvalidContextID;
                if (owner_cid != requester_cid) {
                    // Another context's reservation is being clobbered:
                    // clear that context's ISA-level exclusive monitor.
                    ThreadContext* ctx = system()->getThreadContext(owner_cid);
                    TheISA::globalClearExclusive(ctx);
                }
                // erase() returns the iterator to the next element, so
                // iteration stays valid across removal.
                i = lockedAddrList.erase(i);
            } else {
                i++;
            }
        }
    }

    return allowStore;
}
304
#if TRACING_ON
// Helper behind the TRACE_PACKET macro: emit a MemoryAccess debug line
// describing the packet, including its payload.
static inline void
tracePacket(System *sys, const char *label, PacketPtr pkt)
{
    int size = pkt->getSize();
#if THE_ISA != NULL_ISA
    if (size == 1 || size == 2 || size == 4 || size == 8) {
        // Natural word sizes: print the payload inline as an integer in
        // the guest's byte order.
        DPRINTF(MemoryAccess,"%s from %s of size %i on address %#x data "
                "%#x %c\n", label, sys->getMasterName(pkt->req->masterId()),
                size, pkt->getAddr(), pkt->getUintX(TheISA::GuestByteOrder),
                pkt->req->isUncacheable() ? 'U' : 'C');
        return;
    }
#endif
    // Any other size: print the header line, then hex-dump the payload.
    DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x %c\n",
            label, sys->getMasterName(pkt->req->masterId()),
            size, pkt->getAddr(), pkt->req->isUncacheable() ? 'U' : 'C');
    DDUMP(MemoryAccess, pkt->getConstPtr<uint8_t>(), pkt->getSize());
}

// TRACE_PACKET(label) traces an access when tracing is compiled in and
// expands to nothing otherwise.
#   define TRACE_PACKET(A) tracePacket(system(), A, pkt)
#else
#   define TRACE_PACKET(A)
#endif
329
330void
331AbstractMemory::access(PacketPtr pkt)
332{
333    if (pkt->cacheResponding()) {
334        DPRINTF(MemoryAccess, "Cache responding to %#llx: not responding\n",
335                pkt->getAddr());
336        return;
337    }
338
339    if (pkt->cmd == MemCmd::CleanEvict || pkt->cmd == MemCmd::WritebackClean) {
340        DPRINTF(MemoryAccess, "CleanEvict  on 0x%x: not responding\n",
341                pkt->getAddr());
342      return;
343    }
344
345    assert(pkt->getAddrRange().isSubset(range));
346
347    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - range.start();
348
349    if (pkt->cmd == MemCmd::SwapReq) {
350        if (pkt->isAtomicOp()) {
351            if (pmemAddr) {
352                pkt->setData(hostAddr);
353                (*(pkt->getAtomicOp()))(hostAddr);
354            }
355        } else {
356            std::vector<uint8_t> overwrite_val(pkt->getSize());
357            uint64_t condition_val64;
358            uint32_t condition_val32;
359
360            panic_if(!pmemAddr, "Swap only works if there is real memory " \
361                     "(i.e. null=False)");
362
363            bool overwrite_mem = true;
364            // keep a copy of our possible write value, and copy what is at the
365            // memory address into the packet
366            pkt->writeData(&overwrite_val[0]);
367            pkt->setData(hostAddr);
368
369            if (pkt->req->isCondSwap()) {
370                if (pkt->getSize() == sizeof(uint64_t)) {
371                    condition_val64 = pkt->req->getExtraData();
372                    overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
373                                                 sizeof(uint64_t));
374                } else if (pkt->getSize() == sizeof(uint32_t)) {
375                    condition_val32 = (uint32_t)pkt->req->getExtraData();
376                    overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
377                                                 sizeof(uint32_t));
378                } else
379                    panic("Invalid size for conditional read/write\n");
380            }
381
382            if (overwrite_mem)
383                std::memcpy(hostAddr, &overwrite_val[0], pkt->getSize());
384
385            assert(!pkt->req->isInstFetch());
386            TRACE_PACKET("Read/Write");
387            numOther[pkt->req->masterId()]++;
388        }
389    } else if (pkt->isRead()) {
390        assert(!pkt->isWrite());
391        if (pkt->isLLSC()) {
392            assert(!pkt->fromCache());
393            // if the packet is not coming from a cache then we have
394            // to do the LL/SC tracking here
395            trackLoadLocked(pkt);
396        }
397        if (pmemAddr) {
398            pkt->setData(hostAddr);
399        }
400        TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
401        numReads[pkt->req->masterId()]++;
402        bytesRead[pkt->req->masterId()] += pkt->getSize();
403        if (pkt->req->isInstFetch())
404            bytesInstRead[pkt->req->masterId()] += pkt->getSize();
405    } else if (pkt->isInvalidate() || pkt->isClean()) {
406        assert(!pkt->isWrite());
407        // in a fastmem system invalidating and/or cleaning packets
408        // can be seen due to cache maintenance requests
409
410        // no need to do anything
411    } else if (pkt->isWrite()) {
412        if (writeOK(pkt)) {
413            if (pmemAddr) {
414                pkt->writeData(hostAddr);
415                DPRINTF(MemoryAccess, "%s wrote %i bytes to address %x\n",
416                        __func__, pkt->getSize(), pkt->getAddr());
417            }
418            assert(!pkt->req->isInstFetch());
419            TRACE_PACKET("Write");
420            numWrites[pkt->req->masterId()]++;
421            bytesWritten[pkt->req->masterId()] += pkt->getSize();
422        }
423    } else {
424        panic("Unexpected packet %s", pkt->print());
425    }
426
427    if (pkt->needsResponse()) {
428        pkt->makeResponse();
429    }
430}
431
432void
433AbstractMemory::functionalAccess(PacketPtr pkt)
434{
435    assert(pkt->getAddrRange().isSubset(range));
436
437    uint8_t *hostAddr = pmemAddr + pkt->getAddr() - range.start();
438
439    if (pkt->isRead()) {
440        if (pmemAddr) {
441            pkt->setData(hostAddr);
442        }
443        TRACE_PACKET("Read");
444        pkt->makeResponse();
445    } else if (pkt->isWrite()) {
446        if (pmemAddr) {
447            pkt->writeData(hostAddr);
448        }
449        TRACE_PACKET("Write");
450        pkt->makeResponse();
451    } else if (pkt->isPrint()) {
452        Packet::PrintReqState *prs =
453            dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
454        assert(prs);
455        // Need to call printLabels() explicitly since we're not going
456        // through printObj().
457        prs->printLabels();
458        // Right now we just print the single byte at the specified address.
459        ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
460    } else {
461        panic("AbstractMemory: unimplemented functional command %s",
462              pkt->cmdString());
463    }
464}
465