locked_mem.hh (11727:055ae402fbd0) vs. locked_mem.hh (11729:f37b5fcd66fe)

locked_mem.hh at revision 11727:055ae402fbd0:

/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * Copyright (c) 2007-2008 The Florida State University
 * Copyright (c) 2009 The University of Edinburgh
 * Copyright (c) 2012 ARM Limited
 * Copyright (c) 2014-2015 Sven Karlsson
 * All rights reserved.
 *
 * [--- 34 unchanged lines hidden ---]
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Alec Roelke
 */

#ifndef __ARCH_RISCV_LOCKED_MEM_HH__
#define __ARCH_RISCV_LOCKED_MEM_HH__

#include "arch/registers.hh"
#include "base/misc.hh"
#include "base/trace.hh"
#include "debug/LLSC.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

/*
 * ISA-specific helper functions for locked memory accesses.
 */
namespace RiscvISA
{
static bool lock_flag = false;
static Addr lock_addr = 0;

template <class XC>
inline void handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
{
    if (!lock_flag)
        return;

    DPRINTF(LLSC, "Locked snoop on address %x.\n",
            pkt->getAddr() & cacheBlockMask);

    Addr snoop_addr = pkt->getAddr() & cacheBlockMask;

    if ((lock_addr & cacheBlockMask) == snoop_addr)
        lock_flag = false;
}


template <class XC>
inline void handleLockedRead(XC *xc, Request *req)
{
    lock_addr = req->getPaddr() & ~0xF;
    lock_flag = true;
    DPRINTF(LLSC, "[cid:%i]: "
            "Load-Link Flag Set & Load-Link Address set to %x.\n",
            req->contextId(), req->getPaddr() & ~0xF);
}

template <class XC>
inline void handleLockedSnoopHit(XC *xc)
{}

template <class XC>
inline bool handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
{
    if (req->isUncacheable()) {
        // Funky Turbolaser mailbox access...don't update
        // result register (see stq_c in decoder.isa)
        req->setExtraData(2);
    } else {
        // standard store conditional
        if (!lock_flag || (req->getPaddr() & ~0xF) != lock_addr) {
            // Lock flag not set or addr mismatch in CPU;
            // don't even bother sending to memory system
            req->setExtraData(0);
            lock_flag = false;

            // the rest of this code is not architectural;
            // it's just a debugging aid to help detect
            // livelock by warning on long sequences of failed
            // store conditionals
            int stCondFailures = xc->readStCondFailures();
            stCondFailures++;
            xc->setStCondFailures(stCondFailures);
            if (stCondFailures % 100000 == 0) {
                warn("%i: context %d: "
                     "%d consecutive store conditional failures\n",
                     curTick(), xc->contextId(), stCondFailures);
            }

            if (!lock_flag) {
                DPRINTF(LLSC, "[cid:%i]:"
                        " Lock Flag Set, Store Conditional Failed.\n",
                        req->contextId());
            } else if ((req->getPaddr() & ~0xf) != lock_addr) {
                DPRINTF(LLSC, "[cid:%i]: Load-Link Address Mismatch, "
                        "Store Conditional Failed.\n", req->contextId());
            }
            // store conditional failed already, so don't issue it to mem
            return false;
        }
    }

    return true;
}

} // namespace RiscvISA

#endif // __ARCH_RISCV_LOCKED_MEM_HH__


locked_mem.hh at revision 11729:f37b5fcd66fe:

/*
 * Copyright (c) 2006 The Regents of The University of Michigan
 * Copyright (c) 2007-2008 The Florida State University
 * Copyright (c) 2009 The University of Edinburgh
 * Copyright (c) 2012 ARM Limited
 * Copyright (c) 2014-2015 Sven Karlsson
 * All rights reserved.
 *
 * [--- 34 unchanged lines hidden ---]
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Alec Roelke
 */

#ifndef __ARCH_RISCV_LOCKED_MEM_HH__
#define __ARCH_RISCV_LOCKED_MEM_HH__

#include <stack>

#include "arch/registers.hh"
#include "base/misc.hh"
#include "base/trace.hh"
#include "debug/LLSC.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

/*
 * ISA-specific helper functions for locked memory accesses.
 */
namespace RiscvISA
{

const int WARN_FAILURE = 10000;

// RISC-V allows multiple locks per hart, but each SC has to unlock the most
// recent one, so we use a stack here.
static std::stack<Addr> locked_addrs;

template <class XC> inline void
handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
{
    if (locked_addrs.empty())
        return;
    Addr snoop_addr = pkt->getAddr() & cacheBlockMask;
    DPRINTF(LLSC, "Locked snoop on address %x.\n", snoop_addr);
    if ((locked_addrs.top() & cacheBlockMask) == snoop_addr)
        locked_addrs.pop();
}


template <class XC> inline void
handleLockedRead(XC *xc, Request *req)
{
    locked_addrs.push(req->getPaddr() & ~0xF);
    DPRINTF(LLSC, "[cid:%d]: Reserved address %x.\n",
            req->contextId(), req->getPaddr() & ~0xF);
}

template <class XC> inline void
handleLockedSnoopHit(XC *xc)
{}

template <class XC> inline bool
handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
{
    // Normally RISC-V uses zero to indicate success and nonzero to indicate
    // failure (right now only 1 is reserved), but in gem5 zero indicates
    // failure and one indicates success, so here we conform to that (it
    // should be switched in the instruction's implementation).

    DPRINTF(LLSC, "[cid:%d]: locked_addrs empty? %s.\n", req->contextId(),
            locked_addrs.empty() ? "yes" : "no");
    if (!locked_addrs.empty()) {
        DPRINTF(LLSC, "[cid:%d]: addr = %x.\n", req->contextId(),
                req->getPaddr() & ~0xF);
        DPRINTF(LLSC, "[cid:%d]: last locked addr = %x.\n", req->contextId(),
                locked_addrs.top());
    }
    if (locked_addrs.empty()
            || locked_addrs.top() != (req->getPaddr() & ~0xF)) {
        req->setExtraData(0);
        int stCondFailures = xc->readStCondFailures();
        xc->setStCondFailures(++stCondFailures);
        if (stCondFailures % WARN_FAILURE == 0) {
            warn("%i: context %d: %d consecutive SC failures.\n",
                 curTick(), xc->contextId(), stCondFailures);
        }
        return false;
    }
    if (req->isUncacheable()) {
        req->setExtraData(2);
    }
    return true;
}

} // namespace RiscvISA

#endif // __ARCH_RISCV_LOCKED_MEM_HH__
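
For reference, the stack-based scheme introduced in 11729 can be exercised in isolation with a short standalone program. This is only a sketch: Addr, loadReserved, and storeConditional are local stand-ins rather than gem5 definitions, and popping the reservation on a successful SC is an assumption drawn from the comment that each SC unlocks the most recent reservation (handleLockedWrite itself does not pop).

// Standalone illustration only; not part of locked_mem.hh.
#include <cstdint>
#include <iostream>
#include <stack>

using Addr = std::uint64_t;              // stand-in for gem5's Addr

static std::stack<Addr> reservations;    // newest reservation on top

// Analogue of handleLockedRead: reserve the 16-byte-aligned block.
void loadReserved(Addr paddr)
{
    reservations.push(paddr & ~0xF);
}

// Analogue of the SC check in handleLockedWrite: fail when no reservation
// exists or the newest one names a different block; popping on success is
// an assumption made for this sketch.
bool storeConditional(Addr paddr)
{
    if (reservations.empty() || reservations.top() != (paddr & ~0xF))
        return false;   // handleLockedWrite reports this case via setExtraData(0)
    reservations.pop();
    return true;        // handleLockedWrite lets the store proceed
}

int main()
{
    loadReserved(0x1000);
    loadReserved(0x2000);
    std::cout << storeConditional(0x2000) << '\n';  // 1: matches the newest reservation
    std::cout << storeConditional(0x1000) << '\n';  // 1: the older reservation is now on top
    std::cout << storeConditional(0x3000) << '\n';  // 0: no reservations left
    return 0;
}

The design point of the change is visible here: the old single lock_flag/lock_addr pair can track only one reservation per hart, while the stack lets nested LR reservations coexist, with each SC checked against the newest one.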