table_walker.cc (10579:e622a3e2ed14) table_walker.cc (10621:b7bc5b1084a4)
1/*
2 * Copyright (c) 2010, 2012-2014 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Ali Saidi
38 * Giacomo Gabrielli
39 */
40
41#include <memory>
42
43#include "arch/arm/faults.hh"
44#include "arch/arm/stage2_mmu.hh"
45#include "arch/arm/system.hh"
46#include "arch/arm/table_walker.hh"
47#include "arch/arm/tlb.hh"
48#include "cpu/base.hh"
49#include "cpu/thread_context.hh"
50#include "debug/Checkpoint.hh"
51#include "debug/Drain.hh"
52#include "debug/TLB.hh"
53#include "debug/TLBVerbose.hh"
54#include "sim/system.hh"
55
56using namespace ArmISA;
57
58TableWalker::TableWalker(const Params *p)
59 : MemObject(p), port(this, p->sys), drainManager(NULL),
60 stage2Mmu(NULL), isStage2(p->is_stage2), tlb(NULL),
61 currState(NULL), pending(false), masterId(p->sys->getMasterId(name())),
62 numSquashable(p->num_squash_per_cycle),
1/*
2 * Copyright (c) 2010, 2012-2014 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Ali Saidi
38 * Giacomo Gabrielli
39 */
40
41#include <memory>
42
43#include "arch/arm/faults.hh"
44#include "arch/arm/stage2_mmu.hh"
45#include "arch/arm/system.hh"
46#include "arch/arm/table_walker.hh"
47#include "arch/arm/tlb.hh"
48#include "cpu/base.hh"
49#include "cpu/thread_context.hh"
50#include "debug/Checkpoint.hh"
51#include "debug/Drain.hh"
52#include "debug/TLB.hh"
53#include "debug/TLBVerbose.hh"
54#include "sim/system.hh"
55
56using namespace ArmISA;
57
58TableWalker::TableWalker(const Params *p)
59 : MemObject(p), port(this, p->sys), drainManager(NULL),
60 stage2Mmu(NULL), isStage2(p->is_stage2), tlb(NULL),
61 currState(NULL), pending(false), masterId(p->sys->getMasterId(name())),
62 numSquashable(p->num_squash_per_cycle),
63 pendingReqs(0),
64 pendingChangeTick(curTick()),
63 doL1DescEvent(this), doL2DescEvent(this),
64 doL0LongDescEvent(this), doL1LongDescEvent(this), doL2LongDescEvent(this),
65 doL3LongDescEvent(this),
66 doProcessEvent(this)
67{
68 sctlr = 0;
69
70 // Cache system-level properties
71 if (FullSystem) {
72 armSys = dynamic_cast<ArmSystem *>(p->sys);
73 assert(armSys);
74 haveSecurity = armSys->haveSecurity();
75 _haveLPAE = armSys->haveLPAE();
76 _haveVirtualization = armSys->haveVirtualization();
77 physAddrRange = armSys->physAddrRange();
78 _haveLargeAsid64 = armSys->haveLargeAsid64();
79 } else {
80 armSys = NULL;
81 haveSecurity = _haveLPAE = _haveVirtualization = false;
82 _haveLargeAsid64 = false;
83 physAddrRange = 32;
84 }
85
86}
87
88TableWalker::~TableWalker()
89{
90 ;
91}
92
93TableWalker::WalkerState::WalkerState() :
94 tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
95 asid(0), vmid(0), isHyp(false), transState(nullptr),
96 vaddr(0), vaddr_tainted(0), isWrite(false), isFetch(false), isSecure(false),
97 secureLookup(false), rwTable(false), userTable(false), xnTable(false),
98 pxnTable(false), stage2Req(false), doingStage2(false),
99 stage2Tran(nullptr), timing(false), functional(false),
100 mode(BaseTLB::Read), tranType(TLB::NormalTran), l2Desc(l1Desc),
101 delayed(false), tableWalker(nullptr)
102{
103}
104
105void
106TableWalker::completeDrain()
107{
108 if (drainManager && stateQueues[L1].empty() && stateQueues[L2].empty() &&
109 pendingQueue.empty()) {
110 setDrainState(Drainable::Drained);
111 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
112 drainManager->signalDrainDone();
113 drainManager = NULL;
114 }
115}
116
117unsigned int
118TableWalker::drain(DrainManager *dm)
119{
120 unsigned int count = port.drain(dm);
121
122 bool state_queues_not_empty = false;
123
124 for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
125 if (!stateQueues[i].empty()) {
126 state_queues_not_empty = true;
127 break;
128 }
129 }
130
131 if (state_queues_not_empty || pendingQueue.size()) {
132 drainManager = dm;
133 setDrainState(Drainable::Draining);
134 DPRINTF(Drain, "TableWalker not drained\n");
135
136 // return port drain count plus the table walker itself needs to drain
137 return count + 1;
138 } else {
139 setDrainState(Drainable::Drained);
140 DPRINTF(Drain, "TableWalker free, no need to drain\n");
141
142 // table walker is drained, but its ports may still need to be drained
143 return count;
144 }
145}
146
147void
148TableWalker::drainResume()
149{
150 Drainable::drainResume();
151 if (params()->sys->isTimingMode() && currState) {
152 delete currState;
153 currState = NULL;
65 doL1DescEvent(this), doL2DescEvent(this),
66 doL0LongDescEvent(this), doL1LongDescEvent(this), doL2LongDescEvent(this),
67 doL3LongDescEvent(this),
68 doProcessEvent(this)
69{
70 sctlr = 0;
71
72 // Cache system-level properties
73 if (FullSystem) {
74 armSys = dynamic_cast<ArmSystem *>(p->sys);
75 assert(armSys);
76 haveSecurity = armSys->haveSecurity();
77 _haveLPAE = armSys->haveLPAE();
78 _haveVirtualization = armSys->haveVirtualization();
79 physAddrRange = armSys->physAddrRange();
80 _haveLargeAsid64 = armSys->haveLargeAsid64();
81 } else {
82 armSys = NULL;
83 haveSecurity = _haveLPAE = _haveVirtualization = false;
84 _haveLargeAsid64 = false;
85 physAddrRange = 32;
86 }
87
88}
89
90TableWalker::~TableWalker()
91{
92 ;
93}
94
95TableWalker::WalkerState::WalkerState() :
96 tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
97 asid(0), vmid(0), isHyp(false), transState(nullptr),
98 vaddr(0), vaddr_tainted(0), isWrite(false), isFetch(false), isSecure(false),
99 secureLookup(false), rwTable(false), userTable(false), xnTable(false),
100 pxnTable(false), stage2Req(false), doingStage2(false),
101 stage2Tran(nullptr), timing(false), functional(false),
102 mode(BaseTLB::Read), tranType(TLB::NormalTran), l2Desc(l1Desc),
103 delayed(false), tableWalker(nullptr)
104{
105}
106
107void
108TableWalker::completeDrain()
109{
110 if (drainManager && stateQueues[L1].empty() && stateQueues[L2].empty() &&
111 pendingQueue.empty()) {
112 setDrainState(Drainable::Drained);
113 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
114 drainManager->signalDrainDone();
115 drainManager = NULL;
116 }
117}
118
119unsigned int
120TableWalker::drain(DrainManager *dm)
121{
122 unsigned int count = port.drain(dm);
123
124 bool state_queues_not_empty = false;
125
126 for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
127 if (!stateQueues[i].empty()) {
128 state_queues_not_empty = true;
129 break;
130 }
131 }
132
133 if (state_queues_not_empty || pendingQueue.size()) {
134 drainManager = dm;
135 setDrainState(Drainable::Draining);
136 DPRINTF(Drain, "TableWalker not drained\n");
137
138 // return port drain count plus the table walker itself needs to drain
139 return count + 1;
140 } else {
141 setDrainState(Drainable::Drained);
142 DPRINTF(Drain, "TableWalker free, no need to drain\n");
143
144 // table walker is drained, but its ports may still need to be drained
145 return count;
146 }
147}
148
149void
150TableWalker::drainResume()
151{
152 Drainable::drainResume();
153 if (params()->sys->isTimingMode() && currState) {
154 delete currState;
155 currState = NULL;
156 pendingChange();
154 }
155}
156
157BaseMasterPort&
158TableWalker::getMasterPort(const std::string &if_name, PortID idx)
159{
160 if (if_name == "port") {
161 return port;
162 }
163 return MemObject::getMasterPort(if_name, idx);
164}
165
166Fault
167TableWalker::walk(RequestPtr _req, ThreadContext *_tc, uint16_t _asid,
168 uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
169 TLB::Translation *_trans, bool _timing, bool _functional,
170 bool secure, TLB::ArmTranslationType tranType)
171{
172 assert(!(_functional && _timing));
157 }
158}
159
160BaseMasterPort&
161TableWalker::getMasterPort(const std::string &if_name, PortID idx)
162{
163 if (if_name == "port") {
164 return port;
165 }
166 return MemObject::getMasterPort(if_name, idx);
167}
168
169Fault
170TableWalker::walk(RequestPtr _req, ThreadContext *_tc, uint16_t _asid,
171 uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
172 TLB::Translation *_trans, bool _timing, bool _functional,
173 bool secure, TLB::ArmTranslationType tranType)
174{
175 assert(!(_functional && _timing));
176 ++statWalks;
177
173 WalkerState *savedCurrState = NULL;
174
175 if (!currState && !_functional) {
176 // For atomic mode, a new WalkerState instance should be only created
177 // once per TLB. For timing mode, a new instance is generated for every
178 // TLB miss.
179 DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");
180
181 currState = new WalkerState();
182 currState->tableWalker = this;
183 } else if (_functional) {
184 // If we are mixing functional mode with timing (or even
185 // atomic), we need to to be careful and clean up after
186 // ourselves to not risk getting into an inconsistent state.
187 DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
188 savedCurrState = currState;
189 currState = new WalkerState();
190 currState->tableWalker = this;
191 } else if (_timing) {
192 // This is a translation that was completed and then faulted again
193 // because some underlying parameters that affect the translation
194 // changed out from under us (e.g. asid). It will either be a
195 // misprediction, in which case nothing will happen or we'll use
196 // this fault to re-execute the faulting instruction which should clean
197 // up everything.
198 if (currState->vaddr_tainted == _req->getVaddr()) {
178 WalkerState *savedCurrState = NULL;
179
180 if (!currState && !_functional) {
181 // For atomic mode, a new WalkerState instance should be only created
182 // once per TLB. For timing mode, a new instance is generated for every
183 // TLB miss.
184 DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");
185
186 currState = new WalkerState();
187 currState->tableWalker = this;
188 } else if (_functional) {
189 // If we are mixing functional mode with timing (or even
190 // atomic), we need to to be careful and clean up after
191 // ourselves to not risk getting into an inconsistent state.
192 DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
193 savedCurrState = currState;
194 currState = new WalkerState();
195 currState->tableWalker = this;
196 } else if (_timing) {
197 // This is a translation that was completed and then faulted again
198 // because some underlying parameters that affect the translation
199 // changed out from under us (e.g. asid). It will either be a
200 // misprediction, in which case nothing will happen or we'll use
201 // this fault to re-execute the faulting instruction which should clean
202 // up everything.
203 if (currState->vaddr_tainted == _req->getVaddr()) {
204 ++statSquashedBefore;
199 return std::make_shared<ReExec>();
200 }
201 }
205 return std::make_shared<ReExec>();
206 }
207 }
208 pendingChange();
202
209
210 currState->startTime = curTick();
203 currState->tc = _tc;
204 currState->aarch64 = opModeIs64(currOpMode(_tc));
205 currState->el = currEL(_tc);
206 currState->transState = _trans;
207 currState->req = _req;
208 currState->fault = NoFault;
209 currState->asid = _asid;
210 currState->vmid = _vmid;
211 currState->isHyp = _isHyp;
212 currState->timing = _timing;
213 currState->functional = _functional;
214 currState->mode = _mode;
215 currState->tranType = tranType;
216 currState->isSecure = secure;
217 currState->physAddrRange = physAddrRange;
218
219 /** @todo These should be cached or grabbed from cached copies in
220 the TLB, all these miscreg reads are expensive */
221 currState->vaddr_tainted = currState->req->getVaddr();
222 if (currState->aarch64)
223 currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
224 currState->tc, currState->el);
225 else
226 currState->vaddr = currState->vaddr_tainted;
227
228 if (currState->aarch64) {
229 switch (currState->el) {
230 case EL0:
231 case EL1:
232 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
233 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
234 break;
235 // @todo: uncomment this to enable Virtualization
236 // case EL2:
237 // assert(haveVirtualization);
238 // currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
239 // currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
240 // break;
241 case EL3:
242 assert(haveSecurity);
243 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
244 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
245 break;
246 default:
247 panic("Invalid exception level");
248 break;
249 }
250 } else {
251 currState->sctlr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
252 MISCREG_SCTLR, currState->tc, !currState->isSecure));
253 currState->ttbcr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
254 MISCREG_TTBCR, currState->tc, !currState->isSecure));
255 currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
256 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
257 currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
258 }
259 sctlr = currState->sctlr;
260
261 currState->isFetch = (currState->mode == TLB::Execute);
262 currState->isWrite = (currState->mode == TLB::Write);
263
211 currState->tc = _tc;
212 currState->aarch64 = opModeIs64(currOpMode(_tc));
213 currState->el = currEL(_tc);
214 currState->transState = _trans;
215 currState->req = _req;
216 currState->fault = NoFault;
217 currState->asid = _asid;
218 currState->vmid = _vmid;
219 currState->isHyp = _isHyp;
220 currState->timing = _timing;
221 currState->functional = _functional;
222 currState->mode = _mode;
223 currState->tranType = tranType;
224 currState->isSecure = secure;
225 currState->physAddrRange = physAddrRange;
226
227 /** @todo These should be cached or grabbed from cached copies in
228 the TLB, all these miscreg reads are expensive */
229 currState->vaddr_tainted = currState->req->getVaddr();
230 if (currState->aarch64)
231 currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
232 currState->tc, currState->el);
233 else
234 currState->vaddr = currState->vaddr_tainted;
235
236 if (currState->aarch64) {
237 switch (currState->el) {
238 case EL0:
239 case EL1:
240 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
241 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
242 break;
243 // @todo: uncomment this to enable Virtualization
244 // case EL2:
245 // assert(haveVirtualization);
246 // currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
247 // currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
248 // break;
249 case EL3:
250 assert(haveSecurity);
251 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
252 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
253 break;
254 default:
255 panic("Invalid exception level");
256 break;
257 }
258 } else {
259 currState->sctlr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
260 MISCREG_SCTLR, currState->tc, !currState->isSecure));
261 currState->ttbcr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
262 MISCREG_TTBCR, currState->tc, !currState->isSecure));
263 currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
264 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
265 currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
266 }
267 sctlr = currState->sctlr;
268
269 currState->isFetch = (currState->mode == TLB::Execute);
270 currState->isWrite = (currState->mode == TLB::Write);
271
272 statRequestOrigin[REQUESTED][currState->isFetch]++;
273
264 // We only do a second stage of translation if we're not secure, or in
265 // hyp mode, the second stage MMU is enabled, and this table walker
266 // instance is the first stage.
267 currState->doingStage2 = false;
268 // @todo: for now disable this in AArch64 (HCR is not set)
269 currState->stage2Req = !currState->aarch64 && currState->hcr.vm &&
270 !isStage2 && !currState->isSecure && !currState->isHyp;
271
272 bool long_desc_format = currState->aarch64 ||
273 (_haveLPAE && currState->ttbcr.eae) ||
274 _isHyp || isStage2;
275
276 if (long_desc_format) {
277 // Helper variables used for hierarchical permissions
278 currState->secureLookup = currState->isSecure;
279 currState->rwTable = true;
280 currState->userTable = true;
281 currState->xnTable = false;
282 currState->pxnTable = false;
274 // We only do a second stage of translation if we're not secure, or in
275 // hyp mode, the second stage MMU is enabled, and this table walker
276 // instance is the first stage.
277 currState->doingStage2 = false;
278 // @todo: for now disable this in AArch64 (HCR is not set)
279 currState->stage2Req = !currState->aarch64 && currState->hcr.vm &&
280 !isStage2 && !currState->isSecure && !currState->isHyp;
281
282 bool long_desc_format = currState->aarch64 ||
283 (_haveLPAE && currState->ttbcr.eae) ||
284 _isHyp || isStage2;
285
286 if (long_desc_format) {
287 // Helper variables used for hierarchical permissions
288 currState->secureLookup = currState->isSecure;
289 currState->rwTable = true;
290 currState->userTable = true;
291 currState->xnTable = false;
292 currState->pxnTable = false;
293
294 ++statWalksLongDescriptor;
295 } else {
296 ++statWalksShortDescriptor;
283 }
284
285 if (!currState->timing) {
286 Fault fault = NoFault;
287 if (currState->aarch64)
288 fault = processWalkAArch64();
289 else if (long_desc_format)
290 fault = processWalkLPAE();
291 else
292 fault = processWalk();
293
294 // If this was a functional non-timing access restore state to
295 // how we found it.
296 if (currState->functional) {
297 delete currState;
298 currState = savedCurrState;
299 }
300 return fault;
301 }
302
303 if (pending || pendingQueue.size()) {
304 pendingQueue.push_back(currState);
305 currState = NULL;
297 }
298
299 if (!currState->timing) {
300 Fault fault = NoFault;
301 if (currState->aarch64)
302 fault = processWalkAArch64();
303 else if (long_desc_format)
304 fault = processWalkLPAE();
305 else
306 fault = processWalk();
307
308 // If this was a functional non-timing access restore state to
309 // how we found it.
310 if (currState->functional) {
311 delete currState;
312 currState = savedCurrState;
313 }
314 return fault;
315 }
316
317 if (pending || pendingQueue.size()) {
318 pendingQueue.push_back(currState);
319 currState = NULL;
320 pendingChange();
306 } else {
307 pending = true;
321 } else {
322 pending = true;
323 pendingChange();
308 if (currState->aarch64)
309 return processWalkAArch64();
310 else if (long_desc_format)
311 return processWalkLPAE();
312 else
313 return processWalk();
314 }
315
316 return NoFault;
317}
318
319void
320TableWalker::processWalkWrapper()
321{
322 assert(!currState);
323 assert(pendingQueue.size());
324 if (currState->aarch64)
325 return processWalkAArch64();
326 else if (long_desc_format)
327 return processWalkLPAE();
328 else
329 return processWalk();
330 }
331
332 return NoFault;
333}
334
335void
336TableWalker::processWalkWrapper()
337{
338 assert(!currState);
339 assert(pendingQueue.size());
340 pendingChange();
324 currState = pendingQueue.front();
325
326 ExceptionLevel target_el = EL0;
327 if (currState->aarch64)
328 target_el = currEL(currState->tc);
329 else
330 target_el = EL1;
331
332 // Check if a previous walk filled this request already
333 // @TODO Should this always be the TLB or should we look in the stage2 TLB?
334 TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
335 currState->vmid, currState->isHyp, currState->isSecure, true, false,
336 target_el);
337
338 // Check if we still need to have a walk for this request. If the requesting
339 // instruction has been squashed, or a previous walk has filled the TLB with
340 // a match, we just want to get rid of the walk. The latter could happen
341 // when there are multiple outstanding misses to a single page and a
342 // previous request has been successfully translated.
343 if (!currState->transState->squashed() && !te) {
344 // We've got a valid request, lets process it
345 pending = true;
346 pendingQueue.pop_front();
347 // Keep currState in case one of the processWalk... calls NULLs it
348 WalkerState *curr_state_copy = currState;
349 Fault f;
350 if (currState->aarch64)
351 f = processWalkAArch64();
352 else if ((_haveLPAE && currState->ttbcr.eae) || currState->isHyp || isStage2)
353 f = processWalkLPAE();
354 else
355 f = processWalk();
356
357 if (f != NoFault) {
358 curr_state_copy->transState->finish(f, curr_state_copy->req,
359 curr_state_copy->tc, curr_state_copy->mode);
360
361 delete curr_state_copy;
362 }
363 return;
364 }
365
366
367 // If the instruction that we were translating for has been
368 // squashed we shouldn't bother.
369 unsigned num_squashed = 0;
370 ThreadContext *tc = currState->tc;
371 while ((num_squashed < numSquashable) && currState &&
372 (currState->transState->squashed() || te)) {
373 pendingQueue.pop_front();
374 num_squashed++;
341 currState = pendingQueue.front();
342
343 ExceptionLevel target_el = EL0;
344 if (currState->aarch64)
345 target_el = currEL(currState->tc);
346 else
347 target_el = EL1;
348
349 // Check if a previous walk filled this request already
350 // @TODO Should this always be the TLB or should we look in the stage2 TLB?
351 TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
352 currState->vmid, currState->isHyp, currState->isSecure, true, false,
353 target_el);
354
355 // Check if we still need to have a walk for this request. If the requesting
356 // instruction has been squashed, or a previous walk has filled the TLB with
357 // a match, we just want to get rid of the walk. The latter could happen
358 // when there are multiple outstanding misses to a single page and a
359 // previous request has been successfully translated.
360 if (!currState->transState->squashed() && !te) {
361 // We've got a valid request, lets process it
362 pending = true;
363 pendingQueue.pop_front();
364 // Keep currState in case one of the processWalk... calls NULLs it
365 WalkerState *curr_state_copy = currState;
366 Fault f;
367 if (currState->aarch64)
368 f = processWalkAArch64();
369 else if ((_haveLPAE && currState->ttbcr.eae) || currState->isHyp || isStage2)
370 f = processWalkLPAE();
371 else
372 f = processWalk();
373
374 if (f != NoFault) {
375 curr_state_copy->transState->finish(f, curr_state_copy->req,
376 curr_state_copy->tc, curr_state_copy->mode);
377
378 delete curr_state_copy;
379 }
380 return;
381 }
382
383
384 // If the instruction that we were translating for has been
385 // squashed we shouldn't bother.
386 unsigned num_squashed = 0;
387 ThreadContext *tc = currState->tc;
388 while ((num_squashed < numSquashable) && currState &&
389 (currState->transState->squashed() || te)) {
390 pendingQueue.pop_front();
391 num_squashed++;
392 statSquashedBefore++;
375
376 DPRINTF(TLB, "Squashing table walk for address %#x\n",
377 currState->vaddr_tainted);
378
379 if (currState->transState->squashed()) {
380 // finish the translation which will delete the translation object
381 currState->transState->finish(
382 std::make_shared<UnimpFault>("Squashed Inst"),
383 currState->req, currState->tc, currState->mode);
384 } else {
385 // translate the request now that we know it will work
393
394 DPRINTF(TLB, "Squashing table walk for address %#x\n",
395 currState->vaddr_tainted);
396
397 if (currState->transState->squashed()) {
398 // finish the translation which will delete the translation object
399 currState->transState->finish(
400 std::make_shared<UnimpFault>("Squashed Inst"),
401 currState->req, currState->tc, currState->mode);
402 } else {
403 // translate the request now that we know it will work
404 statWalkServiceTime.sample(curTick() - currState->startTime);
386 tlb->translateTiming(currState->req, currState->tc,
387 currState->transState, currState->mode);
388
389 }
390
391 // delete the current request
392 delete currState;
393
394 // peak at the next one
395 if (pendingQueue.size()) {
396 currState = pendingQueue.front();
397 te = tlb->lookup(currState->vaddr, currState->asid,
398 currState->vmid, currState->isHyp, currState->isSecure, true,
399 false, target_el);
400 } else {
401 // Terminate the loop, nothing more to do
402 currState = NULL;
403 }
404 }
405 tlb->translateTiming(currState->req, currState->tc,
406 currState->transState, currState->mode);
407
408 }
409
410 // delete the current request
411 delete currState;
412
413 // peak at the next one
414 if (pendingQueue.size()) {
415 currState = pendingQueue.front();
416 te = tlb->lookup(currState->vaddr, currState->asid,
417 currState->vmid, currState->isHyp, currState->isSecure, true,
418 false, target_el);
419 } else {
420 // Terminate the loop, nothing more to do
421 currState = NULL;
422 }
423 }
424 pendingChange();
405
425
406 // if we've still got pending translations schedule more work
426 // if we still have pending translations, schedule more work
407 nextWalk(tc);
408 currState = NULL;
409}
410
411Fault
412TableWalker::processWalk()
413{
414 Addr ttbr = 0;
415
416 // If translation isn't enabled, we shouldn't be here
417 assert(currState->sctlr.m || isStage2);
418
419 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
420 currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
421 32 - currState->ttbcr.n));
422
427 nextWalk(tc);
428 currState = NULL;
429}
430
431Fault
432TableWalker::processWalk()
433{
434 Addr ttbr = 0;
435
436 // If translation isn't enabled, we shouldn't be here
437 assert(currState->sctlr.m || isStage2);
438
439 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
440 currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
441 32 - currState->ttbcr.n));
442
443 statWalkWaitTime.sample(curTick() - currState->startTime);
444
423 if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
424 32 - currState->ttbcr.n)) {
425 DPRINTF(TLB, " - Selecting TTBR0\n");
426 // Check if table walk is allowed when Security Extensions are enabled
427 if (haveSecurity && currState->ttbcr.pd0) {
428 if (currState->isFetch)
429 return std::make_shared<PrefetchAbort>(
430 currState->vaddr_tainted,
431 ArmFault::TranslationLL + L1,
432 isStage2,
433 ArmFault::VmsaTran);
434 else
435 return std::make_shared<DataAbort>(
436 currState->vaddr_tainted,
437 TlbEntry::DomainType::NoAccess, currState->isWrite,
438 ArmFault::TranslationLL + L1, isStage2,
439 ArmFault::VmsaTran);
440 }
441 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
442 MISCREG_TTBR0, currState->tc, !currState->isSecure));
443 } else {
444 DPRINTF(TLB, " - Selecting TTBR1\n");
445 // Check if table walk is allowed when Security Extensions are enabled
446 if (haveSecurity && currState->ttbcr.pd1) {
447 if (currState->isFetch)
448 return std::make_shared<PrefetchAbort>(
449 currState->vaddr_tainted,
450 ArmFault::TranslationLL + L1,
451 isStage2,
452 ArmFault::VmsaTran);
453 else
454 return std::make_shared<DataAbort>(
455 currState->vaddr_tainted,
456 TlbEntry::DomainType::NoAccess, currState->isWrite,
457 ArmFault::TranslationLL + L1, isStage2,
458 ArmFault::VmsaTran);
459 }
460 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
461 MISCREG_TTBR1, currState->tc, !currState->isSecure));
462 currState->ttbcr.n = 0;
463 }
464
465 Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
466 (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
467 DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
468 currState->isSecure ? "s" : "ns");
469
470 // Trickbox address check
471 Fault f;
472 f = tlb->walkTrickBoxCheck(l1desc_addr, currState->isSecure,
473 currState->vaddr, sizeof(uint32_t), currState->isFetch,
474 currState->isWrite, TlbEntry::DomainType::NoAccess, L1);
475 if (f) {
476 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
477 if (currState->timing) {
478 pending = false;
479 nextWalk(currState->tc);
480 currState = NULL;
481 } else {
482 currState->tc = NULL;
483 currState->req = NULL;
484 }
485 return f;
486 }
487
488 Request::Flags flag = 0;
489 if (currState->sctlr.c == 0) {
490 flag = Request::UNCACHEABLE;
491 }
492
493 bool delayed;
494 delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
495 sizeof(uint32_t), flag, L1, &doL1DescEvent,
496 &TableWalker::doL1Descriptor);
497 if (!delayed) {
498 f = currState->fault;
499 }
500
501 return f;
502}
503
504Fault
505TableWalker::processWalkLPAE()
506{
507 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
508 int tsz, n;
509 LookupLevel start_lookup_level = L1;
510
511 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
512 currState->vaddr_tainted, currState->ttbcr);
513
445 if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
446 32 - currState->ttbcr.n)) {
447 DPRINTF(TLB, " - Selecting TTBR0\n");
448 // Check if table walk is allowed when Security Extensions are enabled
449 if (haveSecurity && currState->ttbcr.pd0) {
450 if (currState->isFetch)
451 return std::make_shared<PrefetchAbort>(
452 currState->vaddr_tainted,
453 ArmFault::TranslationLL + L1,
454 isStage2,
455 ArmFault::VmsaTran);
456 else
457 return std::make_shared<DataAbort>(
458 currState->vaddr_tainted,
459 TlbEntry::DomainType::NoAccess, currState->isWrite,
460 ArmFault::TranslationLL + L1, isStage2,
461 ArmFault::VmsaTran);
462 }
463 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
464 MISCREG_TTBR0, currState->tc, !currState->isSecure));
465 } else {
466 DPRINTF(TLB, " - Selecting TTBR1\n");
467 // Check if table walk is allowed when Security Extensions are enabled
468 if (haveSecurity && currState->ttbcr.pd1) {
469 if (currState->isFetch)
470 return std::make_shared<PrefetchAbort>(
471 currState->vaddr_tainted,
472 ArmFault::TranslationLL + L1,
473 isStage2,
474 ArmFault::VmsaTran);
475 else
476 return std::make_shared<DataAbort>(
477 currState->vaddr_tainted,
478 TlbEntry::DomainType::NoAccess, currState->isWrite,
479 ArmFault::TranslationLL + L1, isStage2,
480 ArmFault::VmsaTran);
481 }
482 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
483 MISCREG_TTBR1, currState->tc, !currState->isSecure));
484 currState->ttbcr.n = 0;
485 }
486
487 Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
488 (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
489 DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
490 currState->isSecure ? "s" : "ns");
491
492 // Trickbox address check
493 Fault f;
494 f = tlb->walkTrickBoxCheck(l1desc_addr, currState->isSecure,
495 currState->vaddr, sizeof(uint32_t), currState->isFetch,
496 currState->isWrite, TlbEntry::DomainType::NoAccess, L1);
497 if (f) {
498 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
499 if (currState->timing) {
500 pending = false;
501 nextWalk(currState->tc);
502 currState = NULL;
503 } else {
504 currState->tc = NULL;
505 currState->req = NULL;
506 }
507 return f;
508 }
509
510 Request::Flags flag = 0;
511 if (currState->sctlr.c == 0) {
512 flag = Request::UNCACHEABLE;
513 }
514
515 bool delayed;
516 delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
517 sizeof(uint32_t), flag, L1, &doL1DescEvent,
518 &TableWalker::doL1Descriptor);
519 if (!delayed) {
520 f = currState->fault;
521 }
522
523 return f;
524}
525
526Fault
527TableWalker::processWalkLPAE()
528{
529 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
530 int tsz, n;
531 LookupLevel start_lookup_level = L1;
532
533 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
534 currState->vaddr_tainted, currState->ttbcr);
535
536 statWalkWaitTime.sample(curTick() - currState->startTime);
537
514 Request::Flags flag = 0;
515 if (currState->isSecure)
516 flag.set(Request::SECURE);
517
518 // work out which base address register to use, if in hyp mode we always
519 // use HTTBR
520 if (isStage2) {
521 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
522 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
523 tsz = sext<4>(currState->vtcr.t0sz);
524 start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
525 } else if (currState->isHyp) {
526 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
527 ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
528 tsz = currState->htcr.t0sz;
529 } else {
530 assert(_haveLPAE && currState->ttbcr.eae);
531
532 // Determine boundaries of TTBR0/1 regions
533 if (currState->ttbcr.t0sz)
534 ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
535 else if (currState->ttbcr.t1sz)
536 ttbr0_max = (1ULL << 32) -
537 (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
538 else
539 ttbr0_max = (1ULL << 32) - 1;
540 if (currState->ttbcr.t1sz)
541 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
542 else
543 ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
544
545 // The following code snippet selects the appropriate translation table base
546 // address (TTBR0 or TTBR1) and the appropriate starting lookup level
547 // depending on the address range supported by the translation table (ARM
548 // ARM issue C B3.6.4)
549 if (currState->vaddr <= ttbr0_max) {
550 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
551 // Check if table walk is allowed
552 if (currState->ttbcr.epd0) {
553 if (currState->isFetch)
554 return std::make_shared<PrefetchAbort>(
555 currState->vaddr_tainted,
556 ArmFault::TranslationLL + L1,
557 isStage2,
558 ArmFault::LpaeTran);
559 else
560 return std::make_shared<DataAbort>(
561 currState->vaddr_tainted,
562 TlbEntry::DomainType::NoAccess,
563 currState->isWrite,
564 ArmFault::TranslationLL + L1,
565 isStage2,
566 ArmFault::LpaeTran);
567 }
568 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
569 MISCREG_TTBR0, currState->tc, !currState->isSecure));
570 tsz = currState->ttbcr.t0sz;
571 if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GB
572 start_lookup_level = L2;
573 } else if (currState->vaddr >= ttbr1_min) {
574 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
575 // Check if table walk is allowed
576 if (currState->ttbcr.epd1) {
577 if (currState->isFetch)
578 return std::make_shared<PrefetchAbort>(
579 currState->vaddr_tainted,
580 ArmFault::TranslationLL + L1,
581 isStage2,
582 ArmFault::LpaeTran);
583 else
584 return std::make_shared<DataAbort>(
585 currState->vaddr_tainted,
586 TlbEntry::DomainType::NoAccess,
587 currState->isWrite,
588 ArmFault::TranslationLL + L1,
589 isStage2,
590 ArmFault::LpaeTran);
591 }
592 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
593 MISCREG_TTBR1, currState->tc, !currState->isSecure));
594 tsz = currState->ttbcr.t1sz;
595 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30)) // Lower limit >= 3 GB
596 start_lookup_level = L2;
597 } else {
598 // Out of boundaries -> translation fault
599 if (currState->isFetch)
600 return std::make_shared<PrefetchAbort>(
601 currState->vaddr_tainted,
602 ArmFault::TranslationLL + L1,
603 isStage2,
604 ArmFault::LpaeTran);
605 else
606 return std::make_shared<DataAbort>(
607 currState->vaddr_tainted,
608 TlbEntry::DomainType::NoAccess,
609 currState->isWrite, ArmFault::TranslationLL + L1,
610 isStage2, ArmFault::LpaeTran);
611 }
612
613 }
614
615 // Perform lookup (ARM ARM issue C B3.6.6)
616 if (start_lookup_level == L1) {
617 n = 5 - tsz;
618 desc_addr = mbits(ttbr, 39, n) |
619 (bits(currState->vaddr, n + 26, 30) << 3);
620 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
621 desc_addr, currState->isSecure ? "s" : "ns");
622 } else {
623 // Skip first-level lookup
624 n = (tsz >= 2 ? 14 - tsz : 12);
625 desc_addr = mbits(ttbr, 39, n) |
626 (bits(currState->vaddr, n + 17, 21) << 3);
627 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
628 desc_addr, currState->isSecure ? "s" : "ns");
629 }
630
631 // Trickbox address check
632 Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
633 currState->vaddr, sizeof(uint64_t), currState->isFetch,
634 currState->isWrite, TlbEntry::DomainType::NoAccess,
635 start_lookup_level);
636 if (f) {
637 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
638 if (currState->timing) {
639 pending = false;
640 nextWalk(currState->tc);
641 currState = NULL;
642 } else {
643 currState->tc = NULL;
644 currState->req = NULL;
645 }
646 return f;
647 }
648
649 if (currState->sctlr.c == 0) {
650 flag = Request::UNCACHEABLE;
651 }
652
653 if (currState->isSecure)
654 flag.set(Request::SECURE);
655
656 currState->longDesc.lookupLevel = start_lookup_level;
657 currState->longDesc.aarch64 = false;
658 currState->longDesc.grainSize = Grain4KB;
659
660 Event *event = start_lookup_level == L1 ? (Event *) &doL1LongDescEvent
661 : (Event *) &doL2LongDescEvent;
662
663 bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
664 sizeof(uint64_t), flag, start_lookup_level,
665 event, &TableWalker::doLongDescriptor);
666 if (!delayed) {
667 f = currState->fault;
668 }
669
670 return f;
671}
672
673unsigned
674TableWalker::adjustTableSizeAArch64(unsigned tsz)
675{
676 if (tsz < 25)
677 return 25;
678 if (tsz > 48)
679 return 48;
680 return tsz;
681}
682
683bool
684TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
685{
686 return (currPhysAddrRange != MaxPhysAddrRange &&
687 bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
688}
689
690Fault
691TableWalker::processWalkAArch64()
692{
693 assert(currState->aarch64);
694
695 DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
696 currState->vaddr_tainted, currState->tcr);
697
698 static const GrainSize GrainMapDefault[] =
699 { Grain4KB, Grain64KB, Grain16KB, ReservedGrain };
700 static const GrainSize GrainMap_EL1_tg1[] =
701 { ReservedGrain, Grain16KB, Grain4KB, Grain64KB };
702
538 Request::Flags flag = 0;
539 if (currState->isSecure)
540 flag.set(Request::SECURE);
541
542 // work out which base address register to use, if in hyp mode we always
543 // use HTTBR
544 if (isStage2) {
545 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
546 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
547 tsz = sext<4>(currState->vtcr.t0sz);
548 start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
549 } else if (currState->isHyp) {
550 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
551 ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
552 tsz = currState->htcr.t0sz;
553 } else {
554 assert(_haveLPAE && currState->ttbcr.eae);
555
556 // Determine boundaries of TTBR0/1 regions
557 if (currState->ttbcr.t0sz)
558 ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
559 else if (currState->ttbcr.t1sz)
560 ttbr0_max = (1ULL << 32) -
561 (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
562 else
563 ttbr0_max = (1ULL << 32) - 1;
564 if (currState->ttbcr.t1sz)
565 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
566 else
567 ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
568
569 // The following code snippet selects the appropriate translation table base
570 // address (TTBR0 or TTBR1) and the appropriate starting lookup level
571 // depending on the address range supported by the translation table (ARM
572 // ARM issue C B3.6.4)
573 if (currState->vaddr <= ttbr0_max) {
574 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
575 // Check if table walk is allowed
576 if (currState->ttbcr.epd0) {
577 if (currState->isFetch)
578 return std::make_shared<PrefetchAbort>(
579 currState->vaddr_tainted,
580 ArmFault::TranslationLL + L1,
581 isStage2,
582 ArmFault::LpaeTran);
583 else
584 return std::make_shared<DataAbort>(
585 currState->vaddr_tainted,
586 TlbEntry::DomainType::NoAccess,
587 currState->isWrite,
588 ArmFault::TranslationLL + L1,
589 isStage2,
590 ArmFault::LpaeTran);
591 }
592 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
593 MISCREG_TTBR0, currState->tc, !currState->isSecure));
594 tsz = currState->ttbcr.t0sz;
595 if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GB
596 start_lookup_level = L2;
597 } else if (currState->vaddr >= ttbr1_min) {
598 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
599 // Check if table walk is allowed
600 if (currState->ttbcr.epd1) {
601 if (currState->isFetch)
602 return std::make_shared<PrefetchAbort>(
603 currState->vaddr_tainted,
604 ArmFault::TranslationLL + L1,
605 isStage2,
606 ArmFault::LpaeTran);
607 else
608 return std::make_shared<DataAbort>(
609 currState->vaddr_tainted,
610 TlbEntry::DomainType::NoAccess,
611 currState->isWrite,
612 ArmFault::TranslationLL + L1,
613 isStage2,
614 ArmFault::LpaeTran);
615 }
616 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
617 MISCREG_TTBR1, currState->tc, !currState->isSecure));
618 tsz = currState->ttbcr.t1sz;
619 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30)) // Lower limit >= 3 GB
620 start_lookup_level = L2;
621 } else {
622 // Out of boundaries -> translation fault
623 if (currState->isFetch)
624 return std::make_shared<PrefetchAbort>(
625 currState->vaddr_tainted,
626 ArmFault::TranslationLL + L1,
627 isStage2,
628 ArmFault::LpaeTran);
629 else
630 return std::make_shared<DataAbort>(
631 currState->vaddr_tainted,
632 TlbEntry::DomainType::NoAccess,
633 currState->isWrite, ArmFault::TranslationLL + L1,
634 isStage2, ArmFault::LpaeTran);
635 }
636
637 }
638
639 // Perform lookup (ARM ARM issue C B3.6.6)
640 if (start_lookup_level == L1) {
641 n = 5 - tsz;
642 desc_addr = mbits(ttbr, 39, n) |
643 (bits(currState->vaddr, n + 26, 30) << 3);
644 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
645 desc_addr, currState->isSecure ? "s" : "ns");
646 } else {
647 // Skip first-level lookup
648 n = (tsz >= 2 ? 14 - tsz : 12);
649 desc_addr = mbits(ttbr, 39, n) |
650 (bits(currState->vaddr, n + 17, 21) << 3);
651 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
652 desc_addr, currState->isSecure ? "s" : "ns");
653 }
654
655 // Trickbox address check
656 Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
657 currState->vaddr, sizeof(uint64_t), currState->isFetch,
658 currState->isWrite, TlbEntry::DomainType::NoAccess,
659 start_lookup_level);
660 if (f) {
661 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
662 if (currState->timing) {
663 pending = false;
664 nextWalk(currState->tc);
665 currState = NULL;
666 } else {
667 currState->tc = NULL;
668 currState->req = NULL;
669 }
670 return f;
671 }
672
673 if (currState->sctlr.c == 0) {
674 flag = Request::UNCACHEABLE;
675 }
676
677 if (currState->isSecure)
678 flag.set(Request::SECURE);
679
680 currState->longDesc.lookupLevel = start_lookup_level;
681 currState->longDesc.aarch64 = false;
682 currState->longDesc.grainSize = Grain4KB;
683
684 Event *event = start_lookup_level == L1 ? (Event *) &doL1LongDescEvent
685 : (Event *) &doL2LongDescEvent;
686
687 bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
688 sizeof(uint64_t), flag, start_lookup_level,
689 event, &TableWalker::doLongDescriptor);
690 if (!delayed) {
691 f = currState->fault;
692 }
693
694 return f;
695}
696
697unsigned
698TableWalker::adjustTableSizeAArch64(unsigned tsz)
699{
700 if (tsz < 25)
701 return 25;
702 if (tsz > 48)
703 return 48;
704 return tsz;
705}
706
707bool
708TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
709{
710 return (currPhysAddrRange != MaxPhysAddrRange &&
711 bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
712}
713
714Fault
715TableWalker::processWalkAArch64()
716{
717 assert(currState->aarch64);
718
719 DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
720 currState->vaddr_tainted, currState->tcr);
721
722 static const GrainSize GrainMapDefault[] =
723 { Grain4KB, Grain64KB, Grain16KB, ReservedGrain };
724 static const GrainSize GrainMap_EL1_tg1[] =
725 { ReservedGrain, Grain16KB, Grain4KB, Grain64KB };
726
727 statWalkWaitTime.sample(curTick() - currState->startTime);
728
703 // Determine TTBR, table size, granule size and phys. address range
704 Addr ttbr = 0;
705 int tsz = 0, ps = 0;
706 GrainSize tg = Grain4KB; // grain size computed from tg* field
707 bool fault = false;
708 switch (currState->el) {
709 case EL0:
710 case EL1:
711 switch (bits(currState->vaddr, 63,48)) {
712 case 0:
713 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
714 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
715 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
716 tg = GrainMapDefault[currState->tcr.tg0];
717 if (bits(currState->vaddr, 63, tsz) != 0x0 ||
718 currState->tcr.epd0)
719 fault = true;
720 break;
721 case 0xffff:
722 DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
723 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
724 tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
725 tg = GrainMap_EL1_tg1[currState->tcr.tg1];
726 if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
727 currState->tcr.epd1)
728 fault = true;
729 break;
730 default:
731 // top two bytes must be all 0s or all 1s, else invalid addr
732 fault = true;
733 }
734 ps = currState->tcr.ips;
735 break;
736 case EL2:
737 case EL3:
738 switch(bits(currState->vaddr, 63,48)) {
739 case 0:
740 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
741 if (currState->el == EL2)
742 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
743 else
744 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
745 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
746 tg = GrainMapDefault[currState->tcr.tg0];
747 break;
748 default:
749 // invalid addr if top two bytes are not all 0s
750 fault = true;
751 }
752 ps = currState->tcr.ips;
753 break;
754 }
755
756 if (fault) {
757 Fault f;
758 if (currState->isFetch)
759 f = std::make_shared<PrefetchAbort>(
760 currState->vaddr_tainted,
761 ArmFault::TranslationLL + L0, isStage2,
762 ArmFault::LpaeTran);
763 else
764 f = std::make_shared<DataAbort>(
765 currState->vaddr_tainted,
766 TlbEntry::DomainType::NoAccess,
767 currState->isWrite,
768 ArmFault::TranslationLL + L0,
769 isStage2, ArmFault::LpaeTran);
770
771 if (currState->timing) {
772 pending = false;
773 nextWalk(currState->tc);
774 currState = NULL;
775 } else {
776 currState->tc = NULL;
777 currState->req = NULL;
778 }
779 return f;
780
781 }
782
783 if (tg == ReservedGrain) {
784 warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
785 "DEFINED behavior takes this to mean 4KB granules\n");
786 tg = Grain4KB;
787 }
788
789 int stride = tg - 3;
790 LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;
791
792 // Determine starting lookup level
793 // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
794 // in ARM DDI 0487A. These table values correspond to the cascading tests
795 // to compute the lookup level and are of the form
796 // (grain_size + N*stride), for N = {1, 2, 3}.
797 // A value of 64 will never succeed and a value of 0 will always succeed.
798 {
799 struct GrainMap {
800 GrainSize grain_size;
801 unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
802 };
803 static const GrainMap GM[] = {
804 { Grain4KB, { 39, 30, 0, 0 } },
805 { Grain16KB, { 47, 36, 25, 0 } },
806 { Grain64KB, { 64, 42, 29, 0 } }
807 };
808
809 const unsigned *lookup = NULL; // points to a lookup_level_cutoff
810
811 for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
812 if (tg == GM[i].grain_size) {
813 lookup = GM[i].lookup_level_cutoff;
814 break;
815 }
816 }
817 assert(lookup);
818
819 for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
820 if (tsz > lookup[L]) {
821 start_lookup_level = (LookupLevel) L;
822 break;
823 }
824 }
825 panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
826 "Table walker couldn't find lookup level\n");
827 }
828
829 // Determine table base address
830 int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
831 Addr base_addr = mbits(ttbr, 47, base_addr_lo);
832
833 // Determine physical address size and raise an Address Size Fault if
834 // necessary
835 int pa_range = decodePhysAddrRange64(ps);
836 // Clamp to lower limit
837 if (pa_range > physAddrRange)
838 currState->physAddrRange = physAddrRange;
839 else
840 currState->physAddrRange = pa_range;
841 if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
842 DPRINTF(TLB, "Address size fault before any lookup\n");
843 Fault f;
844 if (currState->isFetch)
845 f = std::make_shared<PrefetchAbort>(
846 currState->vaddr_tainted,
847 ArmFault::AddressSizeLL + start_lookup_level,
848 isStage2,
849 ArmFault::LpaeTran);
850 else
851 f = std::make_shared<DataAbort>(
852 currState->vaddr_tainted,
853 TlbEntry::DomainType::NoAccess,
854 currState->isWrite,
855 ArmFault::AddressSizeLL + start_lookup_level,
856 isStage2,
857 ArmFault::LpaeTran);
858
859
860 if (currState->timing) {
861 pending = false;
862 nextWalk(currState->tc);
863 currState = NULL;
864 } else {
865 currState->tc = NULL;
866 currState->req = NULL;
867 }
868 return f;
869
870 }
871
872 // Determine descriptor address
873 Addr desc_addr = base_addr |
874 (bits(currState->vaddr, tsz - 1,
875 stride * (3 - start_lookup_level) + tg) << 3);
876
877 // Trickbox address check
878 Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
879 currState->vaddr, sizeof(uint64_t), currState->isFetch,
880 currState->isWrite, TlbEntry::DomainType::NoAccess,
881 start_lookup_level);
882 if (f) {
883 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
884 if (currState->timing) {
885 pending = false;
886 nextWalk(currState->tc);
887 currState = NULL;
888 } else {
889 currState->tc = NULL;
890 currState->req = NULL;
891 }
892 return f;
893 }
894
895 Request::Flags flag = 0;
896 if (currState->sctlr.c == 0) {
897 flag = Request::UNCACHEABLE;
898 }
899
900 currState->longDesc.lookupLevel = start_lookup_level;
901 currState->longDesc.aarch64 = true;
902 currState->longDesc.grainSize = tg;
903
904 if (currState->timing) {
905 Event *event;
906 switch (start_lookup_level) {
907 case L0:
908 event = (Event *) &doL0LongDescEvent;
909 break;
910 case L1:
911 event = (Event *) &doL1LongDescEvent;
912 break;
913 case L2:
914 event = (Event *) &doL2LongDescEvent;
915 break;
916 case L3:
917 event = (Event *) &doL3LongDescEvent;
918 break;
919 default:
920 panic("Invalid table lookup level");
921 break;
922 }
729 // Determine TTBR, table size, granule size and phys. address range
730 Addr ttbr = 0;
731 int tsz = 0, ps = 0;
732 GrainSize tg = Grain4KB; // grain size computed from tg* field
733 bool fault = false;
734 switch (currState->el) {
735 case EL0:
736 case EL1:
737 switch (bits(currState->vaddr, 63,48)) {
738 case 0:
739 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
740 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
741 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
742 tg = GrainMapDefault[currState->tcr.tg0];
743 if (bits(currState->vaddr, 63, tsz) != 0x0 ||
744 currState->tcr.epd0)
745 fault = true;
746 break;
747 case 0xffff:
748 DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
749 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
750 tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
751 tg = GrainMap_EL1_tg1[currState->tcr.tg1];
752 if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
753 currState->tcr.epd1)
754 fault = true;
755 break;
756 default:
757 // top two bytes must be all 0s or all 1s, else invalid addr
758 fault = true;
759 }
760 ps = currState->tcr.ips;
761 break;
762 case EL2:
763 case EL3:
764 switch(bits(currState->vaddr, 63,48)) {
765 case 0:
766 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
767 if (currState->el == EL2)
768 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
769 else
770 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
771 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
772 tg = GrainMapDefault[currState->tcr.tg0];
773 break;
774 default:
775 // invalid addr if top two bytes are not all 0s
776 fault = true;
777 }
778 ps = currState->tcr.ips;
779 break;
780 }
781
782 if (fault) {
783 Fault f;
784 if (currState->isFetch)
785 f = std::make_shared<PrefetchAbort>(
786 currState->vaddr_tainted,
787 ArmFault::TranslationLL + L0, isStage2,
788 ArmFault::LpaeTran);
789 else
790 f = std::make_shared<DataAbort>(
791 currState->vaddr_tainted,
792 TlbEntry::DomainType::NoAccess,
793 currState->isWrite,
794 ArmFault::TranslationLL + L0,
795 isStage2, ArmFault::LpaeTran);
796
797 if (currState->timing) {
798 pending = false;
799 nextWalk(currState->tc);
800 currState = NULL;
801 } else {
802 currState->tc = NULL;
803 currState->req = NULL;
804 }
805 return f;
806
807 }
808
809 if (tg == ReservedGrain) {
810 warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
811 "DEFINED behavior takes this to mean 4KB granules\n");
812 tg = Grain4KB;
813 }
814
815 int stride = tg - 3;
816 LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;
817
818 // Determine starting lookup level
819 // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
820 // in ARM DDI 0487A. These table values correspond to the cascading tests
821 // to compute the lookup level and are of the form
822 // (grain_size + N*stride), for N = {1, 2, 3}.
823 // A value of 64 will never succeed and a value of 0 will always succeed.
824 {
825 struct GrainMap {
826 GrainSize grain_size;
827 unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
828 };
829 static const GrainMap GM[] = {
830 { Grain4KB, { 39, 30, 0, 0 } },
831 { Grain16KB, { 47, 36, 25, 0 } },
832 { Grain64KB, { 64, 42, 29, 0 } }
833 };
834
835 const unsigned *lookup = NULL; // points to a lookup_level_cutoff
836
837 for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
838 if (tg == GM[i].grain_size) {
839 lookup = GM[i].lookup_level_cutoff;
840 break;
841 }
842 }
843 assert(lookup);
844
845 for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
846 if (tsz > lookup[L]) {
847 start_lookup_level = (LookupLevel) L;
848 break;
849 }
850 }
851 panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
852 "Table walker couldn't find lookup level\n");
853 }
854
855 // Determine table base address
856 int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
857 Addr base_addr = mbits(ttbr, 47, base_addr_lo);
858
859 // Determine physical address size and raise an Address Size Fault if
860 // necessary
861 int pa_range = decodePhysAddrRange64(ps);
862 // Clamp to lower limit
863 if (pa_range > physAddrRange)
864 currState->physAddrRange = physAddrRange;
865 else
866 currState->physAddrRange = pa_range;
867 if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
868 DPRINTF(TLB, "Address size fault before any lookup\n");
869 Fault f;
870 if (currState->isFetch)
871 f = std::make_shared<PrefetchAbort>(
872 currState->vaddr_tainted,
873 ArmFault::AddressSizeLL + start_lookup_level,
874 isStage2,
875 ArmFault::LpaeTran);
876 else
877 f = std::make_shared<DataAbort>(
878 currState->vaddr_tainted,
879 TlbEntry::DomainType::NoAccess,
880 currState->isWrite,
881 ArmFault::AddressSizeLL + start_lookup_level,
882 isStage2,
883 ArmFault::LpaeTran);
884
885
886 if (currState->timing) {
887 pending = false;
888 nextWalk(currState->tc);
889 currState = NULL;
890 } else {
891 currState->tc = NULL;
892 currState->req = NULL;
893 }
894 return f;
895
896 }
897
898 // Determine descriptor address
899 Addr desc_addr = base_addr |
900 (bits(currState->vaddr, tsz - 1,
901 stride * (3 - start_lookup_level) + tg) << 3);
902
903 // Trickbox address check
904 Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
905 currState->vaddr, sizeof(uint64_t), currState->isFetch,
906 currState->isWrite, TlbEntry::DomainType::NoAccess,
907 start_lookup_level);
908 if (f) {
909 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
910 if (currState->timing) {
911 pending = false;
912 nextWalk(currState->tc);
913 currState = NULL;
914 } else {
915 currState->tc = NULL;
916 currState->req = NULL;
917 }
918 return f;
919 }
920
921 Request::Flags flag = 0;
922 if (currState->sctlr.c == 0) {
923 flag = Request::UNCACHEABLE;
924 }
925
926 currState->longDesc.lookupLevel = start_lookup_level;
927 currState->longDesc.aarch64 = true;
928 currState->longDesc.grainSize = tg;
929
930 if (currState->timing) {
931 Event *event;
932 switch (start_lookup_level) {
933 case L0:
934 event = (Event *) &doL0LongDescEvent;
935 break;
936 case L1:
937 event = (Event *) &doL1LongDescEvent;
938 break;
939 case L2:
940 event = (Event *) &doL2LongDescEvent;
941 break;
942 case L3:
943 event = (Event *) &doL3LongDescEvent;
944 break;
945 default:
946 panic("Invalid table lookup level");
947 break;
948 }
923 port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t), event,
924 (uint8_t*) &currState->longDesc.data,
949 port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t),
950 event, (uint8_t*) &currState->longDesc.data,
925 currState->tc->getCpuPtr()->clockPeriod(), flag);
926 DPRINTF(TLBVerbose,
927 "Adding to walker fifo: queue size before adding: %d\n",
928 stateQueues[start_lookup_level].size());
929 stateQueues[start_lookup_level].push_back(currState);
930 currState = NULL;
931 } else if (!currState->functional) {
932 port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t),
933 NULL, (uint8_t*) &currState->longDesc.data,
934 currState->tc->getCpuPtr()->clockPeriod(), flag);
935 doLongDescriptor();
936 f = currState->fault;
937 } else {
938 RequestPtr req = new Request(desc_addr, sizeof(uint64_t), flag,
939 masterId);
940 PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
941 pkt->dataStatic((uint8_t*) &currState->longDesc.data);
942 port.sendFunctional(pkt);
943 doLongDescriptor();
944 delete req;
945 delete pkt;
946 f = currState->fault;
947 }
948
949 return f;
950}
951
952void
953TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
954 uint8_t texcb, bool s)
955{
956 // Note: tc and sctlr local variables are hiding tc and sctrl class
957 // variables
958 DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
959 te.shareable = false; // default value
960 te.nonCacheable = false;
961 te.outerShareable = false;
962 if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
963 switch(texcb) {
964 case 0: // Stongly-ordered
965 te.nonCacheable = true;
966 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
967 te.shareable = true;
968 te.innerAttrs = 1;
969 te.outerAttrs = 0;
970 break;
971 case 1: // Shareable Device
972 te.nonCacheable = true;
973 te.mtype = TlbEntry::MemoryType::Device;
974 te.shareable = true;
975 te.innerAttrs = 3;
976 te.outerAttrs = 0;
977 break;
978 case 2: // Outer and Inner Write-Through, no Write-Allocate
979 te.mtype = TlbEntry::MemoryType::Normal;
980 te.shareable = s;
981 te.innerAttrs = 6;
982 te.outerAttrs = bits(texcb, 1, 0);
983 break;
984 case 3: // Outer and Inner Write-Back, no Write-Allocate
985 te.mtype = TlbEntry::MemoryType::Normal;
986 te.shareable = s;
987 te.innerAttrs = 7;
988 te.outerAttrs = bits(texcb, 1, 0);
989 break;
990 case 4: // Outer and Inner Non-cacheable
991 te.nonCacheable = true;
992 te.mtype = TlbEntry::MemoryType::Normal;
993 te.shareable = s;
994 te.innerAttrs = 0;
995 te.outerAttrs = bits(texcb, 1, 0);
996 break;
997 case 5: // Reserved
998 panic("Reserved texcb value!\n");
999 break;
1000 case 6: // Implementation Defined
1001 panic("Implementation-defined texcb value!\n");
1002 break;
1003 case 7: // Outer and Inner Write-Back, Write-Allocate
1004 te.mtype = TlbEntry::MemoryType::Normal;
1005 te.shareable = s;
1006 te.innerAttrs = 5;
1007 te.outerAttrs = 1;
1008 break;
1009 case 8: // Non-shareable Device
1010 te.nonCacheable = true;
1011 te.mtype = TlbEntry::MemoryType::Device;
1012 te.shareable = false;
1013 te.innerAttrs = 3;
1014 te.outerAttrs = 0;
1015 break;
1016 case 9 ... 15: // Reserved
1017 panic("Reserved texcb value!\n");
1018 break;
1019 case 16 ... 31: // Cacheable Memory
1020 te.mtype = TlbEntry::MemoryType::Normal;
1021 te.shareable = s;
1022 if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
1023 te.nonCacheable = true;
1024 te.innerAttrs = bits(texcb, 1, 0);
1025 te.outerAttrs = bits(texcb, 3, 2);
1026 break;
1027 default:
1028 panic("More than 32 states for 5 bits?\n");
1029 }
1030 } else {
1031 assert(tc);
1032 PRRR prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR,
1033 currState->tc, !currState->isSecure));
1034 NMRR nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR,
1035 currState->tc, !currState->isSecure));
1036 DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1037 uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1038 switch(bits(texcb, 2,0)) {
1039 case 0:
1040 curr_tr = prrr.tr0;
1041 curr_ir = nmrr.ir0;
1042 curr_or = nmrr.or0;
1043 te.outerShareable = (prrr.nos0 == 0);
1044 break;
1045 case 1:
1046 curr_tr = prrr.tr1;
1047 curr_ir = nmrr.ir1;
1048 curr_or = nmrr.or1;
1049 te.outerShareable = (prrr.nos1 == 0);
1050 break;
1051 case 2:
1052 curr_tr = prrr.tr2;
1053 curr_ir = nmrr.ir2;
1054 curr_or = nmrr.or2;
1055 te.outerShareable = (prrr.nos2 == 0);
1056 break;
1057 case 3:
1058 curr_tr = prrr.tr3;
1059 curr_ir = nmrr.ir3;
1060 curr_or = nmrr.or3;
1061 te.outerShareable = (prrr.nos3 == 0);
1062 break;
1063 case 4:
1064 curr_tr = prrr.tr4;
1065 curr_ir = nmrr.ir4;
1066 curr_or = nmrr.or4;
1067 te.outerShareable = (prrr.nos4 == 0);
1068 break;
1069 case 5:
1070 curr_tr = prrr.tr5;
1071 curr_ir = nmrr.ir5;
1072 curr_or = nmrr.or5;
1073 te.outerShareable = (prrr.nos5 == 0);
1074 break;
1075 case 6:
1076 panic("Imp defined type\n");
1077 case 7:
1078 curr_tr = prrr.tr7;
1079 curr_ir = nmrr.ir7;
1080 curr_or = nmrr.or7;
1081 te.outerShareable = (prrr.nos7 == 0);
1082 break;
1083 }
1084
1085 switch(curr_tr) {
1086 case 0:
1087 DPRINTF(TLBVerbose, "StronglyOrdered\n");
1088 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1089 te.nonCacheable = true;
1090 te.innerAttrs = 1;
1091 te.outerAttrs = 0;
1092 te.shareable = true;
1093 break;
1094 case 1:
1095 DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1096 prrr.ds1, prrr.ds0, s);
1097 te.mtype = TlbEntry::MemoryType::Device;
1098 te.nonCacheable = true;
1099 te.innerAttrs = 3;
1100 te.outerAttrs = 0;
1101 if (prrr.ds1 && s)
1102 te.shareable = true;
1103 if (prrr.ds0 && !s)
1104 te.shareable = true;
1105 break;
1106 case 2:
1107 DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1108 prrr.ns1, prrr.ns0, s);
1109 te.mtype = TlbEntry::MemoryType::Normal;
1110 if (prrr.ns1 && s)
1111 te.shareable = true;
1112 if (prrr.ns0 && !s)
1113 te.shareable = true;
1114 break;
1115 case 3:
1116 panic("Reserved type");
1117 }
1118
1119 if (te.mtype == TlbEntry::MemoryType::Normal){
1120 switch(curr_ir) {
1121 case 0:
1122 te.nonCacheable = true;
1123 te.innerAttrs = 0;
1124 break;
1125 case 1:
1126 te.innerAttrs = 5;
1127 break;
1128 case 2:
1129 te.innerAttrs = 6;
1130 break;
1131 case 3:
1132 te.innerAttrs = 7;
1133 break;
1134 }
1135
1136 switch(curr_or) {
1137 case 0:
1138 te.nonCacheable = true;
1139 te.outerAttrs = 0;
1140 break;
1141 case 1:
1142 te.outerAttrs = 1;
1143 break;
1144 case 2:
1145 te.outerAttrs = 2;
1146 break;
1147 case 3:
1148 te.outerAttrs = 3;
1149 break;
1150 }
1151 }
1152 }
1153 DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
1154 "outerAttrs: %d\n",
1155 te.shareable, te.innerAttrs, te.outerAttrs);
1156 te.setAttributes(false);
1157}
1158
// Derive the memory attributes of a long-descriptor (LPAE) TLB entry.
// Stage 2 entries carry their attributes directly in the descriptor's
// MemAttr field; stage 1 entries use the descriptor's AttrIndx to index
// into MAIR0/MAIR1. The decoded inner/outer attributes are mainly used
// to fill the PAR register; the memory subsystem itself only consumes
// the 'nonCacheable' flag.
void
TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
    LongDescriptor &lDescriptor)
{
    assert(_haveLPAE);

    uint8_t attr;
    uint8_t sh = lDescriptor.sh();
    // Different format and source of attributes if this is a stage 2
    // translation
    if (isStage2) {
        attr = lDescriptor.memAttr();
        // MemAttr[3:2] selects the memory type, MemAttr[1:0] refines it
        uint8_t attr_3_2 = (attr >> 2) & 0x3;
        uint8_t attr_1_0 = attr & 0x3;

        DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_3_2 == 0) {
            // 0b00xx: Strongly-ordered (xx == 0) or Device (xx != 0)
            te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                     : TlbEntry::MemoryType::Device;
            te.outerAttrs = 0;
            te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            // Normal memory; map the stage-2 cacheability encodings onto
            // the inner/outer attribute values used by the PAR format
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = attr_3_2 == 1 ? 0 :
                            attr_3_2 == 2 ? 2 : 1;
            te.innerAttrs = attr_1_0 == 1 ? 0 :
                            attr_1_0 == 2 ? 6 : 5;
            // Non-cacheable if either inner or outer is Non-cacheable
            te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        // LPAE always uses remapping of memory attributes, irrespective of the
        // value of SCTLR.TRE
        // AttrIndx[2] selects MAIR1 vs MAIR0; AttrIndx[1:0] selects the
        // byte within the chosen (possibly NS-banked) register.
        MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
        int reg_as_int = flattenMiscRegNsBanked(reg, currState->tc,
                                                !currState->isSecure);
        uint32_t mair = currState->tc->readMiscReg(reg_as_int);
        attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
        uint8_t attr_7_4 = bits(attr, 7, 4);
        uint8_t attr_3_0 = bits(attr, 3, 0);
        DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);

        // Note: the memory subsystem only cares about the 'cacheable' memory
        // attribute. The other attributes are only used to fill the PAR register
        // accordingly to provide the illusion of full support
        te.nonCacheable = false;

        // Decode the outer attribute / memory type from attr[7:4]
        switch (attr_7_4) {
          case 0x0:
            // Strongly-ordered or Device memory
            if (attr_3_0 == 0x0)
                te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            else if (attr_3_0 == 0x4)
                te.mtype = TlbEntry::MemoryType::Device;
            else
                panic("Unpredictable behavior\n");
            te.nonCacheable = true;
            te.outerAttrs = 0;
            break;
          case 0x4:
            // Normal memory, Outer Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = 0;
            if (attr_3_0 == 0x4)
                // Inner Non-cacheable
                te.nonCacheable = true;
            else if (attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          case 0x8:
          case 0x9:
          case 0xa:
          case 0xb:
          case 0xc:
          case 0xd:
          case 0xe:
          case 0xf:
            // attr[6] distinguishes write-through (0) from write-back (1);
            // attr[4] selects write-allocate for the write-back encodings
            if (attr_7_4 & 0x4) {
                te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
            } else {
                te.outerAttrs = 0x2;
            }
            // Normal memory, Outer Cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }

        // Decode the inner attribute from attr[3:0]
        switch (attr_3_0) {
          case 0x0:
            te.innerAttrs = 0x1;
            break;
          case 0x4:
            // Inner Non-cacheable unless the whole byte denoted Device
            te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
            break;
          case 0x8:
          case 0x9:
          case 0xA:
          case 0xB:
            te.innerAttrs = 6;
            break;
          case 0xC:
          case 0xD:
          case 0xE:
          case 0xF:
            te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
    }

    // SH[1:0]: 0b10 = Outer Shareable, 0b11 = Inner Shareable
    te.outerShareable = sh == 2;
    te.shareable = (sh & 0x2) ? true : false;
    te.setAttributes(true);
    te.attributes |= (uint64_t) attr << 56;
}
1284
1285void
1286TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te, uint8_t attrIndx,
1287 uint8_t sh)
1288{
1289 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1290
1291 // Select MAIR
1292 uint64_t mair;
1293 switch (currState->el) {
1294 case EL0:
1295 case EL1:
1296 mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1297 break;
1298 case EL2:
1299 mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1300 break;
1301 case EL3:
1302 mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1303 break;
1304 default:
1305 panic("Invalid exception level");
1306 break;
1307 }
1308
1309 // Select attributes
1310 uint8_t attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1311 uint8_t attr_lo = bits(attr, 3, 0);
1312 uint8_t attr_hi = bits(attr, 7, 4);
1313
1314 // Memory type
1315 te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;
1316
1317 // Cacheability
1318 te.nonCacheable = false;
1319 if (te.mtype == TlbEntry::MemoryType::Device || // Device memory
1320 attr_hi == 0x8 || // Normal memory, Outer Non-cacheable
1321 attr_lo == 0x8) { // Normal memory, Inner Non-cacheable
1322 te.nonCacheable = true;
1323 }
1324
1325 te.shareable = sh == 2;
1326 te.outerShareable = (sh & 0x2) ? true : false;
1327 // Attributes formatted according to the 64-bit PAR
1328 te.attributes = ((uint64_t) attr << 56) |
1329 (1 << 11) | // LPAE bit
1330 (te.ns << 9) | // NS bit
1331 (sh << 7);
1332}
1333
1334void
1335TableWalker::doL1Descriptor()
1336{
1337 if (currState->fault != NoFault) {
1338 return;
1339 }
1340
1341 DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1342 currState->vaddr_tainted, currState->l1Desc.data);
1343 TlbEntry te;
1344
1345 switch (currState->l1Desc.type()) {
1346 case L1Descriptor::Ignore:
1347 case L1Descriptor::Reserved:
1348 if (!currState->timing) {
1349 currState->tc = NULL;
1350 currState->req = NULL;
1351 }
1352 DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1353 if (currState->isFetch)
1354 currState->fault =
1355 std::make_shared<PrefetchAbort>(
1356 currState->vaddr_tainted,
1357 ArmFault::TranslationLL + L1,
1358 isStage2,
1359 ArmFault::VmsaTran);
1360 else
1361 currState->fault =
1362 std::make_shared<DataAbort>(
1363 currState->vaddr_tainted,
1364 TlbEntry::DomainType::NoAccess,
1365 currState->isWrite,
1366 ArmFault::TranslationLL + L1, isStage2,
1367 ArmFault::VmsaTran);
1368 return;
1369 case L1Descriptor::Section:
1370 if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1371 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
1372 * enabled if set, do l1.Desc.setAp0() instead of generating
1373 * AccessFlag0
1374 */
1375
1376 currState->fault = std::make_shared<DataAbort>(
1377 currState->vaddr_tainted,
1378 currState->l1Desc.domain(),
1379 currState->isWrite,
1380 ArmFault::AccessFlagLL + L1,
1381 isStage2,
1382 ArmFault::VmsaTran);
1383 }
1384 if (currState->l1Desc.supersection()) {
1385 panic("Haven't implemented supersections\n");
1386 }
1387 insertTableEntry(currState->l1Desc, false);
1388 return;
1389 case L1Descriptor::PageTable:
1390 {
1391 Addr l2desc_addr;
1392 l2desc_addr = currState->l1Desc.l2Addr() |
1393 (bits(currState->vaddr, 19, 12) << 2);
1394 DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1395 l2desc_addr, currState->isSecure ? "s" : "ns");
1396
1397 // Trickbox address check
1398 currState->fault = tlb->walkTrickBoxCheck(
1399 l2desc_addr, currState->isSecure, currState->vaddr,
1400 sizeof(uint32_t), currState->isFetch, currState->isWrite,
1401 currState->l1Desc.domain(), L2);
1402
1403 if (currState->fault) {
1404 if (!currState->timing) {
1405 currState->tc = NULL;
1406 currState->req = NULL;
1407 }
1408 return;
1409 }
1410
1411 Request::Flags flag = 0;
1412 if (currState->isSecure)
1413 flag.set(Request::SECURE);
1414
1415 bool delayed;
1416 delayed = fetchDescriptor(l2desc_addr,
1417 (uint8_t*)&currState->l2Desc.data,
1418 sizeof(uint32_t), flag, -1, &doL2DescEvent,
1419 &TableWalker::doL2Descriptor);
1420 if (delayed) {
1421 currState->delayed = true;
1422 }
1423
1424 return;
1425 }
1426 default:
1427 panic("A new type in a 2 bit field?\n");
1428 }
1429}
1430
1431void
1432TableWalker::doLongDescriptor()
1433{
1434 if (currState->fault != NoFault) {
1435 return;
1436 }
1437
1438 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1439 currState->longDesc.lookupLevel, currState->vaddr_tainted,
1440 currState->longDesc.data,
1441 currState->aarch64 ? "AArch64" : "long-desc.");
1442
1443 if ((currState->longDesc.type() == LongDescriptor::Block) ||
1444 (currState->longDesc.type() == LongDescriptor::Page)) {
1445 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1446 "xn: %d, ap: %d, af: %d, type: %d\n",
1447 currState->longDesc.lookupLevel,
1448 currState->longDesc.data,
1449 currState->longDesc.pxn(),
1450 currState->longDesc.xn(),
1451 currState->longDesc.ap(),
1452 currState->longDesc.af(),
1453 currState->longDesc.type());
1454 } else {
1455 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1456 currState->longDesc.lookupLevel,
1457 currState->longDesc.data,
1458 currState->longDesc.type());
1459 }
1460
1461 TlbEntry te;
1462
1463 switch (currState->longDesc.type()) {
1464 case LongDescriptor::Invalid:
1465 if (!currState->timing) {
1466 currState->tc = NULL;
1467 currState->req = NULL;
1468 }
1469
1470 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1471 currState->longDesc.lookupLevel,
1472 ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1473 if (currState->isFetch)
1474 currState->fault = std::make_shared<PrefetchAbort>(
1475 currState->vaddr_tainted,
1476 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1477 isStage2,
1478 ArmFault::LpaeTran);
1479 else
1480 currState->fault = std::make_shared<DataAbort>(
1481 currState->vaddr_tainted,
1482 TlbEntry::DomainType::NoAccess,
1483 currState->isWrite,
1484 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1485 isStage2,
1486 ArmFault::LpaeTran);
1487 return;
1488 case LongDescriptor::Block:
1489 case LongDescriptor::Page:
1490 {
1491 bool fault = false;
1492 bool aff = false;
1493 // Check for address size fault
1494 if (checkAddrSizeFaultAArch64(
1495 mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
1496 currState->longDesc.offsetBits()),
1497 currState->physAddrRange)) {
1498 fault = true;
1499 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1500 currState->longDesc.lookupLevel);
1501 // Check for access fault
1502 } else if (currState->longDesc.af() == 0) {
1503 fault = true;
1504 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1505 currState->longDesc.lookupLevel);
1506 aff = true;
1507 }
1508 if (fault) {
1509 if (currState->isFetch)
1510 currState->fault = std::make_shared<PrefetchAbort>(
1511 currState->vaddr_tainted,
1512 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1513 currState->longDesc.lookupLevel,
1514 isStage2,
1515 ArmFault::LpaeTran);
1516 else
1517 currState->fault = std::make_shared<DataAbort>(
1518 currState->vaddr_tainted,
1519 TlbEntry::DomainType::NoAccess, currState->isWrite,
1520 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1521 currState->longDesc.lookupLevel,
1522 isStage2,
1523 ArmFault::LpaeTran);
1524 } else {
1525 insertTableEntry(currState->longDesc, true);
1526 }
1527 }
1528 return;
1529 case LongDescriptor::Table:
1530 {
1531 // Set hierarchical permission flags
1532 currState->secureLookup = currState->secureLookup &&
1533 currState->longDesc.secureTable();
1534 currState->rwTable = currState->rwTable &&
1535 currState->longDesc.rwTable();
1536 currState->userTable = currState->userTable &&
1537 currState->longDesc.userTable();
1538 currState->xnTable = currState->xnTable ||
1539 currState->longDesc.xnTable();
1540 currState->pxnTable = currState->pxnTable ||
1541 currState->longDesc.pxnTable();
1542
1543 // Set up next level lookup
1544 Addr next_desc_addr = currState->longDesc.nextDescAddr(
1545 currState->vaddr);
1546
1547 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1548 currState->longDesc.lookupLevel,
1549 currState->longDesc.lookupLevel + 1,
1550 next_desc_addr,
1551 currState->secureLookup ? "s" : "ns");
1552
1553 // Check for address size fault
1554 if (currState->aarch64 && checkAddrSizeFaultAArch64(
1555 next_desc_addr, currState->physAddrRange)) {
1556 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1557 currState->longDesc.lookupLevel);
1558 if (currState->isFetch)
1559 currState->fault = std::make_shared<PrefetchAbort>(
1560 currState->vaddr_tainted,
1561 ArmFault::AddressSizeLL
1562 + currState->longDesc.lookupLevel,
1563 isStage2,
1564 ArmFault::LpaeTran);
1565 else
1566 currState->fault = std::make_shared<DataAbort>(
1567 currState->vaddr_tainted,
1568 TlbEntry::DomainType::NoAccess, currState->isWrite,
1569 ArmFault::AddressSizeLL
1570 + currState->longDesc.lookupLevel,
1571 isStage2,
1572 ArmFault::LpaeTran);
1573 return;
1574 }
1575
1576 // Trickbox address check
1577 currState->fault = tlb->walkTrickBoxCheck(
1578 next_desc_addr, currState->vaddr,
1579 currState->vaddr, sizeof(uint64_t),
1580 currState->isFetch, currState->isWrite,
1581 TlbEntry::DomainType::Client,
1582 toLookupLevel(currState->longDesc.lookupLevel +1));
1583
1584 if (currState->fault) {
1585 if (!currState->timing) {
1586 currState->tc = NULL;
1587 currState->req = NULL;
1588 }
1589 return;
1590 }
1591
1592 Request::Flags flag = 0;
1593 if (currState->secureLookup)
1594 flag.set(Request::SECURE);
1595
1596 currState->longDesc.lookupLevel =
1597 (LookupLevel) (currState->longDesc.lookupLevel + 1);
1598 Event *event = NULL;
1599 switch (currState->longDesc.lookupLevel) {
1600 case L1:
1601 assert(currState->aarch64);
1602 event = &doL1LongDescEvent;
1603 break;
1604 case L2:
1605 event = &doL2LongDescEvent;
1606 break;
1607 case L3:
1608 event = &doL3LongDescEvent;
1609 break;
1610 default:
1611 panic("Wrong lookup level in table walk\n");
1612 break;
1613 }
1614
1615 bool delayed;
1616 delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1617 sizeof(uint64_t), flag, -1, event,
1618 &TableWalker::doLongDescriptor);
1619 if (delayed) {
1620 currState->delayed = true;
1621 }
1622 }
1623 return;
1624 default:
1625 panic("A new type in a 2 bit field?\n");
1626 }
1627}
1628
1629void
1630TableWalker::doL2Descriptor()
1631{
1632 if (currState->fault != NoFault) {
1633 return;
1634 }
1635
1636 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1637 currState->vaddr_tainted, currState->l2Desc.data);
1638 TlbEntry te;
1639
1640 if (currState->l2Desc.invalid()) {
1641 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1642 if (!currState->timing) {
1643 currState->tc = NULL;
1644 currState->req = NULL;
1645 }
1646 if (currState->isFetch)
1647 currState->fault = std::make_shared<PrefetchAbort>(
1648 currState->vaddr_tainted,
1649 ArmFault::TranslationLL + L2,
1650 isStage2,
1651 ArmFault::VmsaTran);
1652 else
1653 currState->fault = std::make_shared<DataAbort>(
1654 currState->vaddr_tainted, currState->l1Desc.domain(),
1655 currState->isWrite, ArmFault::TranslationLL + L2,
1656 isStage2,
1657 ArmFault::VmsaTran);
1658 return;
1659 }
1660
1661 if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1662 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled
1663 * if set, do l2.Desc.setAp0() instead of generating AccessFlag0
1664 */
1665 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1666 currState->sctlr.afe, currState->l2Desc.ap());
1667
1668 currState->fault = std::make_shared<DataAbort>(
1669 currState->vaddr_tainted,
1670 TlbEntry::DomainType::NoAccess, currState->isWrite,
1671 ArmFault::AccessFlagLL + L2, isStage2,
1672 ArmFault::VmsaTran);
1673 }
1674
1675 insertTableEntry(currState->l2Desc, false);
1676}
1677
1678void
1679TableWalker::doL1DescriptorWrapper()
1680{
1681 currState = stateQueues[L1].front();
1682 currState->delayed = false;
1683 // if there's a stage2 translation object we don't need it any more
1684 if (currState->stage2Tran) {
1685 delete currState->stage2Tran;
1686 currState->stage2Tran = NULL;
1687 }
1688
1689
1690 DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data);
1691 DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",currState->l1Desc.data);
1692
1693 DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
1694 doL1Descriptor();
1695
1696 stateQueues[L1].pop_front();
1697 // Check if fault was generated
1698 if (currState->fault != NoFault) {
1699 currState->transState->finish(currState->fault, currState->req,
1700 currState->tc, currState->mode);
951 currState->tc->getCpuPtr()->clockPeriod(), flag);
952 DPRINTF(TLBVerbose,
953 "Adding to walker fifo: queue size before adding: %d\n",
954 stateQueues[start_lookup_level].size());
955 stateQueues[start_lookup_level].push_back(currState);
956 currState = NULL;
957 } else if (!currState->functional) {
958 port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t),
959 NULL, (uint8_t*) &currState->longDesc.data,
960 currState->tc->getCpuPtr()->clockPeriod(), flag);
961 doLongDescriptor();
962 f = currState->fault;
963 } else {
964 RequestPtr req = new Request(desc_addr, sizeof(uint64_t), flag,
965 masterId);
966 PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
967 pkt->dataStatic((uint8_t*) &currState->longDesc.data);
968 port.sendFunctional(pkt);
969 doLongDescriptor();
970 delete req;
971 delete pkt;
972 f = currState->fault;
973 }
974
975 return f;
976}
977
// Derive the memory attributes of a short-descriptor (VMSAv7) TLB entry
// from the descriptor's TEX[2:0]:C:B bits (packed into 'texcb') and S
// bit. With TEX remap disabled (or the MMU off) the texcb value is
// decoded directly; with TEX remap enabled, TEX[0]:C:B indexes the
// PRRR/NMRR remap registers instead.
void
TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
                      uint8_t texcb, bool s)
{
    // Note: tc and sctlr local variables are hiding tc and sctlr class
    // variables
    DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
    te.shareable = false; // default value
    te.nonCacheable = false;
    te.outerShareable = false;
    // TEX remap applies only when SCTLR.TRE == 1 and the MMU is enabled
    if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
        switch(texcb) {
          case 0: // Strongly-ordered
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.shareable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            break;
          case 1: // Shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 2: // Outer and Inner Write-Through, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 6;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 3: // Outer and Inner Write-Back, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 7;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 4: // Outer and Inner Non-cacheable
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 0;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 5: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 6: // Implementation Defined
            panic("Implementation-defined texcb value!\n");
            break;
          case 7: // Outer and Inner Write-Back, Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 5;
            te.outerAttrs = 1;
            break;
          case 8: // Non-shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = false;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 9 ... 15: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 16 ... 31: // Cacheable Memory
            // TEX[2] set: texcb[1:0] give the inner policy, texcb[3:2]
            // the outer policy; 0 in either field means non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
                te.nonCacheable = true;
            te.innerAttrs = bits(texcb, 1, 0);
            te.outerAttrs = bits(texcb, 3, 2);
            break;
          default:
            panic("More than 32 states for 5 bits?\n");
        }
    } else {
        assert(tc);
        // TEX remap: TEX[0]:C:B selects one of eight regions in the
        // (possibly NS-banked) PRRR/NMRR remap registers
        PRRR prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR,
                                    currState->tc, !currState->isSecure));
        NMRR nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR,
                                    currState->tc, !currState->isSecure));
        DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
        uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
        switch(bits(texcb, 2,0)) {
          case 0:
            curr_tr = prrr.tr0;
            curr_ir = nmrr.ir0;
            curr_or = nmrr.or0;
            te.outerShareable = (prrr.nos0 == 0);
            break;
          case 1:
            curr_tr = prrr.tr1;
            curr_ir = nmrr.ir1;
            curr_or = nmrr.or1;
            te.outerShareable = (prrr.nos1 == 0);
            break;
          case 2:
            curr_tr = prrr.tr2;
            curr_ir = nmrr.ir2;
            curr_or = nmrr.or2;
            te.outerShareable = (prrr.nos2 == 0);
            break;
          case 3:
            curr_tr = prrr.tr3;
            curr_ir = nmrr.ir3;
            curr_or = nmrr.or3;
            te.outerShareable = (prrr.nos3 == 0);
            break;
          case 4:
            curr_tr = prrr.tr4;
            curr_ir = nmrr.ir4;
            curr_or = nmrr.or4;
            te.outerShareable = (prrr.nos4 == 0);
            break;
          case 5:
            curr_tr = prrr.tr5;
            curr_ir = nmrr.ir5;
            curr_or = nmrr.or5;
            te.outerShareable = (prrr.nos5 == 0);
            break;
          case 6:
            panic("Imp defined type\n");
          case 7:
            curr_tr = prrr.tr7;
            curr_ir = nmrr.ir7;
            curr_or = nmrr.or7;
            te.outerShareable = (prrr.nos7 == 0);
            break;
        }

        // Remapped memory type; shareability of Device/Normal regions is
        // taken from the PRRR DS/NS bits selected by the descriptor's S bit
        switch(curr_tr) {
          case 0:
            DPRINTF(TLBVerbose, "StronglyOrdered\n");
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.nonCacheable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            te.shareable = true;
            break;
          case 1:
            DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
                    prrr.ds1, prrr.ds0, s);
            te.mtype = TlbEntry::MemoryType::Device;
            te.nonCacheable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            if (prrr.ds1 && s)
                te.shareable = true;
            if (prrr.ds0 && !s)
                te.shareable = true;
            break;
          case 2:
            DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
                    prrr.ns1, prrr.ns0, s);
            te.mtype = TlbEntry::MemoryType::Normal;
            if (prrr.ns1 && s)
                te.shareable = true;
            if (prrr.ns0 && !s)
                te.shareable = true;
            break;
          case 3:
            panic("Reserved type");
        }

        // For Normal memory the NMRR IR/OR fields give the remapped
        // inner/outer cacheability
        if (te.mtype == TlbEntry::MemoryType::Normal){
            switch(curr_ir) {
              case 0:
                te.nonCacheable = true;
                te.innerAttrs = 0;
                break;
              case 1:
                te.innerAttrs = 5;
                break;
              case 2:
                te.innerAttrs = 6;
                break;
              case 3:
                te.innerAttrs = 7;
                break;
            }

            switch(curr_or) {
              case 0:
                te.nonCacheable = true;
                te.outerAttrs = 0;
                break;
              case 1:
                te.outerAttrs = 1;
                break;
              case 2:
                te.outerAttrs = 2;
                break;
              case 3:
                te.outerAttrs = 3;
                break;
            }
        }
    }
    DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
            "outerAttrs: %d\n",
            te.shareable, te.innerAttrs, te.outerAttrs);
    te.setAttributes(false);
}
1184
// Derive the memory attributes of a long-descriptor (LPAE) TLB entry.
// Stage 2 entries carry their attributes directly in the descriptor's
// MemAttr field; stage 1 entries use the descriptor's AttrIndx to index
// into MAIR0/MAIR1. The decoded inner/outer attributes are mainly used
// to fill the PAR register; the memory subsystem itself only consumes
// the 'nonCacheable' flag.
void
TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
    LongDescriptor &lDescriptor)
{
    assert(_haveLPAE);

    uint8_t attr;
    uint8_t sh = lDescriptor.sh();
    // Different format and source of attributes if this is a stage 2
    // translation
    if (isStage2) {
        attr = lDescriptor.memAttr();
        // MemAttr[3:2] selects the memory type, MemAttr[1:0] refines it
        uint8_t attr_3_2 = (attr >> 2) & 0x3;
        uint8_t attr_1_0 = attr & 0x3;

        DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_3_2 == 0) {
            // 0b00xx: Strongly-ordered (xx == 0) or Device (xx != 0)
            te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                     : TlbEntry::MemoryType::Device;
            te.outerAttrs = 0;
            te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            // Normal memory; map the stage-2 cacheability encodings onto
            // the inner/outer attribute values used by the PAR format
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = attr_3_2 == 1 ? 0 :
                            attr_3_2 == 2 ? 2 : 1;
            te.innerAttrs = attr_1_0 == 1 ? 0 :
                            attr_1_0 == 2 ? 6 : 5;
            // Non-cacheable if either inner or outer is Non-cacheable
            te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        // LPAE always uses remapping of memory attributes, irrespective of the
        // value of SCTLR.TRE
        // AttrIndx[2] selects MAIR1 vs MAIR0; AttrIndx[1:0] selects the
        // byte within the chosen (possibly NS-banked) register.
        MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
        int reg_as_int = flattenMiscRegNsBanked(reg, currState->tc,
                                                !currState->isSecure);
        uint32_t mair = currState->tc->readMiscReg(reg_as_int);
        attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
        uint8_t attr_7_4 = bits(attr, 7, 4);
        uint8_t attr_3_0 = bits(attr, 3, 0);
        DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);

        // Note: the memory subsystem only cares about the 'cacheable' memory
        // attribute. The other attributes are only used to fill the PAR register
        // accordingly to provide the illusion of full support
        te.nonCacheable = false;

        // Decode the outer attribute / memory type from attr[7:4]
        switch (attr_7_4) {
          case 0x0:
            // Strongly-ordered or Device memory
            if (attr_3_0 == 0x0)
                te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            else if (attr_3_0 == 0x4)
                te.mtype = TlbEntry::MemoryType::Device;
            else
                panic("Unpredictable behavior\n");
            te.nonCacheable = true;
            te.outerAttrs = 0;
            break;
          case 0x4:
            // Normal memory, Outer Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = 0;
            if (attr_3_0 == 0x4)
                // Inner Non-cacheable
                te.nonCacheable = true;
            else if (attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          case 0x8:
          case 0x9:
          case 0xa:
          case 0xb:
          case 0xc:
          case 0xd:
          case 0xe:
          case 0xf:
            // attr[6] distinguishes write-through (0) from write-back (1);
            // attr[4] selects write-allocate for the write-back encodings
            if (attr_7_4 & 0x4) {
                te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
            } else {
                te.outerAttrs = 0x2;
            }
            // Normal memory, Outer Cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }

        // Decode the inner attribute from attr[3:0]
        switch (attr_3_0) {
          case 0x0:
            te.innerAttrs = 0x1;
            break;
          case 0x4:
            // Inner Non-cacheable unless the whole byte denoted Device
            te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
            break;
          case 0x8:
          case 0x9:
          case 0xA:
          case 0xB:
            te.innerAttrs = 6;
            break;
          case 0xC:
          case 0xD:
          case 0xE:
          case 0xF:
            te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
    }

    // SH[1:0]: 0b10 = Outer Shareable, 0b11 = Inner Shareable
    te.outerShareable = sh == 2;
    te.shareable = (sh & 0x2) ? true : false;
    te.setAttributes(true);
    te.attributes |= (uint64_t) attr << 56;
}
1310
1311void
1312TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te, uint8_t attrIndx,
1313 uint8_t sh)
1314{
1315 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1316
1317 // Select MAIR
1318 uint64_t mair;
1319 switch (currState->el) {
1320 case EL0:
1321 case EL1:
1322 mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1323 break;
1324 case EL2:
1325 mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1326 break;
1327 case EL3:
1328 mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1329 break;
1330 default:
1331 panic("Invalid exception level");
1332 break;
1333 }
1334
1335 // Select attributes
1336 uint8_t attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1337 uint8_t attr_lo = bits(attr, 3, 0);
1338 uint8_t attr_hi = bits(attr, 7, 4);
1339
1340 // Memory type
1341 te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;
1342
1343 // Cacheability
1344 te.nonCacheable = false;
1345 if (te.mtype == TlbEntry::MemoryType::Device || // Device memory
1346 attr_hi == 0x8 || // Normal memory, Outer Non-cacheable
1347 attr_lo == 0x8) { // Normal memory, Inner Non-cacheable
1348 te.nonCacheable = true;
1349 }
1350
1351 te.shareable = sh == 2;
1352 te.outerShareable = (sh & 0x2) ? true : false;
1353 // Attributes formatted according to the 64-bit PAR
1354 te.attributes = ((uint64_t) attr << 56) |
1355 (1 << 11) | // LPAE bit
1356 (te.ns << 9) | // NS bit
1357 (sh << 7);
1358}
1359
1360void
1361TableWalker::doL1Descriptor()
1362{
1363 if (currState->fault != NoFault) {
1364 return;
1365 }
1366
1367 DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1368 currState->vaddr_tainted, currState->l1Desc.data);
1369 TlbEntry te;
1370
1371 switch (currState->l1Desc.type()) {
1372 case L1Descriptor::Ignore:
1373 case L1Descriptor::Reserved:
1374 if (!currState->timing) {
1375 currState->tc = NULL;
1376 currState->req = NULL;
1377 }
1378 DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1379 if (currState->isFetch)
1380 currState->fault =
1381 std::make_shared<PrefetchAbort>(
1382 currState->vaddr_tainted,
1383 ArmFault::TranslationLL + L1,
1384 isStage2,
1385 ArmFault::VmsaTran);
1386 else
1387 currState->fault =
1388 std::make_shared<DataAbort>(
1389 currState->vaddr_tainted,
1390 TlbEntry::DomainType::NoAccess,
1391 currState->isWrite,
1392 ArmFault::TranslationLL + L1, isStage2,
1393 ArmFault::VmsaTran);
1394 return;
1395 case L1Descriptor::Section:
1396 if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1397 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
1398 * enabled if set, do l1.Desc.setAp0() instead of generating
1399 * AccessFlag0
1400 */
1401
1402 currState->fault = std::make_shared<DataAbort>(
1403 currState->vaddr_tainted,
1404 currState->l1Desc.domain(),
1405 currState->isWrite,
1406 ArmFault::AccessFlagLL + L1,
1407 isStage2,
1408 ArmFault::VmsaTran);
1409 }
1410 if (currState->l1Desc.supersection()) {
1411 panic("Haven't implemented supersections\n");
1412 }
1413 insertTableEntry(currState->l1Desc, false);
1414 return;
1415 case L1Descriptor::PageTable:
1416 {
1417 Addr l2desc_addr;
1418 l2desc_addr = currState->l1Desc.l2Addr() |
1419 (bits(currState->vaddr, 19, 12) << 2);
1420 DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1421 l2desc_addr, currState->isSecure ? "s" : "ns");
1422
1423 // Trickbox address check
1424 currState->fault = tlb->walkTrickBoxCheck(
1425 l2desc_addr, currState->isSecure, currState->vaddr,
1426 sizeof(uint32_t), currState->isFetch, currState->isWrite,
1427 currState->l1Desc.domain(), L2);
1428
1429 if (currState->fault) {
1430 if (!currState->timing) {
1431 currState->tc = NULL;
1432 currState->req = NULL;
1433 }
1434 return;
1435 }
1436
1437 Request::Flags flag = 0;
1438 if (currState->isSecure)
1439 flag.set(Request::SECURE);
1440
1441 bool delayed;
1442 delayed = fetchDescriptor(l2desc_addr,
1443 (uint8_t*)&currState->l2Desc.data,
1444 sizeof(uint32_t), flag, -1, &doL2DescEvent,
1445 &TableWalker::doL2Descriptor);
1446 if (delayed) {
1447 currState->delayed = true;
1448 }
1449
1450 return;
1451 }
1452 default:
1453 panic("A new type in a 2 bit field?\n");
1454 }
1455}
1456
1457void
1458TableWalker::doLongDescriptor()
1459{
1460 if (currState->fault != NoFault) {
1461 return;
1462 }
1463
1464 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1465 currState->longDesc.lookupLevel, currState->vaddr_tainted,
1466 currState->longDesc.data,
1467 currState->aarch64 ? "AArch64" : "long-desc.");
1468
1469 if ((currState->longDesc.type() == LongDescriptor::Block) ||
1470 (currState->longDesc.type() == LongDescriptor::Page)) {
1471 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1472 "xn: %d, ap: %d, af: %d, type: %d\n",
1473 currState->longDesc.lookupLevel,
1474 currState->longDesc.data,
1475 currState->longDesc.pxn(),
1476 currState->longDesc.xn(),
1477 currState->longDesc.ap(),
1478 currState->longDesc.af(),
1479 currState->longDesc.type());
1480 } else {
1481 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1482 currState->longDesc.lookupLevel,
1483 currState->longDesc.data,
1484 currState->longDesc.type());
1485 }
1486
1487 TlbEntry te;
1488
1489 switch (currState->longDesc.type()) {
1490 case LongDescriptor::Invalid:
1491 if (!currState->timing) {
1492 currState->tc = NULL;
1493 currState->req = NULL;
1494 }
1495
1496 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1497 currState->longDesc.lookupLevel,
1498 ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1499 if (currState->isFetch)
1500 currState->fault = std::make_shared<PrefetchAbort>(
1501 currState->vaddr_tainted,
1502 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1503 isStage2,
1504 ArmFault::LpaeTran);
1505 else
1506 currState->fault = std::make_shared<DataAbort>(
1507 currState->vaddr_tainted,
1508 TlbEntry::DomainType::NoAccess,
1509 currState->isWrite,
1510 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1511 isStage2,
1512 ArmFault::LpaeTran);
1513 return;
1514 case LongDescriptor::Block:
1515 case LongDescriptor::Page:
1516 {
1517 bool fault = false;
1518 bool aff = false;
1519 // Check for address size fault
1520 if (checkAddrSizeFaultAArch64(
1521 mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
1522 currState->longDesc.offsetBits()),
1523 currState->physAddrRange)) {
1524 fault = true;
1525 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1526 currState->longDesc.lookupLevel);
1527 // Check for access fault
1528 } else if (currState->longDesc.af() == 0) {
1529 fault = true;
1530 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1531 currState->longDesc.lookupLevel);
1532 aff = true;
1533 }
1534 if (fault) {
1535 if (currState->isFetch)
1536 currState->fault = std::make_shared<PrefetchAbort>(
1537 currState->vaddr_tainted,
1538 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1539 currState->longDesc.lookupLevel,
1540 isStage2,
1541 ArmFault::LpaeTran);
1542 else
1543 currState->fault = std::make_shared<DataAbort>(
1544 currState->vaddr_tainted,
1545 TlbEntry::DomainType::NoAccess, currState->isWrite,
1546 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1547 currState->longDesc.lookupLevel,
1548 isStage2,
1549 ArmFault::LpaeTran);
1550 } else {
1551 insertTableEntry(currState->longDesc, true);
1552 }
1553 }
1554 return;
1555 case LongDescriptor::Table:
1556 {
1557 // Set hierarchical permission flags
1558 currState->secureLookup = currState->secureLookup &&
1559 currState->longDesc.secureTable();
1560 currState->rwTable = currState->rwTable &&
1561 currState->longDesc.rwTable();
1562 currState->userTable = currState->userTable &&
1563 currState->longDesc.userTable();
1564 currState->xnTable = currState->xnTable ||
1565 currState->longDesc.xnTable();
1566 currState->pxnTable = currState->pxnTable ||
1567 currState->longDesc.pxnTable();
1568
1569 // Set up next level lookup
1570 Addr next_desc_addr = currState->longDesc.nextDescAddr(
1571 currState->vaddr);
1572
1573 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1574 currState->longDesc.lookupLevel,
1575 currState->longDesc.lookupLevel + 1,
1576 next_desc_addr,
1577 currState->secureLookup ? "s" : "ns");
1578
1579 // Check for address size fault
1580 if (currState->aarch64 && checkAddrSizeFaultAArch64(
1581 next_desc_addr, currState->physAddrRange)) {
1582 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1583 currState->longDesc.lookupLevel);
1584 if (currState->isFetch)
1585 currState->fault = std::make_shared<PrefetchAbort>(
1586 currState->vaddr_tainted,
1587 ArmFault::AddressSizeLL
1588 + currState->longDesc.lookupLevel,
1589 isStage2,
1590 ArmFault::LpaeTran);
1591 else
1592 currState->fault = std::make_shared<DataAbort>(
1593 currState->vaddr_tainted,
1594 TlbEntry::DomainType::NoAccess, currState->isWrite,
1595 ArmFault::AddressSizeLL
1596 + currState->longDesc.lookupLevel,
1597 isStage2,
1598 ArmFault::LpaeTran);
1599 return;
1600 }
1601
1602 // Trickbox address check
1603 currState->fault = tlb->walkTrickBoxCheck(
1604 next_desc_addr, currState->vaddr,
1605 currState->vaddr, sizeof(uint64_t),
1606 currState->isFetch, currState->isWrite,
1607 TlbEntry::DomainType::Client,
1608 toLookupLevel(currState->longDesc.lookupLevel +1));
1609
1610 if (currState->fault) {
1611 if (!currState->timing) {
1612 currState->tc = NULL;
1613 currState->req = NULL;
1614 }
1615 return;
1616 }
1617
1618 Request::Flags flag = 0;
1619 if (currState->secureLookup)
1620 flag.set(Request::SECURE);
1621
1622 currState->longDesc.lookupLevel =
1623 (LookupLevel) (currState->longDesc.lookupLevel + 1);
1624 Event *event = NULL;
1625 switch (currState->longDesc.lookupLevel) {
1626 case L1:
1627 assert(currState->aarch64);
1628 event = &doL1LongDescEvent;
1629 break;
1630 case L2:
1631 event = &doL2LongDescEvent;
1632 break;
1633 case L3:
1634 event = &doL3LongDescEvent;
1635 break;
1636 default:
1637 panic("Wrong lookup level in table walk\n");
1638 break;
1639 }
1640
1641 bool delayed;
1642 delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1643 sizeof(uint64_t), flag, -1, event,
1644 &TableWalker::doLongDescriptor);
1645 if (delayed) {
1646 currState->delayed = true;
1647 }
1648 }
1649 return;
1650 default:
1651 panic("A new type in a 2 bit field?\n");
1652 }
1653}
1654
1655void
1656TableWalker::doL2Descriptor()
1657{
1658 if (currState->fault != NoFault) {
1659 return;
1660 }
1661
1662 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1663 currState->vaddr_tainted, currState->l2Desc.data);
1664 TlbEntry te;
1665
1666 if (currState->l2Desc.invalid()) {
1667 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1668 if (!currState->timing) {
1669 currState->tc = NULL;
1670 currState->req = NULL;
1671 }
1672 if (currState->isFetch)
1673 currState->fault = std::make_shared<PrefetchAbort>(
1674 currState->vaddr_tainted,
1675 ArmFault::TranslationLL + L2,
1676 isStage2,
1677 ArmFault::VmsaTran);
1678 else
1679 currState->fault = std::make_shared<DataAbort>(
1680 currState->vaddr_tainted, currState->l1Desc.domain(),
1681 currState->isWrite, ArmFault::TranslationLL + L2,
1682 isStage2,
1683 ArmFault::VmsaTran);
1684 return;
1685 }
1686
1687 if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1688 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled
1689 * if set, do l2.Desc.setAp0() instead of generating AccessFlag0
1690 */
1691 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1692 currState->sctlr.afe, currState->l2Desc.ap());
1693
1694 currState->fault = std::make_shared<DataAbort>(
1695 currState->vaddr_tainted,
1696 TlbEntry::DomainType::NoAccess, currState->isWrite,
1697 ArmFault::AccessFlagLL + L2, isStage2,
1698 ArmFault::VmsaTran);
1699 }
1700
1701 insertTableEntry(currState->l2Desc, false);
1702}
1703
1704void
1705TableWalker::doL1DescriptorWrapper()
1706{
1707 currState = stateQueues[L1].front();
1708 currState->delayed = false;
1709 // if there's a stage2 translation object we don't need it any more
1710 if (currState->stage2Tran) {
1711 delete currState->stage2Tran;
1712 currState->stage2Tran = NULL;
1713 }
1714
1715
1716 DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data);
1717 DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",currState->l1Desc.data);
1718
1719 DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
1720 doL1Descriptor();
1721
1722 stateQueues[L1].pop_front();
1723 // Check if fault was generated
1724 if (currState->fault != NoFault) {
1725 currState->transState->finish(currState->fault, currState->req,
1726 currState->tc, currState->mode);
1727 statWalksShortTerminatedAtLevel[0]++;
1701
1702 pending = false;
1703 nextWalk(currState->tc);
1704
1705 currState->req = NULL;
1706 currState->tc = NULL;
1707 currState->delayed = false;
1708 delete currState;
1709 }
1710 else if (!currState->delayed) {
1711 // delay is not set so there is no L2 to do
1712 // Don't finish the translation if a stage 2 look up is underway
1713 if (!currState->doingStage2) {
1728
1729 pending = false;
1730 nextWalk(currState->tc);
1731
1732 currState->req = NULL;
1733 currState->tc = NULL;
1734 currState->delayed = false;
1735 delete currState;
1736 }
1737 else if (!currState->delayed) {
1738 // delay is not set so there is no L2 to do
1739 // Don't finish the translation if a stage 2 look up is underway
1740 if (!currState->doingStage2) {
1741 statWalkServiceTime.sample(curTick() - currState->startTime);
1714 DPRINTF(TLBVerbose, "calling translateTiming again\n");
1715 currState->fault = tlb->translateTiming(currState->req, currState->tc,
1716 currState->transState, currState->mode);
1742 DPRINTF(TLBVerbose, "calling translateTiming again\n");
1743 currState->fault = tlb->translateTiming(currState->req, currState->tc,
1744 currState->transState, currState->mode);
1745 statWalksShortTerminatedAtLevel[0]++;
1717 }
1718
1719 pending = false;
1720 nextWalk(currState->tc);
1721
1722 currState->req = NULL;
1723 currState->tc = NULL;
1724 currState->delayed = false;
1725 delete currState;
1726 } else {
1727 // need to do L2 descriptor
1728 stateQueues[L2].push_back(currState);
1729 }
1730 currState = NULL;
1731}
1732
1733void
1734TableWalker::doL2DescriptorWrapper()
1735{
1736 currState = stateQueues[L2].front();
1737 assert(currState->delayed);
1738 // if there's a stage2 translation object we don't need it any more
1739 if (currState->stage2Tran) {
1740 delete currState->stage2Tran;
1741 currState->stage2Tran = NULL;
1742 }
1743
1744 DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
1745 currState->vaddr_tainted);
1746 doL2Descriptor();
1747
1748 // Check if fault was generated
1749 if (currState->fault != NoFault) {
1750 currState->transState->finish(currState->fault, currState->req,
1751 currState->tc, currState->mode);
1746 }
1747
1748 pending = false;
1749 nextWalk(currState->tc);
1750
1751 currState->req = NULL;
1752 currState->tc = NULL;
1753 currState->delayed = false;
1754 delete currState;
1755 } else {
1756 // need to do L2 descriptor
1757 stateQueues[L2].push_back(currState);
1758 }
1759 currState = NULL;
1760}
1761
1762void
1763TableWalker::doL2DescriptorWrapper()
1764{
1765 currState = stateQueues[L2].front();
1766 assert(currState->delayed);
1767 // if there's a stage2 translation object we don't need it any more
1768 if (currState->stage2Tran) {
1769 delete currState->stage2Tran;
1770 currState->stage2Tran = NULL;
1771 }
1772
1773 DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
1774 currState->vaddr_tainted);
1775 doL2Descriptor();
1776
1777 // Check if fault was generated
1778 if (currState->fault != NoFault) {
1779 currState->transState->finish(currState->fault, currState->req,
1780 currState->tc, currState->mode);
1781 statWalksShortTerminatedAtLevel[1]++;
1752 }
1753 else {
1754 // Don't finish the translation if a stage 2 look up is underway
1755 if (!currState->doingStage2) {
1782 }
1783 else {
1784 // Don't finish the translation if a stage 2 look up is underway
1785 if (!currState->doingStage2) {
1786 statWalkServiceTime.sample(curTick() - currState->startTime);
1756 DPRINTF(TLBVerbose, "calling translateTiming again\n");
1757 currState->fault = tlb->translateTiming(currState->req,
1758 currState->tc, currState->transState, currState->mode);
1787 DPRINTF(TLBVerbose, "calling translateTiming again\n");
1788 currState->fault = tlb->translateTiming(currState->req,
1789 currState->tc, currState->transState, currState->mode);
1790 statWalksShortTerminatedAtLevel[1]++;
1759 }
1760 }
1761
1762
1763 stateQueues[L2].pop_front();
1764 pending = false;
1765 nextWalk(currState->tc);
1766
1767 currState->req = NULL;
1768 currState->tc = NULL;
1769 currState->delayed = false;
1770
1771 delete currState;
1772 currState = NULL;
1773}
1774
1775void
1776TableWalker::doL0LongDescriptorWrapper()
1777{
1778 doLongDescriptorWrapper(L0);
1779}
1780
1781void
1782TableWalker::doL1LongDescriptorWrapper()
1783{
1784 doLongDescriptorWrapper(L1);
1785}
1786
1787void
1788TableWalker::doL2LongDescriptorWrapper()
1789{
1790 doLongDescriptorWrapper(L2);
1791}
1792
1793void
1794TableWalker::doL3LongDescriptorWrapper()
1795{
1796 doLongDescriptorWrapper(L3);
1797}
1798
1799void
1800TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
1801{
1802 currState = stateQueues[curr_lookup_level].front();
1803 assert(curr_lookup_level == currState->longDesc.lookupLevel);
1804 currState->delayed = false;
1805
1806 // if there's a stage2 translation object we don't need it any more
1807 if (currState->stage2Tran) {
1808 delete currState->stage2Tran;
1809 currState->stage2Tran = NULL;
1810 }
1811
1812 DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
1813 currState->vaddr_tainted);
1814 doLongDescriptor();
1815
1816 stateQueues[curr_lookup_level].pop_front();
1817
1818 if (currState->fault != NoFault) {
1819 // A fault was generated
1820 currState->transState->finish(currState->fault, currState->req,
1821 currState->tc, currState->mode);
1822
1823 pending = false;
1824 nextWalk(currState->tc);
1825
1826 currState->req = NULL;
1827 currState->tc = NULL;
1828 currState->delayed = false;
1829 delete currState;
1830 } else if (!currState->delayed) {
1831 // No additional lookups required
1832 // Don't finish the translation if a stage 2 look up is underway
1833 if (!currState->doingStage2) {
1834 DPRINTF(TLBVerbose, "calling translateTiming again\n");
1791 }
1792 }
1793
1794
1795 stateQueues[L2].pop_front();
1796 pending = false;
1797 nextWalk(currState->tc);
1798
1799 currState->req = NULL;
1800 currState->tc = NULL;
1801 currState->delayed = false;
1802
1803 delete currState;
1804 currState = NULL;
1805}
1806
1807void
1808TableWalker::doL0LongDescriptorWrapper()
1809{
1810 doLongDescriptorWrapper(L0);
1811}
1812
1813void
1814TableWalker::doL1LongDescriptorWrapper()
1815{
1816 doLongDescriptorWrapper(L1);
1817}
1818
1819void
1820TableWalker::doL2LongDescriptorWrapper()
1821{
1822 doLongDescriptorWrapper(L2);
1823}
1824
1825void
1826TableWalker::doL3LongDescriptorWrapper()
1827{
1828 doLongDescriptorWrapper(L3);
1829}
1830
1831void
1832TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
1833{
1834 currState = stateQueues[curr_lookup_level].front();
1835 assert(curr_lookup_level == currState->longDesc.lookupLevel);
1836 currState->delayed = false;
1837
1838 // if there's a stage2 translation object we don't need it any more
1839 if (currState->stage2Tran) {
1840 delete currState->stage2Tran;
1841 currState->stage2Tran = NULL;
1842 }
1843
1844 DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
1845 currState->vaddr_tainted);
1846 doLongDescriptor();
1847
1848 stateQueues[curr_lookup_level].pop_front();
1849
1850 if (currState->fault != NoFault) {
1851 // A fault was generated
1852 currState->transState->finish(currState->fault, currState->req,
1853 currState->tc, currState->mode);
1854
1855 pending = false;
1856 nextWalk(currState->tc);
1857
1858 currState->req = NULL;
1859 currState->tc = NULL;
1860 currState->delayed = false;
1861 delete currState;
1862 } else if (!currState->delayed) {
1863 // No additional lookups required
1864 // Don't finish the translation if a stage 2 look up is underway
1865 if (!currState->doingStage2) {
1866 DPRINTF(TLBVerbose, "calling translateTiming again\n");
1867 statWalkServiceTime.sample(curTick() - currState->startTime);
1835 currState->fault = tlb->translateTiming(currState->req, currState->tc,
1836 currState->transState,
1837 currState->mode);
1868 currState->fault = tlb->translateTiming(currState->req, currState->tc,
1869 currState->transState,
1870 currState->mode);
1871 statWalksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
1838 }
1839
1840 pending = false;
1841 nextWalk(currState->tc);
1842
1843 currState->req = NULL;
1844 currState->tc = NULL;
1845 currState->delayed = false;
1846 delete currState;
1847 } else {
1848 if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
1849 panic("Max. number of lookups already reached in table walk\n");
1850 // Need to perform additional lookups
1851 stateQueues[currState->longDesc.lookupLevel].push_back(currState);
1852 }
1853 currState = NULL;
1854}
1855
1856
1857void
1858TableWalker::nextWalk(ThreadContext *tc)
1859{
1860 if (pendingQueue.size())
1861 schedule(doProcessEvent, clockEdge(Cycles(1)));
1862 else
1863 completeDrain();
1864}
1865
1866bool
1867TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
1868 Request::Flags flags, int queueIndex, Event *event,
1869 void (TableWalker::*doDescriptor)())
1870{
1871 bool isTiming = currState->timing;
1872
1873 // do the requests for the page table descriptors have to go through the
1874 // second stage MMU
1875 if (currState->stage2Req) {
1876 Fault fault;
1877 flags = flags | TLB::MustBeOne;
1878
1879 if (isTiming) {
1880 Stage2MMU::Stage2Translation *tran = new
1881 Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
1882 currState->vaddr);
1883 currState->stage2Tran = tran;
1884 stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
1885 flags, masterId);
1886 fault = tran->fault;
1887 } else {
1888 fault = stage2Mmu->readDataUntimed(currState->tc,
1889 currState->vaddr, descAddr, data, numBytes, flags, masterId,
1890 currState->functional);
1891 }
1892
1893 if (fault != NoFault) {
1894 currState->fault = fault;
1895 }
1896 if (isTiming) {
1897 if (queueIndex >= 0) {
1898 DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
1899 stateQueues[queueIndex].size());
1900 stateQueues[queueIndex].push_back(currState);
1901 currState = NULL;
1902 }
1903 } else {
1904 (this->*doDescriptor)();
1905 }
1906 } else {
1907 if (isTiming) {
1908 port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
1872 }
1873
1874 pending = false;
1875 nextWalk(currState->tc);
1876
1877 currState->req = NULL;
1878 currState->tc = NULL;
1879 currState->delayed = false;
1880 delete currState;
1881 } else {
1882 if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
1883 panic("Max. number of lookups already reached in table walk\n");
1884 // Need to perform additional lookups
1885 stateQueues[currState->longDesc.lookupLevel].push_back(currState);
1886 }
1887 currState = NULL;
1888}
1889
1890
1891void
1892TableWalker::nextWalk(ThreadContext *tc)
1893{
1894 if (pendingQueue.size())
1895 schedule(doProcessEvent, clockEdge(Cycles(1)));
1896 else
1897 completeDrain();
1898}
1899
1900bool
1901TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
1902 Request::Flags flags, int queueIndex, Event *event,
1903 void (TableWalker::*doDescriptor)())
1904{
1905 bool isTiming = currState->timing;
1906
1907 // do the requests for the page table descriptors have to go through the
1908 // second stage MMU
1909 if (currState->stage2Req) {
1910 Fault fault;
1911 flags = flags | TLB::MustBeOne;
1912
1913 if (isTiming) {
1914 Stage2MMU::Stage2Translation *tran = new
1915 Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
1916 currState->vaddr);
1917 currState->stage2Tran = tran;
1918 stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
1919 flags, masterId);
1920 fault = tran->fault;
1921 } else {
1922 fault = stage2Mmu->readDataUntimed(currState->tc,
1923 currState->vaddr, descAddr, data, numBytes, flags, masterId,
1924 currState->functional);
1925 }
1926
1927 if (fault != NoFault) {
1928 currState->fault = fault;
1929 }
1930 if (isTiming) {
1931 if (queueIndex >= 0) {
1932 DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
1933 stateQueues[queueIndex].size());
1934 stateQueues[queueIndex].push_back(currState);
1935 currState = NULL;
1936 }
1937 } else {
1938 (this->*doDescriptor)();
1939 }
1940 } else {
1941 if (isTiming) {
1942 port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
1909 currState->tc->getCpuPtr()->clockPeriod(), flags);
1943 currState->tc->getCpuPtr()->clockPeriod(),flags);
1910 if (queueIndex >= 0) {
1911 DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
1912 stateQueues[queueIndex].size());
1913 stateQueues[queueIndex].push_back(currState);
1914 currState = NULL;
1915 }
1916 } else if (!currState->functional) {
1917 port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
1918 currState->tc->getCpuPtr()->clockPeriod(), flags);
1919 (this->*doDescriptor)();
1920 } else {
1921 RequestPtr req = new Request(descAddr, numBytes, flags, masterId);
1922 req->taskId(ContextSwitchTaskId::DMA);
1923 PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
1924 pkt->dataStatic(data);
1925 port.sendFunctional(pkt);
1926 (this->*doDescriptor)();
1927 delete req;
1928 delete pkt;
1929 }
1930 }
1931 return (isTiming);
1932}
1933
1934void
1935TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
1936{
1937 TlbEntry te;
1938
1939 // Create and fill a new page table entry
1940 te.valid = true;
1941 te.longDescFormat = longDescriptor;
1942 te.isHyp = currState->isHyp;
1943 te.asid = currState->asid;
1944 te.vmid = currState->vmid;
1945 te.N = descriptor.offsetBits();
1946 te.vpn = currState->vaddr >> te.N;
1947 te.size = (1<<te.N) - 1;
1948 te.pfn = descriptor.pfn();
1949 te.domain = descriptor.domain();
1950 te.lookupLevel = descriptor.lookupLevel;
1951 te.ns = !descriptor.secure(haveSecurity, currState) || isStage2;
1952 te.nstid = !currState->isSecure;
1953 te.xn = descriptor.xn();
1954 if (currState->aarch64)
1955 te.el = currState->el;
1956 else
1957 te.el = 1;
1958
1944 if (queueIndex >= 0) {
1945 DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
1946 stateQueues[queueIndex].size());
1947 stateQueues[queueIndex].push_back(currState);
1948 currState = NULL;
1949 }
1950 } else if (!currState->functional) {
1951 port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
1952 currState->tc->getCpuPtr()->clockPeriod(), flags);
1953 (this->*doDescriptor)();
1954 } else {
1955 RequestPtr req = new Request(descAddr, numBytes, flags, masterId);
1956 req->taskId(ContextSwitchTaskId::DMA);
1957 PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
1958 pkt->dataStatic(data);
1959 port.sendFunctional(pkt);
1960 (this->*doDescriptor)();
1961 delete req;
1962 delete pkt;
1963 }
1964 }
1965 return (isTiming);
1966}
1967
1968void
1969TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
1970{
1971 TlbEntry te;
1972
1973 // Create and fill a new page table entry
1974 te.valid = true;
1975 te.longDescFormat = longDescriptor;
1976 te.isHyp = currState->isHyp;
1977 te.asid = currState->asid;
1978 te.vmid = currState->vmid;
1979 te.N = descriptor.offsetBits();
1980 te.vpn = currState->vaddr >> te.N;
1981 te.size = (1<<te.N) - 1;
1982 te.pfn = descriptor.pfn();
1983 te.domain = descriptor.domain();
1984 te.lookupLevel = descriptor.lookupLevel;
1985 te.ns = !descriptor.secure(haveSecurity, currState) || isStage2;
1986 te.nstid = !currState->isSecure;
1987 te.xn = descriptor.xn();
1988 if (currState->aarch64)
1989 te.el = currState->el;
1990 else
1991 te.el = 1;
1992
1993 statPageSizes[pageSizeNtoStatBin(te.N)]++;
1994 statRequestOrigin[COMPLETED][currState->isFetch]++;
1995
1959 // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
1960 // as global
1961 te.global = descriptor.global(currState) || isStage2;
1962 if (longDescriptor) {
1963 LongDescriptor lDescriptor =
1964 dynamic_cast<LongDescriptor &>(descriptor);
1965
1966 te.xn |= currState->xnTable;
1967 te.pxn = currState->pxnTable || lDescriptor.pxn();
1968 if (isStage2) {
1969 // this is actually the HAP field, but its stored in the same bit
1970 // possitions as the AP field in a stage 1 translation.
1971 te.hap = lDescriptor.ap();
1972 } else {
1973 te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
1974 (currState->userTable && (descriptor.ap() & 0x1));
1975 }
1976 if (currState->aarch64)
1977 memAttrsAArch64(currState->tc, te, currState->longDesc.attrIndx(),
1978 currState->longDesc.sh());
1979 else
1980 memAttrsLPAE(currState->tc, te, lDescriptor);
1981 } else {
1982 te.ap = descriptor.ap();
1983 memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
1984 descriptor.shareable());
1985 }
1986
1987 // Debug output
1988 DPRINTF(TLB, descriptor.dbgHeader().c_str());
1989 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
1990 te.N, te.pfn, te.size, te.global, te.valid);
1991 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
1992 "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
1993 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
1994 te.nonCacheable, te.ns);
1995 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
1996 descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
1997 descriptor.getRawData());
1998
1999 // Insert the entry into the TLB
2000 tlb->insert(currState->vaddr, te);
2001 if (!currState->timing) {
2002 currState->tc = NULL;
2003 currState->req = NULL;
2004 }
2005}
2006
2007ArmISA::TableWalker *
2008ArmTableWalkerParams::create()
2009{
2010 return new ArmISA::TableWalker(this);
2011}
2012
2013LookupLevel
2014TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2015{
2016 switch (lookup_level_as_int) {
2017 case L1:
2018 return L1;
2019 case L2:
2020 return L2;
2021 case L3:
2022 return L3;
2023 default:
2024 panic("Invalid lookup level conversion");
2025 }
2026}
1996 // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
1997 // as global
1998 te.global = descriptor.global(currState) || isStage2;
1999 if (longDescriptor) {
2000 LongDescriptor lDescriptor =
2001 dynamic_cast<LongDescriptor &>(descriptor);
2002
2003 te.xn |= currState->xnTable;
2004 te.pxn = currState->pxnTable || lDescriptor.pxn();
2005 if (isStage2) {
2006 // this is actually the HAP field, but its stored in the same bit
2007 // possitions as the AP field in a stage 1 translation.
2008 te.hap = lDescriptor.ap();
2009 } else {
2010 te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
2011 (currState->userTable && (descriptor.ap() & 0x1));
2012 }
2013 if (currState->aarch64)
2014 memAttrsAArch64(currState->tc, te, currState->longDesc.attrIndx(),
2015 currState->longDesc.sh());
2016 else
2017 memAttrsLPAE(currState->tc, te, lDescriptor);
2018 } else {
2019 te.ap = descriptor.ap();
2020 memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2021 descriptor.shareable());
2022 }
2023
2024 // Debug output
2025 DPRINTF(TLB, descriptor.dbgHeader().c_str());
2026 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2027 te.N, te.pfn, te.size, te.global, te.valid);
2028 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2029 "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2030 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2031 te.nonCacheable, te.ns);
2032 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2033 descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2034 descriptor.getRawData());
2035
2036 // Insert the entry into the TLB
2037 tlb->insert(currState->vaddr, te);
2038 if (!currState->timing) {
2039 currState->tc = NULL;
2040 currState->req = NULL;
2041 }
2042}
2043
2044ArmISA::TableWalker *
2045ArmTableWalkerParams::create()
2046{
2047 return new ArmISA::TableWalker(this);
2048}
2049
2050LookupLevel
2051TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2052{
2053 switch (lookup_level_as_int) {
2054 case L1:
2055 return L1;
2056 case L2:
2057 return L2;
2058 case L3:
2059 return L3;
2060 default:
2061 panic("Invalid lookup level conversion");
2062 }
2063}
2064
2065/* this method keeps track of the table walker queue's residency, so
2066 * needs to be called whenever requests start and complete. */
2067void
2068TableWalker::pendingChange()
2069{
2070 unsigned n = pendingQueue.size();
2071 if ((currState != NULL) && (currState != pendingQueue.front())) {
2072 ++n;
2073 }
2074
2075 if (n != pendingReqs) {
2076 Tick now = curTick();
2077 statPendingWalks.sample(pendingReqs, now - pendingChangeTick);
2078 pendingReqs = n;
2079 pendingChangeTick = now;
2080 }
2081}
2082
2083uint8_t
2084TableWalker::pageSizeNtoStatBin(uint8_t N)
2085{
2086 /* for statPageSizes */
2087 switch(N) {
2088 case 12: return 0; // 4K
2089 case 14: return 1; // 16K (using 16K granule in v8-64)
2090 case 16: return 2; // 64K
2091 case 20: return 3; // 1M
2092 case 21: return 4; // 2M-LPAE
2093 case 24: return 5; // 16M
2094 case 25: return 6; // 32M (using 16K granule in v8-64)
2095 case 29: return 7; // 512M (using 64K granule in v8-64)
2096 case 30: return 8; // 1G-LPAE
2097 default:
2098 panic("unknown page size");
2099 return 255;
2100 }
2101}
2102
2103void
2104TableWalker::regStats()
2105{
2106 statWalks
2107 .name(name() + ".walks")
2108 .desc("Table walker walks requested")
2109 ;
2110
2111 statWalksShortDescriptor
2112 .name(name() + ".walksShort")
2113 .desc("Table walker walks initiated with short descriptors")
2114 .flags(Stats::nozero)
2115 ;
2116
2117 statWalksLongDescriptor
2118 .name(name() + ".walksLong")
2119 .desc("Table walker walks initiated with long descriptors")
2120 .flags(Stats::nozero)
2121 ;
2122
2123 statWalksShortTerminatedAtLevel
2124 .init(2)
2125 .name(name() + ".walksShortTerminationLevel")
2126 .desc("Level at which table walker walks "
2127 "with short descriptors terminate")
2128 .flags(Stats::nozero)
2129 ;
2130 statWalksShortTerminatedAtLevel.subname(0, "Level1");
2131 statWalksShortTerminatedAtLevel.subname(1, "Level2");
2132
2133 statWalksLongTerminatedAtLevel
2134 .init(4)
2135 .name(name() + ".walksLongTerminationLevel")
2136 .desc("Level at which table walker walks "
2137 "with long descriptors terminate")
2138 .flags(Stats::nozero)
2139 ;
2140 statWalksLongTerminatedAtLevel.subname(0, "Level0");
2141 statWalksLongTerminatedAtLevel.subname(1, "Level1");
2142 statWalksLongTerminatedAtLevel.subname(2, "Level2");
2143 statWalksLongTerminatedAtLevel.subname(3, "Level3");
2144
2145 statSquashedBefore
2146 .name(name() + ".walksSquashedBefore")
2147 .desc("Table walks squashed before starting")
2148 .flags(Stats::nozero)
2149 ;
2150
2151 statSquashedAfter
2152 .name(name() + ".walksSquashedAfter")
2153 .desc("Table walks squashed after completion")
2154 .flags(Stats::nozero)
2155 ;
2156
2157 statWalkWaitTime
2158 .init(16)
2159 .name(name() + ".walkWaitTime")
2160 .desc("Table walker wait (enqueue to first request) latency")
2161 .flags(Stats::pdf | Stats::nozero | Stats::nonan)
2162 ;
2163
2164 statWalkServiceTime
2165 .init(16)
2166 .name(name() + ".walkCompletionTime")
2167 .desc("Table walker service (enqueue to completion) latency")
2168 .flags(Stats::pdf | Stats::nozero | Stats::nonan)
2169 ;
2170
2171 statPendingWalks
2172 .init(16)
2173 .name(name() + ".walksPending")
2174 .desc("Table walker pending requests distribution")
2175 .flags(Stats::pdf | Stats::dist | Stats::nozero | Stats::nonan)
2176 ;
2177
2178 statPageSizes // see DDI 0487A D4-1661
2179 .init(9)
2180 .name(name() + ".walkPageSizes")
2181 .desc("Table walker page sizes translated")
2182 .flags(Stats::total | Stats::pdf | Stats::dist | Stats::nozero)
2183 ;
2184 statPageSizes.subname(0, "4K");
2185 statPageSizes.subname(1, "16K");
2186 statPageSizes.subname(2, "64K");
2187 statPageSizes.subname(3, "1M");
2188 statPageSizes.subname(4, "2M");
2189 statPageSizes.subname(5, "16M");
2190 statPageSizes.subname(6, "32M");
2191 statPageSizes.subname(7, "512M");
2192 statPageSizes.subname(8, "1G");
2193
2194 statRequestOrigin
2195 .init(2,2) // Instruction/Data, requests/completed
2196 .name(name() + ".walkRequestOrigin")
2197 .desc("Table walker requests started/completed, data/inst")
2198 .flags(Stats::total)
2199 ;
2200 statRequestOrigin.subname(0,"Requested");
2201 statRequestOrigin.subname(1,"Completed");
2202 statRequestOrigin.ysubname(0,"Data");
2203 statRequestOrigin.ysubname(1,"Inst");
2204}