table_walker.cc (10109:b58c5c5854de) table_walker.cc (10324:f40134eb3f85)
1/*
1/*
2 * Copyright (c) 2010, 2012-2013 ARM Limited
2 * Copyright (c) 2010, 2012-2014 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Ali Saidi
38 * Giacomo Gabrielli
39 */
40
41#include "arch/arm/faults.hh"
42#include "arch/arm/stage2_mmu.hh"
43#include "arch/arm/system.hh"
44#include "arch/arm/table_walker.hh"
45#include "arch/arm/tlb.hh"
46#include "cpu/base.hh"
47#include "cpu/thread_context.hh"
48#include "debug/Checkpoint.hh"
49#include "debug/Drain.hh"
50#include "debug/TLB.hh"
51#include "debug/TLBVerbose.hh"
52#include "sim/system.hh"
53
54using namespace ArmISA;
55
// Construct the table walker: wire up the memory-side walk port, create the
// per-lookup-level descriptor completion events, and cache the system-level
// ISA feature bits so they need not be re-queried on every walk.
TableWalker::TableWalker(const Params *p)
    : MemObject(p), port(this, p->sys), drainManager(NULL),
      stage2Mmu(NULL), isStage2(p->is_stage2), tlb(NULL),
      currState(NULL), pending(false), masterId(p->sys->getMasterId(name())),
      numSquashable(p->num_squash_per_cycle),
      doL1DescEvent(this), doL2DescEvent(this),
      doL0LongDescEvent(this), doL1LongDescEvent(this), doL2LongDescEvent(this),
      doL3LongDescEvent(this),
      doProcessEvent(this)
{
    sctlr = 0;

    // Cache system-level properties
    if (FullSystem) {
        armSys = dynamic_cast<ArmSystem *>(p->sys);
        assert(armSys);
        haveSecurity = armSys->haveSecurity();
        _haveLPAE = armSys->haveLPAE();
        _haveVirtualization = armSys->haveVirtualization();
        physAddrRange = armSys->physAddrRange();
        _haveLargeAsid64 = armSys->haveLargeAsid64();
    } else {
        // Syscall-emulation mode: there is no ArmSystem to query, so assume
        // a baseline configuration with none of the optional extensions and
        // a 32-bit physical address space.
        armSys = NULL;
        haveSecurity = _haveLPAE = _haveVirtualization = false;
        _haveLargeAsid64 = false;
        physAddrRange = 32;
    }

}
85
86TableWalker::~TableWalker()
87{
88 ;
89}
90
// Construct an empty walker state with no stage-2 translation in flight.
// l2Desc is initialized from l1Desc (the L2 descriptor refers back to its
// parent L1 descriptor); NOTE(review): this relies on l1Desc being declared
// before l2Desc in WalkerState -- confirm member order in the header.
TableWalker::WalkerState::WalkerState() : stage2Tran(NULL), l2Desc(l1Desc)
{
}
94
95void
96TableWalker::completeDrain()
97{
98 if (drainManager && stateQueues[L1].empty() && stateQueues[L2].empty() &&
99 pendingQueue.empty()) {
100 setDrainState(Drainable::Drained);
101 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
102 drainManager->signalDrainDone();
103 drainManager = NULL;
104 }
105}
106
107unsigned int
108TableWalker::drain(DrainManager *dm)
109{
110 unsigned int count = port.drain(dm);
111
112 bool state_queues_not_empty = false;
113
114 for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
115 if (!stateQueues[i].empty()) {
116 state_queues_not_empty = true;
117 break;
118 }
119 }
120
121 if (state_queues_not_empty || pendingQueue.size()) {
122 drainManager = dm;
123 setDrainState(Drainable::Draining);
124 DPRINTF(Drain, "TableWalker not drained\n");
125
126 // return port drain count plus the table walker itself needs to drain
127 return count + 1;
128 } else {
129 setDrainState(Drainable::Drained);
130 DPRINTF(Drain, "TableWalker free, no need to drain\n");
131
132 // table walker is drained, but its ports may still need to be drained
133 return count;
134 }
135}
136
137void
138TableWalker::drainResume()
139{
140 Drainable::drainResume();
141 if (params()->sys->isTimingMode() && currState) {
142 delete currState;
143 currState = NULL;
144 }
145}
146
147BaseMasterPort&
148TableWalker::getMasterPort(const std::string &if_name, PortID idx)
149{
150 if (if_name == "port") {
151 return port;
152 }
153 return MemObject::getMasterPort(if_name, idx);
154}
155
156Fault
157TableWalker::walk(RequestPtr _req, ThreadContext *_tc, uint16_t _asid,
158 uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
159 TLB::Translation *_trans, bool _timing, bool _functional,
160 bool secure, TLB::ArmTranslationType tranType)
161{
162 assert(!(_functional && _timing));
163 WalkerState *savedCurrState = NULL;
164
165 if (!currState && !_functional) {
166 // For atomic mode, a new WalkerState instance should be only created
167 // once per TLB. For timing mode, a new instance is generated for every
168 // TLB miss.
169 DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");
170
171 currState = new WalkerState();
172 currState->tableWalker = this;
173 } else if (_functional) {
174 // If we are mixing functional mode with timing (or even
175 // atomic), we need to to be careful and clean up after
176 // ourselves to not risk getting into an inconsistent state.
177 DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
178 savedCurrState = currState;
179 currState = new WalkerState();
180 currState->tableWalker = this;
181 } else if (_timing) {
182 // This is a translation that was completed and then faulted again
183 // because some underlying parameters that affect the translation
184 // changed out from under us (e.g. asid). It will either be a
185 // misprediction, in which case nothing will happen or we'll use
186 // this fault to re-execute the faulting instruction which should clean
187 // up everything.
188 if (currState->vaddr_tainted == _req->getVaddr()) {
189 return new ReExec;
190 }
191 }
192
193 currState->tc = _tc;
194 currState->aarch64 = opModeIs64(currOpMode(_tc));
195 currState->el = currEL(_tc);
196 currState->transState = _trans;
197 currState->req = _req;
198 currState->fault = NoFault;
199 currState->asid = _asid;
200 currState->vmid = _vmid;
201 currState->isHyp = _isHyp;
202 currState->timing = _timing;
203 currState->functional = _functional;
204 currState->mode = _mode;
205 currState->tranType = tranType;
206 currState->isSecure = secure;
207 currState->physAddrRange = physAddrRange;
208
209 /** @todo These should be cached or grabbed from cached copies in
210 the TLB, all these miscreg reads are expensive */
211 currState->vaddr_tainted = currState->req->getVaddr();
212 if (currState->aarch64)
213 currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
214 currState->tc, currState->el);
215 else
216 currState->vaddr = currState->vaddr_tainted;
217
218 if (currState->aarch64) {
219 switch (currState->el) {
220 case EL0:
221 case EL1:
222 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Ali Saidi
38 * Giacomo Gabrielli
39 */
40
41#include "arch/arm/faults.hh"
42#include "arch/arm/stage2_mmu.hh"
43#include "arch/arm/system.hh"
44#include "arch/arm/table_walker.hh"
45#include "arch/arm/tlb.hh"
46#include "cpu/base.hh"
47#include "cpu/thread_context.hh"
48#include "debug/Checkpoint.hh"
49#include "debug/Drain.hh"
50#include "debug/TLB.hh"
51#include "debug/TLBVerbose.hh"
52#include "sim/system.hh"
53
54using namespace ArmISA;
55
// Construct the table walker: wire up the memory-side walk port, create the
// per-lookup-level descriptor completion events, and cache the system-level
// ISA feature bits so they need not be re-queried on every walk.
TableWalker::TableWalker(const Params *p)
    : MemObject(p), port(this, p->sys), drainManager(NULL),
      stage2Mmu(NULL), isStage2(p->is_stage2), tlb(NULL),
      currState(NULL), pending(false), masterId(p->sys->getMasterId(name())),
      numSquashable(p->num_squash_per_cycle),
      doL1DescEvent(this), doL2DescEvent(this),
      doL0LongDescEvent(this), doL1LongDescEvent(this), doL2LongDescEvent(this),
      doL3LongDescEvent(this),
      doProcessEvent(this)
{
    sctlr = 0;

    // Cache system-level properties
    if (FullSystem) {
        armSys = dynamic_cast<ArmSystem *>(p->sys);
        assert(armSys);
        haveSecurity = armSys->haveSecurity();
        _haveLPAE = armSys->haveLPAE();
        _haveVirtualization = armSys->haveVirtualization();
        physAddrRange = armSys->physAddrRange();
        _haveLargeAsid64 = armSys->haveLargeAsid64();
    } else {
        // Syscall-emulation mode: there is no ArmSystem to query, so assume
        // a baseline configuration with none of the optional extensions and
        // a 32-bit physical address space.
        armSys = NULL;
        haveSecurity = _haveLPAE = _haveVirtualization = false;
        _haveLargeAsid64 = false;
        physAddrRange = 32;
    }

}
85
86TableWalker::~TableWalker()
87{
88 ;
89}
90
// Construct an empty walker state with no stage-2 translation in flight.
// l2Desc is initialized from l1Desc (the L2 descriptor refers back to its
// parent L1 descriptor); NOTE(review): this relies on l1Desc being declared
// before l2Desc in WalkerState -- confirm member order in the header.
TableWalker::WalkerState::WalkerState() : stage2Tran(NULL), l2Desc(l1Desc)
{
}
94
95void
96TableWalker::completeDrain()
97{
98 if (drainManager && stateQueues[L1].empty() && stateQueues[L2].empty() &&
99 pendingQueue.empty()) {
100 setDrainState(Drainable::Drained);
101 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
102 drainManager->signalDrainDone();
103 drainManager = NULL;
104 }
105}
106
107unsigned int
108TableWalker::drain(DrainManager *dm)
109{
110 unsigned int count = port.drain(dm);
111
112 bool state_queues_not_empty = false;
113
114 for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
115 if (!stateQueues[i].empty()) {
116 state_queues_not_empty = true;
117 break;
118 }
119 }
120
121 if (state_queues_not_empty || pendingQueue.size()) {
122 drainManager = dm;
123 setDrainState(Drainable::Draining);
124 DPRINTF(Drain, "TableWalker not drained\n");
125
126 // return port drain count plus the table walker itself needs to drain
127 return count + 1;
128 } else {
129 setDrainState(Drainable::Drained);
130 DPRINTF(Drain, "TableWalker free, no need to drain\n");
131
132 // table walker is drained, but its ports may still need to be drained
133 return count;
134 }
135}
136
137void
138TableWalker::drainResume()
139{
140 Drainable::drainResume();
141 if (params()->sys->isTimingMode() && currState) {
142 delete currState;
143 currState = NULL;
144 }
145}
146
147BaseMasterPort&
148TableWalker::getMasterPort(const std::string &if_name, PortID idx)
149{
150 if (if_name == "port") {
151 return port;
152 }
153 return MemObject::getMasterPort(if_name, idx);
154}
155
156Fault
157TableWalker::walk(RequestPtr _req, ThreadContext *_tc, uint16_t _asid,
158 uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
159 TLB::Translation *_trans, bool _timing, bool _functional,
160 bool secure, TLB::ArmTranslationType tranType)
161{
162 assert(!(_functional && _timing));
163 WalkerState *savedCurrState = NULL;
164
165 if (!currState && !_functional) {
166 // For atomic mode, a new WalkerState instance should be only created
167 // once per TLB. For timing mode, a new instance is generated for every
168 // TLB miss.
169 DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");
170
171 currState = new WalkerState();
172 currState->tableWalker = this;
173 } else if (_functional) {
174 // If we are mixing functional mode with timing (or even
175 // atomic), we need to to be careful and clean up after
176 // ourselves to not risk getting into an inconsistent state.
177 DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
178 savedCurrState = currState;
179 currState = new WalkerState();
180 currState->tableWalker = this;
181 } else if (_timing) {
182 // This is a translation that was completed and then faulted again
183 // because some underlying parameters that affect the translation
184 // changed out from under us (e.g. asid). It will either be a
185 // misprediction, in which case nothing will happen or we'll use
186 // this fault to re-execute the faulting instruction which should clean
187 // up everything.
188 if (currState->vaddr_tainted == _req->getVaddr()) {
189 return new ReExec;
190 }
191 }
192
193 currState->tc = _tc;
194 currState->aarch64 = opModeIs64(currOpMode(_tc));
195 currState->el = currEL(_tc);
196 currState->transState = _trans;
197 currState->req = _req;
198 currState->fault = NoFault;
199 currState->asid = _asid;
200 currState->vmid = _vmid;
201 currState->isHyp = _isHyp;
202 currState->timing = _timing;
203 currState->functional = _functional;
204 currState->mode = _mode;
205 currState->tranType = tranType;
206 currState->isSecure = secure;
207 currState->physAddrRange = physAddrRange;
208
209 /** @todo These should be cached or grabbed from cached copies in
210 the TLB, all these miscreg reads are expensive */
211 currState->vaddr_tainted = currState->req->getVaddr();
212 if (currState->aarch64)
213 currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
214 currState->tc, currState->el);
215 else
216 currState->vaddr = currState->vaddr_tainted;
217
218 if (currState->aarch64) {
219 switch (currState->el) {
220 case EL0:
221 case EL1:
222 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
223 currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
223 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
224 break;
225 // @todo: uncomment this to enable Virtualization
226 // case EL2:
227 // assert(haveVirtualization);
228 // currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
224 break;
225 // @todo: uncomment this to enable Virtualization
226 // case EL2:
227 // assert(haveVirtualization);
228 // currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
229 // currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
229 // currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
230 // break;
231 case EL3:
232 assert(haveSecurity);
233 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
230 // break;
231 case EL3:
232 assert(haveSecurity);
233 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
234 currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
234 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
235 break;
236 default:
237 panic("Invalid exception level");
238 break;
239 }
240 } else {
241 currState->sctlr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
242 MISCREG_SCTLR, currState->tc, !currState->isSecure));
243 currState->ttbcr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
244 MISCREG_TTBCR, currState->tc, !currState->isSecure));
245 currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
246 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
247 currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
248 }
249 sctlr = currState->sctlr;
250
251 currState->isFetch = (currState->mode == TLB::Execute);
252 currState->isWrite = (currState->mode == TLB::Write);
253
254 // We only do a second stage of translation if we're not secure, or in
255 // hyp mode, the second stage MMU is enabled, and this table walker
256 // instance is the first stage.
257 currState->doingStage2 = false;
258 // @todo: for now disable this in AArch64 (HCR is not set)
259 currState->stage2Req = !currState->aarch64 && currState->hcr.vm &&
260 !isStage2 && !currState->isSecure && !currState->isHyp;
261
262 bool long_desc_format = currState->aarch64 ||
263 (_haveLPAE && currState->ttbcr.eae) ||
264 _isHyp || isStage2;
265
266 if (long_desc_format) {
267 // Helper variables used for hierarchical permissions
268 currState->secureLookup = currState->isSecure;
269 currState->rwTable = true;
270 currState->userTable = true;
271 currState->xnTable = false;
272 currState->pxnTable = false;
273 }
274
275 if (!currState->timing) {
276 Fault fault = NoFault;
277 if (currState->aarch64)
278 fault = processWalkAArch64();
279 else if (long_desc_format)
280 fault = processWalkLPAE();
281 else
282 fault = processWalk();
283
284 // If this was a functional non-timing access restore state to
285 // how we found it.
286 if (currState->functional) {
287 delete currState;
288 currState = savedCurrState;
289 }
290 return fault;
291 }
292
293 if (pending || pendingQueue.size()) {
294 pendingQueue.push_back(currState);
295 currState = NULL;
296 } else {
297 pending = true;
298 if (currState->aarch64)
299 return processWalkAArch64();
300 else if (long_desc_format)
301 return processWalkLPAE();
302 else
303 return processWalk();
304 }
305
306 return NoFault;
307}
308
// Event handler scheduled by nextWalk(): take the translation at the head of
// the pending queue and either start its page-table walk, or retire it if it
// has become unnecessary (instruction squashed, or an earlier walk already
// filled the TLB for the same page).
void
TableWalker::processWalkWrapper()
{
    assert(!currState);
    assert(pendingQueue.size());
    currState = pendingQueue.front();

    // The TLB lookup below must target the exception level of the original
    // request: the current EL for AArch64, EL1 for AArch32.
    ExceptionLevel target_el = EL0;
    if (currState->aarch64)
        target_el = currEL(currState->tc);
    else
        target_el = EL1;

    // Check if a previous walk filled this request already
    // @TODO Should this always be the TLB or should we look in the stage2 TLB?
    TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
            currState->vmid, currState->isHyp, currState->isSecure, true, false,
            target_el);

    // Check if we still need to have a walk for this request. If the requesting
    // instruction has been squashed, or a previous walk has filled the TLB with
    // a match, we just want to get rid of the walk. The latter could happen
    // when there are multiple outstanding misses to a single page and a
    // previous request has been successfully translated.
    if (!currState->transState->squashed() && !te) {
        // We've got a valid request, lets process it
        pending = true;
        pendingQueue.pop_front();
        // Dispatch on translation regime; NOTE(review): unlike walk(), the
        // long-descriptor test here does not need an aarch64 term because
        // aarch64 walks are dispatched by the first branch.
        if (currState->aarch64)
            processWalkAArch64();
        else if ((_haveLPAE && currState->ttbcr.eae) || currState->isHyp || isStage2)
            processWalkLPAE();
        else
            processWalk();
        return;
    }


    // If the instruction that we were translating for has been
    // squashed we shouldn't bother.
    // Retire up to numSquashable dead walks in this invocation: each one is
    // either finished with an UnimpFault (if squashed) or re-translated
    // through the TLB (if an earlier walk already filled the entry).
    unsigned num_squashed = 0;
    ThreadContext *tc = currState->tc;
    while ((num_squashed < numSquashable) && currState &&
           (currState->transState->squashed() || te)) {
        pendingQueue.pop_front();
        num_squashed++;

        DPRINTF(TLB, "Squashing table walk for address %#x\n",
                currState->vaddr_tainted);

        if (currState->transState->squashed()) {
            // finish the translation which will delete the translation object
            currState->transState->finish(new UnimpFault("Squashed Inst"),
                    currState->req, currState->tc, currState->mode);
        } else {
            // translate the request now that we know it will work
            tlb->translateTiming(currState->req, currState->tc,
                        currState->transState, currState->mode);

        }

        // delete the current request
        delete currState;

        // peak at the next one
        if (pendingQueue.size()) {
            currState = pendingQueue.front();
            te = tlb->lookup(currState->vaddr, currState->asid,
                currState->vmid, currState->isHyp, currState->isSecure, true,
                false, target_el);
        } else {
            // Terminate the loop, nothing more to do
            currState = NULL;
        }
    }

    // if we've still got pending translations schedule more work
    nextWalk(tc);
    currState = NULL;
    completeDrain();
}
390
// Perform a short-descriptor (ARMv7 VMSA) table walk for currState: select
// TTBR0 or TTBR1 based on TTBCR.N and the upper VA bits, compute the L1
// descriptor address, and issue the descriptor fetch.
//
// @return NoFault on success or if the fetch was issued (timing mode);
//         a Prefetch/DataAbort if the walk is disabled or fails a check.
Fault
TableWalker::processWalk()
{
    Addr ttbr = 0;

    // If translation isn't enabled, we shouldn't be here
    assert(currState->sctlr.m || isStage2);

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
            currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
                                                      32 - currState->ttbcr.n));

    // TTBR0 covers the low part of the VA space: use it when TTBCR.N == 0
    // (TTBR1 disabled) or when the top N bits of the VA are all zero.
    if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
                                          32 - currState->ttbcr.n)) {
        DPRINTF(TLB, " - Selecting TTBR0\n");
        // Check if table walk is allowed when Security Extensions are enabled
        // (TTBCR.PD0 set means walks via TTBR0 fault with a translation abort)
        if (haveSecurity && currState->ttbcr.pd0) {
            if (currState->isFetch)
                return new PrefetchAbort(currState->vaddr_tainted,
                                         ArmFault::TranslationLL + L1,
                                         isStage2,
                                         ArmFault::VmsaTran);
            else
                return new DataAbort(currState->vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess, currState->isWrite,
                                     ArmFault::TranslationLL + L1, isStage2,
                                     ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
            MISCREG_TTBR0, currState->tc, !currState->isSecure));
    } else {
        DPRINTF(TLB, " - Selecting TTBR1\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd1) {
            if (currState->isFetch)
                return new PrefetchAbort(currState->vaddr_tainted,
                                         ArmFault::TranslationLL + L1,
                                         isStage2,
                                         ArmFault::VmsaTran);
            else
                return new DataAbort(currState->vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess, currState->isWrite,
                                     ArmFault::TranslationLL + L1, isStage2,
                                     ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
            MISCREG_TTBR1, currState->tc, !currState->isSecure));
        // TTBR1 walks always use a 16KB table base, i.e. an effective N of 0
        // for the descriptor-address computation below.
        currState->ttbcr.n = 0;
    }

    // L1 descriptor address: table base from TTBR[31:14-N] OR'd with
    // VA[31-N:20] as the (word-sized) descriptor index.
    Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
        (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
    DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
            currState->isSecure ? "s" : "ns");

    // Trickbox address check
    Fault f;
    f = tlb->walkTrickBoxCheck(l1desc_addr, currState->isSecure,
            currState->vaddr, sizeof(uint32_t), currState->isFetch,
            currState->isWrite, TlbEntry::DomainType::NoAccess, L1);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            // Timing mode: free the walker and let the next queued walk run.
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            // Atomic/functional mode: currState is reused, just detach it.
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    // Walks bypass the cache when SCTLR.C is clear.
    Request::Flags flag = 0;
    if (currState->sctlr.c == 0) {
        flag = Request::UNCACHEABLE;
    }

    // Fetch the L1 descriptor; in timing mode this returns immediately and
    // doL1Descriptor runs from the completion event instead.
    bool delayed;
    delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
                              sizeof(uint32_t), flag, L1, &doL1DescEvent,
                              &TableWalker::doL1Descriptor);
    if (!delayed) {
       f = currState->fault;
    }

    return f;
}
479
480Fault
481TableWalker::processWalkLPAE()
482{
483 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
484 int tsz, n;
485 LookupLevel start_lookup_level = L1;
486
487 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
488 currState->vaddr_tainted, currState->ttbcr);
489
490 Request::Flags flag = 0;
491 if (currState->isSecure)
492 flag.set(Request::SECURE);
493
494 // work out which base address register to use, if in hyp mode we always
495 // use HTTBR
496 if (isStage2) {
497 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
498 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
499 tsz = sext<4>(currState->vtcr.t0sz);
500 start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
501 } else if (currState->isHyp) {
502 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
503 ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
504 tsz = currState->htcr.t0sz;
505 } else {
506 assert(_haveLPAE && currState->ttbcr.eae);
507
508 // Determine boundaries of TTBR0/1 regions
509 if (currState->ttbcr.t0sz)
510 ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
511 else if (currState->ttbcr.t1sz)
512 ttbr0_max = (1ULL << 32) -
513 (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
514 else
515 ttbr0_max = (1ULL << 32) - 1;
516 if (currState->ttbcr.t1sz)
517 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
518 else
519 ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
520
521 // The following code snippet selects the appropriate translation table base
522 // address (TTBR0 or TTBR1) and the appropriate starting lookup level
523 // depending on the address range supported by the translation table (ARM
524 // ARM issue C B3.6.4)
525 if (currState->vaddr <= ttbr0_max) {
526 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
527 // Check if table walk is allowed
528 if (currState->ttbcr.epd0) {
529 if (currState->isFetch)
530 return new PrefetchAbort(currState->vaddr_tainted,
531 ArmFault::TranslationLL + L1,
532 isStage2,
533 ArmFault::LpaeTran);
534 else
535 return new DataAbort(currState->vaddr_tainted,
536 TlbEntry::DomainType::NoAccess,
537 currState->isWrite,
538 ArmFault::TranslationLL + L1,
539 isStage2,
540 ArmFault::LpaeTran);
541 }
542 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
543 MISCREG_TTBR0, currState->tc, !currState->isSecure));
544 tsz = currState->ttbcr.t0sz;
545 if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GB
546 start_lookup_level = L2;
547 } else if (currState->vaddr >= ttbr1_min) {
548 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
549 // Check if table walk is allowed
550 if (currState->ttbcr.epd1) {
551 if (currState->isFetch)
552 return new PrefetchAbort(currState->vaddr_tainted,
553 ArmFault::TranslationLL + L1,
554 isStage2,
555 ArmFault::LpaeTran);
556 else
557 return new DataAbort(currState->vaddr_tainted,
558 TlbEntry::DomainType::NoAccess,
559 currState->isWrite,
560 ArmFault::TranslationLL + L1,
561 isStage2,
562 ArmFault::LpaeTran);
563 }
564 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
565 MISCREG_TTBR1, currState->tc, !currState->isSecure));
566 tsz = currState->ttbcr.t1sz;
567 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30)) // Lower limit >= 3 GB
568 start_lookup_level = L2;
569 } else {
570 // Out of boundaries -> translation fault
571 if (currState->isFetch)
572 return new PrefetchAbort(currState->vaddr_tainted,
573 ArmFault::TranslationLL + L1,
574 isStage2,
575 ArmFault::LpaeTran);
576 else
577 return new DataAbort(currState->vaddr_tainted,
578 TlbEntry::DomainType::NoAccess,
579 currState->isWrite, ArmFault::TranslationLL + L1,
580 isStage2, ArmFault::LpaeTran);
581 }
582
583 }
584
585 // Perform lookup (ARM ARM issue C B3.6.6)
586 if (start_lookup_level == L1) {
587 n = 5 - tsz;
588 desc_addr = mbits(ttbr, 39, n) |
589 (bits(currState->vaddr, n + 26, 30) << 3);
590 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
591 desc_addr, currState->isSecure ? "s" : "ns");
592 } else {
593 // Skip first-level lookup
594 n = (tsz >= 2 ? 14 - tsz : 12);
595 desc_addr = mbits(ttbr, 39, n) |
596 (bits(currState->vaddr, n + 17, 21) << 3);
597 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
598 desc_addr, currState->isSecure ? "s" : "ns");
599 }
600
601 // Trickbox address check
602 Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
603 currState->vaddr, sizeof(uint64_t), currState->isFetch,
604 currState->isWrite, TlbEntry::DomainType::NoAccess,
605 start_lookup_level);
606 if (f) {
607 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
608 if (currState->timing) {
609 pending = false;
610 nextWalk(currState->tc);
611 currState = NULL;
612 } else {
613 currState->tc = NULL;
614 currState->req = NULL;
615 }
616 return f;
617 }
618
619 if (currState->sctlr.c == 0) {
620 flag = Request::UNCACHEABLE;
621 }
622
623 if (currState->isSecure)
624 flag.set(Request::SECURE);
625
626 currState->longDesc.lookupLevel = start_lookup_level;
627 currState->longDesc.aarch64 = false;
235 break;
236 default:
237 panic("Invalid exception level");
238 break;
239 }
240 } else {
241 currState->sctlr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
242 MISCREG_SCTLR, currState->tc, !currState->isSecure));
243 currState->ttbcr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
244 MISCREG_TTBCR, currState->tc, !currState->isSecure));
245 currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
246 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
247 currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
248 }
249 sctlr = currState->sctlr;
250
251 currState->isFetch = (currState->mode == TLB::Execute);
252 currState->isWrite = (currState->mode == TLB::Write);
253
254 // We only do a second stage of translation if we're not secure, or in
255 // hyp mode, the second stage MMU is enabled, and this table walker
256 // instance is the first stage.
257 currState->doingStage2 = false;
258 // @todo: for now disable this in AArch64 (HCR is not set)
259 currState->stage2Req = !currState->aarch64 && currState->hcr.vm &&
260 !isStage2 && !currState->isSecure && !currState->isHyp;
261
262 bool long_desc_format = currState->aarch64 ||
263 (_haveLPAE && currState->ttbcr.eae) ||
264 _isHyp || isStage2;
265
266 if (long_desc_format) {
267 // Helper variables used for hierarchical permissions
268 currState->secureLookup = currState->isSecure;
269 currState->rwTable = true;
270 currState->userTable = true;
271 currState->xnTable = false;
272 currState->pxnTable = false;
273 }
274
275 if (!currState->timing) {
276 Fault fault = NoFault;
277 if (currState->aarch64)
278 fault = processWalkAArch64();
279 else if (long_desc_format)
280 fault = processWalkLPAE();
281 else
282 fault = processWalk();
283
284 // If this was a functional non-timing access restore state to
285 // how we found it.
286 if (currState->functional) {
287 delete currState;
288 currState = savedCurrState;
289 }
290 return fault;
291 }
292
293 if (pending || pendingQueue.size()) {
294 pendingQueue.push_back(currState);
295 currState = NULL;
296 } else {
297 pending = true;
298 if (currState->aarch64)
299 return processWalkAArch64();
300 else if (long_desc_format)
301 return processWalkLPAE();
302 else
303 return processWalk();
304 }
305
306 return NoFault;
307}
308
309void
310TableWalker::processWalkWrapper()
311{
312 assert(!currState);
313 assert(pendingQueue.size());
314 currState = pendingQueue.front();
315
316 ExceptionLevel target_el = EL0;
317 if (currState->aarch64)
318 target_el = currEL(currState->tc);
319 else
320 target_el = EL1;
321
322 // Check if a previous walk filled this request already
323 // @TODO Should this always be the TLB or should we look in the stage2 TLB?
324 TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
325 currState->vmid, currState->isHyp, currState->isSecure, true, false,
326 target_el);
327
328 // Check if we still need to have a walk for this request. If the requesting
329 // instruction has been squashed, or a previous walk has filled the TLB with
330 // a match, we just want to get rid of the walk. The latter could happen
331 // when there are multiple outstanding misses to a single page and a
332 // previous request has been successfully translated.
333 if (!currState->transState->squashed() && !te) {
334 // We've got a valid request, lets process it
335 pending = true;
336 pendingQueue.pop_front();
337 if (currState->aarch64)
338 processWalkAArch64();
339 else if ((_haveLPAE && currState->ttbcr.eae) || currState->isHyp || isStage2)
340 processWalkLPAE();
341 else
342 processWalk();
343 return;
344 }
345
346
347 // If the instruction that we were translating for has been
348 // squashed we shouldn't bother.
349 unsigned num_squashed = 0;
350 ThreadContext *tc = currState->tc;
351 while ((num_squashed < numSquashable) && currState &&
352 (currState->transState->squashed() || te)) {
353 pendingQueue.pop_front();
354 num_squashed++;
355
356 DPRINTF(TLB, "Squashing table walk for address %#x\n",
357 currState->vaddr_tainted);
358
359 if (currState->transState->squashed()) {
360 // finish the translation which will delete the translation object
361 currState->transState->finish(new UnimpFault("Squashed Inst"),
362 currState->req, currState->tc, currState->mode);
363 } else {
364 // translate the request now that we know it will work
365 tlb->translateTiming(currState->req, currState->tc,
366 currState->transState, currState->mode);
367
368 }
369
370 // delete the current request
371 delete currState;
372
373 // peak at the next one
374 if (pendingQueue.size()) {
375 currState = pendingQueue.front();
376 te = tlb->lookup(currState->vaddr, currState->asid,
377 currState->vmid, currState->isHyp, currState->isSecure, true,
378 false, target_el);
379 } else {
380 // Terminate the loop, nothing more to do
381 currState = NULL;
382 }
383 }
384
385 // if we've still got pending translations schedule more work
386 nextWalk(tc);
387 currState = NULL;
388 completeDrain();
389}
390
391Fault
392TableWalker::processWalk()
393{
394 Addr ttbr = 0;
395
396 // If translation isn't enabled, we shouldn't be here
397 assert(currState->sctlr.m || isStage2);
398
399 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
400 currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
401 32 - currState->ttbcr.n));
402
403 if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
404 32 - currState->ttbcr.n)) {
405 DPRINTF(TLB, " - Selecting TTBR0\n");
406 // Check if table walk is allowed when Security Extensions are enabled
407 if (haveSecurity && currState->ttbcr.pd0) {
408 if (currState->isFetch)
409 return new PrefetchAbort(currState->vaddr_tainted,
410 ArmFault::TranslationLL + L1,
411 isStage2,
412 ArmFault::VmsaTran);
413 else
414 return new DataAbort(currState->vaddr_tainted,
415 TlbEntry::DomainType::NoAccess, currState->isWrite,
416 ArmFault::TranslationLL + L1, isStage2,
417 ArmFault::VmsaTran);
418 }
419 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
420 MISCREG_TTBR0, currState->tc, !currState->isSecure));
421 } else {
422 DPRINTF(TLB, " - Selecting TTBR1\n");
423 // Check if table walk is allowed when Security Extensions are enabled
424 if (haveSecurity && currState->ttbcr.pd1) {
425 if (currState->isFetch)
426 return new PrefetchAbort(currState->vaddr_tainted,
427 ArmFault::TranslationLL + L1,
428 isStage2,
429 ArmFault::VmsaTran);
430 else
431 return new DataAbort(currState->vaddr_tainted,
432 TlbEntry::DomainType::NoAccess, currState->isWrite,
433 ArmFault::TranslationLL + L1, isStage2,
434 ArmFault::VmsaTran);
435 }
436 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
437 MISCREG_TTBR1, currState->tc, !currState->isSecure));
438 currState->ttbcr.n = 0;
439 }
440
441 Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
442 (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
443 DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
444 currState->isSecure ? "s" : "ns");
445
446 // Trickbox address check
447 Fault f;
448 f = tlb->walkTrickBoxCheck(l1desc_addr, currState->isSecure,
449 currState->vaddr, sizeof(uint32_t), currState->isFetch,
450 currState->isWrite, TlbEntry::DomainType::NoAccess, L1);
451 if (f) {
452 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
453 if (currState->timing) {
454 pending = false;
455 nextWalk(currState->tc);
456 currState = NULL;
457 } else {
458 currState->tc = NULL;
459 currState->req = NULL;
460 }
461 return f;
462 }
463
464 Request::Flags flag = 0;
465 if (currState->sctlr.c == 0) {
466 flag = Request::UNCACHEABLE;
467 }
468
469 bool delayed;
470 delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
471 sizeof(uint32_t), flag, L1, &doL1DescEvent,
472 &TableWalker::doL1Descriptor);
473 if (!delayed) {
474 f = currState->fault;
475 }
476
477 return f;
478}
479
480Fault
481TableWalker::processWalkLPAE()
482{
483 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
484 int tsz, n;
485 LookupLevel start_lookup_level = L1;
486
487 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
488 currState->vaddr_tainted, currState->ttbcr);
489
490 Request::Flags flag = 0;
491 if (currState->isSecure)
492 flag.set(Request::SECURE);
493
494 // work out which base address register to use, if in hyp mode we always
495 // use HTTBR
496 if (isStage2) {
497 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
498 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
499 tsz = sext<4>(currState->vtcr.t0sz);
500 start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
501 } else if (currState->isHyp) {
502 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
503 ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
504 tsz = currState->htcr.t0sz;
505 } else {
506 assert(_haveLPAE && currState->ttbcr.eae);
507
508 // Determine boundaries of TTBR0/1 regions
509 if (currState->ttbcr.t0sz)
510 ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
511 else if (currState->ttbcr.t1sz)
512 ttbr0_max = (1ULL << 32) -
513 (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
514 else
515 ttbr0_max = (1ULL << 32) - 1;
516 if (currState->ttbcr.t1sz)
517 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
518 else
519 ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
520
521 // The following code snippet selects the appropriate translation table base
522 // address (TTBR0 or TTBR1) and the appropriate starting lookup level
523 // depending on the address range supported by the translation table (ARM
524 // ARM issue C B3.6.4)
525 if (currState->vaddr <= ttbr0_max) {
526 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
527 // Check if table walk is allowed
528 if (currState->ttbcr.epd0) {
529 if (currState->isFetch)
530 return new PrefetchAbort(currState->vaddr_tainted,
531 ArmFault::TranslationLL + L1,
532 isStage2,
533 ArmFault::LpaeTran);
534 else
535 return new DataAbort(currState->vaddr_tainted,
536 TlbEntry::DomainType::NoAccess,
537 currState->isWrite,
538 ArmFault::TranslationLL + L1,
539 isStage2,
540 ArmFault::LpaeTran);
541 }
542 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
543 MISCREG_TTBR0, currState->tc, !currState->isSecure));
544 tsz = currState->ttbcr.t0sz;
545 if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GB
546 start_lookup_level = L2;
547 } else if (currState->vaddr >= ttbr1_min) {
548 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
549 // Check if table walk is allowed
550 if (currState->ttbcr.epd1) {
551 if (currState->isFetch)
552 return new PrefetchAbort(currState->vaddr_tainted,
553 ArmFault::TranslationLL + L1,
554 isStage2,
555 ArmFault::LpaeTran);
556 else
557 return new DataAbort(currState->vaddr_tainted,
558 TlbEntry::DomainType::NoAccess,
559 currState->isWrite,
560 ArmFault::TranslationLL + L1,
561 isStage2,
562 ArmFault::LpaeTran);
563 }
564 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
565 MISCREG_TTBR1, currState->tc, !currState->isSecure));
566 tsz = currState->ttbcr.t1sz;
567 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30)) // Lower limit >= 3 GB
568 start_lookup_level = L2;
569 } else {
570 // Out of boundaries -> translation fault
571 if (currState->isFetch)
572 return new PrefetchAbort(currState->vaddr_tainted,
573 ArmFault::TranslationLL + L1,
574 isStage2,
575 ArmFault::LpaeTran);
576 else
577 return new DataAbort(currState->vaddr_tainted,
578 TlbEntry::DomainType::NoAccess,
579 currState->isWrite, ArmFault::TranslationLL + L1,
580 isStage2, ArmFault::LpaeTran);
581 }
582
583 }
584
585 // Perform lookup (ARM ARM issue C B3.6.6)
586 if (start_lookup_level == L1) {
587 n = 5 - tsz;
588 desc_addr = mbits(ttbr, 39, n) |
589 (bits(currState->vaddr, n + 26, 30) << 3);
590 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
591 desc_addr, currState->isSecure ? "s" : "ns");
592 } else {
593 // Skip first-level lookup
594 n = (tsz >= 2 ? 14 - tsz : 12);
595 desc_addr = mbits(ttbr, 39, n) |
596 (bits(currState->vaddr, n + 17, 21) << 3);
597 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
598 desc_addr, currState->isSecure ? "s" : "ns");
599 }
600
601 // Trickbox address check
602 Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
603 currState->vaddr, sizeof(uint64_t), currState->isFetch,
604 currState->isWrite, TlbEntry::DomainType::NoAccess,
605 start_lookup_level);
606 if (f) {
607 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
608 if (currState->timing) {
609 pending = false;
610 nextWalk(currState->tc);
611 currState = NULL;
612 } else {
613 currState->tc = NULL;
614 currState->req = NULL;
615 }
616 return f;
617 }
618
619 if (currState->sctlr.c == 0) {
620 flag = Request::UNCACHEABLE;
621 }
622
623 if (currState->isSecure)
624 flag.set(Request::SECURE);
625
626 currState->longDesc.lookupLevel = start_lookup_level;
627 currState->longDesc.aarch64 = false;
628 currState->longDesc.largeGrain = false;
629 currState->longDesc.grainSize = 12;
628 currState->longDesc.grainSize = Grain4KB;
630
631 Event *event = start_lookup_level == L1 ? (Event *) &doL1LongDescEvent
632 : (Event *) &doL2LongDescEvent;
633
634 bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
635 sizeof(uint64_t), flag, start_lookup_level,
636 event, &TableWalker::doLongDescriptor);
637 if (!delayed) {
638 f = currState->fault;
639 }
640
641 return f;
642}
643
644unsigned
645TableWalker::adjustTableSizeAArch64(unsigned tsz)
646{
647 if (tsz < 25)
648 return 25;
649 if (tsz > 48)
650 return 48;
651 return tsz;
652}
653
654bool
655TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
656{
657 return (currPhysAddrRange != MaxPhysAddrRange &&
658 bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
659}
660
661Fault
662TableWalker::processWalkAArch64()
663{
664 assert(currState->aarch64);
665
629
630 Event *event = start_lookup_level == L1 ? (Event *) &doL1LongDescEvent
631 : (Event *) &doL2LongDescEvent;
632
633 bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
634 sizeof(uint64_t), flag, start_lookup_level,
635 event, &TableWalker::doLongDescriptor);
636 if (!delayed) {
637 f = currState->fault;
638 }
639
640 return f;
641}
642
643unsigned
644TableWalker::adjustTableSizeAArch64(unsigned tsz)
645{
646 if (tsz < 25)
647 return 25;
648 if (tsz > 48)
649 return 48;
650 return tsz;
651}
652
653bool
654TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
655{
656 return (currPhysAddrRange != MaxPhysAddrRange &&
657 bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
658}
659
660Fault
661TableWalker::processWalkAArch64()
662{
663 assert(currState->aarch64);
664
666 DPRINTF(TLB, "Beginning table walk for address %#llx, TTBCR: %#llx\n",
667 currState->vaddr_tainted, currState->ttbcr);
665 DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
666 currState->vaddr_tainted, currState->tcr);
668
667
668 static const GrainSize GrainMapDefault[] =
669 { Grain4KB, Grain64KB, Grain16KB, ReservedGrain };
670 static const GrainSize GrainMap_EL1_tg1[] =
671 { ReservedGrain, Grain16KB, Grain4KB, Grain64KB };
672
669 // Determine TTBR, table size, granule size and phys. address range
670 Addr ttbr = 0;
671 int tsz = 0, ps = 0;
673 // Determine TTBR, table size, granule size and phys. address range
674 Addr ttbr = 0;
675 int tsz = 0, ps = 0;
672 bool large_grain = false;
676 GrainSize tg = Grain4KB; // grain size computed from tg* field
673 bool fault = false;
674 switch (currState->el) {
675 case EL0:
676 case EL1:
677 switch (bits(currState->vaddr, 63,48)) {
678 case 0:
679 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
680 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
677 bool fault = false;
678 switch (currState->el) {
679 case EL0:
680 case EL1:
681 switch (bits(currState->vaddr, 63,48)) {
682 case 0:
683 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
684 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
681 tsz = adjustTableSizeAArch64(64 - currState->ttbcr.t0sz);
682 large_grain = currState->ttbcr.tg0;
685 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
686 tg = GrainMapDefault[currState->tcr.tg0];
683 if (bits(currState->vaddr, 63, tsz) != 0x0 ||
687 if (bits(currState->vaddr, 63, tsz) != 0x0 ||
684 currState->ttbcr.epd0)
688 currState->tcr.epd0)
685 fault = true;
686 break;
687 case 0xffff:
688 DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
689 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
689 fault = true;
690 break;
691 case 0xffff:
692 DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
693 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
690 tsz = adjustTableSizeAArch64(64 - currState->ttbcr.t1sz);
691 large_grain = currState->ttbcr.tg1;
694 tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
695 tg = GrainMap_EL1_tg1[currState->tcr.tg1];
692 if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
696 if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
693 currState->ttbcr.epd1)
697 currState->tcr.epd1)
694 fault = true;
695 break;
696 default:
697 // top two bytes must be all 0s or all 1s, else invalid addr
698 fault = true;
699 }
698 fault = true;
699 break;
700 default:
701 // top two bytes must be all 0s or all 1s, else invalid addr
702 fault = true;
703 }
700 ps = currState->ttbcr.ips;
704 ps = currState->tcr.ips;
701 break;
702 case EL2:
703 case EL3:
704 switch(bits(currState->vaddr, 63,48)) {
705 case 0:
705 break;
706 case EL2:
707 case EL3:
708 switch(bits(currState->vaddr, 63,48)) {
709 case 0:
706 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
707 if (currState->el == EL2)
708 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
709 else
710 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
711 tsz = adjustTableSizeAArch64(64 - currState->ttbcr.t0sz);
712 large_grain = currState->ttbcr.tg0;
710 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
711 if (currState->el == EL2)
712 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
713 else
714 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
715 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
716 tg = GrainMapDefault[currState->tcr.tg0];
713 break;
714 default:
715 // invalid addr if top two bytes are not all 0s
717 break;
718 default:
719 // invalid addr if top two bytes are not all 0s
716 fault = true;
720 fault = true;
717 }
721 }
718 ps = currState->ttbcr.ps;
722 ps = currState->tcr.ips;
719 break;
720 }
721
722 if (fault) {
723 Fault f;
724 if (currState->isFetch)
725 f = new PrefetchAbort(currState->vaddr_tainted,
726 ArmFault::TranslationLL + L0, isStage2,
727 ArmFault::LpaeTran);
728 else
729 f = new DataAbort(currState->vaddr_tainted,
730 TlbEntry::DomainType::NoAccess,
731 currState->isWrite,
732 ArmFault::TranslationLL + L0,
733 isStage2, ArmFault::LpaeTran);
734
735 if (currState->timing) {
736 pending = false;
737 nextWalk(currState->tc);
738 currState = NULL;
739 } else {
740 currState->tc = NULL;
741 currState->req = NULL;
742 }
743 return f;
744
745 }
746
723 break;
724 }
725
726 if (fault) {
727 Fault f;
728 if (currState->isFetch)
729 f = new PrefetchAbort(currState->vaddr_tainted,
730 ArmFault::TranslationLL + L0, isStage2,
731 ArmFault::LpaeTran);
732 else
733 f = new DataAbort(currState->vaddr_tainted,
734 TlbEntry::DomainType::NoAccess,
735 currState->isWrite,
736 ArmFault::TranslationLL + L0,
737 isStage2, ArmFault::LpaeTran);
738
739 if (currState->timing) {
740 pending = false;
741 nextWalk(currState->tc);
742 currState = NULL;
743 } else {
744 currState->tc = NULL;
745 currState->req = NULL;
746 }
747 return f;
748
749 }
750
751 if (tg == ReservedGrain) {
752 warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
753 "DEFINED behavior takes this to mean 4KB granules\n");
754 tg = Grain4KB;
755 }
756
757 int stride = tg - 3;
758 LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;
759
747 // Determine starting lookup level
760 // Determine starting lookup level
748 LookupLevel start_lookup_level;
749 int grain_size, stride;
750 if (large_grain) { // 64 KB granule
751 grain_size = 16;
752 stride = grain_size - 3;
753 if (tsz > grain_size + 2 * stride)
754 start_lookup_level = L1;
755 else if (tsz > grain_size + stride)
756 start_lookup_level = L2;
757 else
758 start_lookup_level = L3;
759 } else { // 4 KB granule
760 grain_size = 12;
761 stride = grain_size - 3;
762 if (tsz > grain_size + 3 * stride)
763 start_lookup_level = L0;
764 else if (tsz > grain_size + 2 * stride)
765 start_lookup_level = L1;
766 else
767 start_lookup_level = L2;
761 // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
762 // in ARM DDI 0487A. These table values correspond to the cascading tests
763 // to compute the lookup level and are of the form
764 // (grain_size + N*stride), for N = {1, 2, 3}.
765 // A value of 64 will never succeed and a value of 0 will always succeed.
766 {
767 struct GrainMap {
768 GrainSize grain_size;
769 unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
770 };
771 static const GrainMap GM[] = {
772 { Grain4KB, { 39, 30, 0, 0 } },
773 { Grain16KB, { 47, 36, 25, 0 } },
774 { Grain64KB, { 64, 42, 29, 0 } }
775 };
776
777 const unsigned *lookup = NULL; // points to a lookup_level_cutoff
778
779 for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
780 if (tg == GM[i].grain_size) {
781 lookup = GM[i].lookup_level_cutoff;
782 break;
783 }
784 }
785 assert(lookup);
786
787 for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
788 if (tsz > lookup[L]) {
789 start_lookup_level = (LookupLevel) L;
790 break;
791 }
792 }
793 panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
794 "Table walker couldn't find lookup level\n");
768 }
769
770 // Determine table base address
795 }
796
797 // Determine table base address
771 int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) -
772 grain_size;
798 int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
773 Addr base_addr = mbits(ttbr, 47, base_addr_lo);
774
775 // Determine physical address size and raise an Address Size Fault if
776 // necessary
777 int pa_range = decodePhysAddrRange64(ps);
778 // Clamp to lower limit
779 if (pa_range > physAddrRange)
780 currState->physAddrRange = physAddrRange;
781 else
782 currState->physAddrRange = pa_range;
783 if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
784 DPRINTF(TLB, "Address size fault before any lookup\n");
785 Fault f;
786 if (currState->isFetch)
787 f = new PrefetchAbort(currState->vaddr_tainted,
788 ArmFault::AddressSizeLL + start_lookup_level,
789 isStage2,
790 ArmFault::LpaeTran);
791 else
792 f = new DataAbort(currState->vaddr_tainted,
793 TlbEntry::DomainType::NoAccess,
794 currState->isWrite,
795 ArmFault::AddressSizeLL + start_lookup_level,
796 isStage2,
797 ArmFault::LpaeTran);
798
799
800 if (currState->timing) {
801 pending = false;
802 nextWalk(currState->tc);
803 currState = NULL;
804 } else {
805 currState->tc = NULL;
806 currState->req = NULL;
807 }
808 return f;
809
810 }
811
812 // Determine descriptor address
813 Addr desc_addr = base_addr |
814 (bits(currState->vaddr, tsz - 1,
799 Addr base_addr = mbits(ttbr, 47, base_addr_lo);
800
801 // Determine physical address size and raise an Address Size Fault if
802 // necessary
803 int pa_range = decodePhysAddrRange64(ps);
804 // Clamp to lower limit
805 if (pa_range > physAddrRange)
806 currState->physAddrRange = physAddrRange;
807 else
808 currState->physAddrRange = pa_range;
809 if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
810 DPRINTF(TLB, "Address size fault before any lookup\n");
811 Fault f;
812 if (currState->isFetch)
813 f = new PrefetchAbort(currState->vaddr_tainted,
814 ArmFault::AddressSizeLL + start_lookup_level,
815 isStage2,
816 ArmFault::LpaeTran);
817 else
818 f = new DataAbort(currState->vaddr_tainted,
819 TlbEntry::DomainType::NoAccess,
820 currState->isWrite,
821 ArmFault::AddressSizeLL + start_lookup_level,
822 isStage2,
823 ArmFault::LpaeTran);
824
825
826 if (currState->timing) {
827 pending = false;
828 nextWalk(currState->tc);
829 currState = NULL;
830 } else {
831 currState->tc = NULL;
832 currState->req = NULL;
833 }
834 return f;
835
836 }
837
838 // Determine descriptor address
839 Addr desc_addr = base_addr |
840 (bits(currState->vaddr, tsz - 1,
815 stride * (3 - start_lookup_level) + grain_size) << 3);
841 stride * (3 - start_lookup_level) + tg) << 3);
816
817 // Trickbox address check
818 Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
819 currState->vaddr, sizeof(uint64_t), currState->isFetch,
820 currState->isWrite, TlbEntry::DomainType::NoAccess,
821 start_lookup_level);
822 if (f) {
823 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
824 if (currState->timing) {
825 pending = false;
826 nextWalk(currState->tc);
827 currState = NULL;
828 } else {
829 currState->tc = NULL;
830 currState->req = NULL;
831 }
832 return f;
833 }
834
835 Request::Flags flag = 0;
836 if (currState->sctlr.c == 0) {
837 flag = Request::UNCACHEABLE;
838 }
839
840 currState->longDesc.lookupLevel = start_lookup_level;
841 currState->longDesc.aarch64 = true;
842
843 // Trickbox address check
844 Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
845 currState->vaddr, sizeof(uint64_t), currState->isFetch,
846 currState->isWrite, TlbEntry::DomainType::NoAccess,
847 start_lookup_level);
848 if (f) {
849 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
850 if (currState->timing) {
851 pending = false;
852 nextWalk(currState->tc);
853 currState = NULL;
854 } else {
855 currState->tc = NULL;
856 currState->req = NULL;
857 }
858 return f;
859 }
860
861 Request::Flags flag = 0;
862 if (currState->sctlr.c == 0) {
863 flag = Request::UNCACHEABLE;
864 }
865
866 currState->longDesc.lookupLevel = start_lookup_level;
867 currState->longDesc.aarch64 = true;
842 currState->longDesc.largeGrain = large_grain;
843 currState->longDesc.grainSize = grain_size;
868 currState->longDesc.grainSize = tg;
844
845 if (currState->timing) {
846 Event *event;
847 switch (start_lookup_level) {
848 case L0:
849 event = (Event *) &doL0LongDescEvent;
850 break;
851 case L1:
852 event = (Event *) &doL1LongDescEvent;
853 break;
854 case L2:
855 event = (Event *) &doL2LongDescEvent;
856 break;
857 case L3:
858 event = (Event *) &doL3LongDescEvent;
859 break;
860 default:
861 panic("Invalid table lookup level");
862 break;
863 }
864 port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t), event,
865 (uint8_t*) &currState->longDesc.data,
866 currState->tc->getCpuPtr()->clockPeriod(), flag);
867 DPRINTF(TLBVerbose,
868 "Adding to walker fifo: queue size before adding: %d\n",
869 stateQueues[start_lookup_level].size());
870 stateQueues[start_lookup_level].push_back(currState);
871 currState = NULL;
872 } else if (!currState->functional) {
873 port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t),
874 NULL, (uint8_t*) &currState->longDesc.data,
875 currState->tc->getCpuPtr()->clockPeriod(), flag);
876 doLongDescriptor();
877 f = currState->fault;
878 } else {
879 RequestPtr req = new Request(desc_addr, sizeof(uint64_t), flag,
880 masterId);
881 PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
882 pkt->dataStatic((uint8_t*) &currState->longDesc.data);
883 port.sendFunctional(pkt);
884 doLongDescriptor();
885 delete req;
886 delete pkt;
887 f = currState->fault;
888 }
889
890 return f;
891}
892
893void
894TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
895 uint8_t texcb, bool s)
896{
897 // Note: tc and sctlr local variables are hiding tc and sctrl class
898 // variables
899 DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
900 te.shareable = false; // default value
901 te.nonCacheable = false;
902 te.outerShareable = false;
903 if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
904 switch(texcb) {
905 case 0: // Stongly-ordered
906 te.nonCacheable = true;
907 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
908 te.shareable = true;
909 te.innerAttrs = 1;
910 te.outerAttrs = 0;
911 break;
912 case 1: // Shareable Device
913 te.nonCacheable = true;
914 te.mtype = TlbEntry::MemoryType::Device;
915 te.shareable = true;
916 te.innerAttrs = 3;
917 te.outerAttrs = 0;
918 break;
919 case 2: // Outer and Inner Write-Through, no Write-Allocate
920 te.mtype = TlbEntry::MemoryType::Normal;
921 te.shareable = s;
922 te.innerAttrs = 6;
923 te.outerAttrs = bits(texcb, 1, 0);
924 break;
925 case 3: // Outer and Inner Write-Back, no Write-Allocate
926 te.mtype = TlbEntry::MemoryType::Normal;
927 te.shareable = s;
928 te.innerAttrs = 7;
929 te.outerAttrs = bits(texcb, 1, 0);
930 break;
931 case 4: // Outer and Inner Non-cacheable
932 te.nonCacheable = true;
933 te.mtype = TlbEntry::MemoryType::Normal;
934 te.shareable = s;
935 te.innerAttrs = 0;
936 te.outerAttrs = bits(texcb, 1, 0);
937 break;
938 case 5: // Reserved
939 panic("Reserved texcb value!\n");
940 break;
941 case 6: // Implementation Defined
942 panic("Implementation-defined texcb value!\n");
943 break;
944 case 7: // Outer and Inner Write-Back, Write-Allocate
945 te.mtype = TlbEntry::MemoryType::Normal;
946 te.shareable = s;
947 te.innerAttrs = 5;
948 te.outerAttrs = 1;
949 break;
950 case 8: // Non-shareable Device
951 te.nonCacheable = true;
952 te.mtype = TlbEntry::MemoryType::Device;
953 te.shareable = false;
954 te.innerAttrs = 3;
955 te.outerAttrs = 0;
956 break;
957 case 9 ... 15: // Reserved
958 panic("Reserved texcb value!\n");
959 break;
960 case 16 ... 31: // Cacheable Memory
961 te.mtype = TlbEntry::MemoryType::Normal;
962 te.shareable = s;
963 if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
964 te.nonCacheable = true;
965 te.innerAttrs = bits(texcb, 1, 0);
966 te.outerAttrs = bits(texcb, 3, 2);
967 break;
968 default:
969 panic("More than 32 states for 5 bits?\n");
970 }
971 } else {
972 assert(tc);
973 PRRR prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR,
974 currState->tc, !currState->isSecure));
975 NMRR nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR,
976 currState->tc, !currState->isSecure));
977 DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
978 uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
979 switch(bits(texcb, 2,0)) {
980 case 0:
981 curr_tr = prrr.tr0;
982 curr_ir = nmrr.ir0;
983 curr_or = nmrr.or0;
984 te.outerShareable = (prrr.nos0 == 0);
985 break;
986 case 1:
987 curr_tr = prrr.tr1;
988 curr_ir = nmrr.ir1;
989 curr_or = nmrr.or1;
990 te.outerShareable = (prrr.nos1 == 0);
991 break;
992 case 2:
993 curr_tr = prrr.tr2;
994 curr_ir = nmrr.ir2;
995 curr_or = nmrr.or2;
996 te.outerShareable = (prrr.nos2 == 0);
997 break;
998 case 3:
999 curr_tr = prrr.tr3;
1000 curr_ir = nmrr.ir3;
1001 curr_or = nmrr.or3;
1002 te.outerShareable = (prrr.nos3 == 0);
1003 break;
1004 case 4:
1005 curr_tr = prrr.tr4;
1006 curr_ir = nmrr.ir4;
1007 curr_or = nmrr.or4;
1008 te.outerShareable = (prrr.nos4 == 0);
1009 break;
1010 case 5:
1011 curr_tr = prrr.tr5;
1012 curr_ir = nmrr.ir5;
1013 curr_or = nmrr.or5;
1014 te.outerShareable = (prrr.nos5 == 0);
1015 break;
1016 case 6:
1017 panic("Imp defined type\n");
1018 case 7:
1019 curr_tr = prrr.tr7;
1020 curr_ir = nmrr.ir7;
1021 curr_or = nmrr.or7;
1022 te.outerShareable = (prrr.nos7 == 0);
1023 break;
1024 }
1025
1026 switch(curr_tr) {
1027 case 0:
1028 DPRINTF(TLBVerbose, "StronglyOrdered\n");
1029 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1030 te.nonCacheable = true;
1031 te.innerAttrs = 1;
1032 te.outerAttrs = 0;
1033 te.shareable = true;
1034 break;
1035 case 1:
1036 DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1037 prrr.ds1, prrr.ds0, s);
1038 te.mtype = TlbEntry::MemoryType::Device;
1039 te.nonCacheable = true;
1040 te.innerAttrs = 3;
1041 te.outerAttrs = 0;
1042 if (prrr.ds1 && s)
1043 te.shareable = true;
1044 if (prrr.ds0 && !s)
1045 te.shareable = true;
1046 break;
1047 case 2:
1048 DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1049 prrr.ns1, prrr.ns0, s);
1050 te.mtype = TlbEntry::MemoryType::Normal;
1051 if (prrr.ns1 && s)
1052 te.shareable = true;
1053 if (prrr.ns0 && !s)
1054 te.shareable = true;
1055 break;
1056 case 3:
1057 panic("Reserved type");
1058 }
1059
1060 if (te.mtype == TlbEntry::MemoryType::Normal){
1061 switch(curr_ir) {
1062 case 0:
1063 te.nonCacheable = true;
1064 te.innerAttrs = 0;
1065 break;
1066 case 1:
1067 te.innerAttrs = 5;
1068 break;
1069 case 2:
1070 te.innerAttrs = 6;
1071 break;
1072 case 3:
1073 te.innerAttrs = 7;
1074 break;
1075 }
1076
1077 switch(curr_or) {
1078 case 0:
1079 te.nonCacheable = true;
1080 te.outerAttrs = 0;
1081 break;
1082 case 1:
1083 te.outerAttrs = 1;
1084 break;
1085 case 2:
1086 te.outerAttrs = 2;
1087 break;
1088 case 3:
1089 te.outerAttrs = 3;
1090 break;
1091 }
1092 }
1093 }
1094 DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, \
1095 outerAttrs: %d\n",
1096 te.shareable, te.innerAttrs, te.outerAttrs);
1097 te.setAttributes(false);
1098}
1099
// Fill in the memory attributes (type, cacheability, shareability) of a
// TLB entry from an LPAE long descriptor.  Stage 2 walks take the
// attributes directly from the descriptor's MemAttr field; stage 1 walks
// index MAIR0/MAIR1 with the descriptor's AttrIndx field (LPAE always
// remaps attributes, irrespective of SCTLR.TRE).  The inner/outer
// attribute values written here are gem5-internal encodings used for PAR
// reporting; only 'nonCacheable' affects the memory system.
void
TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
                          LongDescriptor &lDescriptor)
{
    assert(_haveLPAE);

    uint8_t attr;
    uint8_t sh = lDescriptor.sh();
    // Different format and source of attributes if this is a stage 2
    // translation
    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_3_2 = (attr >> 2) & 0x3;
        uint8_t attr_1_0 = attr & 0x3;

        DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);

        // MemAttr[3:2] == 0 selects Strongly-ordered/Device memory,
        // distinguished by MemAttr[1:0]
        if (attr_3_2 == 0) {
            te.mtype        = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                            : TlbEntry::MemoryType::Device;
            te.outerAttrs   = 0;
            te.innerAttrs   = attr_1_0 == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            // Normal memory; MemAttr[3:2] gives the outer, MemAttr[1:0]
            // the inner cacheability (1 = non-cacheable)
            te.mtype        = TlbEntry::MemoryType::Normal;
            te.outerAttrs   = attr_3_2 == 1 ? 0 :
                              attr_3_2 == 2 ? 2 : 1;
            te.innerAttrs   = attr_1_0 == 1 ? 0 :
                              attr_1_0 == 2 ? 6 : 5;
            te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        // LPAE always uses remapping of memory attributes, irrespective of the
        // value of SCTLR.TRE
        int reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
        reg = flattenMiscRegNsBanked(reg, currState->tc, !currState->isSecure);
        uint32_t mair = currState->tc->readMiscReg(reg);
        // Extract the 8-bit attribute field selected by AttrIndx
        attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
        uint8_t attr_7_4 = bits(attr, 7, 4);
        uint8_t attr_3_0 = bits(attr, 3, 0);
        DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);

        // Note: the memory subsystem only cares about the 'cacheable' memory
        // attribute. The other attributes are only used to fill the PAR register
        // accordingly to provide the illusion of full support
        te.nonCacheable = false;

        switch (attr_7_4) {
          case 0x0:
            // Strongly-ordered or Device memory
            if (attr_3_0 == 0x0)
                te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            else if (attr_3_0 == 0x4)
                te.mtype = TlbEntry::MemoryType::Device;
            else
                panic("Unpredictable behavior\n");
            te.nonCacheable = true;
            te.outerAttrs   = 0;
            break;
          case 0x4:
            // Normal memory, Outer Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = 0;
            if (attr_3_0 == 0x4)
                // Inner Non-cacheable
                te.nonCacheable = true;
            else if (attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          case 0x8:
          case 0x9:
          case 0xa:
          case 0xb:
          case 0xc:
          case 0xd:
          case 0xe:
          case 0xf:
            // Normal memory, outer cacheable: map the MAIR write-back /
            // write-through variants onto the internal outerAttrs encoding
            if (attr_7_4 & 0x4) {
                te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
            } else {
                te.outerAttrs = 0x2;
            }
            // Normal memory, Outer Cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }

        // Inner cacheability from the low nibble
        switch (attr_3_0) {
          case 0x0:
            te.innerAttrs = 0x1;
            break;
          case 0x4:
            te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
            break;
          case 0x8:
          case 0x9:
          case 0xA:
          case 0xB:
            te.innerAttrs = 6;
            break;
          case 0xC:
          case 0xD:
          case 0xE:
          case 0xF:
            te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
    }

    // SH[1:0]: 0b10 = Outer Shareable, 0b11 = Inner Shareable; bit 1 set
    // means shareable at all
    te.outerShareable = sh == 2;
    te.shareable      = (sh & 0x2) ? true : false;
    te.setAttributes(true);
    te.attributes |= (uint64_t) attr << 56;
}
1224
1225void
1226TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te, uint8_t attrIndx,
1227 uint8_t sh)
1228{
1229 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1230
1231 // Select MAIR
1232 uint64_t mair;
1233 switch (currState->el) {
1234 case EL0:
1235 case EL1:
1236 mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1237 break;
1238 case EL2:
1239 mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1240 break;
1241 case EL3:
1242 mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1243 break;
1244 default:
1245 panic("Invalid exception level");
1246 break;
1247 }
1248
1249 // Select attributes
1250 uint8_t attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1251 uint8_t attr_lo = bits(attr, 3, 0);
1252 uint8_t attr_hi = bits(attr, 7, 4);
1253
1254 // Memory type
1255 te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;
1256
1257 // Cacheability
1258 te.nonCacheable = false;
1259 if (te.mtype == TlbEntry::MemoryType::Device || // Device memory
1260 attr_hi == 0x8 || // Normal memory, Outer Non-cacheable
1261 attr_lo == 0x8) { // Normal memory, Inner Non-cacheable
1262 te.nonCacheable = true;
1263 }
1264
1265 te.shareable = sh == 2;
1266 te.outerShareable = (sh & 0x2) ? true : false;
1267 // Attributes formatted according to the 64-bit PAR
1268 te.attributes = ((uint64_t) attr << 56) |
1269 (1 << 11) | // LPAE bit
1270 (te.ns << 9) | // NS bit
1271 (sh << 7);
1272}
1273
1274void
1275TableWalker::doL1Descriptor()
1276{
1277 if (currState->fault != NoFault) {
1278 return;
1279 }
1280
1281 DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1282 currState->vaddr_tainted, currState->l1Desc.data);
1283 TlbEntry te;
1284
1285 switch (currState->l1Desc.type()) {
1286 case L1Descriptor::Ignore:
1287 case L1Descriptor::Reserved:
1288 if (!currState->timing) {
1289 currState->tc = NULL;
1290 currState->req = NULL;
1291 }
1292 DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1293 if (currState->isFetch)
1294 currState->fault =
1295 new PrefetchAbort(currState->vaddr_tainted,
1296 ArmFault::TranslationLL + L1,
1297 isStage2,
1298 ArmFault::VmsaTran);
1299 else
1300 currState->fault =
1301 new DataAbort(currState->vaddr_tainted,
1302 TlbEntry::DomainType::NoAccess,
1303 currState->isWrite,
1304 ArmFault::TranslationLL + L1, isStage2,
1305 ArmFault::VmsaTran);
1306 return;
1307 case L1Descriptor::Section:
1308 if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1309 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
1310 * enabled if set, do l1.Desc.setAp0() instead of generating
1311 * AccessFlag0
1312 */
1313
1314 currState->fault = new DataAbort(currState->vaddr_tainted,
1315 currState->l1Desc.domain(),
1316 currState->isWrite,
1317 ArmFault::AccessFlagLL + L1,
1318 isStage2,
1319 ArmFault::VmsaTran);
1320 }
1321 if (currState->l1Desc.supersection()) {
1322 panic("Haven't implemented supersections\n");
1323 }
1324 insertTableEntry(currState->l1Desc, false);
1325 return;
1326 case L1Descriptor::PageTable:
1327 {
1328 Addr l2desc_addr;
1329 l2desc_addr = currState->l1Desc.l2Addr() |
1330 (bits(currState->vaddr, 19, 12) << 2);
1331 DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1332 l2desc_addr, currState->isSecure ? "s" : "ns");
1333
1334 // Trickbox address check
1335 currState->fault = tlb->walkTrickBoxCheck(
1336 l2desc_addr, currState->isSecure, currState->vaddr,
1337 sizeof(uint32_t), currState->isFetch, currState->isWrite,
1338 currState->l1Desc.domain(), L2);
1339
1340 if (currState->fault) {
1341 if (!currState->timing) {
1342 currState->tc = NULL;
1343 currState->req = NULL;
1344 }
1345 return;
1346 }
1347
1348 Request::Flags flag = 0;
1349 if (currState->isSecure)
1350 flag.set(Request::SECURE);
1351
1352 bool delayed;
1353 delayed = fetchDescriptor(l2desc_addr,
1354 (uint8_t*)&currState->l2Desc.data,
1355 sizeof(uint32_t), flag, -1, &doL2DescEvent,
1356 &TableWalker::doL2Descriptor);
1357 if (delayed) {
1358 currState->delayed = true;
1359 }
1360
1361 return;
1362 }
1363 default:
1364 panic("A new type in a 2 bit field?\n");
1365 }
1366}
1367
1368void
1369TableWalker::doLongDescriptor()
1370{
1371 if (currState->fault != NoFault) {
1372 return;
1373 }
1374
1375 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1376 currState->longDesc.lookupLevel, currState->vaddr_tainted,
1377 currState->longDesc.data,
1378 currState->aarch64 ? "AArch64" : "long-desc.");
1379
1380 if ((currState->longDesc.type() == LongDescriptor::Block) ||
1381 (currState->longDesc.type() == LongDescriptor::Page)) {
1382 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1383 "xn: %d, ap: %d, af: %d, type: %d\n",
1384 currState->longDesc.lookupLevel,
1385 currState->longDesc.data,
1386 currState->longDesc.pxn(),
1387 currState->longDesc.xn(),
1388 currState->longDesc.ap(),
1389 currState->longDesc.af(),
1390 currState->longDesc.type());
1391 } else {
1392 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1393 currState->longDesc.lookupLevel,
1394 currState->longDesc.data,
1395 currState->longDesc.type());
1396 }
1397
1398 TlbEntry te;
1399
1400 switch (currState->longDesc.type()) {
1401 case LongDescriptor::Invalid:
1402 if (!currState->timing) {
1403 currState->tc = NULL;
1404 currState->req = NULL;
1405 }
1406
1407 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1408 currState->longDesc.lookupLevel,
1409 ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1410 if (currState->isFetch)
1411 currState->fault = new PrefetchAbort(
1412 currState->vaddr_tainted,
1413 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1414 isStage2,
1415 ArmFault::LpaeTran);
1416 else
1417 currState->fault = new DataAbort(
1418 currState->vaddr_tainted,
1419 TlbEntry::DomainType::NoAccess,
1420 currState->isWrite,
1421 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1422 isStage2,
1423 ArmFault::LpaeTran);
1424 return;
1425 case LongDescriptor::Block:
1426 case LongDescriptor::Page:
1427 {
1428 bool fault = false;
1429 bool aff = false;
1430 // Check for address size fault
1431 if (checkAddrSizeFaultAArch64(
1432 mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
1433 currState->longDesc.offsetBits()),
1434 currState->physAddrRange)) {
1435 fault = true;
1436 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1437 currState->longDesc.lookupLevel);
1438 // Check for access fault
1439 } else if (currState->longDesc.af() == 0) {
1440 fault = true;
1441 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1442 currState->longDesc.lookupLevel);
1443 aff = true;
1444 }
1445 if (fault) {
1446 if (currState->isFetch)
1447 currState->fault = new PrefetchAbort(
1448 currState->vaddr_tainted,
1449 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1450 currState->longDesc.lookupLevel,
1451 isStage2,
1452 ArmFault::LpaeTran);
1453 else
1454 currState->fault = new DataAbort(
1455 currState->vaddr_tainted,
1456 TlbEntry::DomainType::NoAccess, currState->isWrite,
1457 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1458 currState->longDesc.lookupLevel,
1459 isStage2,
1460 ArmFault::LpaeTran);
1461 } else {
1462 insertTableEntry(currState->longDesc, true);
1463 }
1464 }
1465 return;
1466 case LongDescriptor::Table:
1467 {
1468 // Set hierarchical permission flags
1469 currState->secureLookup = currState->secureLookup &&
1470 currState->longDesc.secureTable();
1471 currState->rwTable = currState->rwTable &&
1472 currState->longDesc.rwTable();
1473 currState->userTable = currState->userTable &&
1474 currState->longDesc.userTable();
1475 currState->xnTable = currState->xnTable ||
1476 currState->longDesc.xnTable();
1477 currState->pxnTable = currState->pxnTable ||
1478 currState->longDesc.pxnTable();
1479
1480 // Set up next level lookup
1481 Addr next_desc_addr = currState->longDesc.nextDescAddr(
1482 currState->vaddr);
1483
1484 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1485 currState->longDesc.lookupLevel,
1486 currState->longDesc.lookupLevel + 1,
1487 next_desc_addr,
1488 currState->secureLookup ? "s" : "ns");
1489
1490 // Check for address size fault
1491 if (currState->aarch64 && checkAddrSizeFaultAArch64(
1492 next_desc_addr, currState->physAddrRange)) {
1493 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1494 currState->longDesc.lookupLevel);
1495 if (currState->isFetch)
1496 currState->fault = new PrefetchAbort(
1497 currState->vaddr_tainted,
1498 ArmFault::AddressSizeLL
1499 + currState->longDesc.lookupLevel,
1500 isStage2,
1501 ArmFault::LpaeTran);
1502 else
1503 currState->fault = new DataAbort(
1504 currState->vaddr_tainted,
1505 TlbEntry::DomainType::NoAccess, currState->isWrite,
1506 ArmFault::AddressSizeLL
1507 + currState->longDesc.lookupLevel,
1508 isStage2,
1509 ArmFault::LpaeTran);
1510 return;
1511 }
1512
1513 // Trickbox address check
1514 currState->fault = tlb->walkTrickBoxCheck(
1515 next_desc_addr, currState->vaddr,
1516 currState->vaddr, sizeof(uint64_t),
1517 currState->isFetch, currState->isWrite,
1518 TlbEntry::DomainType::Client,
1519 toLookupLevel(currState->longDesc.lookupLevel +1));
1520
1521 if (currState->fault) {
1522 if (!currState->timing) {
1523 currState->tc = NULL;
1524 currState->req = NULL;
1525 }
1526 return;
1527 }
1528
1529 Request::Flags flag = 0;
1530 if (currState->secureLookup)
1531 flag.set(Request::SECURE);
1532
1533 currState->longDesc.lookupLevel =
1534 (LookupLevel) (currState->longDesc.lookupLevel + 1);
1535 Event *event = NULL;
1536 switch (currState->longDesc.lookupLevel) {
1537 case L1:
1538 assert(currState->aarch64);
1539 event = &doL1LongDescEvent;
1540 break;
1541 case L2:
1542 event = &doL2LongDescEvent;
1543 break;
1544 case L3:
1545 event = &doL3LongDescEvent;
1546 break;
1547 default:
1548 panic("Wrong lookup level in table walk\n");
1549 break;
1550 }
1551
1552 bool delayed;
1553 delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1554 sizeof(uint64_t), flag, -1, event,
1555 &TableWalker::doLongDescriptor);
1556 if (delayed) {
1557 currState->delayed = true;
1558 }
1559 }
1560 return;
1561 default:
1562 panic("A new type in a 2 bit field?\n");
1563 }
1564}
1565
1566void
1567TableWalker::doL2Descriptor()
1568{
1569 if (currState->fault != NoFault) {
1570 return;
1571 }
1572
1573 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1574 currState->vaddr_tainted, currState->l2Desc.data);
1575 TlbEntry te;
1576
1577 if (currState->l2Desc.invalid()) {
1578 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1579 if (!currState->timing) {
1580 currState->tc = NULL;
1581 currState->req = NULL;
1582 }
1583 if (currState->isFetch)
1584 currState->fault =
1585 new PrefetchAbort(currState->vaddr_tainted,
1586 ArmFault::TranslationLL + L2,
1587 isStage2,
1588 ArmFault::VmsaTran);
1589 else
1590 currState->fault =
1591 new DataAbort(currState->vaddr_tainted, currState->l1Desc.domain(),
1592 currState->isWrite, ArmFault::TranslationLL + L2,
1593 isStage2,
1594 ArmFault::VmsaTran);
1595 return;
1596 }
1597
1598 if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1599 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled
1600 * if set, do l2.Desc.setAp0() instead of generating AccessFlag0
1601 */
1602 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1603 currState->sctlr.afe, currState->l2Desc.ap());
1604
1605 currState->fault =
1606 new DataAbort(currState->vaddr_tainted,
1607 TlbEntry::DomainType::NoAccess, currState->isWrite,
1608 ArmFault::AccessFlagLL + L2, isStage2,
1609 ArmFault::VmsaTran);
1610 }
1611
1612 insertTableEntry(currState->l2Desc, false);
1613}
1614
// Timing-mode event handler for a completed L1 descriptor fetch.  Pops the
// walker state from the L1 queue, processes the descriptor, then either
// finishes the translation (fault, or retry via translateTiming) and
// destroys the state, or — if doL1Descriptor started an L2 fetch — parks
// the state on the L2 queue.
void
TableWalker::doL1DescriptorWrapper()
{
    currState = stateQueues[L1].front();
    currState->delayed = false;
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }


    DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data);
    DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",currState->l1Desc.data);

    DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
    doL1Descriptor();

    stateQueues[L1].pop_front();
    // A drain may have been waiting for this walk to leave the queue
    completeDrain();
    // Check if fault was generated
    if (currState->fault != NoFault) {
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);

        // This walker slot is free again; kick off any queued walk
        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    }
    else if (!currState->delayed) {
        // delay is not set so there is no L2 to do
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            // Re-issue the translation: the new TLB entry inserted by
            // doL1Descriptor should now hit
            currState->fault = tlb->translateTiming(currState->req, currState->tc,
                                                    currState->transState, currState->mode);
        }

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        // need to do L2 descriptor
        stateQueues[L2].push_back(currState);
    }
    currState = NULL;
}
1670
// Timing-mode event handler for a completed L2 descriptor fetch.  An L2
// descriptor is always the last step of a short-descriptor walk, so the
// walker state is unconditionally finished and destroyed here.
void
TableWalker::doL2DescriptorWrapper()
{
    currState = stateQueues[L2].front();
    // An L2 fetch is only ever started from a delayed L1 walk
    assert(currState->delayed);
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doL2Descriptor();

    // Check if fault was generated
    if (currState->fault != NoFault) {
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);
    }
    else {
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            // Re-issue the translation; the freshly inserted entry should hit
            currState->fault = tlb->translateTiming(currState->req,
                currState->tc, currState->transState, currState->mode);
        }
    }


    stateQueues[L2].pop_front();
    // A drain may have been waiting for this walk to leave the queue
    completeDrain();
    pending = false;
    nextWalk(currState->tc);

    currState->req = NULL;
    currState->tc = NULL;
    currState->delayed = false;

    delete currState;
    currState = NULL;
}
1713
// Event-handler shims: each lookup level of a timing long-descriptor walk
// has its own completion event, which needs a zero-argument callback.
// These simply forward to the common doLongDescriptorWrapper with the
// corresponding lookup level.
void
TableWalker::doL0LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L0);
}

void
TableWalker::doL1LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L1);
}

void
TableWalker::doL2LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L2);
}

void
TableWalker::doL3LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L3);
}
1737
// Timing-mode event handler for a completed long-descriptor fetch at the
// given lookup level.  Pops the walker state from that level's queue,
// processes the descriptor, then either finishes the translation (fault,
// or retry via translateTiming) and destroys the state, or — if another
// level must be fetched — re-queues the state at the new level.
void
TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
{
    currState = stateQueues[curr_lookup_level].front();
    assert(curr_lookup_level == currState->longDesc.lookupLevel);
    currState->delayed = false;

    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doLongDescriptor();

    stateQueues[curr_lookup_level].pop_front();

    if (currState->fault != NoFault) {
        // A fault was generated
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);

        // This walker slot is free again; kick off any queued walk
        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else if (!currState->delayed) {
        // No additional lookups required
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            // Re-issue the translation; the freshly inserted entry should hit
            currState->fault = tlb->translateTiming(currState->req, currState->tc,
                                                    currState->transState,
                                                    currState->mode);
        }

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        // Guard against a malformed walk that never terminates
        if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
            panic("Max. number of lookups already reached in table walk\n");
        // Need to perform additional lookups
        stateQueues[currState->longDesc.lookupLevel].push_back(currState);
    }
    currState = NULL;
}
1794
1795
1796void
1797TableWalker::nextWalk(ThreadContext *tc)
1798{
1799 if (pendingQueue.size())
1800 schedule(doProcessEvent, clockEdge(Cycles(1)));
1801}
1802
// Fetch a page-table descriptor from memory into 'data'.  When the walk's
// table accesses must themselves be translated (stage2Req), the read goes
// through the stage 2 MMU; otherwise it is issued on the walker port.
// Returns true when the access is timing-mode, i.e. completion is deferred
// to 'event'; in atomic/functional mode 'doDescriptor' is invoked in-line
// before returning.  A non-negative 'queueIndex' parks the walker state on
// that descriptor queue for timing accesses (and clears currState).
bool
TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
                             Request::Flags flags, int queueIndex, Event *event,
                             void (TableWalker::*doDescriptor)())
{
    bool isTiming = currState->timing;

    // do the requests for the page table descriptors have to go through the
    // second stage MMU
    if (currState->stage2Req) {
        Fault fault;
        flags = flags | TLB::MustBeOne;

        if (isTiming) {
            // Ownership of 'tran' passes to currState; the descriptor
            // wrappers delete it once the fetch completes
            Stage2MMU::Stage2Translation *tran = new
                Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
                                             currState->vaddr);
            currState->stage2Tran = tran;
            stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
                                     flags, masterId);
            fault = tran->fault;
        } else {
            fault = stage2Mmu->readDataUntimed(currState->tc,
                currState->vaddr, descAddr, data, numBytes, flags, masterId,
                currState->functional);
        }

        if (fault != NoFault) {
            currState->fault = fault;
        }
        if (isTiming) {
            if (queueIndex >= 0) {
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else {
            // Atomic/functional: process the fetched descriptor in-line
            (this->*doDescriptor)();
        }
    } else {
        if (isTiming) {
            // Timing DMA read; 'event' fires when the data is available
            port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            if (queueIndex >= 0) {
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else if (!currState->functional) {
            // Atomic: blocking DMA read, then process the descriptor
            port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            (this->*doDescriptor)();
        } else {
            // Functional access: build the packet by hand and send it
            // functionally so no simulator state/time is affected
            RequestPtr req = new Request(descAddr, numBytes, flags, masterId);
            req->taskId(ContextSwitchTaskId::DMA);
            PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
            pkt->dataStatic(data);
            port.sendFunctional(pkt);
            (this->*doDescriptor)();
            delete req;
            delete pkt;
        }
    }
    return (isTiming);
}
1870
// Build a TLB entry from a fully-processed page-table descriptor (short or
// long format) and insert it into the TLB.  For non-timing walks this also
// releases the walker's reference to the request/thread context.
void
TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
{
    TlbEntry te;

    // Create and fill a new page table entry
    te.valid          = true;
    te.longDescFormat = longDescriptor;
    te.isHyp          = currState->isHyp;
    te.asid           = currState->asid;
    te.vmid           = currState->vmid;
    te.N              = descriptor.offsetBits();
    te.vpn            = currState->vaddr >> te.N;
    te.size           = (1<<te.N) - 1;
    te.pfn            = descriptor.pfn();
    te.domain         = descriptor.domain();
    te.lookupLevel    = descriptor.lookupLevel;
    te.ns             = !descriptor.secure(haveSecurity, currState) || isStage2;
    te.nstid          = !currState->isSecure;
    te.xn             = descriptor.xn();
    // Short-descriptor entries are only used in AArch32, which maps to EL1
    if (currState->aarch64)
        te.el         = currState->el;
    else
        te.el         = 1;

    // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
    // as global
    te.global         = descriptor.global(currState) || isStage2;
    if (longDescriptor) {
        LongDescriptor lDescriptor =
            dynamic_cast<LongDescriptor &>(descriptor);

        // Fold in the hierarchical execute-never bits gathered from the
        // table descriptors during the walk
        te.xn |= currState->xnTable;
        te.pxn = currState->pxnTable || lDescriptor.pxn();
        if (isStage2) {
            // this is actually the HAP field, but its stored in the same bit
            // possitions as the AP field in a stage 1 translation.
            te.hap = lDescriptor.ap();
        } else {
            // Combine the leaf AP bits with the hierarchical APTable
            // restrictions.  NOTE(review): the '||' yields a bool (0/1)
            // before the shift — presumably intentional, as it forces the
            // "no write" bit when any table level restricted writes.
            te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
                (currState->userTable && (descriptor.ap() & 0x1));
        }
        if (currState->aarch64)
            memAttrsAArch64(currState->tc, te, currState->longDesc.attrIndx(),
                            currState->longDesc.sh());
        else
            memAttrsLPAE(currState->tc, te, lDescriptor);
    } else {
        te.ap = descriptor.ap();
        memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
                 descriptor.shareable());
    }

    // Debug output
    DPRINTF(TLB, descriptor.dbgHeader().c_str());
    DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
            te.N, te.pfn, te.size, te.global, te.valid);
    DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
            "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
            te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
            te.nonCacheable, te.ns);
    DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
            descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
            descriptor.getRawData());

    // Insert the entry into the TLB
    tlb->insert(currState->vaddr, te);
    if (!currState->timing) {
        currState->tc = NULL;
        currState->req = NULL;
    }
}
1943
// gem5 SimObject factory: instantiate the table walker from its params.
ArmISA::TableWalker *
ArmTableWalkerParams::create()
{
    return new ArmISA::TableWalker(this);
}
1949
1950LookupLevel
1951TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
1952{
1953 switch (lookup_level_as_int) {
1954 case L1:
1955 return L1;
1956 case L2:
1957 return L2;
1958 case L3:
1959 return L3;
1960 default:
1961 panic("Invalid lookup level conversion");
1962 }
1963}
869
870 if (currState->timing) {
871 Event *event;
872 switch (start_lookup_level) {
873 case L0:
874 event = (Event *) &doL0LongDescEvent;
875 break;
876 case L1:
877 event = (Event *) &doL1LongDescEvent;
878 break;
879 case L2:
880 event = (Event *) &doL2LongDescEvent;
881 break;
882 case L3:
883 event = (Event *) &doL3LongDescEvent;
884 break;
885 default:
886 panic("Invalid table lookup level");
887 break;
888 }
889 port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t), event,
890 (uint8_t*) &currState->longDesc.data,
891 currState->tc->getCpuPtr()->clockPeriod(), flag);
892 DPRINTF(TLBVerbose,
893 "Adding to walker fifo: queue size before adding: %d\n",
894 stateQueues[start_lookup_level].size());
895 stateQueues[start_lookup_level].push_back(currState);
896 currState = NULL;
897 } else if (!currState->functional) {
898 port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t),
899 NULL, (uint8_t*) &currState->longDesc.data,
900 currState->tc->getCpuPtr()->clockPeriod(), flag);
901 doLongDescriptor();
902 f = currState->fault;
903 } else {
904 RequestPtr req = new Request(desc_addr, sizeof(uint64_t), flag,
905 masterId);
906 PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
907 pkt->dataStatic((uint8_t*) &currState->longDesc.data);
908 port.sendFunctional(pkt);
909 doLongDescriptor();
910 delete req;
911 delete pkt;
912 f = currState->fault;
913 }
914
915 return f;
916}
917
// Fill in the memory attributes of a TLB entry from a short-descriptor
// walk.  'texcb' is the concatenated {TEX[2:0], C, B} field and 's' the
// shareable bit of the descriptor.  With SCTLR.TRE clear (or the MMU off)
// the raw TEX/C/B encoding is decoded directly; otherwise the TEX-remap
// path indexes PRRR/NMRR with the low three bits.  The inner/outer
// attribute values are gem5-internal encodings used for PAR reporting;
// only 'nonCacheable' affects the memory system.
void
TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
                      uint8_t texcb, bool s)
{
    // Note: tc and sctlr local variables are hiding tc and sctrl class
    // variables
    DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
    te.shareable = false; // default value
    te.nonCacheable = false;
    te.outerShareable = false;
    if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
        // TEX remap disabled: decode the architectural TEX/C/B encoding
        switch(texcb) {
          case 0: // Stongly-ordered
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.shareable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            break;
          case 1: // Shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 2: // Outer and Inner Write-Through, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 6;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 3: // Outer and Inner Write-Back, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 7;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 4: // Outer and Inner Non-cacheable
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 0;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 5: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 6: // Implementation Defined
            panic("Implementation-defined texcb value!\n");
            break;
          case 7: // Outer and Inner Write-Back, Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 5;
            te.outerAttrs = 1;
            break;
          case 8: // Non-shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = false;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 9 ... 15: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 16 ... 31: // Cacheable Memory
            // TEX[2] set: independently specified inner (texcb[1:0]) and
            // outer (texcb[3:2]) cacheability
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
                te.nonCacheable = true;
            te.innerAttrs = bits(texcb, 1, 0);
            te.outerAttrs = bits(texcb, 3, 2);
            break;
          default:
            panic("More than 32 states for 5 bits?\n");
        }
    } else {
        // TEX remap enabled: PRRR/NMRR (banked by security state) supply
        // the type, shareability and cacheability for each of the eight
        // remap indices {TEX[0], C, B}
        assert(tc);
        PRRR prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR,
                                    currState->tc, !currState->isSecure));
        NMRR nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR,
                                    currState->tc, !currState->isSecure));
        DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
        uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
        switch(bits(texcb, 2,0)) {
          case 0:
            curr_tr = prrr.tr0;
            curr_ir = nmrr.ir0;
            curr_or = nmrr.or0;
            te.outerShareable = (prrr.nos0 == 0);
            break;
          case 1:
            curr_tr = prrr.tr1;
            curr_ir = nmrr.ir1;
            curr_or = nmrr.or1;
            te.outerShareable = (prrr.nos1 == 0);
            break;
          case 2:
            curr_tr = prrr.tr2;
            curr_ir = nmrr.ir2;
            curr_or = nmrr.or2;
            te.outerShareable = (prrr.nos2 == 0);
            break;
          case 3:
            curr_tr = prrr.tr3;
            curr_ir = nmrr.ir3;
            curr_or = nmrr.or3;
            te.outerShareable = (prrr.nos3 == 0);
            break;
          case 4:
            curr_tr = prrr.tr4;
            curr_ir = nmrr.ir4;
            curr_or = nmrr.or4;
            te.outerShareable = (prrr.nos4 == 0);
            break;
          case 5:
            curr_tr = prrr.tr5;
            curr_ir = nmrr.ir5;
            curr_or = nmrr.or5;
            te.outerShareable = (prrr.nos5 == 0);
            break;
          case 6:
            panic("Imp defined type\n");
          case 7:
            curr_tr = prrr.tr7;
            curr_ir = nmrr.ir7;
            curr_or = nmrr.or7;
            te.outerShareable = (prrr.nos7 == 0);
            break;
        }

        // Remapped memory type; for Device/Normal, shareability depends on
        // the PRRR DS/NS bits selected by the descriptor's S bit
        switch(curr_tr) {
          case 0:
            DPRINTF(TLBVerbose, "StronglyOrdered\n");
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.nonCacheable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            te.shareable = true;
            break;
          case 1:
            DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
                    prrr.ds1, prrr.ds0, s);
            te.mtype = TlbEntry::MemoryType::Device;
            te.nonCacheable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            if (prrr.ds1 && s)
                te.shareable = true;
            if (prrr.ds0 && !s)
                te.shareable = true;
            break;
          case 2:
            DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
                    prrr.ns1, prrr.ns0, s);
            te.mtype = TlbEntry::MemoryType::Normal;
            if (prrr.ns1 && s)
                te.shareable = true;
            if (prrr.ns0 && !s)
                te.shareable = true;
            break;
          case 3:
            panic("Reserved type");
        }

        // Remapped cacheability (NMRR IRn/ORn) only applies to Normal memory
        if (te.mtype == TlbEntry::MemoryType::Normal){
            switch(curr_ir) {
              case 0:
                te.nonCacheable = true;
                te.innerAttrs = 0;
                break;
              case 1:
                te.innerAttrs = 5;
                break;
              case 2:
                te.innerAttrs = 6;
                break;
              case 3:
                te.innerAttrs = 7;
                break;
            }

            switch(curr_or) {
              case 0:
                te.nonCacheable = true;
                te.outerAttrs = 0;
                break;
              case 1:
                te.outerAttrs = 1;
                break;
              case 2:
                te.outerAttrs = 2;
                break;
              case 3:
                te.outerAttrs = 3;
                break;
            }
        }
    }
    DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, \
            outerAttrs: %d\n",
            te.shareable, te.innerAttrs, te.outerAttrs);
    te.setAttributes(false);
}
1124
// Compute the memory attributes (type, cacheability, shareability) of a TLB
// entry from a long-descriptor (LPAE) table entry and store them in te.
// For stage 2 translations the attributes come directly from the descriptor's
// MemAttr field; for stage 1 the descriptor's AttrIndx selects a byte of
// MAIR0/MAIR1 which is then decoded.
// NOTE(review): the tc parameter is unused here; the MAIR register reads
// below go through currState->tc instead.
void
TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
    LongDescriptor &lDescriptor)
{
    assert(_haveLPAE);

    uint8_t attr;
    uint8_t sh = lDescriptor.sh();
    // Different format and source of attributes if this is a stage 2
    // translation
    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_3_2 = (attr >> 2) & 0x3;
        uint8_t attr_1_0 = attr & 0x3;

        DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_3_2 == 0) {
            // MemAttr[3:2] == 0: Strongly-ordered (MemAttr[1:0] == 0) or
            // Device memory; always non-cacheable
            te.mtype        = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                            : TlbEntry::MemoryType::Device;
            te.outerAttrs   = 0;
            te.innerAttrs   = attr_1_0 == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            // Normal memory: MemAttr[3:2] encodes the outer and MemAttr[1:0]
            // the inner cacheability (value 1 = non-cacheable)
            te.mtype        = TlbEntry::MemoryType::Normal;
            te.outerAttrs   = attr_3_2 == 1 ? 0 :
                              attr_3_2 == 2 ? 2 : 1;
            te.innerAttrs   = attr_1_0 == 1 ? 0 :
                              attr_1_0 == 2 ? 6 : 5;
            te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        // LPAE always uses remapping of memory attributes, irrespective of the
        // value of SCTLR.TRE
        int reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
        reg = flattenMiscRegNsBanked(reg, currState->tc, !currState->isSecure);
        uint32_t mair = currState->tc->readMiscReg(reg);
        // Each AttrIndx selects one byte out of the (banked) MAIR register
        attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
        uint8_t attr_7_4 = bits(attr, 7, 4);
        uint8_t attr_3_0 = bits(attr, 3, 0);
        DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);

        // Note: the memory subsystem only cares about the 'cacheable' memory
        // attribute. The other attributes are only used to fill the PAR register
        // accordingly to provide the illusion of full support
        te.nonCacheable = false;

        // Decode the upper attribute nibble (memory type / outer
        // cacheability)
        switch (attr_7_4) {
          case 0x0:
            // Strongly-ordered or Device memory
            if (attr_3_0 == 0x0)
                te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            else if (attr_3_0 == 0x4)
                te.mtype = TlbEntry::MemoryType::Device;
            else
                panic("Unpredictable behavior\n");
            te.nonCacheable = true;
            te.outerAttrs   = 0;
            break;
          case 0x4:
            // Normal memory, Outer Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = 0;
            if (attr_3_0 == 0x4)
                // Inner Non-cacheable
                te.nonCacheable = true;
            else if (attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          case 0x8:
          case 0x9:
          case 0xa:
          case 0xb:
          case 0xc:
          case 0xd:
          case 0xe:
          case 0xf:
            if (attr_7_4 & 0x4) {
                te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
            } else {
                te.outerAttrs = 0x2;
            }
            // Normal memory, Outer Cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }

        // Decode the lower attribute nibble (inner cacheability)
        switch (attr_3_0) {
          case 0x0:
            te.innerAttrs = 0x1;
            break;
          case 0x4:
            te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
            break;
          case 0x8:
          case 0x9:
          case 0xA:
          case 0xB:
            te.innerAttrs = 6;
            break;
          case 0xC:
          case 0xD:
          case 0xE:
          case 0xF:
            te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
    }

    // SH == 2 is Outer Shareable; bit 1 is set for both shareable encodings
    te.outerShareable = sh == 2;
    te.shareable = (sh & 0x2) ? true : false;
    te.setAttributes(true);
    // Stash the raw attribute byte in the top of the PAR-format attributes
    te.attributes |= (uint64_t) attr << 56;
}
1249
1250void
1251TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te, uint8_t attrIndx,
1252 uint8_t sh)
1253{
1254 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1255
1256 // Select MAIR
1257 uint64_t mair;
1258 switch (currState->el) {
1259 case EL0:
1260 case EL1:
1261 mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1262 break;
1263 case EL2:
1264 mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1265 break;
1266 case EL3:
1267 mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1268 break;
1269 default:
1270 panic("Invalid exception level");
1271 break;
1272 }
1273
1274 // Select attributes
1275 uint8_t attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1276 uint8_t attr_lo = bits(attr, 3, 0);
1277 uint8_t attr_hi = bits(attr, 7, 4);
1278
1279 // Memory type
1280 te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;
1281
1282 // Cacheability
1283 te.nonCacheable = false;
1284 if (te.mtype == TlbEntry::MemoryType::Device || // Device memory
1285 attr_hi == 0x8 || // Normal memory, Outer Non-cacheable
1286 attr_lo == 0x8) { // Normal memory, Inner Non-cacheable
1287 te.nonCacheable = true;
1288 }
1289
1290 te.shareable = sh == 2;
1291 te.outerShareable = (sh & 0x2) ? true : false;
1292 // Attributes formatted according to the 64-bit PAR
1293 te.attributes = ((uint64_t) attr << 56) |
1294 (1 << 11) | // LPAE bit
1295 (te.ns << 9) | // NS bit
1296 (sh << 7);
1297}
1298
// Process a fetched short-descriptor (VMSAv7) L1 entry for the current walk.
// Depending on the descriptor type this either raises a translation fault,
// inserts a section mapping into the TLB, or issues the fetch of the L2
// table entry the descriptor points to.
void
TableWalker::doL1Descriptor()
{
    // An earlier step of this walk (e.g. the descriptor fetch itself) may
    // already have faulted; nothing more to do in that case.
    if (currState->fault != NoFault) {
        return;
    }

    DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
            currState->vaddr_tainted, currState->l1Desc.data);
    TlbEntry te;

    switch (currState->l1Desc.type()) {
      case L1Descriptor::Ignore:
      case L1Descriptor::Reserved:
        if (!currState->timing) {
            // Non-timing walks only borrow tc/req for the duration of the
            // walk; drop the references before reporting the fault.
            currState->tc = NULL;
            currState->req = NULL;
        }
        DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
        if (currState->isFetch)
            currState->fault =
                new PrefetchAbort(currState->vaddr_tainted,
                                  ArmFault::TranslationLL + L1,
                                  isStage2,
                                  ArmFault::VmsaTran);
        else
            currState->fault =
                new DataAbort(currState->vaddr_tainted,
                              TlbEntry::DomainType::NoAccess,
                              currState->isWrite,
                              ArmFault::TranslationLL + L1, isStage2,
                              ArmFault::VmsaTran);
        return;
      case L1Descriptor::Section:
        if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
            /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
             * enabled if set, do l1.Desc.setAp0() instead of generating
             * AccessFlag0
             */
            currState->fault = new DataAbort(currState->vaddr_tainted,
                                             currState->l1Desc.domain(),
                                             currState->isWrite,
                                             ArmFault::AccessFlagLL + L1,
                                             isStage2,
                                             ArmFault::VmsaTran);
        }
        if (currState->l1Desc.supersection()) {
            panic("Haven't implemented supersections\n");
        }
        // NOTE(review): the section entry is inserted even when an
        // access-flag fault was generated above; the fault is still
        // reported via currState->fault.
        insertTableEntry(currState->l1Desc, false);
        return;
      case L1Descriptor::PageTable:
        {
            // Address of the L2 descriptor: table base from the L1 entry,
            // indexed by VA[19:12] (4-byte descriptors)
            Addr l2desc_addr;
            l2desc_addr = currState->l1Desc.l2Addr() |
                (bits(currState->vaddr, 19, 12) << 2);
            DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
                    l2desc_addr, currState->isSecure ? "s" : "ns");

            // Trickbox address check
            currState->fault = tlb->walkTrickBoxCheck(
                l2desc_addr, currState->isSecure, currState->vaddr,
                sizeof(uint32_t), currState->isFetch, currState->isWrite,
                currState->l1Desc.domain(), L2);

            if (currState->fault) {
                if (!currState->timing) {
                    currState->tc = NULL;
                    currState->req = NULL;
                }
                return;
            }

            Request::Flags flag = 0;
            if (currState->isSecure)
                flag.set(Request::SECURE);

            // Fetch the L2 descriptor; in timing mode this may complete
            // later via doL2DescEvent, in which case the walk is delayed.
            bool delayed;
            delayed = fetchDescriptor(l2desc_addr,
                                      (uint8_t*)&currState->l2Desc.data,
                                      sizeof(uint32_t), flag, -1, &doL2DescEvent,
                                      &TableWalker::doL2Descriptor);
            if (delayed) {
                currState->delayed = true;
            }

            return;
        }
      default:
        panic("A new type in a 2 bit field?\n");
    }
}
1392
1393void
1394TableWalker::doLongDescriptor()
1395{
1396 if (currState->fault != NoFault) {
1397 return;
1398 }
1399
1400 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1401 currState->longDesc.lookupLevel, currState->vaddr_tainted,
1402 currState->longDesc.data,
1403 currState->aarch64 ? "AArch64" : "long-desc.");
1404
1405 if ((currState->longDesc.type() == LongDescriptor::Block) ||
1406 (currState->longDesc.type() == LongDescriptor::Page)) {
1407 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1408 "xn: %d, ap: %d, af: %d, type: %d\n",
1409 currState->longDesc.lookupLevel,
1410 currState->longDesc.data,
1411 currState->longDesc.pxn(),
1412 currState->longDesc.xn(),
1413 currState->longDesc.ap(),
1414 currState->longDesc.af(),
1415 currState->longDesc.type());
1416 } else {
1417 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1418 currState->longDesc.lookupLevel,
1419 currState->longDesc.data,
1420 currState->longDesc.type());
1421 }
1422
1423 TlbEntry te;
1424
1425 switch (currState->longDesc.type()) {
1426 case LongDescriptor::Invalid:
1427 if (!currState->timing) {
1428 currState->tc = NULL;
1429 currState->req = NULL;
1430 }
1431
1432 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1433 currState->longDesc.lookupLevel,
1434 ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1435 if (currState->isFetch)
1436 currState->fault = new PrefetchAbort(
1437 currState->vaddr_tainted,
1438 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1439 isStage2,
1440 ArmFault::LpaeTran);
1441 else
1442 currState->fault = new DataAbort(
1443 currState->vaddr_tainted,
1444 TlbEntry::DomainType::NoAccess,
1445 currState->isWrite,
1446 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1447 isStage2,
1448 ArmFault::LpaeTran);
1449 return;
1450 case LongDescriptor::Block:
1451 case LongDescriptor::Page:
1452 {
1453 bool fault = false;
1454 bool aff = false;
1455 // Check for address size fault
1456 if (checkAddrSizeFaultAArch64(
1457 mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
1458 currState->longDesc.offsetBits()),
1459 currState->physAddrRange)) {
1460 fault = true;
1461 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1462 currState->longDesc.lookupLevel);
1463 // Check for access fault
1464 } else if (currState->longDesc.af() == 0) {
1465 fault = true;
1466 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1467 currState->longDesc.lookupLevel);
1468 aff = true;
1469 }
1470 if (fault) {
1471 if (currState->isFetch)
1472 currState->fault = new PrefetchAbort(
1473 currState->vaddr_tainted,
1474 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1475 currState->longDesc.lookupLevel,
1476 isStage2,
1477 ArmFault::LpaeTran);
1478 else
1479 currState->fault = new DataAbort(
1480 currState->vaddr_tainted,
1481 TlbEntry::DomainType::NoAccess, currState->isWrite,
1482 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1483 currState->longDesc.lookupLevel,
1484 isStage2,
1485 ArmFault::LpaeTran);
1486 } else {
1487 insertTableEntry(currState->longDesc, true);
1488 }
1489 }
1490 return;
1491 case LongDescriptor::Table:
1492 {
1493 // Set hierarchical permission flags
1494 currState->secureLookup = currState->secureLookup &&
1495 currState->longDesc.secureTable();
1496 currState->rwTable = currState->rwTable &&
1497 currState->longDesc.rwTable();
1498 currState->userTable = currState->userTable &&
1499 currState->longDesc.userTable();
1500 currState->xnTable = currState->xnTable ||
1501 currState->longDesc.xnTable();
1502 currState->pxnTable = currState->pxnTable ||
1503 currState->longDesc.pxnTable();
1504
1505 // Set up next level lookup
1506 Addr next_desc_addr = currState->longDesc.nextDescAddr(
1507 currState->vaddr);
1508
1509 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1510 currState->longDesc.lookupLevel,
1511 currState->longDesc.lookupLevel + 1,
1512 next_desc_addr,
1513 currState->secureLookup ? "s" : "ns");
1514
1515 // Check for address size fault
1516 if (currState->aarch64 && checkAddrSizeFaultAArch64(
1517 next_desc_addr, currState->physAddrRange)) {
1518 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1519 currState->longDesc.lookupLevel);
1520 if (currState->isFetch)
1521 currState->fault = new PrefetchAbort(
1522 currState->vaddr_tainted,
1523 ArmFault::AddressSizeLL
1524 + currState->longDesc.lookupLevel,
1525 isStage2,
1526 ArmFault::LpaeTran);
1527 else
1528 currState->fault = new DataAbort(
1529 currState->vaddr_tainted,
1530 TlbEntry::DomainType::NoAccess, currState->isWrite,
1531 ArmFault::AddressSizeLL
1532 + currState->longDesc.lookupLevel,
1533 isStage2,
1534 ArmFault::LpaeTran);
1535 return;
1536 }
1537
1538 // Trickbox address check
1539 currState->fault = tlb->walkTrickBoxCheck(
1540 next_desc_addr, currState->vaddr,
1541 currState->vaddr, sizeof(uint64_t),
1542 currState->isFetch, currState->isWrite,
1543 TlbEntry::DomainType::Client,
1544 toLookupLevel(currState->longDesc.lookupLevel +1));
1545
1546 if (currState->fault) {
1547 if (!currState->timing) {
1548 currState->tc = NULL;
1549 currState->req = NULL;
1550 }
1551 return;
1552 }
1553
1554 Request::Flags flag = 0;
1555 if (currState->secureLookup)
1556 flag.set(Request::SECURE);
1557
1558 currState->longDesc.lookupLevel =
1559 (LookupLevel) (currState->longDesc.lookupLevel + 1);
1560 Event *event = NULL;
1561 switch (currState->longDesc.lookupLevel) {
1562 case L1:
1563 assert(currState->aarch64);
1564 event = &doL1LongDescEvent;
1565 break;
1566 case L2:
1567 event = &doL2LongDescEvent;
1568 break;
1569 case L3:
1570 event = &doL3LongDescEvent;
1571 break;
1572 default:
1573 panic("Wrong lookup level in table walk\n");
1574 break;
1575 }
1576
1577 bool delayed;
1578 delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1579 sizeof(uint64_t), flag, -1, event,
1580 &TableWalker::doLongDescriptor);
1581 if (delayed) {
1582 currState->delayed = true;
1583 }
1584 }
1585 return;
1586 default:
1587 panic("A new type in a 2 bit field?\n");
1588 }
1589}
1590
1591void
1592TableWalker::doL2Descriptor()
1593{
1594 if (currState->fault != NoFault) {
1595 return;
1596 }
1597
1598 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1599 currState->vaddr_tainted, currState->l2Desc.data);
1600 TlbEntry te;
1601
1602 if (currState->l2Desc.invalid()) {
1603 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1604 if (!currState->timing) {
1605 currState->tc = NULL;
1606 currState->req = NULL;
1607 }
1608 if (currState->isFetch)
1609 currState->fault =
1610 new PrefetchAbort(currState->vaddr_tainted,
1611 ArmFault::TranslationLL + L2,
1612 isStage2,
1613 ArmFault::VmsaTran);
1614 else
1615 currState->fault =
1616 new DataAbort(currState->vaddr_tainted, currState->l1Desc.domain(),
1617 currState->isWrite, ArmFault::TranslationLL + L2,
1618 isStage2,
1619 ArmFault::VmsaTran);
1620 return;
1621 }
1622
1623 if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1624 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled
1625 * if set, do l2.Desc.setAp0() instead of generating AccessFlag0
1626 */
1627 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1628 currState->sctlr.afe, currState->l2Desc.ap());
1629
1630 currState->fault =
1631 new DataAbort(currState->vaddr_tainted,
1632 TlbEntry::DomainType::NoAccess, currState->isWrite,
1633 ArmFault::AccessFlagLL + L2, isStage2,
1634 ArmFault::VmsaTran);
1635 }
1636
1637 insertTableEntry(currState->l2Desc, false);
1638}
1639
1640void
1641TableWalker::doL1DescriptorWrapper()
1642{
1643 currState = stateQueues[L1].front();
1644 currState->delayed = false;
1645 // if there's a stage2 translation object we don't need it any more
1646 if (currState->stage2Tran) {
1647 delete currState->stage2Tran;
1648 currState->stage2Tran = NULL;
1649 }
1650
1651
1652 DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data);
1653 DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",currState->l1Desc.data);
1654
1655 DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
1656 doL1Descriptor();
1657
1658 stateQueues[L1].pop_front();
1659 completeDrain();
1660 // Check if fault was generated
1661 if (currState->fault != NoFault) {
1662 currState->transState->finish(currState->fault, currState->req,
1663 currState->tc, currState->mode);
1664
1665 pending = false;
1666 nextWalk(currState->tc);
1667
1668 currState->req = NULL;
1669 currState->tc = NULL;
1670 currState->delayed = false;
1671 delete currState;
1672 }
1673 else if (!currState->delayed) {
1674 // delay is not set so there is no L2 to do
1675 // Don't finish the translation if a stage 2 look up is underway
1676 if (!currState->doingStage2) {
1677 DPRINTF(TLBVerbose, "calling translateTiming again\n");
1678 currState->fault = tlb->translateTiming(currState->req, currState->tc,
1679 currState->transState, currState->mode);
1680 }
1681
1682 pending = false;
1683 nextWalk(currState->tc);
1684
1685 currState->req = NULL;
1686 currState->tc = NULL;
1687 currState->delayed = false;
1688 delete currState;
1689 } else {
1690 // need to do L2 descriptor
1691 stateQueues[L2].push_back(currState);
1692 }
1693 currState = NULL;
1694}
1695
1696void
1697TableWalker::doL2DescriptorWrapper()
1698{
1699 currState = stateQueues[L2].front();
1700 assert(currState->delayed);
1701 // if there's a stage2 translation object we don't need it any more
1702 if (currState->stage2Tran) {
1703 delete currState->stage2Tran;
1704 currState->stage2Tran = NULL;
1705 }
1706
1707 DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
1708 currState->vaddr_tainted);
1709 doL2Descriptor();
1710
1711 // Check if fault was generated
1712 if (currState->fault != NoFault) {
1713 currState->transState->finish(currState->fault, currState->req,
1714 currState->tc, currState->mode);
1715 }
1716 else {
1717 // Don't finish the translation if a stage 2 look up is underway
1718 if (!currState->doingStage2) {
1719 DPRINTF(TLBVerbose, "calling translateTiming again\n");
1720 currState->fault = tlb->translateTiming(currState->req,
1721 currState->tc, currState->transState, currState->mode);
1722 }
1723 }
1724
1725
1726 stateQueues[L2].pop_front();
1727 completeDrain();
1728 pending = false;
1729 nextWalk(currState->tc);
1730
1731 currState->req = NULL;
1732 currState->tc = NULL;
1733 currState->delayed = false;
1734
1735 delete currState;
1736 currState = NULL;
1737}
1738
// Event callback: a level-0 long-format descriptor fetch completed.
void
TableWalker::doL0LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L0);
}
1744
// Event callback: a level-1 long-format descriptor fetch completed.
void
TableWalker::doL1LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L1);
}
1750
// Event callback: a level-2 long-format descriptor fetch completed.
void
TableWalker::doL2LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L2);
}
1756
// Event callback: a level-3 long-format descriptor fetch completed.
void
TableWalker::doL3LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L3);
}
1762
1763void
1764TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
1765{
1766 currState = stateQueues[curr_lookup_level].front();
1767 assert(curr_lookup_level == currState->longDesc.lookupLevel);
1768 currState->delayed = false;
1769
1770 // if there's a stage2 translation object we don't need it any more
1771 if (currState->stage2Tran) {
1772 delete currState->stage2Tran;
1773 currState->stage2Tran = NULL;
1774 }
1775
1776 DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
1777 currState->vaddr_tainted);
1778 doLongDescriptor();
1779
1780 stateQueues[curr_lookup_level].pop_front();
1781
1782 if (currState->fault != NoFault) {
1783 // A fault was generated
1784 currState->transState->finish(currState->fault, currState->req,
1785 currState->tc, currState->mode);
1786
1787 pending = false;
1788 nextWalk(currState->tc);
1789
1790 currState->req = NULL;
1791 currState->tc = NULL;
1792 currState->delayed = false;
1793 delete currState;
1794 } else if (!currState->delayed) {
1795 // No additional lookups required
1796 // Don't finish the translation if a stage 2 look up is underway
1797 if (!currState->doingStage2) {
1798 DPRINTF(TLBVerbose, "calling translateTiming again\n");
1799 currState->fault = tlb->translateTiming(currState->req, currState->tc,
1800 currState->transState,
1801 currState->mode);
1802 }
1803
1804 pending = false;
1805 nextWalk(currState->tc);
1806
1807 currState->req = NULL;
1808 currState->tc = NULL;
1809 currState->delayed = false;
1810 delete currState;
1811 } else {
1812 if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
1813 panic("Max. number of lookups already reached in table walk\n");
1814 // Need to perform additional lookups
1815 stateQueues[currState->longDesc.lookupLevel].push_back(currState);
1816 }
1817 currState = NULL;
1818}
1819
1820
1821void
1822TableWalker::nextWalk(ThreadContext *tc)
1823{
1824 if (pendingQueue.size())
1825 schedule(doProcessEvent, clockEdge(Cycles(1)));
1826}
1827
// Issue the memory read for a page table descriptor.
//   descAddr     (intermediate) physical address of the descriptor
//   data/numBytes  destination buffer and descriptor size
//   flags        request flags (e.g. Request::SECURE)
//   queueIndex   if >= 0 and the walk is timing, the walker state is parked
//                on stateQueues[queueIndex] until 'event' fires
//   event        completion event used for timing accesses
//   doDescriptor processing function invoked directly when the data is
//                available on return (atomic/functional accesses)
// Returns true if the access is timing, i.e. completion is deferred to
// 'event'; false if the descriptor has been processed before returning.
bool
TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
                             Request::Flags flags, int queueIndex, Event *event,
                             void (TableWalker::*doDescriptor)())
{
    bool isTiming = currState->timing;

    // do the requests for the page table descriptors have to go through the
    // second stage MMU
    if (currState->stage2Req) {
        Fault fault;
        flags = flags | TLB::MustBeOne;

        if (isTiming) {
            // The translation object is kept on currState and freed by the
            // wrapper that handles the completion event.
            Stage2MMU::Stage2Translation *tran = new
                Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
                                             currState->vaddr);
            currState->stage2Tran = tran;
            stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
                                     flags, masterId);
            fault = tran->fault;
        } else {
            fault = stage2Mmu->readDataUntimed(currState->tc,
                currState->vaddr, descAddr, data, numBytes, flags, masterId,
                currState->functional);
        }

        if (fault != NoFault) {
            currState->fault = fault;
        }
        if (isTiming) {
            if (queueIndex >= 0) {
                // Park the walk state until the completion event fires
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else {
            // Data is available now; process the descriptor immediately
            (this->*doDescriptor)();
        }
    } else {
        if (isTiming) {
            port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            if (queueIndex >= 0) {
                // Park the walk state until the DMA completion event fires
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else if (!currState->functional) {
            // Atomic access: blocking DMA read, then process immediately
            port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            (this->*doDescriptor)();
        } else {
            // Functional access: build the packet by hand and bypass the
            // DMA port's timing machinery
            RequestPtr req = new Request(descAddr, numBytes, flags, masterId);
            req->taskId(ContextSwitchTaskId::DMA);
            PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
            pkt->dataStatic(data);
            port.sendFunctional(pkt);
            (this->*doDescriptor)();
            delete req;
            delete pkt;
        }
    }
    return (isTiming);
}
1895
// Build a TLB entry from the final descriptor of a completed walk and
// insert it into the TLB. 'longDescriptor' selects between short-descriptor
// (VMSAv7) and long-descriptor (LPAE/AArch64) permission/attribute decoding.
void
TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
{
    TlbEntry te;

    // Create and fill a new page table entry
    te.valid = true;
    te.longDescFormat = longDescriptor;
    te.isHyp = currState->isHyp;
    te.asid = currState->asid;
    te.vmid = currState->vmid;
    // N is the number of offset bits covered by this mapping
    te.N = descriptor.offsetBits();
    te.vpn = currState->vaddr >> te.N;
    te.size = (1<<te.N) - 1;
    te.pfn = descriptor.pfn();
    te.domain = descriptor.domain();
    te.lookupLevel = descriptor.lookupLevel;
    te.ns = !descriptor.secure(haveSecurity, currState) || isStage2;
    te.nstid = !currState->isSecure;
    te.xn = descriptor.xn();
    if (currState->aarch64)
        te.el = currState->el;
    else
        te.el = 1;

    // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
    // as global
    te.global = descriptor.global(currState) || isStage2;
    if (longDescriptor) {
        LongDescriptor lDescriptor =
            dynamic_cast<LongDescriptor &>(descriptor);

        // Merge in the hierarchical (table-level) execute-never bits
        // accumulated during the walk
        te.xn |= currState->xnTable;
        te.pxn = currState->pxnTable || lDescriptor.pxn();
        if (isStage2) {
            // this is actually the HAP field, but it's stored in the same
            // bit positions as the AP field in a stage 1 translation.
            te.hap = lDescriptor.ap();
        } else {
            // Combine the descriptor AP with the hierarchical table
            // restrictions: rwTable forces read-only, userTable gates
            // unprivileged access
            te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
                (currState->userTable && (descriptor.ap() & 0x1));
        }
        if (currState->aarch64)
            memAttrsAArch64(currState->tc, te, currState->longDesc.attrIndx(),
                            currState->longDesc.sh());
        else
            memAttrsLPAE(currState->tc, te, lDescriptor);
    } else {
        te.ap = descriptor.ap();
        memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
                 descriptor.shareable());
    }

    // Debug output
    DPRINTF(TLB, descriptor.dbgHeader().c_str());
    DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
            te.N, te.pfn, te.size, te.global, te.valid);
    DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
            "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
            te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
            te.nonCacheable, te.ns);
    DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
            descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
            descriptor.getRawData());

    // Insert the entry into the TLB
    tlb->insert(currState->vaddr, te);
    if (!currState->timing) {
        // Non-timing walks release their tc/req references once done
        currState->tc = NULL;
        currState->req = NULL;
    }
}
1968
// Factory hook invoked by the Python configuration system to instantiate a
// TableWalker from its parameter struct.
ArmISA::TableWalker *
ArmTableWalkerParams::create()
{
    return new ArmISA::TableWalker(this);
}
1974
1975LookupLevel
1976TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
1977{
1978 switch (lookup_level_as_int) {
1979 case L1:
1980 return L1;
1981 case L2:
1982 return L2;
1983 case L3:
1984 return L3;
1985 default:
1986 panic("Invalid lookup level conversion");
1987 }
1988}