table_walker.cc (10037:5cac77888310) table_walker.cc (10109:b58c5c5854de)
1/*
2 * Copyright (c) 2010, 2012-2013 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Ali Saidi
38 * Giacomo Gabrielli
39 */
40
41#include "arch/arm/faults.hh"
42#include "arch/arm/stage2_mmu.hh"
43#include "arch/arm/system.hh"
44#include "arch/arm/table_walker.hh"
45#include "arch/arm/tlb.hh"
46#include "cpu/base.hh"
47#include "cpu/thread_context.hh"
48#include "debug/Checkpoint.hh"
49#include "debug/Drain.hh"
50#include "debug/TLB.hh"
51#include "debug/TLBVerbose.hh"
52#include "sim/system.hh"
53
54using namespace ArmISA;
55
56TableWalker::TableWalker(const Params *p)
57 : MemObject(p), port(this, p->sys), drainManager(NULL),
58 stage2Mmu(NULL), isStage2(p->is_stage2), tlb(NULL),
59 currState(NULL), pending(false), masterId(p->sys->getMasterId(name())),
60 numSquashable(p->num_squash_per_cycle),
61 doL1DescEvent(this), doL2DescEvent(this),
62 doL0LongDescEvent(this), doL1LongDescEvent(this), doL2LongDescEvent(this),
63 doL3LongDescEvent(this),
64 doProcessEvent(this)
65{
66 sctlr = 0;
67
68 // Cache system-level properties
69 if (FullSystem) {
70 armSys = dynamic_cast<ArmSystem *>(p->sys);
71 assert(armSys);
72 haveSecurity = armSys->haveSecurity();
73 _haveLPAE = armSys->haveLPAE();
74 _haveVirtualization = armSys->haveVirtualization();
75 physAddrRange = armSys->physAddrRange();
76 _haveLargeAsid64 = armSys->haveLargeAsid64();
77 } else {
78 armSys = NULL;
79 haveSecurity = _haveLPAE = _haveVirtualization = false;
80 _haveLargeAsid64 = false;
81 physAddrRange = 32;
82 }
83
84}
85
86TableWalker::~TableWalker()
87{
88 ;
89}
90
91TableWalker::WalkerState::WalkerState() : stage2Tran(NULL), l2Desc(l1Desc)
92{
93}
94
95void
96TableWalker::completeDrain()
97{
98 if (drainManager && stateQueues[L1].empty() && stateQueues[L2].empty() &&
99 pendingQueue.empty()) {
100 setDrainState(Drainable::Drained);
101 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
102 drainManager->signalDrainDone();
103 drainManager = NULL;
104 }
105}
106
107unsigned int
108TableWalker::drain(DrainManager *dm)
109{
110 unsigned int count = port.drain(dm);
111
112 bool state_queues_not_empty = false;
113
114 for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
115 if (!stateQueues[i].empty()) {
116 state_queues_not_empty = true;
117 break;
118 }
119 }
120
121 if (state_queues_not_empty || pendingQueue.size()) {
122 drainManager = dm;
123 setDrainState(Drainable::Draining);
124 DPRINTF(Drain, "TableWalker not drained\n");
125
126 // return port drain count plus the table walker itself needs to drain
127 return count + 1;
128 } else {
129 setDrainState(Drainable::Drained);
130 DPRINTF(Drain, "TableWalker free, no need to drain\n");
131
132 // table walker is drained, but its ports may still need to be drained
133 return count;
134 }
135}
136
137void
138TableWalker::drainResume()
139{
140 Drainable::drainResume();
141 if (params()->sys->isTimingMode() && currState) {
142 delete currState;
143 currState = NULL;
144 }
145}
146
147BaseMasterPort&
148TableWalker::getMasterPort(const std::string &if_name, PortID idx)
149{
150 if (if_name == "port") {
151 return port;
152 }
153 return MemObject::getMasterPort(if_name, idx);
154}
155
156Fault
157TableWalker::walk(RequestPtr _req, ThreadContext *_tc, uint16_t _asid,
158 uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
159 TLB::Translation *_trans, bool _timing, bool _functional,
160 bool secure, TLB::ArmTranslationType tranType)
161{
162 assert(!(_functional && _timing));
1/*
2 * Copyright (c) 2010, 2012-2013 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Ali Saidi
38 * Giacomo Gabrielli
39 */
40
41#include "arch/arm/faults.hh"
42#include "arch/arm/stage2_mmu.hh"
43#include "arch/arm/system.hh"
44#include "arch/arm/table_walker.hh"
45#include "arch/arm/tlb.hh"
46#include "cpu/base.hh"
47#include "cpu/thread_context.hh"
48#include "debug/Checkpoint.hh"
49#include "debug/Drain.hh"
50#include "debug/TLB.hh"
51#include "debug/TLBVerbose.hh"
52#include "sim/system.hh"
53
54using namespace ArmISA;
55
56TableWalker::TableWalker(const Params *p)
57 : MemObject(p), port(this, p->sys), drainManager(NULL),
58 stage2Mmu(NULL), isStage2(p->is_stage2), tlb(NULL),
59 currState(NULL), pending(false), masterId(p->sys->getMasterId(name())),
60 numSquashable(p->num_squash_per_cycle),
61 doL1DescEvent(this), doL2DescEvent(this),
62 doL0LongDescEvent(this), doL1LongDescEvent(this), doL2LongDescEvent(this),
63 doL3LongDescEvent(this),
64 doProcessEvent(this)
65{
66 sctlr = 0;
67
68 // Cache system-level properties
69 if (FullSystem) {
70 armSys = dynamic_cast<ArmSystem *>(p->sys);
71 assert(armSys);
72 haveSecurity = armSys->haveSecurity();
73 _haveLPAE = armSys->haveLPAE();
74 _haveVirtualization = armSys->haveVirtualization();
75 physAddrRange = armSys->physAddrRange();
76 _haveLargeAsid64 = armSys->haveLargeAsid64();
77 } else {
78 armSys = NULL;
79 haveSecurity = _haveLPAE = _haveVirtualization = false;
80 _haveLargeAsid64 = false;
81 physAddrRange = 32;
82 }
83
84}
85
86TableWalker::~TableWalker()
87{
88 ;
89}
90
91TableWalker::WalkerState::WalkerState() : stage2Tran(NULL), l2Desc(l1Desc)
92{
93}
94
95void
96TableWalker::completeDrain()
97{
98 if (drainManager && stateQueues[L1].empty() && stateQueues[L2].empty() &&
99 pendingQueue.empty()) {
100 setDrainState(Drainable::Drained);
101 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
102 drainManager->signalDrainDone();
103 drainManager = NULL;
104 }
105}
106
107unsigned int
108TableWalker::drain(DrainManager *dm)
109{
110 unsigned int count = port.drain(dm);
111
112 bool state_queues_not_empty = false;
113
114 for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
115 if (!stateQueues[i].empty()) {
116 state_queues_not_empty = true;
117 break;
118 }
119 }
120
121 if (state_queues_not_empty || pendingQueue.size()) {
122 drainManager = dm;
123 setDrainState(Drainable::Draining);
124 DPRINTF(Drain, "TableWalker not drained\n");
125
126 // return port drain count plus the table walker itself needs to drain
127 return count + 1;
128 } else {
129 setDrainState(Drainable::Drained);
130 DPRINTF(Drain, "TableWalker free, no need to drain\n");
131
132 // table walker is drained, but its ports may still need to be drained
133 return count;
134 }
135}
136
137void
138TableWalker::drainResume()
139{
140 Drainable::drainResume();
141 if (params()->sys->isTimingMode() && currState) {
142 delete currState;
143 currState = NULL;
144 }
145}
146
147BaseMasterPort&
148TableWalker::getMasterPort(const std::string &if_name, PortID idx)
149{
150 if (if_name == "port") {
151 return port;
152 }
153 return MemObject::getMasterPort(if_name, idx);
154}
155
156Fault
157TableWalker::walk(RequestPtr _req, ThreadContext *_tc, uint16_t _asid,
158 uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
159 TLB::Translation *_trans, bool _timing, bool _functional,
160 bool secure, TLB::ArmTranslationType tranType)
161{
162 assert(!(_functional && _timing));
163 WalkerState *savedCurrState = NULL;
163
164
164 if (!currState) {
165 if (!currState && !_functional) {
165 // For atomic mode, a new WalkerState instance should be only created
166 // once per TLB. For timing mode, a new instance is generated for every
167 // TLB miss.
168 DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");
169
170 currState = new WalkerState();
171 currState->tableWalker = this;
166 // For atomic mode, a new WalkerState instance should be only created
167 // once per TLB. For timing mode, a new instance is generated for every
168 // TLB miss.
169 DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");
170
171 currState = new WalkerState();
172 currState->tableWalker = this;
173 } else if (_functional) {
174 // If we are mixing functional mode with timing (or even
175 // atomic), we need to to be careful and clean up after
176 // ourselves to not risk getting into an inconsistent state.
177 DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
178 savedCurrState = currState;
179 currState = new WalkerState();
180 currState->tableWalker = this;
172 } else if (_timing) {
173 // This is a translation that was completed and then faulted again
174 // because some underlying parameters that affect the translation
175 // changed out from under us (e.g. asid). It will either be a
176 // misprediction, in which case nothing will happen or we'll use
177 // this fault to re-execute the faulting instruction which should clean
178 // up everything.
179 if (currState->vaddr_tainted == _req->getVaddr()) {
180 return new ReExec;
181 }
182 }
183
184 currState->tc = _tc;
185 currState->aarch64 = opModeIs64(currOpMode(_tc));
186 currState->el = currEL(_tc);
187 currState->transState = _trans;
188 currState->req = _req;
189 currState->fault = NoFault;
190 currState->asid = _asid;
191 currState->vmid = _vmid;
192 currState->isHyp = _isHyp;
193 currState->timing = _timing;
194 currState->functional = _functional;
195 currState->mode = _mode;
196 currState->tranType = tranType;
197 currState->isSecure = secure;
198 currState->physAddrRange = physAddrRange;
199
200 /** @todo These should be cached or grabbed from cached copies in
201 the TLB, all these miscreg reads are expensive */
202 currState->vaddr_tainted = currState->req->getVaddr();
203 if (currState->aarch64)
204 currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
205 currState->tc, currState->el);
206 else
207 currState->vaddr = currState->vaddr_tainted;
208
209 if (currState->aarch64) {
210 switch (currState->el) {
211 case EL0:
212 case EL1:
213 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
214 currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
215 break;
216 // @todo: uncomment this to enable Virtualization
217 // case EL2:
218 // assert(haveVirtualization);
219 // currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
220 // currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
221 // break;
222 case EL3:
223 assert(haveSecurity);
224 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
225 currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
226 break;
227 default:
228 panic("Invalid exception level");
229 break;
230 }
231 } else {
232 currState->sctlr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
233 MISCREG_SCTLR, currState->tc, !currState->isSecure));
234 currState->ttbcr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
235 MISCREG_TTBCR, currState->tc, !currState->isSecure));
236 currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
237 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
238 currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
239 }
240 sctlr = currState->sctlr;
241
242 currState->isFetch = (currState->mode == TLB::Execute);
243 currState->isWrite = (currState->mode == TLB::Write);
244
245 // We only do a second stage of translation if we're not secure, or in
246 // hyp mode, the second stage MMU is enabled, and this table walker
247 // instance is the first stage.
248 currState->doingStage2 = false;
249 // @todo: for now disable this in AArch64 (HCR is not set)
250 currState->stage2Req = !currState->aarch64 && currState->hcr.vm &&
251 !isStage2 && !currState->isSecure && !currState->isHyp;
252
253 bool long_desc_format = currState->aarch64 ||
254 (_haveLPAE && currState->ttbcr.eae) ||
255 _isHyp || isStage2;
256
257 if (long_desc_format) {
258 // Helper variables used for hierarchical permissions
259 currState->secureLookup = currState->isSecure;
260 currState->rwTable = true;
261 currState->userTable = true;
262 currState->xnTable = false;
263 currState->pxnTable = false;
264 }
265
266 if (!currState->timing) {
181 } else if (_timing) {
182 // This is a translation that was completed and then faulted again
183 // because some underlying parameters that affect the translation
184 // changed out from under us (e.g. asid). It will either be a
185 // misprediction, in which case nothing will happen or we'll use
186 // this fault to re-execute the faulting instruction which should clean
187 // up everything.
188 if (currState->vaddr_tainted == _req->getVaddr()) {
189 return new ReExec;
190 }
191 }
192
193 currState->tc = _tc;
194 currState->aarch64 = opModeIs64(currOpMode(_tc));
195 currState->el = currEL(_tc);
196 currState->transState = _trans;
197 currState->req = _req;
198 currState->fault = NoFault;
199 currState->asid = _asid;
200 currState->vmid = _vmid;
201 currState->isHyp = _isHyp;
202 currState->timing = _timing;
203 currState->functional = _functional;
204 currState->mode = _mode;
205 currState->tranType = tranType;
206 currState->isSecure = secure;
207 currState->physAddrRange = physAddrRange;
208
209 /** @todo These should be cached or grabbed from cached copies in
210 the TLB, all these miscreg reads are expensive */
211 currState->vaddr_tainted = currState->req->getVaddr();
212 if (currState->aarch64)
213 currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
214 currState->tc, currState->el);
215 else
216 currState->vaddr = currState->vaddr_tainted;
217
218 if (currState->aarch64) {
219 switch (currState->el) {
220 case EL0:
221 case EL1:
222 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
223 currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
224 break;
225 // @todo: uncomment this to enable Virtualization
226 // case EL2:
227 // assert(haveVirtualization);
228 // currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
229 // currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
230 // break;
231 case EL3:
232 assert(haveSecurity);
233 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
234 currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
235 break;
236 default:
237 panic("Invalid exception level");
238 break;
239 }
240 } else {
241 currState->sctlr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
242 MISCREG_SCTLR, currState->tc, !currState->isSecure));
243 currState->ttbcr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
244 MISCREG_TTBCR, currState->tc, !currState->isSecure));
245 currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
246 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
247 currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
248 }
249 sctlr = currState->sctlr;
250
251 currState->isFetch = (currState->mode == TLB::Execute);
252 currState->isWrite = (currState->mode == TLB::Write);
253
254 // We only do a second stage of translation if we're not secure, or in
255 // hyp mode, the second stage MMU is enabled, and this table walker
256 // instance is the first stage.
257 currState->doingStage2 = false;
258 // @todo: for now disable this in AArch64 (HCR is not set)
259 currState->stage2Req = !currState->aarch64 && currState->hcr.vm &&
260 !isStage2 && !currState->isSecure && !currState->isHyp;
261
262 bool long_desc_format = currState->aarch64 ||
263 (_haveLPAE && currState->ttbcr.eae) ||
264 _isHyp || isStage2;
265
266 if (long_desc_format) {
267 // Helper variables used for hierarchical permissions
268 currState->secureLookup = currState->isSecure;
269 currState->rwTable = true;
270 currState->userTable = true;
271 currState->xnTable = false;
272 currState->pxnTable = false;
273 }
274
275 if (!currState->timing) {
276 Fault fault = NoFault;
267 if (currState->aarch64)
277 if (currState->aarch64)
268 return processWalkAArch64();
278 fault = processWalkAArch64();
269 else if (long_desc_format)
279 else if (long_desc_format)
270 return processWalkLPAE();
280 fault = processWalkLPAE();
271 else
281 else
272 return processWalk();
282 fault = processWalk();
283
284 // If this was a functional non-timing access restore state to
285 // how we found it.
286 if (currState->functional) {
287 delete currState;
288 currState = savedCurrState;
289 }
290 return fault;
273 }
274
275 if (pending || pendingQueue.size()) {
276 pendingQueue.push_back(currState);
277 currState = NULL;
278 } else {
279 pending = true;
280 if (currState->aarch64)
281 return processWalkAArch64();
282 else if (long_desc_format)
283 return processWalkLPAE();
284 else
285 return processWalk();
286 }
287
288 return NoFault;
289}
290
291void
292TableWalker::processWalkWrapper()
293{
294 assert(!currState);
295 assert(pendingQueue.size());
296 currState = pendingQueue.front();
297
298 ExceptionLevel target_el = EL0;
299 if (currState->aarch64)
300 target_el = currEL(currState->tc);
301 else
302 target_el = EL1;
303
304 // Check if a previous walk filled this request already
305 // @TODO Should this always be the TLB or should we look in the stage2 TLB?
306 TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
307 currState->vmid, currState->isHyp, currState->isSecure, true, false,
308 target_el);
309
310 // Check if we still need to have a walk for this request. If the requesting
311 // instruction has been squashed, or a previous walk has filled the TLB with
312 // a match, we just want to get rid of the walk. The latter could happen
313 // when there are multiple outstanding misses to a single page and a
314 // previous request has been successfully translated.
315 if (!currState->transState->squashed() && !te) {
316 // We've got a valid request, lets process it
317 pending = true;
318 pendingQueue.pop_front();
319 if (currState->aarch64)
320 processWalkAArch64();
321 else if ((_haveLPAE && currState->ttbcr.eae) || currState->isHyp || isStage2)
322 processWalkLPAE();
323 else
324 processWalk();
325 return;
326 }
327
328
329 // If the instruction that we were translating for has been
330 // squashed we shouldn't bother.
331 unsigned num_squashed = 0;
332 ThreadContext *tc = currState->tc;
333 while ((num_squashed < numSquashable) && currState &&
334 (currState->transState->squashed() || te)) {
335 pendingQueue.pop_front();
336 num_squashed++;
337
338 DPRINTF(TLB, "Squashing table walk for address %#x\n",
339 currState->vaddr_tainted);
340
341 if (currState->transState->squashed()) {
342 // finish the translation which will delete the translation object
343 currState->transState->finish(new UnimpFault("Squashed Inst"),
344 currState->req, currState->tc, currState->mode);
345 } else {
346 // translate the request now that we know it will work
347 tlb->translateTiming(currState->req, currState->tc,
348 currState->transState, currState->mode);
349
350 }
351
352 // delete the current request
353 delete currState;
354
355 // peak at the next one
356 if (pendingQueue.size()) {
357 currState = pendingQueue.front();
358 te = tlb->lookup(currState->vaddr, currState->asid,
359 currState->vmid, currState->isHyp, currState->isSecure, true,
360 false, target_el);
361 } else {
362 // Terminate the loop, nothing more to do
363 currState = NULL;
364 }
365 }
366
367 // if we've still got pending translations schedule more work
368 nextWalk(tc);
369 currState = NULL;
370 completeDrain();
371}
372
373Fault
374TableWalker::processWalk()
375{
376 Addr ttbr = 0;
377
378 // If translation isn't enabled, we shouldn't be here
379 assert(currState->sctlr.m || isStage2);
380
381 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
382 currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
383 32 - currState->ttbcr.n));
384
385 if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
386 32 - currState->ttbcr.n)) {
387 DPRINTF(TLB, " - Selecting TTBR0\n");
388 // Check if table walk is allowed when Security Extensions are enabled
389 if (haveSecurity && currState->ttbcr.pd0) {
390 if (currState->isFetch)
391 return new PrefetchAbort(currState->vaddr_tainted,
392 ArmFault::TranslationLL + L1,
393 isStage2,
394 ArmFault::VmsaTran);
395 else
396 return new DataAbort(currState->vaddr_tainted,
397 TlbEntry::DomainType::NoAccess, currState->isWrite,
398 ArmFault::TranslationLL + L1, isStage2,
399 ArmFault::VmsaTran);
400 }
401 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
402 MISCREG_TTBR0, currState->tc, !currState->isSecure));
403 } else {
404 DPRINTF(TLB, " - Selecting TTBR1\n");
405 // Check if table walk is allowed when Security Extensions are enabled
406 if (haveSecurity && currState->ttbcr.pd1) {
407 if (currState->isFetch)
408 return new PrefetchAbort(currState->vaddr_tainted,
409 ArmFault::TranslationLL + L1,
410 isStage2,
411 ArmFault::VmsaTran);
412 else
413 return new DataAbort(currState->vaddr_tainted,
414 TlbEntry::DomainType::NoAccess, currState->isWrite,
415 ArmFault::TranslationLL + L1, isStage2,
416 ArmFault::VmsaTran);
417 }
418 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
419 MISCREG_TTBR1, currState->tc, !currState->isSecure));
420 currState->ttbcr.n = 0;
421 }
422
423 Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
424 (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
425 DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
426 currState->isSecure ? "s" : "ns");
427
428 // Trickbox address check
429 Fault f;
430 f = tlb->walkTrickBoxCheck(l1desc_addr, currState->isSecure,
431 currState->vaddr, sizeof(uint32_t), currState->isFetch,
432 currState->isWrite, TlbEntry::DomainType::NoAccess, L1);
433 if (f) {
434 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
435 if (currState->timing) {
436 pending = false;
437 nextWalk(currState->tc);
438 currState = NULL;
439 } else {
440 currState->tc = NULL;
441 currState->req = NULL;
442 }
443 return f;
444 }
445
446 Request::Flags flag = 0;
447 if (currState->sctlr.c == 0) {
448 flag = Request::UNCACHEABLE;
449 }
450
451 bool delayed;
452 delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
453 sizeof(uint32_t), flag, L1, &doL1DescEvent,
454 &TableWalker::doL1Descriptor);
455 if (!delayed) {
456 f = currState->fault;
457 }
458
459 return f;
460}
461
462Fault
463TableWalker::processWalkLPAE()
464{
465 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
466 int tsz, n;
467 LookupLevel start_lookup_level = L1;
468
469 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
470 currState->vaddr_tainted, currState->ttbcr);
471
472 Request::Flags flag = 0;
473 if (currState->isSecure)
474 flag.set(Request::SECURE);
475
476 // work out which base address register to use, if in hyp mode we always
477 // use HTTBR
478 if (isStage2) {
479 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
480 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
481 tsz = sext<4>(currState->vtcr.t0sz);
482 start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
483 } else if (currState->isHyp) {
484 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
485 ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
486 tsz = currState->htcr.t0sz;
487 } else {
488 assert(_haveLPAE && currState->ttbcr.eae);
489
490 // Determine boundaries of TTBR0/1 regions
491 if (currState->ttbcr.t0sz)
492 ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
493 else if (currState->ttbcr.t1sz)
494 ttbr0_max = (1ULL << 32) -
495 (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
496 else
497 ttbr0_max = (1ULL << 32) - 1;
498 if (currState->ttbcr.t1sz)
499 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
500 else
501 ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
502
503 // The following code snippet selects the appropriate translation table base
504 // address (TTBR0 or TTBR1) and the appropriate starting lookup level
505 // depending on the address range supported by the translation table (ARM
506 // ARM issue C B3.6.4)
507 if (currState->vaddr <= ttbr0_max) {
508 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
509 // Check if table walk is allowed
510 if (currState->ttbcr.epd0) {
511 if (currState->isFetch)
512 return new PrefetchAbort(currState->vaddr_tainted,
513 ArmFault::TranslationLL + L1,
514 isStage2,
515 ArmFault::LpaeTran);
516 else
517 return new DataAbort(currState->vaddr_tainted,
518 TlbEntry::DomainType::NoAccess,
519 currState->isWrite,
520 ArmFault::TranslationLL + L1,
521 isStage2,
522 ArmFault::LpaeTran);
523 }
524 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
525 MISCREG_TTBR0, currState->tc, !currState->isSecure));
526 tsz = currState->ttbcr.t0sz;
527 if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GB
528 start_lookup_level = L2;
529 } else if (currState->vaddr >= ttbr1_min) {
530 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
531 // Check if table walk is allowed
532 if (currState->ttbcr.epd1) {
533 if (currState->isFetch)
534 return new PrefetchAbort(currState->vaddr_tainted,
535 ArmFault::TranslationLL + L1,
536 isStage2,
537 ArmFault::LpaeTran);
538 else
539 return new DataAbort(currState->vaddr_tainted,
540 TlbEntry::DomainType::NoAccess,
541 currState->isWrite,
542 ArmFault::TranslationLL + L1,
543 isStage2,
544 ArmFault::LpaeTran);
545 }
546 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
547 MISCREG_TTBR1, currState->tc, !currState->isSecure));
548 tsz = currState->ttbcr.t1sz;
549 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30)) // Lower limit >= 3 GB
550 start_lookup_level = L2;
551 } else {
552 // Out of boundaries -> translation fault
553 if (currState->isFetch)
554 return new PrefetchAbort(currState->vaddr_tainted,
555 ArmFault::TranslationLL + L1,
556 isStage2,
557 ArmFault::LpaeTran);
558 else
559 return new DataAbort(currState->vaddr_tainted,
560 TlbEntry::DomainType::NoAccess,
561 currState->isWrite, ArmFault::TranslationLL + L1,
562 isStage2, ArmFault::LpaeTran);
563 }
564
565 }
566
567 // Perform lookup (ARM ARM issue C B3.6.6)
568 if (start_lookup_level == L1) {
569 n = 5 - tsz;
570 desc_addr = mbits(ttbr, 39, n) |
571 (bits(currState->vaddr, n + 26, 30) << 3);
572 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
573 desc_addr, currState->isSecure ? "s" : "ns");
574 } else {
575 // Skip first-level lookup
576 n = (tsz >= 2 ? 14 - tsz : 12);
577 desc_addr = mbits(ttbr, 39, n) |
578 (bits(currState->vaddr, n + 17, 21) << 3);
579 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
580 desc_addr, currState->isSecure ? "s" : "ns");
581 }
582
583 // Trickbox address check
584 Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
585 currState->vaddr, sizeof(uint64_t), currState->isFetch,
586 currState->isWrite, TlbEntry::DomainType::NoAccess,
587 start_lookup_level);
588 if (f) {
589 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
590 if (currState->timing) {
591 pending = false;
592 nextWalk(currState->tc);
593 currState = NULL;
594 } else {
595 currState->tc = NULL;
596 currState->req = NULL;
597 }
598 return f;
599 }
600
601 if (currState->sctlr.c == 0) {
602 flag = Request::UNCACHEABLE;
603 }
604
605 if (currState->isSecure)
606 flag.set(Request::SECURE);
607
608 currState->longDesc.lookupLevel = start_lookup_level;
609 currState->longDesc.aarch64 = false;
610 currState->longDesc.largeGrain = false;
611 currState->longDesc.grainSize = 12;
612
613 Event *event = start_lookup_level == L1 ? (Event *) &doL1LongDescEvent
614 : (Event *) &doL2LongDescEvent;
615
616 bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
617 sizeof(uint64_t), flag, start_lookup_level,
618 event, &TableWalker::doLongDescriptor);
619 if (!delayed) {
620 f = currState->fault;
621 }
622
623 return f;
624}
625
626unsigned
627TableWalker::adjustTableSizeAArch64(unsigned tsz)
628{
629 if (tsz < 25)
630 return 25;
631 if (tsz > 48)
632 return 48;
633 return tsz;
634}
635
636bool
637TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
638{
639 return (currPhysAddrRange != MaxPhysAddrRange &&
640 bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
641}
642
/**
 * Start an AArch64 translation table walk for the current walk state.
 *
 * Selects the translation table base register, table size, granule size
 * and physical address range from the current exception level and TTBCR,
 * computes the starting lookup level and first descriptor address,
 * performs the pre-walk checks (out-of-range VA, address size fault,
 * trickbox check) and then issues the first descriptor fetch either
 * timed (via DMA + event), atomic, or functional.
 *
 * @return the fault raised by the pre-walk checks, or (for non-timing
 *         walks) by descriptor processing; NoFault otherwise.
 */
Fault
TableWalker::processWalkAArch64()
{
    assert(currState->aarch64);

    DPRINTF(TLB, "Beginning table walk for address %#llx, TTBCR: %#llx\n",
            currState->vaddr_tainted, currState->ttbcr);

    // Determine TTBR, table size, granule size and phys. address range
    Addr ttbr = 0;
    int tsz = 0, ps = 0;
    bool large_grain = false;
    bool fault = false;
    switch (currState->el) {
      case EL0:
      case EL1:
        // EL0/EL1 have two table base registers: TTBR0 covers the lower
        // VA range (top 16 bits all zero), TTBR1 the upper range (top 16
        // bits all one).
        switch (bits(currState->vaddr, 63,48)) {
          case 0:
            DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
            tsz = adjustTableSizeAArch64(64 - currState->ttbcr.t0sz);
            large_grain = currState->ttbcr.tg0;
            // Translation fault if the VA has bits set above the
            // configured region size or TTBR0 walks are disabled (EPD0).
            if (bits(currState->vaddr, 63, tsz) != 0x0 ||
                currState->ttbcr.epd0)
                fault = true;
            break;
          case 0xffff:
            DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
            tsz = adjustTableSizeAArch64(64 - currState->ttbcr.t1sz);
            large_grain = currState->ttbcr.tg1;
            // Upper range: all VA bits above tsz must be ones.
            if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
                currState->ttbcr.epd1)
                fault = true;
            break;
          default:
            // top two bytes must be all 0s or all 1s, else invalid addr
            fault = true;
        }
        ps = currState->ttbcr.ips;
        break;
      case EL2:
      case EL3:
        // EL2/EL3 only have TTBR0; only the lower VA range is valid.
        switch(bits(currState->vaddr, 63,48)) {
          case 0:
            DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
            if (currState->el == EL2)
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
            else
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
            tsz = adjustTableSizeAArch64(64 - currState->ttbcr.t0sz);
            large_grain = currState->ttbcr.tg0;
            break;
          default:
            // invalid addr if top two bytes are not all 0s
            fault = true;
        }
        ps = currState->ttbcr.ps;
        break;
    }

    // Out-of-range VA or disabled table register: report a level-0
    // translation fault and tear down the walk state.
    if (fault) {
        Fault f;
        if (currState->isFetch)
            f = new PrefetchAbort(currState->vaddr_tainted,
                                  ArmFault::TranslationLL + L0, isStage2,
                                  ArmFault::LpaeTran);
        else
            f = new DataAbort(currState->vaddr_tainted,
                              TlbEntry::DomainType::NoAccess,
                              currState->isWrite,
                              ArmFault::TranslationLL + L0,
                              isStage2, ArmFault::LpaeTran);

        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;

    }

    // Determine starting lookup level
    LookupLevel start_lookup_level;
    int grain_size, stride;
    if (large_grain) { // 64 KB granule
        grain_size = 16;
        stride = grain_size - 3;
        // The smaller the region (larger tsz), the fewer levels needed.
        if (tsz > grain_size + 2 * stride)
            start_lookup_level = L1;
        else if (tsz > grain_size + stride)
            start_lookup_level = L2;
        else
            start_lookup_level = L3;
    } else { // 4 KB granule
        grain_size = 12;
        stride = grain_size - 3;
        if (tsz > grain_size + 3 * stride)
            start_lookup_level = L0;
        else if (tsz > grain_size + 2 * stride)
            start_lookup_level = L1;
        else
            start_lookup_level = L2;
    }

    // Determine table base address
    int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) -
        grain_size;
    Addr base_addr = mbits(ttbr, 47, base_addr_lo);

    // Determine physical address size and raise an Address Size Fault if
    // necessary
    int pa_range = decodePhysAddrRange64(ps);
    // Clamp to lower limit
    if (pa_range > physAddrRange)
        currState->physAddrRange = physAddrRange;
    else
        currState->physAddrRange = pa_range;
    if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
        DPRINTF(TLB, "Address size fault before any lookup\n");
        Fault f;
        if (currState->isFetch)
            f = new PrefetchAbort(currState->vaddr_tainted,
                                  ArmFault::AddressSizeLL + start_lookup_level,
                                  isStage2,
                                  ArmFault::LpaeTran);
        else
            f = new DataAbort(currState->vaddr_tainted,
                              TlbEntry::DomainType::NoAccess,
                              currState->isWrite,
                              ArmFault::AddressSizeLL + start_lookup_level,
                              isStage2,
                              ArmFault::LpaeTran);


        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;

    }

    // Determine descriptor address
    Addr desc_addr = base_addr |
        (bits(currState->vaddr, tsz - 1,
              stride * (3 - start_lookup_level) + grain_size) << 3);

    // Trickbox address check
    Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
                        currState->vaddr, sizeof(uint64_t), currState->isFetch,
                        currState->isWrite, TlbEntry::DomainType::NoAccess,
                        start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    // NOTE(review): unlike the long-descriptor (LPAE) walk path, the
    // descriptor fetch request built here never sets Request::SECURE —
    // confirm whether secure-state walks need it on this path.
    Request::Flags flag = 0;
    if (currState->sctlr.c == 0) {
        flag = Request::UNCACHEABLE;
    }

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = true;
    currState->longDesc.largeGrain = large_grain;
    currState->longDesc.grainSize = grain_size;

    if (currState->timing) {
        // Timing walk: start the DMA and park the walk state on the
        // queue for the starting lookup level; the matching event fires
        // when the descriptor data arrives.
        Event *event;
        switch (start_lookup_level) {
          case L0:
            event = (Event *) &doL0LongDescEvent;
            break;
          case L1:
            event = (Event *) &doL1LongDescEvent;
            break;
          case L2:
            event = (Event *) &doL2LongDescEvent;
            break;
          case L3:
            event = (Event *) &doL3LongDescEvent;
            break;
          default:
            panic("Invalid table lookup level");
            break;
        }
        port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t), event,
                       (uint8_t*) &currState->longDesc.data,
                       currState->tc->getCpuPtr()->clockPeriod(), flag);
        DPRINTF(TLBVerbose,
                "Adding to walker fifo: queue size before adding: %d\n",
                stateQueues[start_lookup_level].size());
        stateQueues[start_lookup_level].push_back(currState);
        currState = NULL;
    } else if (!currState->functional) {
        // Atomic walk: blocking read, then process the descriptor inline.
        port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t),
                       NULL, (uint8_t*) &currState->longDesc.data,
                       currState->tc->getCpuPtr()->clockPeriod(), flag);
        doLongDescriptor();
        f = currState->fault;
    } else {
        // Functional walk: use a functional packet so no timing state is
        // disturbed.
        RequestPtr req = new Request(desc_addr, sizeof(uint64_t), flag,
                                     masterId);
        PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
        pkt->dataStatic((uint8_t*) &currState->longDesc.data);
        port.sendFunctional(pkt);
        doLongDescriptor();
        delete req;
        delete pkt;
        f = currState->fault;
    }

    return f;
}
874
/**
 * Fill in the memory attributes (type, shareability, inner/outer cache
 * attributes) of a TLB entry for the short-descriptor format, from the
 * TEX/C/B bits and the S bit of the descriptor.
 *
 * Two decode paths: when TEX remap is off (SCTLR.TRE == 0, or TRE == 1
 * with the MMU disabled) the texcb value is decoded directly; otherwise
 * the attributes are remapped through PRRR/NMRR.
 *
 * @param tc    thread context used to read PRRR/NMRR (remap path)
 * @param te    TLB entry being populated
 * @param sctlr SCTLR value controlling TEX remap
 * @param texcb concatenated {TEX[2:0], C, B} bits from the descriptor
 * @param s     descriptor S (shareable) bit
 */
void
TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
                      uint8_t texcb, bool s)
{
    // Note: tc and sctlr local variables are hiding tc and sctrl class
    // variables
    DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
    te.shareable = false; // default value
    te.nonCacheable = false;
    te.outerShareable = false;
    if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
        // TEX remap disabled: decode the texcb bits directly.
        switch(texcb) {
          case 0: // Stongly-ordered
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.shareable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            break;
          case 1: // Shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 2: // Outer and Inner Write-Through, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 6;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 3: // Outer and Inner Write-Back, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 7;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 4: // Outer and Inner Non-cacheable
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 0;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 5: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 6: // Implementation Defined
            panic("Implementation-defined texcb value!\n");
            break;
          case 7: // Outer and Inner Write-Back, Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 5;
            te.outerAttrs = 1;
            break;
          case 8: // Non-shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = false;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 9 ... 15: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 16 ... 31: // Cacheable Memory
            // TEX[2] set: inner attrs in texcb[1:0], outer in texcb[3:2].
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
                te.nonCacheable = true;
            te.innerAttrs = bits(texcb, 1, 0);
            te.outerAttrs = bits(texcb, 3, 2);
            break;
          default:
            panic("More than 32 states for 5 bits?\n");
        }
    } else {
        // TEX remap enabled: look the attributes up in PRRR/NMRR,
        // indexed by {TEX[0], C, B}.
        assert(tc);
        // NOTE(review): the banked-register flattening uses
        // currState->tc while the value is read through the tc argument
        // — presumably the same thread context; verify against callers.
        PRRR prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR,
                                    currState->tc, !currState->isSecure));
        NMRR nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR,
                                    currState->tc, !currState->isSecure));
        DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
        uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
        switch(bits(texcb, 2,0)) {
          case 0:
            curr_tr = prrr.tr0;
            curr_ir = nmrr.ir0;
            curr_or = nmrr.or0;
            te.outerShareable = (prrr.nos0 == 0);
            break;
          case 1:
            curr_tr = prrr.tr1;
            curr_ir = nmrr.ir1;
            curr_or = nmrr.or1;
            te.outerShareable = (prrr.nos1 == 0);
            break;
          case 2:
            curr_tr = prrr.tr2;
            curr_ir = nmrr.ir2;
            curr_or = nmrr.or2;
            te.outerShareable = (prrr.nos2 == 0);
            break;
          case 3:
            curr_tr = prrr.tr3;
            curr_ir = nmrr.ir3;
            curr_or = nmrr.or3;
            te.outerShareable = (prrr.nos3 == 0);
            break;
          case 4:
            curr_tr = prrr.tr4;
            curr_ir = nmrr.ir4;
            curr_or = nmrr.or4;
            te.outerShareable = (prrr.nos4 == 0);
            break;
          case 5:
            curr_tr = prrr.tr5;
            curr_ir = nmrr.ir5;
            curr_or = nmrr.or5;
            te.outerShareable = (prrr.nos5 == 0);
            break;
          case 6:
            panic("Imp defined type\n");
          case 7:
            curr_tr = prrr.tr7;
            curr_ir = nmrr.ir7;
            curr_or = nmrr.or7;
            te.outerShareable = (prrr.nos7 == 0);
            break;
        }

        // Memory type from the PRRR TRn field.
        switch(curr_tr) {
          case 0:
            DPRINTF(TLBVerbose, "StronglyOrdered\n");
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.nonCacheable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            te.shareable = true;
            break;
          case 1:
            DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
                    prrr.ds1, prrr.ds0, s);
            te.mtype = TlbEntry::MemoryType::Device;
            te.nonCacheable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            // Shareability of device memory comes from PRRR.DS1/DS0,
            // selected by the descriptor S bit.
            if (prrr.ds1 && s)
                te.shareable = true;
            if (prrr.ds0 && !s)
                te.shareable = true;
            break;
          case 2:
            DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
                    prrr.ns1, prrr.ns0, s);
            te.mtype = TlbEntry::MemoryType::Normal;
            // Shareability of normal memory comes from PRRR.NS1/NS0.
            if (prrr.ns1 && s)
                te.shareable = true;
            if (prrr.ns0 && !s)
                te.shareable = true;
            break;
          case 3:
            panic("Reserved type");
        }

        // Inner/outer cacheability only apply to Normal memory.
        if (te.mtype == TlbEntry::MemoryType::Normal){
            switch(curr_ir) {
              case 0:
                te.nonCacheable = true;
                te.innerAttrs = 0;
                break;
              case 1:
                te.innerAttrs = 5;
                break;
              case 2:
                te.innerAttrs = 6;
                break;
              case 3:
                te.innerAttrs = 7;
                break;
            }

            switch(curr_or) {
              case 0:
                te.nonCacheable = true;
                te.outerAttrs = 0;
                break;
              case 1:
                te.outerAttrs = 1;
                break;
              case 2:
                te.outerAttrs = 2;
                break;
              case 3:
                te.outerAttrs = 3;
                break;
            }
        }
    }
    DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, \
            outerAttrs: %d\n",
            te.shareable, te.innerAttrs, te.outerAttrs);
    te.setAttributes(false);
}
1081
/**
 * Fill in the memory attributes of a TLB entry for the long-descriptor
 * (LPAE) format.
 *
 * For stage 2 walks the attributes are encoded directly in the
 * descriptor's MemAttr field; for stage 1 the descriptor's AttrIndx
 * selects an 8-bit attribute field from MAIR0/MAIR1 (LPAE always
 * remaps, irrespective of SCTLR.TRE).
 *
 * @param tc          thread context (unused on the stage 2 path)
 * @param te          TLB entry being populated
 * @param lDescriptor long descriptor providing sh/memAttr/attrIndx
 */
void
TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
    LongDescriptor &lDescriptor)
{
    assert(_haveLPAE);

    uint8_t attr;
    uint8_t sh = lDescriptor.sh();
    // Different format and source of attributes if this is a stage 2
    // translation
    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_3_2 = (attr >> 2) & 0x3;
        uint8_t attr_1_0 = attr & 0x3;

        DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_3_2 == 0) {
            // MemAttr[3:2] == 0: Strongly-ordered or Device memory.
            te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                    : TlbEntry::MemoryType::Device;
            te.outerAttrs = 0;
            te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            // Normal memory: 1 = non-cacheable, 2 = WT, 3 = WB.
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = attr_3_2 == 1 ? 0 :
                            attr_3_2 == 2 ? 2 : 1;
            te.innerAttrs = attr_1_0 == 1 ? 0 :
                            attr_1_0 == 2 ? 6 : 5;
            te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        // LPAE always uses remapping of memory attributes, irrespective of the
        // value of SCTLR.TRE
        // AttrIndx[2] selects MAIR1 vs MAIR0; AttrIndx[1:0] selects the
        // byte within the register.
        int reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
        reg = flattenMiscRegNsBanked(reg, currState->tc, !currState->isSecure);
        uint32_t mair = currState->tc->readMiscReg(reg);
        attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
        uint8_t attr_7_4 = bits(attr, 7, 4);
        uint8_t attr_3_0 = bits(attr, 3, 0);
        DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);

        // Note: the memory subsystem only cares about the 'cacheable' memory
        // attribute. The other attributes are only used to fill the PAR register
        // accordingly to provide the illusion of full support
        te.nonCacheable = false;

        switch (attr_7_4) {
          case 0x0:
            // Strongly-ordered or Device memory
            if (attr_3_0 == 0x0)
                te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            else if (attr_3_0 == 0x4)
                te.mtype = TlbEntry::MemoryType::Device;
            else
                panic("Unpredictable behavior\n");
            te.nonCacheable = true;
            te.outerAttrs = 0;
            break;
          case 0x4:
            // Normal memory, Outer Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = 0;
            if (attr_3_0 == 0x4)
                // Inner Non-cacheable
                te.nonCacheable = true;
            else if (attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          case 0x8:
          case 0x9:
          case 0xa:
          case 0xb:
          case 0xc:
          case 0xd:
          case 0xe:
          case 0xf:
            if (attr_7_4 & 0x4) {
                te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
            } else {
                te.outerAttrs = 0x2;
            }
            // Normal memory, Outer Cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }

        // Inner attributes from attr[3:0].
        switch (attr_3_0) {
          case 0x0:
            te.innerAttrs = 0x1;
            break;
          case 0x4:
            te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
            break;
          case 0x8:
          case 0x9:
          case 0xA:
          case 0xB:
            te.innerAttrs = 6;
            break;
          case 0xC:
          case 0xD:
          case 0xE:
          case 0xF:
            te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
    }

    // SH field: 0b10 = Outer Shareable, 0b11 = Inner Shareable.
    te.outerShareable = sh == 2;
    te.shareable       = (sh & 0x2) ? true : false;
    te.setAttributes(true);
    te.attributes |= (uint64_t) attr << 56;
}
1206
/**
 * Fill in the memory attributes of a TLB entry for an AArch64 walk.
 * The descriptor's AttrIndx selects an 8-bit attribute field from the
 * MAIR of the current exception level; the result is also packed into
 * te.attributes in 64-bit PAR format.
 *
 * @param tc       thread context used to read MAIR_ELx
 * @param te       TLB entry being populated
 * @param attrIndx AttrIndx field from the descriptor
 * @param sh       SH (shareability) field from the descriptor
 */
void
TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te, uint8_t attrIndx,
                             uint8_t sh)
{
    DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);

    // Select MAIR
    uint64_t mair;
    switch (currState->el) {
      case EL0:
      case EL1:
        mair = tc->readMiscReg(MISCREG_MAIR_EL1);
        break;
      case EL2:
        mair = tc->readMiscReg(MISCREG_MAIR_EL2);
        break;
      case EL3:
        mair = tc->readMiscReg(MISCREG_MAIR_EL3);
        break;
      default:
        panic("Invalid exception level");
        break;
    }

    // Select attributes
    uint8_t attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
    uint8_t attr_lo = bits(attr, 3, 0);
    uint8_t attr_hi = bits(attr, 7, 4);

    // Memory type
    te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;

    // Cacheability
    te.nonCacheable = false;
    if (te.mtype == TlbEntry::MemoryType::Device || // Device memory
        attr_hi == 0x8 || // Normal memory, Outer Non-cacheable
        attr_lo == 0x8) { // Normal memory, Inner Non-cacheable
        te.nonCacheable = true;
    }

    // NOTE(review): these two assignments are the mirror image of
    // memAttrsLPAE (there: outerShareable = sh == 2, shareable = sh & 2).
    // One of the two is presumably swapped — confirm against the SH field
    // encoding (0b10 Outer Shareable, 0b11 Inner Shareable).
    te.shareable = sh == 2;
    te.outerShareable = (sh & 0x2) ? true : false;
    // Attributes formatted according to the 64-bit PAR
    te.attributes = ((uint64_t) attr << 56) |
        (1 << 11) |     // LPAE bit
        (te.ns << 9) |  // NS bit
        (sh << 7);
}
1255
/**
 * Process a fetched short-descriptor L1 entry for the current walk
 * state: either raise a translation/access-flag fault, insert a section
 * TLB entry, or launch the L2 descriptor fetch for a page-table entry.
 * Results are communicated through currState (fault, delayed flag).
 */
void
TableWalker::doL1Descriptor()
{
    // A fault from an earlier step (e.g. the descriptor fetch) ends the
    // walk immediately.
    if (currState->fault != NoFault) {
        return;
    }

    DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
            currState->vaddr_tainted, currState->l1Desc.data);
    TlbEntry te;

    switch (currState->l1Desc.type()) {
      case L1Descriptor::Ignore:
      case L1Descriptor::Reserved:
        if (!currState->timing) {
            currState->tc = NULL;
            currState->req = NULL;
        }
        DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
        if (currState->isFetch)
            currState->fault =
                new PrefetchAbort(currState->vaddr_tainted,
                                  ArmFault::TranslationLL + L1,
                                  isStage2,
                                  ArmFault::VmsaTran);
        else
            currState->fault =
                new DataAbort(currState->vaddr_tainted,
                              TlbEntry::DomainType::NoAccess,
                              currState->isWrite,
                              ArmFault::TranslationLL + L1, isStage2,
                              ArmFault::VmsaTran);
        return;
      case L1Descriptor::Section:
        if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
            /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
              * enabled if set, do l1.Desc.setAp0() instead of generating
              * AccessFlag0
              */

            currState->fault = new DataAbort(currState->vaddr_tainted,
                                             currState->l1Desc.domain(),
                                             currState->isWrite,
                                             ArmFault::AccessFlagLL + L1,
                                             isStage2,
                                             ArmFault::VmsaTran);
        }
        if (currState->l1Desc.supersection()) {
            panic("Haven't implemented supersections\n");
        }
        // Note: the entry is inserted even when an access-flag fault was
        // recorded above; the fault is reported through currState->fault.
        insertTableEntry(currState->l1Desc, false);
        return;
      case L1Descriptor::PageTable:
        {
            // L1 entry points at an L2 table: compute the L2 descriptor
            // address from the table base and VA bits [19:12].
            Addr l2desc_addr;
            l2desc_addr = currState->l1Desc.l2Addr() |
                (bits(currState->vaddr, 19, 12) << 2);
            DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
                    l2desc_addr, currState->isSecure ? "s" : "ns");

            // Trickbox address check
            currState->fault = tlb->walkTrickBoxCheck(
                l2desc_addr, currState->isSecure, currState->vaddr,
                sizeof(uint32_t), currState->isFetch, currState->isWrite,
                currState->l1Desc.domain(), L2);

            if (currState->fault) {
                if (!currState->timing) {
                    currState->tc = NULL;
                    currState->req = NULL;
                }
                return;
            }

            Request::Flags flag = 0;
            if (currState->isSecure)
                flag.set(Request::SECURE);

            // In timing mode fetchDescriptor returns true and the walk
            // resumes from doL2DescEvent when the data arrives.
            bool delayed;
            delayed = fetchDescriptor(l2desc_addr,
                                      (uint8_t*)&currState->l2Desc.data,
                                      sizeof(uint32_t), flag, -1, &doL2DescEvent,
                                      &TableWalker::doL2Descriptor);
            if (delayed) {
                currState->delayed = true;
            }

            return;
        }
      default:
        panic("A new type in a 2 bit field?\n");
    }
}
1349
1350void
1351TableWalker::doLongDescriptor()
1352{
1353 if (currState->fault != NoFault) {
1354 return;
1355 }
1356
1357 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1358 currState->longDesc.lookupLevel, currState->vaddr_tainted,
1359 currState->longDesc.data,
1360 currState->aarch64 ? "AArch64" : "long-desc.");
1361
1362 if ((currState->longDesc.type() == LongDescriptor::Block) ||
1363 (currState->longDesc.type() == LongDescriptor::Page)) {
1364 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1365 "xn: %d, ap: %d, af: %d, type: %d\n",
1366 currState->longDesc.lookupLevel,
1367 currState->longDesc.data,
1368 currState->longDesc.pxn(),
1369 currState->longDesc.xn(),
1370 currState->longDesc.ap(),
1371 currState->longDesc.af(),
1372 currState->longDesc.type());
1373 } else {
1374 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1375 currState->longDesc.lookupLevel,
1376 currState->longDesc.data,
1377 currState->longDesc.type());
1378 }
1379
1380 TlbEntry te;
1381
1382 switch (currState->longDesc.type()) {
1383 case LongDescriptor::Invalid:
1384 if (!currState->timing) {
1385 currState->tc = NULL;
1386 currState->req = NULL;
1387 }
1388
1389 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1390 currState->longDesc.lookupLevel,
1391 ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1392 if (currState->isFetch)
1393 currState->fault = new PrefetchAbort(
1394 currState->vaddr_tainted,
1395 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1396 isStage2,
1397 ArmFault::LpaeTran);
1398 else
1399 currState->fault = new DataAbort(
1400 currState->vaddr_tainted,
1401 TlbEntry::DomainType::NoAccess,
1402 currState->isWrite,
1403 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1404 isStage2,
1405 ArmFault::LpaeTran);
1406 return;
1407 case LongDescriptor::Block:
1408 case LongDescriptor::Page:
1409 {
1410 bool fault = false;
1411 bool aff = false;
1412 // Check for address size fault
1413 if (checkAddrSizeFaultAArch64(
1414 mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
1415 currState->longDesc.offsetBits()),
1416 currState->physAddrRange)) {
1417 fault = true;
1418 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1419 currState->longDesc.lookupLevel);
1420 // Check for access fault
1421 } else if (currState->longDesc.af() == 0) {
1422 fault = true;
1423 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1424 currState->longDesc.lookupLevel);
1425 aff = true;
1426 }
1427 if (fault) {
1428 if (currState->isFetch)
1429 currState->fault = new PrefetchAbort(
1430 currState->vaddr_tainted,
1431 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1432 currState->longDesc.lookupLevel,
1433 isStage2,
1434 ArmFault::LpaeTran);
1435 else
1436 currState->fault = new DataAbort(
1437 currState->vaddr_tainted,
1438 TlbEntry::DomainType::NoAccess, currState->isWrite,
1439 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1440 currState->longDesc.lookupLevel,
1441 isStage2,
1442 ArmFault::LpaeTran);
1443 } else {
1444 insertTableEntry(currState->longDesc, true);
1445 }
1446 }
1447 return;
1448 case LongDescriptor::Table:
1449 {
1450 // Set hierarchical permission flags
1451 currState->secureLookup = currState->secureLookup &&
1452 currState->longDesc.secureTable();
1453 currState->rwTable = currState->rwTable &&
1454 currState->longDesc.rwTable();
1455 currState->userTable = currState->userTable &&
1456 currState->longDesc.userTable();
1457 currState->xnTable = currState->xnTable ||
1458 currState->longDesc.xnTable();
1459 currState->pxnTable = currState->pxnTable ||
1460 currState->longDesc.pxnTable();
1461
1462 // Set up next level lookup
1463 Addr next_desc_addr = currState->longDesc.nextDescAddr(
1464 currState->vaddr);
1465
1466 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1467 currState->longDesc.lookupLevel,
1468 currState->longDesc.lookupLevel + 1,
1469 next_desc_addr,
1470 currState->secureLookup ? "s" : "ns");
1471
1472 // Check for address size fault
1473 if (currState->aarch64 && checkAddrSizeFaultAArch64(
1474 next_desc_addr, currState->physAddrRange)) {
1475 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1476 currState->longDesc.lookupLevel);
1477 if (currState->isFetch)
1478 currState->fault = new PrefetchAbort(
1479 currState->vaddr_tainted,
1480 ArmFault::AddressSizeLL
1481 + currState->longDesc.lookupLevel,
1482 isStage2,
1483 ArmFault::LpaeTran);
1484 else
1485 currState->fault = new DataAbort(
1486 currState->vaddr_tainted,
1487 TlbEntry::DomainType::NoAccess, currState->isWrite,
1488 ArmFault::AddressSizeLL
1489 + currState->longDesc.lookupLevel,
1490 isStage2,
1491 ArmFault::LpaeTran);
1492 return;
1493 }
1494
1495 // Trickbox address check
1496 currState->fault = tlb->walkTrickBoxCheck(
1497 next_desc_addr, currState->vaddr,
1498 currState->vaddr, sizeof(uint64_t),
1499 currState->isFetch, currState->isWrite,
1500 TlbEntry::DomainType::Client,
1501 toLookupLevel(currState->longDesc.lookupLevel +1));
1502
1503 if (currState->fault) {
1504 if (!currState->timing) {
1505 currState->tc = NULL;
1506 currState->req = NULL;
1507 }
1508 return;
1509 }
1510
1511 Request::Flags flag = 0;
1512 if (currState->secureLookup)
1513 flag.set(Request::SECURE);
1514
1515 currState->longDesc.lookupLevel =
1516 (LookupLevel) (currState->longDesc.lookupLevel + 1);
1517 Event *event = NULL;
1518 switch (currState->longDesc.lookupLevel) {
1519 case L1:
1520 assert(currState->aarch64);
1521 event = &doL1LongDescEvent;
1522 break;
1523 case L2:
1524 event = &doL2LongDescEvent;
1525 break;
1526 case L3:
1527 event = &doL3LongDescEvent;
1528 break;
1529 default:
1530 panic("Wrong lookup level in table walk\n");
1531 break;
1532 }
1533
1534 bool delayed;
1535 delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1536 sizeof(uint64_t), flag, -1, event,
1537 &TableWalker::doLongDescriptor);
1538 if (delayed) {
1539 currState->delayed = true;
1540 }
1541 }
1542 return;
1543 default:
1544 panic("A new type in a 2 bit field?\n");
1545 }
1546}
1547
/**
 * Process a fetched short-descriptor L2 entry: raise a translation
 * fault for invalid descriptors, record an access-flag fault when
 * SCTLR.AFE is set and AP[0] is clear, and insert the page entry into
 * the TLB.  Results are communicated through currState->fault.
 */
void
TableWalker::doL2Descriptor()
{
    // A fault from an earlier step ends the walk immediately.
    if (currState->fault != NoFault) {
        return;
    }

    DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
            currState->vaddr_tainted, currState->l2Desc.data);
    TlbEntry te;

    if (currState->l2Desc.invalid()) {
        DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
        if (!currState->timing) {
            currState->tc = NULL;
            currState->req = NULL;
        }
        if (currState->isFetch)
            currState->fault =
                new PrefetchAbort(currState->vaddr_tainted,
                                  ArmFault::TranslationLL + L2,
                                  isStage2,
                                  ArmFault::VmsaTran);
        else
            // The domain comes from the L1 entry that pointed here.
            currState->fault =
                new DataAbort(currState->vaddr_tainted, currState->l1Desc.domain(),
                              currState->isWrite, ArmFault::TranslationLL + L2,
                              isStage2,
                              ArmFault::VmsaTran);
        return;
    }

    if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
        /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled
          * if set, do l2.Desc.setAp0() instead of generating AccessFlag0
          */
        DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
                currState->sctlr.afe, currState->l2Desc.ap());

        currState->fault =
            new DataAbort(currState->vaddr_tainted,
                          TlbEntry::DomainType::NoAccess, currState->isWrite,
                          ArmFault::AccessFlagLL + L2, isStage2,
                          ArmFault::VmsaTran);
    }

    // Note: the entry is inserted even when an access-flag fault was
    // recorded above; the fault is reported through currState->fault.
    insertTableEntry(currState->l2Desc, false);
}
1596
/**
 * Timing-mode completion handler for an L1 descriptor DMA.  Pops the
 * oldest walk state off the L1 queue, processes the descriptor, and
 * either finishes the translation (fault or hit), or re-queues the walk
 * on the L2 queue when an L2 fetch was launched.
 */
void
TableWalker::doL1DescriptorWrapper()
{
    currState = stateQueues[L1].front();
    currState->delayed = false;
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }


    DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data);
    DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",currState->l1Desc.data);

    DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
    doL1Descriptor();

    stateQueues[L1].pop_front();
    completeDrain();
    // Check if fault was generated
    if (currState->fault != NoFault) {
        // Report the fault to the requester and release this walk slot.
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    }
    else if (!currState->delayed) {
        // delay is not set so there is no L2 to do
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            // Retry the translation; the entry inserted by doL1Descriptor
            // should now hit in the TLB.
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            currState->fault = tlb->translateTiming(currState->req, currState->tc,
                                                    currState->transState, currState->mode);
        }

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        // need to do L2 descriptor
        stateQueues[L2].push_back(currState);
    }
    currState = NULL;
}
1652
/**
 * Timing-mode completion handler for an L2 descriptor DMA.  Pops the
 * oldest walk state off the L2 queue, processes the descriptor, reports
 * any fault (or retries the translation) and releases the walk slot.
 */
void
TableWalker::doL2DescriptorWrapper()
{
    currState = stateQueues[L2].front();
    // An L2 lookup only happens after an L1 lookup set the delayed flag.
    assert(currState->delayed);
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doL2Descriptor();

    // Check if fault was generated
    if (currState->fault != NoFault) {
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);
    }
    else {
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            // Retry the translation; the entry inserted by doL2Descriptor
            // should now hit in the TLB.
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            currState->fault = tlb->translateTiming(currState->req,
                currState->tc, currState->transState, currState->mode);
        }
    }


    stateQueues[L2].pop_front();
    completeDrain();
    pending = false;
    nextWalk(currState->tc);

    currState->req = NULL;
    currState->tc = NULL;
    currState->delayed = false;

    delete currState;
    currState = NULL;
}
1695
// Event trampoline: long-descriptor DMA completed for an L0 lookup.
void
TableWalker::doL0LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L0);
}
1701
// Event trampoline: long-descriptor DMA completed for an L1 lookup.
void
TableWalker::doL1LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L1);
}
1707
// Event trampoline: long-descriptor DMA completed for an L2 lookup.
void
TableWalker::doL2LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L2);
}
1713
// Event trampoline: long-descriptor DMA completed for an L3 lookup.
void
TableWalker::doL3LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L3);
}
1719
/**
 * Timing-mode completion handler shared by all long-descriptor lookup
 * levels.  Pops the oldest walk state off the queue for the given
 * level, processes the descriptor, and either finishes the translation
 * (fault or hit) or re-queues the walk for the next lookup level.
 *
 * @param curr_lookup_level the lookup level whose DMA just completed
 */
void
TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
{
    currState = stateQueues[curr_lookup_level].front();
    assert(curr_lookup_level == currState->longDesc.lookupLevel);
    currState->delayed = false;

    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doLongDescriptor();

    stateQueues[curr_lookup_level].pop_front();

    if (currState->fault != NoFault) {
        // A fault was generated
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else if (!currState->delayed) {
        // No additional lookups required
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            // Retry the translation; the entry inserted by
            // doLongDescriptor should now hit in the TLB.
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            currState->fault = tlb->translateTiming(currState->req, currState->tc,
                                                    currState->transState,
                                                    currState->mode);
        }

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        // doLongDescriptor advanced lookupLevel and launched the next
        // fetch; there must be a level left to descend to.
        if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
            panic("Max. number of lookups already reached in table walk\n");
        // Need to perform additional lookups
        stateQueues[currState->longDesc.lookupLevel].push_back(currState);
    }
    currState = NULL;
}
1776
1777
1778void
1779TableWalker::nextWalk(ThreadContext *tc)
1780{
1781 if (pendingQueue.size())
1782 schedule(doProcessEvent, clockEdge(Cycles(1)));
1783}
1784
// Fetch a page table descriptor into the supplied buffer, choosing
// between the stage-2 MMU path and a direct port access, and between
// timing, atomic and functional modes.
//
// @param descAddr     address of the descriptor to read
// @param data         buffer the descriptor bytes are read into
// @param numBytes     size of the descriptor in bytes
// @param flags        request flags for the memory access
// @param queueIndex   state queue currState is parked on for timing
//                     accesses; negative means "do not queue"
// @param event        completion event for timing accesses
// @param doDescriptor handler invoked synchronously for non-timing
//                     accesses
// @return true if the access is timing, i.e. the result is delayed and
//         will be delivered via 'event'
bool
TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
                 Request::Flags flags, int queueIndex, Event *event,
                 void (TableWalker::*doDescriptor)())
{
    bool isTiming = currState->timing;

    // do the requests for the page table descriptors have to go through the
    // second stage MMU
    if (currState->stage2Req) {
        Fault fault;
        flags = flags | TLB::MustBeOne;

        if (isTiming) {
            // The translation object is owned by currState and freed in
            // doLongDescriptorWrapper() once the walk resumes.
            Stage2MMU::Stage2Translation *tran = new
                Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
                                             currState->vaddr);
            currState->stage2Tran = tran;
            stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
                                     flags, masterId);
            // NOTE(review): for a timing access the stage-2 translation
            // may not have completed yet; this reads whatever fault
            // value is currently stored in the translation object —
            // confirm it is initialized to NoFault.
            fault = tran->fault;
        } else {
            fault = stage2Mmu->readDataUntimed(currState->tc,
                currState->vaddr, descAddr, data, numBytes, flags, masterId,
                currState->functional);
        }

        if (fault != NoFault) {
            currState->fault = fault;
        }
        if (isTiming) {
            if (queueIndex >= 0) {
                // Hand ownership of currState to the per-level queue;
                // it is picked up again when the fetch completes.
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else {
            (this->*doDescriptor)();
        }
    } else {
        if (isTiming) {
            // Issue the DMA read; 'event' fires when the data arrives.
            port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            if (queueIndex >= 0) {
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else if (!currState->functional) {
            // Atomic mode: the DMA completes inline, so the descriptor
            // can be decoded immediately.
            port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            (this->*doDescriptor)();
        } else {
            // Functional mode: bypass timing entirely with a functional
            // packet, then decode.
            RequestPtr req = new Request(descAddr, numBytes, flags, masterId);
            req->taskId(ContextSwitchTaskId::DMA);
            PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
            pkt->dataStatic(data);
            port.sendFunctional(pkt);
            (this->*doDescriptor)();
            delete req;
            delete pkt;
        }
    }
    return (isTiming);
}
1852
1853void
1854TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
1855{
1856 TlbEntry te;
1857
1858 // Create and fill a new page table entry
1859 te.valid = true;
1860 te.longDescFormat = longDescriptor;
1861 te.isHyp = currState->isHyp;
1862 te.asid = currState->asid;
1863 te.vmid = currState->vmid;
1864 te.N = descriptor.offsetBits();
1865 te.vpn = currState->vaddr >> te.N;
1866 te.size = (1<<te.N) - 1;
1867 te.pfn = descriptor.pfn();
1868 te.domain = descriptor.domain();
1869 te.lookupLevel = descriptor.lookupLevel;
1870 te.ns = !descriptor.secure(haveSecurity, currState) || isStage2;
1871 te.nstid = !currState->isSecure;
1872 te.xn = descriptor.xn();
1873 if (currState->aarch64)
1874 te.el = currState->el;
1875 else
1876 te.el = 1;
1877
1878 // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
1879 // as global
1880 te.global = descriptor.global(currState) || isStage2;
1881 if (longDescriptor) {
1882 LongDescriptor lDescriptor =
1883 dynamic_cast<LongDescriptor &>(descriptor);
1884
1885 te.xn |= currState->xnTable;
1886 te.pxn = currState->pxnTable || lDescriptor.pxn();
1887 if (isStage2) {
1888 // this is actually the HAP field, but its stored in the same bit
1889 // possitions as the AP field in a stage 1 translation.
1890 te.hap = lDescriptor.ap();
1891 } else {
1892 te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
1893 (currState->userTable && (descriptor.ap() & 0x1));
1894 }
1895 if (currState->aarch64)
1896 memAttrsAArch64(currState->tc, te, currState->longDesc.attrIndx(),
1897 currState->longDesc.sh());
1898 else
1899 memAttrsLPAE(currState->tc, te, lDescriptor);
1900 } else {
1901 te.ap = descriptor.ap();
1902 memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
1903 descriptor.shareable());
1904 }
1905
1906 // Debug output
1907 DPRINTF(TLB, descriptor.dbgHeader().c_str());
1908 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
1909 te.N, te.pfn, te.size, te.global, te.valid);
1910 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
1911 "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
1912 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
1913 te.nonCacheable, te.ns);
1914 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
1915 descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
1916 descriptor.getRawData());
1917
1918 // Insert the entry into the TLB
1919 tlb->insert(currState->vaddr, te);
1920 if (!currState->timing) {
1921 currState->tc = NULL;
1922 currState->req = NULL;
1923 }
1924}
1925
1926ArmISA::TableWalker *
1927ArmTableWalkerParams::create()
1928{
1929 return new ArmISA::TableWalker(this);
1930}
1931
1932LookupLevel
1933TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
1934{
1935 switch (lookup_level_as_int) {
1936 case L1:
1937 return L1;
1938 case L2:
1939 return L2;
1940 case L3:
1941 return L3;
1942 default:
1943 panic("Invalid lookup level conversion");
1944 }
1945}
291 }
292
293 if (pending || pendingQueue.size()) {
294 pendingQueue.push_back(currState);
295 currState = NULL;
296 } else {
297 pending = true;
298 if (currState->aarch64)
299 return processWalkAArch64();
300 else if (long_desc_format)
301 return processWalkLPAE();
302 else
303 return processWalk();
304 }
305
306 return NoFault;
307}
308
// Dequeue the oldest pending walk and either start it, or — if the
// requesting instruction was squashed or a previous walk already filled
// the TLB — discard it (and up to numSquashable further stale entries).
void
TableWalker::processWalkWrapper()
{
    assert(!currState);
    assert(pendingQueue.size());
    currState = pendingQueue.front();

    // TLB lookups below are filtered by exception level: current EL for
    // AArch64 walks, EL1 otherwise.
    ExceptionLevel target_el = EL0;
    if (currState->aarch64)
        target_el = currEL(currState->tc);
    else
        target_el = EL1;

    // Check if a previous walk filled this request already
    // @TODO Should this always be the TLB or should we look in the stage2 TLB?
    TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
            currState->vmid, currState->isHyp, currState->isSecure, true, false,
            target_el);

    // Check if we still need to have a walk for this request. If the requesting
    // instruction has been squashed, or a previous walk has filled the TLB with
    // a match, we just want to get rid of the walk. The latter could happen
    // when there are multiple outstanding misses to a single page and a
    // previous request has been successfully translated.
    if (!currState->transState->squashed() && !te) {
        // We've got a valid request, lets process it
        pending = true;
        pendingQueue.pop_front();
        if (currState->aarch64)
            processWalkAArch64();
        else if ((_haveLPAE && currState->ttbcr.eae) || currState->isHyp || isStage2)
            processWalkLPAE();
        else
            processWalk();
        return;
    }


    // If the instruction that we were translating for has been
    // squashed we shouldn't bother.
    unsigned num_squashed = 0;
    ThreadContext *tc = currState->tc;
    // Drop at most numSquashable stale walks per invocation to bound
    // the work done in one event.
    while ((num_squashed < numSquashable) && currState &&
           (currState->transState->squashed() || te)) {
        pendingQueue.pop_front();
        num_squashed++;

        DPRINTF(TLB, "Squashing table walk for address %#x\n",
                currState->vaddr_tainted);

        if (currState->transState->squashed()) {
            // finish the translation which will delete the translation object
            currState->transState->finish(new UnimpFault("Squashed Inst"),
                    currState->req, currState->tc, currState->mode);
        } else {
            // translate the request now that we know it will work
            tlb->translateTiming(currState->req, currState->tc,
                        currState->transState, currState->mode);

        }

        // delete the current request
        delete currState;

        // peak at the next one
        if (pendingQueue.size()) {
            currState = pendingQueue.front();
            te = tlb->lookup(currState->vaddr, currState->asid,
                currState->vmid, currState->isHyp, currState->isSecure, true,
                false, target_el);
        } else {
            // Terminate the loop, nothing more to do
            currState = NULL;
        }
    }

    // if we've still got pending translations schedule more work
    nextWalk(tc);
    currState = NULL;
    completeDrain();
}
390
// Start a short-descriptor (non-LPAE AArch32) table walk: select TTBR0
// or TTBR1 based on TTBCR.N and the faulting address, compute the L1
// descriptor address, and kick off the L1 descriptor fetch.
Fault
TableWalker::processWalk()
{
    Addr ttbr = 0;

    // If translation isn't enabled, we shouldn't be here
    assert(currState->sctlr.m || isStage2);

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
            currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
                                                      32 - currState->ttbcr.n));

    // TTBR0 covers addresses whose top N bits are zero (or everything
    // when N == 0); TTBR1 covers the rest.
    if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
                                          32 - currState->ttbcr.n)) {
        DPRINTF(TLB, " - Selecting TTBR0\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd0) {
            if (currState->isFetch)
                return new PrefetchAbort(currState->vaddr_tainted,
                                         ArmFault::TranslationLL + L1,
                                         isStage2,
                                         ArmFault::VmsaTran);
            else
                return new DataAbort(currState->vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess, currState->isWrite,
                                     ArmFault::TranslationLL + L1, isStage2,
                                     ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
            MISCREG_TTBR0, currState->tc, !currState->isSecure));
    } else {
        DPRINTF(TLB, " - Selecting TTBR1\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd1) {
            if (currState->isFetch)
                return new PrefetchAbort(currState->vaddr_tainted,
                                         ArmFault::TranslationLL + L1,
                                         isStage2,
                                         ArmFault::VmsaTran);
            else
                return new DataAbort(currState->vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess, currState->isWrite,
                                     ArmFault::TranslationLL + L1, isStage2,
                                     ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
            MISCREG_TTBR1, currState->tc, !currState->isSecure));
        // TTBR1 walks index a full-size L1 table, so treat N as 0 in
        // the descriptor-address computation below (local copy only).
        currState->ttbcr.n = 0;
    }

    // L1 descriptor address: table base from TTBR plus the top VA bits
    // as a word index.
    Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
        (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
    DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
            currState->isSecure ? "s" : "ns");

    // Trickbox address check
    Fault f;
    f = tlb->walkTrickBoxCheck(l1desc_addr, currState->isSecure,
            currState->vaddr, sizeof(uint32_t), currState->isFetch,
            currState->isWrite, TlbEntry::DomainType::NoAccess, L1);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            // Timing walks own currState; release it and move on.
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    Request::Flags flag = 0;
    if (currState->sctlr.c == 0) {
        flag = Request::UNCACHEABLE;
    }

    bool delayed;
    delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
                              sizeof(uint32_t), flag, L1, &doL1DescEvent,
                              &TableWalker::doL1Descriptor);
    if (!delayed) {
       f = currState->fault;
    }

    return f;
}
479
// Start an LPAE (long-descriptor AArch32) table walk: pick the base
// register (VTTBR for stage 2, HTTBR in hyp mode, else TTBR0/TTBR1 by
// address range), determine the starting lookup level, and kick off the
// first descriptor fetch.
Fault
TableWalker::processWalkLPAE()
{
    Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
    int tsz, n;
    LookupLevel start_lookup_level = L1;

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
            currState->vaddr_tainted, currState->ttbcr);

    Request::Flags flag = 0;
    if (currState->isSecure)
        flag.set(Request::SECURE);

    // work out which base address register to use, if in hyp mode we always
    // use HTTBR
    if (isStage2) {
        DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
        tsz  = sext<4>(currState->vtcr.t0sz);
        start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
    } else if (currState->isHyp) {
        DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
        tsz  = currState->htcr.t0sz;
    } else {
        assert(_haveLPAE && currState->ttbcr.eae);

        // Determine boundaries of TTBR0/1 regions
        if (currState->ttbcr.t0sz)
            ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
        else if (currState->ttbcr.t1sz)
            ttbr0_max = (1ULL << 32) -
                (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
        else
            ttbr0_max = (1ULL << 32) - 1;
        if (currState->ttbcr.t1sz)
            ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
        else
            ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));

        // The following code snippet selects the appropriate translation table base
        // address (TTBR0 or TTBR1) and the appropriate starting lookup level
        // depending on the address range supported by the translation table (ARM
        // ARM issue C B3.6.4)
        if (currState->vaddr <= ttbr0_max) {
            DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd0) {
                if (currState->isFetch)
                    return new PrefetchAbort(currState->vaddr_tainted,
                                             ArmFault::TranslationLL + L1,
                                             isStage2,
                                             ArmFault::LpaeTran);
                else
                    return new DataAbort(currState->vaddr_tainted,
                                         TlbEntry::DomainType::NoAccess,
                                         currState->isWrite,
                                         ArmFault::TranslationLL + L1,
                                         isStage2,
                                         ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
                MISCREG_TTBR0, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t0sz;
            if (ttbr0_max < (1ULL << 30))  // Upper limit < 1 GB
                start_lookup_level = L2;
        } else if (currState->vaddr >= ttbr1_min) {
            DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd1) {
                if (currState->isFetch)
                    return new PrefetchAbort(currState->vaddr_tainted,
                                             ArmFault::TranslationLL + L1,
                                             isStage2,
                                             ArmFault::LpaeTran);
                else
                    return new DataAbort(currState->vaddr_tainted,
                                         TlbEntry::DomainType::NoAccess,
                                         currState->isWrite,
                                         ArmFault::TranslationLL + L1,
                                         isStage2,
                                         ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
                MISCREG_TTBR1, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t1sz;
            if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))  // Lower limit >= 3 GB
                start_lookup_level = L2;
        } else {
            // Out of boundaries -> translation fault
            if (currState->isFetch)
                return new PrefetchAbort(currState->vaddr_tainted,
                                         ArmFault::TranslationLL + L1,
                                         isStage2,
                                         ArmFault::LpaeTran);
            else
                return new DataAbort(currState->vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess,
                                     currState->isWrite, ArmFault::TranslationLL + L1,
                                     isStage2, ArmFault::LpaeTran);
        }

    }

    // Perform lookup (ARM ARM issue C B3.6.6)
    if (start_lookup_level == L1) {
        n = 5 - tsz;
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 26, 30) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    } else {
        // Skip first-level lookup
        n = (tsz >= 2 ? 14 - tsz : 12);
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 17, 21) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    }

    // Trickbox address check
    Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
                        currState->vaddr, sizeof(uint64_t), currState->isFetch,
                        currState->isWrite, TlbEntry::DomainType::NoAccess,
                        start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    // NOTE(review): this assignment (rather than flag.set()) clears the
    // SECURE flag that was set at the top of the function; it is set
    // again just below, so the final flags come out correct.
    if (currState->sctlr.c == 0) {
        flag = Request::UNCACHEABLE;
    }

    if (currState->isSecure)
        flag.set(Request::SECURE);

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = false;
    currState->longDesc.largeGrain = false;
    currState->longDesc.grainSize = 12;

    Event *event = start_lookup_level == L1 ? (Event *) &doL1LongDescEvent
                                            : (Event *) &doL2LongDescEvent;

    bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
                                   sizeof(uint64_t), flag, start_lookup_level,
                                   event, &TableWalker::doLongDescriptor);
    if (!delayed) {
        f = currState->fault;
    }

    return f;
}
643
644unsigned
645TableWalker::adjustTableSizeAArch64(unsigned tsz)
646{
647 if (tsz < 25)
648 return 25;
649 if (tsz > 48)
650 return 48;
651 return tsz;
652}
653
654bool
655TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
656{
657 return (currPhysAddrRange != MaxPhysAddrRange &&
658 bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
659}
660
// Start an AArch64 table walk: select the TTBR, table size, granule
// size and physical-address range for the current exception level,
// compute the starting lookup level and first descriptor address, and
// issue the fetch (timing, atomic or functional).
Fault
TableWalker::processWalkAArch64()
{
    assert(currState->aarch64);

    DPRINTF(TLB, "Beginning table walk for address %#llx, TTBCR: %#llx\n",
            currState->vaddr_tainted, currState->ttbcr);

    // Determine TTBR, table size, granule size and phys. address range
    Addr ttbr = 0;
    int tsz = 0, ps = 0;
    bool large_grain = false;
    bool fault = false;
    switch (currState->el) {
      case EL0:
      case EL1:
        // Bits [63:48] select between the TTBR0 (all zeros) and TTBR1
        // (all ones) address ranges.
        switch (bits(currState->vaddr, 63,48)) {
          case 0:
            DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
            tsz = adjustTableSizeAArch64(64 - currState->ttbcr.t0sz);
            large_grain = currState->ttbcr.tg0;
            if (bits(currState->vaddr, 63, tsz) != 0x0 ||
                currState->ttbcr.epd0)
                fault = true;
            break;
          case 0xffff:
            DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
            tsz = adjustTableSizeAArch64(64 - currState->ttbcr.t1sz);
            large_grain = currState->ttbcr.tg1;
            if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
                currState->ttbcr.epd1)
                fault = true;
            break;
          default:
            // top two bytes must be all 0s or all 1s, else invalid addr
            fault = true;
        }
        ps = currState->ttbcr.ips;
        break;
      case EL2:
      case EL3:
        // EL2/EL3 only have a TTBR0 region.
        switch(bits(currState->vaddr, 63,48)) {
          case 0:
            DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
            if (currState->el == EL2)
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
            else
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
            tsz = adjustTableSizeAArch64(64 - currState->ttbcr.t0sz);
            large_grain = currState->ttbcr.tg0;
            break;
          default:
            // invalid addr if top two bytes are not all 0s
            fault = true;
        }
        ps = currState->ttbcr.ps;
        break;
    }

    if (fault) {
        // Address was outside any enabled translation range: raise a
        // level-0 translation fault of the appropriate flavor.
        Fault f;
        if (currState->isFetch)
            f =  new PrefetchAbort(currState->vaddr_tainted,
                                     ArmFault::TranslationLL + L0, isStage2,
                                     ArmFault::LpaeTran);
        else
            f = new DataAbort(currState->vaddr_tainted,
                              TlbEntry::DomainType::NoAccess,
                              currState->isWrite,
                              ArmFault::TranslationLL + L0,
                              isStage2, ArmFault::LpaeTran);

        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;

    }

    // Determine starting lookup level
    // The walk starts at the shallowest level whose span covers the
    // configured input address size.
    LookupLevel start_lookup_level;
    int grain_size, stride;
    if (large_grain) {  // 64 KB granule
        grain_size = 16;
        stride = grain_size - 3;
        if (tsz > grain_size + 2 * stride)
            start_lookup_level = L1;
        else if (tsz > grain_size + stride)
            start_lookup_level = L2;
        else
            start_lookup_level = L3;
    } else {  // 4 KB granule
        grain_size = 12;
        stride = grain_size - 3;
        if (tsz > grain_size + 3 * stride)
            start_lookup_level = L0;
        else if (tsz > grain_size + 2 * stride)
            start_lookup_level = L1;
        else
            start_lookup_level = L2;
    }

    // Determine table base address
    int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) -
        grain_size;
    Addr base_addr = mbits(ttbr, 47, base_addr_lo);

    // Determine physical address size and raise an Address Size Fault if
    // necessary
    int pa_range = decodePhysAddrRange64(ps);
    // Clamp to lower limit
    if (pa_range > physAddrRange)
        currState->physAddrRange = physAddrRange;
    else
        currState->physAddrRange = pa_range;
    if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
        DPRINTF(TLB, "Address size fault before any lookup\n");
        Fault f;
        if (currState->isFetch)
            f = new PrefetchAbort(currState->vaddr_tainted,
                                  ArmFault::AddressSizeLL + start_lookup_level,
                                  isStage2,
                                  ArmFault::LpaeTran);
        else
            f = new DataAbort(currState->vaddr_tainted,
                              TlbEntry::DomainType::NoAccess,
                              currState->isWrite,
                              ArmFault::AddressSizeLL + start_lookup_level,
                              isStage2,
                              ArmFault::LpaeTran);


        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;

   }

    // Determine descriptor address
    Addr desc_addr = base_addr |
        (bits(currState->vaddr, tsz - 1,
              stride * (3 - start_lookup_level) + grain_size) << 3);

    // Trickbox address check
    Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
                        currState->vaddr, sizeof(uint64_t), currState->isFetch,
                        currState->isWrite, TlbEntry::DomainType::NoAccess,
                        start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    Request::Flags flag = 0;
    if (currState->sctlr.c == 0) {
        flag = Request::UNCACHEABLE;
    }

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = true;
    currState->longDesc.largeGrain = large_grain;
    currState->longDesc.grainSize = grain_size;

    if (currState->timing) {
        // Timing mode: issue the DMA and park the walk state on the
        // queue for the starting level; the matching event resumes it.
        Event *event;
        switch (start_lookup_level) {
          case L0:
            event = (Event *) &doL0LongDescEvent;
            break;
          case L1:
            event = (Event *) &doL1LongDescEvent;
            break;
          case L2:
            event = (Event *) &doL2LongDescEvent;
            break;
          case L3:
            event = (Event *) &doL3LongDescEvent;
            break;
          default:
            panic("Invalid table lookup level");
            break;
        }
        port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t), event,
                       (uint8_t*) &currState->longDesc.data,
                       currState->tc->getCpuPtr()->clockPeriod(), flag);
        DPRINTF(TLBVerbose,
                "Adding to walker fifo: queue size before adding: %d\n",
                stateQueues[start_lookup_level].size());
        stateQueues[start_lookup_level].push_back(currState);
        currState = NULL;
    } else if (!currState->functional) {
        // Atomic mode: the DMA completes inline, decode immediately.
        port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t),
                       NULL, (uint8_t*) &currState->longDesc.data,
                       currState->tc->getCpuPtr()->clockPeriod(), flag);
        doLongDescriptor();
        f = currState->fault;
    } else {
        // Functional mode: read the descriptor with a functional packet.
        RequestPtr req = new Request(desc_addr, sizeof(uint64_t), flag,
                                     masterId);
        PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
        pkt->dataStatic((uint8_t*) &currState->longDesc.data);
        port.sendFunctional(pkt);
        doLongDescriptor();
        delete req;
        delete pkt;
        f = currState->fault;
    }

    return f;
}
892
// Fill in the memory attributes (type, cacheability, shareability,
// inner/outer attributes) of a short-descriptor TLB entry from its
// TEX/C/B bits and S bit, either directly or via the PRRR/NMRR remap
// registers when SCTLR.TRE is enabled.
void
TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
                      uint8_t texcb, bool s)
{
    // Note: the tc and sctlr parameters are hiding the tc and sctlr
    // class members
    DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
    te.shareable = false; // default value
    te.nonCacheable = false;
    te.outerShareable = false;
    if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
        // TEX remap disabled: decode TEX[2:0]:C:B directly.
        switch(texcb) {
          case 0: // Strongly-ordered
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.shareable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            break;
          case 1: // Shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 2: // Outer and Inner Write-Through, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 6;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 3: // Outer and Inner Write-Back, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 7;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 4: // Outer and Inner Non-cacheable
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 0;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 5: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 6: // Implementation Defined
            panic("Implementation-defined texcb value!\n");
            break;
          case 7: // Outer and Inner Write-Back, Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 5;
            te.outerAttrs = 1;
            break;
          case 8: // Non-shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = false;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 9 ... 15:  // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 16 ... 31: // Cacheable Memory
            // TEX[2] set: inner attrs in texcb[1:0], outer in texcb[3:2].
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
                te.nonCacheable = true;
            te.innerAttrs = bits(texcb, 1, 0);
            te.outerAttrs = bits(texcb, 3, 2);
            break;
          default:
            panic("More than 32 states for 5 bits?\n");
        }
    } else {
        // TEX remap enabled: look up the region attributes in PRRR/NMRR
        // using TEX[0]:C:B as the index.
        assert(tc);
        PRRR prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR,
                                    currState->tc, !currState->isSecure));
        NMRR nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR,
                                    currState->tc, !currState->isSecure));
        DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
        uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
        switch(bits(texcb, 2,0)) {
          case 0:
            curr_tr = prrr.tr0;
            curr_ir = nmrr.ir0;
            curr_or = nmrr.or0;
            te.outerShareable = (prrr.nos0 == 0);
            break;
          case 1:
            curr_tr = prrr.tr1;
            curr_ir = nmrr.ir1;
            curr_or = nmrr.or1;
            te.outerShareable = (prrr.nos1 == 0);
            break;
          case 2:
            curr_tr = prrr.tr2;
            curr_ir = nmrr.ir2;
            curr_or = nmrr.or2;
            te.outerShareable = (prrr.nos2 == 0);
            break;
          case 3:
            curr_tr = prrr.tr3;
            curr_ir = nmrr.ir3;
            curr_or = nmrr.or3;
            te.outerShareable = (prrr.nos3 == 0);
            break;
          case 4:
            curr_tr = prrr.tr4;
            curr_ir = nmrr.ir4;
            curr_or = nmrr.or4;
            te.outerShareable = (prrr.nos4 == 0);
            break;
          case 5:
            curr_tr = prrr.tr5;
            curr_ir = nmrr.ir5;
            curr_or = nmrr.or5;
            te.outerShareable = (prrr.nos5 == 0);
            break;
          case 6:
            panic("Imp defined type\n");
          case 7:
            curr_tr = prrr.tr7;
            curr_ir = nmrr.ir7;
            curr_or = nmrr.or7;
            te.outerShareable = (prrr.nos7 == 0);
            break;
        }

        // Decode the remapped memory type; shareability depends on the
        // PRRR DS/NS bits and the descriptor's S bit.
        switch(curr_tr) {
          case 0:
            DPRINTF(TLBVerbose, "StronglyOrdered\n");
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.nonCacheable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            te.shareable = true;
            break;
          case 1:
            DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
                    prrr.ds1, prrr.ds0, s);
            te.mtype = TlbEntry::MemoryType::Device;
            te.nonCacheable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            if (prrr.ds1 && s)
                te.shareable = true;
            if (prrr.ds0 && !s)
                te.shareable = true;
            break;
          case 2:
            DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
                    prrr.ns1, prrr.ns0, s);
            te.mtype = TlbEntry::MemoryType::Normal;
            if (prrr.ns1 && s)
                te.shareable = true;
            if (prrr.ns0 && !s)
                te.shareable = true;
            break;
          case 3:
            panic("Reserved type");
        }

        // Normal memory additionally gets its inner/outer cacheability
        // from the NMRR IR/OR fields.
        if (te.mtype == TlbEntry::MemoryType::Normal){
            switch(curr_ir) {
              case 0:
                te.nonCacheable = true;
                te.innerAttrs = 0;
                break;
              case 1:
                te.innerAttrs = 5;
                break;
              case 2:
                te.innerAttrs = 6;
                break;
              case 3:
                te.innerAttrs = 7;
                break;
            }

            switch(curr_or) {
              case 0:
                te.nonCacheable = true;
                te.outerAttrs = 0;
                break;
              case 1:
                te.outerAttrs = 1;
                break;
              case 2:
                te.outerAttrs = 2;
                break;
              case 3:
                te.outerAttrs = 3;
                break;
            }
        }
    }
    DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, \
            outerAttrs: %d\n",
            te.shareable, te.innerAttrs, te.outerAttrs);
    te.setAttributes(false);
}
1099
// Fill in the memory attributes of a long-descriptor (LPAE) TLB entry:
// from the descriptor's MemAttr field for stage-2 translations, or from
// the MAIR0/MAIR1 attribute indirection registers for stage 1.
void
TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
    LongDescriptor &lDescriptor)
{
    assert(_haveLPAE);

    uint8_t attr;
    uint8_t sh = lDescriptor.sh();
    // Different format and source of attributes if this is a stage 2
    // translation
    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_3_2 = (attr >> 2) & 0x3;
        uint8_t attr_1_0 = attr & 0x3;

        DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_3_2 == 0) {
            // MemAttr[3:2] == 0: Strongly-ordered or Device memory.
            te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                     : TlbEntry::MemoryType::Device;
            te.outerAttrs = 0;
            te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            // Normal memory; cacheability encoded in the two halves.
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = attr_3_2 == 1 ? 0 :
                            attr_3_2 == 2 ? 2 : 1;
            te.innerAttrs = attr_1_0 == 1 ? 0 :
                            attr_1_0 == 2 ? 6 : 5;
            te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        // LPAE always uses remapping of memory attributes, irrespective of the
        // value of SCTLR.TRE
        // AttrIndx[2] selects MAIR1 vs MAIR0; AttrIndx[1:0] selects the
        // byte within the register.
        int reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
        reg = flattenMiscRegNsBanked(reg, currState->tc, !currState->isSecure);
        uint32_t mair = currState->tc->readMiscReg(reg);
        attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
        uint8_t attr_7_4 = bits(attr, 7, 4);
        uint8_t attr_3_0 = bits(attr, 3, 0);
        DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);

        // Note: the memory subsystem only cares about the 'cacheable' memory
        // attribute. The other attributes are only used to fill the PAR register
        // accordingly to provide the illusion of full support
        te.nonCacheable = false;

        switch (attr_7_4) {
          case 0x0:
            // Strongly-ordered or Device memory
            if (attr_3_0 == 0x0)
                te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            else if (attr_3_0 == 0x4)
                te.mtype = TlbEntry::MemoryType::Device;
            else
                panic("Unpredictable behavior\n");
            te.nonCacheable = true;
            te.outerAttrs   = 0;
            break;
          case 0x4:
            // Normal memory, Outer Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = 0;
            if (attr_3_0 == 0x4)
                // Inner Non-cacheable
                te.nonCacheable = true;
            else if (attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          case 0x8:
          case 0x9:
          case 0xa:
          case 0xb:
          case 0xc:
          case 0xd:
          case 0xe:
          case 0xf:
            if (attr_7_4 & 0x4) {
                te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
            } else {
                te.outerAttrs = 0x2;
            }
            // Normal memory, Outer Cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }

        // Inner attributes from the low nibble of the MAIR byte.
        switch (attr_3_0) {
          case 0x0:
            te.innerAttrs = 0x1;
            break;
          case 0x4:
            te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
            break;
          case 0x8:
          case 0x9:
          case 0xA:
          case 0xB:
            te.innerAttrs = 6;
            break;
          case 0xC:
          case 0xD:
          case 0xE:
          case 0xF:
            te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
    }

    // SH field: 2 = outer shareable, 3 = inner shareable.
    te.outerShareable = sh == 2;
    te.shareable       = (sh & 0x2) ? true : false;
    te.setAttributes(true);
    te.attributes |= (uint64_t) attr << 56;
}
1224
1225void
1226TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te, uint8_t attrIndx,
1227 uint8_t sh)
1228{
1229 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1230
1231 // Select MAIR
1232 uint64_t mair;
1233 switch (currState->el) {
1234 case EL0:
1235 case EL1:
1236 mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1237 break;
1238 case EL2:
1239 mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1240 break;
1241 case EL3:
1242 mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1243 break;
1244 default:
1245 panic("Invalid exception level");
1246 break;
1247 }
1248
1249 // Select attributes
1250 uint8_t attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1251 uint8_t attr_lo = bits(attr, 3, 0);
1252 uint8_t attr_hi = bits(attr, 7, 4);
1253
1254 // Memory type
1255 te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;
1256
1257 // Cacheability
1258 te.nonCacheable = false;
1259 if (te.mtype == TlbEntry::MemoryType::Device || // Device memory
1260 attr_hi == 0x8 || // Normal memory, Outer Non-cacheable
1261 attr_lo == 0x8) { // Normal memory, Inner Non-cacheable
1262 te.nonCacheable = true;
1263 }
1264
1265 te.shareable = sh == 2;
1266 te.outerShareable = (sh & 0x2) ? true : false;
1267 // Attributes formatted according to the 64-bit PAR
1268 te.attributes = ((uint64_t) attr << 56) |
1269 (1 << 11) | // LPAE bit
1270 (te.ns << 9) | // NS bit
1271 (sh << 7);
1272}
1273
1274void
1275TableWalker::doL1Descriptor()
1276{
1277 if (currState->fault != NoFault) {
1278 return;
1279 }
1280
1281 DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1282 currState->vaddr_tainted, currState->l1Desc.data);
1283 TlbEntry te;
1284
1285 switch (currState->l1Desc.type()) {
1286 case L1Descriptor::Ignore:
1287 case L1Descriptor::Reserved:
1288 if (!currState->timing) {
1289 currState->tc = NULL;
1290 currState->req = NULL;
1291 }
1292 DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1293 if (currState->isFetch)
1294 currState->fault =
1295 new PrefetchAbort(currState->vaddr_tainted,
1296 ArmFault::TranslationLL + L1,
1297 isStage2,
1298 ArmFault::VmsaTran);
1299 else
1300 currState->fault =
1301 new DataAbort(currState->vaddr_tainted,
1302 TlbEntry::DomainType::NoAccess,
1303 currState->isWrite,
1304 ArmFault::TranslationLL + L1, isStage2,
1305 ArmFault::VmsaTran);
1306 return;
1307 case L1Descriptor::Section:
1308 if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1309 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
1310 * enabled if set, do l1.Desc.setAp0() instead of generating
1311 * AccessFlag0
1312 */
1313
1314 currState->fault = new DataAbort(currState->vaddr_tainted,
1315 currState->l1Desc.domain(),
1316 currState->isWrite,
1317 ArmFault::AccessFlagLL + L1,
1318 isStage2,
1319 ArmFault::VmsaTran);
1320 }
1321 if (currState->l1Desc.supersection()) {
1322 panic("Haven't implemented supersections\n");
1323 }
1324 insertTableEntry(currState->l1Desc, false);
1325 return;
1326 case L1Descriptor::PageTable:
1327 {
1328 Addr l2desc_addr;
1329 l2desc_addr = currState->l1Desc.l2Addr() |
1330 (bits(currState->vaddr, 19, 12) << 2);
1331 DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1332 l2desc_addr, currState->isSecure ? "s" : "ns");
1333
1334 // Trickbox address check
1335 currState->fault = tlb->walkTrickBoxCheck(
1336 l2desc_addr, currState->isSecure, currState->vaddr,
1337 sizeof(uint32_t), currState->isFetch, currState->isWrite,
1338 currState->l1Desc.domain(), L2);
1339
1340 if (currState->fault) {
1341 if (!currState->timing) {
1342 currState->tc = NULL;
1343 currState->req = NULL;
1344 }
1345 return;
1346 }
1347
1348 Request::Flags flag = 0;
1349 if (currState->isSecure)
1350 flag.set(Request::SECURE);
1351
1352 bool delayed;
1353 delayed = fetchDescriptor(l2desc_addr,
1354 (uint8_t*)&currState->l2Desc.data,
1355 sizeof(uint32_t), flag, -1, &doL2DescEvent,
1356 &TableWalker::doL2Descriptor);
1357 if (delayed) {
1358 currState->delayed = true;
1359 }
1360
1361 return;
1362 }
1363 default:
1364 panic("A new type in a 2 bit field?\n");
1365 }
1366}
1367
1368void
1369TableWalker::doLongDescriptor()
1370{
1371 if (currState->fault != NoFault) {
1372 return;
1373 }
1374
1375 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1376 currState->longDesc.lookupLevel, currState->vaddr_tainted,
1377 currState->longDesc.data,
1378 currState->aarch64 ? "AArch64" : "long-desc.");
1379
1380 if ((currState->longDesc.type() == LongDescriptor::Block) ||
1381 (currState->longDesc.type() == LongDescriptor::Page)) {
1382 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1383 "xn: %d, ap: %d, af: %d, type: %d\n",
1384 currState->longDesc.lookupLevel,
1385 currState->longDesc.data,
1386 currState->longDesc.pxn(),
1387 currState->longDesc.xn(),
1388 currState->longDesc.ap(),
1389 currState->longDesc.af(),
1390 currState->longDesc.type());
1391 } else {
1392 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1393 currState->longDesc.lookupLevel,
1394 currState->longDesc.data,
1395 currState->longDesc.type());
1396 }
1397
1398 TlbEntry te;
1399
1400 switch (currState->longDesc.type()) {
1401 case LongDescriptor::Invalid:
1402 if (!currState->timing) {
1403 currState->tc = NULL;
1404 currState->req = NULL;
1405 }
1406
1407 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1408 currState->longDesc.lookupLevel,
1409 ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1410 if (currState->isFetch)
1411 currState->fault = new PrefetchAbort(
1412 currState->vaddr_tainted,
1413 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1414 isStage2,
1415 ArmFault::LpaeTran);
1416 else
1417 currState->fault = new DataAbort(
1418 currState->vaddr_tainted,
1419 TlbEntry::DomainType::NoAccess,
1420 currState->isWrite,
1421 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1422 isStage2,
1423 ArmFault::LpaeTran);
1424 return;
1425 case LongDescriptor::Block:
1426 case LongDescriptor::Page:
1427 {
1428 bool fault = false;
1429 bool aff = false;
1430 // Check for address size fault
1431 if (checkAddrSizeFaultAArch64(
1432 mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
1433 currState->longDesc.offsetBits()),
1434 currState->physAddrRange)) {
1435 fault = true;
1436 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1437 currState->longDesc.lookupLevel);
1438 // Check for access fault
1439 } else if (currState->longDesc.af() == 0) {
1440 fault = true;
1441 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1442 currState->longDesc.lookupLevel);
1443 aff = true;
1444 }
1445 if (fault) {
1446 if (currState->isFetch)
1447 currState->fault = new PrefetchAbort(
1448 currState->vaddr_tainted,
1449 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1450 currState->longDesc.lookupLevel,
1451 isStage2,
1452 ArmFault::LpaeTran);
1453 else
1454 currState->fault = new DataAbort(
1455 currState->vaddr_tainted,
1456 TlbEntry::DomainType::NoAccess, currState->isWrite,
1457 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1458 currState->longDesc.lookupLevel,
1459 isStage2,
1460 ArmFault::LpaeTran);
1461 } else {
1462 insertTableEntry(currState->longDesc, true);
1463 }
1464 }
1465 return;
1466 case LongDescriptor::Table:
1467 {
1468 // Set hierarchical permission flags
1469 currState->secureLookup = currState->secureLookup &&
1470 currState->longDesc.secureTable();
1471 currState->rwTable = currState->rwTable &&
1472 currState->longDesc.rwTable();
1473 currState->userTable = currState->userTable &&
1474 currState->longDesc.userTable();
1475 currState->xnTable = currState->xnTable ||
1476 currState->longDesc.xnTable();
1477 currState->pxnTable = currState->pxnTable ||
1478 currState->longDesc.pxnTable();
1479
1480 // Set up next level lookup
1481 Addr next_desc_addr = currState->longDesc.nextDescAddr(
1482 currState->vaddr);
1483
1484 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1485 currState->longDesc.lookupLevel,
1486 currState->longDesc.lookupLevel + 1,
1487 next_desc_addr,
1488 currState->secureLookup ? "s" : "ns");
1489
1490 // Check for address size fault
1491 if (currState->aarch64 && checkAddrSizeFaultAArch64(
1492 next_desc_addr, currState->physAddrRange)) {
1493 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1494 currState->longDesc.lookupLevel);
1495 if (currState->isFetch)
1496 currState->fault = new PrefetchAbort(
1497 currState->vaddr_tainted,
1498 ArmFault::AddressSizeLL
1499 + currState->longDesc.lookupLevel,
1500 isStage2,
1501 ArmFault::LpaeTran);
1502 else
1503 currState->fault = new DataAbort(
1504 currState->vaddr_tainted,
1505 TlbEntry::DomainType::NoAccess, currState->isWrite,
1506 ArmFault::AddressSizeLL
1507 + currState->longDesc.lookupLevel,
1508 isStage2,
1509 ArmFault::LpaeTran);
1510 return;
1511 }
1512
1513 // Trickbox address check
1514 currState->fault = tlb->walkTrickBoxCheck(
1515 next_desc_addr, currState->vaddr,
1516 currState->vaddr, sizeof(uint64_t),
1517 currState->isFetch, currState->isWrite,
1518 TlbEntry::DomainType::Client,
1519 toLookupLevel(currState->longDesc.lookupLevel +1));
1520
1521 if (currState->fault) {
1522 if (!currState->timing) {
1523 currState->tc = NULL;
1524 currState->req = NULL;
1525 }
1526 return;
1527 }
1528
1529 Request::Flags flag = 0;
1530 if (currState->secureLookup)
1531 flag.set(Request::SECURE);
1532
1533 currState->longDesc.lookupLevel =
1534 (LookupLevel) (currState->longDesc.lookupLevel + 1);
1535 Event *event = NULL;
1536 switch (currState->longDesc.lookupLevel) {
1537 case L1:
1538 assert(currState->aarch64);
1539 event = &doL1LongDescEvent;
1540 break;
1541 case L2:
1542 event = &doL2LongDescEvent;
1543 break;
1544 case L3:
1545 event = &doL3LongDescEvent;
1546 break;
1547 default:
1548 panic("Wrong lookup level in table walk\n");
1549 break;
1550 }
1551
1552 bool delayed;
1553 delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1554 sizeof(uint64_t), flag, -1, event,
1555 &TableWalker::doLongDescriptor);
1556 if (delayed) {
1557 currState->delayed = true;
1558 }
1559 }
1560 return;
1561 default:
1562 panic("A new type in a 2 bit field?\n");
1563 }
1564}
1565
1566void
1567TableWalker::doL2Descriptor()
1568{
1569 if (currState->fault != NoFault) {
1570 return;
1571 }
1572
1573 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1574 currState->vaddr_tainted, currState->l2Desc.data);
1575 TlbEntry te;
1576
1577 if (currState->l2Desc.invalid()) {
1578 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1579 if (!currState->timing) {
1580 currState->tc = NULL;
1581 currState->req = NULL;
1582 }
1583 if (currState->isFetch)
1584 currState->fault =
1585 new PrefetchAbort(currState->vaddr_tainted,
1586 ArmFault::TranslationLL + L2,
1587 isStage2,
1588 ArmFault::VmsaTran);
1589 else
1590 currState->fault =
1591 new DataAbort(currState->vaddr_tainted, currState->l1Desc.domain(),
1592 currState->isWrite, ArmFault::TranslationLL + L2,
1593 isStage2,
1594 ArmFault::VmsaTran);
1595 return;
1596 }
1597
1598 if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1599 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled
1600 * if set, do l2.Desc.setAp0() instead of generating AccessFlag0
1601 */
1602 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1603 currState->sctlr.afe, currState->l2Desc.ap());
1604
1605 currState->fault =
1606 new DataAbort(currState->vaddr_tainted,
1607 TlbEntry::DomainType::NoAccess, currState->isWrite,
1608 ArmFault::AccessFlagLL + L2, isStage2,
1609 ArmFault::VmsaTran);
1610 }
1611
1612 insertTableEntry(currState->l2Desc, false);
1613}
1614
void
TableWalker::doL1DescriptorWrapper()
{
    // Timing-mode event callback: the DMA read of the L1 short descriptor
    // has completed. Process it, then either finish the translation, retire
    // the walk on a fault, or park the state on the L2 queue.
    currState = stateQueues[L1].front();
    currState->delayed = false;
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }


    DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data);
    DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",currState->l1Desc.data);

    DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
    doL1Descriptor();

    // The walk state is off the queue either way; completeDrain() lets a
    // pending drain finish if this was the last outstanding walk.
    stateQueues[L1].pop_front();
    completeDrain();
    // Check if fault was generated
    if (currState->fault != NoFault) {
        // Report the fault to the original requester and retire this walk.
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    }
    else if (!currState->delayed) {
        // delay is not set so there is no L2 to do
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            // Re-run the translation; the new TLB entry will now hit.
            currState->fault = tlb->translateTiming(currState->req, currState->tc,
                currState->transState, currState->mode);
        }

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        // need to do L2 descriptor
        stateQueues[L2].push_back(currState);
    }
    currState = NULL;
}
1670
void
TableWalker::doL2DescriptorWrapper()
{
    // Timing-mode event callback: the DMA read of the L2 short descriptor
    // has completed. This is always the last level of a short-descriptor
    // walk, so the state is retired unconditionally afterwards.
    currState = stateQueues[L2].front();
    assert(currState->delayed);
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doL2Descriptor();

    // Check if fault was generated
    if (currState->fault != NoFault) {
        // Report the fault to the original requester.
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);
    }
    else {
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            // Re-run the translation; the new TLB entry will now hit.
            currState->fault = tlb->translateTiming(currState->req,
                currState->tc, currState->transState, currState->mode);
        }
    }


    // Retire the walk state and kick off the next queued walk, if any.
    stateQueues[L2].pop_front();
    completeDrain();
    pending = false;
    nextWalk(currState->tc);

    currState->req = NULL;
    currState->tc = NULL;
    currState->delayed = false;

    delete currState;
    currState = NULL;
}
1713
// Timing-mode event callback for a completed L0 long-descriptor fetch.
void
TableWalker::doL0LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L0);
}
1719
// Timing-mode event callback for a completed L1 long-descriptor fetch.
void
TableWalker::doL1LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L1);
}
1725
// Timing-mode event callback for a completed L2 long-descriptor fetch.
void
TableWalker::doL2LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L2);
}
1731
// Timing-mode event callback for a completed L3 long-descriptor fetch.
void
TableWalker::doL3LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L3);
}
1737
void
TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
{
    // Common timing-mode completion handler for long-descriptor fetches at
    // any lookup level: process the descriptor, then finish, fault, or park
    // the state on the next level's queue.
    currState = stateQueues[curr_lookup_level].front();
    assert(curr_lookup_level == currState->longDesc.lookupLevel);
    currState->delayed = false;

    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    // May advance currState->longDesc.lookupLevel and set currState->delayed
    // if a further table fetch was started.
    doLongDescriptor();

    stateQueues[curr_lookup_level].pop_front();

    if (currState->fault != NoFault) {
        // A fault was generated
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else if (!currState->delayed) {
        // No additional lookups required
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            // Re-run the translation; the new TLB entry will now hit.
            currState->fault = tlb->translateTiming(currState->req, currState->tc,
                                                    currState->transState,
                                                    currState->mode);
        }

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        // Sanity check: a delayed walk must still have a level to descend to.
        if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
            panic("Max. number of lookups already reached in table walk\n");
        // Need to perform additional lookups
        stateQueues[currState->longDesc.lookupLevel].push_back(currState);
    }
    currState = NULL;
}
1794
1795
1796void
1797TableWalker::nextWalk(ThreadContext *tc)
1798{
1799 if (pendingQueue.size())
1800 schedule(doProcessEvent, clockEdge(Cycles(1)));
1801}
1802
// Fetch a page-table descriptor into 'data', routing the read through the
// stage-2 MMU when the walk itself is subject to a second translation stage.
// Returns true when the walk is in timing mode, i.e. completion is deferred
// to 'event'; in that case, if queueIndex >= 0, ownership of currState is
// transferred to stateQueues[queueIndex]. In atomic/functional mode the read
// completes inline and doDescriptor is invoked immediately.
bool
TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
                             Request::Flags flags, int queueIndex, Event *event,
                             void (TableWalker::*doDescriptor)())
{
    bool isTiming = currState->timing;

    // do the requests for the page table descriptors have to go through the
    // second stage MMU
    if (currState->stage2Req) {
        Fault fault;
        flags = flags | TLB::MustBeOne;

        if (isTiming) {
            Stage2MMU::Stage2Translation *tran = new
                Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
                                             currState->vaddr);
            currState->stage2Tran = tran;
            stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
                                     flags, masterId);
            // NOTE(review): tran->fault is sampled right after issuing the
            // timed read — presumably only meaningful if the stage-2
            // translation faulted synchronously; confirm against Stage2MMU.
            fault = tran->fault;
        } else {
            fault = stage2Mmu->readDataUntimed(currState->tc,
                currState->vaddr, descAddr, data, numBytes, flags, masterId,
                currState->functional);
        }

        if (fault != NoFault) {
            currState->fault = fault;
        }
        if (isTiming) {
            // Hand the walk state over to the level's queue; the completion
            // event will pick it up again.
            if (queueIndex >= 0) {
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else {
            (this->*doDescriptor)();
        }
    } else {
        if (isTiming) {
            // Plain timing DMA read; 'event' fires on completion.
            port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            if (queueIndex >= 0) {
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else if (!currState->functional) {
            // Atomic mode: blocking DMA read, then process inline.
            port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            (this->*doDescriptor)();
        } else {
            // Functional access: build and send the packet directly so no
            // simulated time elapses.
            RequestPtr req = new Request(descAddr, numBytes, flags, masterId);
            req->taskId(ContextSwitchTaskId::DMA);
            PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
            pkt->dataStatic(data);
            port.sendFunctional(pkt);
            (this->*doDescriptor)();
            delete req;
            delete pkt;
        }
    }
    return (isTiming);
}
1870
// Build a TLB entry from a completed leaf descriptor (short- or
// long-descriptor format, per 'longDescriptor') and insert it into the TLB.
void
TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
{
    TlbEntry te;

    // Create and fill a new page table entry
    te.valid          = true;
    te.longDescFormat = longDescriptor;
    te.isHyp          = currState->isHyp;
    te.asid           = currState->asid;
    te.vmid           = currState->vmid;
    te.N              = descriptor.offsetBits();
    te.vpn            = currState->vaddr >> te.N;
    te.size           = (1<<te.N) - 1;
    te.pfn            = descriptor.pfn();
    te.domain         = descriptor.domain();
    te.lookupLevel    = descriptor.lookupLevel;
    te.ns             = !descriptor.secure(haveSecurity, currState) || isStage2;
    te.nstid          = !currState->isSecure;
    te.xn             = descriptor.xn();
    if (currState->aarch64)
        te.el         = currState->el;
    else
        te.el         = 1;

    // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
    // as global
    te.global         = descriptor.global(currState) || isStage2;
    if (longDescriptor) {
        LongDescriptor lDescriptor =
            dynamic_cast<LongDescriptor &>(descriptor);

        // Fold in the XN/PXN restrictions accumulated from table descriptors
        // during the walk.
        te.xn |= currState->xnTable;
        te.pxn = currState->pxnTable || lDescriptor.pxn();
        if (isStage2) {
            // this is actually the HAP field, but it's stored in the same bit
            // positions as the AP field in a stage 1 translation.
            te.hap = lDescriptor.ap();
        } else {
            // Combine the descriptor's AP with the hierarchical APTable
            // restrictions gathered during the walk (rwTable/userTable).
            te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
                (currState->userTable && (descriptor.ap() & 0x1));
        }
        if (currState->aarch64)
            memAttrsAArch64(currState->tc, te, currState->longDesc.attrIndx(),
                            currState->longDesc.sh());
        else
            memAttrsLPAE(currState->tc, te, lDescriptor);
    } else {
        te.ap = descriptor.ap();
        memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
                 descriptor.shareable());
    }

    // Debug output
    DPRINTF(TLB, descriptor.dbgHeader().c_str());
    DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
            te.N, te.pfn, te.size, te.global, te.valid);
    DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
            "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
            te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
            te.nonCacheable, te.ns);
    DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
            descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
            descriptor.getRawData());

    // Insert the entry into the TLB
    tlb->insert(currState->vaddr, te);
    // Atomic walks own no queued state, so drop the references now.
    if (!currState->timing) {
        currState->tc = NULL;
        currState->req = NULL;
    }
}
1943
// Python-parameter factory: instantiate the ARM table walker from its params.
ArmISA::TableWalker *
ArmTableWalkerParams::create()
{
    return new ArmISA::TableWalker(this);
}
1949
1950LookupLevel
1951TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
1952{
1953 switch (lookup_level_as_int) {
1954 case L1:
1955 return L1;
1956 case L2:
1957 return L2;
1958 case L3:
1959 return L3;
1960 default:
1961 panic("Invalid lookup level conversion");
1962 }
1963}