Deleted Added
sdiff udiff text old ( 10579:e622a3e2ed14 ) new ( 10621:b7bc5b1084a4 )
full compact
1/*
2 * Copyright (c) 2010, 2012-2014 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Ali Saidi
38 * Giacomo Gabrielli
39 */
40
41#include <memory>
42
43#include "arch/arm/faults.hh"
44#include "arch/arm/stage2_mmu.hh"
45#include "arch/arm/system.hh"
46#include "arch/arm/table_walker.hh"
47#include "arch/arm/tlb.hh"
48#include "cpu/base.hh"
49#include "cpu/thread_context.hh"
50#include "debug/Checkpoint.hh"
51#include "debug/Drain.hh"
52#include "debug/TLB.hh"
53#include "debug/TLBVerbose.hh"
54#include "sim/system.hh"
55
56using namespace ArmISA;
57
58TableWalker::TableWalker(const Params *p)
59 : MemObject(p), port(this, p->sys), drainManager(NULL),
60 stage2Mmu(NULL), isStage2(p->is_stage2), tlb(NULL),
61 currState(NULL), pending(false), masterId(p->sys->getMasterId(name())),
62 numSquashable(p->num_squash_per_cycle),
63 doL1DescEvent(this), doL2DescEvent(this),
64 doL0LongDescEvent(this), doL1LongDescEvent(this), doL2LongDescEvent(this),
65 doL3LongDescEvent(this),
66 doProcessEvent(this)
67{
68 sctlr = 0;
69
70 // Cache system-level properties
71 if (FullSystem) {
72 armSys = dynamic_cast<ArmSystem *>(p->sys);
73 assert(armSys);
74 haveSecurity = armSys->haveSecurity();
75 _haveLPAE = armSys->haveLPAE();
76 _haveVirtualization = armSys->haveVirtualization();
77 physAddrRange = armSys->physAddrRange();
78 _haveLargeAsid64 = armSys->haveLargeAsid64();
79 } else {
80 armSys = NULL;
81 haveSecurity = _haveLPAE = _haveVirtualization = false;
82 _haveLargeAsid64 = false;
83 physAddrRange = 32;
84 }
85
86}
87
88TableWalker::~TableWalker()
89{
90 ;
91}
92
93TableWalker::WalkerState::WalkerState() :
94 tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
95 asid(0), vmid(0), isHyp(false), transState(nullptr),
96 vaddr(0), vaddr_tainted(0), isWrite(false), isFetch(false), isSecure(false),
97 secureLookup(false), rwTable(false), userTable(false), xnTable(false),
98 pxnTable(false), stage2Req(false), doingStage2(false),
99 stage2Tran(nullptr), timing(false), functional(false),
100 mode(BaseTLB::Read), tranType(TLB::NormalTran), l2Desc(l1Desc),
101 delayed(false), tableWalker(nullptr)
102{
103}
104
105void
106TableWalker::completeDrain()
107{
108 if (drainManager && stateQueues[L1].empty() && stateQueues[L2].empty() &&
109 pendingQueue.empty()) {
110 setDrainState(Drainable::Drained);
111 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
112 drainManager->signalDrainDone();
113 drainManager = NULL;
114 }
115}
116
117unsigned int
118TableWalker::drain(DrainManager *dm)
119{
120 unsigned int count = port.drain(dm);
121
122 bool state_queues_not_empty = false;
123
124 for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
125 if (!stateQueues[i].empty()) {
126 state_queues_not_empty = true;
127 break;
128 }
129 }
130
131 if (state_queues_not_empty || pendingQueue.size()) {
132 drainManager = dm;
133 setDrainState(Drainable::Draining);
134 DPRINTF(Drain, "TableWalker not drained\n");
135
136 // return port drain count plus the table walker itself needs to drain
137 return count + 1;
138 } else {
139 setDrainState(Drainable::Drained);
140 DPRINTF(Drain, "TableWalker free, no need to drain\n");
141
142 // table walker is drained, but its ports may still need to be drained
143 return count;
144 }
145}
146
147void
148TableWalker::drainResume()
149{
150 Drainable::drainResume();
151 if (params()->sys->isTimingMode() && currState) {
152 delete currState;
153 currState = NULL;
154 }
155}
156
157BaseMasterPort&
158TableWalker::getMasterPort(const std::string &if_name, PortID idx)
159{
160 if (if_name == "port") {
161 return port;
162 }
163 return MemObject::getMasterPort(if_name, idx);
164}
165
166Fault
167TableWalker::walk(RequestPtr _req, ThreadContext *_tc, uint16_t _asid,
168 uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
169 TLB::Translation *_trans, bool _timing, bool _functional,
170 bool secure, TLB::ArmTranslationType tranType)
171{
172 assert(!(_functional && _timing));
173 WalkerState *savedCurrState = NULL;
174
175 if (!currState && !_functional) {
176 // For atomic mode, a new WalkerState instance should be only created
177 // once per TLB. For timing mode, a new instance is generated for every
178 // TLB miss.
179 DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");
180
181 currState = new WalkerState();
182 currState->tableWalker = this;
183 } else if (_functional) {
184 // If we are mixing functional mode with timing (or even
185 // atomic), we need to to be careful and clean up after
186 // ourselves to not risk getting into an inconsistent state.
187 DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
188 savedCurrState = currState;
189 currState = new WalkerState();
190 currState->tableWalker = this;
191 } else if (_timing) {
192 // This is a translation that was completed and then faulted again
193 // because some underlying parameters that affect the translation
194 // changed out from under us (e.g. asid). It will either be a
195 // misprediction, in which case nothing will happen or we'll use
196 // this fault to re-execute the faulting instruction which should clean
197 // up everything.
198 if (currState->vaddr_tainted == _req->getVaddr()) {
199 return std::make_shared<ReExec>();
200 }
201 }
202
203 currState->tc = _tc;
204 currState->aarch64 = opModeIs64(currOpMode(_tc));
205 currState->el = currEL(_tc);
206 currState->transState = _trans;
207 currState->req = _req;
208 currState->fault = NoFault;
209 currState->asid = _asid;
210 currState->vmid = _vmid;
211 currState->isHyp = _isHyp;
212 currState->timing = _timing;
213 currState->functional = _functional;
214 currState->mode = _mode;
215 currState->tranType = tranType;
216 currState->isSecure = secure;
217 currState->physAddrRange = physAddrRange;
218
219 /** @todo These should be cached or grabbed from cached copies in
220 the TLB, all these miscreg reads are expensive */
221 currState->vaddr_tainted = currState->req->getVaddr();
222 if (currState->aarch64)
223 currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
224 currState->tc, currState->el);
225 else
226 currState->vaddr = currState->vaddr_tainted;
227
228 if (currState->aarch64) {
229 switch (currState->el) {
230 case EL0:
231 case EL1:
232 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
233 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
234 break;
235 // @todo: uncomment this to enable Virtualization
236 // case EL2:
237 // assert(haveVirtualization);
238 // currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
239 // currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
240 // break;
241 case EL3:
242 assert(haveSecurity);
243 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
244 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
245 break;
246 default:
247 panic("Invalid exception level");
248 break;
249 }
250 } else {
251 currState->sctlr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
252 MISCREG_SCTLR, currState->tc, !currState->isSecure));
253 currState->ttbcr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
254 MISCREG_TTBCR, currState->tc, !currState->isSecure));
255 currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
256 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
257 currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
258 }
259 sctlr = currState->sctlr;
260
261 currState->isFetch = (currState->mode == TLB::Execute);
262 currState->isWrite = (currState->mode == TLB::Write);
263
264 // We only do a second stage of translation if we're not secure, or in
265 // hyp mode, the second stage MMU is enabled, and this table walker
266 // instance is the first stage.
267 currState->doingStage2 = false;
268 // @todo: for now disable this in AArch64 (HCR is not set)
269 currState->stage2Req = !currState->aarch64 && currState->hcr.vm &&
270 !isStage2 && !currState->isSecure && !currState->isHyp;
271
272 bool long_desc_format = currState->aarch64 ||
273 (_haveLPAE && currState->ttbcr.eae) ||
274 _isHyp || isStage2;
275
276 if (long_desc_format) {
277 // Helper variables used for hierarchical permissions
278 currState->secureLookup = currState->isSecure;
279 currState->rwTable = true;
280 currState->userTable = true;
281 currState->xnTable = false;
282 currState->pxnTable = false;
283 }
284
285 if (!currState->timing) {
286 Fault fault = NoFault;
287 if (currState->aarch64)
288 fault = processWalkAArch64();
289 else if (long_desc_format)
290 fault = processWalkLPAE();
291 else
292 fault = processWalk();
293
294 // If this was a functional non-timing access restore state to
295 // how we found it.
296 if (currState->functional) {
297 delete currState;
298 currState = savedCurrState;
299 }
300 return fault;
301 }
302
303 if (pending || pendingQueue.size()) {
304 pendingQueue.push_back(currState);
305 currState = NULL;
306 } else {
307 pending = true;
308 if (currState->aarch64)
309 return processWalkAArch64();
310 else if (long_desc_format)
311 return processWalkLPAE();
312 else
313 return processWalk();
314 }
315
316 return NoFault;
317}
318
319void
320TableWalker::processWalkWrapper()
321{
322 assert(!currState);
323 assert(pendingQueue.size());
324 currState = pendingQueue.front();
325
326 ExceptionLevel target_el = EL0;
327 if (currState->aarch64)
328 target_el = currEL(currState->tc);
329 else
330 target_el = EL1;
331
332 // Check if a previous walk filled this request already
333 // @TODO Should this always be the TLB or should we look in the stage2 TLB?
334 TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
335 currState->vmid, currState->isHyp, currState->isSecure, true, false,
336 target_el);
337
338 // Check if we still need to have a walk for this request. If the requesting
339 // instruction has been squashed, or a previous walk has filled the TLB with
340 // a match, we just want to get rid of the walk. The latter could happen
341 // when there are multiple outstanding misses to a single page and a
342 // previous request has been successfully translated.
343 if (!currState->transState->squashed() && !te) {
344 // We've got a valid request, lets process it
345 pending = true;
346 pendingQueue.pop_front();
347 // Keep currState in case one of the processWalk... calls NULLs it
348 WalkerState *curr_state_copy = currState;
349 Fault f;
350 if (currState->aarch64)
351 f = processWalkAArch64();
352 else if ((_haveLPAE && currState->ttbcr.eae) || currState->isHyp || isStage2)
353 f = processWalkLPAE();
354 else
355 f = processWalk();
356
357 if (f != NoFault) {
358 curr_state_copy->transState->finish(f, curr_state_copy->req,
359 curr_state_copy->tc, curr_state_copy->mode);
360
361 delete curr_state_copy;
362 }
363 return;
364 }
365
366
367 // If the instruction that we were translating for has been
368 // squashed we shouldn't bother.
369 unsigned num_squashed = 0;
370 ThreadContext *tc = currState->tc;
371 while ((num_squashed < numSquashable) && currState &&
372 (currState->transState->squashed() || te)) {
373 pendingQueue.pop_front();
374 num_squashed++;
375
376 DPRINTF(TLB, "Squashing table walk for address %#x\n",
377 currState->vaddr_tainted);
378
379 if (currState->transState->squashed()) {
380 // finish the translation which will delete the translation object
381 currState->transState->finish(
382 std::make_shared<UnimpFault>("Squashed Inst"),
383 currState->req, currState->tc, currState->mode);
384 } else {
385 // translate the request now that we know it will work
386 tlb->translateTiming(currState->req, currState->tc,
387 currState->transState, currState->mode);
388
389 }
390
391 // delete the current request
392 delete currState;
393
394 // peak at the next one
395 if (pendingQueue.size()) {
396 currState = pendingQueue.front();
397 te = tlb->lookup(currState->vaddr, currState->asid,
398 currState->vmid, currState->isHyp, currState->isSecure, true,
399 false, target_el);
400 } else {
401 // Terminate the loop, nothing more to do
402 currState = NULL;
403 }
404 }
405
406 // if we've still got pending translations schedule more work
407 nextWalk(tc);
408 currState = NULL;
409}
410
411Fault
412TableWalker::processWalk()
413{
414 Addr ttbr = 0;
415
416 // If translation isn't enabled, we shouldn't be here
417 assert(currState->sctlr.m || isStage2);
418
419 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
420 currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
421 32 - currState->ttbcr.n));
422
423 if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
424 32 - currState->ttbcr.n)) {
425 DPRINTF(TLB, " - Selecting TTBR0\n");
426 // Check if table walk is allowed when Security Extensions are enabled
427 if (haveSecurity && currState->ttbcr.pd0) {
428 if (currState->isFetch)
429 return std::make_shared<PrefetchAbort>(
430 currState->vaddr_tainted,
431 ArmFault::TranslationLL + L1,
432 isStage2,
433 ArmFault::VmsaTran);
434 else
435 return std::make_shared<DataAbort>(
436 currState->vaddr_tainted,
437 TlbEntry::DomainType::NoAccess, currState->isWrite,
438 ArmFault::TranslationLL + L1, isStage2,
439 ArmFault::VmsaTran);
440 }
441 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
442 MISCREG_TTBR0, currState->tc, !currState->isSecure));
443 } else {
444 DPRINTF(TLB, " - Selecting TTBR1\n");
445 // Check if table walk is allowed when Security Extensions are enabled
446 if (haveSecurity && currState->ttbcr.pd1) {
447 if (currState->isFetch)
448 return std::make_shared<PrefetchAbort>(
449 currState->vaddr_tainted,
450 ArmFault::TranslationLL + L1,
451 isStage2,
452 ArmFault::VmsaTran);
453 else
454 return std::make_shared<DataAbort>(
455 currState->vaddr_tainted,
456 TlbEntry::DomainType::NoAccess, currState->isWrite,
457 ArmFault::TranslationLL + L1, isStage2,
458 ArmFault::VmsaTran);
459 }
460 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
461 MISCREG_TTBR1, currState->tc, !currState->isSecure));
462 currState->ttbcr.n = 0;
463 }
464
465 Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
466 (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
467 DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
468 currState->isSecure ? "s" : "ns");
469
470 // Trickbox address check
471 Fault f;
472 f = tlb->walkTrickBoxCheck(l1desc_addr, currState->isSecure,
473 currState->vaddr, sizeof(uint32_t), currState->isFetch,
474 currState->isWrite, TlbEntry::DomainType::NoAccess, L1);
475 if (f) {
476 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
477 if (currState->timing) {
478 pending = false;
479 nextWalk(currState->tc);
480 currState = NULL;
481 } else {
482 currState->tc = NULL;
483 currState->req = NULL;
484 }
485 return f;
486 }
487
488 Request::Flags flag = 0;
489 if (currState->sctlr.c == 0) {
490 flag = Request::UNCACHEABLE;
491 }
492
493 bool delayed;
494 delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
495 sizeof(uint32_t), flag, L1, &doL1DescEvent,
496 &TableWalker::doL1Descriptor);
497 if (!delayed) {
498 f = currState->fault;
499 }
500
501 return f;
502}
503
504Fault
505TableWalker::processWalkLPAE()
506{
507 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
508 int tsz, n;
509 LookupLevel start_lookup_level = L1;
510
511 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
512 currState->vaddr_tainted, currState->ttbcr);
513
514 Request::Flags flag = 0;
515 if (currState->isSecure)
516 flag.set(Request::SECURE);
517
518 // work out which base address register to use, if in hyp mode we always
519 // use HTTBR
520 if (isStage2) {
521 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
522 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
523 tsz = sext<4>(currState->vtcr.t0sz);
524 start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
525 } else if (currState->isHyp) {
526 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
527 ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
528 tsz = currState->htcr.t0sz;
529 } else {
530 assert(_haveLPAE && currState->ttbcr.eae);
531
532 // Determine boundaries of TTBR0/1 regions
533 if (currState->ttbcr.t0sz)
534 ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
535 else if (currState->ttbcr.t1sz)
536 ttbr0_max = (1ULL << 32) -
537 (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
538 else
539 ttbr0_max = (1ULL << 32) - 1;
540 if (currState->ttbcr.t1sz)
541 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
542 else
543 ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
544
545 // The following code snippet selects the appropriate translation table base
546 // address (TTBR0 or TTBR1) and the appropriate starting lookup level
547 // depending on the address range supported by the translation table (ARM
548 // ARM issue C B3.6.4)
549 if (currState->vaddr <= ttbr0_max) {
550 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
551 // Check if table walk is allowed
552 if (currState->ttbcr.epd0) {
553 if (currState->isFetch)
554 return std::make_shared<PrefetchAbort>(
555 currState->vaddr_tainted,
556 ArmFault::TranslationLL + L1,
557 isStage2,
558 ArmFault::LpaeTran);
559 else
560 return std::make_shared<DataAbort>(
561 currState->vaddr_tainted,
562 TlbEntry::DomainType::NoAccess,
563 currState->isWrite,
564 ArmFault::TranslationLL + L1,
565 isStage2,
566 ArmFault::LpaeTran);
567 }
568 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
569 MISCREG_TTBR0, currState->tc, !currState->isSecure));
570 tsz = currState->ttbcr.t0sz;
571 if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GB
572 start_lookup_level = L2;
573 } else if (currState->vaddr >= ttbr1_min) {
574 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
575 // Check if table walk is allowed
576 if (currState->ttbcr.epd1) {
577 if (currState->isFetch)
578 return std::make_shared<PrefetchAbort>(
579 currState->vaddr_tainted,
580 ArmFault::TranslationLL + L1,
581 isStage2,
582 ArmFault::LpaeTran);
583 else
584 return std::make_shared<DataAbort>(
585 currState->vaddr_tainted,
586 TlbEntry::DomainType::NoAccess,
587 currState->isWrite,
588 ArmFault::TranslationLL + L1,
589 isStage2,
590 ArmFault::LpaeTran);
591 }
592 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
593 MISCREG_TTBR1, currState->tc, !currState->isSecure));
594 tsz = currState->ttbcr.t1sz;
595 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30)) // Lower limit >= 3 GB
596 start_lookup_level = L2;
597 } else {
598 // Out of boundaries -> translation fault
599 if (currState->isFetch)
600 return std::make_shared<PrefetchAbort>(
601 currState->vaddr_tainted,
602 ArmFault::TranslationLL + L1,
603 isStage2,
604 ArmFault::LpaeTran);
605 else
606 return std::make_shared<DataAbort>(
607 currState->vaddr_tainted,
608 TlbEntry::DomainType::NoAccess,
609 currState->isWrite, ArmFault::TranslationLL + L1,
610 isStage2, ArmFault::LpaeTran);
611 }
612
613 }
614
615 // Perform lookup (ARM ARM issue C B3.6.6)
616 if (start_lookup_level == L1) {
617 n = 5 - tsz;
618 desc_addr = mbits(ttbr, 39, n) |
619 (bits(currState->vaddr, n + 26, 30) << 3);
620 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
621 desc_addr, currState->isSecure ? "s" : "ns");
622 } else {
623 // Skip first-level lookup
624 n = (tsz >= 2 ? 14 - tsz : 12);
625 desc_addr = mbits(ttbr, 39, n) |
626 (bits(currState->vaddr, n + 17, 21) << 3);
627 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
628 desc_addr, currState->isSecure ? "s" : "ns");
629 }
630
631 // Trickbox address check
632 Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
633 currState->vaddr, sizeof(uint64_t), currState->isFetch,
634 currState->isWrite, TlbEntry::DomainType::NoAccess,
635 start_lookup_level);
636 if (f) {
637 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
638 if (currState->timing) {
639 pending = false;
640 nextWalk(currState->tc);
641 currState = NULL;
642 } else {
643 currState->tc = NULL;
644 currState->req = NULL;
645 }
646 return f;
647 }
648
649 if (currState->sctlr.c == 0) {
650 flag = Request::UNCACHEABLE;
651 }
652
653 if (currState->isSecure)
654 flag.set(Request::SECURE);
655
656 currState->longDesc.lookupLevel = start_lookup_level;
657 currState->longDesc.aarch64 = false;
658 currState->longDesc.grainSize = Grain4KB;
659
660 Event *event = start_lookup_level == L1 ? (Event *) &doL1LongDescEvent
661 : (Event *) &doL2LongDescEvent;
662
663 bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
664 sizeof(uint64_t), flag, start_lookup_level,
665 event, &TableWalker::doLongDescriptor);
666 if (!delayed) {
667 f = currState->fault;
668 }
669
670 return f;
671}
672
673unsigned
674TableWalker::adjustTableSizeAArch64(unsigned tsz)
675{
676 if (tsz < 25)
677 return 25;
678 if (tsz > 48)
679 return 48;
680 return tsz;
681}
682
683bool
684TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
685{
686 return (currPhysAddrRange != MaxPhysAddrRange &&
687 bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
688}
689
690Fault
691TableWalker::processWalkAArch64()
692{
693 assert(currState->aarch64);
694
695 DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
696 currState->vaddr_tainted, currState->tcr);
697
698 static const GrainSize GrainMapDefault[] =
699 { Grain4KB, Grain64KB, Grain16KB, ReservedGrain };
700 static const GrainSize GrainMap_EL1_tg1[] =
701 { ReservedGrain, Grain16KB, Grain4KB, Grain64KB };
702
703 // Determine TTBR, table size, granule size and phys. address range
704 Addr ttbr = 0;
705 int tsz = 0, ps = 0;
706 GrainSize tg = Grain4KB; // grain size computed from tg* field
707 bool fault = false;
708 switch (currState->el) {
709 case EL0:
710 case EL1:
711 switch (bits(currState->vaddr, 63,48)) {
712 case 0:
713 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
714 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
715 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
716 tg = GrainMapDefault[currState->tcr.tg0];
717 if (bits(currState->vaddr, 63, tsz) != 0x0 ||
718 currState->tcr.epd0)
719 fault = true;
720 break;
721 case 0xffff:
722 DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
723 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
724 tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
725 tg = GrainMap_EL1_tg1[currState->tcr.tg1];
726 if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
727 currState->tcr.epd1)
728 fault = true;
729 break;
730 default:
731 // top two bytes must be all 0s or all 1s, else invalid addr
732 fault = true;
733 }
734 ps = currState->tcr.ips;
735 break;
736 case EL2:
737 case EL3:
738 switch(bits(currState->vaddr, 63,48)) {
739 case 0:
740 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
741 if (currState->el == EL2)
742 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
743 else
744 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
745 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
746 tg = GrainMapDefault[currState->tcr.tg0];
747 break;
748 default:
749 // invalid addr if top two bytes are not all 0s
750 fault = true;
751 }
752 ps = currState->tcr.ips;
753 break;
754 }
755
756 if (fault) {
757 Fault f;
758 if (currState->isFetch)
759 f = std::make_shared<PrefetchAbort>(
760 currState->vaddr_tainted,
761 ArmFault::TranslationLL + L0, isStage2,
762 ArmFault::LpaeTran);
763 else
764 f = std::make_shared<DataAbort>(
765 currState->vaddr_tainted,
766 TlbEntry::DomainType::NoAccess,
767 currState->isWrite,
768 ArmFault::TranslationLL + L0,
769 isStage2, ArmFault::LpaeTran);
770
771 if (currState->timing) {
772 pending = false;
773 nextWalk(currState->tc);
774 currState = NULL;
775 } else {
776 currState->tc = NULL;
777 currState->req = NULL;
778 }
779 return f;
780
781 }
782
783 if (tg == ReservedGrain) {
784 warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
785 "DEFINED behavior takes this to mean 4KB granules\n");
786 tg = Grain4KB;
787 }
788
789 int stride = tg - 3;
790 LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;
791
792 // Determine starting lookup level
793 // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
794 // in ARM DDI 0487A. These table values correspond to the cascading tests
795 // to compute the lookup level and are of the form
796 // (grain_size + N*stride), for N = {1, 2, 3}.
797 // A value of 64 will never succeed and a value of 0 will always succeed.
798 {
799 struct GrainMap {
800 GrainSize grain_size;
801 unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
802 };
803 static const GrainMap GM[] = {
804 { Grain4KB, { 39, 30, 0, 0 } },
805 { Grain16KB, { 47, 36, 25, 0 } },
806 { Grain64KB, { 64, 42, 29, 0 } }
807 };
808
809 const unsigned *lookup = NULL; // points to a lookup_level_cutoff
810
811 for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
812 if (tg == GM[i].grain_size) {
813 lookup = GM[i].lookup_level_cutoff;
814 break;
815 }
816 }
817 assert(lookup);
818
819 for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
820 if (tsz > lookup[L]) {
821 start_lookup_level = (LookupLevel) L;
822 break;
823 }
824 }
825 panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
826 "Table walker couldn't find lookup level\n");
827 }
828
829 // Determine table base address
830 int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
831 Addr base_addr = mbits(ttbr, 47, base_addr_lo);
832
833 // Determine physical address size and raise an Address Size Fault if
834 // necessary
835 int pa_range = decodePhysAddrRange64(ps);
836 // Clamp to lower limit
837 if (pa_range > physAddrRange)
838 currState->physAddrRange = physAddrRange;
839 else
840 currState->physAddrRange = pa_range;
841 if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
842 DPRINTF(TLB, "Address size fault before any lookup\n");
843 Fault f;
844 if (currState->isFetch)
845 f = std::make_shared<PrefetchAbort>(
846 currState->vaddr_tainted,
847 ArmFault::AddressSizeLL + start_lookup_level,
848 isStage2,
849 ArmFault::LpaeTran);
850 else
851 f = std::make_shared<DataAbort>(
852 currState->vaddr_tainted,
853 TlbEntry::DomainType::NoAccess,
854 currState->isWrite,
855 ArmFault::AddressSizeLL + start_lookup_level,
856 isStage2,
857 ArmFault::LpaeTran);
858
859
860 if (currState->timing) {
861 pending = false;
862 nextWalk(currState->tc);
863 currState = NULL;
864 } else {
865 currState->tc = NULL;
866 currState->req = NULL;
867 }
868 return f;
869
870 }
871
872 // Determine descriptor address
873 Addr desc_addr = base_addr |
874 (bits(currState->vaddr, tsz - 1,
875 stride * (3 - start_lookup_level) + tg) << 3);
876
877 // Trickbox address check
878 Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
879 currState->vaddr, sizeof(uint64_t), currState->isFetch,
880 currState->isWrite, TlbEntry::DomainType::NoAccess,
881 start_lookup_level);
882 if (f) {
883 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
884 if (currState->timing) {
885 pending = false;
886 nextWalk(currState->tc);
887 currState = NULL;
888 } else {
889 currState->tc = NULL;
890 currState->req = NULL;
891 }
892 return f;
893 }
894
895 Request::Flags flag = 0;
896 if (currState->sctlr.c == 0) {
897 flag = Request::UNCACHEABLE;
898 }
899
900 currState->longDesc.lookupLevel = start_lookup_level;
901 currState->longDesc.aarch64 = true;
902 currState->longDesc.grainSize = tg;
903
904 if (currState->timing) {
905 Event *event;
906 switch (start_lookup_level) {
907 case L0:
908 event = (Event *) &doL0LongDescEvent;
909 break;
910 case L1:
911 event = (Event *) &doL1LongDescEvent;
912 break;
913 case L2:
914 event = (Event *) &doL2LongDescEvent;
915 break;
916 case L3:
917 event = (Event *) &doL3LongDescEvent;
918 break;
919 default:
920 panic("Invalid table lookup level");
921 break;
922 }
923 port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t), event,
924 (uint8_t*) &currState->longDesc.data,
925 currState->tc->getCpuPtr()->clockPeriod(), flag);
926 DPRINTF(TLBVerbose,
927 "Adding to walker fifo: queue size before adding: %d\n",
928 stateQueues[start_lookup_level].size());
929 stateQueues[start_lookup_level].push_back(currState);
930 currState = NULL;
931 } else if (!currState->functional) {
932 port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t),
933 NULL, (uint8_t*) &currState->longDesc.data,
934 currState->tc->getCpuPtr()->clockPeriod(), flag);
935 doLongDescriptor();
936 f = currState->fault;
937 } else {
938 RequestPtr req = new Request(desc_addr, sizeof(uint64_t), flag,
939 masterId);
940 PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
941 pkt->dataStatic((uint8_t*) &currState->longDesc.data);
942 port.sendFunctional(pkt);
943 doLongDescriptor();
944 delete req;
945 delete pkt;
946 f = currState->fault;
947 }
948
949 return f;
950}
951
/**
 * Fill in the memory attributes of a short-descriptor (VMSAv7) TLB entry:
 * memory type, cacheability, shareability and the inner/outer attribute
 * encodings, derived from the descriptor's TEX/C/B and S bits.
 *
 * @param tc    thread context; used to read PRRR/NMRR when TEX remap is active
 * @param te    TLB entry being populated
 * @param sctlr SCTLR value; TRE and M select classic TEX decode vs. TEX remap
 * @param texcb concatenated {TEX[2:0], C, B} bits from the descriptor
 * @param s     descriptor S (shareable) bit
 */
void
TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
                      uint8_t texcb, bool s)
{
    // Note: tc and sctlr local variables are hiding tc and sctrl class
    // variables
    DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
    te.shareable = false; // default value
    te.nonCacheable = false;
    te.outerShareable = false;
    // TEX remap disabled (TRE == 0, or TRE set with the MMU off): decode
    // TEX/C/B directly using the classic VMSA encoding table.
    if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
        switch(texcb) {
          case 0: // Strongly-ordered
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.shareable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            break;
          case 1: // Shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 2: // Outer and Inner Write-Through, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 6;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 3: // Outer and Inner Write-Back, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 7;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 4: // Outer and Inner Non-cacheable
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 0;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 5: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 6: // Implementation Defined
            panic("Implementation-defined texcb value!\n");
            break;
          case 7: // Outer and Inner Write-Back, Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 5;
            te.outerAttrs = 1;
            break;
          case 8: // Non-shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = false;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 9 ... 15: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 16 ... 31: // Cacheable Memory
            // TEX[2] set: TEX[1:0] give outer policy, C:B give inner policy.
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
                te.nonCacheable = true;
            te.innerAttrs = bits(texcb, 1, 0);
            te.outerAttrs = bits(texcb, 3, 2);
            break;
          default:
            panic("More than 32 states for 5 bits?\n");
        }
    } else {
        // TEX remap enabled: TEX[0]:C:B select one of eight memory regions
        // whose type/cacheability come from the PRRR and NMRR registers.
        assert(tc);
        PRRR prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR,
                                    currState->tc, !currState->isSecure));
        NMRR nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR,
                                    currState->tc, !currState->isSecure));
        DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
        uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
        switch(bits(texcb, 2,0)) {
          case 0:
            curr_tr = prrr.tr0;
            curr_ir = nmrr.ir0;
            curr_or = nmrr.or0;
            te.outerShareable = (prrr.nos0 == 0);
            break;
          case 1:
            curr_tr = prrr.tr1;
            curr_ir = nmrr.ir1;
            curr_or = nmrr.or1;
            te.outerShareable = (prrr.nos1 == 0);
            break;
          case 2:
            curr_tr = prrr.tr2;
            curr_ir = nmrr.ir2;
            curr_or = nmrr.or2;
            te.outerShareable = (prrr.nos2 == 0);
            break;
          case 3:
            curr_tr = prrr.tr3;
            curr_ir = nmrr.ir3;
            curr_or = nmrr.or3;
            te.outerShareable = (prrr.nos3 == 0);
            break;
          case 4:
            curr_tr = prrr.tr4;
            curr_ir = nmrr.ir4;
            curr_or = nmrr.or4;
            te.outerShareable = (prrr.nos4 == 0);
            break;
          case 5:
            curr_tr = prrr.tr5;
            curr_ir = nmrr.ir5;
            curr_or = nmrr.or5;
            te.outerShareable = (prrr.nos5 == 0);
            break;
          case 6:
            // Region 6 is implementation defined in the architecture.
            panic("Imp defined type\n");
          case 7:
            curr_tr = prrr.tr7;
            curr_ir = nmrr.ir7;
            curr_or = nmrr.or7;
            te.outerShareable = (prrr.nos7 == 0);
            break;
        }

        // Memory type from PRRR.TRn; shareability of Device/Normal regions
        // additionally depends on PRRR.DS0/DS1 resp. NS0/NS1 and the S bit.
        switch(curr_tr) {
          case 0:
            DPRINTF(TLBVerbose, "StronglyOrdered\n");
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.nonCacheable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            te.shareable = true;
            break;
          case 1:
            DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
                    prrr.ds1, prrr.ds0, s);
            te.mtype = TlbEntry::MemoryType::Device;
            te.nonCacheable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            if (prrr.ds1 && s)
                te.shareable = true;
            if (prrr.ds0 && !s)
                te.shareable = true;
            break;
          case 2:
            DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
                    prrr.ns1, prrr.ns0, s);
            te.mtype = TlbEntry::MemoryType::Normal;
            if (prrr.ns1 && s)
                te.shareable = true;
            if (prrr.ns0 && !s)
                te.shareable = true;
            break;
          case 3:
            panic("Reserved type");
        }

        // Only Normal memory takes its cacheability from NMRR.IRn/ORn.
        if (te.mtype == TlbEntry::MemoryType::Normal){
            switch(curr_ir) {
              case 0:
                te.nonCacheable = true;
                te.innerAttrs = 0;
                break;
              case 1:
                te.innerAttrs = 5;
                break;
              case 2:
                te.innerAttrs = 6;
                break;
              case 3:
                te.innerAttrs = 7;
                break;
            }

            switch(curr_or) {
              case 0:
                te.nonCacheable = true;
                te.outerAttrs = 0;
                break;
              case 1:
                te.outerAttrs = 1;
                break;
              case 2:
                te.outerAttrs = 2;
                break;
              case 3:
                te.outerAttrs = 3;
                break;
            }
        }
    }
    DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
            "outerAttrs: %d\n",
            te.shareable, te.innerAttrs, te.outerAttrs);
    te.setAttributes(false);
}
1158
/**
 * Fill in the memory attributes of a long-descriptor (LPAE) TLB entry.
 *
 * For stage 2 translations the attributes are encoded directly in the
 * descriptor's MemAttr field; for stage 1 the descriptor's AttrIndx selects
 * a byte from MAIR0/MAIR1 (LPAE always remaps, irrespective of SCTLR.TRE).
 * The raw attribute byte is also stored in the entry's attributes field in
 * 64-bit PAR format.
 *
 * @param tc          thread context (unused here; kept for symmetry with
 *                    memAttrs — the MAIR reads go through currState->tc)
 * @param te          TLB entry being populated
 * @param lDescriptor the long-format descriptor that was walked
 */
void
TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
    LongDescriptor &lDescriptor)
{
    assert(_haveLPAE);

    uint8_t attr;
    uint8_t sh = lDescriptor.sh();
    // Different format and source of attributes if this is a stage 2
    // translation
    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_3_2 = (attr >> 2) & 0x3;
        uint8_t attr_1_0 = attr & 0x3;

        DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_3_2 == 0) {
            // MemAttr[3:2] == 0: Strongly-ordered or Device memory
            te.mtype =  attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                      : TlbEntry::MemoryType::Device;
            te.outerAttrs = 0;
            te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            // Normal memory; [3:2] give outer, [1:0] inner cacheability
            te.mtype      = TlbEntry::MemoryType::Normal;
            te.outerAttrs = attr_3_2 == 1 ? 0 :
                            attr_3_2 == 2 ? 2 : 1;
            te.innerAttrs = attr_1_0 == 1 ? 0 :
                            attr_1_0 == 2 ? 6 : 5;
            te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        // LPAE always uses remapping of memory attributes, irrespective of the
        // value of SCTLR.TRE
        // AttrIndx[2] selects MAIR1 vs. MAIR0; AttrIndx[1:0] the byte within.
        MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
        int reg_as_int = flattenMiscRegNsBanked(reg, currState->tc,
                                                !currState->isSecure);
        uint32_t mair = currState->tc->readMiscReg(reg_as_int);
        attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
        uint8_t attr_7_4 = bits(attr, 7, 4);
        uint8_t attr_3_0 = bits(attr, 3, 0);
        DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);

        // Note: the memory subsystem only cares about the 'cacheable' memory
        // attribute. The other attributes are only used to fill the PAR register
        // accordingly to provide the illusion of full support
        te.nonCacheable = false;

        switch (attr_7_4) {
          case 0x0:
            // Strongly-ordered or Device memory
            if (attr_3_0 == 0x0)
                te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            else if (attr_3_0 == 0x4)
                te.mtype = TlbEntry::MemoryType::Device;
            else
                panic("Unpredictable behavior\n");
            te.nonCacheable = true;
            te.outerAttrs   = 0;
            break;
          case 0x4:
            // Normal memory, Outer Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = 0;
            if (attr_3_0 == 0x4)
                // Inner Non-cacheable
                te.nonCacheable = true;
            else if (attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          case 0x8:
          case 0x9:
          case 0xa:
          case 0xb:
          case 0xc:
          case 0xd:
          case 0xe:
          case 0xf:
            // Normal memory, Outer Cacheable; attr[6]/attr[4] pick the
            // write-through/write-back and allocate hints.
            if (attr_7_4 & 0x4) {
                te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
            } else {
                te.outerAttrs = 0x2;
            }
            // Normal memory, Outer Cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }

        // Inner attribute encoding from the low nibble of the MAIR byte.
        switch (attr_3_0) {
          case 0x0:
            te.innerAttrs = 0x1;
            break;
          case 0x4:
            te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
            break;
          case 0x8:
          case 0x9:
          case 0xA:
          case 0xB:
            te.innerAttrs = 6;
            break;
          case 0xC:
          case 0xD:
          case 0xE:
          case 0xF:
            te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
    }

    // SH[1:0]: 0b10 = Outer Shareable, 0b11 = Inner Shareable; any set
    // shareability also marks the generic shareable flag.
    te.outerShareable = sh == 2;
    te.shareable    = (sh & 0x2) ? true : false;
    te.setAttributes(true);
    // Stash the raw attribute byte in PAR[63:56] format.
    te.attributes |= (uint64_t) attr << 56;
}
1284
1285void
1286TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te, uint8_t attrIndx,
1287 uint8_t sh)
1288{
1289 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1290
1291 // Select MAIR
1292 uint64_t mair;
1293 switch (currState->el) {
1294 case EL0:
1295 case EL1:
1296 mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1297 break;
1298 case EL2:
1299 mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1300 break;
1301 case EL3:
1302 mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1303 break;
1304 default:
1305 panic("Invalid exception level");
1306 break;
1307 }
1308
1309 // Select attributes
1310 uint8_t attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1311 uint8_t attr_lo = bits(attr, 3, 0);
1312 uint8_t attr_hi = bits(attr, 7, 4);
1313
1314 // Memory type
1315 te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;
1316
1317 // Cacheability
1318 te.nonCacheable = false;
1319 if (te.mtype == TlbEntry::MemoryType::Device || // Device memory
1320 attr_hi == 0x8 || // Normal memory, Outer Non-cacheable
1321 attr_lo == 0x8) { // Normal memory, Inner Non-cacheable
1322 te.nonCacheable = true;
1323 }
1324
1325 te.shareable = sh == 2;
1326 te.outerShareable = (sh & 0x2) ? true : false;
1327 // Attributes formatted according to the 64-bit PAR
1328 te.attributes = ((uint64_t) attr << 56) |
1329 (1 << 11) | // LPAE bit
1330 (te.ns << 9) | // NS bit
1331 (sh << 7);
1332}
1333
/**
 * Process the fetched short-format L1 descriptor for the current walk state:
 * fault on invalid/reserved types, insert a TLB entry for sections, or fetch
 * the L2 descriptor for page tables (possibly delayed in timing mode).
 */
void
TableWalker::doL1Descriptor()
{
    // An earlier stage (e.g. a stage 2 fetch) may already have faulted.
    if (currState->fault != NoFault) {
        return;
    }

    DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
            currState->vaddr_tainted, currState->l1Desc.data);
    TlbEntry te;  // NOTE(review): unused local; entry creation happens in
                  // insertTableEntry()

    switch (currState->l1Desc.type()) {
      case L1Descriptor::Ignore:
      case L1Descriptor::Reserved:
        if (!currState->timing) {
            currState->tc = NULL;
            currState->req = NULL;
        }
        DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
        if (currState->isFetch)
            currState->fault =
                std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::VmsaTran);
        else
            currState->fault =
                std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    currState->isWrite,
                    ArmFault::TranslationLL + L1, isStage2,
                    ArmFault::VmsaTran);
        return;
      case L1Descriptor::Section:
        if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
            /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
              * enabled if set, do l1.Desc.setAp0() instead of generating
              * AccessFlag0
              */

            currState->fault = std::make_shared<DataAbort>(
                currState->vaddr_tainted,
                currState->l1Desc.domain(),
                currState->isWrite,
                ArmFault::AccessFlagLL + L1,
                isStage2,
                ArmFault::VmsaTran);
        }
        if (currState->l1Desc.supersection()) {
            panic("Haven't implemented supersections\n");
        }
        // Note: on an access-flag fault above the entry is still inserted;
        // the caller reports the fault (same structure as doL2Descriptor).
        insertTableEntry(currState->l1Desc, false);
        return;
      case L1Descriptor::PageTable:
        {
            Addr l2desc_addr;
            // L2 table base from the L1 descriptor, indexed by VA[19:12].
            l2desc_addr = currState->l1Desc.l2Addr() |
                (bits(currState->vaddr, 19, 12) << 2);
            DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
                    l2desc_addr, currState->isSecure ? "s" : "ns");

            // Trickbox address check
            currState->fault = tlb->walkTrickBoxCheck(
                l2desc_addr, currState->isSecure, currState->vaddr,
                sizeof(uint32_t), currState->isFetch, currState->isWrite,
                currState->l1Desc.domain(), L2);

            if (currState->fault) {
                if (!currState->timing) {
                    currState->tc = NULL;
                    currState->req = NULL;
                }
                return;
            }

            Request::Flags flag = 0;
            if (currState->isSecure)
                flag.set(Request::SECURE);

            // In timing mode fetchDescriptor returns true and the walk
            // resumes from doL2DescEvent.
            bool delayed;
            delayed = fetchDescriptor(l2desc_addr,
                                      (uint8_t*)&currState->l2Desc.data,
                                      sizeof(uint32_t), flag, -1, &doL2DescEvent,
                                      &TableWalker::doL2Descriptor);
            if (delayed) {
                currState->delayed = true;
            }

            return;
        }
      default:
        panic("A new type in a 2 bit field?\n");
    }
}
1430
1431void
1432TableWalker::doLongDescriptor()
1433{
1434 if (currState->fault != NoFault) {
1435 return;
1436 }
1437
1438 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1439 currState->longDesc.lookupLevel, currState->vaddr_tainted,
1440 currState->longDesc.data,
1441 currState->aarch64 ? "AArch64" : "long-desc.");
1442
1443 if ((currState->longDesc.type() == LongDescriptor::Block) ||
1444 (currState->longDesc.type() == LongDescriptor::Page)) {
1445 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1446 "xn: %d, ap: %d, af: %d, type: %d\n",
1447 currState->longDesc.lookupLevel,
1448 currState->longDesc.data,
1449 currState->longDesc.pxn(),
1450 currState->longDesc.xn(),
1451 currState->longDesc.ap(),
1452 currState->longDesc.af(),
1453 currState->longDesc.type());
1454 } else {
1455 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1456 currState->longDesc.lookupLevel,
1457 currState->longDesc.data,
1458 currState->longDesc.type());
1459 }
1460
1461 TlbEntry te;
1462
1463 switch (currState->longDesc.type()) {
1464 case LongDescriptor::Invalid:
1465 if (!currState->timing) {
1466 currState->tc = NULL;
1467 currState->req = NULL;
1468 }
1469
1470 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1471 currState->longDesc.lookupLevel,
1472 ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1473 if (currState->isFetch)
1474 currState->fault = std::make_shared<PrefetchAbort>(
1475 currState->vaddr_tainted,
1476 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1477 isStage2,
1478 ArmFault::LpaeTran);
1479 else
1480 currState->fault = std::make_shared<DataAbort>(
1481 currState->vaddr_tainted,
1482 TlbEntry::DomainType::NoAccess,
1483 currState->isWrite,
1484 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1485 isStage2,
1486 ArmFault::LpaeTran);
1487 return;
1488 case LongDescriptor::Block:
1489 case LongDescriptor::Page:
1490 {
1491 bool fault = false;
1492 bool aff = false;
1493 // Check for address size fault
1494 if (checkAddrSizeFaultAArch64(
1495 mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
1496 currState->longDesc.offsetBits()),
1497 currState->physAddrRange)) {
1498 fault = true;
1499 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1500 currState->longDesc.lookupLevel);
1501 // Check for access fault
1502 } else if (currState->longDesc.af() == 0) {
1503 fault = true;
1504 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1505 currState->longDesc.lookupLevel);
1506 aff = true;
1507 }
1508 if (fault) {
1509 if (currState->isFetch)
1510 currState->fault = std::make_shared<PrefetchAbort>(
1511 currState->vaddr_tainted,
1512 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1513 currState->longDesc.lookupLevel,
1514 isStage2,
1515 ArmFault::LpaeTran);
1516 else
1517 currState->fault = std::make_shared<DataAbort>(
1518 currState->vaddr_tainted,
1519 TlbEntry::DomainType::NoAccess, currState->isWrite,
1520 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1521 currState->longDesc.lookupLevel,
1522 isStage2,
1523 ArmFault::LpaeTran);
1524 } else {
1525 insertTableEntry(currState->longDesc, true);
1526 }
1527 }
1528 return;
1529 case LongDescriptor::Table:
1530 {
1531 // Set hierarchical permission flags
1532 currState->secureLookup = currState->secureLookup &&
1533 currState->longDesc.secureTable();
1534 currState->rwTable = currState->rwTable &&
1535 currState->longDesc.rwTable();
1536 currState->userTable = currState->userTable &&
1537 currState->longDesc.userTable();
1538 currState->xnTable = currState->xnTable ||
1539 currState->longDesc.xnTable();
1540 currState->pxnTable = currState->pxnTable ||
1541 currState->longDesc.pxnTable();
1542
1543 // Set up next level lookup
1544 Addr next_desc_addr = currState->longDesc.nextDescAddr(
1545 currState->vaddr);
1546
1547 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1548 currState->longDesc.lookupLevel,
1549 currState->longDesc.lookupLevel + 1,
1550 next_desc_addr,
1551 currState->secureLookup ? "s" : "ns");
1552
1553 // Check for address size fault
1554 if (currState->aarch64 && checkAddrSizeFaultAArch64(
1555 next_desc_addr, currState->physAddrRange)) {
1556 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1557 currState->longDesc.lookupLevel);
1558 if (currState->isFetch)
1559 currState->fault = std::make_shared<PrefetchAbort>(
1560 currState->vaddr_tainted,
1561 ArmFault::AddressSizeLL
1562 + currState->longDesc.lookupLevel,
1563 isStage2,
1564 ArmFault::LpaeTran);
1565 else
1566 currState->fault = std::make_shared<DataAbort>(
1567 currState->vaddr_tainted,
1568 TlbEntry::DomainType::NoAccess, currState->isWrite,
1569 ArmFault::AddressSizeLL
1570 + currState->longDesc.lookupLevel,
1571 isStage2,
1572 ArmFault::LpaeTran);
1573 return;
1574 }
1575
1576 // Trickbox address check
1577 currState->fault = tlb->walkTrickBoxCheck(
1578 next_desc_addr, currState->vaddr,
1579 currState->vaddr, sizeof(uint64_t),
1580 currState->isFetch, currState->isWrite,
1581 TlbEntry::DomainType::Client,
1582 toLookupLevel(currState->longDesc.lookupLevel +1));
1583
1584 if (currState->fault) {
1585 if (!currState->timing) {
1586 currState->tc = NULL;
1587 currState->req = NULL;
1588 }
1589 return;
1590 }
1591
1592 Request::Flags flag = 0;
1593 if (currState->secureLookup)
1594 flag.set(Request::SECURE);
1595
1596 currState->longDesc.lookupLevel =
1597 (LookupLevel) (currState->longDesc.lookupLevel + 1);
1598 Event *event = NULL;
1599 switch (currState->longDesc.lookupLevel) {
1600 case L1:
1601 assert(currState->aarch64);
1602 event = &doL1LongDescEvent;
1603 break;
1604 case L2:
1605 event = &doL2LongDescEvent;
1606 break;
1607 case L3:
1608 event = &doL3LongDescEvent;
1609 break;
1610 default:
1611 panic("Wrong lookup level in table walk\n");
1612 break;
1613 }
1614
1615 bool delayed;
1616 delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1617 sizeof(uint64_t), flag, -1, event,
1618 &TableWalker::doLongDescriptor);
1619 if (delayed) {
1620 currState->delayed = true;
1621 }
1622 }
1623 return;
1624 default:
1625 panic("A new type in a 2 bit field?\n");
1626 }
1627}
1628
/**
 * Process the fetched short-format L2 descriptor for the current walk state:
 * fault on invalid descriptors or a clear access flag (with SCTLR.AFE), then
 * insert the resulting TLB entry.
 */
void
TableWalker::doL2Descriptor()
{
    // An earlier stage (e.g. a stage 2 fetch) may already have faulted.
    if (currState->fault != NoFault) {
        return;
    }

    DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
            currState->vaddr_tainted, currState->l2Desc.data);
    TlbEntry te;  // NOTE(review): unused local; entry creation happens in
                  // insertTableEntry()

    if (currState->l2Desc.invalid()) {
        DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
        if (!currState->timing) {
            currState->tc = NULL;
            currState->req = NULL;
        }
        if (currState->isFetch)
            currState->fault = std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L2,
                    isStage2,
                    ArmFault::VmsaTran);
        else
            currState->fault = std::make_shared<DataAbort>(
                currState->vaddr_tainted, currState->l1Desc.domain(),
                currState->isWrite, ArmFault::TranslationLL + L2,
                isStage2,
                ArmFault::VmsaTran);
        return;
    }

    if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
        /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled
          * if set, do l2.Desc.setAp0() instead of generating AccessFlag0
          */
        DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
                currState->sctlr.afe, currState->l2Desc.ap());

        currState->fault = std::make_shared<DataAbort>(
            currState->vaddr_tainted,
            TlbEntry::DomainType::NoAccess, currState->isWrite,
            ArmFault::AccessFlagLL + L2, isStage2,
            ArmFault::VmsaTran);
    }

    // Note: on an access-flag fault above the entry is still inserted; the
    // caller reports the fault (same structure as doL1Descriptor).
    insertTableEntry(currState->l2Desc, false);
}
1677
/**
 * Timing-mode completion handler for an L1 descriptor fetch: pop the walk
 * state from the L1 queue, process the descriptor, then either finish the
 * translation (fault or done), or queue the state for the L2 fetch that
 * doL1Descriptor() started.
 */
void
TableWalker::doL1DescriptorWrapper()
{
    currState = stateQueues[L1].front();
    currState->delayed = false;
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }


    DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data);
    DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",currState->l1Desc.data);

    DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
    doL1Descriptor();

    stateQueues[L1].pop_front();
    // Check if fault was generated
    if (currState->fault != NoFault) {
        // Report the fault to the requester and release this walk state.
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    }
    else if (!currState->delayed) {
        // delay is not set so there is no L2 to do
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            // Re-run the translation; it should now hit the freshly
            // inserted TLB entry.
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            currState->fault = tlb->translateTiming(currState->req, currState->tc,
                                                    currState->transState, currState->mode);
        }

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        // need to do L2 descriptor
        stateQueues[L2].push_back(currState);
    }
    currState = NULL;
}
1732
/**
 * Timing-mode completion handler for an L2 descriptor fetch: pop the walk
 * state from the L2 queue, process the descriptor, finish the translation
 * (fault or retry) and release the walk state. L2 is the last level of a
 * short-descriptor walk, so no further queueing happens here.
 */
void
TableWalker::doL2DescriptorWrapper()
{
    currState = stateQueues[L2].front();
    assert(currState->delayed);
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doL2Descriptor();

    // Check if fault was generated
    if (currState->fault != NoFault) {
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);
    }
    else {
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            // Re-run the translation; it should now hit the freshly
            // inserted TLB entry.
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            currState->fault = tlb->translateTiming(currState->req,
                currState->tc, currState->transState, currState->mode);
        }
    }


    stateQueues[L2].pop_front();
    pending = false;
    nextWalk(currState->tc);

    currState->req = NULL;
    currState->tc = NULL;
    currState->delayed = false;

    delete currState;
    currState = NULL;
}
1774
/// Event callback for a completed L0 long-format descriptor fetch.
void
TableWalker::doL0LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L0);
}
1780
/// Event callback for a completed L1 long-format descriptor fetch.
void
TableWalker::doL1LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L1);
}
1786
/// Event callback for a completed L2 long-format descriptor fetch.
void
TableWalker::doL2LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L2);
}
1792
/// Event callback for a completed L3 long-format descriptor fetch.
void
TableWalker::doL3LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L3);
}
1798
/**
 * Timing-mode completion handler for a long-format descriptor fetch at the
 * given lookup level: pop the walk state from that level's queue, process
 * the descriptor, then either finish the translation (fault or done) or
 * re-queue the state for the next-level fetch doLongDescriptor() started.
 *
 * @param curr_lookup_level level whose fetch just completed (L0-L3)
 */
void
TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
{
    currState = stateQueues[curr_lookup_level].front();
    assert(curr_lookup_level == currState->longDesc.lookupLevel);
    currState->delayed = false;

    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doLongDescriptor();

    stateQueues[curr_lookup_level].pop_front();

    if (currState->fault != NoFault) {
        // A fault was generated
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else if (!currState->delayed) {
        // No additional lookups required
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            // Re-run the translation; it should now hit the freshly
            // inserted TLB entry.
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            currState->fault = tlb->translateTiming(currState->req, currState->tc,
                                                    currState->transState,
                                                    currState->mode);
        }

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
            panic("Max. number of lookups already reached in table walk\n");
        // Need to perform additional lookups
        // doLongDescriptor() already advanced longDesc.lookupLevel.
        stateQueues[currState->longDesc.lookupLevel].push_back(currState);
    }
    currState = NULL;
}
1855
1856
1857void
1858TableWalker::nextWalk(ThreadContext *tc)
1859{
1860 if (pendingQueue.size())
1861 schedule(doProcessEvent, clockEdge(Cycles(1)));
1862 else
1863 completeDrain();
1864}
1865
/**
 * Fetch a page-table descriptor from memory into the supplied buffer,
 * routing the access through the stage 2 MMU when required, and handling
 * timing, atomic and functional access modes.
 *
 * @param descAddr     physical address of the descriptor to read
 * @param data         destination buffer for the raw descriptor bits
 * @param numBytes     descriptor size (4 for short, 8 for long format)
 * @param flags        request flags (e.g. Request::SECURE)
 * @param queueIndex   state queue to park currState on in timing mode, or -1
 *                     if the caller manages the queueing itself
 * @param event        completion event to fire in timing mode
 * @param doDescriptor member function that decodes the fetched descriptor;
 *                     invoked synchronously in atomic/functional mode
 * @return true if the access is timing (i.e. completion is deferred to
 *         'event'), false if the descriptor was fetched synchronously
 */
bool
TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
    Request::Flags flags, int queueIndex, Event *event,
    void (TableWalker::*doDescriptor)())
{
    bool isTiming = currState->timing;

    // do the requests for the page table descriptors have to go through the
    // second stage MMU
    if (currState->stage2Req) {
        Fault fault;
        flags = flags | TLB::MustBeOne;

        if (isTiming) {
            // The stage 2 translation object owns 'event' and will schedule
            // it when the (IPA -> PA + fetch) completes.
            Stage2MMU::Stage2Translation *tran = new
                Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
                                             currState->vaddr);
            currState->stage2Tran = tran;
            stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
                                     flags, masterId);
            fault = tran->fault;
        } else {
            fault = stage2Mmu->readDataUntimed(currState->tc,
                currState->vaddr, descAddr, data, numBytes, flags, masterId,
                currState->functional);
        }

        if (fault != NoFault) {
            currState->fault = fault;
        }
        if (isTiming) {
            if (queueIndex >= 0) {
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else {
            (this->*doDescriptor)();
        }
    } else {
        if (isTiming) {
            // DMA the descriptor; 'event' fires when the data has arrived.
            port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            if (queueIndex >= 0) {
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else if (!currState->functional) {
            // Atomic mode: blocking DMA read, then decode immediately.
            port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            (this->*doDescriptor)();
        } else {
            // Functional mode: side-effect-free read via a functional packet.
            RequestPtr req = new Request(descAddr, numBytes, flags, masterId);
            req->taskId(ContextSwitchTaskId::DMA);
            PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
            pkt->dataStatic(data);
            port.sendFunctional(pkt);
            (this->*doDescriptor)();
            delete req;
            delete pkt;
        }
    }
    return (isTiming);
}
1933
/**
 * Build a TLB entry from a successfully walked leaf descriptor and insert
 * it into the TLB. For long-format descriptors the hierarchical permission
 * state accumulated in currState (rwTable/userTable/xnTable/pxnTable) is
 * folded into the entry's permissions.
 *
 * @param descriptor     the walked leaf (section/page/block) descriptor
 * @param longDescriptor true for LPAE/AArch64 long format, false for short
 */
void
TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
{
    TlbEntry te;

    // Create and fill a new page table entry
    te.valid          = true;
    te.longDescFormat = longDescriptor;
    te.isHyp          = currState->isHyp;
    te.asid           = currState->asid;
    te.vmid           = currState->vmid;
    te.N              = descriptor.offsetBits();
    te.vpn            = currState->vaddr >> te.N;
    te.size           = (1<<te.N) - 1;
    te.pfn            = descriptor.pfn();
    te.domain         = descriptor.domain();
    te.lookupLevel    = descriptor.lookupLevel;
    te.ns             = !descriptor.secure(haveSecurity, currState) || isStage2;
    te.nstid          = !currState->isSecure;
    te.xn             = descriptor.xn();
    if (currState->aarch64)
        te.el         = currState->el;
    else
        te.el         = 1;

    // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
    // as global
    te.global         = descriptor.global(currState) || isStage2;
    if (longDescriptor) {
        LongDescriptor lDescriptor =
            dynamic_cast<LongDescriptor &>(descriptor);

        // Fold in the hierarchical execute-never bits from table descriptors.
        te.xn |= currState->xnTable;
        te.pxn = currState->pxnTable || lDescriptor.pxn();
        if (isStage2) {
            // this is actually the HAP field, but its stored in the same bit
            // positions as the AP field in a stage 1 translation.
            te.hap = lDescriptor.ap();
        } else {
            // Combine descriptor AP with hierarchical APTable restrictions.
            te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
                (currState->userTable && (descriptor.ap() & 0x1));
        }
        if (currState->aarch64)
            memAttrsAArch64(currState->tc, te, currState->longDesc.attrIndx(),
                            currState->longDesc.sh());
        else
            memAttrsLPAE(currState->tc, te, lDescriptor);
    } else {
        te.ap = descriptor.ap();
        memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
                 descriptor.shareable());
    }

    // Debug output
    DPRINTF(TLB, descriptor.dbgHeader().c_str());
    DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
            te.N, te.pfn, te.size, te.global, te.valid);
    DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
            "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
            te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
            te.nonCacheable, te.ns);
    DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
            descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
            descriptor.getRawData());

    // Insert the entry into the TLB
    tlb->insert(currState->vaddr, te);
    // Atomic/functional walks are done now; drop the borrowed references.
    if (!currState->timing) {
        currState->tc = NULL;
        currState->req = NULL;
    }
}
2006
/// SimObject factory: instantiate a table walker from its Python parameters.
ArmISA::TableWalker *
ArmTableWalkerParams::create()
{
    return new ArmISA::TableWalker(this);
}
2012
2013LookupLevel
2014TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2015{
2016 switch (lookup_level_as_int) {
2017 case L1:
2018 return L1;
2019 case L2:
2020 return L2;
2021 case L3:
2022 return L3;
2023 default:
2024 panic("Invalid lookup level conversion");
2025 }
2026}