Deleted Added
sdiff udiff text old ( 10109:b58c5c5854de ) new ( 10324:f40134eb3f85 )
full compact
1/*
2 * Copyright (c) 2010, 2012-2014 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Authors: Ali Saidi
38 * Giacomo Gabrielli
39 */
40
41#include "arch/arm/faults.hh"
42#include "arch/arm/stage2_mmu.hh"
43#include "arch/arm/system.hh"
44#include "arch/arm/table_walker.hh"
45#include "arch/arm/tlb.hh"
46#include "cpu/base.hh"
47#include "cpu/thread_context.hh"
48#include "debug/Checkpoint.hh"
49#include "debug/Drain.hh"
50#include "debug/TLB.hh"
51#include "debug/TLBVerbose.hh"
52#include "sim/system.hh"
53
54using namespace ArmISA;
55
// Construct a table walker. The walker owns a DMA port into the memory
// system, per-lookup-level completion events, and a queue of pending
// walks; `is_stage2` selects whether this instance performs stage-2
// (IPA -> PA) walks.
TableWalker::TableWalker(const Params *p)
    : MemObject(p), port(this, p->sys), drainManager(NULL),
      stage2Mmu(NULL), isStage2(p->is_stage2), tlb(NULL),
      currState(NULL), pending(false), masterId(p->sys->getMasterId(name())),
      numSquashable(p->num_squash_per_cycle),
      doL1DescEvent(this), doL2DescEvent(this),
      doL0LongDescEvent(this), doL1LongDescEvent(this), doL2LongDescEvent(this),
      doL3LongDescEvent(this),
      doProcessEvent(this)
{
    sctlr = 0;

    // Cache system-level properties
    if (FullSystem) {
        // In full-system mode the System object must be an ArmSystem;
        // snapshot its feature flags so we don't re-query per walk.
        armSys = dynamic_cast<ArmSystem *>(p->sys);
        assert(armSys);
        haveSecurity = armSys->haveSecurity();
        _haveLPAE = armSys->haveLPAE();
        _haveVirtualization = armSys->haveVirtualization();
        physAddrRange = armSys->physAddrRange();
        _haveLargeAsid64 = armSys->haveLargeAsid64();
    } else {
        // Syscall-emulation mode: no ArmSystem available, assume a
        // baseline configuration without the optional extensions.
        armSys = NULL;
        haveSecurity = _haveLPAE = _haveVirtualization = false;
        _haveLargeAsid64 = false;
        physAddrRange = 32;
    }

}
85
86TableWalker::~TableWalker()
87{
88 ;
89}
90
// Default walker state: no stage-2 translation in flight. l2Desc is
// constructed from l1Desc — presumably the L2 descriptor keeps a link
// to its parent L1 descriptor; confirm against the header's
// L2Descriptor declaration.
TableWalker::WalkerState::WalkerState() : stage2Tran(NULL), l2Desc(l1Desc)
{
}
94
95void
96TableWalker::completeDrain()
97{
98 if (drainManager && stateQueues[L1].empty() && stateQueues[L2].empty() &&
99 pendingQueue.empty()) {
100 setDrainState(Drainable::Drained);
101 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
102 drainManager->signalDrainDone();
103 drainManager = NULL;
104 }
105}
106
// Attempt to drain the walker. Returns the number of objects that
// still need to drain: the port's count, plus one if the walker itself
// has outstanding or pending walks (in which case the DrainManager is
// remembered so completeDrain() can signal it later).
unsigned int
TableWalker::drain(DrainManager *dm)
{
    unsigned int count = port.drain(dm);

    bool state_queues_not_empty = false;

    // A walk is outstanding if any lookup level has queued state.
    for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
        if (!stateQueues[i].empty()) {
            state_queues_not_empty = true;
            break;
        }
    }

    if (state_queues_not_empty || pendingQueue.size()) {
        drainManager = dm;
        setDrainState(Drainable::Draining);
        DPRINTF(Drain, "TableWalker not drained\n");

        // return port drain count plus the table walker itself needs to drain
        return count + 1;
    } else {
        setDrainState(Drainable::Drained);
        DPRINTF(Drain, "TableWalker free, no need to drain\n");

        // table walker is drained, but its ports may still need to be drained
        return count;
    }
}
136
137void
138TableWalker::drainResume()
139{
140 Drainable::drainResume();
141 if (params()->sys->isTimingMode() && currState) {
142 delete currState;
143 currState = NULL;
144 }
145}
146
147BaseMasterPort&
148TableWalker::getMasterPort(const std::string &if_name, PortID idx)
149{
150 if (if_name == "port") {
151 return port;
152 }
153 return MemObject::getMasterPort(if_name, idx);
154}
155
// Entry point for a page-table walk. Sets up a WalkerState from the
// request and current CPU state, then either performs the walk
// immediately (atomic/functional) or queues it (timing). Returns
// NoFault, a translation fault, or ReExec for a re-faulted timing walk.
Fault
TableWalker::walk(RequestPtr _req, ThreadContext *_tc, uint16_t _asid,
                  uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
                  TLB::Translation *_trans, bool _timing, bool _functional,
                  bool secure, TLB::ArmTranslationType tranType)
{
    // A walk cannot be both functional and timing.
    assert(!(_functional && _timing));
    WalkerState *savedCurrState = NULL;

    if (!currState && !_functional) {
        // For atomic mode, a new WalkerState instance should be only created
        // once per TLB. For timing mode, a new instance is generated for every
        // TLB miss.
        DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");

        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_functional) {
        // If we are mixing functional mode with timing (or even
        // atomic), we need to to be careful and clean up after
        // ourselves to not risk getting into an inconsistent state.
        DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
        savedCurrState = currState;
        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_timing) {
        // This is a translation that was completed and then faulted again
        // because some underlying parameters that affect the translation
        // changed out from under us (e.g. asid). It will either be a
        // misprediction, in which case nothing will happen or we'll use
        // this fault to re-execute the faulting instruction which should clean
        // up everything.
        if (currState->vaddr_tainted == _req->getVaddr()) {
            return new ReExec;
        }
    }

    // Record the per-walk parameters in the walker state.
    currState->tc = _tc;
    currState->aarch64 = opModeIs64(currOpMode(_tc));
    currState->el = currEL(_tc);
    currState->transState = _trans;
    currState->req = _req;
    currState->fault = NoFault;
    currState->asid = _asid;
    currState->vmid = _vmid;
    currState->isHyp = _isHyp;
    currState->timing = _timing;
    currState->functional = _functional;
    currState->mode = _mode;
    currState->tranType = tranType;
    currState->isSecure = secure;
    currState->physAddrRange = physAddrRange;

    /** @todo These should be cached or grabbed from cached copies in
    the TLB, all these miscreg reads are expensive */
    currState->vaddr_tainted = currState->req->getVaddr();
    // In AArch64 strip any address tag bits before walking.
    if (currState->aarch64)
        currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
                                            currState->tc, currState->el);
    else
        currState->vaddr = currState->vaddr_tainted;

    if (currState->aarch64) {
        // Read the translation-control registers for the current EL.
        switch (currState->el) {
          case EL0:
          case EL1:
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
            break;
          // @todo: uncomment this to enable Virtualization
          // case EL2:
          //   assert(haveVirtualization);
          //   currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
          //   currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
          //   break;
          case EL3:
            assert(haveSecurity);
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
            break;
          default:
            panic("Invalid exception level");
            break;
        }
    } else {
        // AArch32: SCTLR/TTBCR are banked by security state.
        currState->sctlr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
            MISCREG_SCTLR, currState->tc, !currState->isSecure));
        currState->ttbcr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
            MISCREG_TTBCR, currState->tc, !currState->isSecure));
        currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
        currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
        currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
    }
    sctlr = currState->sctlr;

    currState->isFetch = (currState->mode == TLB::Execute);
    currState->isWrite = (currState->mode == TLB::Write);

    // We only do a second stage of translation if we're not secure, or in
    // hyp mode, the second stage MMU is enabled, and this table walker
    // instance is the first stage.
    currState->doingStage2 = false;
    // @todo: for now disable this in AArch64 (HCR is not set)
    currState->stage2Req = !currState->aarch64 && currState->hcr.vm &&
                           !isStage2 && !currState->isSecure && !currState->isHyp;

    // Long-descriptor format is used for AArch64, LPAE, hyp-mode and
    // stage-2 walks.
    bool long_desc_format = currState->aarch64 ||
        (_haveLPAE && currState->ttbcr.eae) ||
        _isHyp || isStage2;

    if (long_desc_format) {
        // Helper variables used for hierarchical permissions
        currState->secureLookup = currState->isSecure;
        currState->rwTable = true;
        currState->userTable = true;
        currState->xnTable = false;
        currState->pxnTable = false;
    }

    if (!currState->timing) {
        // Atomic/functional: do the whole walk right now.
        Fault fault = NoFault;
        if (currState->aarch64)
            fault = processWalkAArch64();
        else if (long_desc_format)
            fault = processWalkLPAE();
        else
            fault = processWalk();

        // If this was a functional non-timing access restore state to
        // how we found it.
        if (currState->functional) {
            delete currState;
            currState = savedCurrState;
        }
        return fault;
    }

    // Timing: start the walk now if idle, otherwise queue it behind the
    // walks already in flight.
    if (pending || pendingQueue.size()) {
        pendingQueue.push_back(currState);
        currState = NULL;
    } else {
        pending = true;
        if (currState->aarch64)
            return processWalkAArch64();
        else if (long_desc_format)
            return processWalkLPAE();
        else
            return processWalk();
    }

    return NoFault;
}
308
// Dequeue and start the next pending timing walk. Walks whose
// requesting instruction has been squashed, or whose translation has
// been filled by an earlier walk, are discarded (up to numSquashable
// per invocation) instead of being performed.
void
TableWalker::processWalkWrapper()
{
    assert(!currState);
    assert(pendingQueue.size());
    currState = pendingQueue.front();

    // Regime to search the TLB in: current EL for AArch64, EL1 for
    // AArch32.
    ExceptionLevel target_el = EL0;
    if (currState->aarch64)
        target_el = currEL(currState->tc);
    else
        target_el = EL1;

    // Check if a previous walk filled this request already
    // @TODO Should this always be the TLB or should we look in the stage2 TLB?
    TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
            currState->vmid, currState->isHyp, currState->isSecure, true, false,
            target_el);

    // Check if we still need to have a walk for this request. If the requesting
    // instruction has been squashed, or a previous walk has filled the TLB with
    // a match, we just want to get rid of the walk. The latter could happen
    // when there are multiple outstanding misses to a single page and a
    // previous request has been successfully translated.
    if (!currState->transState->squashed() && !te) {
        // We've got a valid request, lets process it
        pending = true;
        pendingQueue.pop_front();
        if (currState->aarch64)
            processWalkAArch64();
        else if ((_haveLPAE && currState->ttbcr.eae) || currState->isHyp ||
                 isStage2)
            processWalkLPAE();
        else
            processWalk();
        return;
    }


    // If the instruction that we were translating for has been
    // squashed we shouldn't bother.
    unsigned num_squashed = 0;
    ThreadContext *tc = currState->tc;
    // Drop at most numSquashable dead walks this cycle.
    while ((num_squashed < numSquashable) && currState &&
           (currState->transState->squashed() || te)) {
        pendingQueue.pop_front();
        num_squashed++;

        DPRINTF(TLB, "Squashing table walk for address %#x\n",
                currState->vaddr_tainted);

        if (currState->transState->squashed()) {
            // finish the translation which will delete the translation object
            currState->transState->finish(new UnimpFault("Squashed Inst"),
                    currState->req, currState->tc, currState->mode);
        } else {
            // translate the request now that we know it will work
            tlb->translateTiming(currState->req, currState->tc,
                        currState->transState, currState->mode);

        }

        // delete the current request
        delete currState;

        // peak at the next one
        if (pendingQueue.size()) {
            currState = pendingQueue.front();
            te = tlb->lookup(currState->vaddr, currState->asid,
                currState->vmid, currState->isHyp, currState->isSecure, true,
                false, target_el);
        } else {
            // Terminate the loop, nothing more to do
            currState = NULL;
        }
    }

    // if we've still got pending translations schedule more work
    nextWalk(tc);
    currState = NULL;
    completeDrain();
}
390
// Start an AArch32 short-descriptor (VMSA) walk: select TTBR0/TTBR1
// per TTBCR.N, compute the L1 descriptor address, and issue the fetch.
// Returns the fault (if any) for non-delayed walks.
Fault
TableWalker::processWalk()
{
    Addr ttbr = 0;

    // If translation isn't enabled, we shouldn't be here
    assert(currState->sctlr.m || isStage2);

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
            currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
                                                      32 - currState->ttbcr.n));

    // TTBR0 covers addresses whose top N bits are zero (or all
    // addresses when N == 0); everything else goes through TTBR1.
    if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
                                          32 - currState->ttbcr.n)) {
        DPRINTF(TLB, " - Selecting TTBR0\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd0) {
            if (currState->isFetch)
                return new PrefetchAbort(currState->vaddr_tainted,
                                         ArmFault::TranslationLL + L1,
                                         isStage2,
                                         ArmFault::VmsaTran);
            else
                return new DataAbort(currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess, currState->isWrite,
                        ArmFault::TranslationLL + L1, isStage2,
                        ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
            MISCREG_TTBR0, currState->tc, !currState->isSecure));
    } else {
        DPRINTF(TLB, " - Selecting TTBR1\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd1) {
            if (currState->isFetch)
                return new PrefetchAbort(currState->vaddr_tainted,
                                         ArmFault::TranslationLL + L1,
                                         isStage2,
                                         ArmFault::VmsaTran);
            else
                return new DataAbort(currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess, currState->isWrite,
                        ArmFault::TranslationLL + L1, isStage2,
                        ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
            MISCREG_TTBR1, currState->tc, !currState->isSecure));
        // TTBR1 always uses a 16KB-aligned table, i.e. behaves as N=0
        // for the descriptor-address computation below.
        currState->ttbcr.n = 0;
    }

    Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
        (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
    DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
            currState->isSecure ? "s" : "ns");

    // Trickbox address check
    Fault f;
    f = tlb->walkTrickBoxCheck(l1desc_addr, currState->isSecure,
            currState->vaddr, sizeof(uint32_t), currState->isFetch,
            currState->isWrite, TlbEntry::DomainType::NoAccess, L1);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            // Hand the walker over to the next queued walk before
            // reporting the fault.
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    Request::Flags flag = 0;
    if (currState->sctlr.c == 0) {
        flag = Request::UNCACHEABLE;
    }

    bool delayed;
    delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
                              sizeof(uint32_t), flag, L1, &doL1DescEvent,
                              &TableWalker::doL1Descriptor);
    if (!delayed) {
       f = currState->fault;
    }

    return f;
}
479
// Start an AArch32 long-descriptor (LPAE) walk: select VTTBR (stage 2),
// HTTBR (hyp) or TTBR0/TTBR1, determine the starting lookup level and
// descriptor address, and issue the fetch.
Fault
TableWalker::processWalkLPAE()
{
    Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
    int tsz, n;
    LookupLevel start_lookup_level = L1;

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
            currState->vaddr_tainted, currState->ttbcr);

    Request::Flags flag = 0;
    if (currState->isSecure)
        flag.set(Request::SECURE);

    // work out which base address register to use, if in hyp mode we always
    // use HTTBR
    if (isStage2) {
        DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
        // VTCR.T0SZ is a signed 4-bit field.
        tsz = sext<4>(currState->vtcr.t0sz);
        start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
    } else if (currState->isHyp) {
        DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
        tsz = currState->htcr.t0sz;
    } else {
        assert(_haveLPAE && currState->ttbcr.eae);

        // Determine boundaries of TTBR0/1 regions
        if (currState->ttbcr.t0sz)
            ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
        else if (currState->ttbcr.t1sz)
            ttbr0_max = (1ULL << 32) -
                (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
        else
            ttbr0_max = (1ULL << 32) - 1;
        if (currState->ttbcr.t1sz)
            ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
        else
            ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));

        // The following code snippet selects the appropriate translation table base
        // address (TTBR0 or TTBR1) and the appropriate starting lookup level
        // depending on the address range supported by the translation table (ARM
        // ARM issue C B3.6.4)
        if (currState->vaddr <= ttbr0_max) {
            DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd0) {
                if (currState->isFetch)
                    return new PrefetchAbort(currState->vaddr_tainted,
                                             ArmFault::TranslationLL + L1,
                                             isStage2,
                                             ArmFault::LpaeTran);
                else
                    return new DataAbort(currState->vaddr_tainted,
                                         TlbEntry::DomainType::NoAccess,
                                         currState->isWrite,
                                         ArmFault::TranslationLL + L1,
                                         isStage2,
                                         ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
                MISCREG_TTBR0, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t0sz;
            if (ttbr0_max < (1ULL << 30))  // Upper limit < 1 GB
                start_lookup_level = L2;
        } else if (currState->vaddr >= ttbr1_min) {
            DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd1) {
                if (currState->isFetch)
                    return new PrefetchAbort(currState->vaddr_tainted,
                                             ArmFault::TranslationLL + L1,
                                             isStage2,
                                             ArmFault::LpaeTran);
                else
                    return new DataAbort(currState->vaddr_tainted,
                                         TlbEntry::DomainType::NoAccess,
                                         currState->isWrite,
                                         ArmFault::TranslationLL + L1,
                                         isStage2,
                                         ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked(
                MISCREG_TTBR1, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t1sz;
            if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))  // Lower limit >= 3 GB
                start_lookup_level = L2;
        } else {
            // Out of boundaries -> translation fault
            if (currState->isFetch)
                return new PrefetchAbort(currState->vaddr_tainted,
                                         ArmFault::TranslationLL + L1,
                                         isStage2,
                                         ArmFault::LpaeTran);
            else
                return new DataAbort(currState->vaddr_tainted,
                                     TlbEntry::DomainType::NoAccess,
                                     currState->isWrite, ArmFault::TranslationLL + L1,
                                     isStage2, ArmFault::LpaeTran);
        }

    }

    // Perform lookup (ARM ARM issue C B3.6.6)
    if (start_lookup_level == L1) {
        n = 5 - tsz;
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 26, 30) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    } else {
        // Skip first-level lookup
        n = (tsz >= 2 ? 14 - tsz : 12);
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 17, 21) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    }

    // Trickbox address check
    Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
                        currState->vaddr, sizeof(uint64_t), currState->isFetch,
                        currState->isWrite, TlbEntry::DomainType::NoAccess,
                        start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    // NOTE: this assignment clobbers the SECURE flag set above, which
    // is why SECURE is set again immediately below.
    if (currState->sctlr.c == 0) {
        flag = Request::UNCACHEABLE;
    }

    if (currState->isSecure)
        flag.set(Request::SECURE);

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = false;
    // AArch32 LPAE always uses 4KB translation granules.
    currState->longDesc.grainSize = Grain4KB;

    Event *event = start_lookup_level == L1 ? (Event *) &doL1LongDescEvent
                                            : (Event *) &doL2LongDescEvent;

    bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
                                   sizeof(uint64_t), flag, start_lookup_level,
                                   event, &TableWalker::doLongDescriptor);
    if (!delayed) {
        f = currState->fault;
    }

    return f;
}
642
643unsigned
644TableWalker::adjustTableSizeAArch64(unsigned tsz)
645{
646 if (tsz < 25)
647 return 25;
648 if (tsz > 48)
649 return 48;
650 return tsz;
651}
652
653bool
654TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
655{
656 return (currPhysAddrRange != MaxPhysAddrRange &&
657 bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
658}
659
// Start an AArch64 walk: pick the TTBR and granule from the current EL
// and address, compute the starting lookup level and first descriptor
// address, perform address-size checks, and issue the descriptor fetch
// (timing DMA, atomic DMA, or functional packet).
Fault
TableWalker::processWalkAArch64()
{
    assert(currState->aarch64);

    DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
            currState->vaddr_tainted, currState->tcr);

    // Granule encodings: TCR.TG0 and TCR_EL1.TG1 use different mappings.
    static const GrainSize GrainMapDefault[] =
      { Grain4KB, Grain64KB, Grain16KB, ReservedGrain };
    static const GrainSize GrainMap_EL1_tg1[] =
      { ReservedGrain, Grain16KB, Grain4KB, Grain64KB };

    // Determine TTBR, table size, granule size and phys. address range
    Addr ttbr = 0;
    int tsz = 0, ps = 0;
    GrainSize tg = Grain4KB; // grain size computed from tg* field
    bool fault = false;
    switch (currState->el) {
      case EL0:
      case EL1:
        // EL0/EL1 split the address space between TTBR0 (low, top 16
        // bits zero) and TTBR1 (high, top 16 bits one).
        switch (bits(currState->vaddr, 63,48)) {
          case 0:
            DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
            tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
            tg = GrainMapDefault[currState->tcr.tg0];
            // Address must fit in the region and walks must be enabled.
            if (bits(currState->vaddr, 63, tsz) != 0x0 ||
                currState->tcr.epd0)
                fault = true;
            break;
          case 0xffff:
            DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
            tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
            tg = GrainMap_EL1_tg1[currState->tcr.tg1];
            if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
                currState->tcr.epd1)
                fault = true;
            break;
          default:
            // top two bytes must be all 0s or all 1s, else invalid addr
            fault = true;
        }
        ps = currState->tcr.ips;
        break;
      case EL2:
      case EL3:
        // EL2/EL3 only have a TTBR0 region.
        switch(bits(currState->vaddr, 63,48)) {
          case 0:
            DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
            if (currState->el == EL2)
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
            else
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
            tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
            tg = GrainMapDefault[currState->tcr.tg0];
            break;
          default:
            // invalid addr if top two bytes are not all 0s
            fault = true;
        }
        ps = currState->tcr.ips;
        break;
    }

    if (fault) {
        // Level-0 translation fault for out-of-range / disabled regions.
        Fault f;
        if (currState->isFetch)
            f = new PrefetchAbort(currState->vaddr_tainted,
                                  ArmFault::TranslationLL + L0, isStage2,
                                  ArmFault::LpaeTran);
        else
            f = new DataAbort(currState->vaddr_tainted,
                              TlbEntry::DomainType::NoAccess,
                              currState->isWrite,
                              ArmFault::TranslationLL + L0,
                              isStage2, ArmFault::LpaeTran);

        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;

    }

    if (tg == ReservedGrain) {
        warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
                  "DEFINED behavior takes this to mean 4KB granules\n");
        tg = Grain4KB;
    }

    // Bits of virtual address resolved per level: grain bits minus the
    // 3 bits of descriptor index size.
    int stride = tg - 3;
    LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;

    // Determine starting lookup level
    // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
    // in ARM DDI 0487A.  These table values correspond to the cascading tests
    // to compute the lookup level and are of the form
    // (grain_size + N*stride), for N = {1, 2, 3}.
    // A value of 64 will never succeed and a value of 0 will always succeed.
    {
        struct GrainMap {
            GrainSize grain_size;
            unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
        };
        static const GrainMap GM[] = {
            { Grain4KB,  { 39, 30,  0,  0 } },
            { Grain16KB, { 47, 36, 25,  0 } },
            { Grain64KB, { 64, 42, 29,  0 } }
        };

        const unsigned *lookup = NULL; // points to a lookup_level_cutoff

        for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
            if (tg == GM[i].grain_size) {
                lookup = GM[i].lookup_level_cutoff;
                break;
            }
        }
        assert(lookup);

        // First level whose cutoff the input-address size exceeds.
        for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
            if (tsz > lookup[L]) {
                start_lookup_level = (LookupLevel) L;
                break;
            }
        }
        panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
                 "Table walker couldn't find lookup level\n");
    }

    // Determine table base address
    int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
    Addr base_addr = mbits(ttbr, 47, base_addr_lo);

    // Determine physical address size and raise an Address Size Fault if
    // necessary
    int pa_range = decodePhysAddrRange64(ps);
    // Clamp to lower limit
    if (pa_range > physAddrRange)
        currState->physAddrRange = physAddrRange;
    else
        currState->physAddrRange = pa_range;
    if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
        DPRINTF(TLB, "Address size fault before any lookup\n");
        Fault f;
        if (currState->isFetch)
            f = new PrefetchAbort(currState->vaddr_tainted,
                                  ArmFault::AddressSizeLL + start_lookup_level,
                                  isStage2,
                                  ArmFault::LpaeTran);
        else
            f = new DataAbort(currState->vaddr_tainted,
                              TlbEntry::DomainType::NoAccess,
                              currState->isWrite,
                              ArmFault::AddressSizeLL + start_lookup_level,
                              isStage2,
                              ArmFault::LpaeTran);


        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;

    }

    // Determine descriptor address
    Addr desc_addr = base_addr |
        (bits(currState->vaddr, tsz - 1,
              stride * (3 - start_lookup_level) + tg) << 3);

    // Trickbox address check
    Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure,
                        currState->vaddr, sizeof(uint64_t), currState->isFetch,
                        currState->isWrite, TlbEntry::DomainType::NoAccess,
                        start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    Request::Flags flag = 0;
    if (currState->sctlr.c == 0) {
        flag = Request::UNCACHEABLE;
    }

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = true;
    currState->longDesc.grainSize = tg;

    if (currState->timing) {
        // Timing: issue the DMA and park this walk on the queue for its
        // starting level; the event continues the walk on completion.
        Event *event;
        switch (start_lookup_level) {
          case L0:
            event = (Event *) &doL0LongDescEvent;
            break;
          case L1:
            event = (Event *) &doL1LongDescEvent;
            break;
          case L2:
            event = (Event *) &doL2LongDescEvent;
            break;
          case L3:
            event = (Event *) &doL3LongDescEvent;
            break;
          default:
            panic("Invalid table lookup level");
            break;
        }
        port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t), event,
                       (uint8_t*) &currState->longDesc.data,
                       currState->tc->getCpuPtr()->clockPeriod(), flag);
        DPRINTF(TLBVerbose,
                "Adding to walker fifo: queue size before adding: %d\n",
                stateQueues[start_lookup_level].size());
        stateQueues[start_lookup_level].push_back(currState);
        currState = NULL;
    } else if (!currState->functional) {
        // Atomic: fetch and process the descriptor synchronously.
        port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t),
                       NULL, (uint8_t*) &currState->longDesc.data,
                       currState->tc->getCpuPtr()->clockPeriod(), flag);
        doLongDescriptor();
        f = currState->fault;
    } else {
        // Functional: use a functional packet so no timing state changes.
        RequestPtr req = new Request(desc_addr, sizeof(uint64_t), flag,
                                     masterId);
        PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
        pkt->dataStatic((uint8_t*) &currState->longDesc.data);
        port.sendFunctional(pkt);
        doLongDescriptor();
        delete req;
        delete pkt;
        f = currState->fault;
    }

    return f;
}
917
918void
919TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
920 uint8_t texcb, bool s)
921{
922 // Note: tc and sctlr local variables are hiding tc and sctrl class
923 // variables
924 DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
925 te.shareable = false; // default value
926 te.nonCacheable = false;
927 te.outerShareable = false;
928 if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
929 switch(texcb) {
930 case 0: // Stongly-ordered
931 te.nonCacheable = true;
932 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
933 te.shareable = true;
934 te.innerAttrs = 1;
935 te.outerAttrs = 0;
936 break;
937 case 1: // Shareable Device
938 te.nonCacheable = true;
939 te.mtype = TlbEntry::MemoryType::Device;
940 te.shareable = true;
941 te.innerAttrs = 3;
942 te.outerAttrs = 0;
943 break;
944 case 2: // Outer and Inner Write-Through, no Write-Allocate
945 te.mtype = TlbEntry::MemoryType::Normal;
946 te.shareable = s;
947 te.innerAttrs = 6;
948 te.outerAttrs = bits(texcb, 1, 0);
949 break;
950 case 3: // Outer and Inner Write-Back, no Write-Allocate
951 te.mtype = TlbEntry::MemoryType::Normal;
952 te.shareable = s;
953 te.innerAttrs = 7;
954 te.outerAttrs = bits(texcb, 1, 0);
955 break;
956 case 4: // Outer and Inner Non-cacheable
957 te.nonCacheable = true;
958 te.mtype = TlbEntry::MemoryType::Normal;
959 te.shareable = s;
960 te.innerAttrs = 0;
961 te.outerAttrs = bits(texcb, 1, 0);
962 break;
963 case 5: // Reserved
964 panic("Reserved texcb value!\n");
965 break;
966 case 6: // Implementation Defined
967 panic("Implementation-defined texcb value!\n");
968 break;
969 case 7: // Outer and Inner Write-Back, Write-Allocate
970 te.mtype = TlbEntry::MemoryType::Normal;
971 te.shareable = s;
972 te.innerAttrs = 5;
973 te.outerAttrs = 1;
974 break;
975 case 8: // Non-shareable Device
976 te.nonCacheable = true;
977 te.mtype = TlbEntry::MemoryType::Device;
978 te.shareable = false;
979 te.innerAttrs = 3;
980 te.outerAttrs = 0;
981 break;
982 case 9 ... 15: // Reserved
983 panic("Reserved texcb value!\n");
984 break;
985 case 16 ... 31: // Cacheable Memory
986 te.mtype = TlbEntry::MemoryType::Normal;
987 te.shareable = s;
988 if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
989 te.nonCacheable = true;
990 te.innerAttrs = bits(texcb, 1, 0);
991 te.outerAttrs = bits(texcb, 3, 2);
992 break;
993 default:
994 panic("More than 32 states for 5 bits?\n");
995 }
996 } else {
997 assert(tc);
998 PRRR prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR,
999 currState->tc, !currState->isSecure));
1000 NMRR nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR,
1001 currState->tc, !currState->isSecure));
1002 DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1003 uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1004 switch(bits(texcb, 2,0)) {
1005 case 0:
1006 curr_tr = prrr.tr0;
1007 curr_ir = nmrr.ir0;
1008 curr_or = nmrr.or0;
1009 te.outerShareable = (prrr.nos0 == 0);
1010 break;
1011 case 1:
1012 curr_tr = prrr.tr1;
1013 curr_ir = nmrr.ir1;
1014 curr_or = nmrr.or1;
1015 te.outerShareable = (prrr.nos1 == 0);
1016 break;
1017 case 2:
1018 curr_tr = prrr.tr2;
1019 curr_ir = nmrr.ir2;
1020 curr_or = nmrr.or2;
1021 te.outerShareable = (prrr.nos2 == 0);
1022 break;
1023 case 3:
1024 curr_tr = prrr.tr3;
1025 curr_ir = nmrr.ir3;
1026 curr_or = nmrr.or3;
1027 te.outerShareable = (prrr.nos3 == 0);
1028 break;
1029 case 4:
1030 curr_tr = prrr.tr4;
1031 curr_ir = nmrr.ir4;
1032 curr_or = nmrr.or4;
1033 te.outerShareable = (prrr.nos4 == 0);
1034 break;
1035 case 5:
1036 curr_tr = prrr.tr5;
1037 curr_ir = nmrr.ir5;
1038 curr_or = nmrr.or5;
1039 te.outerShareable = (prrr.nos5 == 0);
1040 break;
1041 case 6:
1042 panic("Imp defined type\n");
1043 case 7:
1044 curr_tr = prrr.tr7;
1045 curr_ir = nmrr.ir7;
1046 curr_or = nmrr.or7;
1047 te.outerShareable = (prrr.nos7 == 0);
1048 break;
1049 }
1050
1051 switch(curr_tr) {
1052 case 0:
1053 DPRINTF(TLBVerbose, "StronglyOrdered\n");
1054 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1055 te.nonCacheable = true;
1056 te.innerAttrs = 1;
1057 te.outerAttrs = 0;
1058 te.shareable = true;
1059 break;
1060 case 1:
1061 DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1062 prrr.ds1, prrr.ds0, s);
1063 te.mtype = TlbEntry::MemoryType::Device;
1064 te.nonCacheable = true;
1065 te.innerAttrs = 3;
1066 te.outerAttrs = 0;
1067 if (prrr.ds1 && s)
1068 te.shareable = true;
1069 if (prrr.ds0 && !s)
1070 te.shareable = true;
1071 break;
1072 case 2:
1073 DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1074 prrr.ns1, prrr.ns0, s);
1075 te.mtype = TlbEntry::MemoryType::Normal;
1076 if (prrr.ns1 && s)
1077 te.shareable = true;
1078 if (prrr.ns0 && !s)
1079 te.shareable = true;
1080 break;
1081 case 3:
1082 panic("Reserved type");
1083 }
1084
1085 if (te.mtype == TlbEntry::MemoryType::Normal){
1086 switch(curr_ir) {
1087 case 0:
1088 te.nonCacheable = true;
1089 te.innerAttrs = 0;
1090 break;
1091 case 1:
1092 te.innerAttrs = 5;
1093 break;
1094 case 2:
1095 te.innerAttrs = 6;
1096 break;
1097 case 3:
1098 te.innerAttrs = 7;
1099 break;
1100 }
1101
1102 switch(curr_or) {
1103 case 0:
1104 te.nonCacheable = true;
1105 te.outerAttrs = 0;
1106 break;
1107 case 1:
1108 te.outerAttrs = 1;
1109 break;
1110 case 2:
1111 te.outerAttrs = 2;
1112 break;
1113 case 3:
1114 te.outerAttrs = 3;
1115 break;
1116 }
1117 }
1118 }
1119 DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, \
1120 outerAttrs: %d\n",
1121 te.shareable, te.innerAttrs, te.outerAttrs);
1122 te.setAttributes(false);
1123}
1124
// Fill the memory-attribute fields of a TLB entry from a long-descriptor
// (LPAE) page table entry. For stage 2 translations the attributes come
// directly from the descriptor's MemAttr field; for stage 1 they are looked
// up in MAIR0/MAIR1 using the descriptor's AttrIndx (LPAE always remaps
// attributes, irrespective of SCTLR.TRE).
void
TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
    LongDescriptor &lDescriptor)
{
    assert(_haveLPAE);

    uint8_t attr;
    uint8_t sh = lDescriptor.sh();
    // Different format and source of attributes if this is a stage 2
    // translation
    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_3_2 = (attr >> 2) & 0x3;
        uint8_t attr_1_0 = attr & 0x3;

        DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_3_2 == 0) {
            // MemAttr[3:2] == 0 selects Strongly-ordered or Device memory
            te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                     : TlbEntry::MemoryType::Device;
            te.outerAttrs = 0;
            te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            // Normal memory: decode inner/outer cacheability hints
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = attr_3_2 == 1 ? 0 :
                            attr_3_2 == 2 ? 2 : 1;
            te.innerAttrs = attr_1_0 == 1 ? 0 :
                            attr_1_0 == 2 ? 6 : 5;
            // Non-cacheable overall if either level is non-cacheable
            te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        // LPAE always uses remapping of memory attributes, irrespective of the
        // value of SCTLR.TRE
        // AttrIndx[2] selects MAIR1 vs MAIR0; each register holds four
        // 8-bit attribute fields
        int reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
        reg = flattenMiscRegNsBanked(reg, currState->tc, !currState->isSecure);
        uint32_t mair = currState->tc->readMiscReg(reg);
        attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
        uint8_t attr_7_4 = bits(attr, 7, 4);
        uint8_t attr_3_0 = bits(attr, 3, 0);
        DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);

        // Note: the memory subsystem only cares about the 'cacheable' memory
        // attribute. The other attributes are only used to fill the PAR register
        // accordingly to provide the illusion of full support
        te.nonCacheable = false;

        // Decode the outer attributes (attr[7:4])
        switch (attr_7_4) {
          case 0x0:
            // Strongly-ordered or Device memory
            if (attr_3_0 == 0x0)
                te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            else if (attr_3_0 == 0x4)
                te.mtype = TlbEntry::MemoryType::Device;
            else
                panic("Unpredictable behavior\n");
            te.nonCacheable = true;
            te.outerAttrs = 0;
            break;
          case 0x4:
            // Normal memory, Outer Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = 0;
            if (attr_3_0 == 0x4)
                // Inner Non-cacheable
                te.nonCacheable = true;
            else if (attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          case 0x8:
          case 0x9:
          case 0xa:
          case 0xb:
          case 0xc:
          case 0xd:
          case 0xe:
          case 0xf:
            if (attr_7_4 & 0x4) {
                te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
            } else {
                te.outerAttrs = 0x2;
            }
            // Normal memory, Outer Cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }

        // Decode the inner attributes (attr[3:0])
        switch (attr_3_0) {
          case 0x0:
            te.innerAttrs = 0x1;
            break;
          case 0x4:
            te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
            break;
          case 0x8:
          case 0x9:
          case 0xA:
          case 0xB:
            te.innerAttrs = 6;
            break;
          case 0xC:
          case 0xD:
          case 0xE:
          case 0xF:
            te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
    }

    // SH[1:0]: 0b10 = Outer Shareable, 0b11 = Inner Shareable; both count
    // as shareable
    te.outerShareable = sh == 2;
    te.shareable = (sh & 0x2) ? true : false;
    te.setAttributes(true);
    // Stash the raw attribute byte in the 64-bit PAR-format attributes
    te.attributes |= (uint64_t) attr << 56;
}
1249
1250void
1251TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te, uint8_t attrIndx,
1252 uint8_t sh)
1253{
1254 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1255
1256 // Select MAIR
1257 uint64_t mair;
1258 switch (currState->el) {
1259 case EL0:
1260 case EL1:
1261 mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1262 break;
1263 case EL2:
1264 mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1265 break;
1266 case EL3:
1267 mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1268 break;
1269 default:
1270 panic("Invalid exception level");
1271 break;
1272 }
1273
1274 // Select attributes
1275 uint8_t attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1276 uint8_t attr_lo = bits(attr, 3, 0);
1277 uint8_t attr_hi = bits(attr, 7, 4);
1278
1279 // Memory type
1280 te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;
1281
1282 // Cacheability
1283 te.nonCacheable = false;
1284 if (te.mtype == TlbEntry::MemoryType::Device || // Device memory
1285 attr_hi == 0x8 || // Normal memory, Outer Non-cacheable
1286 attr_lo == 0x8) { // Normal memory, Inner Non-cacheable
1287 te.nonCacheable = true;
1288 }
1289
1290 te.shareable = sh == 2;
1291 te.outerShareable = (sh & 0x2) ? true : false;
1292 // Attributes formatted according to the 64-bit PAR
1293 te.attributes = ((uint64_t) attr << 56) |
1294 (1 << 11) | // LPAE bit
1295 (te.ns << 9) | // NS bit
1296 (sh << 7);
1297}
1298
1299void
1300TableWalker::doL1Descriptor()
1301{
1302 if (currState->fault != NoFault) {
1303 return;
1304 }
1305
1306 DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1307 currState->vaddr_tainted, currState->l1Desc.data);
1308 TlbEntry te;
1309
1310 switch (currState->l1Desc.type()) {
1311 case L1Descriptor::Ignore:
1312 case L1Descriptor::Reserved:
1313 if (!currState->timing) {
1314 currState->tc = NULL;
1315 currState->req = NULL;
1316 }
1317 DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1318 if (currState->isFetch)
1319 currState->fault =
1320 new PrefetchAbort(currState->vaddr_tainted,
1321 ArmFault::TranslationLL + L1,
1322 isStage2,
1323 ArmFault::VmsaTran);
1324 else
1325 currState->fault =
1326 new DataAbort(currState->vaddr_tainted,
1327 TlbEntry::DomainType::NoAccess,
1328 currState->isWrite,
1329 ArmFault::TranslationLL + L1, isStage2,
1330 ArmFault::VmsaTran);
1331 return;
1332 case L1Descriptor::Section:
1333 if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1334 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
1335 * enabled if set, do l1.Desc.setAp0() instead of generating
1336 * AccessFlag0
1337 */
1338
1339 currState->fault = new DataAbort(currState->vaddr_tainted,
1340 currState->l1Desc.domain(),
1341 currState->isWrite,
1342 ArmFault::AccessFlagLL + L1,
1343 isStage2,
1344 ArmFault::VmsaTran);
1345 }
1346 if (currState->l1Desc.supersection()) {
1347 panic("Haven't implemented supersections\n");
1348 }
1349 insertTableEntry(currState->l1Desc, false);
1350 return;
1351 case L1Descriptor::PageTable:
1352 {
1353 Addr l2desc_addr;
1354 l2desc_addr = currState->l1Desc.l2Addr() |
1355 (bits(currState->vaddr, 19, 12) << 2);
1356 DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1357 l2desc_addr, currState->isSecure ? "s" : "ns");
1358
1359 // Trickbox address check
1360 currState->fault = tlb->walkTrickBoxCheck(
1361 l2desc_addr, currState->isSecure, currState->vaddr,
1362 sizeof(uint32_t), currState->isFetch, currState->isWrite,
1363 currState->l1Desc.domain(), L2);
1364
1365 if (currState->fault) {
1366 if (!currState->timing) {
1367 currState->tc = NULL;
1368 currState->req = NULL;
1369 }
1370 return;
1371 }
1372
1373 Request::Flags flag = 0;
1374 if (currState->isSecure)
1375 flag.set(Request::SECURE);
1376
1377 bool delayed;
1378 delayed = fetchDescriptor(l2desc_addr,
1379 (uint8_t*)&currState->l2Desc.data,
1380 sizeof(uint32_t), flag, -1, &doL2DescEvent,
1381 &TableWalker::doL2Descriptor);
1382 if (delayed) {
1383 currState->delayed = true;
1384 }
1385
1386 return;
1387 }
1388 default:
1389 panic("A new type in a 2 bit field?\n");
1390 }
1391}
1392
1393void
1394TableWalker::doLongDescriptor()
1395{
1396 if (currState->fault != NoFault) {
1397 return;
1398 }
1399
1400 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1401 currState->longDesc.lookupLevel, currState->vaddr_tainted,
1402 currState->longDesc.data,
1403 currState->aarch64 ? "AArch64" : "long-desc.");
1404
1405 if ((currState->longDesc.type() == LongDescriptor::Block) ||
1406 (currState->longDesc.type() == LongDescriptor::Page)) {
1407 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1408 "xn: %d, ap: %d, af: %d, type: %d\n",
1409 currState->longDesc.lookupLevel,
1410 currState->longDesc.data,
1411 currState->longDesc.pxn(),
1412 currState->longDesc.xn(),
1413 currState->longDesc.ap(),
1414 currState->longDesc.af(),
1415 currState->longDesc.type());
1416 } else {
1417 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1418 currState->longDesc.lookupLevel,
1419 currState->longDesc.data,
1420 currState->longDesc.type());
1421 }
1422
1423 TlbEntry te;
1424
1425 switch (currState->longDesc.type()) {
1426 case LongDescriptor::Invalid:
1427 if (!currState->timing) {
1428 currState->tc = NULL;
1429 currState->req = NULL;
1430 }
1431
1432 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1433 currState->longDesc.lookupLevel,
1434 ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1435 if (currState->isFetch)
1436 currState->fault = new PrefetchAbort(
1437 currState->vaddr_tainted,
1438 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1439 isStage2,
1440 ArmFault::LpaeTran);
1441 else
1442 currState->fault = new DataAbort(
1443 currState->vaddr_tainted,
1444 TlbEntry::DomainType::NoAccess,
1445 currState->isWrite,
1446 ArmFault::TranslationLL + currState->longDesc.lookupLevel,
1447 isStage2,
1448 ArmFault::LpaeTran);
1449 return;
1450 case LongDescriptor::Block:
1451 case LongDescriptor::Page:
1452 {
1453 bool fault = false;
1454 bool aff = false;
1455 // Check for address size fault
1456 if (checkAddrSizeFaultAArch64(
1457 mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
1458 currState->longDesc.offsetBits()),
1459 currState->physAddrRange)) {
1460 fault = true;
1461 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1462 currState->longDesc.lookupLevel);
1463 // Check for access fault
1464 } else if (currState->longDesc.af() == 0) {
1465 fault = true;
1466 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1467 currState->longDesc.lookupLevel);
1468 aff = true;
1469 }
1470 if (fault) {
1471 if (currState->isFetch)
1472 currState->fault = new PrefetchAbort(
1473 currState->vaddr_tainted,
1474 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1475 currState->longDesc.lookupLevel,
1476 isStage2,
1477 ArmFault::LpaeTran);
1478 else
1479 currState->fault = new DataAbort(
1480 currState->vaddr_tainted,
1481 TlbEntry::DomainType::NoAccess, currState->isWrite,
1482 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) +
1483 currState->longDesc.lookupLevel,
1484 isStage2,
1485 ArmFault::LpaeTran);
1486 } else {
1487 insertTableEntry(currState->longDesc, true);
1488 }
1489 }
1490 return;
1491 case LongDescriptor::Table:
1492 {
1493 // Set hierarchical permission flags
1494 currState->secureLookup = currState->secureLookup &&
1495 currState->longDesc.secureTable();
1496 currState->rwTable = currState->rwTable &&
1497 currState->longDesc.rwTable();
1498 currState->userTable = currState->userTable &&
1499 currState->longDesc.userTable();
1500 currState->xnTable = currState->xnTable ||
1501 currState->longDesc.xnTable();
1502 currState->pxnTable = currState->pxnTable ||
1503 currState->longDesc.pxnTable();
1504
1505 // Set up next level lookup
1506 Addr next_desc_addr = currState->longDesc.nextDescAddr(
1507 currState->vaddr);
1508
1509 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1510 currState->longDesc.lookupLevel,
1511 currState->longDesc.lookupLevel + 1,
1512 next_desc_addr,
1513 currState->secureLookup ? "s" : "ns");
1514
1515 // Check for address size fault
1516 if (currState->aarch64 && checkAddrSizeFaultAArch64(
1517 next_desc_addr, currState->physAddrRange)) {
1518 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1519 currState->longDesc.lookupLevel);
1520 if (currState->isFetch)
1521 currState->fault = new PrefetchAbort(
1522 currState->vaddr_tainted,
1523 ArmFault::AddressSizeLL
1524 + currState->longDesc.lookupLevel,
1525 isStage2,
1526 ArmFault::LpaeTran);
1527 else
1528 currState->fault = new DataAbort(
1529 currState->vaddr_tainted,
1530 TlbEntry::DomainType::NoAccess, currState->isWrite,
1531 ArmFault::AddressSizeLL
1532 + currState->longDesc.lookupLevel,
1533 isStage2,
1534 ArmFault::LpaeTran);
1535 return;
1536 }
1537
1538 // Trickbox address check
1539 currState->fault = tlb->walkTrickBoxCheck(
1540 next_desc_addr, currState->vaddr,
1541 currState->vaddr, sizeof(uint64_t),
1542 currState->isFetch, currState->isWrite,
1543 TlbEntry::DomainType::Client,
1544 toLookupLevel(currState->longDesc.lookupLevel +1));
1545
1546 if (currState->fault) {
1547 if (!currState->timing) {
1548 currState->tc = NULL;
1549 currState->req = NULL;
1550 }
1551 return;
1552 }
1553
1554 Request::Flags flag = 0;
1555 if (currState->secureLookup)
1556 flag.set(Request::SECURE);
1557
1558 currState->longDesc.lookupLevel =
1559 (LookupLevel) (currState->longDesc.lookupLevel + 1);
1560 Event *event = NULL;
1561 switch (currState->longDesc.lookupLevel) {
1562 case L1:
1563 assert(currState->aarch64);
1564 event = &doL1LongDescEvent;
1565 break;
1566 case L2:
1567 event = &doL2LongDescEvent;
1568 break;
1569 case L3:
1570 event = &doL3LongDescEvent;
1571 break;
1572 default:
1573 panic("Wrong lookup level in table walk\n");
1574 break;
1575 }
1576
1577 bool delayed;
1578 delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1579 sizeof(uint64_t), flag, -1, event,
1580 &TableWalker::doLongDescriptor);
1581 if (delayed) {
1582 currState->delayed = true;
1583 }
1584 }
1585 return;
1586 default:
1587 panic("A new type in a 2 bit field?\n");
1588 }
1589}
1590
1591void
1592TableWalker::doL2Descriptor()
1593{
1594 if (currState->fault != NoFault) {
1595 return;
1596 }
1597
1598 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1599 currState->vaddr_tainted, currState->l2Desc.data);
1600 TlbEntry te;
1601
1602 if (currState->l2Desc.invalid()) {
1603 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1604 if (!currState->timing) {
1605 currState->tc = NULL;
1606 currState->req = NULL;
1607 }
1608 if (currState->isFetch)
1609 currState->fault =
1610 new PrefetchAbort(currState->vaddr_tainted,
1611 ArmFault::TranslationLL + L2,
1612 isStage2,
1613 ArmFault::VmsaTran);
1614 else
1615 currState->fault =
1616 new DataAbort(currState->vaddr_tainted, currState->l1Desc.domain(),
1617 currState->isWrite, ArmFault::TranslationLL + L2,
1618 isStage2,
1619 ArmFault::VmsaTran);
1620 return;
1621 }
1622
1623 if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1624 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled
1625 * if set, do l2.Desc.setAp0() instead of generating AccessFlag0
1626 */
1627 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1628 currState->sctlr.afe, currState->l2Desc.ap());
1629
1630 currState->fault =
1631 new DataAbort(currState->vaddr_tainted,
1632 TlbEntry::DomainType::NoAccess, currState->isWrite,
1633 ArmFault::AccessFlagLL + L2, isStage2,
1634 ArmFault::VmsaTran);
1635 }
1636
1637 insertTableEntry(currState->l2Desc, false);
1638}
1639
// Timing-mode completion handler invoked when an L1 descriptor fetch
// finishes: processes the descriptor, then either reports a fault, replays
// the translation, or queues the walk state for the pending L2 fetch. Owns
// the lifetime of the WalkerState (deletes it unless an L2 step follows).
void
TableWalker::doL1DescriptorWrapper()
{
    currState = stateQueues[L1].front();
    currState->delayed = false;
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }


    DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data);
    DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",currState->l1Desc.data);

    DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
    doL1Descriptor();

    stateQueues[L1].pop_front();
    completeDrain();
    // Check if fault was generated
    if (currState->fault != NoFault) {
        // Report the fault to the requester and retire this walk
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    }
    else if (!currState->delayed) {
        // delay is not set so there is no L2 to do
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            // Replay the translation; it should now hit in the TLB
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            currState->fault = tlb->translateTiming(currState->req, currState->tc,
                currState->transState, currState->mode);
        }

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        // need to do L2 descriptor
        stateQueues[L2].push_back(currState);
    }
    currState = NULL;
}
1695
// Timing-mode completion handler invoked when an L2 descriptor fetch
// finishes: processes the descriptor, reports a fault or replays the
// translation, then always retires and deletes the walk state (an L2
// descriptor is the last step of a short-descriptor walk).
void
TableWalker::doL2DescriptorWrapper()
{
    currState = stateQueues[L2].front();
    assert(currState->delayed);
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doL2Descriptor();

    // Check if fault was generated
    if (currState->fault != NoFault) {
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);
    }
    else {
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            // Replay the translation; it should now hit in the TLB
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            currState->fault = tlb->translateTiming(currState->req,
                currState->tc, currState->transState, currState->mode);
        }
    }


    stateQueues[L2].pop_front();
    completeDrain();
    pending = false;
    nextWalk(currState->tc);

    currState->req = NULL;
    currState->tc = NULL;
    currState->delayed = false;

    delete currState;
    currState = NULL;
}
1738
// Event trampoline: long-descriptor fetch at level 0 completed.
void
TableWalker::doL0LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L0);
}
1744
// Event trampoline: long-descriptor fetch at level 1 completed.
void
TableWalker::doL1LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L1);
}
1750
// Event trampoline: long-descriptor fetch at level 2 completed.
void
TableWalker::doL2LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L2);
}
1756
// Event trampoline: long-descriptor fetch at level 3 completed.
void
TableWalker::doL3LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L3);
}
1762
// Timing-mode completion handler shared by all long-descriptor levels:
// processes the fetched descriptor, then either reports a fault, replays
// the translation, or re-queues the walk state for the next level that
// doLongDescriptor() scheduled. Owns the WalkerState lifetime.
void
TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
{
    currState = stateQueues[curr_lookup_level].front();
    assert(curr_lookup_level == currState->longDesc.lookupLevel);
    currState->delayed = false;

    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    // May set currState->fault, insert a TLB entry, or (for a Table
    // descriptor) bump longDesc.lookupLevel and set currState->delayed
    doLongDescriptor();

    stateQueues[curr_lookup_level].pop_front();

    if (currState->fault != NoFault) {
        // A fault was generated
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else if (!currState->delayed) {
        // No additional lookups required
        // Don't finish the translation if a stage 2 look up is underway
        if (!currState->doingStage2) {
            // Replay the translation; it should now hit in the TLB
            DPRINTF(TLBVerbose, "calling translateTiming again\n");
            currState->fault = tlb->translateTiming(currState->req, currState->tc,
                                                    currState->transState,
                                                    currState->mode);
        }

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
            panic("Max. number of lookups already reached in table walk\n");
        // Need to perform additional lookups
        stateQueues[currState->longDesc.lookupLevel].push_back(currState);
    }
    currState = NULL;
}
1819
1820
1821void
1822TableWalker::nextWalk(ThreadContext *tc)
1823{
1824 if (pendingQueue.size())
1825 schedule(doProcessEvent, clockEdge(Cycles(1)));
1826}
1827
// Fetch a page table descriptor of numBytes into 'data', either through the
// stage 2 MMU (when the walk's descriptor accesses are themselves subject to
// a second-stage translation) or directly over the walker's port. In timing
// mode the fetch completes asynchronously via 'event' and, if queueIndex is
// non-negative, the walk state is parked on that state queue (currState is
// handed off and nulled); otherwise 'doDescriptor' is invoked synchronously.
// Returns true when the fetch is timing-mode (i.e. completes later).
bool
TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
                             Request::Flags flags, int queueIndex, Event *event,
                             void (TableWalker::*doDescriptor)())
{
    bool isTiming = currState->timing;

    // do the requests for the page table descriptors have to go through the
    // second stage MMU
    if (currState->stage2Req) {
        Fault fault;
        flags = flags | TLB::MustBeOne;

        if (isTiming) {
            // The Stage2Translation is owned by currState and is freed by
            // the completion wrapper
            Stage2MMU::Stage2Translation *tran = new
                Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
                                             currState->vaddr);
            currState->stage2Tran = tran;
            stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
                                     flags, masterId);
            fault = tran->fault;
        } else {
            fault = stage2Mmu->readDataUntimed(currState->tc,
                currState->vaddr, descAddr, data, numBytes, flags, masterId,
                currState->functional);
        }

        if (fault != NoFault) {
            currState->fault = fault;
        }
        if (isTiming) {
            if (queueIndex >= 0) {
                // Park the walk state until the completion event fires
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else {
            // Atomic/functional: the data is already available, process it
            (this->*doDescriptor)();
        }
    } else {
        if (isTiming) {
            port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            if (queueIndex >= 0) {
                // Park the walk state until the completion event fires
                DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else if (!currState->functional) {
            // Atomic mode: blocking DMA read, then process immediately
            port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
                           currState->tc->getCpuPtr()->clockPeriod(), flags);
            (this->*doDescriptor)();
        } else {
            // Functional mode: build a one-off functional read packet
            RequestPtr req = new Request(descAddr, numBytes, flags, masterId);
            req->taskId(ContextSwitchTaskId::DMA);
            PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
            pkt->dataStatic(data);
            port.sendFunctional(pkt);
            (this->*doDescriptor)();
            delete req;
            delete pkt;
        }
    }
    return (isTiming);
}
1895
// Build a TLB entry from a completed leaf descriptor (short or long format)
// and insert it into the TLB. For long descriptors the hierarchical
// permission flags accumulated in currState (xnTable, pxnTable, rwTable,
// userTable, secureLookup) are folded into the entry.
void
TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
{
    TlbEntry te;

    // Create and fill a new page table entry
    te.valid          = true;
    te.longDescFormat = longDescriptor;
    te.isHyp          = currState->isHyp;
    te.asid           = currState->asid;
    te.vmid           = currState->vmid;
    te.N              = descriptor.offsetBits();
    te.vpn            = currState->vaddr >> te.N;
    te.size           = (1<<te.N) - 1;
    te.pfn            = descriptor.pfn();
    te.domain         = descriptor.domain();
    te.lookupLevel    = descriptor.lookupLevel;
    te.ns             = !descriptor.secure(haveSecurity, currState) || isStage2;
    te.nstid          = !currState->isSecure;
    te.xn             = descriptor.xn();
    if (currState->aarch64)
        te.el         = currState->el;
    else
        te.el         = 1;

    // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
    // as global
    te.global         = descriptor.global(currState) || isStage2;
    if (longDescriptor) {
        LongDescriptor lDescriptor =
            dynamic_cast<LongDescriptor &>(descriptor);

        // Fold in the hierarchical (table-level) execute-never flags
        te.xn |= currState->xnTable;
        te.pxn = currState->pxnTable || lDescriptor.pxn();
        if (isStage2) {
            // this is actually the HAP field, but its stored in the same bit
            // possitions as the AP field in a stage 1 translation.
            te.hap = lDescriptor.ap();
        } else {
            // Combine the descriptor AP with the hierarchical rwTable and
            // userTable restrictions accumulated during the walk
            te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
                (currState->userTable && (descriptor.ap() & 0x1));
        }
        if (currState->aarch64)
            memAttrsAArch64(currState->tc, te, currState->longDesc.attrIndx(),
                            currState->longDesc.sh());
        else
            memAttrsLPAE(currState->tc, te, lDescriptor);
    } else {
        te.ap = descriptor.ap();
        memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
                 descriptor.shareable());
    }

    // Debug output
    DPRINTF(TLB, descriptor.dbgHeader().c_str());
    DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
            te.N, te.pfn, te.size, te.global, te.valid);
    DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
            "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
            te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
            te.nonCacheable, te.ns);
    DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
            descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
            descriptor.getRawData());

    // Insert the entry into the TLB
    tlb->insert(currState->vaddr, te);
    if (!currState->timing) {
        currState->tc = NULL;
        currState->req = NULL;
    }
}
1968
// Factory method called by the generated params code to instantiate the
// TableWalker SimObject from its Python configuration.
ArmISA::TableWalker *
ArmTableWalkerParams::create()
{
    return new ArmISA::TableWalker(this);
}
1974
1975LookupLevel
1976TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
1977{
1978 switch (lookup_level_as_int) {
1979 case L1:
1980 return L1;
1981 case L2:
1982 return L2;
1983 case L3:
1984 return L3;
1985 default:
1986 panic("Invalid lookup level conversion");
1987 }
1988}