// table_walker.cc revision 10109
1/* 2 * Copyright (c) 2010, 2012-2013 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions are 16 * met: redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer; 18 * redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution; 21 * neither the name of the copyright holders nor the names of its 22 * contributors may be used to endorse or promote products derived from 23 * this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 28 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 36 * 37 * Authors: Ali Saidi 38 * Giacomo Gabrielli 39 */ 40 41#include "arch/arm/faults.hh" 42#include "arch/arm/stage2_mmu.hh" 43#include "arch/arm/system.hh" 44#include "arch/arm/table_walker.hh" 45#include "arch/arm/tlb.hh" 46#include "cpu/base.hh" 47#include "cpu/thread_context.hh" 48#include "debug/Checkpoint.hh" 49#include "debug/Drain.hh" 50#include "debug/TLB.hh" 51#include "debug/TLBVerbose.hh" 52#include "sim/system.hh" 53 54using namespace ArmISA; 55 56TableWalker::TableWalker(const Params *p) 57 : MemObject(p), port(this, p->sys), drainManager(NULL), 58 stage2Mmu(NULL), isStage2(p->is_stage2), tlb(NULL), 59 currState(NULL), pending(false), masterId(p->sys->getMasterId(name())), 60 numSquashable(p->num_squash_per_cycle), 61 doL1DescEvent(this), doL2DescEvent(this), 62 doL0LongDescEvent(this), doL1LongDescEvent(this), doL2LongDescEvent(this), 63 doL3LongDescEvent(this), 64 doProcessEvent(this) 65{ 66 sctlr = 0; 67 68 // Cache system-level properties 69 if (FullSystem) { 70 armSys = dynamic_cast<ArmSystem *>(p->sys); 71 assert(armSys); 72 haveSecurity = armSys->haveSecurity(); 73 _haveLPAE = armSys->haveLPAE(); 74 _haveVirtualization = armSys->haveVirtualization(); 75 physAddrRange = armSys->physAddrRange(); 76 _haveLargeAsid64 = armSys->haveLargeAsid64(); 77 } else { 78 armSys = NULL; 79 haveSecurity = _haveLPAE = _haveVirtualization = false; 80 _haveLargeAsid64 = false; 81 physAddrRange = 32; 82 } 83 84} 85 
86TableWalker::~TableWalker() 87{ 88 ; 89} 90 91TableWalker::WalkerState::WalkerState() : stage2Tran(NULL), l2Desc(l1Desc) 92{ 93} 94 95void 96TableWalker::completeDrain() 97{ 98 if (drainManager && stateQueues[L1].empty() && stateQueues[L2].empty() && 99 pendingQueue.empty()) { 100 setDrainState(Drainable::Drained); 101 DPRINTF(Drain, "TableWalker done draining, processing drain event\n"); 102 drainManager->signalDrainDone(); 103 drainManager = NULL; 104 } 105} 106 107unsigned int 108TableWalker::drain(DrainManager *dm) 109{ 110 unsigned int count = port.drain(dm); 111 112 bool state_queues_not_empty = false; 113 114 for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) { 115 if (!stateQueues[i].empty()) { 116 state_queues_not_empty = true; 117 break; 118 } 119 } 120 121 if (state_queues_not_empty || pendingQueue.size()) { 122 drainManager = dm; 123 setDrainState(Drainable::Draining); 124 DPRINTF(Drain, "TableWalker not drained\n"); 125 126 // return port drain count plus the table walker itself needs to drain 127 return count + 1; 128 } else { 129 setDrainState(Drainable::Drained); 130 DPRINTF(Drain, "TableWalker free, no need to drain\n"); 131 132 // table walker is drained, but its ports may still need to be drained 133 return count; 134 } 135} 136 137void 138TableWalker::drainResume() 139{ 140 Drainable::drainResume(); 141 if (params()->sys->isTimingMode() && currState) { 142 delete currState; 143 currState = NULL; 144 } 145} 146 147BaseMasterPort& 148TableWalker::getMasterPort(const std::string &if_name, PortID idx) 149{ 150 if (if_name == "port") { 151 return port; 152 } 153 return MemObject::getMasterPort(if_name, idx); 154} 155 156Fault 157TableWalker::walk(RequestPtr _req, ThreadContext *_tc, uint16_t _asid, 158 uint8_t _vmid, bool _isHyp, TLB::Mode _mode, 159 TLB::Translation *_trans, bool _timing, bool _functional, 160 bool secure, TLB::ArmTranslationType tranType) 161{ 162 assert(!(_functional && _timing)); 163 WalkerState *savedCurrState = NULL; 164 165 if 
(!currState && !_functional) { 166 // For atomic mode, a new WalkerState instance should be only created 167 // once per TLB. For timing mode, a new instance is generated for every 168 // TLB miss. 169 DPRINTF(TLBVerbose, "creating new instance of WalkerState\n"); 170 171 currState = new WalkerState(); 172 currState->tableWalker = this; 173 } else if (_functional) { 174 // If we are mixing functional mode with timing (or even 175 // atomic), we need to to be careful and clean up after 176 // ourselves to not risk getting into an inconsistent state. 177 DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n"); 178 savedCurrState = currState; 179 currState = new WalkerState(); 180 currState->tableWalker = this; 181 } else if (_timing) { 182 // This is a translation that was completed and then faulted again 183 // because some underlying parameters that affect the translation 184 // changed out from under us (e.g. asid). It will either be a 185 // misprediction, in which case nothing will happen or we'll use 186 // this fault to re-execute the faulting instruction which should clean 187 // up everything. 
188 if (currState->vaddr_tainted == _req->getVaddr()) { 189 return new ReExec; 190 } 191 } 192 193 currState->tc = _tc; 194 currState->aarch64 = opModeIs64(currOpMode(_tc)); 195 currState->el = currEL(_tc); 196 currState->transState = _trans; 197 currState->req = _req; 198 currState->fault = NoFault; 199 currState->asid = _asid; 200 currState->vmid = _vmid; 201 currState->isHyp = _isHyp; 202 currState->timing = _timing; 203 currState->functional = _functional; 204 currState->mode = _mode; 205 currState->tranType = tranType; 206 currState->isSecure = secure; 207 currState->physAddrRange = physAddrRange; 208 209 /** @todo These should be cached or grabbed from cached copies in 210 the TLB, all these miscreg reads are expensive */ 211 currState->vaddr_tainted = currState->req->getVaddr(); 212 if (currState->aarch64) 213 currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted, 214 currState->tc, currState->el); 215 else 216 currState->vaddr = currState->vaddr_tainted; 217 218 if (currState->aarch64) { 219 switch (currState->el) { 220 case EL0: 221 case EL1: 222 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1); 223 currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL1); 224 break; 225 // @todo: uncomment this to enable Virtualization 226 // case EL2: 227 // assert(haveVirtualization); 228 // currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2); 229 // currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL2); 230 // break; 231 case EL3: 232 assert(haveSecurity); 233 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3); 234 currState->ttbcr = currState->tc->readMiscReg(MISCREG_TCR_EL3); 235 break; 236 default: 237 panic("Invalid exception level"); 238 break; 239 } 240 } else { 241 currState->sctlr = currState->tc->readMiscReg(flattenMiscRegNsBanked( 242 MISCREG_SCTLR, currState->tc, !currState->isSecure)); 243 currState->ttbcr = currState->tc->readMiscReg(flattenMiscRegNsBanked( 244 MISCREG_TTBCR, currState->tc, 
!currState->isSecure)); 245 currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR); 246 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR); 247 currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR); 248 } 249 sctlr = currState->sctlr; 250 251 currState->isFetch = (currState->mode == TLB::Execute); 252 currState->isWrite = (currState->mode == TLB::Write); 253 254 // We only do a second stage of translation if we're not secure, or in 255 // hyp mode, the second stage MMU is enabled, and this table walker 256 // instance is the first stage. 257 currState->doingStage2 = false; 258 // @todo: for now disable this in AArch64 (HCR is not set) 259 currState->stage2Req = !currState->aarch64 && currState->hcr.vm && 260 !isStage2 && !currState->isSecure && !currState->isHyp; 261 262 bool long_desc_format = currState->aarch64 || 263 (_haveLPAE && currState->ttbcr.eae) || 264 _isHyp || isStage2; 265 266 if (long_desc_format) { 267 // Helper variables used for hierarchical permissions 268 currState->secureLookup = currState->isSecure; 269 currState->rwTable = true; 270 currState->userTable = true; 271 currState->xnTable = false; 272 currState->pxnTable = false; 273 } 274 275 if (!currState->timing) { 276 Fault fault = NoFault; 277 if (currState->aarch64) 278 fault = processWalkAArch64(); 279 else if (long_desc_format) 280 fault = processWalkLPAE(); 281 else 282 fault = processWalk(); 283 284 // If this was a functional non-timing access restore state to 285 // how we found it. 
286 if (currState->functional) { 287 delete currState; 288 currState = savedCurrState; 289 } 290 return fault; 291 } 292 293 if (pending || pendingQueue.size()) { 294 pendingQueue.push_back(currState); 295 currState = NULL; 296 } else { 297 pending = true; 298 if (currState->aarch64) 299 return processWalkAArch64(); 300 else if (long_desc_format) 301 return processWalkLPAE(); 302 else 303 return processWalk(); 304 } 305 306 return NoFault; 307} 308 309void 310TableWalker::processWalkWrapper() 311{ 312 assert(!currState); 313 assert(pendingQueue.size()); 314 currState = pendingQueue.front(); 315 316 ExceptionLevel target_el = EL0; 317 if (currState->aarch64) 318 target_el = currEL(currState->tc); 319 else 320 target_el = EL1; 321 322 // Check if a previous walk filled this request already 323 // @TODO Should this always be the TLB or should we look in the stage2 TLB? 324 TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid, 325 currState->vmid, currState->isHyp, currState->isSecure, true, false, 326 target_el); 327 328 // Check if we still need to have a walk for this request. If the requesting 329 // instruction has been squashed, or a previous walk has filled the TLB with 330 // a match, we just want to get rid of the walk. The latter could happen 331 // when there are multiple outstanding misses to a single page and a 332 // previous request has been successfully translated. 333 if (!currState->transState->squashed() && !te) { 334 // We've got a valid request, lets process it 335 pending = true; 336 pendingQueue.pop_front(); 337 if (currState->aarch64) 338 processWalkAArch64(); 339 else if ((_haveLPAE && currState->ttbcr.eae) || currState->isHyp || isStage2) 340 processWalkLPAE(); 341 else 342 processWalk(); 343 return; 344 } 345 346 347 // If the instruction that we were translating for has been 348 // squashed we shouldn't bother. 
349 unsigned num_squashed = 0; 350 ThreadContext *tc = currState->tc; 351 while ((num_squashed < numSquashable) && currState && 352 (currState->transState->squashed() || te)) { 353 pendingQueue.pop_front(); 354 num_squashed++; 355 356 DPRINTF(TLB, "Squashing table walk for address %#x\n", 357 currState->vaddr_tainted); 358 359 if (currState->transState->squashed()) { 360 // finish the translation which will delete the translation object 361 currState->transState->finish(new UnimpFault("Squashed Inst"), 362 currState->req, currState->tc, currState->mode); 363 } else { 364 // translate the request now that we know it will work 365 tlb->translateTiming(currState->req, currState->tc, 366 currState->transState, currState->mode); 367 368 } 369 370 // delete the current request 371 delete currState; 372 373 // peak at the next one 374 if (pendingQueue.size()) { 375 currState = pendingQueue.front(); 376 te = tlb->lookup(currState->vaddr, currState->asid, 377 currState->vmid, currState->isHyp, currState->isSecure, true, 378 false, target_el); 379 } else { 380 // Terminate the loop, nothing more to do 381 currState = NULL; 382 } 383 } 384 385 // if we've still got pending translations schedule more work 386 nextWalk(tc); 387 currState = NULL; 388 completeDrain(); 389} 390 391Fault 392TableWalker::processWalk() 393{ 394 Addr ttbr = 0; 395 396 // If translation isn't enabled, we shouldn't be here 397 assert(currState->sctlr.m || isStage2); 398 399 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n", 400 currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31, 401 32 - currState->ttbcr.n)); 402 403 if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31, 404 32 - currState->ttbcr.n)) { 405 DPRINTF(TLB, " - Selecting TTBR0\n"); 406 // Check if table walk is allowed when Security Extensions are enabled 407 if (haveSecurity && currState->ttbcr.pd0) { 408 if (currState->isFetch) 409 return new PrefetchAbort(currState->vaddr_tainted, 410 
ArmFault::TranslationLL + L1, 411 isStage2, 412 ArmFault::VmsaTran); 413 else 414 return new DataAbort(currState->vaddr_tainted, 415 TlbEntry::DomainType::NoAccess, currState->isWrite, 416 ArmFault::TranslationLL + L1, isStage2, 417 ArmFault::VmsaTran); 418 } 419 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked( 420 MISCREG_TTBR0, currState->tc, !currState->isSecure)); 421 } else { 422 DPRINTF(TLB, " - Selecting TTBR1\n"); 423 // Check if table walk is allowed when Security Extensions are enabled 424 if (haveSecurity && currState->ttbcr.pd1) { 425 if (currState->isFetch) 426 return new PrefetchAbort(currState->vaddr_tainted, 427 ArmFault::TranslationLL + L1, 428 isStage2, 429 ArmFault::VmsaTran); 430 else 431 return new DataAbort(currState->vaddr_tainted, 432 TlbEntry::DomainType::NoAccess, currState->isWrite, 433 ArmFault::TranslationLL + L1, isStage2, 434 ArmFault::VmsaTran); 435 } 436 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked( 437 MISCREG_TTBR1, currState->tc, !currState->isSecure)); 438 currState->ttbcr.n = 0; 439 } 440 441 Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) | 442 (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2); 443 DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr, 444 currState->isSecure ? 
"s" : "ns"); 445 446 // Trickbox address check 447 Fault f; 448 f = tlb->walkTrickBoxCheck(l1desc_addr, currState->isSecure, 449 currState->vaddr, sizeof(uint32_t), currState->isFetch, 450 currState->isWrite, TlbEntry::DomainType::NoAccess, L1); 451 if (f) { 452 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted); 453 if (currState->timing) { 454 pending = false; 455 nextWalk(currState->tc); 456 currState = NULL; 457 } else { 458 currState->tc = NULL; 459 currState->req = NULL; 460 } 461 return f; 462 } 463 464 Request::Flags flag = 0; 465 if (currState->sctlr.c == 0) { 466 flag = Request::UNCACHEABLE; 467 } 468 469 bool delayed; 470 delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data, 471 sizeof(uint32_t), flag, L1, &doL1DescEvent, 472 &TableWalker::doL1Descriptor); 473 if (!delayed) { 474 f = currState->fault; 475 } 476 477 return f; 478} 479 480Fault 481TableWalker::processWalkLPAE() 482{ 483 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr; 484 int tsz, n; 485 LookupLevel start_lookup_level = L1; 486 487 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n", 488 currState->vaddr_tainted, currState->ttbcr); 489 490 Request::Flags flag = 0; 491 if (currState->isSecure) 492 flag.set(Request::SECURE); 493 494 // work out which base address register to use, if in hyp mode we always 495 // use HTTBR 496 if (isStage2) { 497 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n"); 498 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR); 499 tsz = sext<4>(currState->vtcr.t0sz); 500 start_lookup_level = currState->vtcr.sl0 ? 
L1 : L2; 501 } else if (currState->isHyp) { 502 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n"); 503 ttbr = currState->tc->readMiscReg(MISCREG_HTTBR); 504 tsz = currState->htcr.t0sz; 505 } else { 506 assert(_haveLPAE && currState->ttbcr.eae); 507 508 // Determine boundaries of TTBR0/1 regions 509 if (currState->ttbcr.t0sz) 510 ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1; 511 else if (currState->ttbcr.t1sz) 512 ttbr0_max = (1ULL << 32) - 513 (1ULL << (32 - currState->ttbcr.t1sz)) - 1; 514 else 515 ttbr0_max = (1ULL << 32) - 1; 516 if (currState->ttbcr.t1sz) 517 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz)); 518 else 519 ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz)); 520 521 // The following code snippet selects the appropriate translation table base 522 // address (TTBR0 or TTBR1) and the appropriate starting lookup level 523 // depending on the address range supported by the translation table (ARM 524 // ARM issue C B3.6.4) 525 if (currState->vaddr <= ttbr0_max) { 526 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n"); 527 // Check if table walk is allowed 528 if (currState->ttbcr.epd0) { 529 if (currState->isFetch) 530 return new PrefetchAbort(currState->vaddr_tainted, 531 ArmFault::TranslationLL + L1, 532 isStage2, 533 ArmFault::LpaeTran); 534 else 535 return new DataAbort(currState->vaddr_tainted, 536 TlbEntry::DomainType::NoAccess, 537 currState->isWrite, 538 ArmFault::TranslationLL + L1, 539 isStage2, 540 ArmFault::LpaeTran); 541 } 542 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked( 543 MISCREG_TTBR0, currState->tc, !currState->isSecure)); 544 tsz = currState->ttbcr.t0sz; 545 if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GB 546 start_lookup_level = L2; 547 } else if (currState->vaddr >= ttbr1_min) { 548 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n"); 549 // Check if table walk is allowed 550 if (currState->ttbcr.epd1) { 551 if (currState->isFetch) 552 return new PrefetchAbort(currState->vaddr_tainted, 553 
ArmFault::TranslationLL + L1, 554 isStage2, 555 ArmFault::LpaeTran); 556 else 557 return new DataAbort(currState->vaddr_tainted, 558 TlbEntry::DomainType::NoAccess, 559 currState->isWrite, 560 ArmFault::TranslationLL + L1, 561 isStage2, 562 ArmFault::LpaeTran); 563 } 564 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked( 565 MISCREG_TTBR1, currState->tc, !currState->isSecure)); 566 tsz = currState->ttbcr.t1sz; 567 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30)) // Lower limit >= 3 GB 568 start_lookup_level = L2; 569 } else { 570 // Out of boundaries -> translation fault 571 if (currState->isFetch) 572 return new PrefetchAbort(currState->vaddr_tainted, 573 ArmFault::TranslationLL + L1, 574 isStage2, 575 ArmFault::LpaeTran); 576 else 577 return new DataAbort(currState->vaddr_tainted, 578 TlbEntry::DomainType::NoAccess, 579 currState->isWrite, ArmFault::TranslationLL + L1, 580 isStage2, ArmFault::LpaeTran); 581 } 582 583 } 584 585 // Perform lookup (ARM ARM issue C B3.6.6) 586 if (start_lookup_level == L1) { 587 n = 5 - tsz; 588 desc_addr = mbits(ttbr, 39, n) | 589 (bits(currState->vaddr, n + 26, 30) << 3); 590 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n", 591 desc_addr, currState->isSecure ? "s" : "ns"); 592 } else { 593 // Skip first-level lookup 594 n = (tsz >= 2 ? 14 - tsz : 12); 595 desc_addr = mbits(ttbr, 39, n) | 596 (bits(currState->vaddr, n + 17, 21) << 3); 597 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n", 598 desc_addr, currState->isSecure ? 
"s" : "ns"); 599 } 600 601 // Trickbox address check 602 Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure, 603 currState->vaddr, sizeof(uint64_t), currState->isFetch, 604 currState->isWrite, TlbEntry::DomainType::NoAccess, 605 start_lookup_level); 606 if (f) { 607 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted); 608 if (currState->timing) { 609 pending = false; 610 nextWalk(currState->tc); 611 currState = NULL; 612 } else { 613 currState->tc = NULL; 614 currState->req = NULL; 615 } 616 return f; 617 } 618 619 if (currState->sctlr.c == 0) { 620 flag = Request::UNCACHEABLE; 621 } 622 623 if (currState->isSecure) 624 flag.set(Request::SECURE); 625 626 currState->longDesc.lookupLevel = start_lookup_level; 627 currState->longDesc.aarch64 = false; 628 currState->longDesc.largeGrain = false; 629 currState->longDesc.grainSize = 12; 630 631 Event *event = start_lookup_level == L1 ? (Event *) &doL1LongDescEvent 632 : (Event *) &doL2LongDescEvent; 633 634 bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data, 635 sizeof(uint64_t), flag, start_lookup_level, 636 event, &TableWalker::doLongDescriptor); 637 if (!delayed) { 638 f = currState->fault; 639 } 640 641 return f; 642} 643 644unsigned 645TableWalker::adjustTableSizeAArch64(unsigned tsz) 646{ 647 if (tsz < 25) 648 return 25; 649 if (tsz > 48) 650 return 48; 651 return tsz; 652} 653 654bool 655TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange) 656{ 657 return (currPhysAddrRange != MaxPhysAddrRange && 658 bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange)); 659} 660 661Fault 662TableWalker::processWalkAArch64() 663{ 664 assert(currState->aarch64); 665 666 DPRINTF(TLB, "Beginning table walk for address %#llx, TTBCR: %#llx\n", 667 currState->vaddr_tainted, currState->ttbcr); 668 669 // Determine TTBR, table size, granule size and phys. 
address range 670 Addr ttbr = 0; 671 int tsz = 0, ps = 0; 672 bool large_grain = false; 673 bool fault = false; 674 switch (currState->el) { 675 case EL0: 676 case EL1: 677 switch (bits(currState->vaddr, 63,48)) { 678 case 0: 679 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n"); 680 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1); 681 tsz = adjustTableSizeAArch64(64 - currState->ttbcr.t0sz); 682 large_grain = currState->ttbcr.tg0; 683 if (bits(currState->vaddr, 63, tsz) != 0x0 || 684 currState->ttbcr.epd0) 685 fault = true; 686 break; 687 case 0xffff: 688 DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n"); 689 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1); 690 tsz = adjustTableSizeAArch64(64 - currState->ttbcr.t1sz); 691 large_grain = currState->ttbcr.tg1; 692 if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) || 693 currState->ttbcr.epd1) 694 fault = true; 695 break; 696 default: 697 // top two bytes must be all 0s or all 1s, else invalid addr 698 fault = true; 699 } 700 ps = currState->ttbcr.ips; 701 break; 702 case EL2: 703 case EL3: 704 switch(bits(currState->vaddr, 63,48)) { 705 case 0: 706 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n"); 707 if (currState->el == EL2) 708 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2); 709 else 710 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3); 711 tsz = adjustTableSizeAArch64(64 - currState->ttbcr.t0sz); 712 large_grain = currState->ttbcr.tg0; 713 break; 714 default: 715 // invalid addr if top two bytes are not all 0s 716 fault = true; 717 } 718 ps = currState->ttbcr.ps; 719 break; 720 } 721 722 if (fault) { 723 Fault f; 724 if (currState->isFetch) 725 f = new PrefetchAbort(currState->vaddr_tainted, 726 ArmFault::TranslationLL + L0, isStage2, 727 ArmFault::LpaeTran); 728 else 729 f = new DataAbort(currState->vaddr_tainted, 730 TlbEntry::DomainType::NoAccess, 731 currState->isWrite, 732 ArmFault::TranslationLL + L0, 733 isStage2, ArmFault::LpaeTran); 734 735 if (currState->timing) { 736 pending = false; 737 
nextWalk(currState->tc); 738 currState = NULL; 739 } else { 740 currState->tc = NULL; 741 currState->req = NULL; 742 } 743 return f; 744 745 } 746 747 // Determine starting lookup level 748 LookupLevel start_lookup_level; 749 int grain_size, stride; 750 if (large_grain) { // 64 KB granule 751 grain_size = 16; 752 stride = grain_size - 3; 753 if (tsz > grain_size + 2 * stride) 754 start_lookup_level = L1; 755 else if (tsz > grain_size + stride) 756 start_lookup_level = L2; 757 else 758 start_lookup_level = L3; 759 } else { // 4 KB granule 760 grain_size = 12; 761 stride = grain_size - 3; 762 if (tsz > grain_size + 3 * stride) 763 start_lookup_level = L0; 764 else if (tsz > grain_size + 2 * stride) 765 start_lookup_level = L1; 766 else 767 start_lookup_level = L2; 768 } 769 770 // Determine table base address 771 int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - 772 grain_size; 773 Addr base_addr = mbits(ttbr, 47, base_addr_lo); 774 775 // Determine physical address size and raise an Address Size Fault if 776 // necessary 777 int pa_range = decodePhysAddrRange64(ps); 778 // Clamp to lower limit 779 if (pa_range > physAddrRange) 780 currState->physAddrRange = physAddrRange; 781 else 782 currState->physAddrRange = pa_range; 783 if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) { 784 DPRINTF(TLB, "Address size fault before any lookup\n"); 785 Fault f; 786 if (currState->isFetch) 787 f = new PrefetchAbort(currState->vaddr_tainted, 788 ArmFault::AddressSizeLL + start_lookup_level, 789 isStage2, 790 ArmFault::LpaeTran); 791 else 792 f = new DataAbort(currState->vaddr_tainted, 793 TlbEntry::DomainType::NoAccess, 794 currState->isWrite, 795 ArmFault::AddressSizeLL + start_lookup_level, 796 isStage2, 797 ArmFault::LpaeTran); 798 799 800 if (currState->timing) { 801 pending = false; 802 nextWalk(currState->tc); 803 currState = NULL; 804 } else { 805 currState->tc = NULL; 806 currState->req = NULL; 807 } 808 return f; 809 810 } 811 812 // 
Determine descriptor address 813 Addr desc_addr = base_addr | 814 (bits(currState->vaddr, tsz - 1, 815 stride * (3 - start_lookup_level) + grain_size) << 3); 816 817 // Trickbox address check 818 Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure, 819 currState->vaddr, sizeof(uint64_t), currState->isFetch, 820 currState->isWrite, TlbEntry::DomainType::NoAccess, 821 start_lookup_level); 822 if (f) { 823 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted); 824 if (currState->timing) { 825 pending = false; 826 nextWalk(currState->tc); 827 currState = NULL; 828 } else { 829 currState->tc = NULL; 830 currState->req = NULL; 831 } 832 return f; 833 } 834 835 Request::Flags flag = 0; 836 if (currState->sctlr.c == 0) { 837 flag = Request::UNCACHEABLE; 838 } 839 840 currState->longDesc.lookupLevel = start_lookup_level; 841 currState->longDesc.aarch64 = true; 842 currState->longDesc.largeGrain = large_grain; 843 currState->longDesc.grainSize = grain_size; 844 845 if (currState->timing) { 846 Event *event; 847 switch (start_lookup_level) { 848 case L0: 849 event = (Event *) &doL0LongDescEvent; 850 break; 851 case L1: 852 event = (Event *) &doL1LongDescEvent; 853 break; 854 case L2: 855 event = (Event *) &doL2LongDescEvent; 856 break; 857 case L3: 858 event = (Event *) &doL3LongDescEvent; 859 break; 860 default: 861 panic("Invalid table lookup level"); 862 break; 863 } 864 port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t), event, 865 (uint8_t*) &currState->longDesc.data, 866 currState->tc->getCpuPtr()->clockPeriod(), flag); 867 DPRINTF(TLBVerbose, 868 "Adding to walker fifo: queue size before adding: %d\n", 869 stateQueues[start_lookup_level].size()); 870 stateQueues[start_lookup_level].push_back(currState); 871 currState = NULL; 872 } else if (!currState->functional) { 873 port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t), 874 NULL, (uint8_t*) &currState->longDesc.data, 875 currState->tc->getCpuPtr()->clockPeriod(), 
flag); 876 doLongDescriptor(); 877 f = currState->fault; 878 } else { 879 RequestPtr req = new Request(desc_addr, sizeof(uint64_t), flag, 880 masterId); 881 PacketPtr pkt = new Packet(req, MemCmd::ReadReq); 882 pkt->dataStatic((uint8_t*) &currState->longDesc.data); 883 port.sendFunctional(pkt); 884 doLongDescriptor(); 885 delete req; 886 delete pkt; 887 f = currState->fault; 888 } 889 890 return f; 891} 892 893void 894TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr, 895 uint8_t texcb, bool s) 896{ 897 // Note: tc and sctlr local variables are hiding tc and sctrl class 898 // variables 899 DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s); 900 te.shareable = false; // default value 901 te.nonCacheable = false; 902 te.outerShareable = false; 903 if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) { 904 switch(texcb) { 905 case 0: // Stongly-ordered 906 te.nonCacheable = true; 907 te.mtype = TlbEntry::MemoryType::StronglyOrdered; 908 te.shareable = true; 909 te.innerAttrs = 1; 910 te.outerAttrs = 0; 911 break; 912 case 1: // Shareable Device 913 te.nonCacheable = true; 914 te.mtype = TlbEntry::MemoryType::Device; 915 te.shareable = true; 916 te.innerAttrs = 3; 917 te.outerAttrs = 0; 918 break; 919 case 2: // Outer and Inner Write-Through, no Write-Allocate 920 te.mtype = TlbEntry::MemoryType::Normal; 921 te.shareable = s; 922 te.innerAttrs = 6; 923 te.outerAttrs = bits(texcb, 1, 0); 924 break; 925 case 3: // Outer and Inner Write-Back, no Write-Allocate 926 te.mtype = TlbEntry::MemoryType::Normal; 927 te.shareable = s; 928 te.innerAttrs = 7; 929 te.outerAttrs = bits(texcb, 1, 0); 930 break; 931 case 4: // Outer and Inner Non-cacheable 932 te.nonCacheable = true; 933 te.mtype = TlbEntry::MemoryType::Normal; 934 te.shareable = s; 935 te.innerAttrs = 0; 936 te.outerAttrs = bits(texcb, 1, 0); 937 break; 938 case 5: // Reserved 939 panic("Reserved texcb value!\n"); 940 break; 941 case 6: // Implementation Defined 942 
panic("Implementation-defined texcb value!\n"); 943 break; 944 case 7: // Outer and Inner Write-Back, Write-Allocate 945 te.mtype = TlbEntry::MemoryType::Normal; 946 te.shareable = s; 947 te.innerAttrs = 5; 948 te.outerAttrs = 1; 949 break; 950 case 8: // Non-shareable Device 951 te.nonCacheable = true; 952 te.mtype = TlbEntry::MemoryType::Device; 953 te.shareable = false; 954 te.innerAttrs = 3; 955 te.outerAttrs = 0; 956 break; 957 case 9 ... 15: // Reserved 958 panic("Reserved texcb value!\n"); 959 break; 960 case 16 ... 31: // Cacheable Memory 961 te.mtype = TlbEntry::MemoryType::Normal; 962 te.shareable = s; 963 if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0) 964 te.nonCacheable = true; 965 te.innerAttrs = bits(texcb, 1, 0); 966 te.outerAttrs = bits(texcb, 3, 2); 967 break; 968 default: 969 panic("More than 32 states for 5 bits?\n"); 970 } 971 } else { 972 assert(tc); 973 PRRR prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, 974 currState->tc, !currState->isSecure)); 975 NMRR nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, 976 currState->tc, !currState->isSecure)); 977 DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr); 978 uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0; 979 switch(bits(texcb, 2,0)) { 980 case 0: 981 curr_tr = prrr.tr0; 982 curr_ir = nmrr.ir0; 983 curr_or = nmrr.or0; 984 te.outerShareable = (prrr.nos0 == 0); 985 break; 986 case 1: 987 curr_tr = prrr.tr1; 988 curr_ir = nmrr.ir1; 989 curr_or = nmrr.or1; 990 te.outerShareable = (prrr.nos1 == 0); 991 break; 992 case 2: 993 curr_tr = prrr.tr2; 994 curr_ir = nmrr.ir2; 995 curr_or = nmrr.or2; 996 te.outerShareable = (prrr.nos2 == 0); 997 break; 998 case 3: 999 curr_tr = prrr.tr3; 1000 curr_ir = nmrr.ir3; 1001 curr_or = nmrr.or3; 1002 te.outerShareable = (prrr.nos3 == 0); 1003 break; 1004 case 4: 1005 curr_tr = prrr.tr4; 1006 curr_ir = nmrr.ir4; 1007 curr_or = nmrr.or4; 1008 te.outerShareable = (prrr.nos4 == 0); 1009 break; 1010 case 5: 1011 curr_tr = prrr.tr5; 
1012 curr_ir = nmrr.ir5; 1013 curr_or = nmrr.or5; 1014 te.outerShareable = (prrr.nos5 == 0); 1015 break; 1016 case 6: 1017 panic("Imp defined type\n"); 1018 case 7: 1019 curr_tr = prrr.tr7; 1020 curr_ir = nmrr.ir7; 1021 curr_or = nmrr.or7; 1022 te.outerShareable = (prrr.nos7 == 0); 1023 break; 1024 } 1025 1026 switch(curr_tr) { 1027 case 0: 1028 DPRINTF(TLBVerbose, "StronglyOrdered\n"); 1029 te.mtype = TlbEntry::MemoryType::StronglyOrdered; 1030 te.nonCacheable = true; 1031 te.innerAttrs = 1; 1032 te.outerAttrs = 0; 1033 te.shareable = true; 1034 break; 1035 case 1: 1036 DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n", 1037 prrr.ds1, prrr.ds0, s); 1038 te.mtype = TlbEntry::MemoryType::Device; 1039 te.nonCacheable = true; 1040 te.innerAttrs = 3; 1041 te.outerAttrs = 0; 1042 if (prrr.ds1 && s) 1043 te.shareable = true; 1044 if (prrr.ds0 && !s) 1045 te.shareable = true; 1046 break; 1047 case 2: 1048 DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n", 1049 prrr.ns1, prrr.ns0, s); 1050 te.mtype = TlbEntry::MemoryType::Normal; 1051 if (prrr.ns1 && s) 1052 te.shareable = true; 1053 if (prrr.ns0 && !s) 1054 te.shareable = true; 1055 break; 1056 case 3: 1057 panic("Reserved type"); 1058 } 1059 1060 if (te.mtype == TlbEntry::MemoryType::Normal){ 1061 switch(curr_ir) { 1062 case 0: 1063 te.nonCacheable = true; 1064 te.innerAttrs = 0; 1065 break; 1066 case 1: 1067 te.innerAttrs = 5; 1068 break; 1069 case 2: 1070 te.innerAttrs = 6; 1071 break; 1072 case 3: 1073 te.innerAttrs = 7; 1074 break; 1075 } 1076 1077 switch(curr_or) { 1078 case 0: 1079 te.nonCacheable = true; 1080 te.outerAttrs = 0; 1081 break; 1082 case 1: 1083 te.outerAttrs = 1; 1084 break; 1085 case 2: 1086 te.outerAttrs = 2; 1087 break; 1088 case 3: 1089 te.outerAttrs = 3; 1090 break; 1091 } 1092 } 1093 } 1094 DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, \ 1095 outerAttrs: %d\n", 1096 te.shareable, te.innerAttrs, te.outerAttrs); 1097 te.setAttributes(false); 1098} 1099 1100void 
1101TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te, 1102 LongDescriptor &lDescriptor) 1103{ 1104 assert(_haveLPAE); 1105 1106 uint8_t attr; 1107 uint8_t sh = lDescriptor.sh(); 1108 // Different format and source of attributes if this is a stage 2 1109 // translation 1110 if (isStage2) { 1111 attr = lDescriptor.memAttr(); 1112 uint8_t attr_3_2 = (attr >> 2) & 0x3; 1113 uint8_t attr_1_0 = attr & 0x3; 1114 1115 DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh); 1116 1117 if (attr_3_2 == 0) { 1118 te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered 1119 : TlbEntry::MemoryType::Device; 1120 te.outerAttrs = 0; 1121 te.innerAttrs = attr_1_0 == 0 ? 1 : 3; 1122 te.nonCacheable = true; 1123 } else { 1124 te.mtype = TlbEntry::MemoryType::Normal; 1125 te.outerAttrs = attr_3_2 == 1 ? 0 : 1126 attr_3_2 == 2 ? 2 : 1; 1127 te.innerAttrs = attr_1_0 == 1 ? 0 : 1128 attr_1_0 == 2 ? 6 : 5; 1129 te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1); 1130 } 1131 } else { 1132 uint8_t attrIndx = lDescriptor.attrIndx(); 1133 1134 // LPAE always uses remapping of memory attributes, irrespective of the 1135 // value of SCTLR.TRE 1136 int reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0; 1137 reg = flattenMiscRegNsBanked(reg, currState->tc, !currState->isSecure); 1138 uint32_t mair = currState->tc->readMiscReg(reg); 1139 attr = (mair >> (8 * (attrIndx % 4))) & 0xff; 1140 uint8_t attr_7_4 = bits(attr, 7, 4); 1141 uint8_t attr_3_0 = bits(attr, 3, 0); 1142 DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr); 1143 1144 // Note: the memory subsystem only cares about the 'cacheable' memory 1145 // attribute. 
The other attributes are only used to fill the PAR register 1146 // accordingly to provide the illusion of full support 1147 te.nonCacheable = false; 1148 1149 switch (attr_7_4) { 1150 case 0x0: 1151 // Strongly-ordered or Device memory 1152 if (attr_3_0 == 0x0) 1153 te.mtype = TlbEntry::MemoryType::StronglyOrdered; 1154 else if (attr_3_0 == 0x4) 1155 te.mtype = TlbEntry::MemoryType::Device; 1156 else 1157 panic("Unpredictable behavior\n"); 1158 te.nonCacheable = true; 1159 te.outerAttrs = 0; 1160 break; 1161 case 0x4: 1162 // Normal memory, Outer Non-cacheable 1163 te.mtype = TlbEntry::MemoryType::Normal; 1164 te.outerAttrs = 0; 1165 if (attr_3_0 == 0x4) 1166 // Inner Non-cacheable 1167 te.nonCacheable = true; 1168 else if (attr_3_0 < 0x8) 1169 panic("Unpredictable behavior\n"); 1170 break; 1171 case 0x8: 1172 case 0x9: 1173 case 0xa: 1174 case 0xb: 1175 case 0xc: 1176 case 0xd: 1177 case 0xe: 1178 case 0xf: 1179 if (attr_7_4 & 0x4) { 1180 te.outerAttrs = (attr_7_4 & 1) ? 1 : 3; 1181 } else { 1182 te.outerAttrs = 0x2; 1183 } 1184 // Normal memory, Outer Cacheable 1185 te.mtype = TlbEntry::MemoryType::Normal; 1186 if (attr_3_0 != 0x4 && attr_3_0 < 0x8) 1187 panic("Unpredictable behavior\n"); 1188 break; 1189 default: 1190 panic("Unpredictable behavior\n"); 1191 break; 1192 } 1193 1194 switch (attr_3_0) { 1195 case 0x0: 1196 te.innerAttrs = 0x1; 1197 break; 1198 case 0x4: 1199 te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0; 1200 break; 1201 case 0x8: 1202 case 0x9: 1203 case 0xA: 1204 case 0xB: 1205 te.innerAttrs = 6; 1206 break; 1207 case 0xC: 1208 case 0xD: 1209 case 0xE: 1210 case 0xF: 1211 te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7; 1212 break; 1213 default: 1214 panic("Unpredictable behavior\n"); 1215 break; 1216 } 1217 } 1218 1219 te.outerShareable = sh == 2; 1220 te.shareable = (sh & 0x2) ? 
true : false; 1221 te.setAttributes(true); 1222 te.attributes |= (uint64_t) attr << 56; 1223} 1224 1225void 1226TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te, uint8_t attrIndx, 1227 uint8_t sh) 1228{ 1229 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh); 1230 1231 // Select MAIR 1232 uint64_t mair; 1233 switch (currState->el) { 1234 case EL0: 1235 case EL1: 1236 mair = tc->readMiscReg(MISCREG_MAIR_EL1); 1237 break; 1238 case EL2: 1239 mair = tc->readMiscReg(MISCREG_MAIR_EL2); 1240 break; 1241 case EL3: 1242 mair = tc->readMiscReg(MISCREG_MAIR_EL3); 1243 break; 1244 default: 1245 panic("Invalid exception level"); 1246 break; 1247 } 1248 1249 // Select attributes 1250 uint8_t attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx); 1251 uint8_t attr_lo = bits(attr, 3, 0); 1252 uint8_t attr_hi = bits(attr, 7, 4); 1253 1254 // Memory type 1255 te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal; 1256 1257 // Cacheability 1258 te.nonCacheable = false; 1259 if (te.mtype == TlbEntry::MemoryType::Device || // Device memory 1260 attr_hi == 0x8 || // Normal memory, Outer Non-cacheable 1261 attr_lo == 0x8) { // Normal memory, Inner Non-cacheable 1262 te.nonCacheable = true; 1263 } 1264 1265 te.shareable = sh == 2; 1266 te.outerShareable = (sh & 0x2) ? 
true : false; 1267 // Attributes formatted according to the 64-bit PAR 1268 te.attributes = ((uint64_t) attr << 56) | 1269 (1 << 11) | // LPAE bit 1270 (te.ns << 9) | // NS bit 1271 (sh << 7); 1272} 1273 1274void 1275TableWalker::doL1Descriptor() 1276{ 1277 if (currState->fault != NoFault) { 1278 return; 1279 } 1280 1281 DPRINTF(TLB, "L1 descriptor for %#x is %#x\n", 1282 currState->vaddr_tainted, currState->l1Desc.data); 1283 TlbEntry te; 1284 1285 switch (currState->l1Desc.type()) { 1286 case L1Descriptor::Ignore: 1287 case L1Descriptor::Reserved: 1288 if (!currState->timing) { 1289 currState->tc = NULL; 1290 currState->req = NULL; 1291 } 1292 DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n"); 1293 if (currState->isFetch) 1294 currState->fault = 1295 new PrefetchAbort(currState->vaddr_tainted, 1296 ArmFault::TranslationLL + L1, 1297 isStage2, 1298 ArmFault::VmsaTran); 1299 else 1300 currState->fault = 1301 new DataAbort(currState->vaddr_tainted, 1302 TlbEntry::DomainType::NoAccess, 1303 currState->isWrite, 1304 ArmFault::TranslationLL + L1, isStage2, 1305 ArmFault::VmsaTran); 1306 return; 1307 case L1Descriptor::Section: 1308 if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) { 1309 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is 1310 * enabled if set, do l1.Desc.setAp0() instead of generating 1311 * AccessFlag0 1312 */ 1313 1314 currState->fault = new DataAbort(currState->vaddr_tainted, 1315 currState->l1Desc.domain(), 1316 currState->isWrite, 1317 ArmFault::AccessFlagLL + L1, 1318 isStage2, 1319 ArmFault::VmsaTran); 1320 } 1321 if (currState->l1Desc.supersection()) { 1322 panic("Haven't implemented supersections\n"); 1323 } 1324 insertTableEntry(currState->l1Desc, false); 1325 return; 1326 case L1Descriptor::PageTable: 1327 { 1328 Addr l2desc_addr; 1329 l2desc_addr = currState->l1Desc.l2Addr() | 1330 (bits(currState->vaddr, 19, 12) << 2); 1331 DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n", 1332 
l2desc_addr, currState->isSecure ? "s" : "ns"); 1333 1334 // Trickbox address check 1335 currState->fault = tlb->walkTrickBoxCheck( 1336 l2desc_addr, currState->isSecure, currState->vaddr, 1337 sizeof(uint32_t), currState->isFetch, currState->isWrite, 1338 currState->l1Desc.domain(), L2); 1339 1340 if (currState->fault) { 1341 if (!currState->timing) { 1342 currState->tc = NULL; 1343 currState->req = NULL; 1344 } 1345 return; 1346 } 1347 1348 Request::Flags flag = 0; 1349 if (currState->isSecure) 1350 flag.set(Request::SECURE); 1351 1352 bool delayed; 1353 delayed = fetchDescriptor(l2desc_addr, 1354 (uint8_t*)&currState->l2Desc.data, 1355 sizeof(uint32_t), flag, -1, &doL2DescEvent, 1356 &TableWalker::doL2Descriptor); 1357 if (delayed) { 1358 currState->delayed = true; 1359 } 1360 1361 return; 1362 } 1363 default: 1364 panic("A new type in a 2 bit field?\n"); 1365 } 1366} 1367 1368void 1369TableWalker::doLongDescriptor() 1370{ 1371 if (currState->fault != NoFault) { 1372 return; 1373 } 1374 1375 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n", 1376 currState->longDesc.lookupLevel, currState->vaddr_tainted, 1377 currState->longDesc.data, 1378 currState->aarch64 ? 
"AArch64" : "long-desc."); 1379 1380 if ((currState->longDesc.type() == LongDescriptor::Block) || 1381 (currState->longDesc.type() == LongDescriptor::Page)) { 1382 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, " 1383 "xn: %d, ap: %d, af: %d, type: %d\n", 1384 currState->longDesc.lookupLevel, 1385 currState->longDesc.data, 1386 currState->longDesc.pxn(), 1387 currState->longDesc.xn(), 1388 currState->longDesc.ap(), 1389 currState->longDesc.af(), 1390 currState->longDesc.type()); 1391 } else { 1392 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n", 1393 currState->longDesc.lookupLevel, 1394 currState->longDesc.data, 1395 currState->longDesc.type()); 1396 } 1397 1398 TlbEntry te; 1399 1400 switch (currState->longDesc.type()) { 1401 case LongDescriptor::Invalid: 1402 if (!currState->timing) { 1403 currState->tc = NULL; 1404 currState->req = NULL; 1405 } 1406 1407 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n", 1408 currState->longDesc.lookupLevel, 1409 ArmFault::TranslationLL + currState->longDesc.lookupLevel); 1410 if (currState->isFetch) 1411 currState->fault = new PrefetchAbort( 1412 currState->vaddr_tainted, 1413 ArmFault::TranslationLL + currState->longDesc.lookupLevel, 1414 isStage2, 1415 ArmFault::LpaeTran); 1416 else 1417 currState->fault = new DataAbort( 1418 currState->vaddr_tainted, 1419 TlbEntry::DomainType::NoAccess, 1420 currState->isWrite, 1421 ArmFault::TranslationLL + currState->longDesc.lookupLevel, 1422 isStage2, 1423 ArmFault::LpaeTran); 1424 return; 1425 case LongDescriptor::Block: 1426 case LongDescriptor::Page: 1427 { 1428 bool fault = false; 1429 bool aff = false; 1430 // Check for address size fault 1431 if (checkAddrSizeFaultAArch64( 1432 mbits(currState->longDesc.data, MaxPhysAddrRange - 1, 1433 currState->longDesc.offsetBits()), 1434 currState->physAddrRange)) { 1435 fault = true; 1436 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n", 1437 currState->longDesc.lookupLevel); 1438 // 
Check for access fault 1439 } else if (currState->longDesc.af() == 0) { 1440 fault = true; 1441 DPRINTF(TLB, "L%d descriptor causing Access Fault\n", 1442 currState->longDesc.lookupLevel); 1443 aff = true; 1444 } 1445 if (fault) { 1446 if (currState->isFetch) 1447 currState->fault = new PrefetchAbort( 1448 currState->vaddr_tainted, 1449 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) + 1450 currState->longDesc.lookupLevel, 1451 isStage2, 1452 ArmFault::LpaeTran); 1453 else 1454 currState->fault = new DataAbort( 1455 currState->vaddr_tainted, 1456 TlbEntry::DomainType::NoAccess, currState->isWrite, 1457 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) + 1458 currState->longDesc.lookupLevel, 1459 isStage2, 1460 ArmFault::LpaeTran); 1461 } else { 1462 insertTableEntry(currState->longDesc, true); 1463 } 1464 } 1465 return; 1466 case LongDescriptor::Table: 1467 { 1468 // Set hierarchical permission flags 1469 currState->secureLookup = currState->secureLookup && 1470 currState->longDesc.secureTable(); 1471 currState->rwTable = currState->rwTable && 1472 currState->longDesc.rwTable(); 1473 currState->userTable = currState->userTable && 1474 currState->longDesc.userTable(); 1475 currState->xnTable = currState->xnTable || 1476 currState->longDesc.xnTable(); 1477 currState->pxnTable = currState->pxnTable || 1478 currState->longDesc.pxnTable(); 1479 1480 // Set up next level lookup 1481 Addr next_desc_addr = currState->longDesc.nextDescAddr( 1482 currState->vaddr); 1483 1484 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n", 1485 currState->longDesc.lookupLevel, 1486 currState->longDesc.lookupLevel + 1, 1487 next_desc_addr, 1488 currState->secureLookup ? 
"s" : "ns"); 1489 1490 // Check for address size fault 1491 if (currState->aarch64 && checkAddrSizeFaultAArch64( 1492 next_desc_addr, currState->physAddrRange)) { 1493 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n", 1494 currState->longDesc.lookupLevel); 1495 if (currState->isFetch) 1496 currState->fault = new PrefetchAbort( 1497 currState->vaddr_tainted, 1498 ArmFault::AddressSizeLL 1499 + currState->longDesc.lookupLevel, 1500 isStage2, 1501 ArmFault::LpaeTran); 1502 else 1503 currState->fault = new DataAbort( 1504 currState->vaddr_tainted, 1505 TlbEntry::DomainType::NoAccess, currState->isWrite, 1506 ArmFault::AddressSizeLL 1507 + currState->longDesc.lookupLevel, 1508 isStage2, 1509 ArmFault::LpaeTran); 1510 return; 1511 } 1512 1513 // Trickbox address check 1514 currState->fault = tlb->walkTrickBoxCheck( 1515 next_desc_addr, currState->vaddr, 1516 currState->vaddr, sizeof(uint64_t), 1517 currState->isFetch, currState->isWrite, 1518 TlbEntry::DomainType::Client, 1519 toLookupLevel(currState->longDesc.lookupLevel +1)); 1520 1521 if (currState->fault) { 1522 if (!currState->timing) { 1523 currState->tc = NULL; 1524 currState->req = NULL; 1525 } 1526 return; 1527 } 1528 1529 Request::Flags flag = 0; 1530 if (currState->secureLookup) 1531 flag.set(Request::SECURE); 1532 1533 currState->longDesc.lookupLevel = 1534 (LookupLevel) (currState->longDesc.lookupLevel + 1); 1535 Event *event = NULL; 1536 switch (currState->longDesc.lookupLevel) { 1537 case L1: 1538 assert(currState->aarch64); 1539 event = &doL1LongDescEvent; 1540 break; 1541 case L2: 1542 event = &doL2LongDescEvent; 1543 break; 1544 case L3: 1545 event = &doL3LongDescEvent; 1546 break; 1547 default: 1548 panic("Wrong lookup level in table walk\n"); 1549 break; 1550 } 1551 1552 bool delayed; 1553 delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data, 1554 sizeof(uint64_t), flag, -1, event, 1555 &TableWalker::doLongDescriptor); 1556 if (delayed) { 1557 currState->delayed = 
true; 1558 } 1559 } 1560 return; 1561 default: 1562 panic("A new type in a 2 bit field?\n"); 1563 } 1564} 1565 1566void 1567TableWalker::doL2Descriptor() 1568{ 1569 if (currState->fault != NoFault) { 1570 return; 1571 } 1572 1573 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n", 1574 currState->vaddr_tainted, currState->l2Desc.data); 1575 TlbEntry te; 1576 1577 if (currState->l2Desc.invalid()) { 1578 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n"); 1579 if (!currState->timing) { 1580 currState->tc = NULL; 1581 currState->req = NULL; 1582 } 1583 if (currState->isFetch) 1584 currState->fault = 1585 new PrefetchAbort(currState->vaddr_tainted, 1586 ArmFault::TranslationLL + L2, 1587 isStage2, 1588 ArmFault::VmsaTran); 1589 else 1590 currState->fault = 1591 new DataAbort(currState->vaddr_tainted, currState->l1Desc.domain(), 1592 currState->isWrite, ArmFault::TranslationLL + L2, 1593 isStage2, 1594 ArmFault::VmsaTran); 1595 return; 1596 } 1597 1598 if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) { 1599 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled 1600 * if set, do l2.Desc.setAp0() instead of generating AccessFlag0 1601 */ 1602 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n", 1603 currState->sctlr.afe, currState->l2Desc.ap()); 1604 1605 currState->fault = 1606 new DataAbort(currState->vaddr_tainted, 1607 TlbEntry::DomainType::NoAccess, currState->isWrite, 1608 ArmFault::AccessFlagLL + L2, isStage2, 1609 ArmFault::VmsaTran); 1610 } 1611 1612 insertTableEntry(currState->l2Desc, false); 1613} 1614 1615void 1616TableWalker::doL1DescriptorWrapper() 1617{ 1618 currState = stateQueues[L1].front(); 1619 currState->delayed = false; 1620 // if there's a stage2 translation object we don't need it any more 1621 if (currState->stage2Tran) { 1622 delete currState->stage2Tran; 1623 currState->stage2Tran = NULL; 1624 } 1625 1626 1627 DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data); 1628 
DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",currState->l1Desc.data); 1629 1630 DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted); 1631 doL1Descriptor(); 1632 1633 stateQueues[L1].pop_front(); 1634 completeDrain(); 1635 // Check if fault was generated 1636 if (currState->fault != NoFault) { 1637 currState->transState->finish(currState->fault, currState->req, 1638 currState->tc, currState->mode); 1639 1640 pending = false; 1641 nextWalk(currState->tc); 1642 1643 currState->req = NULL; 1644 currState->tc = NULL; 1645 currState->delayed = false; 1646 delete currState; 1647 } 1648 else if (!currState->delayed) { 1649 // delay is not set so there is no L2 to do 1650 // Don't finish the translation if a stage 2 look up is underway 1651 if (!currState->doingStage2) { 1652 DPRINTF(TLBVerbose, "calling translateTiming again\n"); 1653 currState->fault = tlb->translateTiming(currState->req, currState->tc, 1654 currState->transState, currState->mode); 1655 } 1656 1657 pending = false; 1658 nextWalk(currState->tc); 1659 1660 currState->req = NULL; 1661 currState->tc = NULL; 1662 currState->delayed = false; 1663 delete currState; 1664 } else { 1665 // need to do L2 descriptor 1666 stateQueues[L2].push_back(currState); 1667 } 1668 currState = NULL; 1669} 1670 1671void 1672TableWalker::doL2DescriptorWrapper() 1673{ 1674 currState = stateQueues[L2].front(); 1675 assert(currState->delayed); 1676 // if there's a stage2 translation object we don't need it any more 1677 if (currState->stage2Tran) { 1678 delete currState->stage2Tran; 1679 currState->stage2Tran = NULL; 1680 } 1681 1682 DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n", 1683 currState->vaddr_tainted); 1684 doL2Descriptor(); 1685 1686 // Check if fault was generated 1687 if (currState->fault != NoFault) { 1688 currState->transState->finish(currState->fault, currState->req, 1689 currState->tc, currState->mode); 1690 } 1691 else { 1692 // Don't finish the translation if a 
stage 2 look up is underway 1693 if (!currState->doingStage2) { 1694 DPRINTF(TLBVerbose, "calling translateTiming again\n"); 1695 currState->fault = tlb->translateTiming(currState->req, 1696 currState->tc, currState->transState, currState->mode); 1697 } 1698 } 1699 1700 1701 stateQueues[L2].pop_front(); 1702 completeDrain(); 1703 pending = false; 1704 nextWalk(currState->tc); 1705 1706 currState->req = NULL; 1707 currState->tc = NULL; 1708 currState->delayed = false; 1709 1710 delete currState; 1711 currState = NULL; 1712} 1713 1714void 1715TableWalker::doL0LongDescriptorWrapper() 1716{ 1717 doLongDescriptorWrapper(L0); 1718} 1719 1720void 1721TableWalker::doL1LongDescriptorWrapper() 1722{ 1723 doLongDescriptorWrapper(L1); 1724} 1725 1726void 1727TableWalker::doL2LongDescriptorWrapper() 1728{ 1729 doLongDescriptorWrapper(L2); 1730} 1731 1732void 1733TableWalker::doL3LongDescriptorWrapper() 1734{ 1735 doLongDescriptorWrapper(L3); 1736} 1737 1738void 1739TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level) 1740{ 1741 currState = stateQueues[curr_lookup_level].front(); 1742 assert(curr_lookup_level == currState->longDesc.lookupLevel); 1743 currState->delayed = false; 1744 1745 // if there's a stage2 translation object we don't need it any more 1746 if (currState->stage2Tran) { 1747 delete currState->stage2Tran; 1748 currState->stage2Tran = NULL; 1749 } 1750 1751 DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n", 1752 currState->vaddr_tainted); 1753 doLongDescriptor(); 1754 1755 stateQueues[curr_lookup_level].pop_front(); 1756 1757 if (currState->fault != NoFault) { 1758 // A fault was generated 1759 currState->transState->finish(currState->fault, currState->req, 1760 currState->tc, currState->mode); 1761 1762 pending = false; 1763 nextWalk(currState->tc); 1764 1765 currState->req = NULL; 1766 currState->tc = NULL; 1767 currState->delayed = false; 1768 delete currState; 1769 } else if (!currState->delayed) { 1770 // No additional lookups 
required 1771 // Don't finish the translation if a stage 2 look up is underway 1772 if (!currState->doingStage2) { 1773 DPRINTF(TLBVerbose, "calling translateTiming again\n"); 1774 currState->fault = tlb->translateTiming(currState->req, currState->tc, 1775 currState->transState, 1776 currState->mode); 1777 } 1778 1779 pending = false; 1780 nextWalk(currState->tc); 1781 1782 currState->req = NULL; 1783 currState->tc = NULL; 1784 currState->delayed = false; 1785 delete currState; 1786 } else { 1787 if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1) 1788 panic("Max. number of lookups already reached in table walk\n"); 1789 // Need to perform additional lookups 1790 stateQueues[currState->longDesc.lookupLevel].push_back(currState); 1791 } 1792 currState = NULL; 1793} 1794 1795 1796void 1797TableWalker::nextWalk(ThreadContext *tc) 1798{ 1799 if (pendingQueue.size()) 1800 schedule(doProcessEvent, clockEdge(Cycles(1))); 1801} 1802 1803bool 1804TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes, 1805 Request::Flags flags, int queueIndex, Event *event, 1806 void (TableWalker::*doDescriptor)()) 1807{ 1808 bool isTiming = currState->timing; 1809 1810 // do the requests for the page table descriptors have to go through the 1811 // second stage MMU 1812 if (currState->stage2Req) { 1813 Fault fault; 1814 flags = flags | TLB::MustBeOne; 1815 1816 if (isTiming) { 1817 Stage2MMU::Stage2Translation *tran = new 1818 Stage2MMU::Stage2Translation(*stage2Mmu, data, event, 1819 currState->vaddr); 1820 currState->stage2Tran = tran; 1821 stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes, 1822 flags, masterId); 1823 fault = tran->fault; 1824 } else { 1825 fault = stage2Mmu->readDataUntimed(currState->tc, 1826 currState->vaddr, descAddr, data, numBytes, flags, masterId, 1827 currState->functional); 1828 } 1829 1830 if (fault != NoFault) { 1831 currState->fault = fault; 1832 } 1833 if (isTiming) { 1834 if (queueIndex >= 0) { 1835 DPRINTF(TLBVerbose, "Adding to 
walker fifo: queue size before adding: %d\n", 1836 stateQueues[queueIndex].size()); 1837 stateQueues[queueIndex].push_back(currState); 1838 currState = NULL; 1839 } 1840 } else { 1841 (this->*doDescriptor)(); 1842 } 1843 } else { 1844 if (isTiming) { 1845 port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data, 1846 currState->tc->getCpuPtr()->clockPeriod(), flags); 1847 if (queueIndex >= 0) { 1848 DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n", 1849 stateQueues[queueIndex].size()); 1850 stateQueues[queueIndex].push_back(currState); 1851 currState = NULL; 1852 } 1853 } else if (!currState->functional) { 1854 port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data, 1855 currState->tc->getCpuPtr()->clockPeriod(), flags); 1856 (this->*doDescriptor)(); 1857 } else { 1858 RequestPtr req = new Request(descAddr, numBytes, flags, masterId); 1859 req->taskId(ContextSwitchTaskId::DMA); 1860 PacketPtr pkt = new Packet(req, MemCmd::ReadReq); 1861 pkt->dataStatic(data); 1862 port.sendFunctional(pkt); 1863 (this->*doDescriptor)(); 1864 delete req; 1865 delete pkt; 1866 } 1867 } 1868 return (isTiming); 1869} 1870 1871void 1872TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor) 1873{ 1874 TlbEntry te; 1875 1876 // Create and fill a new page table entry 1877 te.valid = true; 1878 te.longDescFormat = longDescriptor; 1879 te.isHyp = currState->isHyp; 1880 te.asid = currState->asid; 1881 te.vmid = currState->vmid; 1882 te.N = descriptor.offsetBits(); 1883 te.vpn = currState->vaddr >> te.N; 1884 te.size = (1<<te.N) - 1; 1885 te.pfn = descriptor.pfn(); 1886 te.domain = descriptor.domain(); 1887 te.lookupLevel = descriptor.lookupLevel; 1888 te.ns = !descriptor.secure(haveSecurity, currState) || isStage2; 1889 te.nstid = !currState->isSecure; 1890 te.xn = descriptor.xn(); 1891 if (currState->aarch64) 1892 te.el = currState->el; 1893 else 1894 te.el = 1; 1895 1896 // ASID has no meaning for stage 2 TLB entries, so mark 
all stage 2 entries 1897 // as global 1898 te.global = descriptor.global(currState) || isStage2; 1899 if (longDescriptor) { 1900 LongDescriptor lDescriptor = 1901 dynamic_cast<LongDescriptor &>(descriptor); 1902 1903 te.xn |= currState->xnTable; 1904 te.pxn = currState->pxnTable || lDescriptor.pxn(); 1905 if (isStage2) { 1906 // this is actually the HAP field, but its stored in the same bit 1907 // possitions as the AP field in a stage 1 translation. 1908 te.hap = lDescriptor.ap(); 1909 } else { 1910 te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) | 1911 (currState->userTable && (descriptor.ap() & 0x1)); 1912 } 1913 if (currState->aarch64) 1914 memAttrsAArch64(currState->tc, te, currState->longDesc.attrIndx(), 1915 currState->longDesc.sh()); 1916 else 1917 memAttrsLPAE(currState->tc, te, lDescriptor); 1918 } else { 1919 te.ap = descriptor.ap(); 1920 memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(), 1921 descriptor.shareable()); 1922 } 1923 1924 // Debug output 1925 DPRINTF(TLB, descriptor.dbgHeader().c_str()); 1926 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n", 1927 te.N, te.pfn, te.size, te.global, te.valid); 1928 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d " 1929 "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn, 1930 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp, 1931 te.nonCacheable, te.ns); 1932 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n", 1933 descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()), 1934 descriptor.getRawData()); 1935 1936 // Insert the entry into the TLB 1937 tlb->insert(currState->vaddr, te); 1938 if (!currState->timing) { 1939 currState->tc = NULL; 1940 currState->req = NULL; 1941 } 1942} 1943 1944ArmISA::TableWalker * 1945ArmTableWalkerParams::create() 1946{ 1947 return new ArmISA::TableWalker(this); 1948} 1949 1950LookupLevel 1951TableWalker::toLookupLevel(uint8_t lookup_level_as_int) 1952{ 1953 switch (lookup_level_as_int) { 1954 
case L1: 1955 return L1; 1956 case L2: 1957 return L2; 1958 case L3: 1959 return L3; 1960 default: 1961 panic("Invalid lookup level conversion"); 1962 } 1963} 1964