table_walker.cc revision 10421
1/* 2 * Copyright (c) 2010, 2012-2014 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions are 16 * met: redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer; 18 * redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution; 21 * neither the name of the copyright holders nor the names of its 22 * contributors may be used to endorse or promote products derived from 23 * this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 36 * 37 * Authors: Ali Saidi 38 * Giacomo Gabrielli 39 */ 40 41#include "arch/arm/faults.hh" 42#include "arch/arm/stage2_mmu.hh" 43#include "arch/arm/system.hh" 44#include "arch/arm/table_walker.hh" 45#include "arch/arm/tlb.hh" 46#include "cpu/base.hh" 47#include "cpu/thread_context.hh" 48#include "debug/Checkpoint.hh" 49#include "debug/Drain.hh" 50#include "debug/TLB.hh" 51#include "debug/TLBVerbose.hh" 52#include "sim/system.hh" 53 54using namespace ArmISA; 55 56TableWalker::TableWalker(const Params *p) 57 : MemObject(p), port(this, p->sys), drainManager(NULL), 58 stage2Mmu(NULL), isStage2(p->is_stage2), tlb(NULL), 59 currState(NULL), pending(false), masterId(p->sys->getMasterId(name())), 60 numSquashable(p->num_squash_per_cycle), 61 doL1DescEvent(this), doL2DescEvent(this), 62 doL0LongDescEvent(this), doL1LongDescEvent(this), doL2LongDescEvent(this), 63 doL3LongDescEvent(this), 64 doProcessEvent(this) 65{ 66 sctlr = 0; 67 68 // Cache system-level properties 69 if (FullSystem) { 70 armSys = dynamic_cast<ArmSystem *>(p->sys); 71 assert(armSys); 72 haveSecurity = armSys->haveSecurity(); 73 _haveLPAE = armSys->haveLPAE(); 74 _haveVirtualization = armSys->haveVirtualization(); 75 physAddrRange = armSys->physAddrRange(); 76 _haveLargeAsid64 = armSys->haveLargeAsid64(); 77 } else { 78 armSys = NULL; 79 haveSecurity = _haveLPAE = _haveVirtualization = false; 80 _haveLargeAsid64 = false; 81 physAddrRange = 32; 82 } 83 84} 85 86TableWalker::~TableWalker() 87{ 88 ; 89} 90 91TableWalker::WalkerState::WalkerState() : stage2Tran(NULL), l2Desc(l1Desc) 92{ 93} 94 95void 96TableWalker::completeDrain() 97{ 98 if (drainManager && stateQueues[L1].empty() && stateQueues[L2].empty() && 99 pendingQueue.empty()) { 100 setDrainState(Drainable::Drained); 101 DPRINTF(Drain, "TableWalker done draining, processing drain event\n"); 102 drainManager->signalDrainDone(); 103 drainManager = NULL; 104 } 105} 106 107unsigned int 108TableWalker::drain(DrainManager *dm) 109{ 110 unsigned int count = port.drain(dm); 111 112 bool state_queues_not_empty = false; 113 114 for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) { 115 if (!stateQueues[i].empty()) { 116 state_queues_not_empty = true; 117 break; 118 } 119 } 120 121 if (state_queues_not_empty || pendingQueue.size()) { 122 drainManager = dm; 123 setDrainState(Drainable::Draining); 124 DPRINTF(Drain, "TableWalker not drained\n"); 125 126 // return port drain count plus the table walker itself needs to drain 127 return count + 1; 128 } else { 129 setDrainState(Drainable::Drained); 130 DPRINTF(Drain, "TableWalker free, no need to drain\n"); 131 132 // table walker is drained, but its ports may still need to be drained 133 return count; 134 } 135} 136 137void 138TableWalker::drainResume() 139{ 140 Drainable::drainResume(); 141 if (params()->sys->isTimingMode() && currState) { 142 delete currState; 143 currState = NULL; 144 } 145} 146 147BaseMasterPort& 148TableWalker::getMasterPort(const std::string &if_name, PortID idx) 149{ 150 if (if_name == "port") { 151 return port; 152 } 153 return MemObject::getMasterPort(if_name, idx); 154} 155 156Fault 157TableWalker::walk(RequestPtr _req, ThreadContext *_tc, uint16_t _asid, 158 uint8_t _vmid, bool _isHyp, TLB::Mode _mode, 159 TLB::Translation *_trans, bool _timing, bool _functional, 160 bool secure, TLB::ArmTranslationType tranType) 161{ 162 assert(!(_functional && _timing)); 163 WalkerState *savedCurrState = NULL; 164 165 if (!currState && !_functional) { 166 // For atomic mode, a new WalkerState instance should be only created 167 // once per TLB. For timing mode, a new instance is generated for every 168 // TLB miss. 169 DPRINTF(TLBVerbose, "creating new instance of WalkerState\n"); 170 171 currState = new WalkerState(); 172 currState->tableWalker = this; 173 } else if (_functional) { 174 // If we are mixing functional mode with timing (or even 175 // atomic), we need to to be careful and clean up after 176 // ourselves to not risk getting into an inconsistent state. 177 DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n"); 178 savedCurrState = currState; 179 currState = new WalkerState(); 180 currState->tableWalker = this; 181 } else if (_timing) { 182 // This is a translation that was completed and then faulted again 183 // because some underlying parameters that affect the translation 184 // changed out from under us (e.g. asid). It will either be a 185 // misprediction, in which case nothing will happen or we'll use 186 // this fault to re-execute the faulting instruction which should clean 187 // up everything. 188 if (currState->vaddr_tainted == _req->getVaddr()) { 189 return new ReExec; 190 } 191 } 192 193 currState->tc = _tc; 194 currState->aarch64 = opModeIs64(currOpMode(_tc)); 195 currState->el = currEL(_tc); 196 currState->transState = _trans; 197 currState->req = _req; 198 currState->fault = NoFault; 199 currState->asid = _asid; 200 currState->vmid = _vmid; 201 currState->isHyp = _isHyp; 202 currState->timing = _timing; 203 currState->functional = _functional; 204 currState->mode = _mode; 205 currState->tranType = tranType; 206 currState->isSecure = secure; 207 currState->physAddrRange = physAddrRange; 208 209 /** @todo These should be cached or grabbed from cached copies in 210 the TLB, all these miscreg reads are expensive */ 211 currState->vaddr_tainted = currState->req->getVaddr(); 212 if (currState->aarch64) 213 currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted, 214 currState->tc, currState->el); 215 else 216 currState->vaddr = currState->vaddr_tainted; 217 218 if (currState->aarch64) { 219 switch (currState->el) { 220 case EL0: 221 case EL1: 222 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1); 223 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1); 224 break; 225 // @todo: uncomment this to enable Virtualization 226 // case EL2: 227 // assert(haveVirtualization); 228 // currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2); 229 // currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2); 230 // break; 231 case EL3: 232 assert(haveSecurity); 233 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3); 234 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3); 235 break; 236 default: 237 panic("Invalid exception level"); 238 break; 239 } 240 } else { 241 currState->sctlr = currState->tc->readMiscReg(flattenMiscRegNsBanked( 242 MISCREG_SCTLR, currState->tc, !currState->isSecure)); 243 currState->ttbcr = currState->tc->readMiscReg(flattenMiscRegNsBanked( 244 MISCREG_TTBCR, currState->tc, !currState->isSecure)); 245 currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR); 246 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR); 247 currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR); 248 } 249 sctlr = currState->sctlr; 250 251 currState->isFetch = (currState->mode == TLB::Execute); 252 currState->isWrite = (currState->mode == TLB::Write); 253 254 // We only do a second stage of translation if we're not secure, or in 255 // hyp mode, the second stage MMU is enabled, and this table walker 256 // instance is the first stage. 257 currState->doingStage2 = false; 258 // @todo: for now disable this in AArch64 (HCR is not set) 259 currState->stage2Req = !currState->aarch64 && currState->hcr.vm && 260 !isStage2 && !currState->isSecure && !currState->isHyp; 261 262 bool long_desc_format = currState->aarch64 || 263 (_haveLPAE && currState->ttbcr.eae) || 264 _isHyp || isStage2; 265 266 if (long_desc_format) { 267 // Helper variables used for hierarchical permissions 268 currState->secureLookup = currState->isSecure; 269 currState->rwTable = true; 270 currState->userTable = true; 271 currState->xnTable = false; 272 currState->pxnTable = false; 273 } 274 275 if (!currState->timing) { 276 Fault fault = NoFault; 277 if (currState->aarch64) 278 fault = processWalkAArch64(); 279 else if (long_desc_format) 280 fault = processWalkLPAE(); 281 else 282 fault = processWalk(); 283 284 // If this was a functional non-timing access restore state to 285 // how we found it. 286 if (currState->functional) { 287 delete currState; 288 currState = savedCurrState; 289 } 290 return fault; 291 } 292 293 if (pending || pendingQueue.size()) { 294 pendingQueue.push_back(currState); 295 currState = NULL; 296 } else { 297 pending = true; 298 if (currState->aarch64) 299 return processWalkAArch64(); 300 else if (long_desc_format) 301 return processWalkLPAE(); 302 else 303 return processWalk(); 304 } 305 306 return NoFault; 307} 308 309void 310TableWalker::processWalkWrapper() 311{ 312 assert(!currState); 313 assert(pendingQueue.size()); 314 currState = pendingQueue.front(); 315 316 ExceptionLevel target_el = EL0; 317 if (currState->aarch64) 318 target_el = currEL(currState->tc); 319 else 320 target_el = EL1; 321 322 // Check if a previous walk filled this request already 323 // @TODO Should this always be the TLB or should we look in the stage2 TLB? 324 TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid, 325 currState->vmid, currState->isHyp, currState->isSecure, true, false, 326 target_el); 327 328 // Check if we still need to have a walk for this request. If the requesting 329 // instruction has been squashed, or a previous walk has filled the TLB with 330 // a match, we just want to get rid of the walk. The latter could happen 331 // when there are multiple outstanding misses to a single page and a 332 // previous request has been successfully translated. 333 if (!currState->transState->squashed() && !te) { 334 // We've got a valid request, lets process it 335 pending = true; 336 pendingQueue.pop_front(); 337 if (currState->aarch64) 338 processWalkAArch64(); 339 else if ((_haveLPAE && currState->ttbcr.eae) || currState->isHyp || isStage2) 340 processWalkLPAE(); 341 else 342 processWalk(); 343 return; 344 } 345 346 347 // If the instruction that we were translating for has been 348 // squashed we shouldn't bother. 349 unsigned num_squashed = 0; 350 ThreadContext *tc = currState->tc; 351 while ((num_squashed < numSquashable) && currState && 352 (currState->transState->squashed() || te)) { 353 pendingQueue.pop_front(); 354 num_squashed++; 355 356 DPRINTF(TLB, "Squashing table walk for address %#x\n", 357 currState->vaddr_tainted); 358 359 if (currState->transState->squashed()) { 360 // finish the translation which will delete the translation object 361 currState->transState->finish(new UnimpFault("Squashed Inst"), 362 currState->req, currState->tc, currState->mode); 363 } else { 364 // translate the request now that we know it will work 365 tlb->translateTiming(currState->req, currState->tc, 366 currState->transState, currState->mode); 367 368 } 369 370 // delete the current request 371 delete currState; 372 373 // peak at the next one 374 if (pendingQueue.size()) { 375 currState = pendingQueue.front(); 376 te = tlb->lookup(currState->vaddr, currState->asid, 377 currState->vmid, currState->isHyp, currState->isSecure, true, 378 false, target_el); 379 } else { 380 // Terminate the loop, nothing more to do 381 currState = NULL; 382 } 383 } 384 385 // if we've still got pending translations schedule more work 386 nextWalk(tc); 387 currState = NULL; 388 completeDrain(); 389} 390 391Fault 392TableWalker::processWalk() 393{ 394 Addr ttbr = 0; 395 396 // If translation isn't enabled, we shouldn't be here 397 assert(currState->sctlr.m || isStage2); 398 399 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n", 400 currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31, 401 32 - currState->ttbcr.n)); 402 403 if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31, 404 32 - currState->ttbcr.n)) { 405 DPRINTF(TLB, " - Selecting TTBR0\n"); 406 // Check if table walk is allowed when Security Extensions are enabled 407 if (haveSecurity && currState->ttbcr.pd0) { 408 if (currState->isFetch) 409 return new PrefetchAbort(currState->vaddr_tainted, 410 ArmFault::TranslationLL + L1, 411 isStage2, 412 ArmFault::VmsaTran); 413 else 414 return new DataAbort(currState->vaddr_tainted, 415 TlbEntry::DomainType::NoAccess, currState->isWrite, 416 ArmFault::TranslationLL + L1, isStage2, 417 ArmFault::VmsaTran); 418 } 419 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked( 420 MISCREG_TTBR0, currState->tc, !currState->isSecure)); 421 } else { 422 DPRINTF(TLB, " - Selecting TTBR1\n"); 423 // Check if table walk is allowed when Security Extensions are enabled 424 if (haveSecurity && currState->ttbcr.pd1) { 425 if (currState->isFetch) 426 return new PrefetchAbort(currState->vaddr_tainted, 427 ArmFault::TranslationLL + L1, 428 isStage2, 429 ArmFault::VmsaTran); 430 else 431 return new DataAbort(currState->vaddr_tainted, 432 TlbEntry::DomainType::NoAccess, currState->isWrite, 433 ArmFault::TranslationLL + L1, isStage2, 434 ArmFault::VmsaTran); 435 } 436 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked( 437 MISCREG_TTBR1, currState->tc, !currState->isSecure)); 438 currState->ttbcr.n = 0; 439 } 440 441 Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) | 442 (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2); 443 DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr, 444 currState->isSecure ? "s" : "ns"); 445 446 // Trickbox address check 447 Fault f; 448 f = tlb->walkTrickBoxCheck(l1desc_addr, currState->isSecure, 449 currState->vaddr, sizeof(uint32_t), currState->isFetch, 450 currState->isWrite, TlbEntry::DomainType::NoAccess, L1); 451 if (f) { 452 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted); 453 if (currState->timing) { 454 pending = false; 455 nextWalk(currState->tc); 456 currState = NULL; 457 } else { 458 currState->tc = NULL; 459 currState->req = NULL; 460 } 461 return f; 462 } 463 464 Request::Flags flag = 0; 465 if (currState->sctlr.c == 0) { 466 flag = Request::UNCACHEABLE; 467 } 468 469 bool delayed; 470 delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data, 471 sizeof(uint32_t), flag, L1, &doL1DescEvent, 472 &TableWalker::doL1Descriptor); 473 if (!delayed) { 474 f = currState->fault; 475 } 476 477 return f; 478} 479 480Fault 481TableWalker::processWalkLPAE() 482{ 483 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr; 484 int tsz, n; 485 LookupLevel start_lookup_level = L1; 486 487 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n", 488 currState->vaddr_tainted, currState->ttbcr); 489 490 Request::Flags flag = 0; 491 if (currState->isSecure) 492 flag.set(Request::SECURE); 493 494 // work out which base address register to use, if in hyp mode we always 495 // use HTTBR 496 if (isStage2) { 497 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n"); 498 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR); 499 tsz = sext<4>(currState->vtcr.t0sz); 500 start_lookup_level = currState->vtcr.sl0 ? L1 : L2; 501 } else if (currState->isHyp) { 502 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n"); 503 ttbr = currState->tc->readMiscReg(MISCREG_HTTBR); 504 tsz = currState->htcr.t0sz; 505 } else { 506 assert(_haveLPAE && currState->ttbcr.eae); 507 508 // Determine boundaries of TTBR0/1 regions 509 if (currState->ttbcr.t0sz) 510 ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1; 511 else if (currState->ttbcr.t1sz) 512 ttbr0_max = (1ULL << 32) - 513 (1ULL << (32 - currState->ttbcr.t1sz)) - 1; 514 else 515 ttbr0_max = (1ULL << 32) - 1; 516 if (currState->ttbcr.t1sz) 517 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz)); 518 else 519 ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz)); 520 521 // The following code snippet selects the appropriate translation table base 522 // address (TTBR0 or TTBR1) and the appropriate starting lookup level 523 // depending on the address range supported by the translation table (ARM 524 // ARM issue C B3.6.4) 525 if (currState->vaddr <= ttbr0_max) { 526 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n"); 527 // Check if table walk is allowed 528 if (currState->ttbcr.epd0) { 529 if (currState->isFetch) 530 return new PrefetchAbort(currState->vaddr_tainted, 531 ArmFault::TranslationLL + L1, 532 isStage2, 533 ArmFault::LpaeTran); 534 else 535 return new DataAbort(currState->vaddr_tainted, 536 TlbEntry::DomainType::NoAccess, 537 currState->isWrite, 538 ArmFault::TranslationLL + L1, 539 isStage2, 540 ArmFault::LpaeTran); 541 } 542 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked( 543 MISCREG_TTBR0, currState->tc, !currState->isSecure)); 544 tsz = currState->ttbcr.t0sz; 545 if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GB 546 start_lookup_level = L2; 547 } else if (currState->vaddr >= ttbr1_min) { 548 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n"); 549 // Check if table walk is allowed 550 if (currState->ttbcr.epd1) { 551 if (currState->isFetch) 552 return new PrefetchAbort(currState->vaddr_tainted, 553 ArmFault::TranslationLL + L1, 554 isStage2, 555 ArmFault::LpaeTran); 556 else 557 return new DataAbort(currState->vaddr_tainted, 558 TlbEntry::DomainType::NoAccess, 559 currState->isWrite, 560 ArmFault::TranslationLL + L1, 561 isStage2, 562 ArmFault::LpaeTran); 563 } 564 ttbr = currState->tc->readMiscReg(flattenMiscRegNsBanked( 565 MISCREG_TTBR1, currState->tc, !currState->isSecure)); 566 tsz = currState->ttbcr.t1sz; 567 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30)) // Lower limit >= 3 GB 568 start_lookup_level = L2; 569 } else { 570 // Out of boundaries -> translation fault 571 if (currState->isFetch) 572 return new PrefetchAbort(currState->vaddr_tainted, 573 ArmFault::TranslationLL + L1, 574 isStage2, 575 ArmFault::LpaeTran); 576 else 577 return new DataAbort(currState->vaddr_tainted, 578 TlbEntry::DomainType::NoAccess, 579 currState->isWrite, ArmFault::TranslationLL + L1, 580 isStage2, ArmFault::LpaeTran); 581 } 582 583 } 584 585 // Perform lookup (ARM ARM issue C B3.6.6) 586 if (start_lookup_level == L1) { 587 n = 5 - tsz; 588 desc_addr = mbits(ttbr, 39, n) | 589 (bits(currState->vaddr, n + 26, 30) << 3); 590 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n", 591 desc_addr, currState->isSecure ? "s" : "ns"); 592 } else { 593 // Skip first-level lookup 594 n = (tsz >= 2 ? 14 - tsz : 12); 595 desc_addr = mbits(ttbr, 39, n) | 596 (bits(currState->vaddr, n + 17, 21) << 3); 597 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n", 598 desc_addr, currState->isSecure ? "s" : "ns"); 599 } 600 601 // Trickbox address check 602 Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure, 603 currState->vaddr, sizeof(uint64_t), currState->isFetch, 604 currState->isWrite, TlbEntry::DomainType::NoAccess, 605 start_lookup_level); 606 if (f) { 607 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted); 608 if (currState->timing) { 609 pending = false; 610 nextWalk(currState->tc); 611 currState = NULL; 612 } else { 613 currState->tc = NULL; 614 currState->req = NULL; 615 } 616 return f; 617 } 618 619 if (currState->sctlr.c == 0) { 620 flag = Request::UNCACHEABLE; 621 } 622 623 if (currState->isSecure) 624 flag.set(Request::SECURE); 625 626 currState->longDesc.lookupLevel = start_lookup_level; 627 currState->longDesc.aarch64 = false; 628 currState->longDesc.grainSize = Grain4KB; 629 630 Event *event = start_lookup_level == L1 ? (Event *) &doL1LongDescEvent 631 : (Event *) &doL2LongDescEvent; 632 633 bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data, 634 sizeof(uint64_t), flag, start_lookup_level, 635 event, &TableWalker::doLongDescriptor); 636 if (!delayed) { 637 f = currState->fault; 638 } 639 640 return f; 641} 642 643unsigned 644TableWalker::adjustTableSizeAArch64(unsigned tsz) 645{ 646 if (tsz < 25) 647 return 25; 648 if (tsz > 48) 649 return 48; 650 return tsz; 651} 652 653bool 654TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange) 655{ 656 return (currPhysAddrRange != MaxPhysAddrRange && 657 bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange)); 658} 659 660Fault 661TableWalker::processWalkAArch64() 662{ 663 assert(currState->aarch64); 664 665 DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n", 666 currState->vaddr_tainted, currState->tcr); 667 668 static const GrainSize GrainMapDefault[] = 669 { Grain4KB, Grain64KB, Grain16KB, ReservedGrain }; 670 static const GrainSize GrainMap_EL1_tg1[] = 671 { ReservedGrain, Grain16KB, Grain4KB, Grain64KB }; 672 673 // Determine TTBR, table size, granule size and phys. address range 674 Addr ttbr = 0; 675 int tsz = 0, ps = 0; 676 GrainSize tg = Grain4KB; // grain size computed from tg* field 677 bool fault = false; 678 switch (currState->el) { 679 case EL0: 680 case EL1: 681 switch (bits(currState->vaddr, 63,48)) { 682 case 0: 683 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n"); 684 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1); 685 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz); 686 tg = GrainMapDefault[currState->tcr.tg0]; 687 if (bits(currState->vaddr, 63, tsz) != 0x0 || 688 currState->tcr.epd0) 689 fault = true; 690 break; 691 case 0xffff: 692 DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n"); 693 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1); 694 tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz); 695 tg = GrainMap_EL1_tg1[currState->tcr.tg1]; 696 if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) || 697 currState->tcr.epd1) 698 fault = true; 699 break; 700 default: 701 // top two bytes must be all 0s or all 1s, else invalid addr 702 fault = true; 703 } 704 ps = currState->tcr.ips; 705 break; 706 case EL2: 707 case EL3: 708 switch(bits(currState->vaddr, 63,48)) { 709 case 0: 710 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n"); 711 if (currState->el == EL2) 712 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2); 713 else 714 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3); 715 tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz); 716 tg = GrainMapDefault[currState->tcr.tg0]; 717 break; 718 default: 719 // invalid addr if top two bytes are not all 0s 720 fault = true; 721 } 722 ps = currState->tcr.ips; 723 break; 724 } 725 726 if (fault) { 727 Fault f; 728 if (currState->isFetch) 729 f = new PrefetchAbort(currState->vaddr_tainted, 730 ArmFault::TranslationLL + L0, isStage2, 731 ArmFault::LpaeTran); 732 else 733 f = new DataAbort(currState->vaddr_tainted, 734 TlbEntry::DomainType::NoAccess, 735 currState->isWrite, 736 ArmFault::TranslationLL + L0, 737 isStage2, ArmFault::LpaeTran); 738 739 if (currState->timing) { 740 pending = false; 741 nextWalk(currState->tc); 742 currState = NULL; 743 } else { 744 currState->tc = NULL; 745 currState->req = NULL; 746 } 747 return f; 748 749 } 750 751 if (tg == ReservedGrain) { 752 warn_once("Reserved granule size requested; gem5's IMPLEMENTATION " 753 "DEFINED behavior takes this to mean 4KB granules\n"); 754 tg = Grain4KB; 755 } 756 757 int stride = tg - 3; 758 LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS; 759 760 // Determine starting lookup level 761 // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library 762 // in ARM DDI 0487A. These table values correspond to the cascading tests 763 // to compute the lookup level and are of the form 764 // (grain_size + N*stride), for N = {1, 2, 3}. 765 // A value of 64 will never succeed and a value of 0 will always succeed. 766 { 767 struct GrainMap { 768 GrainSize grain_size; 769 unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS]; 770 }; 771 static const GrainMap GM[] = { 772 { Grain4KB, { 39, 30, 0, 0 } }, 773 { Grain16KB, { 47, 36, 25, 0 } }, 774 { Grain64KB, { 64, 42, 29, 0 } } 775 }; 776 777 const unsigned *lookup = NULL; // points to a lookup_level_cutoff 778 779 for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[] 780 if (tg == GM[i].grain_size) { 781 lookup = GM[i].lookup_level_cutoff; 782 break; 783 } 784 } 785 assert(lookup); 786 787 for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) { 788 if (tsz > lookup[L]) { 789 start_lookup_level = (LookupLevel) L; 790 break; 791 } 792 } 793 panic_if(start_lookup_level == MAX_LOOKUP_LEVELS, 794 "Table walker couldn't find lookup level\n"); 795 } 796 797 // Determine table base address 798 int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg; 799 Addr base_addr = mbits(ttbr, 47, base_addr_lo); 800 801 // Determine physical address size and raise an Address Size Fault if 802 // necessary 803 int pa_range = decodePhysAddrRange64(ps); 804 // Clamp to lower limit 805 if (pa_range > physAddrRange) 806 currState->physAddrRange = physAddrRange; 807 else 808 currState->physAddrRange = pa_range; 809 if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) { 810 DPRINTF(TLB, "Address size fault before any lookup\n"); 811 Fault f; 812 if (currState->isFetch) 813 f = new PrefetchAbort(currState->vaddr_tainted, 814 ArmFault::AddressSizeLL + start_lookup_level, 815 isStage2, 816 ArmFault::LpaeTran); 817 else 818 f = new DataAbort(currState->vaddr_tainted, 819 TlbEntry::DomainType::NoAccess, 820 currState->isWrite, 821 ArmFault::AddressSizeLL + start_lookup_level, 822 isStage2, 823 ArmFault::LpaeTran); 824 825 826 if (currState->timing) { 827 pending = false; 828 nextWalk(currState->tc); 829 currState = NULL; 830 } else { 831 currState->tc = NULL; 832 currState->req = NULL; 833 } 834 return f; 835 836 } 837 838 // Determine descriptor address 839 Addr desc_addr = base_addr | 840 (bits(currState->vaddr, tsz - 1, 841 stride * (3 - start_lookup_level) + tg) << 3); 842 843 // Trickbox address check 844 Fault f = tlb->walkTrickBoxCheck(desc_addr, currState->isSecure, 845 currState->vaddr, sizeof(uint64_t), currState->isFetch, 846 currState->isWrite, TlbEntry::DomainType::NoAccess, 847 start_lookup_level); 848 if (f) { 849 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted); 850 if (currState->timing) { 851 pending = false; 852 nextWalk(currState->tc); 853 currState = NULL; 854 } else { 855 currState->tc = NULL; 856 currState->req = NULL; 857 } 858 return f; 859 } 860 861 Request::Flags flag = 0; 862 if (currState->sctlr.c == 0) { 863 flag = Request::UNCACHEABLE; 864 } 865 866 currState->longDesc.lookupLevel = start_lookup_level; 867 currState->longDesc.aarch64 = true; 868 currState->longDesc.grainSize = tg; 869 870 if (currState->timing) { 871 Event *event; 872 switch (start_lookup_level) { 873 case L0: 874 event = (Event *) &doL0LongDescEvent; 875 break; 876 case L1: 877 event = (Event *) &doL1LongDescEvent; 878 break; 879 case L2: 880 event = (Event *) &doL2LongDescEvent; 881 break; 882 case L3: 883 event = (Event *) &doL3LongDescEvent; 884 break; 885 default: 886 panic("Invalid table lookup level"); 887 break; 888 } 889 port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t), event, 890 (uint8_t*) &currState->longDesc.data, 891 currState->tc->getCpuPtr()->clockPeriod(), flag); 892 DPRINTF(TLBVerbose, 893 "Adding to walker fifo: queue size before adding: %d\n", 894 stateQueues[start_lookup_level].size()); 895 stateQueues[start_lookup_level].push_back(currState); 896 currState = NULL; 897 } else if (!currState->functional) { 898 port.dmaAction(MemCmd::ReadReq, desc_addr, sizeof(uint64_t), 899 NULL, (uint8_t*) &currState->longDesc.data, 900 currState->tc->getCpuPtr()->clockPeriod(), flag); 901 doLongDescriptor(); 902 f = currState->fault; 903 } else { 904 RequestPtr req = new Request(desc_addr, sizeof(uint64_t), flag, 905 masterId); 906 PacketPtr pkt = new Packet(req, MemCmd::ReadReq); 907 pkt->dataStatic((uint8_t*) &currState->longDesc.data); 908 port.sendFunctional(pkt); 909 doLongDescriptor(); 910 delete req; 911 delete pkt; 912 f = currState->fault; 913 } 914 915 return f; 916} 917 918void 919TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr, 920 uint8_t texcb, bool s) 921{ 922 // Note: tc and sctlr local variables are hiding tc and sctrl class 923 // variables 924 DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s); 925 te.shareable = false; // default value 926 te.nonCacheable = false; 927 te.outerShareable = false; 928 if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) { 929 switch(texcb) { 930 case 0: // Stongly-ordered 931 te.nonCacheable = true; 932 te.mtype = TlbEntry::MemoryType::StronglyOrdered; 933 te.shareable = true; 934 te.innerAttrs = 1; 935 te.outerAttrs = 0; 936 break; 937 case 1: // Shareable Device 938 te.nonCacheable = true; 939 te.mtype = TlbEntry::MemoryType::Device; 940 te.shareable = true; 941 te.innerAttrs = 3; 942 te.outerAttrs = 0; 943 break; 944 case 2: // Outer and Inner Write-Through, no Write-Allocate 945 te.mtype = TlbEntry::MemoryType::Normal; 946 te.shareable = s; 947 te.innerAttrs = 6; 948 te.outerAttrs = bits(texcb, 1, 0); 949 break; 950 case 3: // Outer and Inner Write-Back, no Write-Allocate 951 te.mtype = TlbEntry::MemoryType::Normal; 952 te.shareable = s; 953 te.innerAttrs = 7; 954 te.outerAttrs = bits(texcb, 1, 0); 955 break; 956 case 4: // Outer and Inner Non-cacheable 957 te.nonCacheable = true; 958 te.mtype = TlbEntry::MemoryType::Normal; 959 te.shareable = s; 960 te.innerAttrs = 0; 961 te.outerAttrs = bits(texcb, 1, 0); 962 break; 963 case 5: // Reserved 964 panic("Reserved texcb value!\n"); 965 break; 966 case 6: // Implementation Defined 967 panic("Implementation-defined texcb value!\n"); 968 break; 969 case 7: // Outer and Inner Write-Back, Write-Allocate 970 te.mtype = TlbEntry::MemoryType::Normal; 971 te.shareable = s; 972 te.innerAttrs = 5; 973 te.outerAttrs = 1; 974 break; 975 case 8: // Non-shareable Device 976 te.nonCacheable = true; 977 te.mtype = TlbEntry::MemoryType::Device; 978 te.shareable = false; 979 te.innerAttrs = 3; 980 te.outerAttrs = 0; 981 break; 982 case 9 ... 15: // Reserved 983 panic("Reserved texcb value!\n"); 984 break; 985 case 16 ... 31: // Cacheable Memory 986 te.mtype = TlbEntry::MemoryType::Normal; 987 te.shareable = s; 988 if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0) 989 te.nonCacheable = true; 990 te.innerAttrs = bits(texcb, 1, 0); 991 te.outerAttrs = bits(texcb, 3, 2); 992 break; 993 default: 994 panic("More than 32 states for 5 bits?\n"); 995 } 996 } else { 997 assert(tc); 998 PRRR prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, 999 currState->tc, !currState->isSecure)); 1000 NMRR nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, 1001 currState->tc, !currState->isSecure)); 1002 DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr); 1003 uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0; 1004 switch(bits(texcb, 2,0)) { 1005 case 0: 1006 curr_tr = prrr.tr0; 1007 curr_ir = nmrr.ir0; 1008 curr_or = nmrr.or0; 1009 te.outerShareable = (prrr.nos0 == 0); 1010 break; 1011 case 1: 1012 curr_tr = prrr.tr1; 1013 curr_ir = nmrr.ir1; 1014 curr_or = nmrr.or1; 1015 te.outerShareable = (prrr.nos1 == 0); 1016 break; 1017 case 2: 1018 curr_tr = prrr.tr2; 1019 curr_ir = nmrr.ir2; 1020 curr_or = nmrr.or2; 1021 te.outerShareable = (prrr.nos2 == 0); 1022 break; 1023 case 3: 1024 curr_tr = prrr.tr3; 1025 curr_ir = nmrr.ir3; 1026 curr_or = nmrr.or3; 1027 te.outerShareable = (prrr.nos3 == 0); 1028 break; 1029 case 4: 1030 curr_tr = prrr.tr4; 1031 curr_ir = nmrr.ir4; 1032 curr_or = nmrr.or4; 1033 te.outerShareable = (prrr.nos4 == 0); 1034 break; 1035 case 5: 1036 curr_tr = prrr.tr5; 1037 curr_ir = nmrr.ir5; 1038 curr_or = nmrr.or5; 1039 te.outerShareable = (prrr.nos5 == 0); 1040 break; 1041 case 6: 1042 panic("Imp defined type\n"); 1043 case 7: 1044 curr_tr = prrr.tr7; 1045 curr_ir = nmrr.ir7; 1046 curr_or = nmrr.or7; 1047 te.outerShareable = (prrr.nos7 == 0); 1048 break; 1049 } 1050 1051 switch(curr_tr) { 1052 case 0: 1053 DPRINTF(TLBVerbose, "StronglyOrdered\n"); 1054 te.mtype = TlbEntry::MemoryType::StronglyOrdered; 1055 te.nonCacheable = true; 1056 te.innerAttrs = 1; 1057 te.outerAttrs = 0; 1058 te.shareable = true; 1059 break; 1060 case 1: 1061 DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n", 1062 prrr.ds1, prrr.ds0, s); 1063 te.mtype = TlbEntry::MemoryType::Device; 1064 te.nonCacheable = true; 1065 te.innerAttrs = 3; 1066 te.outerAttrs = 0; 1067 if (prrr.ds1 && s) 1068 te.shareable = true; 1069 if (prrr.ds0 && !s) 1070 te.shareable = true; 1071 break; 1072 case 2: 1073 DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n", 1074 prrr.ns1, prrr.ns0, s); 1075 te.mtype = TlbEntry::MemoryType::Normal; 1076 if (prrr.ns1 && s) 1077 te.shareable = true; 1078 if (prrr.ns0 && !s) 1079 te.shareable = true; 1080 break; 1081 case 3: 1082 panic("Reserved type"); 1083 } 1084 1085 if (te.mtype == TlbEntry::MemoryType::Normal){ 1086 switch(curr_ir) { 1087 case 0: 1088 te.nonCacheable = true; 1089 te.innerAttrs = 0; 1090 break; 1091 case 1: 1092 te.innerAttrs = 5; 1093 break; 1094 case 2: 1095 te.innerAttrs = 6; 1096 break; 1097 case 3: 1098 te.innerAttrs = 7; 1099 break; 1100 } 1101 1102 switch(curr_or) { 1103 case 0: 1104 te.nonCacheable = true; 1105 te.outerAttrs = 0; 1106 break; 1107 case 1: 1108 te.outerAttrs = 1; 1109 break; 1110 case 2: 1111 te.outerAttrs = 2; 1112 break; 1113 case 3: 1114 te.outerAttrs = 3; 1115 break; 1116 } 1117 } 1118 } 1119 DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, " 1120 "outerAttrs: %d\n", 1121 te.shareable, te.innerAttrs, te.outerAttrs); 1122 te.setAttributes(false); 1123} 1124 1125void 1126TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te, 1127 LongDescriptor &lDescriptor) 1128{ 1129 assert(_haveLPAE); 1130 1131 uint8_t attr; 1132 uint8_t sh = lDescriptor.sh(); 1133 // Different format and source of attributes if this is a stage 2 1134 // translation 1135 if (isStage2) { 1136 attr = lDescriptor.memAttr(); 1137 uint8_t attr_3_2 = (attr >> 2) & 0x3; 1138 uint8_t attr_1_0 = attr & 0x3; 1139 1140 DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh); 1141 1142 if (attr_3_2 == 0) { 1143 te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered 1144 : TlbEntry::MemoryType::Device; 1145 te.outerAttrs = 0; 1146 te.innerAttrs = attr_1_0 == 0 ? 1 : 3; 1147 te.nonCacheable = true; 1148 } else { 1149 te.mtype = TlbEntry::MemoryType::Normal; 1150 te.outerAttrs = attr_3_2 == 1 ? 0 : 1151 attr_3_2 == 2 ? 2 : 1; 1152 te.innerAttrs = attr_1_0 == 1 ? 0 : 1153 attr_1_0 == 2 ? 6 : 5; 1154 te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1); 1155 } 1156 } else { 1157 uint8_t attrIndx = lDescriptor.attrIndx(); 1158 1159 // LPAE always uses remapping of memory attributes, irrespective of the 1160 // value of SCTLR.TRE 1161 MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0; 1162 int reg_as_int = flattenMiscRegNsBanked(reg, currState->tc, 1163 !currState->isSecure); 1164 uint32_t mair = currState->tc->readMiscReg(reg_as_int); 1165 attr = (mair >> (8 * (attrIndx % 4))) & 0xff; 1166 uint8_t attr_7_4 = bits(attr, 7, 4); 1167 uint8_t attr_3_0 = bits(attr, 3, 0); 1168 DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr); 1169 1170 // Note: the memory subsystem only cares about the 'cacheable' memory 1171 // attribute. The other attributes are only used to fill the PAR register 1172 // accordingly to provide the illusion of full support 1173 te.nonCacheable = false; 1174 1175 switch (attr_7_4) { 1176 case 0x0: 1177 // Strongly-ordered or Device memory 1178 if (attr_3_0 == 0x0) 1179 te.mtype = TlbEntry::MemoryType::StronglyOrdered; 1180 else if (attr_3_0 == 0x4) 1181 te.mtype = TlbEntry::MemoryType::Device; 1182 else 1183 panic("Unpredictable behavior\n"); 1184 te.nonCacheable = true; 1185 te.outerAttrs = 0; 1186 break; 1187 case 0x4: 1188 // Normal memory, Outer Non-cacheable 1189 te.mtype = TlbEntry::MemoryType::Normal; 1190 te.outerAttrs = 0; 1191 if (attr_3_0 == 0x4) 1192 // Inner Non-cacheable 1193 te.nonCacheable = true; 1194 else if (attr_3_0 < 0x8) 1195 panic("Unpredictable behavior\n"); 1196 break; 1197 case 0x8: 1198 case 0x9: 1199 case 0xa: 1200 case 0xb: 1201 case 0xc: 1202 case 0xd: 1203 case 0xe: 1204 case 0xf: 1205 if (attr_7_4 & 0x4) { 1206 te.outerAttrs = (attr_7_4 & 1) ? 1 : 3; 1207 } else { 1208 te.outerAttrs = 0x2; 1209 } 1210 // Normal memory, Outer Cacheable 1211 te.mtype = TlbEntry::MemoryType::Normal; 1212 if (attr_3_0 != 0x4 && attr_3_0 < 0x8) 1213 panic("Unpredictable behavior\n"); 1214 break; 1215 default: 1216 panic("Unpredictable behavior\n"); 1217 break; 1218 } 1219 1220 switch (attr_3_0) { 1221 case 0x0: 1222 te.innerAttrs = 0x1; 1223 break; 1224 case 0x4: 1225 te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0; 1226 break; 1227 case 0x8: 1228 case 0x9: 1229 case 0xA: 1230 case 0xB: 1231 te.innerAttrs = 6; 1232 break; 1233 case 0xC: 1234 case 0xD: 1235 case 0xE: 1236 case 0xF: 1237 te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7; 1238 break; 1239 default: 1240 panic("Unpredictable behavior\n"); 1241 break; 1242 } 1243 } 1244 1245 te.outerShareable = sh == 2; 1246 te.shareable = (sh & 0x2) ? true : false; 1247 te.setAttributes(true); 1248 te.attributes |= (uint64_t) attr << 56; 1249} 1250 1251void 1252TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te, uint8_t attrIndx, 1253 uint8_t sh) 1254{ 1255 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh); 1256 1257 // Select MAIR 1258 uint64_t mair; 1259 switch (currState->el) { 1260 case EL0: 1261 case EL1: 1262 mair = tc->readMiscReg(MISCREG_MAIR_EL1); 1263 break; 1264 case EL2: 1265 mair = tc->readMiscReg(MISCREG_MAIR_EL2); 1266 break; 1267 case EL3: 1268 mair = tc->readMiscReg(MISCREG_MAIR_EL3); 1269 break; 1270 default: 1271 panic("Invalid exception level"); 1272 break; 1273 } 1274 1275 // Select attributes 1276 uint8_t attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx); 1277 uint8_t attr_lo = bits(attr, 3, 0); 1278 uint8_t attr_hi = bits(attr, 7, 4); 1279 1280 // Memory type 1281 te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal; 1282 1283 // Cacheability 1284 te.nonCacheable = false; 1285 if (te.mtype == TlbEntry::MemoryType::Device || // Device memory 1286 attr_hi == 0x8 || // Normal memory, Outer Non-cacheable 1287 attr_lo == 0x8) { // Normal memory, Inner Non-cacheable 1288 te.nonCacheable = true; 1289 } 1290 1291 te.shareable = sh == 2; 1292 te.outerShareable = (sh & 0x2) ? true : false; 1293 // Attributes formatted according to the 64-bit PAR 1294 te.attributes = ((uint64_t) attr << 56) | 1295 (1 << 11) | // LPAE bit 1296 (te.ns << 9) | // NS bit 1297 (sh << 7); 1298} 1299 1300void 1301TableWalker::doL1Descriptor() 1302{ 1303 if (currState->fault != NoFault) { 1304 return; 1305 } 1306 1307 DPRINTF(TLB, "L1 descriptor for %#x is %#x\n", 1308 currState->vaddr_tainted, currState->l1Desc.data); 1309 TlbEntry te; 1310 1311 switch (currState->l1Desc.type()) { 1312 case L1Descriptor::Ignore: 1313 case L1Descriptor::Reserved: 1314 if (!currState->timing) { 1315 currState->tc = NULL; 1316 currState->req = NULL; 1317 } 1318 DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n"); 1319 if (currState->isFetch) 1320 currState->fault = 1321 new PrefetchAbort(currState->vaddr_tainted, 1322 ArmFault::TranslationLL + L1, 1323 isStage2, 1324 ArmFault::VmsaTran); 1325 else 1326 currState->fault = 1327 new DataAbort(currState->vaddr_tainted, 1328 TlbEntry::DomainType::NoAccess, 1329 currState->isWrite, 1330 ArmFault::TranslationLL + L1, isStage2, 1331 ArmFault::VmsaTran); 1332 return; 1333 case L1Descriptor::Section: 1334 if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) { 1335 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is 1336 * enabled if set, do l1.Desc.setAp0() instead of generating 1337 * AccessFlag0 1338 */ 1339 1340 currState->fault = new DataAbort(currState->vaddr_tainted, 1341 currState->l1Desc.domain(), 1342 currState->isWrite, 1343 ArmFault::AccessFlagLL + L1, 1344 isStage2, 1345 ArmFault::VmsaTran); 1346 } 1347 if (currState->l1Desc.supersection()) { 1348 panic("Haven't implemented supersections\n"); 1349 } 1350 insertTableEntry(currState->l1Desc, false); 1351 return; 1352 case L1Descriptor::PageTable: 1353 { 1354 Addr l2desc_addr; 1355 l2desc_addr = currState->l1Desc.l2Addr() | 1356 (bits(currState->vaddr, 19, 12) << 2); 1357 DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n", 1358 l2desc_addr, currState->isSecure ? "s" : "ns"); 1359 1360 // Trickbox address check 1361 currState->fault = tlb->walkTrickBoxCheck( 1362 l2desc_addr, currState->isSecure, currState->vaddr, 1363 sizeof(uint32_t), currState->isFetch, currState->isWrite, 1364 currState->l1Desc.domain(), L2); 1365 1366 if (currState->fault) { 1367 if (!currState->timing) { 1368 currState->tc = NULL; 1369 currState->req = NULL; 1370 } 1371 return; 1372 } 1373 1374 Request::Flags flag = 0; 1375 if (currState->isSecure) 1376 flag.set(Request::SECURE); 1377 1378 bool delayed; 1379 delayed = fetchDescriptor(l2desc_addr, 1380 (uint8_t*)&currState->l2Desc.data, 1381 sizeof(uint32_t), flag, -1, &doL2DescEvent, 1382 &TableWalker::doL2Descriptor); 1383 if (delayed) { 1384 currState->delayed = true; 1385 } 1386 1387 return; 1388 } 1389 default: 1390 panic("A new type in a 2 bit field?\n"); 1391 } 1392} 1393 1394void 1395TableWalker::doLongDescriptor() 1396{ 1397 if (currState->fault != NoFault) { 1398 return; 1399 } 1400 1401 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n", 1402 currState->longDesc.lookupLevel, currState->vaddr_tainted, 1403 currState->longDesc.data, 1404 currState->aarch64 ? "AArch64" : "long-desc."); 1405 1406 if ((currState->longDesc.type() == LongDescriptor::Block) || 1407 (currState->longDesc.type() == LongDescriptor::Page)) { 1408 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, " 1409 "xn: %d, ap: %d, af: %d, type: %d\n", 1410 currState->longDesc.lookupLevel, 1411 currState->longDesc.data, 1412 currState->longDesc.pxn(), 1413 currState->longDesc.xn(), 1414 currState->longDesc.ap(), 1415 currState->longDesc.af(), 1416 currState->longDesc.type()); 1417 } else { 1418 DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n", 1419 currState->longDesc.lookupLevel, 1420 currState->longDesc.data, 1421 currState->longDesc.type()); 1422 } 1423 1424 TlbEntry te; 1425 1426 switch (currState->longDesc.type()) { 1427 case LongDescriptor::Invalid: 1428 if (!currState->timing) { 1429 currState->tc = NULL; 1430 currState->req = NULL; 1431 } 1432 1433 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n", 1434 currState->longDesc.lookupLevel, 1435 ArmFault::TranslationLL + currState->longDesc.lookupLevel); 1436 if (currState->isFetch) 1437 currState->fault = new PrefetchAbort( 1438 currState->vaddr_tainted, 1439 ArmFault::TranslationLL + currState->longDesc.lookupLevel, 1440 isStage2, 1441 ArmFault::LpaeTran); 1442 else 1443 currState->fault = new DataAbort( 1444 currState->vaddr_tainted, 1445 TlbEntry::DomainType::NoAccess, 1446 currState->isWrite, 1447 ArmFault::TranslationLL + currState->longDesc.lookupLevel, 1448 isStage2, 1449 ArmFault::LpaeTran); 1450 return; 1451 case LongDescriptor::Block: 1452 case LongDescriptor::Page: 1453 { 1454 bool fault = false; 1455 bool aff = false; 1456 // Check for address size fault 1457 if (checkAddrSizeFaultAArch64( 1458 mbits(currState->longDesc.data, MaxPhysAddrRange - 1, 1459 currState->longDesc.offsetBits()), 1460 currState->physAddrRange)) { 1461 fault = true; 1462 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n", 1463 currState->longDesc.lookupLevel); 1464 // Check for access fault 1465 } else if (currState->longDesc.af() == 0) { 1466 fault = true; 1467 DPRINTF(TLB, "L%d descriptor causing Access Fault\n", 1468 currState->longDesc.lookupLevel); 1469 aff = true; 1470 } 1471 if (fault) { 1472 if (currState->isFetch) 1473 currState->fault = new PrefetchAbort( 1474 currState->vaddr_tainted, 1475 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) + 1476 currState->longDesc.lookupLevel, 1477 isStage2, 1478 ArmFault::LpaeTran); 1479 else 1480 currState->fault = new DataAbort( 1481 currState->vaddr_tainted, 1482 TlbEntry::DomainType::NoAccess, currState->isWrite, 1483 (aff ? ArmFault::AccessFlagLL : ArmFault::AddressSizeLL) + 1484 currState->longDesc.lookupLevel, 1485 isStage2, 1486 ArmFault::LpaeTran); 1487 } else { 1488 insertTableEntry(currState->longDesc, true); 1489 } 1490 } 1491 return; 1492 case LongDescriptor::Table: 1493 { 1494 // Set hierarchical permission flags 1495 currState->secureLookup = currState->secureLookup && 1496 currState->longDesc.secureTable(); 1497 currState->rwTable = currState->rwTable && 1498 currState->longDesc.rwTable(); 1499 currState->userTable = currState->userTable && 1500 currState->longDesc.userTable(); 1501 currState->xnTable = currState->xnTable || 1502 currState->longDesc.xnTable(); 1503 currState->pxnTable = currState->pxnTable || 1504 currState->longDesc.pxnTable(); 1505 1506 // Set up next level lookup 1507 Addr next_desc_addr = currState->longDesc.nextDescAddr( 1508 currState->vaddr); 1509 1510 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n", 1511 currState->longDesc.lookupLevel, 1512 currState->longDesc.lookupLevel + 1, 1513 next_desc_addr, 1514 currState->secureLookup ? "s" : "ns"); 1515 1516 // Check for address size fault 1517 if (currState->aarch64 && checkAddrSizeFaultAArch64( 1518 next_desc_addr, currState->physAddrRange)) { 1519 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n", 1520 currState->longDesc.lookupLevel); 1521 if (currState->isFetch) 1522 currState->fault = new PrefetchAbort( 1523 currState->vaddr_tainted, 1524 ArmFault::AddressSizeLL 1525 + currState->longDesc.lookupLevel, 1526 isStage2, 1527 ArmFault::LpaeTran); 1528 else 1529 currState->fault = new DataAbort( 1530 currState->vaddr_tainted, 1531 TlbEntry::DomainType::NoAccess, currState->isWrite, 1532 ArmFault::AddressSizeLL 1533 + currState->longDesc.lookupLevel, 1534 isStage2, 1535 ArmFault::LpaeTran); 1536 return; 1537 } 1538 1539 // Trickbox address check 1540 currState->fault = tlb->walkTrickBoxCheck( 1541 next_desc_addr, currState->vaddr, 1542 currState->vaddr, sizeof(uint64_t), 1543 currState->isFetch, currState->isWrite, 1544 TlbEntry::DomainType::Client, 1545 toLookupLevel(currState->longDesc.lookupLevel +1)); 1546 1547 if (currState->fault) { 1548 if (!currState->timing) { 1549 currState->tc = NULL; 1550 currState->req = NULL; 1551 } 1552 return; 1553 } 1554 1555 Request::Flags flag = 0; 1556 if (currState->secureLookup) 1557 flag.set(Request::SECURE); 1558 1559 currState->longDesc.lookupLevel = 1560 (LookupLevel) (currState->longDesc.lookupLevel + 1); 1561 Event *event = NULL; 1562 switch (currState->longDesc.lookupLevel) { 1563 case L1: 1564 assert(currState->aarch64); 1565 event = &doL1LongDescEvent; 1566 break; 1567 case L2: 1568 event = &doL2LongDescEvent; 1569 break; 1570 case L3: 1571 event = &doL3LongDescEvent; 1572 break; 1573 default: 1574 panic("Wrong lookup level in table walk\n"); 1575 break; 1576 } 1577 1578 bool delayed; 1579 delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data, 1580 sizeof(uint64_t), flag, -1, event, 1581 &TableWalker::doLongDescriptor); 1582 if (delayed) { 1583 currState->delayed = true; 1584 } 1585 } 1586 return; 1587 default: 1588 panic("A new type in a 2 bit field?\n"); 1589 } 1590} 1591 1592void 1593TableWalker::doL2Descriptor() 1594{ 1595 if (currState->fault != NoFault) { 1596 return; 1597 } 1598 1599 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n", 1600 currState->vaddr_tainted, currState->l2Desc.data); 1601 TlbEntry te; 1602 1603 if (currState->l2Desc.invalid()) { 1604 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n"); 1605 if (!currState->timing) { 1606 currState->tc = NULL; 1607 currState->req = NULL; 1608 } 1609 if (currState->isFetch) 1610 currState->fault = 1611 new PrefetchAbort(currState->vaddr_tainted, 1612 ArmFault::TranslationLL + L2, 1613 isStage2, 1614 ArmFault::VmsaTran); 1615 else 1616 currState->fault = 1617 new DataAbort(currState->vaddr_tainted, currState->l1Desc.domain(), 1618 currState->isWrite, ArmFault::TranslationLL + L2, 1619 isStage2, 1620 ArmFault::VmsaTran); 1621 return; 1622 } 1623 1624 if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) { 1625 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled 1626 * if set, do l2.Desc.setAp0() instead of generating AccessFlag0 1627 */ 1628 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n", 1629 currState->sctlr.afe, currState->l2Desc.ap()); 1630 1631 currState->fault = 1632 new DataAbort(currState->vaddr_tainted, 1633 TlbEntry::DomainType::NoAccess, currState->isWrite, 1634 ArmFault::AccessFlagLL + L2, isStage2, 1635 ArmFault::VmsaTran); 1636 } 1637 1638 insertTableEntry(currState->l2Desc, false); 1639} 1640 1641void 1642TableWalker::doL1DescriptorWrapper() 1643{ 1644 currState = stateQueues[L1].front(); 1645 currState->delayed = false; 1646 // if there's a stage2 translation object we don't need it any more 1647 if (currState->stage2Tran) { 1648 delete currState->stage2Tran; 1649 currState->stage2Tran = NULL; 1650 } 1651 1652 1653 DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data); 1654 DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",currState->l1Desc.data); 1655 1656 DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted); 1657 doL1Descriptor(); 1658 1659 stateQueues[L1].pop_front(); 1660 completeDrain(); 1661 // Check if fault was generated 1662 if (currState->fault != NoFault) { 1663 currState->transState->finish(currState->fault, currState->req, 1664 currState->tc, currState->mode); 1665 1666 pending = false; 1667 nextWalk(currState->tc); 1668 1669 currState->req = NULL; 1670 currState->tc = NULL; 1671 currState->delayed = false; 1672 delete currState; 1673 } 1674 else if (!currState->delayed) { 1675 // delay is not set so there is no L2 to do 1676 // Don't finish the translation if a stage 2 look up is underway 1677 if (!currState->doingStage2) { 1678 DPRINTF(TLBVerbose, "calling translateTiming again\n"); 1679 currState->fault = tlb->translateTiming(currState->req, currState->tc, 1680 currState->transState, currState->mode); 1681 } 1682 1683 pending = false; 1684 nextWalk(currState->tc); 1685 1686 currState->req = NULL; 1687 currState->tc = NULL; 1688 currState->delayed = false; 1689 delete currState; 1690 } else { 1691 // need to do L2 descriptor 1692 stateQueues[L2].push_back(currState); 1693 } 1694 currState = NULL; 1695} 1696 1697void 1698TableWalker::doL2DescriptorWrapper() 1699{ 1700 currState = stateQueues[L2].front(); 1701 assert(currState->delayed); 1702 // if there's a stage2 translation object we don't need it any more 1703 if (currState->stage2Tran) { 1704 delete currState->stage2Tran; 1705 currState->stage2Tran = NULL; 1706 } 1707 1708 DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n", 1709 currState->vaddr_tainted); 1710 doL2Descriptor(); 1711 1712 // Check if fault was generated 1713 if (currState->fault != NoFault) { 1714 currState->transState->finish(currState->fault, currState->req, 1715 currState->tc, currState->mode); 1716 } 1717 else { 1718 // Don't finish the translation if a stage 2 look up is underway 1719 if (!currState->doingStage2) { 1720 DPRINTF(TLBVerbose, "calling translateTiming again\n"); 1721 currState->fault = tlb->translateTiming(currState->req, 1722 currState->tc, currState->transState, currState->mode); 1723 } 1724 } 1725 1726 1727 stateQueues[L2].pop_front(); 1728 completeDrain(); 1729 pending = false; 1730 nextWalk(currState->tc); 1731 1732 currState->req = NULL; 1733 currState->tc = NULL; 1734 currState->delayed = false; 1735 1736 delete currState; 1737 currState = NULL; 1738} 1739 1740void 1741TableWalker::doL0LongDescriptorWrapper() 1742{ 1743 doLongDescriptorWrapper(L0); 1744} 1745 1746void 1747TableWalker::doL1LongDescriptorWrapper() 1748{ 1749 doLongDescriptorWrapper(L1); 1750} 1751 1752void 1753TableWalker::doL2LongDescriptorWrapper() 1754{ 1755 doLongDescriptorWrapper(L2); 1756} 1757 1758void 1759TableWalker::doL3LongDescriptorWrapper() 1760{ 1761 doLongDescriptorWrapper(L3); 1762} 1763 1764void 1765TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level) 1766{ 1767 currState = stateQueues[curr_lookup_level].front(); 1768 assert(curr_lookup_level == currState->longDesc.lookupLevel); 1769 currState->delayed = false; 1770 1771 // if there's a stage2 translation object we don't need it any more 1772 if (currState->stage2Tran) { 1773 delete currState->stage2Tran; 1774 currState->stage2Tran = NULL; 1775 } 1776 1777 DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n", 1778 currState->vaddr_tainted); 1779 doLongDescriptor(); 1780 1781 stateQueues[curr_lookup_level].pop_front(); 1782 1783 if (currState->fault != NoFault) { 1784 // A fault was generated 1785 currState->transState->finish(currState->fault, currState->req, 1786 currState->tc, currState->mode); 1787 1788 pending = false; 1789 nextWalk(currState->tc); 1790 1791 currState->req = NULL; 1792 currState->tc = NULL; 1793 currState->delayed = false; 1794 delete currState; 1795 } else if (!currState->delayed) { 1796 // No additional lookups required 1797 // Don't finish the translation if a stage 2 look up is underway 1798 if (!currState->doingStage2) { 1799 DPRINTF(TLBVerbose, "calling translateTiming again\n"); 1800 currState->fault = tlb->translateTiming(currState->req, currState->tc, 1801 currState->transState, 1802 currState->mode); 1803 } 1804 1805 pending = false; 1806 nextWalk(currState->tc); 1807 1808 currState->req = NULL; 1809 currState->tc = NULL; 1810 currState->delayed = false; 1811 delete currState; 1812 } else { 1813 if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1) 1814 panic("Max. number of lookups already reached in table walk\n"); 1815 // Need to perform additional lookups 1816 stateQueues[currState->longDesc.lookupLevel].push_back(currState); 1817 } 1818 currState = NULL; 1819} 1820 1821 1822void 1823TableWalker::nextWalk(ThreadContext *tc) 1824{ 1825 if (pendingQueue.size()) 1826 schedule(doProcessEvent, clockEdge(Cycles(1))); 1827} 1828 1829bool 1830TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes, 1831 Request::Flags flags, int queueIndex, Event *event, 1832 void (TableWalker::*doDescriptor)()) 1833{ 1834 bool isTiming = currState->timing; 1835 1836 // do the requests for the page table descriptors have to go through the 1837 // second stage MMU 1838 if (currState->stage2Req) { 1839 Fault fault; 1840 flags = flags | TLB::MustBeOne; 1841 1842 if (isTiming) { 1843 Stage2MMU::Stage2Translation *tran = new 1844 Stage2MMU::Stage2Translation(*stage2Mmu, data, event, 1845 currState->vaddr); 1846 currState->stage2Tran = tran; 1847 stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes, 1848 flags, masterId); 1849 fault = tran->fault; 1850 } else { 1851 fault = stage2Mmu->readDataUntimed(currState->tc, 1852 currState->vaddr, descAddr, data, numBytes, flags, masterId, 1853 currState->functional); 1854 } 1855 1856 if (fault != NoFault) { 1857 currState->fault = fault; 1858 } 1859 if (isTiming) { 1860 if (queueIndex >= 0) { 1861 DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n", 1862 stateQueues[queueIndex].size()); 1863 stateQueues[queueIndex].push_back(currState); 1864 currState = NULL; 1865 } 1866 } else { 1867 (this->*doDescriptor)(); 1868 } 1869 } else { 1870 if (isTiming) { 1871 port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data, 1872 currState->tc->getCpuPtr()->clockPeriod(), flags); 1873 if (queueIndex >= 0) { 1874 DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n", 1875 stateQueues[queueIndex].size()); 1876 stateQueues[queueIndex].push_back(currState); 1877 currState = NULL; 1878 } 1879 } else if (!currState->functional) { 1880 port.dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data, 1881 currState->tc->getCpuPtr()->clockPeriod(), flags); 1882 (this->*doDescriptor)(); 1883 } else { 1884 RequestPtr req = new Request(descAddr, numBytes, flags, masterId); 1885 req->taskId(ContextSwitchTaskId::DMA); 1886 PacketPtr pkt = new Packet(req, MemCmd::ReadReq); 1887 pkt->dataStatic(data); 1888 port.sendFunctional(pkt); 1889 (this->*doDescriptor)(); 1890 delete req; 1891 delete pkt; 1892 } 1893 } 1894 return (isTiming); 1895} 1896 1897void 1898TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor) 1899{ 1900 TlbEntry te; 1901 1902 // Create and fill a new page table entry 1903 te.valid = true; 1904 te.longDescFormat = longDescriptor; 1905 te.isHyp = currState->isHyp; 1906 te.asid = currState->asid; 1907 te.vmid = currState->vmid; 1908 te.N = descriptor.offsetBits(); 1909 te.vpn = currState->vaddr >> te.N; 1910 te.size = (1<<te.N) - 1; 1911 te.pfn = descriptor.pfn(); 1912 te.domain = descriptor.domain(); 1913 te.lookupLevel = descriptor.lookupLevel; 1914 te.ns = !descriptor.secure(haveSecurity, currState) || isStage2; 1915 te.nstid = !currState->isSecure; 1916 te.xn = descriptor.xn(); 1917 if (currState->aarch64) 1918 te.el = currState->el; 1919 else 1920 te.el = 1; 1921 1922 // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries 1923 // as global 1924 te.global = descriptor.global(currState) || isStage2; 1925 if (longDescriptor) { 1926 LongDescriptor lDescriptor = 1927 dynamic_cast<LongDescriptor &>(descriptor); 1928 1929 te.xn |= currState->xnTable; 1930 te.pxn = currState->pxnTable || lDescriptor.pxn(); 1931 if (isStage2) { 1932 // this is actually the HAP field, but its stored in the same bit 1933 // possitions as the AP field in a stage 1 translation. 1934 te.hap = lDescriptor.ap(); 1935 } else { 1936 te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) | 1937 (currState->userTable && (descriptor.ap() & 0x1)); 1938 } 1939 if (currState->aarch64) 1940 memAttrsAArch64(currState->tc, te, currState->longDesc.attrIndx(), 1941 currState->longDesc.sh()); 1942 else 1943 memAttrsLPAE(currState->tc, te, lDescriptor); 1944 } else { 1945 te.ap = descriptor.ap(); 1946 memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(), 1947 descriptor.shareable()); 1948 } 1949 1950 // Debug output 1951 DPRINTF(TLB, descriptor.dbgHeader().c_str()); 1952 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n", 1953 te.N, te.pfn, te.size, te.global, te.valid); 1954 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d " 1955 "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn, 1956 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp, 1957 te.nonCacheable, te.ns); 1958 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n", 1959 descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()), 1960 descriptor.getRawData()); 1961 1962 // Insert the entry into the TLB 1963 tlb->insert(currState->vaddr, te); 1964 if (!currState->timing) { 1965 currState->tc = NULL; 1966 currState->req = NULL; 1967 } 1968} 1969 1970ArmISA::TableWalker * 1971ArmTableWalkerParams::create() 1972{ 1973 return new ArmISA::TableWalker(this); 1974} 1975 1976LookupLevel 1977TableWalker::toLookupLevel(uint8_t lookup_level_as_int) 1978{ 1979 switch (lookup_level_as_int) { 1980 case L1: 1981 return L1; 1982 case L2: 1983 return L2; 1984 case L3: 1985 return L3; 1986 default: 1987 panic("Invalid lookup level conversion"); 1988 } 1989} 1990