/*
 * Copyright (c) 2010, 2012-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Giacomo Gabrielli
 */
#include "arch/arm/table_walker.hh"

#include <memory>

#include "arch/arm/faults.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/tlb.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/Drain.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "dev/dma_device.hh"
#include "sim/system.hh"

using namespace ArmISA;

TableWalker::TableWalker(const Params *p)
    : ClockedObject(p),
      stage2Mmu(NULL), port(NULL), masterId(Request::invldMasterId),
      isStage2(p->is_stage2), tlb(NULL),
      currState(NULL), pending(false),
      numSquashable(p->num_squash_per_cycle),
      pendingReqs(0),
      pendingChangeTick(curTick()),
      doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
      doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
      doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
      doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
      doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
      doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
      LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
                             &doL2LongDescEvent, &doL3LongDescEvent },
      doProcessEvent([this]{ processWalkWrapper(); }, name())
{
    sctlr = 0;

    // Cache system-level properties
    if (FullSystem) {
        ArmSystem *armSys = dynamic_cast<ArmSystem *>(p->sys);
        assert(armSys);
        haveSecurity = armSys->haveSecurity();
        _haveLPAE = armSys->haveLPAE();
        _haveVirtualization = armSys->haveVirtualization();
        physAddrRange = armSys->physAddrRange();
        _haveLargeAsid64 = armSys->haveLargeAsid64();
    } else {
        haveSecurity = _haveLPAE = _haveVirtualization = false;
        _haveLargeAsid64 = false;
        physAddrRange = 32;
    }
}

TableWalker::~TableWalker()
{
}

void
TableWalker::setMMU(Stage2MMU *m, MasterID master_id)
{
    stage2Mmu = m;
    port = &m->getDMAPort();
    masterId = master_id;
}

void
TableWalker::init()
{
    fatal_if(!stage2Mmu, "Table walker must have a valid stage-2 MMU\n");
    fatal_if(!port, "Table walker must have a valid port\n");
    fatal_if(!tlb, "Table walker must have a valid TLB\n");
}

Port &
TableWalker::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "port") {
        if (!isStage2) {
            return *port;
        } else {
            fatal("Cannot access table walker port through stage-two walker\n");
        }
    }
    return ClockedObject::getPort(if_name, idx);
}

TableWalker::WalkerState::WalkerState() :
    tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
    asid(0), vmid(0), isHyp(false), transState(nullptr),
    vaddr(0), vaddr_tainted(0),
    sctlr(0), scr(0), cpsr(0), tcr(0),
    htcr(0), hcr(0), vtcr(0),
    isWrite(false), isFetch(false), isSecure(false),
    secureLookup(false), rwTable(false), userTable(false), xnTable(false),
    pxnTable(false), hpd(false), stage2Req(false),
    stage2Tran(nullptr), timing(false), functional(false),
    mode(BaseTLB::Read), tranType(TLB::NormalTran), l2Desc(l1Desc),
    delayed(false), tableWalker(nullptr)
{
}

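// Drain bookkeeping (summary of the logic below): drain() reports
// DrainState::Draining while any walk is still queued at any lookup level
// or waiting to start, and completeDrain() re-checks the same queues
// whenever a walk finishes so the drain can be signalled as soon as the
// last outstanding walk retires.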
void
TableWalker::completeDrain()
{
    if (drainState() == DrainState::Draining &&
        stateQueues[L0].empty() && stateQueues[L1].empty() &&
        stateQueues[L2].empty() && stateQueues[L3].empty() &&
        pendingQueue.empty()) {

        DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
        signalDrainDone();
    }
}

DrainState
TableWalker::drain()
{
    bool state_queues_not_empty = false;

    for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
        if (!stateQueues[i].empty()) {
            state_queues_not_empty = true;
            break;
        }
    }

    if (state_queues_not_empty || pendingQueue.size()) {
        DPRINTF(Drain, "TableWalker not drained\n");
        return DrainState::Draining;
    } else {
        DPRINTF(Drain, "TableWalker free, no need to drain\n");
        return DrainState::Drained;
    }
}

void
TableWalker::drainResume()
{
    if (params()->sys->isTimingMode() && currState) {
        delete currState;
        currState = NULL;
        pendingChange();
    }
}

Fault
TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
                  uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
                  TLB::Translation *_trans, bool _timing, bool _functional,
                  bool secure, TLB::ArmTranslationType tranType,
                  bool _stage2Req)
{
    assert(!(_functional && _timing));
    ++statWalks;

    WalkerState *savedCurrState = NULL;

    if (!currState && !_functional) {
        // For atomic mode, a new WalkerState instance should only be
        // created once per TLB. For timing mode, a new instance is
        // generated for every TLB miss.
        DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");

        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_functional) {
        // If we are mixing functional mode with timing (or even
        // atomic), we need to be careful and clean up after
        // ourselves to not risk getting into an inconsistent state.
        DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
        savedCurrState = currState;
        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_timing) {
        // This is a translation that was completed and then faulted again
        // because some underlying parameters that affect the translation
        // changed out from under us (e.g. asid). It will either be a
        // misprediction, in which case nothing will happen, or we'll use
        // this fault to re-execute the faulting instruction, which should
        // clean up everything.
        if (currState->vaddr_tainted == _req->getVaddr()) {
            ++statSquashedBefore;
            return std::make_shared<ReExec>();
        }
    }
    pendingChange();

    currState->startTime = curTick();
    currState->tc = _tc;
    // ARM DDI 0487A.f (ARMv8 ARM) pg J8-5672
    // aarch32/translation/translation/AArch32.TranslateAddress dictates
    // even AArch32 EL0 will use AArch64 translation if EL1 is in AArch64.
    if (isStage2) {
        currState->el = EL1;
        currState->aarch64 = ELIs64(_tc, EL2);
    } else {
        currState->el =
            TLB::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType);
        currState->aarch64 =
            ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el);
    }
    currState->transState = _trans;
    currState->req = _req;
    currState->fault = NoFault;
    currState->asid = _asid;
    currState->vmid = _vmid;
    currState->isHyp = _isHyp;
    currState->timing = _timing;
    currState->functional = _functional;
    currState->mode = _mode;
    currState->tranType = tranType;
    currState->isSecure = secure;
    currState->physAddrRange = physAddrRange;

    /** @todo These should be cached or grabbed from cached copies in
        the TLB, all these miscreg reads are expensive */
    currState->vaddr_tainted = currState->req->getVaddr();
    if (currState->aarch64)
        currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
                                            currState->tc, currState->el);
    else
        currState->vaddr = currState->vaddr_tainted;

    if (currState->aarch64) {
        if (isStage2) {
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
            currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR_EL2);
        } else switch (currState->el) {
          case EL0:
          case EL1:
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
            break;
          case EL2:
            assert(_haveVirtualization);
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
            break;
          case EL3:
            assert(haveSecurity);
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
            break;
          default:
            panic("Invalid exception level");
            break;
        }
        currState->hcr = currState->tc->readMiscReg(MISCREG_HCR_EL2);
    } else {
        currState->sctlr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_SCTLR, currState->tc, !currState->isSecure));
        currState->ttbcr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBCR, currState->tc, !currState->isSecure));
        currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
        currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
        currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
    }
    sctlr = currState->sctlr;

    currState->isFetch = (currState->mode == TLB::Execute);
    currState->isWrite = (currState->mode == TLB::Write);

    statRequestOrigin[REQUESTED][currState->isFetch]++;

    currState->stage2Req = _stage2Req && !isStage2;

    bool long_desc_format = currState->aarch64 || _isHyp || isStage2 ||
                            longDescFormatInUse(currState->tc);

    if (long_desc_format) {
        // Helper variables used for hierarchical permissions
        currState->secureLookup = currState->isSecure;
        currState->rwTable = true;
        currState->userTable = true;
        currState->xnTable = false;
        currState->pxnTable = false;

        ++statWalksLongDescriptor;
    } else {
        ++statWalksShortDescriptor;
    }

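    // Dispatch (summary of the code below): atomic and functional walks run
    // to completion inline and return their fault directly; timing walks
    // either start now or, if a walk is already in flight, are parked on
    // pendingQueue and resumed later from processWalkWrapper().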
    if (!currState->timing) {
        Fault fault = NoFault;
        if (currState->aarch64)
            fault = processWalkAArch64();
        else if (long_desc_format)
            fault = processWalkLPAE();
        else
            fault = processWalk();

        // If this was a functional non-timing access restore state to
        // how we found it.
        if (currState->functional) {
            delete currState;
            currState = savedCurrState;
        }
        return fault;
    }

    if (pending || pendingQueue.size()) {
        pendingQueue.push_back(currState);
        currState = NULL;
        pendingChange();
    } else {
        pending = true;
        pendingChange();
        if (currState->aarch64)
            return processWalkAArch64();
        else if (long_desc_format)
            return processWalkLPAE();
        else
            return processWalk();
    }

    return NoFault;
}

void
TableWalker::processWalkWrapper()
{
    assert(!currState);
    assert(pendingQueue.size());
    pendingChange();
    currState = pendingQueue.front();

    // Check if a previous walk filled this request already
    // @TODO Should this always be the TLB or should we look in the stage2 TLB?
    TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
        currState->vmid, currState->isHyp, currState->isSecure, true, false,
        currState->el);

    // Check if we still need to have a walk for this request. If the
    // requesting instruction has been squashed, or a previous walk has
    // filled the TLB with a match, we just want to get rid of the walk. The
    // latter could happen when there are multiple outstanding misses to a
    // single page and a previous request has been successfully translated.
    if (!currState->transState->squashed() && !te) {
        // We've got a valid request, let's process it
        pending = true;
        pendingQueue.pop_front();
        // Keep currState in case one of the processWalk... calls NULLs it
        WalkerState *curr_state_copy = currState;
        Fault f;
        if (currState->aarch64)
            f = processWalkAArch64();
        else if (longDescFormatInUse(currState->tc) ||
                 currState->isHyp || isStage2)
            f = processWalkLPAE();
        else
            f = processWalk();

        if (f != NoFault) {
            curr_state_copy->transState->finish(f, curr_state_copy->req,
                    curr_state_copy->tc, curr_state_copy->mode);

            delete curr_state_copy;
        }
        return;
    }


    // If the instruction that we were translating for has been
    // squashed we shouldn't bother.
    unsigned num_squashed = 0;
    ThreadContext *tc = currState->tc;
    while ((num_squashed < numSquashable) && currState &&
           (currState->transState->squashed() || te)) {
        pendingQueue.pop_front();
        num_squashed++;
        statSquashedBefore++;

        DPRINTF(TLB, "Squashing table walk for address %#x\n",
                currState->vaddr_tainted);

        if (currState->transState->squashed()) {
            // finish the translation which will delete the translation object
            currState->transState->finish(
                std::make_shared<UnimpFault>("Squashed Inst"),
                currState->req, currState->tc, currState->mode);
        } else {
            // translate the request now that we know it will work
            statWalkServiceTime.sample(curTick() - currState->startTime);
            tlb->translateTiming(currState->req, currState->tc,
                                 currState->transState, currState->mode);
        }

        // delete the current request
        delete currState;

        // peek at the next one
        if (pendingQueue.size()) {
            currState = pendingQueue.front();
            te = tlb->lookup(currState->vaddr, currState->asid,
                currState->vmid, currState->isHyp, currState->isSecure, true,
                false, currState->el);
        } else {
            // Terminate the loop, nothing more to do
            currState = NULL;
        }
    }
    pendingChange();

    // if we still have pending translations, schedule more work
    nextWalk(tc);
    currState = NULL;
}

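// Short-descriptor (VMSA) stage 1 walk. The L1 descriptor address computed
// below concatenates the table base with VA bits:
//   l1desc_addr = TTBR[31:14-N] : VA[31-N:20] : '00'
// Illustrative example (values not from the source): with TTBCR.N == 0 and
// VA == 0x87654321, the walker fetches the 4-byte descriptor at
// TTBR[31:14] | (0x876 << 2).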
Fault
TableWalker::processWalk()
{
    Addr ttbr = 0;

    // If translation isn't enabled, we shouldn't be here
    assert(currState->sctlr.m || isStage2);
    const bool is_atomic = currState->req->isAtomic();

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, "
            "bits:%#x\n", currState->vaddr_tainted, currState->ttbcr,
            mbits(currState->vaddr, 31, 32 - currState->ttbcr.n));

    statWalkWaitTime.sample(curTick() - currState->startTime);

    if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
                                          32 - currState->ttbcr.n)) {
        DPRINTF(TLB, " - Selecting TTBR0\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd0) {
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::VmsaTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    is_atomic ? false : currState->isWrite,
                    ArmFault::TranslationLL + L1, isStage2,
                    ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBR0, currState->tc, !currState->isSecure));
    } else {
        DPRINTF(TLB, " - Selecting TTBR1\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (haveSecurity && currState->ttbcr.pd1) {
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::VmsaTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    is_atomic ? false : currState->isWrite,
                    ArmFault::TranslationLL + L1, isStage2,
                    ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBR1, currState->tc, !currState->isSecure));
        currState->ttbcr.n = 0;
    }

    Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
        (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
    DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
            currState->isSecure ? "s" : "ns");

    // Trickbox address check
    Fault f;
    f = testWalk(l1desc_addr, sizeof(uint32_t),
                 TlbEntry::DomainType::NoAccess, L1);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n",
                currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    Request::Flags flag = Request::PT_WALK;
    if (currState->sctlr.c == 0) {
        flag.set(Request::UNCACHEABLE);
    }

    if (currState->isSecure) {
        flag.set(Request::SECURE);
    }

    bool delayed;
    delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
                              sizeof(uint32_t), flag, L1, &doL1DescEvent,
                              &TableWalker::doL1Descriptor);
    if (!delayed) {
        f = currState->fault;
    }

    return f;
}

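// Long-descriptor (LPAE) walk. Outside hyp/stage-2 mode, the 32-bit VA
// space is split between TTBR0 (bottom) and TTBR1 (top) by TTBCR.T0SZ and
// TTBCR.T1SZ, as computed below. Illustrative example (values not from the
// source): with T0SZ == 1 and T1SZ == 1, TTBR0 covers [0, 0x7fffffff] and
// TTBR1 covers [0x80000000, 0xffffffff].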
Fault
TableWalker::processWalkLPAE()
{
    Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
    int tsz, n;
    LookupLevel start_lookup_level = L1;

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
            currState->vaddr_tainted, currState->ttbcr);

    statWalkWaitTime.sample(curTick() - currState->startTime);

    Request::Flags flag = Request::PT_WALK;
    if (currState->isSecure)
        flag.set(Request::SECURE);

    // work out which base address register to use, if in hyp mode we always
    // use HTTBR
    if (isStage2) {
        DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
        tsz = sext<4>(currState->vtcr.t0sz);
        start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
    } else if (currState->isHyp) {
        DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
        tsz = currState->htcr.t0sz;
    } else {
        assert(longDescFormatInUse(currState->tc));

        // Determine boundaries of TTBR0/1 regions
        if (currState->ttbcr.t0sz)
            ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
        else if (currState->ttbcr.t1sz)
            ttbr0_max = (1ULL << 32) -
                (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
        else
            ttbr0_max = (1ULL << 32) - 1;
        if (currState->ttbcr.t1sz)
            ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
        else
            ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));

        const bool is_atomic = currState->req->isAtomic();

        // The following code snippet selects the appropriate translation
        // table base address (TTBR0 or TTBR1) and the appropriate starting
        // lookup level depending on the address range supported by the
        // translation table (ARM ARM issue C B3.6.4)
        if (currState->vaddr <= ttbr0_max) {
            DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd0) {
                if (currState->isFetch)
                    return std::make_shared<PrefetchAbort>(
                        currState->vaddr_tainted,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
                else
                    return std::make_shared<DataAbort>(
                        currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess,
                        is_atomic ? false : currState->isWrite,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(snsBankedIndex(
                MISCREG_TTBR0, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t0sz;
            if (ttbr0_max < (1ULL << 30))  // Upper limit < 1 GB
                start_lookup_level = L2;
        } else if (currState->vaddr >= ttbr1_min) {
            DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd1) {
                if (currState->isFetch)
                    return std::make_shared<PrefetchAbort>(
                        currState->vaddr_tainted,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
                else
                    return std::make_shared<DataAbort>(
                        currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess,
                        is_atomic ? false : currState->isWrite,
                        ArmFault::TranslationLL + L1,
                        isStage2,
                        ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(snsBankedIndex(
                MISCREG_TTBR1, currState->tc, !currState->isSecure));
            tsz = currState->ttbcr.t1sz;
            if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))  // Lower limit >= 3 GB
                start_lookup_level = L2;
        } else {
            // Out of boundaries -> translation fault
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::LpaeTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    is_atomic ? false : currState->isWrite,
                    ArmFault::TranslationLL + L1,
                    isStage2, ArmFault::LpaeTran);
        }

    }
    // Perform lookup (ARM ARM issue C B3.6.6)
    if (start_lookup_level == L1) {
        n = 5 - tsz;
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 26, 30) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    } else {
        // Skip first-level lookup
        n = (tsz >= 2 ? 14 - tsz : 12);
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 17, 21) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->isSecure ? "s" : "ns");
    }

    // Trickbox address check
    Fault f = testWalk(desc_addr, sizeof(uint64_t),
                       TlbEntry::DomainType::NoAccess, start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n",
                currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    if (currState->sctlr.c == 0) {
        flag.set(Request::UNCACHEABLE);
    }

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = false;
    currState->longDesc.grainSize = Grain4KB;

    bool delayed = fetchDescriptor(desc_addr,
                                   (uint8_t*)&currState->longDesc.data,
                                   sizeof(uint64_t), flag, start_lookup_level,
                                   LongDescEventByLevel[start_lookup_level],
                                   &TableWalker::doLongDescriptor);
    if (!delayed) {
        f = currState->fault;
    }

    return f;
}

unsigned
TableWalker::adjustTableSizeAArch64(unsigned tsz)
{
    if (tsz < 25)
        return 25;
    if (tsz > 48)
        return 48;
    return tsz;
}

bool
TableWalker::checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
{
    return (currPhysAddrRange != MaxPhysAddrRange &&
            bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
}

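// AArch64 walk. adjustTableSizeAArch64() above clamps the effective input
// address size to the architectural [25, 48] bit range; the walk below
// selects TTBRn_ELx from the top VA bits, derives the granule size from the
// TCR tg0/tg1 fields, and then computes the starting lookup level.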
Fault
TableWalker::processWalkAArch64()
{
    assert(currState->aarch64);

    DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
            currState->vaddr_tainted, currState->tcr);

    static const GrainSize GrainMap_tg0[] =
        { Grain4KB, Grain64KB, Grain16KB, ReservedGrain };
    static const GrainSize GrainMap_tg1[] =
        { ReservedGrain, Grain16KB, Grain4KB, Grain64KB };

    statWalkWaitTime.sample(curTick() - currState->startTime);

    // Determine TTBR, table size, granule size and phys. address range
    Addr ttbr = 0;
    int tsz = 0, ps = 0;
    GrainSize tg = Grain4KB; // grain size computed from tg* field
    bool fault = false;

    LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;

    switch (currState->el) {
      case EL0:
      case EL1:
        if (isStage2) {
            DPRINTF(TLB, " - Selecting VTTBR0 (AArch64 stage 2)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_VTTBR_EL2);
            tsz = 64 - currState->vtcr.t0sz64;
            tg = GrainMap_tg0[currState->vtcr.tg0];
            // ARM DDI 0487A.f D7-2148
            // The starting level of stage 2 translation depends on
            // VTCR_EL2.SL0 and VTCR_EL2.TG0
            LookupLevel __ = MAX_LOOKUP_LEVELS; // invalid level
            uint8_t sl_tg = (currState->vtcr.sl0 << 2) | currState->vtcr.tg0;
            static const LookupLevel SLL[] = {
                L2, L3, L3, __, // sl0 == 0
                L1, L2, L2, __, // sl0 == 1, etc.
                L0, L1, L1, __,
                __, __, __, __
            };
            start_lookup_level = SLL[sl_tg];
            panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
                     "Cannot discern lookup level from vtcr.{sl0,tg0}");
            ps = currState->vtcr.ps;
        } else {
            switch (bits(currState->vaddr, 63, 48)) {
              case 0:
                DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
                tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
                tg = GrainMap_tg0[currState->tcr.tg0];
                currState->hpd = currState->tcr.hpd0;
                if (bits(currState->vaddr, 63, tsz) != 0x0 ||
                    currState->tcr.epd0)
                    fault = true;
                break;
              case 0xffff:
                DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
                tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
                tg = GrainMap_tg1[currState->tcr.tg1];
                currState->hpd = currState->tcr.hpd1;
                if (bits(currState->vaddr, 63, tsz) != mask(64 - tsz) ||
                    currState->tcr.epd1)
                    fault = true;
                break;
              default:
                // top two bytes must be all 0s or all 1s, else invalid addr
                fault = true;
            }
            ps = currState->tcr.ips;
        }
        break;
      case EL2:
        switch (bits(currState->vaddr, 63, 48)) {
          case 0:
            DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
            tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
            tg = GrainMap_tg0[currState->tcr.tg0];
            currState->hpd = currState->hcr.e2h ?
                currState->tcr.hpd0 : currState->tcr.hpd;
            break;

          case 0xffff:
            DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
            tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
            tg = GrainMap_tg1[currState->tcr.tg1];
            currState->hpd = currState->tcr.hpd1;
            if (bits(currState->vaddr, 63, tsz) != mask(64 - tsz) ||
                currState->tcr.epd1 || !currState->hcr.e2h)
                fault = true;
            break;

          default:
            // invalid addr if top two bytes are not all 0s
            fault = true;
        }
        ps = currState->tcr.ps;
        break;
      case EL3:
        switch (bits(currState->vaddr, 63, 48)) {
          case 0:
            DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
            tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
            tg = GrainMap_tg0[currState->tcr.tg0];
            currState->hpd = currState->tcr.hpd;
            break;
          default:
            // invalid addr if top two bytes are not all 0s
            fault = true;
        }
        ps = currState->tcr.ps;
        break;
    }

    const bool is_atomic = currState->req->isAtomic();
    if (fault) {
        Fault f;
        if (currState->isFetch)
            f = std::make_shared<PrefetchAbort>(
                currState->vaddr_tainted,
                ArmFault::TranslationLL + L0, isStage2,
                ArmFault::LpaeTran);
        else
            f = std::make_shared<DataAbort>(
                currState->vaddr_tainted,
                TlbEntry::DomainType::NoAccess,
                is_atomic ? false : currState->isWrite,
                ArmFault::TranslationLL + L0,
                isStage2, ArmFault::LpaeTran);

        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    if (tg == ReservedGrain) {
        warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
                  "DEFINED behavior takes this to mean 4KB granules\n");
        tg = Grain4KB;
    }

    // Determine starting lookup level
    // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
    // in ARM DDI 0487A. These table values correspond to the cascading tests
    // to compute the lookup level and are of the form
    // (grain_size + N*stride), for N = {1, 2, 3}.
    // A value of 64 will never succeed and a value of 0 will always succeed.
    if (start_lookup_level == MAX_LOOKUP_LEVELS) {
        struct GrainMap {
            GrainSize grain_size;
            unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
        };
        static const GrainMap GM[] = {
            { Grain4KB,  { 39, 30,  0, 0 } },
            { Grain16KB, { 47, 36, 25, 0 } },
            { Grain64KB, { 64, 42, 29, 0 } }
        };

        const unsigned *lookup = NULL; // points to a lookup_level_cutoff

        for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
            if (tg == GM[i].grain_size) {
                lookup = GM[i].lookup_level_cutoff;
                break;
            }
        }
        assert(lookup);

        for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
            if (tsz > lookup[L]) {
                start_lookup_level = (LookupLevel) L;
                break;
            }
        }
        panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
                 "Table walker couldn't find lookup level\n");
    }

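    // Illustrative example (values not from the source): with 4KB granules
    // the cutoffs above are {39, 30, 0, 0}, so tsz == 48 starts the walk at
    // L0 and tsz == 39 starts it at L1. The stride computed next is the
    // number of VA bits resolved per level: granule bits minus 3 (for the
    // 8-byte descriptor size), e.g. 12 - 3 == 9 for 4KB granules.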
    int stride = tg - 3;

    // Determine table base address
    int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
    Addr base_addr = mbits(ttbr, 47, base_addr_lo);

    // Determine physical address size and raise an Address Size Fault if
    // necessary
    int pa_range = decodePhysAddrRange64(ps);
    // Clamp to lower limit
    if (pa_range > physAddrRange)
        currState->physAddrRange = physAddrRange;
    else
        currState->physAddrRange = pa_range;
    if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
        DPRINTF(TLB, "Address size fault before any lookup\n");
        Fault f;
        if (currState->isFetch)
            f = std::make_shared<PrefetchAbort>(
                currState->vaddr_tainted,
                ArmFault::AddressSizeLL + start_lookup_level,
                isStage2,
                ArmFault::LpaeTran);
        else
            f = std::make_shared<DataAbort>(
                currState->vaddr_tainted,
                TlbEntry::DomainType::NoAccess,
                is_atomic ? false : currState->isWrite,
                ArmFault::AddressSizeLL + start_lookup_level,
                isStage2,
                ArmFault::LpaeTran);

        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    // Determine descriptor address
    Addr desc_addr = base_addr |
        (bits(currState->vaddr, tsz - 1,
              stride * (3 - start_lookup_level) + tg) << 3);

    // Trickbox address check
    Fault f = testWalk(desc_addr, sizeof(uint64_t),
                       TlbEntry::DomainType::NoAccess, start_lookup_level);
    if (f) {
        DPRINTF(TLB, "Trickbox check caused fault on %#x\n",
                currState->vaddr_tainted);
        if (currState->timing) {
            pending = false;
            nextWalk(currState->tc);
            currState = NULL;
        } else {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return f;
    }

    Request::Flags flag = Request::PT_WALK;
    if (currState->sctlr.c == 0) {
        flag.set(Request::UNCACHEABLE);
    }

    if (currState->isSecure) {
        flag.set(Request::SECURE);
    }

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = true;
    currState->longDesc.grainSize = tg;

    if (currState->timing) {
        fetchDescriptor(desc_addr, (uint8_t*) &currState->longDesc.data,
                        sizeof(uint64_t), flag, start_lookup_level,
                        LongDescEventByLevel[start_lookup_level], NULL);
    } else {
        fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
                        sizeof(uint64_t), flag, -1, NULL,
                        &TableWalker::doLongDescriptor);
        f = currState->fault;
    }

    return f;
}

void
TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
                      uint8_t texcb, bool s)
{
    // Note: tc and sctlr local variables are hiding tc and sctlr class
    // variables
    DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
    te.shareable = false; // default value
    te.nonCacheable = false;
    te.outerShareable = false;
    if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
        switch(texcb) {
          case 0: // Strongly-ordered
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.shareable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            break;
          case 1: // Shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 2: // Outer and Inner Write-Through, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 6;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 3: // Outer and Inner Write-Back, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 7;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 4: // Outer and Inner Non-cacheable
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 0;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 5: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 6: // Implementation Defined
            panic("Implementation-defined texcb value!\n");
            break;
          case 7: // Outer and Inner Write-Back, Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 5;
            te.outerAttrs = 1;
            break;
          case 8: // Non-shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = false;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 9 ... 15: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 16 ... 31: // Cacheable Memory
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            if (bits(texcb, 1, 0) == 0 || bits(texcb, 3, 2) == 0)
                te.nonCacheable = true;
            te.innerAttrs = bits(texcb, 1, 0);
            te.outerAttrs = bits(texcb, 3, 2);
            break;
          default:
            panic("More than 32 states for 5 bits?\n");
        }
    } else {
        assert(tc);
        PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
                                    currState->tc, !currState->isSecure));
        NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
                                    currState->tc, !currState->isSecure));
        DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
        uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
        switch(bits(texcb, 2, 0)) {
          case 0:
            curr_tr = prrr.tr0;
            curr_ir = nmrr.ir0;
            curr_or = nmrr.or0;
            te.outerShareable = (prrr.nos0 == 0);
            break;
          case 1:
            curr_tr = prrr.tr1;
            curr_ir = nmrr.ir1;
            curr_or = nmrr.or1;
            te.outerShareable = (prrr.nos1 == 0);
            break;
          case 2:
            curr_tr = prrr.tr2;
            curr_ir = nmrr.ir2;
            curr_or = nmrr.or2;
            te.outerShareable = (prrr.nos2 == 0);
            break;
          case 3:
            curr_tr = prrr.tr3;
            curr_ir = nmrr.ir3;
            curr_or = nmrr.or3;
            te.outerShareable = (prrr.nos3 == 0);
            break;
          case 4:
            curr_tr = prrr.tr4;
            curr_ir = nmrr.ir4;
            curr_or = nmrr.or4;
            te.outerShareable = (prrr.nos4 == 0);
            break;
          case 5:
            curr_tr = prrr.tr5;
            curr_ir = nmrr.ir5;
            curr_or = nmrr.or5;
            te.outerShareable = (prrr.nos5 == 0);
            break;
          case 6:
            panic("Imp defined type\n");
          case 7:
            curr_tr = prrr.tr7;
            curr_ir = nmrr.ir7;
            curr_or = nmrr.or7;
            te.outerShareable = (prrr.nos7 == 0);
            break;
        }

        switch(curr_tr) {
          case 0:
            DPRINTF(TLBVerbose, "StronglyOrdered\n");
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.nonCacheable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            te.shareable = true;
            break;
          case 1:
            DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
                    prrr.ds1, prrr.ds0, s);
            te.mtype = TlbEntry::MemoryType::Device;
            te.nonCacheable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            if (prrr.ds1 && s)
                te.shareable = true;
            if (prrr.ds0 && !s)
                te.shareable = true;
            break;
          case 2:
            DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
                    prrr.ns1, prrr.ns0, s);
            te.mtype = TlbEntry::MemoryType::Normal;
            if (prrr.ns1 && s)
                te.shareable = true;
            if (prrr.ns0 && !s)
                te.shareable = true;
            break;
          case 3:
            panic("Reserved type");
        }

        if (te.mtype == TlbEntry::MemoryType::Normal){
            switch(curr_ir) {
              case 0:
                te.nonCacheable = true;
                te.innerAttrs = 0;
                break;
              case 1:
                te.innerAttrs = 5;
                break;
              case 2:
                te.innerAttrs = 6;
                break;
              case 3:
                te.innerAttrs = 7;
                break;
            }

            switch(curr_or) {
              case 0:
                te.nonCacheable = true;
                te.outerAttrs = 0;
                break;
              case 1:
                te.outerAttrs = 1;
                break;
              case 2:
                te.outerAttrs = 2;
                break;
              case 3:
                te.outerAttrs = 3;
                break;
            }
        }
    }
    DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
            "outerAttrs: %d\n",
            te.shareable, te.innerAttrs, te.outerAttrs);
    te.setAttributes(false);
}

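// Worked example for the MAIR-based remapping below (values illustrative,
// not from the source): AttrIndx == 5 has bit 2 set, so it selects MAIR1,
// and byte 5 % 4 == 1 of that register, i.e. attr == MAIR1[15:8].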
void
TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
                          LongDescriptor &lDescriptor)
{
    assert(_haveLPAE);

    uint8_t attr;
    uint8_t sh = lDescriptor.sh();
    // Different format and source of attributes if this is a stage 2
    // translation
    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_3_2 = (attr >> 2) & 0x3;
        uint8_t attr_1_0 = attr & 0x3;

        DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_3_2 == 0) {
            te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                     : TlbEntry::MemoryType::Device;
            te.outerAttrs = 0;
            te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = attr_3_2 == 1 ? 0 :
                            attr_3_2 == 2 ? 2 : 1;
            te.innerAttrs = attr_1_0 == 1 ? 0 :
                            attr_1_0 == 2 ? 6 : 5;
            te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        // LPAE always uses remapping of memory attributes, irrespective of
        // the value of SCTLR.TRE
        MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
        int reg_as_int = snsBankedIndex(reg, currState->tc,
                                        !currState->isSecure);
        uint32_t mair = currState->tc->readMiscReg(reg_as_int);
        attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
        uint8_t attr_7_4 = bits(attr, 7, 4);
        uint8_t attr_3_0 = bits(attr, 3, 0);
        DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n",
                attrIndx, sh, attr);

        // Note: the memory subsystem only cares about the 'cacheable'
        // memory attribute. The other attributes are only used to fill the
        // PAR register accordingly to provide the illusion of full support
        te.nonCacheable = false;

        switch (attr_7_4) {
          case 0x0:
            // Strongly-ordered or Device memory
            if (attr_3_0 == 0x0)
                te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            else if (attr_3_0 == 0x4)
                te.mtype = TlbEntry::MemoryType::Device;
            else
                panic("Unpredictable behavior\n");
            te.nonCacheable = true;
            te.outerAttrs = 0;
            break;
          case 0x4:
            // Normal memory, Outer Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = 0;
            if (attr_3_0 == 0x4)
                // Inner Non-cacheable
                te.nonCacheable = true;
            else if (attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          case 0x8:
          case 0x9:
          case 0xa:
          case 0xb:
          case 0xc:
          case 0xd:
          case 0xe:
          case 0xf:
            if (attr_7_4 & 0x4) {
                te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
            } else {
                te.outerAttrs = 0x2;
            }
            // Normal memory, Outer Cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
        switch (attr_3_0) {
          case 0x0:
            te.innerAttrs = 0x1;
            break;
          case 0x4:
            te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
            break;
          case 0x8:
          case 0x9:
          case 0xA:
          case 0xB:
            te.innerAttrs = 6;
            break;
          case 0xC:
          case 0xD:
          case 0xE:
          case 0xF:
            te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
    }

    te.outerShareable = sh == 2;
    te.shareable = (sh & 0x2) ? true : false;
    te.setAttributes(true);
    te.attributes |= (uint64_t) attr << 56;
}

void
TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te,
                             LongDescriptor &lDescriptor)
{
    uint8_t attr;
    uint8_t attr_hi;
    uint8_t attr_lo;
    uint8_t sh = lDescriptor.sh();

    if (isStage2) {
        attr = lDescriptor.memAttr();
        uint8_t attr_hi = (attr >> 2) & 0x3;
        uint8_t attr_lo = attr & 0x3;

        DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_hi == 0) {
            te.mtype = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                    : TlbEntry::MemoryType::Device;
            te.outerAttrs = 0;
            te.innerAttrs = attr_lo == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = attr_hi == 1 ? 0 :
                            attr_hi == 2 ? 2 : 1;
            te.innerAttrs = attr_lo == 1 ? 0 :
                            attr_lo == 2 ? 6 : 5;
            // Treat write-through memory as uncacheable, this is safe
            // but for performance reasons not optimal.
            te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
                              (attr_lo == 1) || (attr_lo == 2);
        }
    } else {
        uint8_t attrIndx = lDescriptor.attrIndx();

        DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n",
                attrIndx, sh);

        // Select MAIR
        uint64_t mair;
        switch (currState->el) {
          case EL0:
          case EL1:
            mair = tc->readMiscReg(MISCREG_MAIR_EL1);
            break;
          case EL2:
            mair = tc->readMiscReg(MISCREG_MAIR_EL2);
            break;
          case EL3:
            mair = tc->readMiscReg(MISCREG_MAIR_EL3);
            break;
          default:
            panic("Invalid exception level");
            break;
        }

        // Select attributes
        attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
        attr_lo = bits(attr, 3, 0);
        attr_hi = bits(attr, 7, 4);

        // Memory type
        te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device
                                : TlbEntry::MemoryType::Normal;

        // Cacheability
        te.nonCacheable = false;
        if (te.mtype == TlbEntry::MemoryType::Device) { // Device memory
            te.nonCacheable = true;
        }
        // Treat write-through memory as uncacheable, this is safe
        // but for performance reasons not optimal.
        switch (attr_hi) {
          case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
          case 0x4:         // Normal memory, Outer Non-cacheable
          case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
            te.nonCacheable = true;
        }
        switch (attr_lo) {
          case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
          case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
            warn_if(!attr_hi, "Unpredictable behavior");
            M5_FALLTHROUGH;
          case 0x4: // Device-nGnRE memory or
                    // Normal memory, Inner Non-cacheable
          case 0x8: // Device-nGRE memory or
                    // Normal memory, Inner Write-through non-transient
            te.nonCacheable = true;
        }
        te.shareable = sh == 2;
        te.outerShareable = (sh & 0x2) ? true : false;
        // Attributes formatted according to the 64-bit PAR
        te.attributes = ((uint64_t) attr << 56) |
            (1 << 11) |     // LPAE bit
            (te.ns << 9) |  // NS bit
            (sh << 7);
    }
}

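// Descriptor handlers. doL1Descriptor() consumes a fetched short-descriptor
// L1 entry: sections are inserted into the TLB directly, page-table entries
// trigger the dependent L2 fetch, and reserved/ignore entries fault.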
void
TableWalker::doL1Descriptor()
{
    if (currState->fault != NoFault) {
        return;
    }

    currState->l1Desc.data = htog(currState->l1Desc.data,
                                  byteOrder(currState->tc));

    DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
            currState->vaddr_tainted, currState->l1Desc.data);
    TlbEntry te;

    const bool is_atomic = currState->req->isAtomic();

    switch (currState->l1Desc.type()) {
      case L1Descriptor::Ignore:
      case L1Descriptor::Reserved:
        if (!currState->timing) {
            currState->tc = NULL;
            currState->req = NULL;
        }
        DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
        if (currState->isFetch)
            currState->fault =
                std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + L1,
                    isStage2,
                    ArmFault::VmsaTran);
        else
            currState->fault =
                std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    is_atomic ? false : currState->isWrite,
                    ArmFault::TranslationLL + L1, isStage2,
                    ArmFault::VmsaTran);
        return;
      case L1Descriptor::Section:
        if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
            /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
              * enabled; if set, do l1Desc.setAp0() instead of generating
              * AccessFlag0
              */
            currState->fault = std::make_shared<DataAbort>(
                currState->vaddr_tainted,
                currState->l1Desc.domain(),
                is_atomic ? false : currState->isWrite,
                ArmFault::AccessFlagLL + L1,
                isStage2,
                ArmFault::VmsaTran);
        }
        if (currState->l1Desc.supersection()) {
            panic("Haven't implemented supersections\n");
        }
        insertTableEntry(currState->l1Desc, false);
        return;
      case L1Descriptor::PageTable:
        {
            Addr l2desc_addr;
            l2desc_addr = currState->l1Desc.l2Addr() |
                (bits(currState->vaddr, 19, 12) << 2);
            DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
                    l2desc_addr, currState->isSecure ? "s" : "ns");

            // Trickbox address check
            currState->fault = testWalk(l2desc_addr, sizeof(uint32_t),
                                        currState->l1Desc.domain(), L2);

            if (currState->fault) {
                if (!currState->timing) {
                    currState->tc = NULL;
                    currState->req = NULL;
                }
                return;
            }

            Request::Flags flag = Request::PT_WALK;
            if (currState->isSecure)
                flag.set(Request::SECURE);

            bool delayed;
            delayed = fetchDescriptor(l2desc_addr,
                                      (uint8_t*)&currState->l2Desc.data,
                                      sizeof(uint32_t), flag, -1,
                                      &doL2DescEvent,
                                      &TableWalker::doL2Descriptor);
            if (delayed) {
                currState->delayed = true;
            }

            return;
        }
      default:
        panic("A new type in a 2 bit field?\n");
    }
}

Fault
TableWalker::generateLongDescFault(ArmFault::FaultSource src)
{
    if (currState->isFetch) {
        return std::make_shared<PrefetchAbort>(
            currState->vaddr_tainted,
            src + currState->longDesc.lookupLevel,
            isStage2,
            ArmFault::LpaeTran);
    } else {
        return std::make_shared<DataAbort>(
            currState->vaddr_tainted,
            TlbEntry::DomainType::NoAccess,
            currState->req->isAtomic() ? false : currState->isWrite,
            src + currState->longDesc.lookupLevel,
            isStage2,
            ArmFault::LpaeTran);
    }
}

void
TableWalker::doLongDescriptor()
{
    if (currState->fault != NoFault) {
        return;
    }

    currState->longDesc.data = htog(currState->longDesc.data,
                                    byteOrder(currState->tc));

    DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
            currState->longDesc.lookupLevel, currState->vaddr_tainted,
            currState->longDesc.data,
            currState->aarch64 ? "AArch64" : "long-desc.");

    if ((currState->longDesc.type() == LongDescriptor::Block) ||
        (currState->longDesc.type() == LongDescriptor::Page)) {
        DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
                "xn: %d, ap: %d, af: %d, type: %d\n",
                currState->longDesc.lookupLevel,
                currState->longDesc.data,
                currState->longDesc.pxn(),
                currState->longDesc.xn(),
                currState->longDesc.ap(),
                currState->longDesc.af(),
                currState->longDesc.type());
    } else {
        DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
                currState->longDesc.lookupLevel,
                currState->longDesc.data,
                currState->longDesc.type());
    }

    TlbEntry te;

    switch (currState->longDesc.type()) {
      case LongDescriptor::Invalid:
        DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
                currState->longDesc.lookupLevel,
                ArmFault::TranslationLL + currState->longDesc.lookupLevel);

        currState->fault = generateLongDescFault(ArmFault::TranslationLL);
        if (!currState->timing) {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return;

      case LongDescriptor::Block:
      case LongDescriptor::Page:
        {
            auto fault_source = ArmFault::FaultSourceInvalid;
            // Check for address size fault
            if (checkAddrSizeFaultAArch64(
                    mbits(currState->longDesc.data, MaxPhysAddrRange - 1,
                          currState->longDesc.offsetBits()),
                    currState->physAddrRange)) {

                DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
                        currState->longDesc.lookupLevel);
                fault_source = ArmFault::AddressSizeLL;

            // Check for access fault
            } else if (currState->longDesc.af() == 0) {

                DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
                        currState->longDesc.lookupLevel);
                fault_source = ArmFault::AccessFlagLL;
            }

            if (fault_source != ArmFault::FaultSourceInvalid) {
                currState->fault = generateLongDescFault(fault_source);
            } else {
                insertTableEntry(currState->longDesc, true);
            }
        }
        return;
      case LongDescriptor::Table:
        {
            // Set hierarchical permission flags
            currState->secureLookup = currState->secureLookup &&
                currState->longDesc.secureTable();
            currState->rwTable = currState->rwTable &&
                (currState->longDesc.rwTable() || currState->hpd);
            currState->userTable = currState->userTable &&
                (currState->longDesc.userTable() || currState->hpd);
            currState->xnTable = currState->xnTable ||
                (currState->longDesc.xnTable() && !currState->hpd);
            currState->pxnTable = currState->pxnTable ||
                (currState->longDesc.pxnTable() && !currState->hpd);

            // Set up next level lookup
            Addr next_desc_addr = currState->longDesc.nextDescAddr(
                currState->vaddr);
            DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: "
                    "%#x (%s)\n",
                    currState->longDesc.lookupLevel,
                    currState->longDesc.lookupLevel + 1,
                    next_desc_addr,
                    currState->secureLookup ? "s" : "ns");

            // Check for address size fault
            if (currState->aarch64 && checkAddrSizeFaultAArch64(
                    next_desc_addr, currState->physAddrRange)) {
                DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
                        currState->longDesc.lookupLevel);

                currState->fault = generateLongDescFault(
                    ArmFault::AddressSizeLL);
                return;
            }

            // Trickbox address check
            currState->fault = testWalk(
                next_desc_addr, sizeof(uint64_t),
                TlbEntry::DomainType::Client,
                toLookupLevel(currState->longDesc.lookupLevel + 1));

            if (currState->fault) {
                if (!currState->timing) {
                    currState->tc = NULL;
                    currState->req = NULL;
                }
                return;
            }

            Request::Flags flag = Request::PT_WALK;
            if (currState->secureLookup)
                flag.set(Request::SECURE);

            LookupLevel L = currState->longDesc.lookupLevel =
                (LookupLevel) (currState->longDesc.lookupLevel + 1);
            Event *event = NULL;
            switch (L) {
              case L1:
                assert(currState->aarch64);
              case L2:
              case L3:
                event = LongDescEventByLevel[L];
                break;
              default:
                panic("Wrong lookup level in table walk\n");
                break;
            }

            bool delayed;
            delayed = fetchDescriptor(next_desc_addr,
                                      (uint8_t*)&currState->longDesc.data,
                                      sizeof(uint64_t), flag, -1, event,
                                      &TableWalker::doLongDescriptor);
            if (delayed) {
                currState->delayed = true;
            }
        }
        return;
      default:
        panic("A new type in a 2 bit field?\n");
    }
}

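// doL2Descriptor() terminates a short-descriptor walk: an invalid entry
// faults, a cleared access flag (with SCTLR.AFE set) raises an access-flag
// fault, and anything else is inserted into the TLB.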
void
TableWalker::doL2Descriptor()
{
    if (currState->fault != NoFault) {
        return;
    }

    currState->l2Desc.data = htog(currState->l2Desc.data,
                                  byteOrder(currState->tc));

    DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
            currState->vaddr_tainted, currState->l2Desc.data);
    TlbEntry te;

    const bool is_atomic = currState->req->isAtomic();

    if (currState->l2Desc.invalid()) {
        DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
        if (!currState->timing) {
            currState->tc = NULL;
            currState->req = NULL;
        }
        if (currState->isFetch)
            currState->fault = std::make_shared<PrefetchAbort>(
                currState->vaddr_tainted,
                ArmFault::TranslationLL + L2,
                isStage2,
                ArmFault::VmsaTran);
        else
            currState->fault = std::make_shared<DataAbort>(
                currState->vaddr_tainted, currState->l1Desc.domain(),
                is_atomic ? false : currState->isWrite,
                ArmFault::TranslationLL + L2,
                isStage2,
                ArmFault::VmsaTran);
        return;
    }

    if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
        /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
          * enabled; if set, do l2Desc.setAp0() instead of generating
          * AccessFlag0
          */
        DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
                currState->sctlr.afe, currState->l2Desc.ap());

        currState->fault = std::make_shared<DataAbort>(
            currState->vaddr_tainted,
            TlbEntry::DomainType::NoAccess,
            is_atomic ? false : currState->isWrite,
            ArmFault::AccessFlagLL + L2, isStage2,
            ArmFault::VmsaTran);
    }

    insertTableEntry(currState->l2Desc, false);
}

void
TableWalker::doL1DescriptorWrapper()
{
    currState = stateQueues[L1].front();
    currState->delayed = false;
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }


    DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",
            &currState->l1Desc.data);
    DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",
            currState->l1Desc.data);

    DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doL1Descriptor();

    stateQueues[L1].pop_front();
    // Check if fault was generated
    if (currState->fault != NoFault) {
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);
        statWalksShortTerminatedAtLevel[0]++;

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else if (!currState->delayed) {
        // delay is not set so there is no L2 to do
        // Don't finish the translation if a stage 2 look up is underway
        statWalkServiceTime.sample(curTick() - currState->startTime);
        DPRINTF(TLBVerbose, "calling translateTiming again\n");
        tlb->translateTiming(currState->req, currState->tc,
                             currState->transState, currState->mode);
        statWalksShortTerminatedAtLevel[0]++;

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        // need to do L2 descriptor
        stateQueues[L2].push_back(currState);
    }
    currState = NULL;
}

void
TableWalker::doL2DescriptorWrapper()
{
    currState = stateQueues[L2].front();
    assert(currState->delayed);
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doL2Descriptor();

    // Check if fault was generated
    if (currState->fault != NoFault) {
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);
        statWalksShortTerminatedAtLevel[1]++;
    } else {
        statWalkServiceTime.sample(curTick() - currState->startTime);
        DPRINTF(TLBVerbose, "calling translateTiming again\n");
        tlb->translateTiming(currState->req, currState->tc,
                             currState->transState, currState->mode);
        statWalksShortTerminatedAtLevel[1]++;
    }


    stateQueues[L2].pop_front();
    pending = false;
    nextWalk(currState->tc);

    currState->req = NULL;
    currState->tc = NULL;
    currState->delayed = false;

    delete currState;
    currState = NULL;
}

void
TableWalker::doL0LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L0);
}

void
TableWalker::doL1LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L1);
}

void
TableWalker::doL2LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L2);
}

void
TableWalker::doL3LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L3);
}

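// Common timing-mode completion path for all long-descriptor levels: pop
// the walker state for this level, process the fetched descriptor, then
// either finish the translation with a fault, retry it through the TLB, or
// requeue the state for the next level's fetch.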
void
TableWalker::doL0LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L0);
}

void
TableWalker::doL1LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L1);
}

void
TableWalker::doL2LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L2);
}

void
TableWalker::doL3LongDescriptorWrapper()
{
    doLongDescriptorWrapper(L3);
}

void
TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
{
    currState = stateQueues[curr_lookup_level].front();
    assert(curr_lookup_level == currState->longDesc.lookupLevel);
    currState->delayed = false;

    // If there's a stage 2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doLongDescriptor();

    stateQueues[curr_lookup_level].pop_front();

    if (currState->fault != NoFault) {
        // A fault was generated
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else if (!currState->delayed) {
        // No additional lookups required
        DPRINTF(TLBVerbose, "calling translateTiming again\n");
        statWalkServiceTime.sample(curTick() - currState->startTime);
        tlb->translateTiming(currState->req, currState->tc,
                             currState->transState, currState->mode);
        statWalksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
            panic("Max. number of lookups already reached in table walk\n");
        // Need to perform additional lookups
        stateQueues[currState->longDesc.lookupLevel].push_back(currState);
    }
    currState = NULL;
}

void
TableWalker::nextWalk(ThreadContext *tc)
{
    if (!pendingQueue.empty())
        schedule(doProcessEvent, clockEdge(Cycles(1)));
    else
        completeDrain();
}
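
/**
 * Fetch a descriptor of numBytes at descAddr into data. If the walk
 * requires a stage 2 lookup (stage2Req), descAddr is an IPA and is
 * first translated by the stage 2 MMU. Returns true when the access is
 * timing mode and will complete later via event; if queueIndex is
 * non-negative the walker state is parked on that state queue until
 * then. Otherwise the read completes inline and doDescriptor is called
 * before returning.
 */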
bool
TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
                             Request::Flags flags, int queueIndex,
                             Event *event,
                             void (TableWalker::*doDescriptor)())
{
    bool isTiming = currState->timing;

    DPRINTF(TLBVerbose, "Fetching descriptor at address: 0x%x stage2Req: %d\n",
            descAddr, currState->stage2Req);

    // If this translation has a stage 2 then we know descAddr is an IPA and
    // needs to be translated before we can access the page table. Do that
    // check here.
    if (currState->stage2Req) {
        Fault fault;
        flags = flags | TLB::MustBeOne;

        if (isTiming) {
            Stage2MMU::Stage2Translation *tran = new
                Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
                                             currState->vaddr);
            currState->stage2Tran = tran;
            stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
                                     flags);
            fault = tran->fault;
        } else {
            fault = stage2Mmu->readDataUntimed(currState->tc,
                currState->vaddr, descAddr, data, numBytes, flags,
                currState->functional);
        }

        if (fault != NoFault) {
            currState->fault = fault;
        }
        if (isTiming) {
            if (queueIndex >= 0) {
                DPRINTF(TLBVerbose,
                        "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else {
            (this->*doDescriptor)();
        }
    } else {
        if (isTiming) {
            port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
                            currState->tc->getCpuPtr()->clockPeriod(), flags);
            if (queueIndex >= 0) {
                DPRINTF(TLBVerbose,
                        "Adding to walker fifo: queue size before adding: %d\n",
                        stateQueues[queueIndex].size());
                stateQueues[queueIndex].push_back(currState);
                currState = NULL;
            }
        } else if (!currState->functional) {
            port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
                            currState->tc->getCpuPtr()->clockPeriod(), flags);
            (this->*doDescriptor)();
        } else {
            // Functional access: read the descriptor with a functional
            // packet, bypassing timing entirely
            RequestPtr req = std::make_shared<Request>(
                descAddr, numBytes, flags, masterId);

            req->taskId(ContextSwitchTaskId::DMA);
            PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
            pkt->dataStatic(data);
            port->sendFunctional(pkt);
            (this->*doDescriptor)();
            delete pkt;
        }
    }
    return isTiming;
}
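
/**
 * Build a TlbEntry from a completed descriptor and insert it into the
 * TLB. Fills in the addressing, permission and attribute fields, folds
 * in the hierarchical (table-level) permissions for long-descriptor
 * walks, and updates the page-size and request-origin statistics.
 */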
void
TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
{
    TlbEntry te;

    // Create and fill a new page table entry
    te.valid = true;
    te.longDescFormat = longDescriptor;
    te.isHyp = currState->isHyp;
    te.asid = currState->asid;
    te.vmid = currState->vmid;
    te.N = descriptor.offsetBits();
    te.vpn = currState->vaddr >> te.N;
    te.size = (1 << te.N) - 1;
    te.pfn = descriptor.pfn();
    te.domain = descriptor.domain();
    te.lookupLevel = descriptor.lookupLevel;
    te.ns = !descriptor.secure(haveSecurity, currState) || isStage2;
    te.nstid = !currState->isSecure;
    te.xn = descriptor.xn();
    if (currState->aarch64)
        te.el = currState->el;
    else
        te.el = EL1;

    statPageSizes[pageSizeNtoStatBin(te.N)]++;
    statRequestOrigin[COMPLETED][currState->isFetch]++;

    // ASID has no meaning for stage 2 TLB entries, so mark all stage 2
    // entries as global
    te.global = descriptor.global(currState) || isStage2;
    if (longDescriptor) {
        LongDescriptor lDescriptor =
            dynamic_cast<LongDescriptor &>(descriptor);

        te.xn |= currState->xnTable;
        te.pxn = currState->pxnTable || lDescriptor.pxn();
        if (isStage2) {
            // This is actually the HAP field, but it's stored in the same
            // bit positions as the AP field in a stage 1 translation
            te.hap = lDescriptor.ap();
        } else {
            te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
                (currState->userTable && (descriptor.ap() & 0x1));
        }
        if (currState->aarch64)
            memAttrsAArch64(currState->tc, te, lDescriptor);
        else
            memAttrsLPAE(currState->tc, te, lDescriptor);
    } else {
        te.ap = descriptor.ap();
        memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
                 descriptor.shareable());
    }

    // Debug output
    DPRINTF(TLB, descriptor.dbgHeader().c_str());
    DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
            te.N, te.pfn, te.size, te.global, te.valid);
    DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
            "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
            te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid,
            te.isHyp, te.nonCacheable, te.ns);
    DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
            descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
            descriptor.getRawData());

    // Insert the entry into the TLB
    tlb->insert(currState->vaddr, te);
    if (!currState->timing) {
        currState->tc = NULL;
        currState->req = NULL;
    }
}

ArmISA::TableWalker *
ArmTableWalkerParams::create()
{
    return new ArmISA::TableWalker(this);
}
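
/**
 * Convert a raw integer to a LookupLevel. Only L1 through L3 are
 * accepted; any other value (including L0) triggers a panic, as the
 * conversion is only used when advancing past a table descriptor.
 */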
LookupLevel
TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
{
    switch (lookup_level_as_int) {
      case L1:
        return L1;
      case L2:
        return L2;
      case L3:
        return L3;
      default:
        panic("Invalid lookup level conversion");
    }
}

/* This method keeps track of the table walker queue's occupancy, so it
 * needs to be called whenever requests start and complete. */
void
TableWalker::pendingChange()
{
    unsigned n = pendingQueue.size();
    if ((currState != NULL) && (currState != pendingQueue.front())) {
        ++n;
    }

    if (n != pendingReqs) {
        Tick now = curTick();
        statPendingWalks.sample(pendingReqs, now - pendingChangeTick);
        pendingReqs = n;
        pendingChangeTick = now;
    }
}

Fault
TableWalker::testWalk(Addr pa, Addr size, TlbEntry::DomainType domain,
                      LookupLevel lookup_level)
{
    return tlb->testWalk(pa, size, currState->vaddr, currState->isSecure,
                         currState->mode, domain, lookup_level);
}

uint8_t
TableWalker::pageSizeNtoStatBin(uint8_t N)
{
    // Map the page offset width N to a statPageSizes bin
    switch (N) {
      case 12: return 0; // 4K
      case 14: return 1; // 16K (using 16K granule in v8-64)
      case 16: return 2; // 64K
      case 20: return 3; // 1M
      case 21: return 4; // 2M-LPAE
      case 24: return 5; // 16M
      case 25: return 6; // 32M (using 16K granule in v8-64)
      case 29: return 7; // 512M (using 64K granule in v8-64)
      case 30: return 8; // 1G-LPAE
      default:
        panic("unknown page size");
        return 255;
    }
}

void
TableWalker::regStats()
{
    ClockedObject::regStats();

    statWalks
        .name(name() + ".walks")
        .desc("Table walker walks requested")
        ;

    statWalksShortDescriptor
        .name(name() + ".walksShort")
        .desc("Table walker walks initiated with short descriptors")
        .flags(Stats::nozero)
        ;

    statWalksLongDescriptor
        .name(name() + ".walksLong")
        .desc("Table walker walks initiated with long descriptors")
        .flags(Stats::nozero)
        ;

    statWalksShortTerminatedAtLevel
        .init(2)
        .name(name() + ".walksShortTerminationLevel")
        .desc("Level at which table walker walks "
              "with short descriptors terminate")
        .flags(Stats::nozero)
        ;
    statWalksShortTerminatedAtLevel.subname(0, "Level1");
    statWalksShortTerminatedAtLevel.subname(1, "Level2");

    statWalksLongTerminatedAtLevel
        .init(4)
        .name(name() + ".walksLongTerminationLevel")
        .desc("Level at which table walker walks "
              "with long descriptors terminate")
        .flags(Stats::nozero)
        ;
    statWalksLongTerminatedAtLevel.subname(0, "Level0");
    statWalksLongTerminatedAtLevel.subname(1, "Level1");
    statWalksLongTerminatedAtLevel.subname(2, "Level2");
    statWalksLongTerminatedAtLevel.subname(3, "Level3");

    statSquashedBefore
        .name(name() + ".walksSquashedBefore")
        .desc("Table walks squashed before starting")
        .flags(Stats::nozero)
        ;

    statSquashedAfter
        .name(name() + ".walksSquashedAfter")
        .desc("Table walks squashed after completion")
        .flags(Stats::nozero)
        ;

    statWalkWaitTime
        .init(16)
        .name(name() + ".walkWaitTime")
        .desc("Table walker wait (enqueue to first request) latency")
        .flags(Stats::pdf | Stats::nozero | Stats::nonan)
        ;

    statWalkServiceTime
        .init(16)
        .name(name() + ".walkCompletionTime")
        .desc("Table walker service (enqueue to completion) latency")
        .flags(Stats::pdf | Stats::nozero | Stats::nonan)
        ;

    statPendingWalks
        .init(16)
        .name(name() + ".walksPending")
        .desc("Table walker pending requests distribution")
        .flags(Stats::pdf | Stats::dist | Stats::nozero | Stats::nonan)
        ;
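
    // Histogram of translated page sizes; the bin indices are assigned
    // by pageSizeNtoStatBin() above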
".walkPageSizes") 2271 .desc("Table walker page sizes translated") 2272 .flags(Stats::total | Stats::pdf | Stats::dist | Stats::nozero) 2273 ; 2274 statPageSizes.subname(0, "4K"); 2275 statPageSizes.subname(1, "16K"); 2276 statPageSizes.subname(2, "64K"); 2277 statPageSizes.subname(3, "1M"); 2278 statPageSizes.subname(4, "2M"); 2279 statPageSizes.subname(5, "16M"); 2280 statPageSizes.subname(6, "32M"); 2281 statPageSizes.subname(7, "512M"); 2282 statPageSizes.subname(8, "1G"); 2283 2284 statRequestOrigin 2285 .init(2,2) // Instruction/Data, requests/completed 2286 .name(name() + ".walkRequestOrigin") 2287 .desc("Table walker requests started/completed, data/inst") 2288 .flags(Stats::total) 2289 ; 2290 statRequestOrigin.subname(0,"Requested"); 2291 statRequestOrigin.subname(1,"Completed"); 2292 statRequestOrigin.ysubname(0,"Data"); 2293 statRequestOrigin.ysubname(1,"Inst"); 2294} 2295