tlb.cc revision 14088:8de55a7aa53b
1/* 2 * Copyright (c) 2010-2013, 2016-2019 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2001-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Ali Saidi 41 * Nathan Binkert 42 * Steve Reinhardt 43 */ 44 45#include "arch/arm/tlb.hh" 46 47#include <memory> 48#include <string> 49#include <vector> 50 51#include "arch/arm/faults.hh" 52#include "arch/arm/pagetable.hh" 53#include "arch/arm/stage2_lookup.hh" 54#include "arch/arm/stage2_mmu.hh" 55#include "arch/arm/system.hh" 56#include "arch/arm/table_walker.hh" 57#include "arch/arm/utility.hh" 58#include "arch/generic/mmapped_ipr.hh" 59#include "base/inifile.hh" 60#include "base/str.hh" 61#include "base/trace.hh" 62#include "cpu/base.hh" 63#include "cpu/thread_context.hh" 64#include "debug/Checkpoint.hh" 65#include "debug/TLB.hh" 66#include "debug/TLBVerbose.hh" 67#include "mem/page_table.hh" 68#include "mem/request.hh" 69#include "params/ArmTLB.hh" 70#include "sim/full_system.hh" 71#include "sim/process.hh" 72 73using namespace std; 74using namespace ArmISA; 75 76TLB::TLB(const ArmTLBParams *p) 77 : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size), 78 isStage2(p->is_stage2), stage2Req(false), stage2DescReq(false), _attr(0), 79 directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL), 80 stage2Mmu(NULL), test(nullptr), rangeMRU(1), 81 aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false), 82 isHyp(false), asid(0), vmid(0), hcr(0), dacr(0), 83 miscRegValid(false), miscRegContext(0), curTranType(NormalTran) 84{ 85 const ArmSystem *sys = dynamic_cast<const ArmSystem *>(p->sys); 86 87 tableWalker->setTlb(this); 88 89 // Cache system-level properties 90 haveLPAE = tableWalker->haveLPAE(); 91 haveVirtualization = tableWalker->haveVirtualization(); 92 haveLargeAsid64 = tableWalker->haveLargeAsid64(); 93 94 if (sys) 95 m5opRange = sys->m5opRange(); 96} 97 98TLB::~TLB() 99{ 100 delete[] table; 101} 102 103void 104TLB::init() 105{ 106 if (stage2Mmu && !isStage2) 107 stage2Tlb = stage2Mmu->stage2Tlb(); 108} 109 110void 111TLB::setMMU(Stage2MMU *m, MasterID master_id) 112{ 113 stage2Mmu = m; 114 tableWalker->setMMU(m, master_id); 115} 116 117bool 118TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa) 119{ 120 updateMiscReg(tc); 121 122 if (directToStage2) { 123 assert(stage2Tlb); 124 return stage2Tlb->translateFunctional(tc, va, pa); 125 } 126 127 TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false, 128 aarch64 ? aarch64EL : EL1); 129 if (!e) 130 return false; 131 pa = e->pAddr(va); 132 return true; 133} 134 135Fault 136TLB::finalizePhysical(const RequestPtr &req, 137 ThreadContext *tc, Mode mode) const 138{ 139 const Addr paddr = req->getPaddr(); 140 141 if (m5opRange.contains(paddr)) { 142 req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR); 143 req->setPaddr(GenericISA::iprAddressPseudoInst( 144 (paddr >> 8) & 0xFF, 145 paddr & 0xFF)); 146 } 147 148 return NoFault; 149} 150 151TlbEntry* 152TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure, 153 bool functional, bool ignore_asn, ExceptionLevel target_el) 154{ 155 156 TlbEntry *retval = NULL; 157 158 // Maintaining LRU array 159 int x = 0; 160 while (retval == NULL && x < size) { 161 if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false, 162 target_el)) || 163 (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) { 164 // We only move the hit entry ahead when the position is higher 165 // than rangeMRU 166 if (x > rangeMRU && !functional) { 167 TlbEntry tmp_entry = table[x]; 168 for (int i = x; i > 0; i--) 169 table[i] = table[i - 1]; 170 table[0] = tmp_entry; 171 retval = &table[0]; 172 } else { 173 retval = &table[x]; 174 } 175 break; 176 } 177 ++x; 178 } 179 180 DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d " 181 "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d " 182 "el: %d\n", 183 va, asn, retval ? "hit" : "miss", vmid, hyp, secure, 184 retval ? retval->pfn : 0, retval ? retval->size : 0, 185 retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0, 186 retval ? retval->ns : 0, retval ? retval->nstid : 0, 187 retval ? retval->global : 0, retval ? retval->asid : 0, 188 retval ? retval->el : 0); 189 190 return retval; 191} 192 193// insert a new TLB entry 194void 195TLB::insert(Addr addr, TlbEntry &entry) 196{ 197 DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x" 198 " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d" 199 " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn, 200 entry.size, entry.vpn, entry.asid, entry.vmid, entry.N, 201 entry.global, entry.valid, entry.nonCacheable, entry.xn, 202 entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid, 203 entry.isHyp); 204 205 if (table[size - 1].valid) 206 DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x " 207 "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n", 208 table[size-1].vpn << table[size-1].N, table[size-1].asid, 209 table[size-1].vmid, table[size-1].pfn << table[size-1].N, 210 table[size-1].size, table[size-1].ap, table[size-1].ns, 211 table[size-1].nstid, table[size-1].global, table[size-1].isHyp, 212 table[size-1].el); 213 214 //inserting to MRU position and evicting the LRU one 215 216 for (int i = size - 1; i > 0; --i) 217 table[i] = table[i-1]; 218 table[0] = entry; 219 220 inserts++; 221 ppRefills->notify(1); 222} 223 224void 225TLB::printTlb() const 226{ 227 int x = 0; 228 TlbEntry *te; 229 DPRINTF(TLB, "Current TLB contents:\n"); 230 while (x < size) { 231 te = &table[x]; 232 if (te->valid) 233 DPRINTF(TLB, " * %s\n", te->print()); 234 ++x; 235 } 236} 237 238void 239TLB::flushAllSecurity(bool secure_lookup, ExceptionLevel target_el, 240 bool ignore_el) 241{ 242 DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n", 243 (secure_lookup ? "secure" : "non-secure")); 244 int x = 0; 245 TlbEntry *te; 246 while (x < size) { 247 te = &table[x]; 248 const bool el_match = ignore_el ? 249 true : te->checkELMatch(target_el); 250 251 if (te->valid && secure_lookup == !te->nstid && 252 (te->vmid == vmid || secure_lookup) && el_match) { 253 254 DPRINTF(TLB, " - %s\n", te->print()); 255 te->valid = false; 256 flushedEntries++; 257 } 258 ++x; 259 } 260 261 flushTlb++; 262 263 // If there's a second stage TLB (and we're not it) then flush it as well 264 // if we're currently in hyp mode 265 if (!isStage2 && isHyp) { 266 stage2Tlb->flushAllSecurity(secure_lookup, EL1, true); 267 } 268} 269 270void 271TLB::flushAllNs(ExceptionLevel target_el, bool ignore_el) 272{ 273 bool hyp = target_el == EL2; 274 275 DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n", 276 (hyp ? "hyp" : "non-hyp")); 277 int x = 0; 278 TlbEntry *te; 279 while (x < size) { 280 te = &table[x]; 281 const bool el_match = ignore_el ? 282 true : te->checkELMatch(target_el); 283 284 if (te->valid && te->nstid && te->isHyp == hyp && el_match) { 285 286 DPRINTF(TLB, " - %s\n", te->print()); 287 flushedEntries++; 288 te->valid = false; 289 } 290 ++x; 291 } 292 293 flushTlb++; 294 295 // If there's a second stage TLB (and we're not it) then flush it as well 296 if (!isStage2 && !hyp) { 297 stage2Tlb->flushAllNs(EL1, true); 298 } 299} 300 301void 302TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, 303 ExceptionLevel target_el) 304{ 305 DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x " 306 "(%s lookup)\n", mva, asn, (secure_lookup ? 307 "secure" : "non-secure")); 308 _flushMva(mva, asn, secure_lookup, false, target_el); 309 flushTlbMvaAsid++; 310} 311 312void 313TLB::flushAsid(uint64_t asn, bool secure_lookup, ExceptionLevel target_el) 314{ 315 DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn, 316 (secure_lookup ? "secure" : "non-secure")); 317 318 int x = 0 ; 319 TlbEntry *te; 320 321 while (x < size) { 322 te = &table[x]; 323 if (te->valid && te->asid == asn && secure_lookup == !te->nstid && 324 (te->vmid == vmid || secure_lookup) && 325 te->checkELMatch(target_el)) { 326 327 te->valid = false; 328 DPRINTF(TLB, " - %s\n", te->print()); 329 flushedEntries++; 330 } 331 ++x; 332 } 333 flushTlbAsid++; 334} 335 336void 337TLB::flushMva(Addr mva, bool secure_lookup, ExceptionLevel target_el) 338{ 339 DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva, 340 (secure_lookup ? "secure" : "non-secure")); 341 _flushMva(mva, 0xbeef, secure_lookup, true, target_el); 342 flushTlbMva++; 343} 344 345void 346TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, 347 bool ignore_asn, ExceptionLevel target_el) 348{ 349 TlbEntry *te; 350 // D5.7.2: Sign-extend address to 64 bits 351 mva = sext<56>(mva); 352 353 bool hyp = target_el == EL2; 354 355 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn, 356 target_el); 357 while (te != NULL) { 358 if (secure_lookup == !te->nstid) { 359 DPRINTF(TLB, " - %s\n", te->print()); 360 te->valid = false; 361 flushedEntries++; 362 } 363 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn, 364 target_el); 365 } 366} 367 368void 369TLB::flushIpaVmid(Addr ipa, bool secure_lookup, ExceptionLevel target_el) 370{ 371 assert(!isStage2); 372 stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, true, target_el); 373} 374 375void 376TLB::drainResume() 377{ 378 // We might have unserialized something or switched CPUs, so make 379 // sure to re-read the misc regs. 380 miscRegValid = false; 381} 382 383void 384TLB::takeOverFrom(BaseTLB *_otlb) 385{ 386 TLB *otlb = dynamic_cast<TLB*>(_otlb); 387 /* Make sure we actually have a valid type */ 388 if (otlb) { 389 _attr = otlb->_attr; 390 haveLPAE = otlb->haveLPAE; 391 directToStage2 = otlb->directToStage2; 392 stage2Req = otlb->stage2Req; 393 stage2DescReq = otlb->stage2DescReq; 394 395 /* Sync the stage2 MMU if they exist in both 396 * the old CPU and the new 397 */ 398 if (!isStage2 && 399 stage2Tlb && otlb->stage2Tlb) { 400 stage2Tlb->takeOverFrom(otlb->stage2Tlb); 401 } 402 } else { 403 panic("Incompatible TLB type!"); 404 } 405} 406 407void 408TLB::serialize(CheckpointOut &cp) const 409{ 410 DPRINTF(Checkpoint, "Serializing Arm TLB\n"); 411 412 SERIALIZE_SCALAR(_attr); 413 SERIALIZE_SCALAR(haveLPAE); 414 SERIALIZE_SCALAR(directToStage2); 415 SERIALIZE_SCALAR(stage2Req); 416 SERIALIZE_SCALAR(stage2DescReq); 417 418 int num_entries = size; 419 SERIALIZE_SCALAR(num_entries); 420 for (int i = 0; i < size; i++) 421 table[i].serializeSection(cp, csprintf("TlbEntry%d", i)); 422} 423 424void 425TLB::unserialize(CheckpointIn &cp) 426{ 427 DPRINTF(Checkpoint, "Unserializing Arm TLB\n"); 428 429 UNSERIALIZE_SCALAR(_attr); 430 UNSERIALIZE_SCALAR(haveLPAE); 431 UNSERIALIZE_SCALAR(directToStage2); 432 UNSERIALIZE_SCALAR(stage2Req); 433 UNSERIALIZE_SCALAR(stage2DescReq); 434 435 int num_entries; 436 UNSERIALIZE_SCALAR(num_entries); 437 for (int i = 0; i < min(size, num_entries); i++) 438 table[i].unserializeSection(cp, csprintf("TlbEntry%d", i)); 439} 440 441void 442TLB::regStats() 443{ 444 BaseTLB::regStats(); 445 instHits 446 .name(name() + ".inst_hits") 447 .desc("ITB inst hits") 448 ; 449 450 instMisses 451 .name(name() + ".inst_misses") 452 .desc("ITB inst misses") 453 ; 454 455 instAccesses 456 .name(name() + ".inst_accesses") 457 .desc("ITB inst accesses") 458 ; 459 460 readHits 461 .name(name() + ".read_hits") 462 .desc("DTB read hits") 463 ; 464 465 readMisses 466 .name(name() + ".read_misses") 467 .desc("DTB read misses") 468 ; 469 470 readAccesses 471 .name(name() + ".read_accesses") 472 .desc("DTB read accesses") 473 ; 474 475 writeHits 476 .name(name() + ".write_hits") 477 .desc("DTB write hits") 478 ; 479 480 writeMisses 481 .name(name() + ".write_misses") 482 .desc("DTB write misses") 483 ; 484 485 writeAccesses 486 .name(name() + ".write_accesses") 487 .desc("DTB write accesses") 488 ; 489 490 hits 491 .name(name() + ".hits") 492 .desc("DTB hits") 493 ; 494 495 misses 496 .name(name() + ".misses") 497 .desc("DTB misses") 498 ; 499 500 accesses 501 .name(name() + ".accesses") 502 .desc("DTB accesses") 503 ; 504 505 flushTlb 506 .name(name() + ".flush_tlb") 507 .desc("Number of times complete TLB was flushed") 508 ; 509 510 flushTlbMva 511 .name(name() + ".flush_tlb_mva") 512 .desc("Number of times TLB was flushed by MVA") 513 ; 514 515 flushTlbMvaAsid 516 .name(name() + ".flush_tlb_mva_asid") 517 .desc("Number of times TLB was flushed by MVA & ASID") 518 ; 519 520 flushTlbAsid 521 .name(name() + ".flush_tlb_asid") 522 .desc("Number of times TLB was flushed by ASID") 523 ; 524 525 flushedEntries 526 .name(name() + ".flush_entries") 527 .desc("Number of entries that have been flushed from TLB") 528 ; 529 530 alignFaults 531 .name(name() + ".align_faults") 532 .desc("Number of TLB faults due to alignment restrictions") 533 ; 534 535 prefetchFaults 536 .name(name() + ".prefetch_faults") 537 .desc("Number of TLB faults due to prefetch") 538 ; 539 540 domainFaults 541 .name(name() + ".domain_faults") 542 .desc("Number of TLB faults due to domain restrictions") 543 ; 544 545 permsFaults 546 .name(name() + ".perms_faults") 547 .desc("Number of TLB faults due to permissions restrictions") 548 ; 549 550 instAccesses = instHits + instMisses; 551 readAccesses = readHits + readMisses; 552 writeAccesses = writeHits + writeMisses; 553 hits = readHits + writeHits + instHits; 554 misses = readMisses + writeMisses + instMisses; 555 accesses = readAccesses + writeAccesses + instAccesses; 556} 557 558void 559TLB::regProbePoints() 560{ 561 ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills")); 562} 563 564Fault 565TLB::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode, 566 Translation *translation, bool &delay, bool timing) 567{ 568 updateMiscReg(tc); 569 Addr vaddr_tainted = req->getVaddr(); 570 Addr vaddr = 0; 571 if (aarch64) 572 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr); 573 else 574 vaddr = vaddr_tainted; 575 Request::Flags flags = req->getFlags(); 576 577 bool is_fetch = (mode == Execute); 578 bool is_write = (mode == Write); 579 580 if (!is_fetch) { 581 assert(flags & MustBeOne || req->isPrefetch()); 582 if (sctlr.a || !(flags & AllowUnaligned)) { 583 if (vaddr & mask(flags & AlignmentMask)) { 584 // LPAE is always disabled in SE mode 585 return std::make_shared<DataAbort>( 586 vaddr_tainted, 587 TlbEntry::DomainType::NoAccess, is_write, 588 ArmFault::AlignmentFault, isStage2, 589 ArmFault::VmsaTran); 590 } 591 } 592 } 593 594 Addr paddr; 595 Process *p = tc->getProcessPtr(); 596 597 if (!p->pTable->translate(vaddr, paddr)) 598 return std::make_shared<GenericPageTableFault>(vaddr_tainted); 599 req->setPaddr(paddr); 600 601 return finalizePhysical(req, tc, mode); 602} 603 604Fault 605TLB::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode) 606{ 607 // a data cache maintenance instruction that operates by MVA does 608 // not generate a Data Abort exeception due to a Permission fault 609 if (req->isCacheMaintenance()) { 610 return NoFault; 611 } 612 613 Addr vaddr = req->getVaddr(); // 32-bit don't have to purify 614 Request::Flags flags = req->getFlags(); 615 bool is_fetch = (mode == Execute); 616 bool is_write = (mode == Write); 617 bool is_priv = isPriv && !(flags & UserMode); 618 619 // Get the translation type from the actuall table entry 620 ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran 621 : ArmFault::VmsaTran; 622 623 // If this is the second stage of translation and the request is for a 624 // stage 1 page table walk then we need to check the HCR.PTW bit. This 625 // allows us to generate a fault if the request targets an area marked 626 // as a device or strongly ordered. 627 if (isStage2 && req->isPTWalk() && hcr.ptw && 628 (te->mtype != TlbEntry::MemoryType::Normal)) { 629 return std::make_shared<DataAbort>( 630 vaddr, te->domain, is_write, 631 ArmFault::PermissionLL + te->lookupLevel, 632 isStage2, tranMethod); 633 } 634 635 // Generate an alignment fault for unaligned data accesses to device or 636 // strongly ordered memory 637 if (!is_fetch) { 638 if (te->mtype != TlbEntry::MemoryType::Normal) { 639 if (vaddr & mask(flags & AlignmentMask)) { 640 alignFaults++; 641 return std::make_shared<DataAbort>( 642 vaddr, TlbEntry::DomainType::NoAccess, is_write, 643 ArmFault::AlignmentFault, isStage2, 644 tranMethod); 645 } 646 } 647 } 648 649 if (te->nonCacheable) { 650 // Prevent prefetching from I/O devices. 651 if (req->isPrefetch()) { 652 // Here we can safely use the fault status for the short 653 // desc. format in all cases 654 return std::make_shared<PrefetchAbort>( 655 vaddr, ArmFault::PrefetchUncacheable, 656 isStage2, tranMethod); 657 } 658 } 659 660 if (!te->longDescFormat) { 661 switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) { 662 case 0: 663 domainFaults++; 664 DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x" 665 " domain: %#x write:%d\n", dacr, 666 static_cast<uint8_t>(te->domain), is_write); 667 if (is_fetch) { 668 // Use PC value instead of vaddr because vaddr might 669 // be aligned to cache line and should not be the 670 // address reported in FAR 671 return std::make_shared<PrefetchAbort>( 672 req->getPC(), 673 ArmFault::DomainLL + te->lookupLevel, 674 isStage2, tranMethod); 675 } else 676 return std::make_shared<DataAbort>( 677 vaddr, te->domain, is_write, 678 ArmFault::DomainLL + te->lookupLevel, 679 isStage2, tranMethod); 680 case 1: 681 // Continue with permissions check 682 break; 683 case 2: 684 panic("UNPRED domain\n"); 685 case 3: 686 return NoFault; 687 } 688 } 689 690 // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits 691 uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap; 692 uint8_t hap = te->hap; 693 694 if (sctlr.afe == 1 || te->longDescFormat) 695 ap |= 1; 696 697 bool abt; 698 bool isWritable = true; 699 // If this is a stage 2 access (eg for reading stage 1 page table entries) 700 // then don't perform the AP permissions check, we stil do the HAP check 701 // below. 702 if (isStage2) { 703 abt = false; 704 } else { 705 switch (ap) { 706 case 0: 707 DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n", 708 (int)sctlr.rs); 709 if (!sctlr.xp) { 710 switch ((int)sctlr.rs) { 711 case 2: 712 abt = is_write; 713 break; 714 case 1: 715 abt = is_write || !is_priv; 716 break; 717 case 0: 718 case 3: 719 default: 720 abt = true; 721 break; 722 } 723 } else { 724 abt = true; 725 } 726 break; 727 case 1: 728 abt = !is_priv; 729 break; 730 case 2: 731 abt = !is_priv && is_write; 732 isWritable = is_priv; 733 break; 734 case 3: 735 abt = false; 736 break; 737 case 4: 738 panic("UNPRED premissions\n"); 739 case 5: 740 abt = !is_priv || is_write; 741 isWritable = false; 742 break; 743 case 6: 744 case 7: 745 abt = is_write; 746 isWritable = false; 747 break; 748 default: 749 panic("Unknown permissions %#x\n", ap); 750 } 751 } 752 753 bool hapAbt = is_write ? !(hap & 2) : !(hap & 1); 754 bool xn = te->xn || (isWritable && sctlr.wxn) || 755 (ap == 3 && sctlr.uwxn && is_priv); 756 if (is_fetch && (abt || xn || 757 (te->longDescFormat && te->pxn && is_priv) || 758 (isSecure && te->ns && scr.sif))) { 759 permsFaults++; 760 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d " 761 "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n", 762 ap, is_priv, is_write, te->ns, scr.sif,sctlr.afe); 763 // Use PC value instead of vaddr because vaddr might be aligned to 764 // cache line and should not be the address reported in FAR 765 return std::make_shared<PrefetchAbort>( 766 req->getPC(), 767 ArmFault::PermissionLL + te->lookupLevel, 768 isStage2, tranMethod); 769 } else if (abt | hapAbt) { 770 permsFaults++; 771 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d" 772 " write:%d\n", ap, is_priv, is_write); 773 return std::make_shared<DataAbort>( 774 vaddr, te->domain, is_write, 775 ArmFault::PermissionLL + te->lookupLevel, 776 isStage2 | !abt, tranMethod); 777 } 778 return NoFault; 779} 780 781 782Fault 783TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode, 784 ThreadContext *tc) 785{ 786 assert(aarch64); 787 788 // A data cache maintenance instruction that operates by VA does 789 // not generate a Permission fault unless: 790 // * It is a data cache invalidate (dc ivac) which requires write 791 // permissions to the VA, or 792 // * It is executed from EL0 793 if (req->isCacheClean() && aarch64EL != EL0 && !isStage2) { 794 return NoFault; 795 } 796 797 Addr vaddr_tainted = req->getVaddr(); 798 Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr); 799 800 Request::Flags flags = req->getFlags(); 801 bool is_fetch = (mode == Execute); 802 // Cache clean operations require read permissions to the specified VA 803 bool is_write = !req->isCacheClean() && mode == Write; 804 bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode); 805 806 updateMiscReg(tc, curTranType); 807 808 // If this is the second stage of translation and the request is for a 809 // stage 1 page table walk then we need to check the HCR.PTW bit. This 810 // allows us to generate a fault if the request targets an area marked 811 // as a device or strongly ordered. 812 if (isStage2 && req->isPTWalk() && hcr.ptw && 813 (te->mtype != TlbEntry::MemoryType::Normal)) { 814 return std::make_shared<DataAbort>( 815 vaddr_tainted, te->domain, is_write, 816 ArmFault::PermissionLL + te->lookupLevel, 817 isStage2, ArmFault::LpaeTran); 818 } 819 820 // Generate an alignment fault for unaligned accesses to device or 821 // strongly ordered memory 822 if (!is_fetch) { 823 if (te->mtype != TlbEntry::MemoryType::Normal) { 824 if (vaddr & mask(flags & AlignmentMask)) { 825 alignFaults++; 826 return std::make_shared<DataAbort>( 827 vaddr_tainted, 828 TlbEntry::DomainType::NoAccess, is_write, 829 ArmFault::AlignmentFault, isStage2, 830 ArmFault::LpaeTran); 831 } 832 } 833 } 834 835 if (te->nonCacheable) { 836 // Prevent prefetching from I/O devices. 837 if (req->isPrefetch()) { 838 // Here we can safely use the fault status for the short 839 // desc. format in all cases 840 return std::make_shared<PrefetchAbort>( 841 vaddr_tainted, 842 ArmFault::PrefetchUncacheable, 843 isStage2, ArmFault::LpaeTran); 844 } 845 } 846 847 uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field 848 bool grant = false; 849 850 uint8_t xn = te->xn; 851 uint8_t pxn = te->pxn; 852 bool r = !is_write && !is_fetch; 853 bool w = is_write; 854 bool x = is_fetch; 855 DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, " 856 "w:%d, x:%d\n", ap, xn, pxn, r, w, x); 857 858 if (isStage2) { 859 assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2); 860 // In stage 2 we use the hypervisor access permission bits. 861 // The following permissions are described in ARM DDI 0487A.f 862 // D4-1802 863 uint8_t hap = 0x3 & te->hap; 864 if (is_fetch) { 865 // sctlr.wxn overrides the xn bit 866 grant = !sctlr.wxn && !xn; 867 } else if (is_write) { 868 grant = hap & 0x2; 869 } else { // is_read 870 grant = hap & 0x1; 871 } 872 } else { 873 switch (aarch64EL) { 874 case EL0: 875 { 876 uint8_t perm = (ap << 2) | (xn << 1) | pxn; 877 switch (perm) { 878 case 0: 879 case 1: 880 case 8: 881 case 9: 882 grant = x; 883 break; 884 case 4: 885 case 5: 886 grant = r || w || (x && !sctlr.wxn); 887 break; 888 case 6: 889 case 7: 890 grant = r || w; 891 break; 892 case 12: 893 case 13: 894 grant = r || x; 895 break; 896 case 14: 897 case 15: 898 grant = r; 899 break; 900 default: 901 grant = false; 902 } 903 } 904 break; 905 case EL1: 906 { 907 uint8_t perm = (ap << 2) | (xn << 1) | pxn; 908 switch (perm) { 909 case 0: 910 case 2: 911 grant = r || w || (x && !sctlr.wxn); 912 break; 913 case 1: 914 case 3: 915 case 4: 916 case 5: 917 case 6: 918 case 7: 919 // regions that are writeable at EL0 should not be 920 // executable at EL1 921 grant = r || w; 922 break; 923 case 8: 924 case 10: 925 case 12: 926 case 14: 927 grant = r || x; 928 break; 929 case 9: 930 case 11: 931 case 13: 932 case 15: 933 grant = r; 934 break; 935 default: 936 grant = false; 937 } 938 } 939 break; 940 case EL2: 941 case EL3: 942 { 943 uint8_t perm = (ap & 0x2) | xn; 944 switch (perm) { 945 case 0: 946 grant = r || w || (x && !sctlr.wxn) ; 947 break; 948 case 1: 949 grant = r || w; 950 break; 951 case 2: 952 grant = r || x; 953 break; 954 case 3: 955 grant = r; 956 break; 957 default: 958 grant = false; 959 } 960 } 961 break; 962 } 963 } 964 965 if (!grant) { 966 if (is_fetch) { 967 permsFaults++; 968 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. " 969 "AP:%d priv:%d write:%d ns:%d sif:%d " 970 "sctlr.afe: %d\n", 971 ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe); 972 // Use PC value instead of vaddr because vaddr might be aligned to 973 // cache line and should not be the address reported in FAR 974 return std::make_shared<PrefetchAbort>( 975 req->getPC(), 976 ArmFault::PermissionLL + te->lookupLevel, 977 isStage2, ArmFault::LpaeTran); 978 } else { 979 permsFaults++; 980 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d " 981 "priv:%d write:%d\n", ap, is_priv, is_write); 982 return std::make_shared<DataAbort>( 983 vaddr_tainted, te->domain, is_write, 984 ArmFault::PermissionLL + te->lookupLevel, 985 isStage2, ArmFault::LpaeTran); 986 } 987 } 988 989 return NoFault; 990} 991 992Fault 993TLB::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode, 994 Translation *translation, bool &delay, bool timing, 995 TLB::ArmTranslationType tranType, bool functional) 996{ 997 // No such thing as a functional timing access 998 assert(!(timing && functional)); 999 1000 updateMiscReg(tc, tranType); 1001 1002 Addr vaddr_tainted = req->getVaddr(); 1003 Addr vaddr = 0; 1004 if (aarch64) 1005 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr); 1006 else 1007 vaddr = vaddr_tainted; 1008 Request::Flags flags = req->getFlags(); 1009 1010 bool is_fetch = (mode == Execute); 1011 bool is_write = (mode == Write); 1012 bool long_desc_format = aarch64 || longDescFormatInUse(tc); 1013 ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran 1014 : ArmFault::VmsaTran; 1015 1016 req->setAsid(asid); 1017 1018 DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n", 1019 isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran); 1020 1021 DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x " 1022 "flags %#lx tranType 0x%x\n", vaddr_tainted, mode, isStage2, 1023 scr, sctlr, flags, tranType); 1024 1025 if ((req->isInstFetch() && (!sctlr.i)) || 1026 ((!req->isInstFetch()) && (!sctlr.c))){ 1027 if (!req->isCacheMaintenance()) { 1028 req->setFlags(Request::UNCACHEABLE); 1029 } 1030 req->setFlags(Request::STRICT_ORDER); 1031 } 1032 if (!is_fetch) { 1033 assert(flags & MustBeOne || req->isPrefetch()); 1034 if (sctlr.a || !(flags & AllowUnaligned)) { 1035 if (vaddr & mask(flags & AlignmentMask)) { 1036 alignFaults++; 1037 return std::make_shared<DataAbort>( 1038 vaddr_tainted, 1039 TlbEntry::DomainType::NoAccess, is_write, 1040 ArmFault::AlignmentFault, isStage2, 1041 tranMethod); 1042 } 1043 } 1044 } 1045 1046 // If guest MMU is off or hcr.vm=0 go straight to stage2 1047 if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) { 1048 1049 req->setPaddr(vaddr); 1050 // When the MMU is off the security attribute corresponds to the 1051 // security state of the processor 1052 if (isSecure) 1053 req->setFlags(Request::SECURE); 1054 1055 // @todo: double check this (ARM ARM issue C B3.2.1) 1056 if (long_desc_format || sctlr.tre == 0 || nmrr.ir0 == 0 || 1057 nmrr.or0 == 0 || prrr.tr0 != 0x2) { 1058 if (!req->isCacheMaintenance()) { 1059 req->setFlags(Request::UNCACHEABLE); 1060 } 1061 req->setFlags(Request::STRICT_ORDER); 1062 } 1063 1064 // Set memory attributes 1065 TlbEntry temp_te; 1066 temp_te.ns = !isSecure; 1067 if (isStage2 || hcr.dc == 0 || isSecure || 1068 (isHyp && !(tranType & S1CTran))) { 1069 1070 temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal 1071 : TlbEntry::MemoryType::StronglyOrdered; 1072 temp_te.innerAttrs = 0x0; 1073 temp_te.outerAttrs = 0x0; 1074 temp_te.shareable = true; 1075 temp_te.outerShareable = true; 1076 } else { 1077 temp_te.mtype = TlbEntry::MemoryType::Normal; 1078 temp_te.innerAttrs = 0x3; 1079 temp_te.outerAttrs = 0x3; 1080 temp_te.shareable = false; 1081 temp_te.outerShareable = false; 1082 } 1083 temp_te.setAttributes(long_desc_format); 1084 DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: " 1085 "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n", 1086 temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs, 1087 isStage2); 1088 setAttr(temp_te.attributes); 1089 1090 return testTranslation(req, mode, TlbEntry::DomainType::NoAccess); 1091 } 1092 1093 DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n", 1094 isStage2 ? "IPA" : "VA", vaddr_tainted, asid); 1095 // Translation enabled 1096 1097 TlbEntry *te = NULL; 1098 TlbEntry mergeTe; 1099 Fault fault = getResultTe(&te, req, tc, mode, translation, timing, 1100 functional, &mergeTe); 1101 // only proceed if we have a valid table entry 1102 if ((te == NULL) && (fault == NoFault)) delay = true; 1103 1104 // If we have the table entry transfer some of the attributes to the 1105 // request that triggered the translation 1106 if (te != NULL) { 1107 // Set memory attributes 1108 DPRINTF(TLBVerbose, 1109 "Setting memory attributes: shareable: %d, innerAttrs: %d, " 1110 "outerAttrs: %d, mtype: %d, isStage2: %d\n", 1111 te->shareable, te->innerAttrs, te->outerAttrs, 1112 static_cast<uint8_t>(te->mtype), isStage2); 1113 setAttr(te->attributes); 1114 1115 if (te->nonCacheable && !req->isCacheMaintenance()) 1116 req->setFlags(Request::UNCACHEABLE); 1117 1118 // Require requests to be ordered if the request goes to 1119 // strongly ordered or device memory (i.e., anything other 1120 // than normal memory requires strict order). 1121 if (te->mtype != TlbEntry::MemoryType::Normal) 1122 req->setFlags(Request::STRICT_ORDER); 1123 1124 Addr pa = te->pAddr(vaddr); 1125 req->setPaddr(pa); 1126 1127 if (isSecure && !te->ns) { 1128 req->setFlags(Request::SECURE); 1129 } 1130 if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) && 1131 (te->mtype != TlbEntry::MemoryType::Normal)) { 1132 // Unaligned accesses to Device memory should always cause an 1133 // abort regardless of sctlr.a 1134 alignFaults++; 1135 return std::make_shared<DataAbort>( 1136 vaddr_tainted, 1137 TlbEntry::DomainType::NoAccess, is_write, 1138 ArmFault::AlignmentFault, isStage2, 1139 tranMethod); 1140 } 1141 1142 // Check for a trickbox generated address fault 1143 if (fault == NoFault) 1144 fault = testTranslation(req, mode, te->domain); 1145 } 1146 1147 if (fault == NoFault) { 1148 // Don't try to finalize a physical address unless the 1149 // translation has completed (i.e., there is a table entry). 1150 return te ? finalizePhysical(req, tc, mode) : NoFault; 1151 } else { 1152 return fault; 1153 } 1154} 1155 1156Fault 1157TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode, 1158 TLB::ArmTranslationType tranType) 1159{ 1160 updateMiscReg(tc, tranType); 1161 1162 if (directToStage2) { 1163 assert(stage2Tlb); 1164 return stage2Tlb->translateAtomic(req, tc, mode, tranType); 1165 } 1166 1167 bool delay = false; 1168 Fault fault; 1169 if (FullSystem) 1170 fault = translateFs(req, tc, mode, NULL, delay, false, tranType); 1171 else 1172 fault = translateSe(req, tc, mode, NULL, delay, false); 1173 assert(!delay); 1174 return fault; 1175} 1176 1177Fault 1178TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode, 1179 TLB::ArmTranslationType tranType) 1180{ 1181 updateMiscReg(tc, tranType); 1182 1183 if (directToStage2) { 1184 assert(stage2Tlb); 1185 return stage2Tlb->translateFunctional(req, tc, mode, tranType); 1186 } 1187 1188 bool delay = false; 1189 Fault fault; 1190 if (FullSystem) 1191 fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true); 1192 else 1193 fault = translateSe(req, tc, mode, NULL, delay, false); 1194 assert(!delay); 1195 return fault; 1196} 1197 1198void 1199TLB::translateTiming(const RequestPtr &req, ThreadContext *tc, 1200 Translation *translation, Mode mode, TLB::ArmTranslationType tranType) 1201{ 1202 updateMiscReg(tc, tranType); 1203 1204 if (directToStage2) { 1205 assert(stage2Tlb); 1206 stage2Tlb->translateTiming(req, tc, translation, mode, tranType); 1207 return; 1208 } 1209 1210 assert(translation); 1211 1212 translateComplete(req, tc, translation, mode, tranType, isStage2); 1213} 1214 1215Fault 1216TLB::translateComplete(const RequestPtr &req, ThreadContext *tc, 1217 Translation *translation, Mode mode, TLB::ArmTranslationType tranType, 1218 bool callFromS2) 1219{ 1220 bool delay = false; 1221 Fault fault; 1222 if (FullSystem) 1223 fault = translateFs(req, tc, mode, translation, delay, true, tranType); 1224 else 1225 fault = translateSe(req, tc, mode, translation, delay, true); 1226 DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay, fault != 1227 NoFault); 1228 // If we have a translation, and we're not in the middle of doing a stage 1229 // 2 translation tell the translation that we've either finished or its 1230 // going to take a while. By not doing this when we're in the middle of a 1231 // stage 2 translation we prevent marking the translation as delayed twice, 1232 // one when the translation starts and again when the stage 1 translation 1233 // completes. 1234 if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) { 1235 if (!delay) 1236 translation->finish(fault, req, tc, mode); 1237 else 1238 translation->markDelayed(); 1239 } 1240 return fault; 1241} 1242 1243Port * 1244TLB::getTableWalkerPort() 1245{ 1246 return &stage2Mmu->getDMAPort(); 1247} 1248 1249void 1250TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType) 1251{ 1252 // check if the regs have changed, or the translation mode is different. 1253 // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle 1254 // one type of translation anyway 1255 if (miscRegValid && miscRegContext == tc->contextId() && 1256 ((tranType == curTranType) || isStage2)) { 1257 return; 1258 } 1259 1260 DPRINTF(TLBVerbose, "TLB variables changed!\n"); 1261 cpsr = tc->readMiscReg(MISCREG_CPSR); 1262 1263 // Dependencies: SCR/SCR_EL3, CPSR 1264 isSecure = inSecureState(tc) && 1265 !(tranType & HypMode) && !(tranType & S1S2NsTran); 1266 1267 aarch64EL = tranTypeEL(cpsr, tranType); 1268 aarch64 = isStage2 ? 1269 ELIs64(tc, EL2) : 1270 ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL); 1271 1272 if (aarch64) { // AArch64 1273 // determine EL we need to translate in 1274 switch (aarch64EL) { 1275 case EL0: 1276 case EL1: 1277 { 1278 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1); 1279 ttbcr = tc->readMiscReg(MISCREG_TCR_EL1); 1280 uint64_t ttbr_asid = ttbcr.a1 ? 1281 tc->readMiscReg(MISCREG_TTBR1_EL1) : 1282 tc->readMiscReg(MISCREG_TTBR0_EL1); 1283 asid = bits(ttbr_asid, 1284 (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48); 1285 } 1286 break; 1287 case EL2: 1288 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2); 1289 ttbcr = tc->readMiscReg(MISCREG_TCR_EL2); 1290 asid = -1; 1291 break; 1292 case EL3: 1293 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3); 1294 ttbcr = tc->readMiscReg(MISCREG_TCR_EL3); 1295 asid = -1; 1296 break; 1297 } 1298 hcr = tc->readMiscReg(MISCREG_HCR_EL2); 1299 scr = tc->readMiscReg(MISCREG_SCR_EL3); 1300 isPriv = aarch64EL != EL0; 1301 if (haveVirtualization) { 1302 vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48); 1303 isHyp = aarch64EL == EL2; 1304 isHyp |= tranType & HypMode; 1305 isHyp &= (tranType & S1S2NsTran) == 0; 1306 isHyp &= (tranType & S1CTran) == 0; 1307 // Work out if we should skip the first stage of translation and go 1308 // directly to stage 2. This value is cached so we don't have to 1309 // compute it for every translation. 1310 stage2Req = isStage2 || 1311 (hcr.vm && !isHyp && !isSecure && 1312 !(tranType & S1CTran) && (aarch64EL < EL2) && 1313 !(tranType & S1E1Tran)); // <--- FIX THIS HACK 1314 stage2DescReq = isStage2 || (hcr.vm && !isHyp && !isSecure && 1315 (aarch64EL < EL2)); 1316 directToStage2 = !isStage2 && stage2Req && !sctlr.m; 1317 } else { 1318 vmid = 0; 1319 isHyp = false; 1320 directToStage2 = false; 1321 stage2Req = false; 1322 stage2DescReq = false; 1323 } 1324 } else { // AArch32 1325 sctlr = tc->readMiscReg(snsBankedIndex(MISCREG_SCTLR, tc, 1326 !isSecure)); 1327 ttbcr = tc->readMiscReg(snsBankedIndex(MISCREG_TTBCR, tc, 1328 !isSecure)); 1329 scr = tc->readMiscReg(MISCREG_SCR); 1330 isPriv = cpsr.mode != MODE_USER; 1331 if (longDescFormatInUse(tc)) { 1332 uint64_t ttbr_asid = tc->readMiscReg( 1333 snsBankedIndex(ttbcr.a1 ? MISCREG_TTBR1 : 1334 MISCREG_TTBR0, 1335 tc, !isSecure)); 1336 asid = bits(ttbr_asid, 55, 48); 1337 } else { // Short-descriptor translation table format in use 1338 CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex( 1339 MISCREG_CONTEXTIDR, tc,!isSecure)); 1340 asid = context_id.asid; 1341 } 1342 prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR, tc, 1343 !isSecure)); 1344 nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR, tc, 1345 !isSecure)); 1346 dacr = tc->readMiscReg(snsBankedIndex(MISCREG_DACR, tc, 1347 !isSecure)); 1348 hcr = tc->readMiscReg(MISCREG_HCR); 1349 1350 if (haveVirtualization) { 1351 vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48); 1352 isHyp = cpsr.mode == MODE_HYP; 1353 isHyp |= tranType & HypMode; 1354 isHyp &= (tranType & S1S2NsTran) == 0; 1355 isHyp &= (tranType & S1CTran) == 0; 1356 if (isHyp) { 1357 sctlr = tc->readMiscReg(MISCREG_HSCTLR); 1358 } 1359 // Work out if we should skip the first stage of translation and go 1360 // directly to stage 2. This value is cached so we don't have to 1361 // compute it for every translation. 1362 stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure && 1363 !(tranType & S1CTran); 1364 stage2DescReq = hcr.vm && !isStage2 && !isHyp && !isSecure; 1365 directToStage2 = stage2Req && !sctlr.m; 1366 } else { 1367 vmid = 0; 1368 stage2Req = false; 1369 isHyp = false; 1370 directToStage2 = false; 1371 stage2DescReq = false; 1372 } 1373 } 1374 miscRegValid = true; 1375 miscRegContext = tc->contextId(); 1376 curTranType = tranType; 1377} 1378 1379ExceptionLevel 1380TLB::tranTypeEL(CPSR cpsr, ArmTranslationType type) 1381{ 1382 switch (type) { 1383 case S1E0Tran: 1384 case S12E0Tran: 1385 return EL0; 1386 1387 case S1E1Tran: 1388 case S12E1Tran: 1389 return EL1; 1390 1391 case S1E2Tran: 1392 return EL2; 1393 1394 case S1E3Tran: 1395 return EL3; 1396 1397 case NormalTran: 1398 case S1CTran: 1399 case S1S2NsTran: 1400 case HypMode: 1401 return opModeToEL((OperatingMode)(uint8_t)cpsr.mode); 1402 1403 default: 1404 panic("Unknown translation mode!\n"); 1405 } 1406} 1407 1408Fault 1409TLB::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, 1410 Translation *translation, bool timing, bool functional, 1411 bool is_secure, TLB::ArmTranslationType tranType) 1412{ 1413 // In a 2-stage system, the IPA->PA translation can be started via this 1414 // call so make sure the miscRegs are correct. 1415 if (isStage2) { 1416 updateMiscReg(tc, tranType); 1417 } 1418 bool is_fetch = (mode == Execute); 1419 bool is_write = (mode == Write); 1420 1421 Addr vaddr_tainted = req->getVaddr(); 1422 Addr vaddr = 0; 1423 ExceptionLevel target_el = aarch64 ? aarch64EL : EL1; 1424 if (aarch64) { 1425 vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr); 1426 } else { 1427 vaddr = vaddr_tainted; 1428 } 1429 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el); 1430 if (*te == NULL) { 1431 if (req->isPrefetch()) { 1432 // if the request is a prefetch don't attempt to fill the TLB or go 1433 // any further with the memory access (here we can safely use the 1434 // fault status for the short desc. format in all cases) 1435 prefetchFaults++; 1436 return std::make_shared<PrefetchAbort>( 1437 vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2); 1438 } 1439 1440 if (is_fetch) 1441 instMisses++; 1442 else if (is_write) 1443 writeMisses++; 1444 else 1445 readMisses++; 1446 1447 // start translation table walk, pass variables rather than 1448 // re-retreaving in table walker for speed 1449 DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n", 1450 vaddr_tainted, asid, vmid); 1451 Fault fault; 1452 fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode, 1453 translation, timing, functional, is_secure, 1454 tranType, stage2DescReq); 1455 // for timing mode, return and wait for table walk, 1456 if (timing || fault != NoFault) { 1457 return fault; 1458 } 1459 1460 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el); 1461 if (!*te) 1462 printTlb(); 1463 assert(*te); 1464 } else { 1465 if (is_fetch) 1466 instHits++; 1467 else if (is_write) 1468 writeHits++; 1469 else 1470 readHits++; 1471 } 1472 return NoFault; 1473} 1474 1475Fault 1476TLB::getResultTe(TlbEntry **te, const RequestPtr &req, 1477 ThreadContext *tc, Mode mode, 1478 Translation *translation, bool timing, bool functional, 1479 TlbEntry *mergeTe) 1480{ 1481 Fault fault; 1482 1483 if (isStage2) { 1484 // We are already in the stage 2 TLB. Grab the table entry for stage 1485 // 2 only. We are here because stage 1 translation is disabled. 1486 TlbEntry *s2Te = NULL; 1487 // Get the stage 2 table entry 1488 fault = getTE(&s2Te, req, tc, mode, translation, timing, functional, 1489 isSecure, curTranType); 1490 // Check permissions of stage 2 1491 if ((s2Te != NULL) && (fault == NoFault)) { 1492 if (aarch64) 1493 fault = checkPermissions64(s2Te, req, mode, tc); 1494 else 1495 fault = checkPermissions(s2Te, req, mode); 1496 } 1497 *te = s2Te; 1498 return fault; 1499 } 1500 1501 TlbEntry *s1Te = NULL; 1502 1503 Addr vaddr_tainted = req->getVaddr(); 1504 1505 // Get the stage 1 table entry 1506 fault = getTE(&s1Te, req, tc, mode, translation, timing, functional, 1507 isSecure, curTranType); 1508 // only proceed if we have a valid table entry 1509 if ((s1Te != NULL) && (fault == NoFault)) { 1510 // Check stage 1 permissions before checking stage 2 1511 if (aarch64) 1512 fault = checkPermissions64(s1Te, req, mode, tc); 1513 else 1514 fault = checkPermissions(s1Te, req, mode); 1515 if (stage2Req & (fault == NoFault)) { 1516 Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te, 1517 req, translation, mode, timing, functional, curTranType); 1518 fault = s2Lookup->getTe(tc, mergeTe); 1519 if (s2Lookup->isComplete()) { 1520 *te = mergeTe; 1521 // We've finished with the lookup so delete it 1522 delete s2Lookup; 1523 } else { 1524 // The lookup hasn't completed, so we can't delete it now. We 1525 // get round this by asking the object to self delete when the 1526 // translation is complete. 1527 s2Lookup->setSelfDelete(); 1528 } 1529 } else { 1530 // This case deals with an S1 hit (or bypass), followed by 1531 // an S2 hit-but-perms issue 1532 if (isStage2) { 1533 DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n", 1534 vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault); 1535 if (fault != NoFault) { 1536 ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get()); 1537 armFault->annotate(ArmFault::S1PTW, false); 1538 armFault->annotate(ArmFault::OVA, vaddr_tainted); 1539 } 1540 } 1541 *te = s1Te; 1542 } 1543 } 1544 return fault; 1545} 1546 1547void 1548TLB::setTestInterface(SimObject *_ti) 1549{ 1550 if (!_ti) { 1551 test = nullptr; 1552 } else { 1553 TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti)); 1554 fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name()); 1555 test = ti; 1556 } 1557} 1558 1559Fault 1560TLB::testTranslation(const RequestPtr &req, Mode mode, 1561 TlbEntry::DomainType domain) 1562{ 1563 if (!test || !req->hasSize() || req->getSize() == 0 || 1564 req->isCacheMaintenance()) { 1565 return NoFault; 1566 } else { 1567 return test->translationCheck(req, isPriv, mode, domain); 1568 } 1569} 1570 1571Fault 1572TLB::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode, 1573 TlbEntry::DomainType domain, LookupLevel lookup_level) 1574{ 1575 if (!test) { 1576 return NoFault; 1577 } else { 1578 return test->walkCheck(pa, size, va, is_secure, isPriv, mode, 1579 domain, lookup_level); 1580 } 1581} 1582 1583 1584ArmISA::TLB * 1585ArmTLBParams::create() 1586{ 1587 return new ArmISA::TLB(this); 1588} 1589