tlb.cc revision 12735:e3da526a0654
1/* 2 * Copyright (c) 2010-2013, 2016-2018 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2001-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Ali Saidi 41 * Nathan Binkert 42 * Steve Reinhardt 43 */ 44 45#include "arch/arm/tlb.hh" 46 47#include <memory> 48#include <string> 49#include <vector> 50 51#include "arch/arm/faults.hh" 52#include "arch/arm/pagetable.hh" 53#include "arch/arm/stage2_lookup.hh" 54#include "arch/arm/stage2_mmu.hh" 55#include "arch/arm/system.hh" 56#include "arch/arm/table_walker.hh" 57#include "arch/arm/utility.hh" 58#include "arch/generic/mmapped_ipr.hh" 59#include "base/inifile.hh" 60#include "base/str.hh" 61#include "base/trace.hh" 62#include "cpu/base.hh" 63#include "cpu/thread_context.hh" 64#include "debug/Checkpoint.hh" 65#include "debug/TLB.hh" 66#include "debug/TLBVerbose.hh" 67#include "mem/page_table.hh" 68#include "mem/request.hh" 69#include "params/ArmTLB.hh" 70#include "sim/full_system.hh" 71#include "sim/process.hh" 72 73using namespace std; 74using namespace ArmISA; 75 76TLB::TLB(const ArmTLBParams *p) 77 : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size), 78 isStage2(p->is_stage2), stage2Req(false), _attr(0), 79 directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL), 80 stage2Mmu(NULL), test(nullptr), rangeMRU(1), 81 aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false), 82 isHyp(false), asid(0), vmid(0), dacr(0), 83 miscRegValid(false), miscRegContext(0), curTranType(NormalTran) 84{ 85 const ArmSystem *sys = dynamic_cast<const ArmSystem *>(p->sys); 86 87 tableWalker->setTlb(this); 88 89 // Cache system-level properties 90 haveLPAE = tableWalker->haveLPAE(); 91 haveVirtualization = tableWalker->haveVirtualization(); 92 haveLargeAsid64 = tableWalker->haveLargeAsid64(); 93 94 if (sys) 95 m5opRange = sys->m5opRange(); 96} 97 98TLB::~TLB() 99{ 100 delete[] table; 101} 102 103void 104TLB::init() 105{ 106 if (stage2Mmu && !isStage2) 107 stage2Tlb = stage2Mmu->stage2Tlb(); 108} 109 110void 111TLB::setMMU(Stage2MMU *m, MasterID master_id) 112{ 113 stage2Mmu = m; 114 tableWalker->setMMU(m, master_id); 115} 116 117bool 118TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa) 119{ 120 updateMiscReg(tc); 121 122 if (directToStage2) { 123 assert(stage2Tlb); 124 return stage2Tlb->translateFunctional(tc, va, pa); 125 } 126 127 TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false, 128 aarch64 ? aarch64EL : EL1); 129 if (!e) 130 return false; 131 pa = e->pAddr(va); 132 return true; 133} 134 135Fault 136TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const 137{ 138 const Addr paddr = req->getPaddr(); 139 140 if (m5opRange.contains(paddr)) { 141 req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR); 142 req->setPaddr(GenericISA::iprAddressPseudoInst( 143 (paddr >> 8) & 0xFF, 144 paddr & 0xFF)); 145 } 146 147 return NoFault; 148} 149 150TlbEntry* 151TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure, 152 bool functional, bool ignore_asn, uint8_t target_el) 153{ 154 155 TlbEntry *retval = NULL; 156 157 // Maintaining LRU array 158 int x = 0; 159 while (retval == NULL && x < size) { 160 if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false, 161 target_el)) || 162 (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) { 163 // We only move the hit entry ahead when the position is higher 164 // than rangeMRU 165 if (x > rangeMRU && !functional) { 166 TlbEntry tmp_entry = table[x]; 167 for (int i = x; i > 0; i--) 168 table[i] = table[i - 1]; 169 table[0] = tmp_entry; 170 retval = &table[0]; 171 } else { 172 retval = &table[x]; 173 } 174 break; 175 } 176 ++x; 177 } 178 179 DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d " 180 "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d " 181 "el: %d\n", 182 va, asn, retval ? "hit" : "miss", vmid, hyp, secure, 183 retval ? retval->pfn : 0, retval ? retval->size : 0, 184 retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0, 185 retval ? retval->ns : 0, retval ? retval->nstid : 0, 186 retval ? retval->global : 0, retval ? retval->asid : 0, 187 retval ? retval->el : 0); 188 189 return retval; 190} 191 192// insert a new TLB entry 193void 194TLB::insert(Addr addr, TlbEntry &entry) 195{ 196 DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x" 197 " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d" 198 " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn, 199 entry.size, entry.vpn, entry.asid, entry.vmid, entry.N, 200 entry.global, entry.valid, entry.nonCacheable, entry.xn, 201 entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid, 202 entry.isHyp); 203 204 if (table[size - 1].valid) 205 DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x " 206 "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n", 207 table[size-1].vpn << table[size-1].N, table[size-1].asid, 208 table[size-1].vmid, table[size-1].pfn << table[size-1].N, 209 table[size-1].size, table[size-1].ap, table[size-1].ns, 210 table[size-1].nstid, table[size-1].global, table[size-1].isHyp, 211 table[size-1].el); 212 213 //inserting to MRU position and evicting the LRU one 214 215 for (int i = size - 1; i > 0; --i) 216 table[i] = table[i-1]; 217 table[0] = entry; 218 219 inserts++; 220 ppRefills->notify(1); 221} 222 223void 224TLB::printTlb() const 225{ 226 int x = 0; 227 TlbEntry *te; 228 DPRINTF(TLB, "Current TLB contents:\n"); 229 while (x < size) { 230 te = &table[x]; 231 if (te->valid) 232 DPRINTF(TLB, " * %s\n", te->print()); 233 ++x; 234 } 235} 236 237void 238TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el) 239{ 240 DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n", 241 (secure_lookup ? "secure" : "non-secure")); 242 int x = 0; 243 TlbEntry *te; 244 while (x < size) { 245 te = &table[x]; 246 if (te->valid && secure_lookup == !te->nstid && 247 (te->vmid == vmid || secure_lookup) && 248 checkELMatch(target_el, te->el, ignore_el)) { 249 250 DPRINTF(TLB, " - %s\n", te->print()); 251 te->valid = false; 252 flushedEntries++; 253 } 254 ++x; 255 } 256 257 flushTlb++; 258 259 // If there's a second stage TLB (and we're not it) then flush it as well 260 // if we're currently in hyp mode 261 if (!isStage2 && isHyp) { 262 stage2Tlb->flushAllSecurity(secure_lookup, true); 263 } 264} 265 266void 267TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el) 268{ 269 DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n", 270 (hyp ? "hyp" : "non-hyp")); 271 int x = 0; 272 TlbEntry *te; 273 while (x < size) { 274 te = &table[x]; 275 if (te->valid && te->nstid && te->isHyp == hyp && 276 checkELMatch(target_el, te->el, ignore_el)) { 277 278 DPRINTF(TLB, " - %s\n", te->print()); 279 flushedEntries++; 280 te->valid = false; 281 } 282 ++x; 283 } 284 285 flushTlb++; 286 287 // If there's a second stage TLB (and we're not it) then flush it as well 288 if (!isStage2 && !hyp) { 289 stage2Tlb->flushAllNs(false, true); 290 } 291} 292 293void 294TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el) 295{ 296 DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x " 297 "(%s lookup)\n", mva, asn, (secure_lookup ? 298 "secure" : "non-secure")); 299 _flushMva(mva, asn, secure_lookup, false, false, target_el); 300 flushTlbMvaAsid++; 301} 302 303void 304TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el) 305{ 306 DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn, 307 (secure_lookup ? "secure" : "non-secure")); 308 309 int x = 0 ; 310 TlbEntry *te; 311 312 while (x < size) { 313 te = &table[x]; 314 if (te->valid && te->asid == asn && secure_lookup == !te->nstid && 315 (te->vmid == vmid || secure_lookup) && 316 checkELMatch(target_el, te->el, false)) { 317 318 te->valid = false; 319 DPRINTF(TLB, " - %s\n", te->print()); 320 flushedEntries++; 321 } 322 ++x; 323 } 324 flushTlbAsid++; 325} 326 327void 328TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el) 329{ 330 DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva, 331 (secure_lookup ? "secure" : "non-secure")); 332 _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el); 333 flushTlbMva++; 334} 335 336void 337TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp, 338 bool ignore_asn, uint8_t target_el) 339{ 340 TlbEntry *te; 341 // D5.7.2: Sign-extend address to 64 bits 342 mva = sext<56>(mva); 343 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn, 344 target_el); 345 while (te != NULL) { 346 if (secure_lookup == !te->nstid) { 347 DPRINTF(TLB, " - %s\n", te->print()); 348 te->valid = false; 349 flushedEntries++; 350 } 351 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn, 352 target_el); 353 } 354} 355 356void 357TLB::flushIpaVmid(Addr ipa, bool secure_lookup, bool hyp, uint8_t target_el) 358{ 359 assert(!isStage2); 360 stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, hyp, true, target_el); 361} 362 363bool 364TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el) 365{ 366 bool elMatch = true; 367 if (!ignore_el) { 368 if (target_el == 2 || target_el == 3) { 369 elMatch = (tentry_el == target_el); 370 } else { 371 elMatch = (tentry_el == 0) || (tentry_el == 1); 372 } 373 } 374 return elMatch; 375} 376 377void 378TLB::drainResume() 379{ 380 // We might have unserialized something or switched CPUs, so make 381 // sure to re-read the misc regs. 382 miscRegValid = false; 383} 384 385void 386TLB::takeOverFrom(BaseTLB *_otlb) 387{ 388 TLB *otlb = dynamic_cast<TLB*>(_otlb); 389 /* Make sure we actually have a valid type */ 390 if (otlb) { 391 _attr = otlb->_attr; 392 haveLPAE = otlb->haveLPAE; 393 directToStage2 = otlb->directToStage2; 394 stage2Req = otlb->stage2Req; 395 396 /* Sync the stage2 MMU if they exist in both 397 * the old CPU and the new 398 */ 399 if (!isStage2 && 400 stage2Tlb && otlb->stage2Tlb) { 401 stage2Tlb->takeOverFrom(otlb->stage2Tlb); 402 } 403 } else { 404 panic("Incompatible TLB type!"); 405 } 406} 407 408void 409TLB::serialize(CheckpointOut &cp) const 410{ 411 DPRINTF(Checkpoint, "Serializing Arm TLB\n"); 412 413 SERIALIZE_SCALAR(_attr); 414 SERIALIZE_SCALAR(haveLPAE); 415 SERIALIZE_SCALAR(directToStage2); 416 SERIALIZE_SCALAR(stage2Req); 417 418 int num_entries = size; 419 SERIALIZE_SCALAR(num_entries); 420 for (int i = 0; i < size; i++) 421 table[i].serializeSection(cp, csprintf("TlbEntry%d", i)); 422} 423 424void 425TLB::unserialize(CheckpointIn &cp) 426{ 427 DPRINTF(Checkpoint, "Unserializing Arm TLB\n"); 428 429 UNSERIALIZE_SCALAR(_attr); 430 UNSERIALIZE_SCALAR(haveLPAE); 431 UNSERIALIZE_SCALAR(directToStage2); 432 UNSERIALIZE_SCALAR(stage2Req); 433 434 int num_entries; 435 UNSERIALIZE_SCALAR(num_entries); 436 for (int i = 0; i < min(size, num_entries); i++) 437 table[i].unserializeSection(cp, csprintf("TlbEntry%d", i)); 438} 439 440void 441TLB::regStats() 442{ 443 BaseTLB::regStats(); 444 instHits 445 .name(name() + ".inst_hits") 446 .desc("ITB inst hits") 447 ; 448 449 instMisses 450 .name(name() + ".inst_misses") 451 .desc("ITB inst misses") 452 ; 453 454 instAccesses 455 .name(name() + ".inst_accesses") 456 .desc("ITB inst accesses") 457 ; 458 459 readHits 460 .name(name() + ".read_hits") 461 .desc("DTB read hits") 462 ; 463 464 readMisses 465 .name(name() + ".read_misses") 466 .desc("DTB read misses") 467 ; 468 469 readAccesses 470 .name(name() + ".read_accesses") 471 .desc("DTB read accesses") 472 ; 473 474 writeHits 475 .name(name() + ".write_hits") 476 .desc("DTB write hits") 477 ; 478 479 writeMisses 480 .name(name() + ".write_misses") 481 .desc("DTB write misses") 482 ; 483 484 writeAccesses 485 .name(name() + ".write_accesses") 486 .desc("DTB write accesses") 487 ; 488 489 hits 490 .name(name() + ".hits") 491 .desc("DTB hits") 492 ; 493 494 misses 495 .name(name() + ".misses") 496 .desc("DTB misses") 497 ; 498 499 accesses 500 .name(name() + ".accesses") 501 .desc("DTB accesses") 502 ; 503 504 flushTlb 505 .name(name() + ".flush_tlb") 506 .desc("Number of times complete TLB was flushed") 507 ; 508 509 flushTlbMva 510 .name(name() + ".flush_tlb_mva") 511 .desc("Number of times TLB was flushed by MVA") 512 ; 513 514 flushTlbMvaAsid 515 .name(name() + ".flush_tlb_mva_asid") 516 .desc("Number of times TLB was flushed by MVA & ASID") 517 ; 518 519 flushTlbAsid 520 .name(name() + ".flush_tlb_asid") 521 .desc("Number of times TLB was flushed by ASID") 522 ; 523 524 flushedEntries 525 .name(name() + ".flush_entries") 526 .desc("Number of entries that have been flushed from TLB") 527 ; 528 529 alignFaults 530 .name(name() + ".align_faults") 531 .desc("Number of TLB faults due to alignment restrictions") 532 ; 533 534 prefetchFaults 535 .name(name() + ".prefetch_faults") 536 .desc("Number of TLB faults due to prefetch") 537 ; 538 539 domainFaults 540 .name(name() + ".domain_faults") 541 .desc("Number of TLB faults due to domain restrictions") 542 ; 543 544 permsFaults 545 .name(name() + ".perms_faults") 546 .desc("Number of TLB faults due to permissions restrictions") 547 ; 548 549 instAccesses = instHits + instMisses; 550 readAccesses = readHits + readMisses; 551 writeAccesses = writeHits + writeMisses; 552 hits = readHits + writeHits + instHits; 553 misses = readMisses + writeMisses + instMisses; 554 accesses = readAccesses + writeAccesses + instAccesses; 555} 556 557void 558TLB::regProbePoints() 559{ 560 ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills")); 561} 562 563Fault 564TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode, 565 Translation *translation, bool &delay, bool timing) 566{ 567 updateMiscReg(tc); 568 Addr vaddr_tainted = req->getVaddr(); 569 Addr vaddr = 0; 570 if (aarch64) 571 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr); 572 else 573 vaddr = vaddr_tainted; 574 Request::Flags flags = req->getFlags(); 575 576 bool is_fetch = (mode == Execute); 577 bool is_write = (mode == Write); 578 579 if (!is_fetch) { 580 assert(flags & MustBeOne); 581 if (sctlr.a || !(flags & AllowUnaligned)) { 582 if (vaddr & mask(flags & AlignmentMask)) { 583 // LPAE is always disabled in SE mode 584 return std::make_shared<DataAbort>( 585 vaddr_tainted, 586 TlbEntry::DomainType::NoAccess, is_write, 587 ArmFault::AlignmentFault, isStage2, 588 ArmFault::VmsaTran); 589 } 590 } 591 } 592 593 Addr paddr; 594 Process *p = tc->getProcessPtr(); 595 596 if (!p->pTable->translate(vaddr, paddr)) 597 return std::make_shared<GenericPageTableFault>(vaddr_tainted); 598 req->setPaddr(paddr); 599 600 return finalizePhysical(req, tc, mode); 601} 602 603Fault 604TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode) 605{ 606 // a data cache maintenance instruction that operates by MVA does 607 // not generate a Data Abort exeception due to a Permission fault 608 if (req->isCacheMaintenance()) { 609 return NoFault; 610 } 611 612 Addr vaddr = req->getVaddr(); // 32-bit don't have to purify 613 Request::Flags flags = req->getFlags(); 614 bool is_fetch = (mode == Execute); 615 bool is_write = (mode == Write); 616 bool is_priv = isPriv && !(flags & UserMode); 617 618 // Get the translation type from the actuall table entry 619 ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran 620 : ArmFault::VmsaTran; 621 622 // If this is the second stage of translation and the request is for a 623 // stage 1 page table walk then we need to check the HCR.PTW bit. This 624 // allows us to generate a fault if the request targets an area marked 625 // as a device or strongly ordered. 626 if (isStage2 && req->isPTWalk() && hcr.ptw && 627 (te->mtype != TlbEntry::MemoryType::Normal)) { 628 return std::make_shared<DataAbort>( 629 vaddr, te->domain, is_write, 630 ArmFault::PermissionLL + te->lookupLevel, 631 isStage2, tranMethod); 632 } 633 634 // Generate an alignment fault for unaligned data accesses to device or 635 // strongly ordered memory 636 if (!is_fetch) { 637 if (te->mtype != TlbEntry::MemoryType::Normal) { 638 if (vaddr & mask(flags & AlignmentMask)) { 639 alignFaults++; 640 return std::make_shared<DataAbort>( 641 vaddr, TlbEntry::DomainType::NoAccess, is_write, 642 ArmFault::AlignmentFault, isStage2, 643 tranMethod); 644 } 645 } 646 } 647 648 if (te->nonCacheable) { 649 // Prevent prefetching from I/O devices. 650 if (req->isPrefetch()) { 651 // Here we can safely use the fault status for the short 652 // desc. format in all cases 653 return std::make_shared<PrefetchAbort>( 654 vaddr, ArmFault::PrefetchUncacheable, 655 isStage2, tranMethod); 656 } 657 } 658 659 if (!te->longDescFormat) { 660 switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) { 661 case 0: 662 domainFaults++; 663 DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x" 664 " domain: %#x write:%d\n", dacr, 665 static_cast<uint8_t>(te->domain), is_write); 666 if (is_fetch) { 667 // Use PC value instead of vaddr because vaddr might 668 // be aligned to cache line and should not be the 669 // address reported in FAR 670 return std::make_shared<PrefetchAbort>( 671 req->getPC(), 672 ArmFault::DomainLL + te->lookupLevel, 673 isStage2, tranMethod); 674 } else 675 return std::make_shared<DataAbort>( 676 vaddr, te->domain, is_write, 677 ArmFault::DomainLL + te->lookupLevel, 678 isStage2, tranMethod); 679 case 1: 680 // Continue with permissions check 681 break; 682 case 2: 683 panic("UNPRED domain\n"); 684 case 3: 685 return NoFault; 686 } 687 } 688 689 // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits 690 uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap; 691 uint8_t hap = te->hap; 692 693 if (sctlr.afe == 1 || te->longDescFormat) 694 ap |= 1; 695 696 bool abt; 697 bool isWritable = true; 698 // If this is a stage 2 access (eg for reading stage 1 page table entries) 699 // then don't perform the AP permissions check, we stil do the HAP check 700 // below. 701 if (isStage2) { 702 abt = false; 703 } else { 704 switch (ap) { 705 case 0: 706 DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n", 707 (int)sctlr.rs); 708 if (!sctlr.xp) { 709 switch ((int)sctlr.rs) { 710 case 2: 711 abt = is_write; 712 break; 713 case 1: 714 abt = is_write || !is_priv; 715 break; 716 case 0: 717 case 3: 718 default: 719 abt = true; 720 break; 721 } 722 } else { 723 abt = true; 724 } 725 break; 726 case 1: 727 abt = !is_priv; 728 break; 729 case 2: 730 abt = !is_priv && is_write; 731 isWritable = is_priv; 732 break; 733 case 3: 734 abt = false; 735 break; 736 case 4: 737 panic("UNPRED premissions\n"); 738 case 5: 739 abt = !is_priv || is_write; 740 isWritable = false; 741 break; 742 case 6: 743 case 7: 744 abt = is_write; 745 isWritable = false; 746 break; 747 default: 748 panic("Unknown permissions %#x\n", ap); 749 } 750 } 751 752 bool hapAbt = is_write ? !(hap & 2) : !(hap & 1); 753 bool xn = te->xn || (isWritable && sctlr.wxn) || 754 (ap == 3 && sctlr.uwxn && is_priv); 755 if (is_fetch && (abt || xn || 756 (te->longDescFormat && te->pxn && is_priv) || 757 (isSecure && te->ns && scr.sif))) { 758 permsFaults++; 759 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d " 760 "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n", 761 ap, is_priv, is_write, te->ns, scr.sif,sctlr.afe); 762 // Use PC value instead of vaddr because vaddr might be aligned to 763 // cache line and should not be the address reported in FAR 764 return std::make_shared<PrefetchAbort>( 765 req->getPC(), 766 ArmFault::PermissionLL + te->lookupLevel, 767 isStage2, tranMethod); 768 } else if (abt | hapAbt) { 769 permsFaults++; 770 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d" 771 " write:%d\n", ap, is_priv, is_write); 772 return std::make_shared<DataAbort>( 773 vaddr, te->domain, is_write, 774 ArmFault::PermissionLL + te->lookupLevel, 775 isStage2 | !abt, tranMethod); 776 } 777 return NoFault; 778} 779 780 781Fault 782TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode, 783 ThreadContext *tc) 784{ 785 assert(aarch64); 786 787 // A data cache maintenance instruction that operates by VA does 788 // not generate a Permission fault unless: 789 // * It is a data cache invalidate (dc ivac) which requires write 790 // permissions to the VA, or 791 // * It is executed from EL0 792 if (req->isCacheClean() && aarch64EL != EL0 && !isStage2) { 793 return NoFault; 794 } 795 796 Addr vaddr_tainted = req->getVaddr(); 797 Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr); 798 799 Request::Flags flags = req->getFlags(); 800 bool is_fetch = (mode == Execute); 801 // Cache clean operations require read permissions to the specified VA 802 bool is_write = !req->isCacheClean() && mode == Write; 803 bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode); 804 805 updateMiscReg(tc, curTranType); 806 807 // If this is the second stage of translation and the request is for a 808 // stage 1 page table walk then we need to check the HCR.PTW bit. This 809 // allows us to generate a fault if the request targets an area marked 810 // as a device or strongly ordered. 811 if (isStage2 && req->isPTWalk() && hcr.ptw && 812 (te->mtype != TlbEntry::MemoryType::Normal)) { 813 return std::make_shared<DataAbort>( 814 vaddr_tainted, te->domain, is_write, 815 ArmFault::PermissionLL + te->lookupLevel, 816 isStage2, ArmFault::LpaeTran); 817 } 818 819 // Generate an alignment fault for unaligned accesses to device or 820 // strongly ordered memory 821 if (!is_fetch) { 822 if (te->mtype != TlbEntry::MemoryType::Normal) { 823 if (vaddr & mask(flags & AlignmentMask)) { 824 alignFaults++; 825 return std::make_shared<DataAbort>( 826 vaddr_tainted, 827 TlbEntry::DomainType::NoAccess, is_write, 828 ArmFault::AlignmentFault, isStage2, 829 ArmFault::LpaeTran); 830 } 831 } 832 } 833 834 if (te->nonCacheable) { 835 // Prevent prefetching from I/O devices. 836 if (req->isPrefetch()) { 837 // Here we can safely use the fault status for the short 838 // desc. format in all cases 839 return std::make_shared<PrefetchAbort>( 840 vaddr_tainted, 841 ArmFault::PrefetchUncacheable, 842 isStage2, ArmFault::LpaeTran); 843 } 844 } 845 846 uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field 847 bool grant = false; 848 849 uint8_t xn = te->xn; 850 uint8_t pxn = te->pxn; 851 bool r = !is_write && !is_fetch; 852 bool w = is_write; 853 bool x = is_fetch; 854 DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, " 855 "w:%d, x:%d\n", ap, xn, pxn, r, w, x); 856 857 if (isStage2) { 858 assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2); 859 // In stage 2 we use the hypervisor access permission bits. 860 // The following permissions are described in ARM DDI 0487A.f 861 // D4-1802 862 uint8_t hap = 0x3 & te->hap; 863 if (is_fetch) { 864 // sctlr.wxn overrides the xn bit 865 grant = !sctlr.wxn && !xn; 866 } else if (is_write) { 867 grant = hap & 0x2; 868 } else { // is_read 869 grant = hap & 0x1; 870 } 871 } else { 872 switch (aarch64EL) { 873 case EL0: 874 { 875 uint8_t perm = (ap << 2) | (xn << 1) | pxn; 876 switch (perm) { 877 case 0: 878 case 1: 879 case 8: 880 case 9: 881 grant = x; 882 break; 883 case 4: 884 case 5: 885 grant = r || w || (x && !sctlr.wxn); 886 break; 887 case 6: 888 case 7: 889 grant = r || w; 890 break; 891 case 12: 892 case 13: 893 grant = r || x; 894 break; 895 case 14: 896 case 15: 897 grant = r; 898 break; 899 default: 900 grant = false; 901 } 902 } 903 break; 904 case EL1: 905 { 906 uint8_t perm = (ap << 2) | (xn << 1) | pxn; 907 switch (perm) { 908 case 0: 909 case 2: 910 grant = r || w || (x && !sctlr.wxn); 911 break; 912 case 1: 913 case 3: 914 case 4: 915 case 5: 916 case 6: 917 case 7: 918 // regions that are writeable at EL0 should not be 919 // executable at EL1 920 grant = r || w; 921 break; 922 case 8: 923 case 10: 924 case 12: 925 case 14: 926 grant = r || x; 927 break; 928 case 9: 929 case 11: 930 case 13: 931 case 15: 932 grant = r; 933 break; 934 default: 935 grant = false; 936 } 937 } 938 break; 939 case EL2: 940 case EL3: 941 { 942 uint8_t perm = (ap & 0x2) | xn; 943 switch (perm) { 944 case 0: 945 grant = r || w || (x && !sctlr.wxn) ; 946 break; 947 case 1: 948 grant = r || w; 949 break; 950 case 2: 951 grant = r || x; 952 break; 953 case 3: 954 grant = r; 955 break; 956 default: 957 grant = false; 958 } 959 } 960 break; 961 } 962 } 963 964 if (!grant) { 965 if (is_fetch) { 966 permsFaults++; 967 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. " 968 "AP:%d priv:%d write:%d ns:%d sif:%d " 969 "sctlr.afe: %d\n", 970 ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe); 971 // Use PC value instead of vaddr because vaddr might be aligned to 972 // cache line and should not be the address reported in FAR 973 return std::make_shared<PrefetchAbort>( 974 req->getPC(), 975 ArmFault::PermissionLL + te->lookupLevel, 976 isStage2, ArmFault::LpaeTran); 977 } else { 978 permsFaults++; 979 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d " 980 "priv:%d write:%d\n", ap, is_priv, is_write); 981 return std::make_shared<DataAbort>( 982 vaddr_tainted, te->domain, is_write, 983 ArmFault::PermissionLL + te->lookupLevel, 984 isStage2, ArmFault::LpaeTran); 985 } 986 } 987 988 return NoFault; 989} 990 991Fault 992TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode, 993 Translation *translation, bool &delay, bool timing, 994 TLB::ArmTranslationType tranType, bool functional) 995{ 996 // No such thing as a functional timing access 997 assert(!(timing && functional)); 998 999 updateMiscReg(tc, tranType); 1000 1001 Addr vaddr_tainted = req->getVaddr(); 1002 Addr vaddr = 0; 1003 if (aarch64) 1004 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr); 1005 else 1006 vaddr = vaddr_tainted; 1007 Request::Flags flags = req->getFlags(); 1008 1009 bool is_fetch = (mode == Execute); 1010 bool is_write = (mode == Write); 1011 bool long_desc_format = aarch64 || longDescFormatInUse(tc); 1012 ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran 1013 : ArmFault::VmsaTran; 1014 1015 req->setAsid(asid); 1016 1017 DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n", 1018 isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran); 1019 1020 DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x " 1021 "flags %#lx tranType 0x%x\n", vaddr_tainted, mode, isStage2, 1022 scr, sctlr, flags, tranType); 1023 1024 if ((req->isInstFetch() && (!sctlr.i)) || 1025 ((!req->isInstFetch()) && (!sctlr.c))){ 1026 if (!req->isCacheMaintenance()) { 1027 req->setFlags(Request::UNCACHEABLE); 1028 } 1029 req->setFlags(Request::STRICT_ORDER); 1030 } 1031 if (!is_fetch) { 1032 assert(flags & MustBeOne); 1033 if (sctlr.a || !(flags & AllowUnaligned)) { 1034 if (vaddr & mask(flags & AlignmentMask)) { 1035 alignFaults++; 1036 return std::make_shared<DataAbort>( 1037 vaddr_tainted, 1038 TlbEntry::DomainType::NoAccess, is_write, 1039 ArmFault::AlignmentFault, isStage2, 1040 tranMethod); 1041 } 1042 } 1043 } 1044 1045 // If guest MMU is off or hcr.vm=0 go straight to stage2 1046 if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) { 1047 1048 req->setPaddr(vaddr); 1049 // When the MMU is off the security attribute corresponds to the 1050 // security state of the processor 1051 if (isSecure) 1052 req->setFlags(Request::SECURE); 1053 1054 // @todo: double check this (ARM ARM issue C B3.2.1) 1055 if (long_desc_format || sctlr.tre == 0 || nmrr.ir0 == 0 || 1056 nmrr.or0 == 0 || prrr.tr0 != 0x2) { 1057 if (!req->isCacheMaintenance()) { 1058 req->setFlags(Request::UNCACHEABLE); 1059 } 1060 req->setFlags(Request::STRICT_ORDER); 1061 } 1062 1063 // Set memory attributes 1064 TlbEntry temp_te; 1065 temp_te.ns = !isSecure; 1066 if (isStage2 || hcr.dc == 0 || isSecure || 1067 (isHyp && !(tranType & S1CTran))) { 1068 1069 temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal 1070 : TlbEntry::MemoryType::StronglyOrdered; 1071 temp_te.innerAttrs = 0x0; 1072 temp_te.outerAttrs = 0x0; 1073 temp_te.shareable = true; 1074 temp_te.outerShareable = true; 1075 } else { 1076 temp_te.mtype = TlbEntry::MemoryType::Normal; 1077 temp_te.innerAttrs = 0x3; 1078 temp_te.outerAttrs = 0x3; 1079 temp_te.shareable = false; 1080 temp_te.outerShareable = false; 1081 } 1082 temp_te.setAttributes(long_desc_format); 1083 DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: " 1084 "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n", 1085 temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs, 1086 isStage2); 1087 setAttr(temp_te.attributes); 1088 1089 return testTranslation(req, mode, TlbEntry::DomainType::NoAccess); 1090 } 1091 1092 DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n", 1093 isStage2 ? "IPA" : "VA", vaddr_tainted, asid); 1094 // Translation enabled 1095 1096 TlbEntry *te = NULL; 1097 TlbEntry mergeTe; 1098 Fault fault = getResultTe(&te, req, tc, mode, translation, timing, 1099 functional, &mergeTe); 1100 // only proceed if we have a valid table entry 1101 if ((te == NULL) && (fault == NoFault)) delay = true; 1102 1103 // If we have the table entry transfer some of the attributes to the 1104 // request that triggered the translation 1105 if (te != NULL) { 1106 // Set memory attributes 1107 DPRINTF(TLBVerbose, 1108 "Setting memory attributes: shareable: %d, innerAttrs: %d, " 1109 "outerAttrs: %d, mtype: %d, isStage2: %d\n", 1110 te->shareable, te->innerAttrs, te->outerAttrs, 1111 static_cast<uint8_t>(te->mtype), isStage2); 1112 setAttr(te->attributes); 1113 1114 if (te->nonCacheable && !req->isCacheMaintenance()) 1115 req->setFlags(Request::UNCACHEABLE); 1116 1117 // Require requests to be ordered if the request goes to 1118 // strongly ordered or device memory (i.e., anything other 1119 // than normal memory requires strict order). 1120 if (te->mtype != TlbEntry::MemoryType::Normal) 1121 req->setFlags(Request::STRICT_ORDER); 1122 1123 Addr pa = te->pAddr(vaddr); 1124 req->setPaddr(pa); 1125 1126 if (isSecure && !te->ns) { 1127 req->setFlags(Request::SECURE); 1128 } 1129 if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) && 1130 (te->mtype != TlbEntry::MemoryType::Normal)) { 1131 // Unaligned accesses to Device memory should always cause an 1132 // abort regardless of sctlr.a 1133 alignFaults++; 1134 return std::make_shared<DataAbort>( 1135 vaddr_tainted, 1136 TlbEntry::DomainType::NoAccess, is_write, 1137 ArmFault::AlignmentFault, isStage2, 1138 tranMethod); 1139 } 1140 1141 // Check for a trickbox generated address fault 1142 if (fault == NoFault) 1143 fault = testTranslation(req, mode, te->domain); 1144 } 1145 1146 if (fault == NoFault) { 1147 // Generate Illegal Inst Set State fault if IL bit is set in CPSR 1148 if (aarch64 && is_fetch && cpsr.il == 1) { 1149 return std::make_shared<IllegalInstSetStateFault>(); 1150 } 1151 1152 // Don't try to finalize a physical address unless the 1153 // translation has completed (i.e., there is a table entry). 1154 return te ? finalizePhysical(req, tc, mode) : NoFault; 1155 } else { 1156 return fault; 1157 } 1158} 1159 1160Fault 1161TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode, 1162 TLB::ArmTranslationType tranType) 1163{ 1164 updateMiscReg(tc, tranType); 1165 1166 if (directToStage2) { 1167 assert(stage2Tlb); 1168 return stage2Tlb->translateAtomic(req, tc, mode, tranType); 1169 } 1170 1171 bool delay = false; 1172 Fault fault; 1173 if (FullSystem) 1174 fault = translateFs(req, tc, mode, NULL, delay, false, tranType); 1175 else 1176 fault = translateSe(req, tc, mode, NULL, delay, false); 1177 assert(!delay); 1178 return fault; 1179} 1180 1181Fault 1182TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode, 1183 TLB::ArmTranslationType tranType) 1184{ 1185 updateMiscReg(tc, tranType); 1186 1187 if (directToStage2) { 1188 assert(stage2Tlb); 1189 return stage2Tlb->translateFunctional(req, tc, mode, tranType); 1190 } 1191 1192 bool delay = false; 1193 Fault fault; 1194 if (FullSystem) 1195 fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true); 1196 else 1197 fault = translateSe(req, tc, mode, NULL, delay, false); 1198 assert(!delay); 1199 return fault; 1200} 1201 1202void 1203TLB::translateTiming(RequestPtr req, ThreadContext *tc, 1204 Translation *translation, Mode mode, TLB::ArmTranslationType tranType) 1205{ 1206 updateMiscReg(tc, tranType); 1207 1208 if (directToStage2) { 1209 assert(stage2Tlb); 1210 stage2Tlb->translateTiming(req, tc, translation, mode, tranType); 1211 return; 1212 } 1213 1214 assert(translation); 1215 1216 translateComplete(req, tc, translation, mode, tranType, isStage2); 1217} 1218 1219Fault 1220TLB::translateComplete(RequestPtr req, ThreadContext *tc, 1221 Translation *translation, Mode mode, TLB::ArmTranslationType tranType, 1222 bool callFromS2) 1223{ 1224 bool delay = false; 1225 Fault fault; 1226 if (FullSystem) 1227 fault = translateFs(req, tc, mode, translation, delay, true, tranType); 1228 else 1229 fault = translateSe(req, tc, mode, translation, delay, true); 1230 DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay, fault != 1231 NoFault); 1232 // If we have a translation, and we're not in the middle of doing a stage 1233 // 2 translation tell the translation that we've either finished or its 1234 // going to take a while. By not doing this when we're in the middle of a 1235 // stage 2 translation we prevent marking the translation as delayed twice, 1236 // one when the translation starts and again when the stage 1 translation 1237 // completes. 1238 if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) { 1239 if (!delay) 1240 translation->finish(fault, req, tc, mode); 1241 else 1242 translation->markDelayed(); 1243 } 1244 return fault; 1245} 1246 1247BaseMasterPort* 1248TLB::getMasterPort() 1249{ 1250 return &stage2Mmu->getPort(); 1251} 1252 1253void 1254TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType) 1255{ 1256 // check if the regs have changed, or the translation mode is different. 1257 // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle 1258 // one type of translation anyway 1259 if (miscRegValid && miscRegContext == tc->contextId() && 1260 ((tranType == curTranType) || isStage2)) { 1261 return; 1262 } 1263 1264 DPRINTF(TLBVerbose, "TLB variables changed!\n"); 1265 cpsr = tc->readMiscReg(MISCREG_CPSR); 1266 1267 // Dependencies: SCR/SCR_EL3, CPSR 1268 isSecure = inSecureState(tc) && 1269 !(tranType & HypMode) && !(tranType & S1S2NsTran); 1270 1271 aarch64EL = tranTypeEL(cpsr, tranType); 1272 aarch64 = isStage2 ? 1273 ELIs64(tc, EL2) : 1274 ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL); 1275 1276 if (aarch64) { // AArch64 1277 // determine EL we need to translate in 1278 switch (aarch64EL) { 1279 case EL0: 1280 case EL1: 1281 { 1282 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1); 1283 ttbcr = tc->readMiscReg(MISCREG_TCR_EL1); 1284 uint64_t ttbr_asid = ttbcr.a1 ? 1285 tc->readMiscReg(MISCREG_TTBR1_EL1) : 1286 tc->readMiscReg(MISCREG_TTBR0_EL1); 1287 asid = bits(ttbr_asid, 1288 (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48); 1289 } 1290 break; 1291 case EL2: 1292 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2); 1293 ttbcr = tc->readMiscReg(MISCREG_TCR_EL2); 1294 asid = -1; 1295 break; 1296 case EL3: 1297 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3); 1298 ttbcr = tc->readMiscReg(MISCREG_TCR_EL3); 1299 asid = -1; 1300 break; 1301 } 1302 hcr = tc->readMiscReg(MISCREG_HCR_EL2); 1303 scr = tc->readMiscReg(MISCREG_SCR_EL3); 1304 isPriv = aarch64EL != EL0; 1305 if (haveVirtualization) { 1306 vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48); 1307 isHyp = tranType & HypMode; 1308 isHyp &= (tranType & S1S2NsTran) == 0; 1309 isHyp &= (tranType & S1CTran) == 0; 1310 // Work out if we should skip the first stage of translation and go 1311 // directly to stage 2. This value is cached so we don't have to 1312 // compute it for every translation. 1313 stage2Req = isStage2 || 1314 (hcr.vm && !isHyp && !isSecure && 1315 !(tranType & S1CTran) && (aarch64EL < EL2) && 1316 !(tranType & S1E1Tran)); // <--- FIX THIS HACK 1317 directToStage2 = !isStage2 && stage2Req && !sctlr.m; 1318 } else { 1319 vmid = 0; 1320 isHyp = false; 1321 directToStage2 = false; 1322 stage2Req = false; 1323 } 1324 } else { // AArch32 1325 sctlr = tc->readMiscReg(snsBankedIndex(MISCREG_SCTLR, tc, 1326 !isSecure)); 1327 ttbcr = tc->readMiscReg(snsBankedIndex(MISCREG_TTBCR, tc, 1328 !isSecure)); 1329 scr = tc->readMiscReg(MISCREG_SCR); 1330 isPriv = cpsr.mode != MODE_USER; 1331 if (longDescFormatInUse(tc)) { 1332 uint64_t ttbr_asid = tc->readMiscReg( 1333 snsBankedIndex(ttbcr.a1 ? MISCREG_TTBR1 : 1334 MISCREG_TTBR0, 1335 tc, !isSecure)); 1336 asid = bits(ttbr_asid, 55, 48); 1337 } else { // Short-descriptor translation table format in use 1338 CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex( 1339 MISCREG_CONTEXTIDR, tc,!isSecure)); 1340 asid = context_id.asid; 1341 } 1342 prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR, tc, 1343 !isSecure)); 1344 nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR, tc, 1345 !isSecure)); 1346 dacr = tc->readMiscReg(snsBankedIndex(MISCREG_DACR, tc, 1347 !isSecure)); 1348 hcr = tc->readMiscReg(MISCREG_HCR); 1349 1350 if (haveVirtualization) { 1351 vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48); 1352 isHyp = cpsr.mode == MODE_HYP; 1353 isHyp |= tranType & HypMode; 1354 isHyp &= (tranType & S1S2NsTran) == 0; 1355 isHyp &= (tranType & S1CTran) == 0; 1356 if (isHyp) { 1357 sctlr = tc->readMiscReg(MISCREG_HSCTLR); 1358 } 1359 // Work out if we should skip the first stage of translation and go 1360 // directly to stage 2. This value is cached so we don't have to 1361 // compute it for every translation. 1362 stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure && 1363 !(tranType & S1CTran); 1364 directToStage2 = stage2Req && !sctlr.m; 1365 } else { 1366 vmid = 0; 1367 stage2Req = false; 1368 isHyp = false; 1369 directToStage2 = false; 1370 } 1371 } 1372 miscRegValid = true; 1373 miscRegContext = tc->contextId(); 1374 curTranType = tranType; 1375} 1376 1377ExceptionLevel 1378TLB::tranTypeEL(CPSR cpsr, ArmTranslationType type) 1379{ 1380 switch (type) { 1381 case S1E0Tran: 1382 case S12E0Tran: 1383 return EL0; 1384 1385 case S1E1Tran: 1386 case S12E1Tran: 1387 return EL1; 1388 1389 case S1E2Tran: 1390 return EL2; 1391 1392 case S1E3Tran: 1393 return EL3; 1394 1395 case NormalTran: 1396 case S1CTran: 1397 case S1S2NsTran: 1398 case HypMode: 1399 return opModeToEL((OperatingMode)(uint8_t)cpsr.mode); 1400 1401 default: 1402 panic("Unknown translation mode!\n"); 1403 } 1404} 1405 1406Fault 1407TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode, 1408 Translation *translation, bool timing, bool functional, 1409 bool is_secure, TLB::ArmTranslationType tranType) 1410{ 1411 bool is_fetch = (mode == Execute); 1412 bool is_write = (mode == Write); 1413 1414 Addr vaddr_tainted = req->getVaddr(); 1415 Addr vaddr = 0; 1416 ExceptionLevel target_el = aarch64 ? aarch64EL : EL1; 1417 if (aarch64) { 1418 vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr); 1419 } else { 1420 vaddr = vaddr_tainted; 1421 } 1422 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el); 1423 if (*te == NULL) { 1424 if (req->isPrefetch()) { 1425 // if the request is a prefetch don't attempt to fill the TLB or go 1426 // any further with the memory access (here we can safely use the 1427 // fault status for the short desc. format in all cases) 1428 prefetchFaults++; 1429 return std::make_shared<PrefetchAbort>( 1430 vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2); 1431 } 1432 1433 if (is_fetch) 1434 instMisses++; 1435 else if (is_write) 1436 writeMisses++; 1437 else 1438 readMisses++; 1439 1440 // start translation table walk, pass variables rather than 1441 // re-retreaving in table walker for speed 1442 DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n", 1443 vaddr_tainted, asid, vmid); 1444 Fault fault; 1445 fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode, 1446 translation, timing, functional, is_secure, 1447 tranType, stage2Req); 1448 // for timing mode, return and wait for table walk, 1449 if (timing || fault != NoFault) { 1450 return fault; 1451 } 1452 1453 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el); 1454 if (!*te) 1455 printTlb(); 1456 assert(*te); 1457 } else { 1458 if (is_fetch) 1459 instHits++; 1460 else if (is_write) 1461 writeHits++; 1462 else 1463 readHits++; 1464 } 1465 return NoFault; 1466} 1467 1468Fault 1469TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode, 1470 Translation *translation, bool timing, bool functional, 1471 TlbEntry *mergeTe) 1472{ 1473 Fault fault; 1474 1475 if (isStage2) { 1476 // We are already in the stage 2 TLB. Grab the table entry for stage 1477 // 2 only. We are here because stage 1 translation is disabled. 1478 TlbEntry *s2Te = NULL; 1479 // Get the stage 2 table entry 1480 fault = getTE(&s2Te, req, tc, mode, translation, timing, functional, 1481 isSecure, curTranType); 1482 // Check permissions of stage 2 1483 if ((s2Te != NULL) && (fault == NoFault)) { 1484 if (aarch64) 1485 fault = checkPermissions64(s2Te, req, mode, tc); 1486 else 1487 fault = checkPermissions(s2Te, req, mode); 1488 } 1489 *te = s2Te; 1490 return fault; 1491 } 1492 1493 TlbEntry *s1Te = NULL; 1494 1495 Addr vaddr_tainted = req->getVaddr(); 1496 1497 // Get the stage 1 table entry 1498 fault = getTE(&s1Te, req, tc, mode, translation, timing, functional, 1499 isSecure, curTranType); 1500 // only proceed if we have a valid table entry 1501 if ((s1Te != NULL) && (fault == NoFault)) { 1502 // Check stage 1 permissions before checking stage 2 1503 if (aarch64) 1504 fault = checkPermissions64(s1Te, req, mode, tc); 1505 else 1506 fault = checkPermissions(s1Te, req, mode); 1507 if (stage2Req & (fault == NoFault)) { 1508 Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te, 1509 req, translation, mode, timing, functional, curTranType); 1510 fault = s2Lookup->getTe(tc, mergeTe); 1511 if (s2Lookup->isComplete()) { 1512 *te = mergeTe; 1513 // We've finished with the lookup so delete it 1514 delete s2Lookup; 1515 } else { 1516 // The lookup hasn't completed, so we can't delete it now. We 1517 // get round this by asking the object to self delete when the 1518 // translation is complete. 1519 s2Lookup->setSelfDelete(); 1520 } 1521 } else { 1522 // This case deals with an S1 hit (or bypass), followed by 1523 // an S2 hit-but-perms issue 1524 if (isStage2) { 1525 DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n", 1526 vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault); 1527 if (fault != NoFault) { 1528 ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get()); 1529 armFault->annotate(ArmFault::S1PTW, false); 1530 armFault->annotate(ArmFault::OVA, vaddr_tainted); 1531 } 1532 } 1533 *te = s1Te; 1534 } 1535 } 1536 return fault; 1537} 1538 1539void 1540TLB::setTestInterface(SimObject *_ti) 1541{ 1542 if (!_ti) { 1543 test = nullptr; 1544 } else { 1545 TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti)); 1546 fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name()); 1547 test = ti; 1548 } 1549} 1550 1551Fault 1552TLB::testTranslation(RequestPtr req, Mode mode, TlbEntry::DomainType domain) 1553{ 1554 if (!test || !req->hasSize() || req->getSize() == 0 || 1555 req->isCacheMaintenance()) { 1556 return NoFault; 1557 } else { 1558 return test->translationCheck(req, isPriv, mode, domain); 1559 } 1560} 1561 1562Fault 1563TLB::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode, 1564 TlbEntry::DomainType domain, LookupLevel lookup_level) 1565{ 1566 if (!test) { 1567 return NoFault; 1568 } else { 1569 return test->walkCheck(pa, size, va, is_secure, isPriv, mode, 1570 domain, lookup_level); 1571 } 1572} 1573 1574 1575ArmISA::TLB * 1576ArmTLBParams::create() 1577{ 1578 return new ArmISA::TLB(this); 1579} 1580