tlb.cc revision 13882:03fe9a85b435
/*
 * Copyright (c) 2010-2013, 2016-2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Nathan Binkert
 *          Steve Reinhardt
 */

#include "arch/arm/tlb.hh"

#include <memory>
#include <string>
#include <vector>

#include "arch/arm/faults.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/utility.hh"
#include "arch/generic/mmapped_ipr.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "mem/request.hh"
#include "params/ArmTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

using namespace std;
using namespace ArmISA;

TLB::TLB(const ArmTLBParams *p)
    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
      isStage2(p->is_stage2), stage2Req(false), stage2DescReq(false),
      _attr(0), directToStage2(false), tableWalker(p->walker),
      stage2Tlb(NULL), stage2Mmu(NULL), test(nullptr), rangeMRU(1),
      aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
      isHyp(false), asid(0), vmid(0), hcr(0), dacr(0),
      miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
{
    const ArmSystem *sys = dynamic_cast<const ArmSystem *>(p->sys);

    tableWalker->setTlb(this);

    // Cache system-level properties
    haveLPAE = tableWalker->haveLPAE();
    haveVirtualization = tableWalker->haveVirtualization();
    haveLargeAsid64 = tableWalker->haveLargeAsid64();

    if (sys)
        m5opRange = sys->m5opRange();
}

TLB::~TLB()
{
    delete[] table;
}

void
TLB::init()
{
    if (stage2Mmu && !isStage2)
        stage2Tlb = stage2Mmu->stage2Tlb();
}

void
TLB::setMMU(Stage2MMU *m, MasterID master_id)
{
    stage2Mmu = m;
    tableWalker->setMMU(m, master_id);
}

bool
TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    updateMiscReg(tc);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(tc, va, pa);
    }

    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
                         aarch64 ? aarch64EL : EL1);
    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}

Fault
TLB::finalizePhysical(const RequestPtr &req,
                      ThreadContext *tc, Mode mode) const
{
    const Addr paddr = req->getPaddr();

    if (m5opRange.contains(paddr)) {
        req->setFlags(Request::MMAPPED_IPR | Request::GENERIC_IPR);
        req->setPaddr(GenericISA::iprAddressPseudoInst(
                          (paddr >> 8) & 0xFF,
                          paddr & 0xFF));
    }

    return NoFault;
}

TlbEntry*
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
            bool functional, bool ignore_asn, uint8_t target_el)
{

    TlbEntry *retval = NULL;

    // Maintaining LRU array
    int x = 0;
    while (retval == NULL && x < size) {
        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
             target_el)) ||
            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
            // We only move the hit entry ahead when the position is higher
            // than rangeMRU
            if (x > rangeMRU && !functional) {
                TlbEntry tmp_entry = table[x];
                for (int i = x; i > 0; i--)
                    table[i] = table[i - 1];
                table[0] = tmp_entry;
                retval = &table[0];
            } else {
                retval = &table[x];
            }
            break;
        }
        ++x;
    }

    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
            "el: %d\n",
            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
            retval ? retval->pfn : 0, retval ? retval->size : 0,
            retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
            retval ? retval->ns : 0, retval ? retval->nstid : 0,
            retval ? retval->global : 0, retval ? retval->asid : 0,
            retval ? retval->el : 0);

    return retval;
}

// insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
            entry.global, entry.valid, entry.nonCacheable, entry.xn,
            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns,
            entry.nstid, entry.isHyp);

    if (table[size - 1].valid)
        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
                table[size-1].vpn << table[size-1].N, table[size-1].asid,
                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
                table[size-1].size, table[size-1].ap, table[size-1].ns,
                table[size-1].nstid, table[size-1].global,
                table[size-1].isHyp, table[size-1].el);

    // inserting to MRU position and evicting the LRU one

    for (int i = size - 1; i > 0; --i)
        table[i] = table[i-1];
    table[0] = entry;

    inserts++;
    ppRefills->notify(1);
}

void
TLB::printTlb() const
{
    int x = 0;
    TlbEntry *te;
    DPRINTF(TLB, "Current TLB contents:\n");
    while (x < size) {
        te = &table[x];
        if (te->valid)
            DPRINTF(TLB, " * %s\n", te->print());
        ++x;
    }
}

void
TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
            (secure_lookup ? "secure" : "non-secure"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    // if we're currently in hyp mode
    if (!isStage2 && isHyp) {
        stage2Tlb->flushAllSecurity(secure_lookup, true);
    }
}

void
TLB::flushAllNs(uint8_t target_el, bool ignore_el)
{
    bool hyp = target_el == EL2;

    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
            (hyp ? "hyp" : "non-hyp"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && te->nstid && te->isHyp == hyp &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
            te->valid = false;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    if (!isStage2 && !hyp) {
        stage2Tlb->flushAllNs(false, true);
    }
}

void
TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup,
                  uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
            "(%s lookup)\n", mva, asn,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, asn, secure_lookup, false, target_el);
    flushTlbMvaAsid++;
}

void
TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
            (secure_lookup ? "secure" : "non-secure"));

    int x = 0;
    TlbEntry *te;

    while (x < size) {
        te = &table[x];
        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, false)) {

            te->valid = false;
            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
        }
        ++x;
    }
    flushTlbAsid++;
}

void
TLB::flushMva(Addr mva, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, 0xbeef, secure_lookup, true, target_el);
    flushTlbMva++;
}

void
TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup,
               bool ignore_asn, uint8_t target_el)
{
    TlbEntry *te;
    // D5.7.2: Sign-extend address to 64 bits
    mva = sext<56>(mva);

    bool hyp = target_el == EL2;

    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                target_el);
    while (te != NULL) {
        if (secure_lookup == !te->nstid) {
            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                    target_el);
    }
}

void
TLB::flushIpaVmid(Addr ipa, bool secure_lookup, uint8_t target_el)
{
    assert(!isStage2);
    stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, true, target_el);
}

bool
TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
{
    bool elMatch = true;
    if (!ignore_el) {
        if (target_el == 2 || target_el == 3) {
            elMatch = (tentry_el == target_el);
        } else {
            elMatch = (tentry_el == 0) || (tentry_el == 1);
        }
    }
    return elMatch;
}

void
TLB::drainResume()
{
    // We might have unserialized something or switched CPUs, so make
    // sure to re-read the misc regs.
    miscRegValid = false;
}

void
TLB::takeOverFrom(BaseTLB *_otlb)
{
    TLB *otlb = dynamic_cast<TLB*>(_otlb);
    /* Make sure we actually have a valid type */
    if (otlb) {
        _attr = otlb->_attr;
        haveLPAE = otlb->haveLPAE;
        directToStage2 = otlb->directToStage2;
        stage2Req = otlb->stage2Req;
        stage2DescReq = otlb->stage2DescReq;

        /* Sync the stage2 MMU if they exist in both
         * the old CPU and the new
         */
        if (!isStage2 &&
            stage2Tlb && otlb->stage2Tlb) {
            stage2Tlb->takeOverFrom(otlb->stage2Tlb);
        }
    } else {
        panic("Incompatible TLB type!");
    }
}

void
TLB::serialize(CheckpointOut &cp) const
{
    DPRINTF(Checkpoint, "Serializing Arm TLB\n");

    SERIALIZE_SCALAR(_attr);
    SERIALIZE_SCALAR(haveLPAE);
    SERIALIZE_SCALAR(directToStage2);
    SERIALIZE_SCALAR(stage2Req);
    SERIALIZE_SCALAR(stage2DescReq);

    int num_entries = size;
    SERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < size; i++)
        table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
}

void
TLB::unserialize(CheckpointIn &cp)
{
    DPRINTF(Checkpoint, "Unserializing Arm TLB\n");

    UNSERIALIZE_SCALAR(_attr);
    UNSERIALIZE_SCALAR(haveLPAE);
    UNSERIALIZE_SCALAR(directToStage2);
    UNSERIALIZE_SCALAR(stage2Req);
    UNSERIALIZE_SCALAR(stage2DescReq);

    int num_entries;
    UNSERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < min(size, num_entries); i++)
        table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
}

void
TLB::regStats()
{
    BaseTLB::regStats();
    instHits
        .name(name() + ".inst_hits")
        .desc("ITB inst hits")
        ;

    instMisses
        .name(name() + ".inst_misses")
        .desc("ITB inst misses")
        ;

    instAccesses
        .name(name() + ".inst_accesses")
        .desc("ITB inst accesses")
        ;

    readHits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    readMisses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    readAccesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    writeHits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    writeMisses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    writeAccesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    flushTlb
        .name(name() + ".flush_tlb")
        .desc("Number of times complete TLB was flushed")
        ;

    flushTlbMva
        .name(name() + ".flush_tlb_mva")
        .desc("Number of times TLB was flushed by MVA")
        ;

    flushTlbMvaAsid
        .name(name() + ".flush_tlb_mva_asid")
        .desc("Number of times TLB was flushed by MVA & ASID")
        ;

    flushTlbAsid
        .name(name() + ".flush_tlb_asid")
        .desc("Number of times TLB was flushed by ASID")
        ;

    flushedEntries
        .name(name() + ".flush_entries")
        .desc("Number of entries that have been flushed from TLB")
        ;

    alignFaults
        .name(name() + ".align_faults")
        .desc("Number of TLB faults due to alignment restrictions")
        ;

    prefetchFaults
        .name(name() + ".prefetch_faults")
        .desc("Number of TLB faults due to prefetch")
        ;

    domainFaults
        .name(name() + ".domain_faults")
        .desc("Number of TLB faults due to domain restrictions")
        ;

    permsFaults
        .name(name() + ".perms_faults")
        .desc("Number of TLB faults due to permissions restrictions")
        ;

    instAccesses = instHits + instMisses;
    readAccesses = readHits + readMisses;
    writeAccesses = writeHits + writeMisses;
    hits = readHits + writeHits + instHits;
    misses = readMisses + writeMisses + instMisses;
    accesses = readAccesses + writeAccesses + instAccesses;
}

void
TLB::regProbePoints()
{
    ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
}

Fault
TLB::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing)
{
    updateMiscReg(tc);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::VmsaTran);
            }
        }
    }

    Addr paddr;
    Process *p = tc->getProcessPtr();

    if (!p->pTable->translate(vaddr, paddr))
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    req->setPaddr(paddr);

    return finalizePhysical(req, tc, mode);
}

Fault
TLB::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode)
{
    // a data cache maintenance instruction that operates by MVA does
    // not generate a Data Abort exception due to a Permission fault
    if (req->isCacheMaintenance()) {
        return NoFault;
    }

    Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
    Request::Flags flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv = isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
                                                         : ArmFault::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr, TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr, ArmFault::PrefetchUncacheable,
                isStage2, tranMethod);
        }
    }

    if (!te->longDescFormat) {
        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            domainFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch) {
                // Use PC value instead of vaddr because vaddr might
                // be aligned to cache line and should not be the
                // address reported in FAR
                return std::make_shared<PrefetchAbort>(
                    req->getPC(),
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
            } else
                return std::make_shared<DataAbort>(
                    vaddr, te->domain, is_write,
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2,1], 1'b0}, i.e. always three bits
    uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (eg for reading stage 1 page table entries)
    // then don't perform the AP permissions check, we still do the HAP check
    // below.
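    //
    // Summary of the short-descriptor AP encodings handled by the switch
    // below (when SCTLR.AFE is set, bit 0 of 'ap' has already been forced
    // to 1 above):
    //   ap == 0:   legacy behaviour selected by SCTLR.RS (abort if SCTLR.XP)
    //   ap == 1:   privileged read/write, user no access
    //   ap == 2:   privileged read/write, user read-only
    //   ap == 3:   read/write at any privilege level
    //   ap == 4:   UNPREDICTABLE
    //   ap == 5:   privileged read-only, user no access
    //   ap == 6/7: read-only at any privilege level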
710 if (isStage2) { 711 abt = false; 712 } else { 713 switch (ap) { 714 case 0: 715 DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n", 716 (int)sctlr.rs); 717 if (!sctlr.xp) { 718 switch ((int)sctlr.rs) { 719 case 2: 720 abt = is_write; 721 break; 722 case 1: 723 abt = is_write || !is_priv; 724 break; 725 case 0: 726 case 3: 727 default: 728 abt = true; 729 break; 730 } 731 } else { 732 abt = true; 733 } 734 break; 735 case 1: 736 abt = !is_priv; 737 break; 738 case 2: 739 abt = !is_priv && is_write; 740 isWritable = is_priv; 741 break; 742 case 3: 743 abt = false; 744 break; 745 case 4: 746 panic("UNPRED premissions\n"); 747 case 5: 748 abt = !is_priv || is_write; 749 isWritable = false; 750 break; 751 case 6: 752 case 7: 753 abt = is_write; 754 isWritable = false; 755 break; 756 default: 757 panic("Unknown permissions %#x\n", ap); 758 } 759 } 760 761 bool hapAbt = is_write ? !(hap & 2) : !(hap & 1); 762 bool xn = te->xn || (isWritable && sctlr.wxn) || 763 (ap == 3 && sctlr.uwxn && is_priv); 764 if (is_fetch && (abt || xn || 765 (te->longDescFormat && te->pxn && is_priv) || 766 (isSecure && te->ns && scr.sif))) { 767 permsFaults++; 768 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d " 769 "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n", 770 ap, is_priv, is_write, te->ns, scr.sif,sctlr.afe); 771 // Use PC value instead of vaddr because vaddr might be aligned to 772 // cache line and should not be the address reported in FAR 773 return std::make_shared<PrefetchAbort>( 774 req->getPC(), 775 ArmFault::PermissionLL + te->lookupLevel, 776 isStage2, tranMethod); 777 } else if (abt | hapAbt) { 778 permsFaults++; 779 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d" 780 " write:%d\n", ap, is_priv, is_write); 781 return std::make_shared<DataAbort>( 782 vaddr, te->domain, is_write, 783 ArmFault::PermissionLL + te->lookupLevel, 784 isStage2 | !abt, tranMethod); 785 } 786 return NoFault; 787} 788 789 790Fault 791TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode, 792 ThreadContext *tc) 793{ 794 assert(aarch64); 795 796 // A data cache maintenance instruction that operates by VA does 797 // not generate a Permission fault unless: 798 // * It is a data cache invalidate (dc ivac) which requires write 799 // permissions to the VA, or 800 // * It is executed from EL0 801 if (req->isCacheClean() && aarch64EL != EL0 && !isStage2) { 802 return NoFault; 803 } 804 805 Addr vaddr_tainted = req->getVaddr(); 806 Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr); 807 808 Request::Flags flags = req->getFlags(); 809 bool is_fetch = (mode == Execute); 810 // Cache clean operations require read permissions to the specified VA 811 bool is_write = !req->isCacheClean() && mode == Write; 812 bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode); 813 814 updateMiscReg(tc, curTranType); 815 816 // If this is the second stage of translation and the request is for a 817 // stage 1 page table walk then we need to check the HCR.PTW bit. This 818 // allows us to generate a fault if the request targets an area marked 819 // as a device or strongly ordered. 
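    // (AArch64 translation regimes always use the long-descriptor format,
    // so every fault generated in this function is reported with
    // ArmFault::LpaeTran.)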
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, ArmFault::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted,
                ArmFault::PrefetchUncacheable,
                isStage2, ArmFault::LpaeTran);
        }
    }

    uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
    bool grant = false;

    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;
    bool r = !is_write && !is_fetch;
    bool w = is_write;
    bool x = is_fetch;
    DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", ap, xn, pxn, r, w, x);

    if (isStage2) {
        assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2);
        // In stage 2 we use the hypervisor access permission bits.
        // The following permissions are described in ARM DDI 0487A.f
        // D4-1802
        uint8_t hap = 0x3 & te->hap;
        if (is_fetch) {
            // sctlr.wxn overrides the xn bit
            grant = !sctlr.wxn && !xn;
        } else if (is_write) {
            grant = hap & 0x2;
        } else { // is_read
            grant = hap & 0x1;
        }
    } else {
        switch (aarch64EL) {
          case EL0:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 1:
                  case 8:
                  case 9:
                    grant = x;
                    break;
                  case 4:
                  case 5:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 6:
                  case 7:
                    grant = r || w;
                    break;
                  case 12:
                  case 13:
                    grant = r || x;
                    break;
                  case 14:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL1:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 2:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                  case 3:
                  case 4:
                  case 5:
                  case 6:
                  case 7:
                    // regions that are writable at EL0 should not be
                    // executable at EL1
                    grant = r || w;
                    break;
                  case 8:
                  case 10:
                  case 12:
                  case 14:
                    grant = r || x;
                    break;
                  case 9:
                  case 11:
                  case 13:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL2:
          case EL3:
            {
                uint8_t perm = (ap & 0x2) | xn;
                switch (perm) {
                  case 0:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                    grant = r || w;
                    break;
                  case 2:
                    grant = r || x;
                    break;
                  case 3:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
        }
    }

    if (!grant) {
        if (is_fetch) {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
                    "AP:%d priv:%d write:%d ns:%d sif:%d "
                    "sctlr.afe: %d\n",
                    ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned to
            // cache line and should not be the address reported in FAR
            return std::make_shared<PrefetchAbort>(
                req->getPC(),
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        } else {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                    "priv:%d write:%d\n", ap, is_priv, is_write);
            return std::make_shared<DataAbort>(
                vaddr_tainted, te->domain, is_write,
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        }
    }

    return NoFault;
}

Fault
TLB::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 TLB::ArmTranslationType tranType, bool functional)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    updateMiscReg(tc, tranType);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = aarch64 || longDescFormatInUse(tc);
    ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
                                                       : ArmFault::VmsaTran;

    req->setAsid(asid);

    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#lx tranType 0x%x\n", vaddr_tainted, mode, isStage2,
            scr, sctlr, flags, tranType);

    if ((req->isInstFetch() && (!sctlr.i)) ||
        ((!req->isInstFetch()) && (!sctlr.c))) {
        if (!req->isCacheMaintenance()) {
            req->setFlags(Request::UNCACHEABLE);
        }
        req->setFlags(Request::STRICT_ORDER);
    }
    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {

        req->setPaddr(vaddr);
        // When the MMU is off the security attribute corresponds to the
        // security state of the processor
        if (isSecure)
            req->setFlags(Request::SECURE);

        // @todo: double check this (ARM ARM issue C B3.2.1)
        if (long_desc_format || sctlr.tre == 0 || nmrr.ir0 == 0 ||
            nmrr.or0 == 0 || prrr.tr0 != 0x2) {
            if (!req->isCacheMaintenance()) {
                req->setFlags(Request::UNCACHEABLE);
            }
            req->setFlags(Request::STRICT_ORDER);
        }

        // Set memory attributes
        TlbEntry temp_te;
        temp_te.ns = !isSecure;
        if (isStage2 || hcr.dc == 0 || isSecure ||
            (isHyp && !(tranType & S1CTran))) {

            temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
                                     : TlbEntry::MemoryType::StronglyOrdered;
            temp_te.innerAttrs = 0x0;
            temp_te.outerAttrs = 0x0;
            temp_te.shareable = true;
            temp_te.outerShareable = true;
        } else {
            temp_te.mtype = TlbEntry::MemoryType::Normal;
            temp_te.innerAttrs = 0x3;
            temp_te.outerAttrs = 0x3;
            temp_te.shareable = false;
            temp_te.outerShareable = false;
        }
        temp_te.setAttributes(long_desc_format);
        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
                "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
                isStage2);
        setAttr(temp_te.attributes);

        return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
    }

    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
    // Translation enabled

    TlbEntry *te = NULL;
    TlbEntry mergeTe;
    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe);
    // only proceed if we have a valid table entry
    if ((te == NULL) && (fault == NoFault)) delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (te != NULL) {
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, isStage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), isStage2);
        setAttr(te->attributes);

        if (te->nonCacheable && !req->isCacheMaintenance())
            req->setFlags(Request::UNCACHEABLE);

        // Require requests to be ordered if the request goes to
        // strongly ordered or device memory (i.e., anything other
        // than normal memory requires strict order).
        if (te->mtype != TlbEntry::MemoryType::Normal)
            req->setFlags(Request::STRICT_ORDER);

        Addr pa = te->pAddr(vaddr);
        req->setPaddr(pa);

        if (isSecure && !te->ns) {
            req->setFlags(Request::SECURE);
        }
        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            alignFaults++;
            return std::make_shared<DataAbort>(
                vaddr_tainted,
                TlbEntry::DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, isStage2,
                tranMethod);
        }

        // Check for a trickbox generated address fault
        if (fault == NoFault)
            fault = testTranslation(req, mode, te->domain);
    }

    if (fault == NoFault) {
        // Don't try to finalize a physical address unless the
        // translation has completed (i.e., there is a table entry).
        return te ? finalizePhysical(req, tc, mode) : NoFault;
    } else {
        return fault;
    }
}

Fault
TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateAtomic(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
                         TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

void
TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
    Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
        return;
    }

    assert(translation);

    translateComplete(req, tc, translation, mode, tranType, isStage2);
}

Fault
TLB::translateComplete(const RequestPtr &req, ThreadContext *tc,
    Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
    bool callFromS2)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true, tranType);
    else
        fault = translateSe(req, tc, mode, translation, delay, true);
    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a stage
    // 2 translation tell the translation that we've either finished or it's
    // going to take a while. By not doing this when we're in the middle of a
    // stage 2 translation we prevent marking the translation as delayed
    // twice, once when the translation starts and again when the stage 1
    // translation completes.
    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() ||
        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}

Port *
TLB::getTableWalkerPort()
{
    return &stage2Mmu->getDMAPort();
}

void
TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
{
    // check if the regs have changed, or the translation mode is different.
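    // If neither has changed, the cached copies of the misc regs read
    // below are still valid and the re-read can be skipped.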
    // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
    // one type of translation anyway
    if (miscRegValid && miscRegContext == tc->contextId() &&
        ((tranType == curTranType) || isStage2)) {
        return;
    }

    DPRINTF(TLBVerbose, "TLB variables changed!\n");
    cpsr = tc->readMiscReg(MISCREG_CPSR);

    // Dependencies: SCR/SCR_EL3, CPSR
    isSecure = inSecureState(tc) &&
        !(tranType & HypMode) && !(tranType & S1S2NsTran);

    aarch64EL = tranTypeEL(cpsr, tranType);
    aarch64 = isStage2 ?
        ELIs64(tc, EL2) :
        ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL);

    if (aarch64) {  // AArch64
        // determine EL we need to translate in
        switch (aarch64EL) {
          case EL0:
          case EL1:
            {
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
                    tc->readMiscReg(MISCREG_TTBR0_EL1);
                asid = bits(ttbr_asid,
                            (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case EL2:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            asid = -1;
            break;
          case EL3:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
            asid = -1;
            break;
        }
        hcr = tc->readMiscReg(MISCREG_HCR_EL2);
        scr = tc->readMiscReg(MISCREG_SCR_EL3);
        isPriv = aarch64EL != EL0;
        if (haveVirtualization) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
            isHyp = tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran) == 0;
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req = isStage2 ||
                        (hcr.vm && !isHyp && !isSecure &&
                         !(tranType & S1CTran) && (aarch64EL < EL2) &&
                         !(tranType & S1E1Tran)); // <--- FIX THIS HACK
            stage2DescReq = isStage2 || (hcr.vm && !isHyp && !isSecure &&
                            (aarch64EL < EL2));
            directToStage2 = !isStage2 && stage2Req && !sctlr.m;
        } else {
            vmid = 0;
            isHyp = false;
            directToStage2 = false;
            stage2Req = false;
            stage2DescReq = false;
        }
    } else {  // AArch32
        sctlr = tc->readMiscReg(snsBankedIndex(MISCREG_SCTLR, tc,
                                !isSecure));
        ttbcr = tc->readMiscReg(snsBankedIndex(MISCREG_TTBCR, tc,
                                !isSecure));
        scr = tc->readMiscReg(MISCREG_SCR);
        isPriv = cpsr.mode != MODE_USER;
        if (longDescFormatInUse(tc)) {
            uint64_t ttbr_asid = tc->readMiscReg(
                snsBankedIndex(ttbcr.a1 ? MISCREG_TTBR1 :
                                          MISCREG_TTBR0,
                               tc, !isSecure));
            asid = bits(ttbr_asid, 55, 48);
        } else { // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex(
                MISCREG_CONTEXTIDR, tc, !isSecure));
            asid = context_id.asid;
        }
        prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR, tc,
                               !isSecure));
        nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR, tc,
                               !isSecure));
        dacr = tc->readMiscReg(snsBankedIndex(MISCREG_DACR, tc,
                               !isSecure));
        hcr = tc->readMiscReg(MISCREG_HCR);

        if (haveVirtualization) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            isHyp = cpsr.mode == MODE_HYP;
            isHyp |= tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran) == 0;
            if (isHyp) {
                sctlr = tc->readMiscReg(MISCREG_HSCTLR);
            }
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
                        !(tranType & S1CTran);
            stage2DescReq = hcr.vm && !isStage2 && !isHyp && !isSecure;
            directToStage2 = stage2Req && !sctlr.m;
        } else {
            vmid = 0;
            stage2Req = false;
            isHyp = false;
            directToStage2 = false;
            stage2DescReq = false;
        }
    }
    miscRegValid = true;
    miscRegContext = tc->contextId();
    curTranType = tranType;
}

ExceptionLevel
TLB::tranTypeEL(CPSR cpsr, ArmTranslationType type)
{
    switch (type) {
      case S1E0Tran:
      case S12E0Tran:
        return EL0;

      case S1E1Tran:
      case S12E1Tran:
        return EL1;

      case S1E2Tran:
        return EL2;

      case S1E3Tran:
        return EL3;

      case NormalTran:
      case S1CTran:
      case S1S2NsTran:
      case HypMode:
        return opModeToEL((OperatingMode)(uint8_t)cpsr.mode);

      default:
        panic("Unknown translation mode!\n");
    }
}

Fault
TLB::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           bool is_secure, TLB::ArmTranslationType tranType)
{
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
    if (aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
    } else {
        vaddr = vaddr_tainted;
    }
    *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
    if (*te == NULL) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or
            // go any further with the memory access (here we can safely use
            // the fault status for the short desc. format in all cases)
            prefetchFaults++;
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
        }

        if (is_fetch)
            instMisses++;
        else if (is_write)
            writeMisses++;
        else
            readMisses++;

        // start translation table walk, pass variables rather than
        // re-retrieving in table walker for speed
        DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
                vaddr_tainted, asid, vmid);
        Fault fault;
        fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
                                  translation, timing, functional, is_secure,
                                  tranType, stage2DescReq);
        // for timing mode, return and wait for table walk,
        if (timing || fault != NoFault) {
            return fault;
        }

        *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
                     target_el);
        if (!*te)
            printTlb();
        assert(*te);
    } else {
        if (is_fetch)
            instHits++;
        else if (is_write)
            writeHits++;
        else
            readHits++;
    }
    return NoFault;
}

Fault
TLB::getResultTe(TlbEntry **te, const RequestPtr &req,
                 ThreadContext *tc, Mode mode,
                 Translation *translation, bool timing, bool functional,
                 TlbEntry *mergeTe)
{
    Fault fault;

    if (isStage2) {
        // We are already in the stage 2 TLB. Grab the table entry for stage
        // 2 only. We are here because stage 1 translation is disabled.
        TlbEntry *s2Te = NULL;
        // Get the stage 2 table entry
        fault = getTE(&s2Te, req, tc, mode, translation, timing, functional,
                      isSecure, curTranType);
        // Check permissions of stage 2
        if ((s2Te != NULL) && (fault == NoFault)) {
            if (aarch64)
                fault = checkPermissions64(s2Te, req, mode, tc);
            else
                fault = checkPermissions(s2Te, req, mode);
        }
        *te = s2Te;
        return fault;
    }

    TlbEntry *s1Te = NULL;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
                  isSecure, curTranType);
    // only proceed if we have a valid table entry
    if ((s1Te != NULL) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (aarch64)
            fault = checkPermissions64(s1Te, req, mode, tc);
        else
            fault = checkPermissions(s1Te, req, mode);
        if (stage2Req & (fault == NoFault)) {
            Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
                req, translation, mode, timing, functional, curTranType);
            fault = s2Lookup->getTe(tc, mergeTe);
            if (s2Lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2Lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now. We
                // get round this by asking the object to self delete when the
                // translation is complete.
                s2Lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            if (isStage2) {
                DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                        vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
                        fault);
                if (fault != NoFault) {
                    ArmFault *armFault = reinterpret_cast<ArmFault *>(
                        fault.get());
                    armFault->annotate(ArmFault::S1PTW, false);
                    armFault->annotate(ArmFault::OVA, vaddr_tainted);
                }
            }
            *te = s1Te;
        }
    }
    return fault;
}

void
TLB::setTestInterface(SimObject *_ti)
{
    if (!_ti) {
        test = nullptr;
    } else {
        TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
        fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
        test = ti;
    }
}

Fault
TLB::testTranslation(const RequestPtr &req, Mode mode,
                     TlbEntry::DomainType domain)
{
    if (!test || !req->hasSize() || req->getSize() == 0 ||
        req->isCacheMaintenance()) {
        return NoFault;
    } else {
        return test->translationCheck(req, isPriv, mode, domain);
    }
}

Fault
TLB::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
              TlbEntry::DomainType domain, LookupLevel lookup_level)
{
    if (!test) {
        return NoFault;
    } else {
        return test->walkCheck(pa, size, va, is_secure, isPriv, mode,
                               domain, lookup_level);
    }
}


ArmISA::TLB *
ArmTLBParams::create()
{
    return new ArmISA::TLB(this);
}