tlb.cc revision 11517:54230f1ebef2
/*
 * Copyright (c) 2010-2013, 2016 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Nathan Binkert
 *          Steve Reinhardt
 */

#include "arch/arm/tlb.hh"

#include <memory>
#include <string>
#include <vector>

#include "arch/arm/faults.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/system.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/utility.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "params/ArmTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

using namespace std;
using namespace ArmISA;

TLB::TLB(const ArmTLBParams *p)
    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
      isStage2(p->is_stage2), stage2Req(false), _attr(0),
      directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
      stage2Mmu(NULL), test(nullptr), rangeMRU(1),
      aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
      isHyp(false), asid(0), vmid(0), dacr(0),
      miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
{
    tableWalker->setTlb(this);

    // Cache system-level properties
    haveLPAE = tableWalker->haveLPAE();
    haveVirtualization = tableWalker->haveVirtualization();
    haveLargeAsid64 = tableWalker->haveLargeAsid64();
}

TLB::~TLB()
{
    delete[] table;
}

void
TLB::init()
{
    if (stage2Mmu && !isStage2)
        stage2Tlb = stage2Mmu->stage2Tlb();
}

void
TLB::setMMU(Stage2MMU *m, MasterID master_id)
{
    stage2Mmu = m;
    tableWalker->setMMU(m, master_id);
}

bool
TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    updateMiscReg(tc);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(tc, va, pa);
    }

    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
                         aarch64 ? aarch64EL : EL1);
    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}

Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    return NoFault;
}

TlbEntry*
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
            bool functional, bool ignore_asn, uint8_t target_el)
{
    TlbEntry *retval = NULL;

    // Maintaining LRU array
    int x = 0;
    while (retval == NULL && x < size) {
        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
             target_el)) ||
            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
            // We only move the hit entry ahead when the position is higher
            // than rangeMRU
            if (x > rangeMRU && !functional) {
                TlbEntry tmp_entry = table[x];
                for (int i = x; i > 0; i--)
                    table[i] = table[i - 1];
                table[0] = tmp_entry;
                retval = &table[0];
            } else {
                retval = &table[x];
            }
            break;
        }
        ++x;
    }

    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
            "el: %d\n",
            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
            retval ? retval->pfn : 0, retval ? retval->size : 0,
            retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
            retval ? retval->ns : 0, retval ? retval->nstid : 0,
            retval ? retval->global : 0, retval ? retval->asid : 0,
            retval ? retval->el : 0);

    return retval;
}

// insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
            entry.global, entry.valid, entry.nonCacheable, entry.xn,
            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns,
            entry.nstid, entry.isHyp);

    if (table[size - 1].valid)
        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
                table[size-1].vpn << table[size-1].N, table[size-1].asid,
                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
                table[size-1].size, table[size-1].ap, table[size-1].ns,
                table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
                table[size-1].el);

    // inserting to MRU position and evicting the LRU one
    for (int i = size - 1; i > 0; --i)
        table[i] = table[i-1];
    table[0] = entry;

    inserts++;
    ppRefills->notify(1);
}

void
TLB::printTlb() const
{
    int x = 0;
    TlbEntry *te;
    DPRINTF(TLB, "Current TLB contents:\n");
    while (x < size) {
        te = &table[x];
        if (te->valid)
            DPRINTF(TLB, " * %s\n", te->print());
        ++x;
    }
}

void
TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
            (secure_lookup ? "secure" : "non-secure"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    // if we're currently in hyp mode
    if (!isStage2 && isHyp) {
        stage2Tlb->flushAllSecurity(secure_lookup, true);
    }
}

void
TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
            (hyp ? "hyp" : "non-hyp"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && te->nstid && te->isHyp == hyp &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
            te->valid = false;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    if (!isStage2 && !hyp) {
        stage2Tlb->flushAllNs(false, true);
    }
}

void
TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
            "(%s lookup)\n", mva, asn,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, asn, secure_lookup, false, false, target_el);
    flushTlbMvaAsid++;
}

void
TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
            (secure_lookup ? "secure" : "non-secure"));

    int x = 0;
    TlbEntry *te;

    while (x < size) {
        te = &table[x];
        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, false)) {

            te->valid = false;
            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
        }
        ++x;
    }
    flushTlbAsid++;
}

void
TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
    flushTlbMva++;
}

void
TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
               bool ignore_asn, uint8_t target_el)
{
    TlbEntry *te;
    // D5.7.2: Sign-extend address to 64 bits
    mva = sext<56>(mva);
    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                target_el);
    while (te != NULL) {
        if (secure_lookup == !te->nstid) {
            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                    target_el);
    }
}

bool
TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
{
    bool elMatch = true;
    if (!ignore_el) {
        if (target_el == 2 || target_el == 3) {
            elMatch = (tentry_el == target_el);
        } else {
            elMatch = (tentry_el == 0) || (tentry_el == 1);
        }
    }
    return elMatch;
}

void
TLB::drainResume()
{
    // We might have unserialized something or switched CPUs, so make
    // sure to re-read the misc regs.
    miscRegValid = false;
}

void
TLB::takeOverFrom(BaseTLB *_otlb)
{
    TLB *otlb = dynamic_cast<TLB*>(_otlb);
    /* Make sure we actually have a valid type */
    if (otlb) {
        _attr = otlb->_attr;
        haveLPAE = otlb->haveLPAE;
        directToStage2 = otlb->directToStage2;
        stage2Req = otlb->stage2Req;

        /* Sync the stage2 MMU if they exist in both
         * the old CPU and the new
         */
        if (!isStage2 &&
            stage2Tlb && otlb->stage2Tlb) {
            stage2Tlb->takeOverFrom(otlb->stage2Tlb);
        }
    } else {
        panic("Incompatible TLB type!");
    }
}

void
TLB::serialize(CheckpointOut &cp) const
{
    DPRINTF(Checkpoint, "Serializing Arm TLB\n");

    SERIALIZE_SCALAR(_attr);
    SERIALIZE_SCALAR(haveLPAE);
    SERIALIZE_SCALAR(directToStage2);
    SERIALIZE_SCALAR(stage2Req);

    int num_entries = size;
    SERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < size; i++)
        table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
}

void
TLB::unserialize(CheckpointIn &cp)
{
    DPRINTF(Checkpoint, "Unserializing Arm TLB\n");

    UNSERIALIZE_SCALAR(_attr);
    UNSERIALIZE_SCALAR(haveLPAE);
    UNSERIALIZE_SCALAR(directToStage2);
    UNSERIALIZE_SCALAR(stage2Req);

    int num_entries;
    UNSERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < min(size, num_entries); i++)
        table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
}

void
TLB::regStats()
{
    instHits
        .name(name() + ".inst_hits")
        .desc("ITB inst hits")
        ;

    instMisses
        .name(name() + ".inst_misses")
        .desc("ITB inst misses")
        ;

    instAccesses
        .name(name() + ".inst_accesses")
        .desc("ITB inst accesses")
        ;

    readHits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    readMisses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    readAccesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    writeHits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    writeMisses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    writeAccesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    flushTlb
        .name(name() + ".flush_tlb")
        .desc("Number of times complete TLB was flushed")
        ;

    flushTlbMva
        .name(name() + ".flush_tlb_mva")
        .desc("Number of times TLB was flushed by MVA")
        ;

    flushTlbMvaAsid
        .name(name() + ".flush_tlb_mva_asid")
        .desc("Number of times TLB was flushed by MVA & ASID")
        ;

    flushTlbAsid
        .name(name() + ".flush_tlb_asid")
        .desc("Number of times TLB was flushed by ASID")
        ;

    flushedEntries
        .name(name() + ".flush_entries")
        .desc("Number of entries that have been flushed from TLB")
        ;

    alignFaults
        .name(name() + ".align_faults")
        .desc("Number of TLB faults due to alignment restrictions")
        ;

    prefetchFaults
        .name(name() + ".prefetch_faults")
        .desc("Number of TLB faults due to prefetch")
        ;

    domainFaults
        .name(name() + ".domain_faults")
        .desc("Number of TLB faults due to domain restrictions")
        ;

    permsFaults
        .name(name() + ".perms_faults")
        .desc("Number of TLB faults due to permissions restrictions")
        ;

    instAccesses = instHits + instMisses;
    readAccesses = readHits + readMisses;
    writeAccesses = writeHits + writeMisses;
    hits = readHits + writeHits + instHits;
    misses = readMisses + writeMisses + instMisses;
    accesses = readAccesses + writeAccesses + instAccesses;
}

void
TLB::regProbePoints()
{
    ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
}

Fault
TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing)
{
    updateMiscReg(tc);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::VmsaTran);
            }
        }
    }

    Addr paddr;
    Process *p = tc->getProcessPtr();

    if (!p->pTable->translate(vaddr, paddr))
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    req->setPaddr(paddr);

    return NoFault;
}

Fault
TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
{
    Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv = isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
                                                         : ArmFault::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr, TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr, ArmFault::PrefetchUncacheable,
                isStage2, tranMethod);
        }
    }

    if (!te->longDescFormat) {
        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            domainFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch)
                return std::make_shared<PrefetchAbort>(
                    vaddr,
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
            else
                return std::make_shared<DataAbort>(
                    vaddr, te->domain, is_write,
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
    uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (e.g. for reading stage 1 page table
    // entries) then don't perform the AP permissions check, we still do the
    // HAP check below.
    if (isStage2) {
        abt = false;
    } else {
        switch (ap) {
          case 0:
            DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
                    (int)sctlr.rs);
            if (!sctlr.xp) {
                switch ((int)sctlr.rs) {
                  case 2:
                    abt = is_write;
                    break;
                  case 1:
                    abt = is_write || !is_priv;
                    break;
                  case 0:
                  case 3:
                  default:
                    abt = true;
                    break;
                }
            } else {
                abt = true;
            }
            break;
          case 1:
            abt = !is_priv;
            break;
          case 2:
            abt = !is_priv && is_write;
            isWritable = is_priv;
            break;
          case 3:
            abt = false;
            break;
          case 4:
            panic("UNPRED permissions\n");
          case 5:
            abt = !is_priv || is_write;
            isWritable = false;
            break;
          case 6:
          case 7:
            abt = is_write;
            isWritable = false;
            break;
          default:
            panic("Unknown permissions %#x\n", ap);
        }
    }

    bool hapAbt = is_write ?
        !(hap & 2) : !(hap & 1);
    bool xn = te->xn || (isWritable && sctlr.wxn) ||
        (ap == 3 && sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && is_priv) ||
                     (isSecure && te->ns && scr.sif))) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
                ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
        return std::make_shared<PrefetchAbort>(
            vaddr,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    } else if (abt | hapAbt) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
                " write:%d\n", ap, is_priv, is_write);
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2 | !abt, tranMethod);
    }
    return NoFault;
}


Fault
TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
                        ThreadContext *tc)
{
    assert(aarch64);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);

    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);

    updateMiscReg(tc, curTranType);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, ArmFault::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted,
                ArmFault::PrefetchUncacheable,
                isStage2, ArmFault::LpaeTran);
        }
    }

    uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
    bool grant = false;

    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;
    bool r = !is_write && !is_fetch;
    bool w = is_write;
    bool x = is_fetch;
    DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", ap, xn, pxn, r, w, x);

    if (isStage2) {
        panic("Virtualization in AArch64 state is not supported yet");
    } else {
        switch (aarch64EL) {
          case EL0:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 1:
                  case 8:
                  case 9:
                    grant = x;
                    break;
                  case 4:
                  case 5:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 6:
                  case 7:
                    grant = r || w;
                    break;
                  case 12:
                  case 13:
                    grant = r || x;
                    break;
                  case 14:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL1:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 2:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                  case 3:
                  case 4:
                  case 5:
                  case 6:
                  case 7:
                    // regions that are writeable at EL0 should not be
                    // executable at EL1
                    grant = r || w;
                    break;
                  case 8:
                  case 10:
                  case 12:
                  case 14:
                    grant = r || x;
                    break;
                  case 9:
                  case 11:
                  case 13:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL2:
          case EL3:
            {
                uint8_t perm = (ap & 0x2) | xn;
                switch (perm) {
                  case 0:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                    grant = r || w;
                    break;
                  case 2:
                    grant = r || x;
                    break;
                  case 3:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
        }
    }

    if (!grant) {
        if (is_fetch) {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
                    "AP:%d priv:%d write:%d ns:%d sif:%d "
                    "sctlr.afe: %d\n",
                    ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned to
            // cache line and should not be the address reported in FAR
            return std::make_shared<PrefetchAbort>(
                req->getPC(),
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        } else {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                    "priv:%d write:%d\n", ap, is_priv, is_write);
            return std::make_shared<DataAbort>(
                vaddr_tainted, te->domain, is_write,
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        }
    }

    return NoFault;
}

Fault
TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 TLB::ArmTranslationType tranType, bool functional)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    updateMiscReg(tc, tranType);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = aarch64 || longDescFormatInUse(tc);
    ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
                                                       : ArmFault::VmsaTran;

    req->setAsid(asid);

    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
            scr, sctlr, flags, tranType);

    if ((req->isInstFetch() && (!sctlr.i)) ||
        ((!req->isInstFetch()) && (!sctlr.c))) {
        req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
    }
    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {

        req->setPaddr(vaddr);
        // When the MMU is off the security attribute corresponds to the
        // security state of the processor
        if (isSecure)
            req->setFlags(Request::SECURE);

        // @todo: double check this (ARM ARM issue C B3.2.1)
        if (long_desc_format || sctlr.tre == 0) {
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        } else {
            if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
                req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        }

        // Set memory attributes
        TlbEntry temp_te;
        temp_te.ns = !isSecure;
        if (isStage2 || hcr.dc == 0 || isSecure ||
            (isHyp && !(tranType & S1CTran))) {

            temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
                                     : TlbEntry::MemoryType::StronglyOrdered;
            temp_te.innerAttrs = 0x0;
            temp_te.outerAttrs = 0x0;
            temp_te.shareable = true;
            temp_te.outerShareable = true;
        } else {
            temp_te.mtype = TlbEntry::MemoryType::Normal;
            temp_te.innerAttrs = 0x3;
            temp_te.outerAttrs = 0x3;
            temp_te.shareable = false;
            temp_te.outerShareable = false;
        }
        temp_te.setAttributes(long_desc_format);
        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
                "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
                isStage2);
        setAttr(temp_te.attributes);

        return testTranslation(req, mode, TlbEntry::DomainType::NoAccess);
    }

    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
    // Translation enabled

    TlbEntry *te = NULL;
    TlbEntry mergeTe;
    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe);
    // only proceed if we have a valid table entry
    if ((te == NULL) && (fault == NoFault)) delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (te != NULL) {
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, isStage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), isStage2);
        setAttr(te->attributes);

        if (te->nonCacheable)
            req->setFlags(Request::UNCACHEABLE);

        // Require requests to be ordered if the request goes to
        // strongly ordered or device memory (i.e., anything other
        // than normal memory requires strict order).
        if (te->mtype != TlbEntry::MemoryType::Normal)
            req->setFlags(Request::STRICT_ORDER);

        Addr pa = te->pAddr(vaddr);
        req->setPaddr(pa);

        if (isSecure && !te->ns) {
            req->setFlags(Request::SECURE);
        }
        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            alignFaults++;
            return std::make_shared<DataAbort>(
                vaddr_tainted,
                TlbEntry::DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, isStage2,
                tranMethod);
        }

        // Check for a trickbox generated address fault
        if (fault == NoFault)
            fault = testTranslation(req, mode, te->domain);
    }

    // Generate Illegal Inst Set State fault if IL bit is set in CPSR
    if (fault == NoFault) {
        if (aarch64 && is_fetch && cpsr.il == 1) {
            return std::make_shared<IllegalInstSetStateFault>();
        }
    }

    return fault;
}

Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateAtomic(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
                         TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
                     Translation *translation, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateTiming(req, tc, translation, mode,
                                          tranType);
    }

    assert(translation);

    return translateComplete(req, tc, translation, mode, tranType, isStage2);
}

Fault
TLB::translateComplete(RequestPtr req, ThreadContext *tc,
                       Translation *translation, Mode mode,
                       TLB::ArmTranslationType tranType, bool callFromS2)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true, tranType);
    else
        fault = translateSe(req, tc, mode, translation, delay, true);
    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a stage
    // 2 translation tell the translation that we've either finished or it's
    // going to take a while. By not doing this when we're in the middle of a
    // stage 2 translation we prevent marking the translation as delayed twice,
    // once when the translation starts and again when the stage 1 translation
    // completes.
    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() ||
        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}

BaseMasterPort*
TLB::getMasterPort()
{
    return &stage2Mmu->getPort();
}

void
TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
{
    // check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle
    // one type of translation anyway
    if (miscRegValid && miscRegContext == tc->contextId() &&
        ((tranType == curTranType) || isStage2)) {
        return;
    }

    DPRINTF(TLBVerbose, "TLB variables changed!\n");
    cpsr = tc->readMiscReg(MISCREG_CPSR);

    // Dependencies: SCR/SCR_EL3, CPSR
    isSecure = inSecureState(tc) &&
        !(tranType & HypMode) && !(tranType & S1S2NsTran);

    const OperatingMode op_mode = (OperatingMode) (uint8_t)cpsr.mode;
    aarch64 = opModeIs64(op_mode) ||
        (opModeToEL(op_mode) == EL0 && ELIs64(tc, EL1));

    if (aarch64) {  // AArch64
        aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
        switch (aarch64EL) {
          case EL0:
          case EL1:
            {
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
                    tc->readMiscReg(MISCREG_TTBR0_EL1);
                asid = bits(ttbr_asid,
                            (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case EL2:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            asid = -1;
            break;
          case EL3:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
            asid = -1;
            break;
        }
        scr = tc->readMiscReg(MISCREG_SCR_EL3);
        isPriv = aarch64EL != EL0;
        // @todo: modify this behaviour to support Virtualization in
        // AArch64
        vmid = 0;
        isHyp = false;
        directToStage2 = false;
        stage2Req = false;
    } else {  // AArch32
        sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
                                                       !isSecure));
        ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
                                                       !isSecure));
        scr = tc->readMiscReg(MISCREG_SCR);
        isPriv = cpsr.mode != MODE_USER;
        if (longDescFormatInUse(tc)) {
            uint64_t ttbr_asid = tc->readMiscReg(
                flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
                                                : MISCREG_TTBR0,
                                       tc, !isSecure));
            asid = bits(ttbr_asid, 55, 48);
        } else {  // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
                MISCREG_CONTEXTIDR, tc, !isSecure));
            asid = context_id.asid;
        }
        prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
                                                      !isSecure));
        nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
                                                      !isSecure));
        dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
                                                      !isSecure));
        hcr = tc->readMiscReg(MISCREG_HCR);

        if (haveVirtualization) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            isHyp = cpsr.mode == MODE_HYP;
            isHyp |= tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran) == 0;
            if (isHyp) {
                sctlr = tc->readMiscReg(MISCREG_HSCTLR);
            }
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
                !(tranType & S1CTran);
            directToStage2 = stage2Req && !sctlr.m;
        } else {
            vmid = 0;
            stage2Req = false;
            isHyp = false;
            directToStage2 = false;
        }
    }
    miscRegValid = true;
    miscRegContext = tc->contextId();
    curTranType = tranType;
}

Fault
TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           bool is_secure, TLB::ArmTranslationType tranType)
{
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
    if (aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
    } else {
        vaddr = vaddr_tainted;
    }
    *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
    if (*te == NULL) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or go
            // any further with the memory access (here we can safely use the
            // fault status for the short desc.
            // format in all cases)
            prefetchFaults++;
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
        }

        if (is_fetch)
            instMisses++;
        else if (is_write)
            writeMisses++;
        else
            readMisses++;

        // start translation table walk, pass variables rather than
        // re-retrieving them in the table walker for speed
        DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
                vaddr_tainted, asid, vmid);
        Fault fault;
        fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
                                  translation, timing, functional, is_secure,
                                  tranType);
        // for timing mode, return and wait for table walk
        if (timing || fault != NoFault) {
            return fault;
        }

        *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
                     target_el);
        if (!*te)
            printTlb();
        assert(*te);
    } else {
        if (is_fetch)
            instHits++;
        else if (is_write)
            writeHits++;
        else
            readHits++;
    }
    return NoFault;
}

Fault
TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool timing, bool functional,
                 TlbEntry *mergeTe)
{
    Fault fault;
    TlbEntry *s1Te = NULL;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
                  isSecure, curTranType);
    // only proceed if we have a valid table entry
    if ((s1Te != NULL) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (aarch64)
            fault = checkPermissions64(s1Te, req, mode, tc);
        else
            fault = checkPermissions(s1Te, req, mode);
        if (stage2Req && (fault == NoFault)) {
            Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
                req, translation, mode, timing, functional, curTranType);
            fault = s2Lookup->getTe(tc, mergeTe);
            if (s2Lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2Lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now. We
                // get round this by asking the object to self delete when the
                // translation is complete.
                s2Lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            if (isStage2) {
                DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                        vaddr_tainted, req->hasPaddr() ?
                        req->getPaddr() : ~0, fault);
                if (fault != NoFault) {
                    ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
                    armFault->annotate(ArmFault::S1PTW, false);
                    armFault->annotate(ArmFault::OVA, vaddr_tainted);
                }
            }
            *te = s1Te;
        }
    }
    return fault;
}

void
TLB::setTestInterface(SimObject *_ti)
{
    if (!_ti) {
        test = nullptr;
    } else {
        TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
        fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
        test = ti;
    }
}

Fault
TLB::testTranslation(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
{
    if (!test) {
        return NoFault;
    } else {
        return test->translationCheck(req, isPriv, mode, domain);
    }
}

Fault
TLB::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
              TlbEntry::DomainType domain, LookupLevel lookup_level)
{
    if (!test) {
        return NoFault;
    } else {
        return test->walkCheck(pa, size, va, is_secure, isPriv, mode,
                               domain, lookup_level);
    }
}


ArmISA::TLB *
ArmTLBParams::create()
{
    return new ArmISA::TLB(this);
}