tlb.cc revision 11055:54071fd5c397
1/* 2 * Copyright (c) 2010-2013 ARM Limited 3 * All rights reserved 4 * 5 * The license below extends only to copyright in the software and shall 6 * not be construed as granting a license to any other intellectual 7 * property including but not limited to intellectual property relating 8 * to a hardware implementation of the functionality of the software 9 * licensed hereunder. You may use the software subject to the license 10 * terms below provided that you ensure that this notice is replicated 11 * unmodified and in its entirety in all distributions of the software, 12 * modified or unmodified, in source code or in binary form. 13 * 14 * Copyright (c) 2001-2005 The Regents of The University of Michigan 15 * All rights reserved. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions are 19 * met: redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer; 21 * redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution; 24 * neither the name of the copyright holders nor the names of its 25 * contributors may be used to endorse or promote products derived from 26 * this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Authors: Ali Saidi 41 * Nathan Binkert 42 * Steve Reinhardt 43 */ 44 45#include "arch/arm/tlb.hh" 46 47#include <memory> 48#include <string> 49#include <vector> 50 51#include "arch/arm/faults.hh" 52#include "arch/arm/pagetable.hh" 53#include "arch/arm/system.hh" 54#include "arch/arm/table_walker.hh" 55#include "arch/arm/stage2_lookup.hh" 56#include "arch/arm/stage2_mmu.hh" 57#include "arch/arm/utility.hh" 58#include "base/inifile.hh" 59#include "base/str.hh" 60#include "base/trace.hh" 61#include "cpu/base.hh" 62#include "cpu/thread_context.hh" 63#include "debug/Checkpoint.hh" 64#include "debug/TLB.hh" 65#include "debug/TLBVerbose.hh" 66#include "mem/page_table.hh" 67#include "params/ArmTLB.hh" 68#include "sim/full_system.hh" 69#include "sim/process.hh" 70 71using namespace std; 72using namespace ArmISA; 73 74TLB::TLB(const ArmTLBParams *p) 75 : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size), 76 isStage2(p->is_stage2), stage2Req(false), _attr(0), 77 directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL), 78 stage2Mmu(NULL), rangeMRU(1), 79 aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false), 80 isHyp(false), asid(0), vmid(0), dacr(0), 81 miscRegValid(false), curTranType(NormalTran) 82{ 83 tableWalker->setTlb(this); 84 85 // Cache system-level properties 86 haveLPAE = tableWalker->haveLPAE(); 87 haveVirtualization = tableWalker->haveVirtualization(); 88 haveLargeAsid64 = tableWalker->haveLargeAsid64(); 89} 90 91TLB::~TLB() 92{ 93 delete[] table; 94} 95 96void 97TLB::init() 98{ 99 if (stage2Mmu && !isStage2) 100 stage2Tlb = stage2Mmu->stage2Tlb(); 101} 102 103void 104TLB::setMMU(Stage2MMU *m, MasterID master_id) 105{ 106 stage2Mmu = m; 107 tableWalker->setMMU(m, master_id); 108} 109 110bool 111TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa) 112{ 113 updateMiscReg(tc); 114 115 if (directToStage2) { 116 assert(stage2Tlb); 117 return stage2Tlb->translateFunctional(tc, va, pa); 118 } 119 120 TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false, 121 aarch64 ? aarch64EL : EL1); 122 if (!e) 123 return false; 124 pa = e->pAddr(va); 125 return true; 126} 127 128Fault 129TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const 130{ 131 return NoFault; 132} 133 134TlbEntry* 135TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure, 136 bool functional, bool ignore_asn, uint8_t target_el) 137{ 138 139 TlbEntry *retval = NULL; 140 141 // Maintaining LRU array 142 int x = 0; 143 while (retval == NULL && x < size) { 144 if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false, 145 target_el)) || 146 (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) { 147 // We only move the hit entry ahead when the position is higher 148 // than rangeMRU 149 if (x > rangeMRU && !functional) { 150 TlbEntry tmp_entry = table[x]; 151 for(int i = x; i > 0; i--) 152 table[i] = table[i - 1]; 153 table[0] = tmp_entry; 154 retval = &table[0]; 155 } else { 156 retval = &table[x]; 157 } 158 break; 159 } 160 ++x; 161 } 162 163 DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d " 164 "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d " 165 "el: %d\n", 166 va, asn, retval ? "hit" : "miss", vmid, hyp, secure, 167 retval ? retval->pfn : 0, retval ? retval->size : 0, 168 retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0, 169 retval ? retval->ns : 0, retval ? retval->nstid : 0, 170 retval ? retval->global : 0, retval ? retval->asid : 0, 171 retval ? retval->el : 0); 172 173 return retval; 174} 175 176// insert a new TLB entry 177void 178TLB::insert(Addr addr, TlbEntry &entry) 179{ 180 DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x" 181 " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d" 182 " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn, 183 entry.size, entry.vpn, entry.asid, entry.vmid, entry.N, 184 entry.global, entry.valid, entry.nonCacheable, entry.xn, 185 entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid, 186 entry.isHyp); 187 188 if (table[size - 1].valid) 189 DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x " 190 "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n", 191 table[size-1].vpn << table[size-1].N, table[size-1].asid, 192 table[size-1].vmid, table[size-1].pfn << table[size-1].N, 193 table[size-1].size, table[size-1].ap, table[size-1].ns, 194 table[size-1].nstid, table[size-1].global, table[size-1].isHyp, 195 table[size-1].el); 196 197 //inserting to MRU position and evicting the LRU one 198 199 for (int i = size - 1; i > 0; --i) 200 table[i] = table[i-1]; 201 table[0] = entry; 202 203 inserts++; 204 ppRefills->notify(1); 205} 206 207void 208TLB::printTlb() const 209{ 210 int x = 0; 211 TlbEntry *te; 212 DPRINTF(TLB, "Current TLB contents:\n"); 213 while (x < size) { 214 te = &table[x]; 215 if (te->valid) 216 DPRINTF(TLB, " * %s\n", te->print()); 217 ++x; 218 } 219} 220 221void 222TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el) 223{ 224 DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n", 225 (secure_lookup ? "secure" : "non-secure")); 226 int x = 0; 227 TlbEntry *te; 228 while (x < size) { 229 te = &table[x]; 230 if (te->valid && secure_lookup == !te->nstid && 231 (te->vmid == vmid || secure_lookup) && 232 checkELMatch(target_el, te->el, ignore_el)) { 233 234 DPRINTF(TLB, " - %s\n", te->print()); 235 te->valid = false; 236 flushedEntries++; 237 } 238 ++x; 239 } 240 241 flushTlb++; 242 243 // If there's a second stage TLB (and we're not it) then flush it as well 244 // if we're currently in hyp mode 245 if (!isStage2 && isHyp) { 246 stage2Tlb->flushAllSecurity(secure_lookup, true); 247 } 248} 249 250void 251TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el) 252{ 253 DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n", 254 (hyp ? "hyp" : "non-hyp")); 255 int x = 0; 256 TlbEntry *te; 257 while (x < size) { 258 te = &table[x]; 259 if (te->valid && te->nstid && te->isHyp == hyp && 260 checkELMatch(target_el, te->el, ignore_el)) { 261 262 DPRINTF(TLB, " - %s\n", te->print()); 263 flushedEntries++; 264 te->valid = false; 265 } 266 ++x; 267 } 268 269 flushTlb++; 270 271 // If there's a second stage TLB (and we're not it) then flush it as well 272 if (!isStage2 && !hyp) { 273 stage2Tlb->flushAllNs(false, true); 274 } 275} 276 277void 278TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, uint8_t target_el) 279{ 280 DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x " 281 "(%s lookup)\n", mva, asn, (secure_lookup ? 282 "secure" : "non-secure")); 283 _flushMva(mva, asn, secure_lookup, false, false, target_el); 284 flushTlbMvaAsid++; 285} 286 287void 288TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el) 289{ 290 DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn, 291 (secure_lookup ? "secure" : "non-secure")); 292 293 int x = 0 ; 294 TlbEntry *te; 295 296 while (x < size) { 297 te = &table[x]; 298 if (te->valid && te->asid == asn && secure_lookup == !te->nstid && 299 (te->vmid == vmid || secure_lookup) && 300 checkELMatch(target_el, te->el, false)) { 301 302 te->valid = false; 303 DPRINTF(TLB, " - %s\n", te->print()); 304 flushedEntries++; 305 } 306 ++x; 307 } 308 flushTlbAsid++; 309} 310 311void 312TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el) 313{ 314 DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva, 315 (secure_lookup ? "secure" : "non-secure")); 316 _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el); 317 flushTlbMva++; 318} 319 320void 321TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp, 322 bool ignore_asn, uint8_t target_el) 323{ 324 TlbEntry *te; 325 // D5.7.2: Sign-extend address to 64 bits 326 mva = sext<56>(mva); 327 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn, 328 target_el); 329 while (te != NULL) { 330 if (secure_lookup == !te->nstid) { 331 DPRINTF(TLB, " - %s\n", te->print()); 332 te->valid = false; 333 flushedEntries++; 334 } 335 te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn, 336 target_el); 337 } 338} 339 340bool 341TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el) 342{ 343 bool elMatch = true; 344 if (!ignore_el) { 345 if (target_el == 2 || target_el == 3) { 346 elMatch = (tentry_el == target_el); 347 } else { 348 elMatch = (tentry_el == 0) || (tentry_el == 1); 349 } 350 } 351 return elMatch; 352} 353 354void 355TLB::drainResume() 356{ 357 // We might have unserialized something or switched CPUs, so make 358 // sure to re-read the misc regs. 359 miscRegValid = false; 360} 361 362void 363TLB::takeOverFrom(BaseTLB *_otlb) 364{ 365 TLB *otlb = dynamic_cast<TLB*>(_otlb); 366 /* Make sure we actually have a valid type */ 367 if (otlb) { 368 _attr = otlb->_attr; 369 haveLPAE = otlb->haveLPAE; 370 directToStage2 = otlb->directToStage2; 371 stage2Req = otlb->stage2Req; 372 373 /* Sync the stage2 MMU if they exist in both 374 * the old CPU and the new 375 */ 376 if (!isStage2 && 377 stage2Tlb && otlb->stage2Tlb) { 378 stage2Tlb->takeOverFrom(otlb->stage2Tlb); 379 } 380 } else { 381 panic("Incompatible TLB type!"); 382 } 383} 384 385void 386TLB::serialize(CheckpointOut &cp) const 387{ 388 DPRINTF(Checkpoint, "Serializing Arm TLB\n"); 389 390 SERIALIZE_SCALAR(_attr); 391 SERIALIZE_SCALAR(haveLPAE); 392 SERIALIZE_SCALAR(directToStage2); 393 SERIALIZE_SCALAR(stage2Req); 394 395 int num_entries = size; 396 SERIALIZE_SCALAR(num_entries); 397 for(int i = 0; i < size; i++) 398 table[i].serializeSection(cp, csprintf("TlbEntry%d", i)); 399} 400 401void 402TLB::unserialize(CheckpointIn &cp) 403{ 404 DPRINTF(Checkpoint, "Unserializing Arm TLB\n"); 405 406 UNSERIALIZE_SCALAR(_attr); 407 UNSERIALIZE_SCALAR(haveLPAE); 408 UNSERIALIZE_SCALAR(directToStage2); 409 UNSERIALIZE_SCALAR(stage2Req); 410 411 int num_entries; 412 UNSERIALIZE_SCALAR(num_entries); 413 for(int i = 0; i < min(size, num_entries); i++) 414 table[i].unserializeSection(cp, csprintf("TlbEntry%d", i)); 415} 416 417void 418TLB::regStats() 419{ 420 instHits 421 .name(name() + ".inst_hits") 422 .desc("ITB inst hits") 423 ; 424 425 instMisses 426 .name(name() + ".inst_misses") 427 .desc("ITB inst misses") 428 ; 429 430 instAccesses 431 .name(name() + ".inst_accesses") 432 .desc("ITB inst accesses") 433 ; 434 435 readHits 436 .name(name() + ".read_hits") 437 .desc("DTB read hits") 438 ; 439 440 readMisses 441 .name(name() + ".read_misses") 442 .desc("DTB read misses") 443 ; 444 445 readAccesses 446 .name(name() + ".read_accesses") 447 .desc("DTB read accesses") 448 ; 449 450 writeHits 451 .name(name() + ".write_hits") 452 .desc("DTB write hits") 453 ; 454 455 writeMisses 456 .name(name() + ".write_misses") 457 .desc("DTB write misses") 458 ; 459 460 writeAccesses 461 .name(name() + ".write_accesses") 462 .desc("DTB write accesses") 463 ; 464 465 hits 466 .name(name() + ".hits") 467 .desc("DTB hits") 468 ; 469 470 misses 471 .name(name() + ".misses") 472 .desc("DTB misses") 473 ; 474 475 accesses 476 .name(name() + ".accesses") 477 .desc("DTB accesses") 478 ; 479 480 flushTlb 481 .name(name() + ".flush_tlb") 482 .desc("Number of times complete TLB was flushed") 483 ; 484 485 flushTlbMva 486 .name(name() + ".flush_tlb_mva") 487 .desc("Number of times TLB was flushed by MVA") 488 ; 489 490 flushTlbMvaAsid 491 .name(name() + ".flush_tlb_mva_asid") 492 .desc("Number of times TLB was flushed by MVA & ASID") 493 ; 494 495 flushTlbAsid 496 .name(name() + ".flush_tlb_asid") 497 .desc("Number of times TLB was flushed by ASID") 498 ; 499 500 flushedEntries 501 .name(name() + ".flush_entries") 502 .desc("Number of entries that have been flushed from TLB") 503 ; 504 505 alignFaults 506 .name(name() + ".align_faults") 507 .desc("Number of TLB faults due to alignment restrictions") 508 ; 509 510 prefetchFaults 511 .name(name() + ".prefetch_faults") 512 .desc("Number of TLB faults due to prefetch") 513 ; 514 515 domainFaults 516 .name(name() + ".domain_faults") 517 .desc("Number of TLB faults due to domain restrictions") 518 ; 519 520 permsFaults 521 .name(name() + ".perms_faults") 522 .desc("Number of TLB faults due to permissions restrictions") 523 ; 524 525 instAccesses = instHits + instMisses; 526 readAccesses = readHits + readMisses; 527 writeAccesses = writeHits + writeMisses; 528 hits = readHits + writeHits + instHits; 529 misses = readMisses + writeMisses + instMisses; 530 accesses = readAccesses + writeAccesses + instAccesses; 531} 532 533void 534TLB::regProbePoints() 535{ 536 ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills")); 537} 538 539Fault 540TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode, 541 Translation *translation, bool &delay, bool timing) 542{ 543 updateMiscReg(tc); 544 Addr vaddr_tainted = req->getVaddr(); 545 Addr vaddr = 0; 546 if (aarch64) 547 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr); 548 else 549 vaddr = vaddr_tainted; 550 uint32_t flags = req->getFlags(); 551 552 bool is_fetch = (mode == Execute); 553 bool is_write = (mode == Write); 554 555 if (!is_fetch) { 556 assert(flags & MustBeOne); 557 if (sctlr.a || !(flags & AllowUnaligned)) { 558 if (vaddr & mask(flags & AlignmentMask)) { 559 // LPAE is always disabled in SE mode 560 return std::make_shared<DataAbort>( 561 vaddr_tainted, 562 TlbEntry::DomainType::NoAccess, is_write, 563 ArmFault::AlignmentFault, isStage2, 564 ArmFault::VmsaTran); 565 } 566 } 567 } 568 569 Addr paddr; 570 Process *p = tc->getProcessPtr(); 571 572 if (!p->pTable->translate(vaddr, paddr)) 573 return std::make_shared<GenericPageTableFault>(vaddr_tainted); 574 req->setPaddr(paddr); 575 576 return NoFault; 577} 578 579Fault 580TLB::trickBoxCheck(RequestPtr req, Mode mode, TlbEntry::DomainType domain) 581{ 582 return NoFault; 583} 584 585Fault 586TLB::walkTrickBoxCheck(Addr pa, bool is_secure, Addr va, Addr sz, bool is_exec, 587 bool is_write, TlbEntry::DomainType domain, LookupLevel lookup_level) 588{ 589 return NoFault; 590} 591 592Fault 593TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode) 594{ 595 Addr vaddr = req->getVaddr(); // 32-bit don't have to purify 596 uint32_t flags = req->getFlags(); 597 bool is_fetch = (mode == Execute); 598 bool is_write = (mode == Write); 599 bool is_priv = isPriv && !(flags & UserMode); 600 601 // Get the translation type from the actuall table entry 602 ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran 603 : ArmFault::VmsaTran; 604 605 // If this is the second stage of translation and the request is for a 606 // stage 1 page table walk then we need to check the HCR.PTW bit. This 607 // allows us to generate a fault if the request targets an area marked 608 // as a device or strongly ordered. 609 if (isStage2 && req->isPTWalk() && hcr.ptw && 610 (te->mtype != TlbEntry::MemoryType::Normal)) { 611 return std::make_shared<DataAbort>( 612 vaddr, te->domain, is_write, 613 ArmFault::PermissionLL + te->lookupLevel, 614 isStage2, tranMethod); 615 } 616 617 // Generate an alignment fault for unaligned data accesses to device or 618 // strongly ordered memory 619 if (!is_fetch) { 620 if (te->mtype != TlbEntry::MemoryType::Normal) { 621 if (vaddr & mask(flags & AlignmentMask)) { 622 alignFaults++; 623 return std::make_shared<DataAbort>( 624 vaddr, TlbEntry::DomainType::NoAccess, is_write, 625 ArmFault::AlignmentFault, isStage2, 626 tranMethod); 627 } 628 } 629 } 630 631 if (te->nonCacheable) { 632 // Prevent prefetching from I/O devices. 633 if (req->isPrefetch()) { 634 // Here we can safely use the fault status for the short 635 // desc. format in all cases 636 return std::make_shared<PrefetchAbort>( 637 vaddr, ArmFault::PrefetchUncacheable, 638 isStage2, tranMethod); 639 } 640 } 641 642 if (!te->longDescFormat) { 643 switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) { 644 case 0: 645 domainFaults++; 646 DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x" 647 " domain: %#x write:%d\n", dacr, 648 static_cast<uint8_t>(te->domain), is_write); 649 if (is_fetch) 650 return std::make_shared<PrefetchAbort>( 651 vaddr, 652 ArmFault::DomainLL + te->lookupLevel, 653 isStage2, tranMethod); 654 else 655 return std::make_shared<DataAbort>( 656 vaddr, te->domain, is_write, 657 ArmFault::DomainLL + te->lookupLevel, 658 isStage2, tranMethod); 659 case 1: 660 // Continue with permissions check 661 break; 662 case 2: 663 panic("UNPRED domain\n"); 664 case 3: 665 return NoFault; 666 } 667 } 668 669 // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits 670 uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap; 671 uint8_t hap = te->hap; 672 673 if (sctlr.afe == 1 || te->longDescFormat) 674 ap |= 1; 675 676 bool abt; 677 bool isWritable = true; 678 // If this is a stage 2 access (eg for reading stage 1 page table entries) 679 // then don't perform the AP permissions check, we stil do the HAP check 680 // below. 681 if (isStage2) { 682 abt = false; 683 } else { 684 switch (ap) { 685 case 0: 686 DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n", 687 (int)sctlr.rs); 688 if (!sctlr.xp) { 689 switch ((int)sctlr.rs) { 690 case 2: 691 abt = is_write; 692 break; 693 case 1: 694 abt = is_write || !is_priv; 695 break; 696 case 0: 697 case 3: 698 default: 699 abt = true; 700 break; 701 } 702 } else { 703 abt = true; 704 } 705 break; 706 case 1: 707 abt = !is_priv; 708 break; 709 case 2: 710 abt = !is_priv && is_write; 711 isWritable = is_priv; 712 break; 713 case 3: 714 abt = false; 715 break; 716 case 4: 717 panic("UNPRED premissions\n"); 718 case 5: 719 abt = !is_priv || is_write; 720 isWritable = false; 721 break; 722 case 6: 723 case 7: 724 abt = is_write; 725 isWritable = false; 726 break; 727 default: 728 panic("Unknown permissions %#x\n", ap); 729 } 730 } 731 732 bool hapAbt = is_write ? !(hap & 2) : !(hap & 1); 733 bool xn = te->xn || (isWritable && sctlr.wxn) || 734 (ap == 3 && sctlr.uwxn && is_priv); 735 if (is_fetch && (abt || xn || 736 (te->longDescFormat && te->pxn && !is_priv) || 737 (isSecure && te->ns && scr.sif))) { 738 permsFaults++; 739 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d " 740 "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n", 741 ap, is_priv, is_write, te->ns, scr.sif,sctlr.afe); 742 return std::make_shared<PrefetchAbort>( 743 vaddr, 744 ArmFault::PermissionLL + te->lookupLevel, 745 isStage2, tranMethod); 746 } else if (abt | hapAbt) { 747 permsFaults++; 748 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d" 749 " write:%d\n", ap, is_priv, is_write); 750 return std::make_shared<DataAbort>( 751 vaddr, te->domain, is_write, 752 ArmFault::PermissionLL + te->lookupLevel, 753 isStage2 | !abt, tranMethod); 754 } 755 return NoFault; 756} 757 758 759Fault 760TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode, 761 ThreadContext *tc) 762{ 763 assert(aarch64); 764 765 Addr vaddr_tainted = req->getVaddr(); 766 Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr); 767 768 uint32_t flags = req->getFlags(); 769 bool is_fetch = (mode == Execute); 770 bool is_write = (mode == Write); 771 bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode); 772 773 updateMiscReg(tc, curTranType); 774 775 // If this is the second stage of translation and the request is for a 776 // stage 1 page table walk then we need to check the HCR.PTW bit. This 777 // allows us to generate a fault if the request targets an area marked 778 // as a device or strongly ordered. 779 if (isStage2 && req->isPTWalk() && hcr.ptw && 780 (te->mtype != TlbEntry::MemoryType::Normal)) { 781 return std::make_shared<DataAbort>( 782 vaddr_tainted, te->domain, is_write, 783 ArmFault::PermissionLL + te->lookupLevel, 784 isStage2, ArmFault::LpaeTran); 785 } 786 787 // Generate an alignment fault for unaligned accesses to device or 788 // strongly ordered memory 789 if (!is_fetch) { 790 if (te->mtype != TlbEntry::MemoryType::Normal) { 791 if (vaddr & mask(flags & AlignmentMask)) { 792 alignFaults++; 793 return std::make_shared<DataAbort>( 794 vaddr_tainted, 795 TlbEntry::DomainType::NoAccess, is_write, 796 ArmFault::AlignmentFault, isStage2, 797 ArmFault::LpaeTran); 798 } 799 } 800 } 801 802 if (te->nonCacheable) { 803 // Prevent prefetching from I/O devices. 804 if (req->isPrefetch()) { 805 // Here we can safely use the fault status for the short 806 // desc. format in all cases 807 return std::make_shared<PrefetchAbort>( 808 vaddr_tainted, 809 ArmFault::PrefetchUncacheable, 810 isStage2, ArmFault::LpaeTran); 811 } 812 } 813 814 uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field 815 bool grant = false; 816 817 uint8_t xn = te->xn; 818 uint8_t pxn = te->pxn; 819 bool r = !is_write && !is_fetch; 820 bool w = is_write; 821 bool x = is_fetch; 822 DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, " 823 "w:%d, x:%d\n", ap, xn, pxn, r, w, x); 824 825 if (isStage2) { 826 panic("Virtualization in AArch64 state is not supported yet"); 827 } else { 828 switch (aarch64EL) { 829 case EL0: 830 { 831 uint8_t perm = (ap << 2) | (xn << 1) | pxn; 832 switch (perm) { 833 case 0: 834 case 1: 835 case 8: 836 case 9: 837 grant = x; 838 break; 839 case 4: 840 case 5: 841 grant = r || w || (x && !sctlr.wxn); 842 break; 843 case 6: 844 case 7: 845 grant = r || w; 846 break; 847 case 12: 848 case 13: 849 grant = r || x; 850 break; 851 case 14: 852 case 15: 853 grant = r; 854 break; 855 default: 856 grant = false; 857 } 858 } 859 break; 860 case EL1: 861 { 862 uint8_t perm = (ap << 2) | (xn << 1) | pxn; 863 switch (perm) { 864 case 0: 865 case 2: 866 grant = r || w || (x && !sctlr.wxn); 867 break; 868 case 1: 869 case 3: 870 case 4: 871 case 5: 872 case 6: 873 case 7: 874 // regions that are writeable at EL0 should not be 875 // executable at EL1 876 grant = r || w; 877 break; 878 case 8: 879 case 10: 880 case 12: 881 case 14: 882 grant = r || x; 883 break; 884 case 9: 885 case 11: 886 case 13: 887 case 15: 888 grant = r; 889 break; 890 default: 891 grant = false; 892 } 893 } 894 break; 895 case EL2: 896 case EL3: 897 { 898 uint8_t perm = (ap & 0x2) | xn; 899 switch (perm) { 900 case 0: 901 grant = r || w || (x && !sctlr.wxn) ; 902 break; 903 case 1: 904 grant = r || w; 905 break; 906 case 2: 907 grant = r || x; 908 break; 909 case 3: 910 grant = r; 911 break; 912 default: 913 grant = false; 914 } 915 } 916 break; 917 } 918 } 919 920 if (!grant) { 921 if (is_fetch) { 922 permsFaults++; 923 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. " 924 "AP:%d priv:%d write:%d ns:%d sif:%d " 925 "sctlr.afe: %d\n", 926 ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe); 927 // Use PC value instead of vaddr because vaddr might be aligned to 928 // cache line and should not be the address reported in FAR 929 return std::make_shared<PrefetchAbort>( 930 req->getPC(), 931 ArmFault::PermissionLL + te->lookupLevel, 932 isStage2, ArmFault::LpaeTran); 933 } else { 934 permsFaults++; 935 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d " 936 "priv:%d write:%d\n", ap, is_priv, is_write); 937 return std::make_shared<DataAbort>( 938 vaddr_tainted, te->domain, is_write, 939 ArmFault::PermissionLL + te->lookupLevel, 940 isStage2, ArmFault::LpaeTran); 941 } 942 } 943 944 return NoFault; 945} 946 947Fault 948TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode, 949 Translation *translation, bool &delay, bool timing, 950 TLB::ArmTranslationType tranType, bool functional) 951{ 952 // No such thing as a functional timing access 953 assert(!(timing && functional)); 954 955 updateMiscReg(tc, tranType); 956 957 Addr vaddr_tainted = req->getVaddr(); 958 Addr vaddr = 0; 959 if (aarch64) 960 vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr); 961 else 962 vaddr = vaddr_tainted; 963 uint32_t flags = req->getFlags(); 964 965 bool is_fetch = (mode == Execute); 966 bool is_write = (mode == Write); 967 bool long_desc_format = aarch64 || (haveLPAE && ttbcr.eae); 968 ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran 969 : ArmFault::VmsaTran; 970 971 req->setAsid(asid); 972 973 DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n", 974 isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran); 975 976 DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x " 977 "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2, 978 scr, sctlr, flags, tranType); 979 980 if ((req->isInstFetch() && (!sctlr.i)) || 981 ((!req->isInstFetch()) && (!sctlr.c))){ 982 req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER); 983 } 984 if (!is_fetch) { 985 assert(flags & MustBeOne); 986 if (sctlr.a || !(flags & AllowUnaligned)) { 987 if (vaddr & mask(flags & AlignmentMask)) { 988 alignFaults++; 989 return std::make_shared<DataAbort>( 990 vaddr_tainted, 991 TlbEntry::DomainType::NoAccess, is_write, 992 ArmFault::AlignmentFault, isStage2, 993 tranMethod); 994 } 995 } 996 } 997 998 // If guest MMU is off or hcr.vm=0 go straight to stage2 999 if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) { 1000 1001 req->setPaddr(vaddr); 1002 // When the MMU is off the security attribute corresponds to the 1003 // security state of the processor 1004 if (isSecure) 1005 req->setFlags(Request::SECURE); 1006 1007 // @todo: double check this (ARM ARM issue C B3.2.1) 1008 if (long_desc_format || sctlr.tre == 0) { 1009 req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER); 1010 } else { 1011 if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2) 1012 req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER); 1013 } 1014 1015 // Set memory attributes 1016 TlbEntry temp_te; 1017 temp_te.ns = !isSecure; 1018 if (isStage2 || hcr.dc == 0 || isSecure || 1019 (isHyp && !(tranType & S1CTran))) { 1020 1021 temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal 1022 : TlbEntry::MemoryType::StronglyOrdered; 1023 temp_te.innerAttrs = 0x0; 1024 temp_te.outerAttrs = 0x0; 1025 temp_te.shareable = true; 1026 temp_te.outerShareable = true; 1027 } else { 1028 temp_te.mtype = TlbEntry::MemoryType::Normal; 1029 temp_te.innerAttrs = 0x3; 1030 temp_te.outerAttrs = 0x3; 1031 temp_te.shareable = false; 1032 temp_te.outerShareable = false; 1033 } 1034 temp_te.setAttributes(long_desc_format); 1035 DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: " 1036 "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n", 1037 temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs, 1038 isStage2); 1039 setAttr(temp_te.attributes); 1040 1041 return trickBoxCheck(req, mode, TlbEntry::DomainType::NoAccess); 1042 } 1043 1044 DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n", 1045 isStage2 ? "IPA" : "VA", vaddr_tainted, asid); 1046 // Translation enabled 1047 1048 TlbEntry *te = NULL; 1049 TlbEntry mergeTe; 1050 Fault fault = getResultTe(&te, req, tc, mode, translation, timing, 1051 functional, &mergeTe); 1052 // only proceed if we have a valid table entry 1053 if ((te == NULL) && (fault == NoFault)) delay = true; 1054 1055 // If we have the table entry transfer some of the attributes to the 1056 // request that triggered the translation 1057 if (te != NULL) { 1058 // Set memory attributes 1059 DPRINTF(TLBVerbose, 1060 "Setting memory attributes: shareable: %d, innerAttrs: %d, " 1061 "outerAttrs: %d, mtype: %d, isStage2: %d\n", 1062 te->shareable, te->innerAttrs, te->outerAttrs, 1063 static_cast<uint8_t>(te->mtype), isStage2); 1064 setAttr(te->attributes); 1065 1066 if (te->nonCacheable) 1067 req->setFlags(Request::UNCACHEABLE); 1068 1069 // Require requests to be ordered if the request goes to 1070 // strongly ordered or device memory (i.e., anything other 1071 // than normal memory requires strict order). 1072 if (te->mtype != TlbEntry::MemoryType::Normal) 1073 req->setFlags(Request::STRICT_ORDER); 1074 1075 Addr pa = te->pAddr(vaddr); 1076 req->setPaddr(pa); 1077 1078 if (isSecure && !te->ns) { 1079 req->setFlags(Request::SECURE); 1080 } 1081 if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) && 1082 (te->mtype != TlbEntry::MemoryType::Normal)) { 1083 // Unaligned accesses to Device memory should always cause an 1084 // abort regardless of sctlr.a 1085 alignFaults++; 1086 return std::make_shared<DataAbort>( 1087 vaddr_tainted, 1088 TlbEntry::DomainType::NoAccess, is_write, 1089 ArmFault::AlignmentFault, isStage2, 1090 tranMethod); 1091 } 1092 1093 // Check for a trickbox generated address fault 1094 if (fault == NoFault) { 1095 fault = trickBoxCheck(req, mode, te->domain); 1096 } 1097 } 1098 1099 // Generate Illegal Inst Set State fault if IL bit is set in CPSR 1100 if (fault == NoFault) { 1101 if (aarch64 && is_fetch && cpsr.il == 1) { 1102 return std::make_shared<IllegalInstSetStateFault>(); 1103 } 1104 } 1105 1106 return fault; 1107} 1108 1109Fault 1110TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode, 1111 TLB::ArmTranslationType tranType) 1112{ 1113 updateMiscReg(tc, tranType); 1114 1115 if (directToStage2) { 1116 assert(stage2Tlb); 1117 return stage2Tlb->translateAtomic(req, tc, mode, tranType); 1118 } 1119 1120 bool delay = false; 1121 Fault fault; 1122 if (FullSystem) 1123 fault = translateFs(req, tc, mode, NULL, delay, false, tranType); 1124 else 1125 fault = translateSe(req, tc, mode, NULL, delay, false); 1126 assert(!delay); 1127 return fault; 1128} 1129 1130Fault 1131TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode, 1132 TLB::ArmTranslationType tranType) 1133{ 1134 updateMiscReg(tc, tranType); 1135 1136 if (directToStage2) { 1137 assert(stage2Tlb); 1138 return stage2Tlb->translateFunctional(req, tc, mode, tranType); 1139 } 1140 1141 bool delay = false; 1142 Fault fault; 1143 if (FullSystem) 1144 fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true); 1145 else 1146 fault = translateSe(req, tc, mode, NULL, delay, false); 1147 assert(!delay); 1148 return fault; 1149} 1150 1151Fault 1152TLB::translateTiming(RequestPtr req, ThreadContext *tc, 1153 Translation *translation, Mode mode, TLB::ArmTranslationType tranType) 1154{ 1155 updateMiscReg(tc, tranType); 1156 1157 if (directToStage2) { 1158 assert(stage2Tlb); 1159 return stage2Tlb->translateTiming(req, tc, translation, mode, tranType); 1160 } 1161 1162 assert(translation); 1163 1164 return translateComplete(req, tc, translation, mode, tranType, isStage2); 1165} 1166 1167Fault 1168TLB::translateComplete(RequestPtr req, ThreadContext *tc, 1169 Translation *translation, Mode mode, TLB::ArmTranslationType tranType, 1170 bool callFromS2) 1171{ 1172 bool delay = false; 1173 Fault fault; 1174 if (FullSystem) 1175 fault = translateFs(req, tc, mode, translation, delay, true, tranType); 1176 else 1177 fault = translateSe(req, tc, mode, translation, delay, true); 1178 DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay, fault != 1179 NoFault); 1180 // If we have a translation, and we're not in the middle of doing a stage 1181 // 2 translation tell the translation that we've either finished or its 1182 // going to take a while. By not doing this when we're in the middle of a 1183 // stage 2 translation we prevent marking the translation as delayed twice, 1184 // one when the translation starts and again when the stage 1 translation 1185 // completes. 1186 if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) { 1187 if (!delay) 1188 translation->finish(fault, req, tc, mode); 1189 else 1190 translation->markDelayed(); 1191 } 1192 return fault; 1193} 1194 1195BaseMasterPort* 1196TLB::getMasterPort() 1197{ 1198 return &stage2Mmu->getPort(); 1199} 1200 1201void 1202TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType) 1203{ 1204 // check if the regs have changed, or the translation mode is different. 1205 // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle 1206 // one type of translation anyway 1207 if (miscRegValid && ((tranType == curTranType) || isStage2)) { 1208 return; 1209 } 1210 1211 DPRINTF(TLBVerbose, "TLB variables changed!\n"); 1212 cpsr = tc->readMiscReg(MISCREG_CPSR); 1213 // Dependencies: SCR/SCR_EL3, CPSR 1214 isSecure = inSecureState(tc); 1215 isSecure &= (tranType & HypMode) == 0; 1216 isSecure &= (tranType & S1S2NsTran) == 0; 1217 aarch64 = !cpsr.width; 1218 if (aarch64) { // AArch64 1219 aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el; 1220 switch (aarch64EL) { 1221 case EL0: 1222 case EL1: 1223 { 1224 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1); 1225 ttbcr = tc->readMiscReg(MISCREG_TCR_EL1); 1226 uint64_t ttbr_asid = ttbcr.a1 ? 1227 tc->readMiscReg(MISCREG_TTBR1_EL1) : 1228 tc->readMiscReg(MISCREG_TTBR0_EL1); 1229 asid = bits(ttbr_asid, 1230 (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48); 1231 } 1232 break; 1233 case EL2: 1234 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2); 1235 ttbcr = tc->readMiscReg(MISCREG_TCR_EL2); 1236 asid = -1; 1237 break; 1238 case EL3: 1239 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3); 1240 ttbcr = tc->readMiscReg(MISCREG_TCR_EL3); 1241 asid = -1; 1242 break; 1243 } 1244 scr = tc->readMiscReg(MISCREG_SCR_EL3); 1245 isPriv = aarch64EL != EL0; 1246 // @todo: modify this behaviour to support Virtualization in 1247 // AArch64 1248 vmid = 0; 1249 isHyp = false; 1250 directToStage2 = false; 1251 stage2Req = false; 1252 } else { // AArch32 1253 sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc, 1254 !isSecure)); 1255 ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc, 1256 !isSecure)); 1257 scr = tc->readMiscReg(MISCREG_SCR); 1258 isPriv = cpsr.mode != MODE_USER; 1259 if (haveLPAE && ttbcr.eae) { 1260 // Long-descriptor translation table format in use 1261 uint64_t ttbr_asid = tc->readMiscReg( 1262 flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1 1263 : MISCREG_TTBR0, 1264 tc, !isSecure)); 1265 asid = bits(ttbr_asid, 55, 48); 1266 } else { 1267 // Short-descriptor translation table format in use 1268 CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked( 1269 MISCREG_CONTEXTIDR, tc,!isSecure)); 1270 asid = context_id.asid; 1271 } 1272 prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc, 1273 !isSecure)); 1274 nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc, 1275 !isSecure)); 1276 dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc, 1277 !isSecure)); 1278 hcr = tc->readMiscReg(MISCREG_HCR); 1279 1280 if (haveVirtualization) { 1281 vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48); 1282 isHyp = cpsr.mode == MODE_HYP; 1283 isHyp |= tranType & HypMode; 1284 isHyp &= (tranType & S1S2NsTran) == 0; 1285 isHyp &= (tranType & S1CTran) == 0; 1286 if (isHyp) { 1287 sctlr = tc->readMiscReg(MISCREG_HSCTLR); 1288 } 1289 // Work out if we should skip the first stage of translation and go 1290 // directly to stage 2. This value is cached so we don't have to 1291 // compute it for every translation. 1292 stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure && 1293 !(tranType & S1CTran); 1294 directToStage2 = stage2Req && !sctlr.m; 1295 } else { 1296 vmid = 0; 1297 stage2Req = false; 1298 isHyp = false; 1299 directToStage2 = false; 1300 } 1301 } 1302 miscRegValid = true; 1303 curTranType = tranType; 1304} 1305 1306Fault 1307TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode, 1308 Translation *translation, bool timing, bool functional, 1309 bool is_secure, TLB::ArmTranslationType tranType) 1310{ 1311 bool is_fetch = (mode == Execute); 1312 bool is_write = (mode == Write); 1313 1314 Addr vaddr_tainted = req->getVaddr(); 1315 Addr vaddr = 0; 1316 ExceptionLevel target_el = aarch64 ? aarch64EL : EL1; 1317 if (aarch64) { 1318 vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr); 1319 } else { 1320 vaddr = vaddr_tainted; 1321 } 1322 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el); 1323 if (*te == NULL) { 1324 if (req->isPrefetch()) { 1325 // if the request is a prefetch don't attempt to fill the TLB or go 1326 // any further with the memory access (here we can safely use the 1327 // fault status for the short desc. format in all cases) 1328 prefetchFaults++; 1329 return std::make_shared<PrefetchAbort>( 1330 vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2); 1331 } 1332 1333 if (is_fetch) 1334 instMisses++; 1335 else if (is_write) 1336 writeMisses++; 1337 else 1338 readMisses++; 1339 1340 // start translation table walk, pass variables rather than 1341 // re-retreaving in table walker for speed 1342 DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n", 1343 vaddr_tainted, asid, vmid); 1344 Fault fault; 1345 fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode, 1346 translation, timing, functional, is_secure, 1347 tranType); 1348 // for timing mode, return and wait for table walk, 1349 if (timing || fault != NoFault) { 1350 return fault; 1351 } 1352 1353 *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el); 1354 if (!*te) 1355 printTlb(); 1356 assert(*te); 1357 } else { 1358 if (is_fetch) 1359 instHits++; 1360 else if (is_write) 1361 writeHits++; 1362 else 1363 readHits++; 1364 } 1365 return NoFault; 1366} 1367 1368Fault 1369TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode, 1370 Translation *translation, bool timing, bool functional, 1371 TlbEntry *mergeTe) 1372{ 1373 Fault fault; 1374 TlbEntry *s1Te = NULL; 1375 1376 Addr vaddr_tainted = req->getVaddr(); 1377 1378 // Get the stage 1 table entry 1379 fault = getTE(&s1Te, req, tc, mode, translation, timing, functional, 1380 isSecure, curTranType); 1381 // only proceed if we have a valid table entry 1382 if ((s1Te != NULL) && (fault == NoFault)) { 1383 // Check stage 1 permissions before checking stage 2 1384 if (aarch64) 1385 fault = checkPermissions64(s1Te, req, mode, tc); 1386 else 1387 fault = checkPermissions(s1Te, req, mode); 1388 if (stage2Req & (fault == NoFault)) { 1389 Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te, 1390 req, translation, mode, timing, functional, curTranType); 1391 fault = s2Lookup->getTe(tc, mergeTe); 1392 if (s2Lookup->isComplete()) { 1393 *te = mergeTe; 1394 // We've finished with the lookup so delete it 1395 delete s2Lookup; 1396 } else { 1397 // The lookup hasn't completed, so we can't delete it now. We 1398 // get round this by asking the object to self delete when the 1399 // translation is complete. 1400 s2Lookup->setSelfDelete(); 1401 } 1402 } else { 1403 // This case deals with an S1 hit (or bypass), followed by 1404 // an S2 hit-but-perms issue 1405 if (isStage2) { 1406 DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n", 1407 vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault); 1408 if (fault != NoFault) { 1409 ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get()); 1410 armFault->annotate(ArmFault::S1PTW, false); 1411 armFault->annotate(ArmFault::OVA, vaddr_tainted); 1412 } 1413 } 1414 *te = s1Te; 1415 } 1416 } 1417 return fault; 1418} 1419 1420ArmISA::TLB * 1421ArmTLBParams::create() 1422{ 1423 return new ArmISA::TLB(this); 1424} 1425