/*
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 *          Nathan Binkert
 *          Steve Reinhardt
 */

#include "arch/arm/tlb.hh"

#include <memory>
#include <string>
#include <vector>

#include "arch/arm/faults.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/stage2_mmu.hh"
#include "arch/arm/system.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/utility.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "params/ArmTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"

using namespace std;
using namespace ArmISA;

TLB::TLB(const ArmTLBParams *p)
    : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
      isStage2(p->is_stage2), stage2Req(false), _attr(0),
      directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
      stage2Mmu(NULL), rangeMRU(1),
      aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
      isHyp(false), asid(0), vmid(0), dacr(0),
      miscRegValid(false), curTranType(NormalTran)
{
    tableWalker->setTlb(this);

    // Cache system-level properties
    haveLPAE = tableWalker->haveLPAE();
    haveVirtualization = tableWalker->haveVirtualization();
    haveLargeAsid64 = tableWalker->haveLargeAsid64();
}

TLB::~TLB()
{
    delete[] table;
}

void
TLB::init()
{
    if (stage2Mmu && !isStage2)
        stage2Tlb = stage2Mmu->stage2Tlb();
}

void
TLB::setMMU(Stage2MMU *m, MasterID master_id)
{
    stage2Mmu = m;
    tableWalker->setMMU(m, master_id);
}

bool
TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    updateMiscReg(tc);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(tc, va, pa);
    }

    TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
                         aarch64 ? aarch64EL : EL1);
    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}

Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    return NoFault;
}

TlbEntry*
TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
            bool functional, bool ignore_asn, uint8_t target_el)
{
    TlbEntry *retval = NULL;

    // Maintaining LRU array
    int x = 0;
    while (retval == NULL && x < size) {
        if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
             target_el)) ||
            (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
            // We only move the hit entry ahead when the position is higher
            // than rangeMRU
            if (x > rangeMRU && !functional) {
                TlbEntry tmp_entry = table[x];
                for (int i = x; i > 0; i--)
                    table[i] = table[i - 1];
                table[0] = tmp_entry;
                retval = &table[0];
            } else {
                retval = &table[x];
            }
            break;
        }
        ++x;
    }

    DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
            "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
            "el: %d\n",
            va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
            retval ? retval->pfn : 0, retval ? retval->size : 0,
            retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
            retval ? retval->ns : 0, retval ? retval->nstid : 0,
            retval ? retval->global : 0, retval ? retval->asid : 0,
            retval ? retval->el : 0);

    return retval;
}
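
// Note on the replacement policy in lookup() above: the table is kept in
// (approximate) MRU order.  A hit at an index above rangeMRU promotes the
// entry to slot 0 and shifts the intervening entries down one place; e.g.
// with rangeMRU == 1 and contents [A, B, C, D], a hit on C gives
// [C, A, B, D], while a hit on B (index 1, not above rangeMRU) leaves the
// order untouched.  Functional lookups never reorder, presumably so that
// debug accesses do not disturb the timing-visible replacement state.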

// Insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
            " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
            " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
            entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
            entry.global, entry.valid, entry.nonCacheable, entry.xn,
            entry.ap, static_cast<uint8_t>(entry.domain), entry.ns,
            entry.nstid, entry.isHyp);

    if (table[size - 1].valid)
        DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
                "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
                table[size-1].vpn << table[size-1].N, table[size-1].asid,
                table[size-1].vmid, table[size-1].pfn << table[size-1].N,
                table[size-1].size, table[size-1].ap, table[size-1].ns,
                table[size-1].nstid, table[size-1].global,
                table[size-1].isHyp, table[size-1].el);

    // Insert at the MRU position and evict the LRU entry
    for (int i = size - 1; i > 0; --i)
        table[i] = table[i-1];
    table[0] = entry;

    inserts++;
    ppRefills->notify(1);
}

void
TLB::printTlb() const
{
    int x = 0;
    TlbEntry *te;
    DPRINTF(TLB, "Current TLB contents:\n");
    while (x < size) {
        te = &table[x];
        if (te->valid)
            DPRINTF(TLB, " * %s\n", te->print());
        ++x;
    }
}

void
TLB::flushAllSecurity(bool secure_lookup, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
            (secure_lookup ? "secure" : "non-secure"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    // if we're currently in hyp mode
    if (!isStage2 && isHyp) {
        stage2Tlb->flushAllSecurity(secure_lookup, true);
    }
}

void
TLB::flushAllNs(bool hyp, uint8_t target_el, bool ignore_el)
{
    DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
            (hyp ? "hyp" : "non-hyp"));
    int x = 0;
    TlbEntry *te;
    while (x < size) {
        te = &table[x];
        if (te->valid && te->nstid && te->isHyp == hyp &&
            checkELMatch(target_el, te->el, ignore_el)) {

            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
            te->valid = false;
        }
        ++x;
    }

    flushTlb++;

    // If there's a second stage TLB (and we're not it) then flush it as well
    if (!isStage2 && !hyp) {
        stage2Tlb->flushAllNs(false, true);
    }
}

void
TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup,
                  uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
            "(%s lookup)\n", mva, asn,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, asn, secure_lookup, false, false, target_el);
    flushTlbMvaAsid++;
}

void
TLB::flushAsid(uint64_t asn, bool secure_lookup, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
            (secure_lookup ? "secure" : "non-secure"));

    int x = 0;
    TlbEntry *te;

    while (x < size) {
        te = &table[x];
        if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
            (te->vmid == vmid || secure_lookup) &&
            checkELMatch(target_el, te->el, false)) {

            te->valid = false;
            DPRINTF(TLB, " - %s\n", te->print());
            flushedEntries++;
        }
        ++x;
    }
    flushTlbAsid++;
}

void
TLB::flushMva(Addr mva, bool secure_lookup, bool hyp, uint8_t target_el)
{
    DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
            (secure_lookup ? "secure" : "non-secure"));
    _flushMva(mva, 0xbeef, secure_lookup, hyp, true, target_el);
    flushTlbMva++;
}

void
TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool hyp,
               bool ignore_asn, uint8_t target_el)
{
    TlbEntry *te;
    // D5.7.2: Sign-extend address to 64 bits
    mva = sext<56>(mva);
    te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                target_el);
    while (te != NULL) {
        if (secure_lookup == !te->nstid) {
            DPRINTF(TLB, " - %s\n", te->print());
            te->valid = false;
            flushedEntries++;
        }
        te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
                    target_el);
    }
}

bool
TLB::checkELMatch(uint8_t target_el, uint8_t tentry_el, bool ignore_el)
{
    bool elMatch = true;
    if (!ignore_el) {
        if (target_el == 2 || target_el == 3) {
            elMatch = (tentry_el == target_el);
        } else {
            elMatch = (tentry_el == 0) || (tentry_el == 1);
        }
    }
    return elMatch;
}

void
TLB::drainResume()
{
    // We might have unserialized something or switched CPUs, so make
    // sure to re-read the misc regs.
    miscRegValid = false;
}

void
TLB::takeOverFrom(BaseTLB *_otlb)
{
    TLB *otlb = dynamic_cast<TLB*>(_otlb);
    /* Make sure we actually have a valid type */
    if (otlb) {
        _attr = otlb->_attr;
        haveLPAE = otlb->haveLPAE;
        directToStage2 = otlb->directToStage2;
        stage2Req = otlb->stage2Req;

        /* Sync the stage2 MMU if they exist in both
         * the old CPU and the new
         */
        if (!isStage2 &&
            stage2Tlb && otlb->stage2Tlb) {
            stage2Tlb->takeOverFrom(otlb->stage2Tlb);
        }
    } else {
        panic("Incompatible TLB type!");
    }
}

void
TLB::serialize(ostream &os)
{
    DPRINTF(Checkpoint, "Serializing Arm TLB\n");

    SERIALIZE_SCALAR(_attr);
    SERIALIZE_SCALAR(haveLPAE);
    SERIALIZE_SCALAR(directToStage2);
    SERIALIZE_SCALAR(stage2Req);

    int num_entries = size;
    SERIALIZE_SCALAR(num_entries);
    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.TlbEntry%d", name(), i));
        table[i].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    DPRINTF(Checkpoint, "Unserializing Arm TLB\n");

    UNSERIALIZE_SCALAR(_attr);
    UNSERIALIZE_SCALAR(haveLPAE);
    UNSERIALIZE_SCALAR(directToStage2);
    UNSERIALIZE_SCALAR(stage2Req);

    int num_entries;
    UNSERIALIZE_SCALAR(num_entries);
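
    // The checkpoint may have been taken with a TLB of a different size
    // than the one we are restoring into, so only restore as many entries
    // as both sides have.  Dropping any surplus is safe: the TLB is only a
    // cache of the page tables and will refill on demand.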
    for (int i = 0; i < min(size, num_entries); i++) {
        table[i].unserialize(cp, csprintf("%s.TlbEntry%d", section, i));
    }
}

void
TLB::regStats()
{
    instHits
        .name(name() + ".inst_hits")
        .desc("ITB inst hits")
        ;

    instMisses
        .name(name() + ".inst_misses")
        .desc("ITB inst misses")
        ;

    instAccesses
        .name(name() + ".inst_accesses")
        .desc("ITB inst accesses")
        ;

    readHits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    readMisses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    readAccesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    writeHits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    writeMisses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    writeAccesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    flushTlb
        .name(name() + ".flush_tlb")
        .desc("Number of times complete TLB was flushed")
        ;

    flushTlbMva
        .name(name() + ".flush_tlb_mva")
        .desc("Number of times TLB was flushed by MVA")
        ;

    flushTlbMvaAsid
        .name(name() + ".flush_tlb_mva_asid")
        .desc("Number of times TLB was flushed by MVA & ASID")
        ;

    flushTlbAsid
        .name(name() + ".flush_tlb_asid")
        .desc("Number of times TLB was flushed by ASID")
        ;

    flushedEntries
        .name(name() + ".flush_entries")
        .desc("Number of entries that have been flushed from TLB")
        ;

    alignFaults
        .name(name() + ".align_faults")
        .desc("Number of TLB faults due to alignment restrictions")
        ;

    prefetchFaults
        .name(name() + ".prefetch_faults")
        .desc("Number of TLB faults due to prefetch")
        ;

    domainFaults
        .name(name() + ".domain_faults")
        .desc("Number of TLB faults due to domain restrictions")
        ;

    permsFaults
        .name(name() + ".perms_faults")
        .desc("Number of TLB faults due to permissions restrictions")
        ;

    instAccesses = instHits + instMisses;
    readAccesses = readHits + readMisses;
    writeAccesses = writeHits + writeMisses;
    hits = readHits + writeHits + instHits;
    misses = readMisses + writeMisses + instMisses;
    accesses = readAccesses + writeAccesses + instAccesses;
}

void
TLB::regProbePoints()
{
    ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
}

Fault
TLB::translateSe(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing)
{
    updateMiscReg(tc);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
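
    // The low bits of the request flags (flags & AlignmentMask) hold the
    // log2 of the required alignment, so mask(flags & AlignmentMask) builds
    // the byte mask an aligned address must not intersect; e.g. for a
    // 4-byte access the field holds 2 and the check below reduces to
    // (vaddr & 0x3) != 0.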
    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::VmsaTran);
            }
        }
    }

    Addr paddr;
    Process *p = tc->getProcessPtr();

    if (!p->pTable->translate(vaddr, paddr))
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    req->setPaddr(paddr);

    return NoFault;
}

Fault
TLB::trickBoxCheck(RequestPtr req, Mode mode, TlbEntry::DomainType domain)
{
    return NoFault;
}

Fault
TLB::walkTrickBoxCheck(Addr pa, bool is_secure, Addr va, Addr sz,
                       bool is_exec, bool is_write,
                       TlbEntry::DomainType domain,
                       LookupLevel lookup_level)
{
    return NoFault;
}

Fault
TLB::checkPermissions(TlbEntry *te, RequestPtr req, Mode mode)
{
    Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv = isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
                                                         : ArmFault::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr, TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr, ArmFault::PrefetchUncacheable,
                isStage2, tranMethod);
        }
    }

    if (!te->longDescFormat) {
        switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            domainFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch)
                return std::make_shared<PrefetchAbort>(
                    vaddr,
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
            else
                return std::make_shared<DataAbort>(
                    vaddr, te->domain, is_write,
                    ArmFault::DomainLL + te->lookupLevel,
                    isStage2, tranMethod);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2:1],1'b0}, i.e. always three bits
    uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (e.g. for reading stage 1 page table
    // entries) then don't perform the AP permissions check, we still do
    // the HAP check below.
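    // For reference, the VMSAv7 short-descriptor AP[2:0] encodings handled
    // below: 0b000 no access (subject to SCTLR.RS on legacy non-XP
    // configurations), 0b001 privileged RW, 0b010 privileged RW / user RO,
    // 0b011 RW at any privilege, 0b100 UNPREDICTABLE, 0b101 privileged RO,
    // 0b110 and 0b111 RO at any privilege.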
    if (isStage2) {
        abt = false;
    } else {
        switch (ap) {
          case 0:
            DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
                    (int)sctlr.rs);
            if (!sctlr.xp) {
                switch ((int)sctlr.rs) {
                  case 2:
                    abt = is_write;
                    break;
                  case 1:
                    abt = is_write || !is_priv;
                    break;
                  case 0:
                  case 3:
                  default:
                    abt = true;
                    break;
                }
            } else {
                abt = true;
            }
            break;
          case 1:
            abt = !is_priv;
            break;
          case 2:
            abt = !is_priv && is_write;
            isWritable = is_priv;
            break;
          case 3:
            abt = false;
            break;
          case 4:
            panic("UNPRED permissions\n");
          case 5:
            abt = !is_priv || is_write;
            isWritable = false;
            break;
          case 6:
          case 7:
            abt = is_write;
            isWritable = false;
            break;
          default:
            panic("Unknown permissions %#x\n", ap);
        }
    }

    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn = te->xn || (isWritable && sctlr.wxn) ||
              (ap == 3 && sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && !is_priv) ||
                     (isSecure && te->ns && scr.sif))) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
                ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
        return std::make_shared<PrefetchAbort>(
            vaddr,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, tranMethod);
    } else if (abt | hapAbt) {
        permsFaults++;
        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                "priv:%d write:%d\n", ap, is_priv, is_write);
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2 | !abt, tranMethod);
    }
    return NoFault;
}

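// In checkPermissions64 below, the AArch64 stage 1 permission check is
// driven by a small lookup key built from the descriptor bits:
// perm = (AP << 2) | (XN << 1) | PXN, where AP is the 2-bit AP[2:1] field
// of the long-descriptor format.  For example, ap == 0b01 (EL0 and EL1
// read-write) with xn == 0 and pxn == 0 gives perm == 4, which at EL0
// grants r, w, or x subject to SCTLR.WXN in the switch below.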
Fault
TLB::checkPermissions64(TlbEntry *te, RequestPtr req, Mode mode,
                        ThreadContext *tc)
{
    assert(aarch64);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);

    uint32_t flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);

    updateMiscReg(tc, curTranType);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (isStage2 && req->isPTWalk() && hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            isStage2, ArmFault::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    ArmFault::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted,
                ArmFault::PrefetchUncacheable,
                isStage2, ArmFault::LpaeTran);
        }
    }

    uint8_t ap = 0x3 & (te->ap);  // 2-bit access protection field
    bool grant = false;

    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;
    bool r = !is_write && !is_fetch;
    bool w = is_write;
    bool x = is_fetch;
    DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", ap, xn, pxn, r, w, x);

    if (isStage2) {
        panic("Virtualization in AArch64 state is not supported yet");
    } else {
        switch (aarch64EL) {
          case EL0:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 1:
                  case 8:
                  case 9:
                    grant = x;
                    break;
                  case 4:
                  case 5:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 6:
                  case 7:
                    grant = r || w;
                    break;
                  case 12:
                  case 13:
                    grant = r || x;
                    break;
                  case 14:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL1:
            {
                uint8_t perm = (ap << 2) | (xn << 1) | pxn;
                switch (perm) {
                  case 0:
                  case 2:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                  case 3:
                  case 4:
                  case 5:
                  case 6:
                  case 7:
                    // regions that are writeable at EL0 should not be
                    // executable at EL1
                    grant = r || w;
                    break;
                  case 8:
                  case 10:
                  case 12:
                  case 14:
                    grant = r || x;
                    break;
                  case 9:
                  case 11:
                  case 13:
                  case 15:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
          case EL2:
          case EL3:
            {
                uint8_t perm = (ap & 0x2) | xn;
                switch (perm) {
                  case 0:
                    grant = r || w || (x && !sctlr.wxn);
                    break;
                  case 1:
                    grant = r || w;
                    break;
                  case 2:
                    grant = r || x;
                    break;
                  case 3:
                    grant = r;
                    break;
                  default:
                    grant = false;
                }
            }
            break;
        }
    }

    if (!grant) {
        if (is_fetch) {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
                    "AP:%d priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
                    ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned
            // to cache line and should not be the address reported in FAR
            return std::make_shared<PrefetchAbort>(
                req->getPC(),
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        } else {
            permsFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                    "priv:%d write:%d\n", ap, is_priv, is_write);
            return std::make_shared<DataAbort>(
                vaddr_tainted, te->domain, is_write,
                ArmFault::PermissionLL + te->lookupLevel,
                isStage2, ArmFault::LpaeTran);
        }
    }

    return NoFault;
}

Fault
TLB::translateFs(RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 TLB::ArmTranslationType tranType, bool functional)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    updateMiscReg(tc, tranType);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (aarch64)
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, ttbcr);
    else
        vaddr = vaddr_tainted;
    uint32_t flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = aarch64 || (haveLPAE && ttbcr.eae);
    ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
                                                       : ArmFault::VmsaTran;

    req->setAsid(asid);

    DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d "
            "S1S2NsTran:%d\n", isPriv, flags & UserMode, isSecure,
            tranType & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#x tranType 0x%x\n", vaddr_tainted, mode, isStage2,
            scr, sctlr, flags, tranType);

    // If this is a clrex instruction, provide a PA of 0 with no fault.
    // This will force the monitor to set the tracked address to 0,
    // a bit of a hack, but it effectively clears this processor's monitor.
    if (flags & Request::CLEAR_LL) {
        // @todo: check implications of security extensions
        req->setPaddr(0);
        req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        req->setFlags(Request::CLEAR_LL);
        return NoFault;
    }
    if ((req->isInstFetch() && (!sctlr.i)) ||
        ((!req->isInstFetch()) && (!sctlr.c))) {
        req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
    }
    if (!is_fetch) {
        assert(flags & MustBeOne);
        if (sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, isStage2,
                    tranMethod);
            }
        }
    }

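    // With address translation disabled, the virtual address is used as a
    // flat physical address.  Instruction fetches are then treated as
    // Normal memory, while data accesses default to Strongly-ordered,
    // unless HCR.DC promotes them to Normal cacheable (handled below).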
    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {

        req->setPaddr(vaddr);
        // When the MMU is off the security attribute corresponds to the
        // security state of the processor
        if (isSecure)
            req->setFlags(Request::SECURE);

        // @todo: double check this (ARM ARM issue C B3.2.1)
        if (long_desc_format || sctlr.tre == 0) {
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        } else {
            if (nmrr.ir0 == 0 || nmrr.or0 == 0 || prrr.tr0 != 0x2)
                req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
        }

        // Set memory attributes
        TlbEntry temp_te;
        temp_te.ns = !isSecure;
        if (isStage2 || hcr.dc == 0 || isSecure ||
            (isHyp && !(tranType & S1CTran))) {

            temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
                                     : TlbEntry::MemoryType::StronglyOrdered;
            temp_te.innerAttrs = 0x0;
            temp_te.outerAttrs = 0x0;
            temp_te.shareable = true;
            temp_te.outerShareable = true;
        } else {
            temp_te.mtype = TlbEntry::MemoryType::Normal;
            temp_te.innerAttrs = 0x3;
            temp_te.outerAttrs = 0x3;
            temp_te.shareable = false;
            temp_te.outerShareable = false;
        }
        temp_te.setAttributes(long_desc_format);
        DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
                "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
                temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
                isStage2);
        setAttr(temp_te.attributes);

        return trickBoxCheck(req, mode, TlbEntry::DomainType::NoAccess);
    }

    DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
            isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
    // Translation enabled

    TlbEntry *te = NULL;
    TlbEntry mergeTe;
    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe);
    // only proceed if we have a valid table entry
    if ((te == NULL) && (fault == NoFault)) delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (te != NULL) {
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, isStage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), isStage2);
        setAttr(te->attributes);

        if (te->nonCacheable)
            req->setFlags(Request::UNCACHEABLE);

        // Require requests to be ordered if the request goes to
        // strongly ordered or device memory (i.e., anything other
        // than normal memory requires strict order).
        if (te->mtype != TlbEntry::MemoryType::Normal)
            req->setFlags(Request::STRICT_ORDER);

        Addr pa = te->pAddr(vaddr);
        req->setPaddr(pa);

        if (isSecure && !te->ns) {
            req->setFlags(Request::SECURE);
        }
        if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            alignFaults++;
            return std::make_shared<DataAbort>(
                vaddr_tainted,
                TlbEntry::DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, isStage2,
                tranMethod);
        }

        // Check for a trickbox generated address fault
        if (fault == NoFault) {
            fault = trickBoxCheck(req, mode, te->domain);
        }
    }

    // Generate Illegal Inst Set State fault if IL bit is set in CPSR
    if (fault == NoFault) {
        if (aarch64 && is_fetch && cpsr.il == 1) {
            return std::make_shared<IllegalInstSetStateFault>();
        }
    }

    return fault;
}

Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateAtomic(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode,
                         TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateFunctional(req, tc, mode, tranType);
    }

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false, tranType,
                            true);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false);
    assert(!delay);
    return fault;
}

Fault
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
                     Translation *translation, Mode mode,
                     TLB::ArmTranslationType tranType)
{
    updateMiscReg(tc, tranType);

    if (directToStage2) {
        assert(stage2Tlb);
        return stage2Tlb->translateTiming(req, tc, translation, mode,
                                          tranType);
    }

    assert(translation);

    return translateComplete(req, tc, translation, mode, tranType, isStage2);
}

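// translateComplete is called from translateTiming above and, judging by
// the callFromS2 flag, again by the stage 2 machinery when a stage 2 lookup
// finishes on behalf of a stage 1 request; the flag distinguishes the two
// so the completion/delay bookkeeping below only happens once per
// translation.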
Fault
TLB::translateComplete(RequestPtr req, ThreadContext *tc,
                       Translation *translation, Mode mode,
                       TLB::ArmTranslationType tranType, bool callFromS2)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true,
                            tranType);
    else
        fault = translateSe(req, tc, mode, translation, delay, true);
    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a
    // stage 2 translation, tell the translation that we've either finished
    // or it's going to take a while. By not doing this when we're in the
    // middle of a stage 2 translation we prevent marking the translation as
    // delayed twice, once when the translation starts and again when the
    // stage 1 translation completes.
    if (translation && (callFromS2 || !stage2Req || req->hasPaddr() ||
                        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}

BaseMasterPort*
TLB::getMasterPort()
{
    return &stage2Mmu->getPort();
}

void
TLB::updateMiscReg(ThreadContext *tc, ArmTranslationType tranType)
{
    // check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
    // one type of translation anyway
    if (miscRegValid && ((tranType == curTranType) || isStage2)) {
        return;
    }

    DPRINTF(TLBVerbose, "TLB variables changed!\n");
    cpsr = tc->readMiscReg(MISCREG_CPSR);
    // Dependencies: SCR/SCR_EL3, CPSR
    isSecure = inSecureState(tc);
    isSecure &= (tranType & HypMode) == 0;
    isSecure &= (tranType & S1S2NsTran) == 0;
    aarch64 = !cpsr.width;
    if (aarch64) {  // AArch64
        aarch64EL = (ExceptionLevel) (uint8_t) cpsr.el;
        switch (aarch64EL) {
          case EL0:
          case EL1:
            {
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
                    tc->readMiscReg(MISCREG_TTBR0_EL1);
                asid = bits(ttbr_asid,
                            (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case EL2:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            asid = -1;
            break;
          case EL3:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
            asid = -1;
            break;
        }
        scr = tc->readMiscReg(MISCREG_SCR_EL3);
        isPriv = aarch64EL != EL0;
        // @todo: modify this behaviour to support Virtualization in
        // AArch64
        vmid = 0;
        isHyp = false;
        directToStage2 = false;
        stage2Req = false;
    } else {  // AArch32
        sctlr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_SCTLR, tc,
                                                       !isSecure));
        ttbcr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_TTBCR, tc,
                                                       !isSecure));
        scr = tc->readMiscReg(MISCREG_SCR);
        isPriv = cpsr.mode != MODE_USER;
        if (haveLPAE && ttbcr.eae) {
            // Long-descriptor translation table format in use
            uint64_t ttbr_asid = tc->readMiscReg(
                flattenMiscRegNsBanked(ttbcr.a1 ? MISCREG_TTBR1
                                                : MISCREG_TTBR0,
                                       tc, !isSecure));
            asid = bits(ttbr_asid, 55, 48);
        } else {
            // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(flattenMiscRegNsBanked(
                MISCREG_CONTEXTIDR, tc, !isSecure));
            asid = context_id.asid;
        }
        prrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_PRRR, tc,
                                                      !isSecure));
        nmrr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_NMRR, tc,
                                                      !isSecure));
        dacr = tc->readMiscReg(flattenMiscRegNsBanked(MISCREG_DACR, tc,
                                                      !isSecure));
        hcr = tc->readMiscReg(MISCREG_HCR);

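        // Two pieces of routing state are cached here: stage2Req records
        // that a stage 2 translation will be needed in addition to stage 1,
        // while directToStage2 additionally implies stage 1 is disabled
        // (SCTLR.M == 0), so requests can be handed straight to the stage 2
        // TLB.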
        if (haveVirtualization) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            isHyp = cpsr.mode == MODE_HYP;
            isHyp |= tranType & HypMode;
            isHyp &= (tranType & S1S2NsTran) == 0;
            isHyp &= (tranType & S1CTran) == 0;
            if (isHyp) {
                sctlr = tc->readMiscReg(MISCREG_HSCTLR);
            }
            // Work out if we should skip the first stage of translation and
            // go directly to stage 2. This value is cached so we don't have
            // to compute it for every translation.
            stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
                        !(tranType & S1CTran);
            directToStage2 = stage2Req && !sctlr.m;
        } else {
            vmid = 0;
            stage2Req = false;
            isHyp = false;
            directToStage2 = false;
        }
    }
    miscRegValid = true;
    curTranType = tranType;
}

Fault
TLB::getTE(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           bool is_secure, TLB::ArmTranslationType tranType)
{
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
    if (aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, ttbcr);
    } else {
        vaddr = vaddr_tainted;
    }
    *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
                 target_el);
    if (*te == NULL) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or
            // go any further with the memory access (here we can safely use
            // the fault status for the short desc. format in all cases)
            prefetchFaults++;
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
        }

        if (is_fetch)
            instMisses++;
        else if (is_write)
            writeMisses++;
        else
            readMisses++;

        // start translation table walk, pass variables rather than
        // re-retrieving them in the table walker for speed
        DPRINTF(TLB, "TLB Miss: Starting hardware table walker for "
                "%#x(%d:%d)\n", vaddr_tainted, asid, vmid);
        Fault fault;
        fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
                                  translation, timing, functional, is_secure,
                                  tranType);
        // for timing mode, return and wait for table walk,
        if (timing || fault != NoFault) {
            return fault;
        }

        *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false,
                     target_el);
        if (!*te)
            printTlb();
        assert(*te);
    } else {
        if (is_fetch)
            instHits++;
        else if (is_write)
            writeHits++;
        else
            readHits++;
    }
    return NoFault;
}

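// getResultTe drives the two-stage lookup: it fetches the stage 1 entry,
// checks its permissions, and, when a stage 2 translation is also required,
// kicks off a Stage2LookUp whose result is merged into mergeTe so that the
// caller sees a single combined translation entry.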
Fault
TLB::getResultTe(TlbEntry **te, RequestPtr req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool timing, bool functional,
                 TlbEntry *mergeTe)
{
    Fault fault;
    TlbEntry *s1Te = NULL;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
                  isSecure, curTranType);
    // only proceed if we have a valid table entry
    if ((s1Te != NULL) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (aarch64)
            fault = checkPermissions64(s1Te, req, mode, tc);
        else
            fault = checkPermissions(s1Te, req, mode);
        if (stage2Req && (fault == NoFault)) {
            Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
                req, translation, mode, timing, functional, curTranType);
            fault = s2Lookup->getTe(tc, mergeTe);
            if (s2Lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2Lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now.
                // We get round this by asking the object to self delete
                // when the translation is complete.
                s2Lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            if (isStage2) {
                DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                        vaddr_tainted,
                        req->hasPaddr() ? req->getPaddr() : ~0, fault);
                if (fault != NoFault) {
                    ArmFault *armFault =
                        reinterpret_cast<ArmFault *>(fault.get());
                    armFault->annotate(ArmFault::S1PTW, false);
                    armFault->annotate(ArmFault::OVA, vaddr_tainted);
                }
            }
            *te = s1Te;
        }
    }
    return fault;
}

ArmISA::TLB *
ArmTLBParams::create()
{
    return new ArmISA::TLB(this);
}