/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Steve Reinhardt
 *          Andrew Schultz
 */

#include "arch/alpha/tlb.hh"

#include <algorithm>
#include <memory>
#include <string>
#include <vector>

#include "arch/alpha/faults.hh"
#include "arch/alpha/pagetable.hh"
#include "arch/generic/debugfaults.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"
#include "sim/full_system.hh"

using namespace std;

namespace AlphaISA {

///////////////////////////////////////////////////////////////////////
//
// Alpha TLB
//

#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

#define MODE2MASK(X) (1 << (X))

TLB::TLB(const Params *p)
    : BaseTLB(p), table(p->size), nlu(0)
{
    flushCache();
}

TLB::~TLB()
{
}

void
TLB::regStats()
{
    BaseTLB::regStats();

    fetch_hits
        .name(name() + ".fetch_hits")
        .desc("ITB hits");
    fetch_misses
        .name(name() + ".fetch_misses")
        .desc("ITB misses");
    fetch_acv
        .name(name() + ".fetch_acv")
        .desc("ITB access violations");
    fetch_accesses
        .name(name() + ".fetch_accesses")
        .desc("ITB accesses");

    fetch_accesses = fetch_hits + fetch_misses;

    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        ;

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        ;

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    data_hits
        .name(name() + ".data_hits")
        .desc("DTB hits")
        ;

    data_misses
        .name(name() + ".data_misses")
        .desc("DTB misses")
        ;

    data_acv
        .name(name() + ".data_acv")
        .desc("DTB access violations")
        ;

    data_accesses
        .name(name() + ".data_accesses")
        .desc("DTB accesses")
        ;

    data_hits = read_hits + write_hits;
    data_misses = read_misses + write_misses;
    data_acv = read_acv + write_acv;
    data_accesses = read_accesses + write_accesses;
}
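// A note on the lookup path below: lookup() first probes EntryCache,
// a small most-recently-used cache of three TlbEntry pointers, and
// only on a miss falls back to lookupTable, the multimap keyed by
// VPN.  A minimal usage sketch, mirroring the call pattern in
// translateData() further down (names come from that code and are
// illustrative here):
//
//     int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
//     TlbEntry *entry = lookup(VAddr(vaddr).vpn(), asn);
//     if (entry) // hit: compose the physical address
//         paddr = (entry->ppn << PageShift) + VAddr(vaddr).offset();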
// look up an entry in the TLB
TlbEntry *
TLB::lookup(Addr vpn, uint8_t asn)
{
    // assume not found...
    TlbEntry *retval = NULL;

    if (EntryCache[0]) {
        if (vpn == EntryCache[0]->tag &&
            (EntryCache[0]->asma || EntryCache[0]->asn == asn))
            retval = EntryCache[0];
        else if (EntryCache[1]) {
            if (vpn == EntryCache[1]->tag &&
                (EntryCache[1]->asma || EntryCache[1]->asn == asn))
                retval = EntryCache[1];
            else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
                     (EntryCache[2]->asma || EntryCache[2]->asn == asn))
                retval = EntryCache[2];
        }
    }

    if (retval == NULL) {
        PageTable::const_iterator i = lookupTable.find(vpn);
        if (i != lookupTable.end()) {
            // guard against running off the end of the multimap when
            // no entry with a matching ASN exists for this VPN
            while (i != lookupTable.end() && i->first == vpn) {
                int index = i->second;
                TlbEntry *entry = &table[index];
                assert(entry->valid);
                if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
                    retval = updateCache(entry);
                    break;
                }

                ++i;
            }
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}

Fault
TLB::checkCacheability(const RequestPtr &req, bool itb)
{
    // in Alpha, cacheability is controlled by upper-level bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit
     * 40.  The Turbolaser platform (and EV5) support having the bit
     * in 39, but Tsunami (which Linux assumes uses an EV6) generates
     * accesses with the bit in 40.  So we must check for both, but we
     * have debug flags to catch a weird case where both are used,
     * which shouldn't happen.
     */

    if (req->getPaddr() & PAddrUncachedBit43) {
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return std::make_shared<UnimpFault>(
                "IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);

            // Clear bits 42:35 of the physical address (10-2 in
            // Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
        }
        // We shouldn't be able to read from an uncacheable address in Alpha
        // as we don't have a ROM and we don't want to try to fetch from a
        // device register as we destroy any data that is clear-on-read.
        if (req->isUncacheable() && itb)
            return std::make_shared<UnimpFault>(
                "CPU trying to fetch from uncached I/O");
    }
    return NoFault;
}
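// A note on using checkCacheability(): both translateInst() and
// translateData() call it as their final step, once req->getPaddr()
// is valid.  A minimal sketch of that flow (illustrative only; it
// mirrors the call sites below rather than defining a new API):
//
//     Fault fault = checkCacheability(req);   // data side
//     // or: checkCacheability(req, true)     // instruction side
//     if (fault != NoFault)
//         return fault;  // e.g. UnimpFault for IPR space or an
//                        // uncached instruction fetch
//     // otherwise req may now carry UNCACHEABLE | STRICT_ORDER
//     // and a paddr with bits 42:35 cleared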
// insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    flushCache();
    VAddr vaddr = addr;
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);

    table[nlu] = entry;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    std::fill(table.begin(), table.end(), TlbEntry());
    flushCache();
    lookupTable.clear();
    nlu = 0;
}

void
TLB::flushProcesses()
{
    flushCache();
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!entry->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index,
                    entry->tag, entry->ppn);
            entry->valid = false;
            lookupTable.erase(cur);
        }
    }
}

void
TLB::flushAddr(Addr addr, uint8_t asn)
{
    flushCache();
    VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    entry->ppn);

            // invalidate this entry
            entry->valid = false;

            lookupTable.erase(i++);
        } else {
            ++i;
        }
    }
}

void
TLB::serialize(CheckpointOut &cp) const
{
    const unsigned size(table.size());
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++)
        table[i].serializeSection(cp, csprintf("Entry%d", i));
}

void
TLB::unserialize(CheckpointIn &cp)
{
    unsigned size(0);
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    table.resize(size);
    for (int i = 0; i < size; i++) {
        table[i].unserializeSection(cp, csprintf("Entry%d", i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}
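// Checkpoint layout note: serialize() above emits the scalars 'size'
// and 'nlu' in this TLB's own section plus one subsection per table
// slot, named Entry0 .. Entry<size-1>; unserialize() restores them
// and rebuilds lookupTable from the entries still marked valid.  A
// checkpoint fragment might look roughly like this (section names
// depend on where the TLB sits in the SimObject tree; fields inside
// each entry come from TlbEntry and are elided here):
//
//     [system.cpu.dtb]
//     size=64
//     nlu=3
//
//     [system.cpu.dtb.Entry0]
//     ...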
Fault
TLB::translateInst(const RequestPtr &req, ThreadContext *tc)
{
    // If this is a PAL PC, then set PHYSICAL
    if (FullSystem && PcPAL(req->getPC()))
        req->setFlags(Request::PHYSICAL);

    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        fetch_hits++;
        return NoFault;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            fetch_acv++;
            return std::make_shared<ItbAcvFault>(req->getVaddr());
        }

        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) !=
                mode_kernel) {
                fetch_acv++;
                return std::make_shared<ItbAcvFault>(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                fetch_misses++;
                return std::make_shared<ItbPageFault>(req->getVaddr());
            }

            req->setPaddr((entry->ppn << PageShift) +
                          (VAddr(req->getVaddr()).offset() & ~3));

            // check permissions for this access
            if (!(entry->xre &
                  (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
                // instruction access fault
                fetch_acv++;
                return std::make_shared<ItbAcvFault>(req->getVaddr());
            }

            fetch_hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask) {
        return std::make_shared<MachineCheckFault>();
    }

    return checkCacheability(req, true);
}
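// Superpage note: the VAddrSpaceEV6() == 0x7e test above (and again
// in translateData() below) implements the EV6 KSEG region, where a
// kernel-mode VA with bits <47:41> equal to 0x7e maps straight to a
// physical address with no TLB entry.  A worked example, assuming
// the usual 64-bit KSEG base (values illustrative):
//
//     VA = 0xfffffc0000001234      // VA<47:41> == 0x7e
//     PA = VA & PAddrImplMask      // keep implemented PA bits
//     // then bit 40 of PA picks the final space:
//     //   bit 40 set   -> PA |= ULL(0xf0000000000)  (set PA<43:40>)
//     //   bit 40 clear -> PA &= ULL(0xffffffffff)   (keep PA<39:0>)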
Fault
TLB::translateData(const RequestPtr &req, ThreadContext *tc, bool write)
{
    mode_type mode =
        (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return std::make_shared<DtbAlignmentFault>(req->getVaddr(),
                                                   req->getFlags(),
                                                   flags);
    }

    if (PcPAL(req->getPC())) {
        mode = (req->getFlags() & AlphaRequestFlags::ALTMODE) ?
            (mode_type)ALT_MODE_AM(tc->readMiscRegNoEffect(IPR_ALT_MODE))
            : mode_kernel;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                  req->getFlags(),
                                                  flags);
        }

        // Check for "superpage" mapping
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
                mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_ACV_MASK;

                return std::make_shared<DtbAcvFault>(req->getVaddr(),
                                                     req->getFlags(),
                                                     flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                return (req->getFlags() & AlphaRequestFlags::VPTE) ?
                    (Fault)(std::make_shared<PDtbMissFault>(req->getVaddr(),
                                                            req->getFlags(),
                                                            flags)) :
                    (Fault)(std::make_shared<NDtbMissFault>(req->getVaddr(),
                                                            req->getFlags(),
                                                            flags));
            }

            req->setPaddr((entry->ppn << PageShift) +
                          VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(entry->xwe & MODE2MASK(mode))) {
                    // declare the write access fault
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (entry->fonw ? MM_STAT_FONW_MASK : 0);
                    return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                          req->getFlags(),
                                                          flags);
                }
                if (entry->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK;
                    return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                          req->getFlags(),
                                                          flags);
                }
            } else {
                if (!(entry->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (entry->fonr ? MM_STAT_FONR_MASK : 0);
                    return std::make_shared<DtbAcvFault>(req->getVaddr(),
                                                         req->getFlags(),
                                                         flags);
                }
                if (entry->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                          req->getFlags(),
                                                          flags);
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask) {
        return std::make_shared<MachineCheckFault>();
    }

    return checkCacheability(req);
}

TlbEntry &
TLB::index(bool advance)
{
    TlbEntry *entry = &table[nlu];

    if (advance)
        nextnlu();

    return *entry;
}

Fault
TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
    if (mode == Execute)
        return translateInst(req, tc);
    else
        return translateData(req, tc, mode == Write);
}

void
TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
                     Translation *translation, Mode mode)
{
    assert(translation);
    translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
}

Fault
TLB::finalizePhysical(const RequestPtr &req, ThreadContext *tc,
                      Mode mode) const
{
    return NoFault;
}

} // namespace AlphaISA

AlphaISA::TLB *
AlphaTLBParams::create()
{
    return new AlphaISA::TLB(this);
}
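// Configuration sketch: this object is normally built from Python via
// the params create() hook above.  Assuming the stock AlphaTLB params
// class with its integer 'size' parameter (the exact names are
// assumptions if your tree differs), a config script would do
// something like:
//
//     system.cpu.itb = AlphaTLB(size=48)
//     system.cpu.dtb = AlphaTLB(size=64)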