// tlb.cc, revision 6023
1/* 2 * Copyright (c) 2001-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 * 28 * Authors: Nathan Binkert 29 * Steve Reinhardt 30 * Andrew Schultz 31 */ 32 33#include <string> 34#include <vector> 35 36#include "arch/alpha/pagetable.hh" 37#include "arch/alpha/tlb.hh" 38#include "arch/alpha/faults.hh" 39#include "base/inifile.hh" 40#include "base/str.hh" 41#include "base/trace.hh" 42#include "config/alpha_tlaser.hh" 43#include "cpu/thread_context.hh" 44 45using namespace std; 46 47namespace AlphaISA { 48 49/////////////////////////////////////////////////////////////////////// 50// 51// Alpha TLB 52// 53 54#ifdef DEBUG 55bool uncacheBit39 = false; 56bool uncacheBit40 = false; 57#endif 58 59#define MODE2MASK(X) (1 << (X)) 60 61TLB::TLB(const Params *p) 62 : BaseTLB(p), size(p->size), nlu(0) 63{ 64 table = new TlbEntry[size]; 65 memset(table, 0, sizeof(TlbEntry[size])); 66 flushCache(); 67} 68 69TLB::~TLB() 70{ 71 if (table) 72 delete [] table; 73} 74 75void 76TLB::regStats() 77{ 78 fetch_hits 79 .name(name() + ".fetch_hits") 80 .desc("ITB hits"); 81 fetch_misses 82 .name(name() + ".fetch_misses") 83 .desc("ITB misses"); 84 fetch_acv 85 .name(name() + ".fetch_acv") 86 .desc("ITB acv"); 87 fetch_accesses 88 .name(name() + ".fetch_accesses") 89 .desc("ITB accesses"); 90 91 fetch_accesses = fetch_hits + fetch_misses; 92 93 read_hits 94 .name(name() + ".read_hits") 95 .desc("DTB read hits") 96 ; 97 98 read_misses 99 .name(name() + ".read_misses") 100 .desc("DTB read misses") 101 ; 102 103 read_acv 104 .name(name() + ".read_acv") 105 .desc("DTB read access violations") 106 ; 107 108 read_accesses 109 .name(name() + ".read_accesses") 110 .desc("DTB read accesses") 111 ; 112 113 write_hits 114 .name(name() + ".write_hits") 115 .desc("DTB write hits") 116 ; 117 118 write_misses 119 .name(name() + ".write_misses") 120 .desc("DTB write misses") 121 ; 122 123 write_acv 124 .name(name() + ".write_acv") 125 .desc("DTB write access violations") 126 ; 127 128 write_accesses 129 .name(name() + ".write_accesses") 130 .desc("DTB write accesses") 131 ; 132 
133 data_hits 134 .name(name() + ".data_hits") 135 .desc("DTB hits") 136 ; 137 138 data_misses 139 .name(name() + ".data_misses") 140 .desc("DTB misses") 141 ; 142 143 data_acv 144 .name(name() + ".data_acv") 145 .desc("DTB access violations") 146 ; 147 148 data_accesses 149 .name(name() + ".data_accesses") 150 .desc("DTB accesses") 151 ; 152 153 data_hits = read_hits + write_hits; 154 data_misses = read_misses + write_misses; 155 data_acv = read_acv + write_acv; 156 data_accesses = read_accesses + write_accesses; 157} 158 159// look up an entry in the TLB 160TlbEntry * 161TLB::lookup(Addr vpn, uint8_t asn) 162{ 163 // assume not found... 164 TlbEntry *retval = NULL; 165 166 if (EntryCache[0]) { 167 if (vpn == EntryCache[0]->tag && 168 (EntryCache[0]->asma || EntryCache[0]->asn == asn)) 169 retval = EntryCache[0]; 170 else if (EntryCache[1]) { 171 if (vpn == EntryCache[1]->tag && 172 (EntryCache[1]->asma || EntryCache[1]->asn == asn)) 173 retval = EntryCache[1]; 174 else if (EntryCache[2] && vpn == EntryCache[2]->tag && 175 (EntryCache[2]->asma || EntryCache[2]->asn == asn)) 176 retval = EntryCache[2]; 177 } 178 } 179 180 if (retval == NULL) { 181 PageTable::const_iterator i = lookupTable.find(vpn); 182 if (i != lookupTable.end()) { 183 while (i->first == vpn) { 184 int index = i->second; 185 TlbEntry *entry = &table[index]; 186 assert(entry->valid); 187 if (vpn == entry->tag && (entry->asma || entry->asn == asn)) { 188 retval = updateCache(entry); 189 break; 190 } 191 192 ++i; 193 } 194 } 195 } 196 197 DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn, 198 retval ? "hit" : "miss", retval ? retval->ppn : 0); 199 return retval; 200} 201 202Fault 203TLB::checkCacheability(RequestPtr &req, bool itb) 204{ 205 // in Alpha, cacheability is controlled by upper-level bits of the 206 // physical address 207 208 /* 209 * We support having the uncacheable bit in either bit 39 or bit 210 * 40. 
The Turbolaser platform (and EV5) support having the bit 211 * in 39, but Tsunami (which Linux assumes uses an EV6) generates 212 * accesses with the bit in 40. So we must check for both, but we 213 * have debug flags to catch a weird case where both are used, 214 * which shouldn't happen. 215 */ 216 217 218#if ALPHA_TLASER 219 if (req->getPaddr() & PAddrUncachedBit39) 220#else 221 if (req->getPaddr() & PAddrUncachedBit43) 222#endif 223 { 224 // IPR memory space not implemented 225 if (PAddrIprSpace(req->getPaddr())) { 226 return new UnimpFault("IPR memory space not implemented!"); 227 } else { 228 // mark request as uncacheable 229 req->setFlags(Request::UNCACHEABLE); 230 231#if !ALPHA_TLASER 232 // Clear bits 42:35 of the physical address (10-2 in 233 // Tsunami manual) 234 req->setPaddr(req->getPaddr() & PAddrUncachedMask); 235#endif 236 } 237 // We shouldn't be able to read from an uncachable address in Alpha as 238 // we don't have a ROM and we don't want to try to fetch from a device 239 // register as we destroy any data that is clear-on-read. 
240 if (req->isUncacheable() && itb) 241 return new UnimpFault("CPU trying to fetch from uncached I/O"); 242 243 } 244 return NoFault; 245} 246 247 248// insert a new TLB entry 249void 250TLB::insert(Addr addr, TlbEntry &entry) 251{ 252 flushCache(); 253 VAddr vaddr = addr; 254 if (table[nlu].valid) { 255 Addr oldvpn = table[nlu].tag; 256 PageTable::iterator i = lookupTable.find(oldvpn); 257 258 if (i == lookupTable.end()) 259 panic("TLB entry not found in lookupTable"); 260 261 int index; 262 while ((index = i->second) != nlu) { 263 if (table[index].tag != oldvpn) 264 panic("TLB entry not found in lookupTable"); 265 266 ++i; 267 } 268 269 DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn); 270 271 lookupTable.erase(i); 272 } 273 274 DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn); 275 276 table[nlu] = entry; 277 table[nlu].tag = vaddr.vpn(); 278 table[nlu].valid = true; 279 280 lookupTable.insert(make_pair(vaddr.vpn(), nlu)); 281 nextnlu(); 282} 283 284void 285TLB::flushAll() 286{ 287 DPRINTF(TLB, "flushAll\n"); 288 memset(table, 0, sizeof(TlbEntry[size])); 289 flushCache(); 290 lookupTable.clear(); 291 nlu = 0; 292} 293 294void 295TLB::flushProcesses() 296{ 297 flushCache(); 298 PageTable::iterator i = lookupTable.begin(); 299 PageTable::iterator end = lookupTable.end(); 300 while (i != end) { 301 int index = i->second; 302 TlbEntry *entry = &table[index]; 303 assert(entry->valid); 304 305 // we can't increment i after we erase it, so save a copy and 306 // increment it to get the next entry now 307 PageTable::iterator cur = i; 308 ++i; 309 310 if (!entry->asma) { 311 DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index, 312 entry->tag, entry->ppn); 313 entry->valid = false; 314 lookupTable.erase(cur); 315 } 316 } 317} 318 319void 320TLB::flushAddr(Addr addr, uint8_t asn) 321{ 322 flushCache(); 323 VAddr vaddr = addr; 324 325 PageTable::iterator i = lookupTable.find(vaddr.vpn()); 326 if (i == lookupTable.end()) 327 return; 328 
329 while (i != lookupTable.end() && i->first == vaddr.vpn()) { 330 int index = i->second; 331 TlbEntry *entry = &table[index]; 332 assert(entry->valid); 333 334 if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) { 335 DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(), 336 entry->ppn); 337 338 // invalidate this entry 339 entry->valid = false; 340 341 lookupTable.erase(i++); 342 } else { 343 ++i; 344 } 345 } 346} 347 348 349void 350TLB::serialize(ostream &os) 351{ 352 SERIALIZE_SCALAR(size); 353 SERIALIZE_SCALAR(nlu); 354 355 for (int i = 0; i < size; i++) { 356 nameOut(os, csprintf("%s.Entry%d", name(), i)); 357 table[i].serialize(os); 358 } 359} 360 361void 362TLB::unserialize(Checkpoint *cp, const string §ion) 363{ 364 UNSERIALIZE_SCALAR(size); 365 UNSERIALIZE_SCALAR(nlu); 366 367 for (int i = 0; i < size; i++) { 368 table[i].unserialize(cp, csprintf("%s.Entry%d", section, i)); 369 if (table[i].valid) { 370 lookupTable.insert(make_pair(table[i].tag, i)); 371 } 372 } 373} 374 375Fault 376TLB::translateInst(RequestPtr req, ThreadContext *tc) 377{ 378 //If this is a pal pc, then set PHYSICAL 379 if (FULL_SYSTEM && PcPAL(req->getPC())) 380 req->setFlags(Request::PHYSICAL); 381 382 if (PcPAL(req->getPC())) { 383 // strip off PAL PC marker (lsb is 1) 384 req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask); 385 fetch_hits++; 386 return NoFault; 387 } 388 389 if (req->getFlags() & Request::PHYSICAL) { 390 req->setPaddr(req->getVaddr()); 391 } else { 392 // verify that this is a good virtual address 393 if (!validVirtualAddress(req->getVaddr())) { 394 fetch_acv++; 395 return new ItbAcvFault(req->getVaddr()); 396 } 397 398 399 // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5 400 // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6 401#if ALPHA_TLASER 402 if ((MCSR_SP(tc->readMiscRegNoEffect(IPR_MCSR)) & 2) && 403 VAddrSpaceEV5(req->getVaddr()) == 2) 404#else 405 if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) 
406#endif 407 { 408 // only valid in kernel mode 409 if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) != 410 mode_kernel) { 411 fetch_acv++; 412 return new ItbAcvFault(req->getVaddr()); 413 } 414 415 req->setPaddr(req->getVaddr() & PAddrImplMask); 416 417#if !ALPHA_TLASER 418 // sign extend the physical address properly 419 if (req->getPaddr() & PAddrUncachedBit40) 420 req->setPaddr(req->getPaddr() | ULL(0xf0000000000)); 421 else 422 req->setPaddr(req->getPaddr() & ULL(0xffffffffff)); 423#endif 424 425 } else { 426 // not a physical address: need to look up pte 427 int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN)); 428 TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), 429 asn); 430 431 if (!entry) { 432 fetch_misses++; 433 return new ItbPageFault(req->getVaddr()); 434 } 435 436 req->setPaddr((entry->ppn << PageShift) + 437 (VAddr(req->getVaddr()).offset() 438 & ~3)); 439 440 // check permissions for this access 441 if (!(entry->xre & 442 (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) { 443 // instruction access fault 444 fetch_acv++; 445 return new ItbAcvFault(req->getVaddr()); 446 } 447 448 fetch_hits++; 449 } 450 } 451 452 // check that the physical address is ok (catch bad physical addresses) 453 if (req->getPaddr() & ~PAddrImplMask) 454 return genMachineCheckFault(); 455 456 return checkCacheability(req, true); 457 458} 459 460Fault 461TLB::translateData(RequestPtr req, ThreadContext *tc, bool write) 462{ 463 Addr pc = tc->readPC(); 464 465 mode_type mode = 466 (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)); 467 468 /** 469 * Check for alignment faults 470 */ 471 if (req->getVaddr() & (req->getSize() - 1)) { 472 DPRINTF(TLB, "Alignment Fault on %#x, size = %d", req->getVaddr(), 473 req->getSize()); 474 uint64_t flags = write ? MM_STAT_WR_MASK : 0; 475 return new DtbAlignmentFault(req->getVaddr(), req->getFlags(), flags); 476 } 477 478 if (PcPAL(pc)) { 479 mode = (req->getFlags() & Request::ALTMODE) ? 
480 (mode_type)ALT_MODE_AM( 481 tc->readMiscRegNoEffect(IPR_ALT_MODE)) 482 : mode_kernel; 483 } 484 485 if (req->getFlags() & Request::PHYSICAL) { 486 req->setPaddr(req->getVaddr()); 487 } else { 488 // verify that this is a good virtual address 489 if (!validVirtualAddress(req->getVaddr())) { 490 if (write) { write_acv++; } else { read_acv++; } 491 uint64_t flags = (write ? MM_STAT_WR_MASK : 0) | 492 MM_STAT_BAD_VA_MASK | 493 MM_STAT_ACV_MASK; 494 return new DtbPageFault(req->getVaddr(), req->getFlags(), flags); 495 } 496 497 // Check for "superpage" mapping 498#if ALPHA_TLASER 499 if ((MCSR_SP(tc->readMiscRegNoEffect(IPR_MCSR)) & 2) && 500 VAddrSpaceEV5(req->getVaddr()) == 2) 501#else 502 if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) 503#endif 504 { 505 // only valid in kernel mode 506 if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) != 507 mode_kernel) { 508 if (write) { write_acv++; } else { read_acv++; } 509 uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) | 510 MM_STAT_ACV_MASK); 511 512 return new DtbAcvFault(req->getVaddr(), req->getFlags(), 513 flags); 514 } 515 516 req->setPaddr(req->getVaddr() & PAddrImplMask); 517 518#if !ALPHA_TLASER 519 // sign extend the physical address properly 520 if (req->getPaddr() & PAddrUncachedBit40) 521 req->setPaddr(req->getPaddr() | ULL(0xf0000000000)); 522 else 523 req->setPaddr(req->getPaddr() & ULL(0xffffffffff)); 524#endif 525 526 } else { 527 if (write) 528 write_accesses++; 529 else 530 read_accesses++; 531 532 int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN)); 533 534 // not a physical address: need to look up pte 535 TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn); 536 537 if (!entry) { 538 // page fault 539 if (write) { write_misses++; } else { read_misses++; } 540 uint64_t flags = (write ? MM_STAT_WR_MASK : 0) | 541 MM_STAT_DTB_MISS_MASK; 542 return (req->getFlags() & Request::VPTE) ? 
543 (Fault)(new PDtbMissFault(req->getVaddr(), req->getFlags(), 544 flags)) : 545 (Fault)(new NDtbMissFault(req->getVaddr(), req->getFlags(), 546 flags)); 547 } 548 549 req->setPaddr((entry->ppn << PageShift) + 550 VAddr(req->getVaddr()).offset()); 551 552 if (write) { 553 if (!(entry->xwe & MODE2MASK(mode))) { 554 // declare the instruction access fault 555 write_acv++; 556 uint64_t flags = MM_STAT_WR_MASK | 557 MM_STAT_ACV_MASK | 558 (entry->fonw ? MM_STAT_FONW_MASK : 0); 559 return new DtbPageFault(req->getVaddr(), req->getFlags(), 560 flags); 561 } 562 if (entry->fonw) { 563 write_acv++; 564 uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK; 565 return new DtbPageFault(req->getVaddr(), req->getFlags(), 566 flags); 567 } 568 } else { 569 if (!(entry->xre & MODE2MASK(mode))) { 570 read_acv++; 571 uint64_t flags = MM_STAT_ACV_MASK | 572 (entry->fonr ? MM_STAT_FONR_MASK : 0); 573 return new DtbAcvFault(req->getVaddr(), req->getFlags(), 574 flags); 575 } 576 if (entry->fonr) { 577 read_acv++; 578 uint64_t flags = MM_STAT_FONR_MASK; 579 return new DtbPageFault(req->getVaddr(), req->getFlags(), 580 flags); 581 } 582 } 583 } 584 585 if (write) 586 write_hits++; 587 else 588 read_hits++; 589 } 590 591 // check that the physical address is ok (catch bad physical addresses) 592 if (req->getPaddr() & ~PAddrImplMask) 593 return genMachineCheckFault(); 594 595 return checkCacheability(req); 596} 597 598TlbEntry & 599TLB::index(bool advance) 600{ 601 TlbEntry *entry = &table[nlu]; 602 603 if (advance) 604 nextnlu(); 605 606 return *entry; 607} 608 609Fault 610TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode) 611{ 612 if (mode == Execute) 613 return translateInst(req, tc); 614 else 615 return translateData(req, tc, mode == Write); 616} 617 618void 619TLB::translateTiming(RequestPtr req, ThreadContext *tc, 620 Translation *translation, Mode mode) 621{ 622 assert(translation); 623 translation->finish(translateAtomic(req, tc, mode), req, tc, mode); 624} 625 
626/* end namespace AlphaISA */ } 627 628AlphaISA::TLB * 629AlphaTLBParams::create() 630{ 631 return new AlphaISA::TLB(this); 632} 633