// tlb.cc revision 8232
1/* 2 * Copyright (c) 2001-2005 The Regents of The University of Michigan 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are 7 * met: redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer; 9 * redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution; 12 * neither the name of the copyright holders nor the names of its 13 * contributors may be used to endorse or promote products derived from 14 * this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27 * 28 * Authors: Nathan Binkert 29 * Steve Reinhardt 30 * Andrew Schultz 31 */ 32 33#include <string> 34#include <vector> 35 36#include "arch/alpha/faults.hh" 37#include "arch/alpha/pagetable.hh" 38#include "arch/alpha/tlb.hh" 39#include "base/inifile.hh" 40#include "base/str.hh" 41#include "base/trace.hh" 42#include "cpu/thread_context.hh" 43#include "debug/TLB.hh" 44 45using namespace std; 46 47namespace AlphaISA { 48 49/////////////////////////////////////////////////////////////////////// 50// 51// Alpha TLB 52// 53 54#ifdef DEBUG 55bool uncacheBit39 = false; 56bool uncacheBit40 = false; 57#endif 58 59#define MODE2MASK(X) (1 << (X)) 60 61TLB::TLB(const Params *p) 62 : BaseTLB(p), size(p->size), nlu(0) 63{ 64 table = new TlbEntry[size]; 65 memset(table, 0, sizeof(TlbEntry[size])); 66 flushCache(); 67} 68 69TLB::~TLB() 70{ 71 if (table) 72 delete [] table; 73} 74 75void 76TLB::regStats() 77{ 78 fetch_hits 79 .name(name() + ".fetch_hits") 80 .desc("ITB hits"); 81 fetch_misses 82 .name(name() + ".fetch_misses") 83 .desc("ITB misses"); 84 fetch_acv 85 .name(name() + ".fetch_acv") 86 .desc("ITB acv"); 87 fetch_accesses 88 .name(name() + ".fetch_accesses") 89 .desc("ITB accesses"); 90 91 fetch_accesses = fetch_hits + fetch_misses; 92 93 read_hits 94 .name(name() + ".read_hits") 95 .desc("DTB read hits") 96 ; 97 98 read_misses 99 .name(name() + ".read_misses") 100 .desc("DTB read misses") 101 ; 102 103 read_acv 104 .name(name() + ".read_acv") 105 .desc("DTB read access violations") 106 ; 107 108 read_accesses 109 .name(name() + ".read_accesses") 110 .desc("DTB read accesses") 111 ; 112 113 write_hits 114 .name(name() + ".write_hits") 115 .desc("DTB write hits") 116 ; 117 118 write_misses 119 .name(name() + ".write_misses") 120 .desc("DTB write misses") 121 ; 122 123 write_acv 124 .name(name() + ".write_acv") 125 .desc("DTB write access violations") 126 ; 127 128 write_accesses 129 .name(name() + ".write_accesses") 130 .desc("DTB write accesses") 131 ; 132 133 
data_hits 134 .name(name() + ".data_hits") 135 .desc("DTB hits") 136 ; 137 138 data_misses 139 .name(name() + ".data_misses") 140 .desc("DTB misses") 141 ; 142 143 data_acv 144 .name(name() + ".data_acv") 145 .desc("DTB access violations") 146 ; 147 148 data_accesses 149 .name(name() + ".data_accesses") 150 .desc("DTB accesses") 151 ; 152 153 data_hits = read_hits + write_hits; 154 data_misses = read_misses + write_misses; 155 data_acv = read_acv + write_acv; 156 data_accesses = read_accesses + write_accesses; 157} 158 159// look up an entry in the TLB 160TlbEntry * 161TLB::lookup(Addr vpn, uint8_t asn) 162{ 163 // assume not found... 164 TlbEntry *retval = NULL; 165 166 if (EntryCache[0]) { 167 if (vpn == EntryCache[0]->tag && 168 (EntryCache[0]->asma || EntryCache[0]->asn == asn)) 169 retval = EntryCache[0]; 170 else if (EntryCache[1]) { 171 if (vpn == EntryCache[1]->tag && 172 (EntryCache[1]->asma || EntryCache[1]->asn == asn)) 173 retval = EntryCache[1]; 174 else if (EntryCache[2] && vpn == EntryCache[2]->tag && 175 (EntryCache[2]->asma || EntryCache[2]->asn == asn)) 176 retval = EntryCache[2]; 177 } 178 } 179 180 if (retval == NULL) { 181 PageTable::const_iterator i = lookupTable.find(vpn); 182 if (i != lookupTable.end()) { 183 while (i->first == vpn) { 184 int index = i->second; 185 TlbEntry *entry = &table[index]; 186 assert(entry->valid); 187 if (vpn == entry->tag && (entry->asma || entry->asn == asn)) { 188 retval = updateCache(entry); 189 break; 190 } 191 192 ++i; 193 } 194 } 195 } 196 197 DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn, 198 retval ? "hit" : "miss", retval ? retval->ppn : 0); 199 return retval; 200} 201 202Fault 203TLB::checkCacheability(RequestPtr &req, bool itb) 204{ 205 // in Alpha, cacheability is controlled by upper-level bits of the 206 // physical address 207 208 /* 209 * We support having the uncacheable bit in either bit 39 or bit 210 * 40. 
The Turbolaser platform (and EV5) support having the bit 211 * in 39, but Tsunami (which Linux assumes uses an EV6) generates 212 * accesses with the bit in 40. So we must check for both, but we 213 * have debug flags to catch a weird case where both are used, 214 * which shouldn't happen. 215 */ 216 217 218 if (req->getPaddr() & PAddrUncachedBit43) { 219 // IPR memory space not implemented 220 if (PAddrIprSpace(req->getPaddr())) { 221 return new UnimpFault("IPR memory space not implemented!"); 222 } else { 223 // mark request as uncacheable 224 req->setFlags(Request::UNCACHEABLE); 225 226 // Clear bits 42:35 of the physical address (10-2 in 227 // Tsunami manual) 228 req->setPaddr(req->getPaddr() & PAddrUncachedMask); 229 } 230 // We shouldn't be able to read from an uncachable address in Alpha as 231 // we don't have a ROM and we don't want to try to fetch from a device 232 // register as we destroy any data that is clear-on-read. 233 if (req->isUncacheable() && itb) 234 return new UnimpFault("CPU trying to fetch from uncached I/O"); 235 236 } 237 return NoFault; 238} 239 240 241// insert a new TLB entry 242void 243TLB::insert(Addr addr, TlbEntry &entry) 244{ 245 flushCache(); 246 VAddr vaddr = addr; 247 if (table[nlu].valid) { 248 Addr oldvpn = table[nlu].tag; 249 PageTable::iterator i = lookupTable.find(oldvpn); 250 251 if (i == lookupTable.end()) 252 panic("TLB entry not found in lookupTable"); 253 254 int index; 255 while ((index = i->second) != nlu) { 256 if (table[index].tag != oldvpn) 257 panic("TLB entry not found in lookupTable"); 258 259 ++i; 260 } 261 262 DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn); 263 264 lookupTable.erase(i); 265 } 266 267 DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn); 268 269 table[nlu] = entry; 270 table[nlu].tag = vaddr.vpn(); 271 table[nlu].valid = true; 272 273 lookupTable.insert(make_pair(vaddr.vpn(), nlu)); 274 nextnlu(); 275} 276 277void 278TLB::flushAll() 279{ 280 
DPRINTF(TLB, "flushAll\n"); 281 memset(table, 0, sizeof(TlbEntry[size])); 282 flushCache(); 283 lookupTable.clear(); 284 nlu = 0; 285} 286 287void 288TLB::flushProcesses() 289{ 290 flushCache(); 291 PageTable::iterator i = lookupTable.begin(); 292 PageTable::iterator end = lookupTable.end(); 293 while (i != end) { 294 int index = i->second; 295 TlbEntry *entry = &table[index]; 296 assert(entry->valid); 297 298 // we can't increment i after we erase it, so save a copy and 299 // increment it to get the next entry now 300 PageTable::iterator cur = i; 301 ++i; 302 303 if (!entry->asma) { 304 DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index, 305 entry->tag, entry->ppn); 306 entry->valid = false; 307 lookupTable.erase(cur); 308 } 309 } 310} 311 312void 313TLB::flushAddr(Addr addr, uint8_t asn) 314{ 315 flushCache(); 316 VAddr vaddr = addr; 317 318 PageTable::iterator i = lookupTable.find(vaddr.vpn()); 319 if (i == lookupTable.end()) 320 return; 321 322 while (i != lookupTable.end() && i->first == vaddr.vpn()) { 323 int index = i->second; 324 TlbEntry *entry = &table[index]; 325 assert(entry->valid); 326 327 if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) { 328 DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(), 329 entry->ppn); 330 331 // invalidate this entry 332 entry->valid = false; 333 334 lookupTable.erase(i++); 335 } else { 336 ++i; 337 } 338 } 339} 340 341 342void 343TLB::serialize(ostream &os) 344{ 345 SERIALIZE_SCALAR(size); 346 SERIALIZE_SCALAR(nlu); 347 348 for (int i = 0; i < size; i++) { 349 nameOut(os, csprintf("%s.Entry%d", name(), i)); 350 table[i].serialize(os); 351 } 352} 353 354void 355TLB::unserialize(Checkpoint *cp, const string §ion) 356{ 357 UNSERIALIZE_SCALAR(size); 358 UNSERIALIZE_SCALAR(nlu); 359 360 for (int i = 0; i < size; i++) { 361 table[i].unserialize(cp, csprintf("%s.Entry%d", section, i)); 362 if (table[i].valid) { 363 lookupTable.insert(make_pair(table[i].tag, i)); 364 } 365 } 366} 367 368Fault 
369TLB::translateInst(RequestPtr req, ThreadContext *tc) 370{ 371 //If this is a pal pc, then set PHYSICAL 372 if (FULL_SYSTEM && PcPAL(req->getPC())) 373 req->setFlags(Request::PHYSICAL); 374 375 if (PcPAL(req->getPC())) { 376 // strip off PAL PC marker (lsb is 1) 377 req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask); 378 fetch_hits++; 379 return NoFault; 380 } 381 382 if (req->getFlags() & Request::PHYSICAL) { 383 req->setPaddr(req->getVaddr()); 384 } else { 385 // verify that this is a good virtual address 386 if (!validVirtualAddress(req->getVaddr())) { 387 fetch_acv++; 388 return new ItbAcvFault(req->getVaddr()); 389 } 390 391 392 // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5 393 // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6 394 if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) { 395 // only valid in kernel mode 396 if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) != 397 mode_kernel) { 398 fetch_acv++; 399 return new ItbAcvFault(req->getVaddr()); 400 } 401 402 req->setPaddr(req->getVaddr() & PAddrImplMask); 403 404 // sign extend the physical address properly 405 if (req->getPaddr() & PAddrUncachedBit40) 406 req->setPaddr(req->getPaddr() | ULL(0xf0000000000)); 407 else 408 req->setPaddr(req->getPaddr() & ULL(0xffffffffff)); 409 } else { 410 // not a physical address: need to look up pte 411 int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN)); 412 TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), 413 asn); 414 415 if (!entry) { 416 fetch_misses++; 417 return new ItbPageFault(req->getVaddr()); 418 } 419 420 req->setPaddr((entry->ppn << PageShift) + 421 (VAddr(req->getVaddr()).offset() 422 & ~3)); 423 424 // check permissions for this access 425 if (!(entry->xre & 426 (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) { 427 // instruction access fault 428 fetch_acv++; 429 return new ItbAcvFault(req->getVaddr()); 430 } 431 432 fetch_hits++; 433 } 434 } 435 436 // check that the physical address is ok (catch bad physical 
addresses) 437 if (req->getPaddr() & ~PAddrImplMask) 438 return genMachineCheckFault(); 439 440 return checkCacheability(req, true); 441 442} 443 444Fault 445TLB::translateData(RequestPtr req, ThreadContext *tc, bool write) 446{ 447 mode_type mode = 448 (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)); 449 450 /** 451 * Check for alignment faults 452 */ 453 if (req->getVaddr() & (req->getSize() - 1)) { 454 DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(), 455 req->getSize()); 456 uint64_t flags = write ? MM_STAT_WR_MASK : 0; 457 return new DtbAlignmentFault(req->getVaddr(), req->getFlags(), flags); 458 } 459 460 if (PcPAL(tc->pcState().pc())) { 461 mode = (req->getFlags() & Request::ALTMODE) ? 462 (mode_type)ALT_MODE_AM( 463 tc->readMiscRegNoEffect(IPR_ALT_MODE)) 464 : mode_kernel; 465 } 466 467 if (req->getFlags() & Request::PHYSICAL) { 468 req->setPaddr(req->getVaddr()); 469 } else { 470 // verify that this is a good virtual address 471 if (!validVirtualAddress(req->getVaddr())) { 472 if (write) { write_acv++; } else { read_acv++; } 473 uint64_t flags = (write ? MM_STAT_WR_MASK : 0) | 474 MM_STAT_BAD_VA_MASK | 475 MM_STAT_ACV_MASK; 476 return new DtbPageFault(req->getVaddr(), req->getFlags(), flags); 477 } 478 479 // Check for "superpage" mapping 480 if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) { 481 // only valid in kernel mode 482 if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) != 483 mode_kernel) { 484 if (write) { write_acv++; } else { read_acv++; } 485 uint64_t flags = ((write ? 
MM_STAT_WR_MASK : 0) | 486 MM_STAT_ACV_MASK); 487 488 return new DtbAcvFault(req->getVaddr(), req->getFlags(), 489 flags); 490 } 491 492 req->setPaddr(req->getVaddr() & PAddrImplMask); 493 494 // sign extend the physical address properly 495 if (req->getPaddr() & PAddrUncachedBit40) 496 req->setPaddr(req->getPaddr() | ULL(0xf0000000000)); 497 else 498 req->setPaddr(req->getPaddr() & ULL(0xffffffffff)); 499 } else { 500 if (write) 501 write_accesses++; 502 else 503 read_accesses++; 504 505 int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN)); 506 507 // not a physical address: need to look up pte 508 TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn); 509 510 if (!entry) { 511 // page fault 512 if (write) { write_misses++; } else { read_misses++; } 513 uint64_t flags = (write ? MM_STAT_WR_MASK : 0) | 514 MM_STAT_DTB_MISS_MASK; 515 return (req->getFlags() & Request::VPTE) ? 516 (Fault)(new PDtbMissFault(req->getVaddr(), req->getFlags(), 517 flags)) : 518 (Fault)(new NDtbMissFault(req->getVaddr(), req->getFlags(), 519 flags)); 520 } 521 522 req->setPaddr((entry->ppn << PageShift) + 523 VAddr(req->getVaddr()).offset()); 524 525 if (write) { 526 if (!(entry->xwe & MODE2MASK(mode))) { 527 // declare the instruction access fault 528 write_acv++; 529 uint64_t flags = MM_STAT_WR_MASK | 530 MM_STAT_ACV_MASK | 531 (entry->fonw ? MM_STAT_FONW_MASK : 0); 532 return new DtbPageFault(req->getVaddr(), req->getFlags(), 533 flags); 534 } 535 if (entry->fonw) { 536 write_acv++; 537 uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK; 538 return new DtbPageFault(req->getVaddr(), req->getFlags(), 539 flags); 540 } 541 } else { 542 if (!(entry->xre & MODE2MASK(mode))) { 543 read_acv++; 544 uint64_t flags = MM_STAT_ACV_MASK | 545 (entry->fonr ? 
MM_STAT_FONR_MASK : 0); 546 return new DtbAcvFault(req->getVaddr(), req->getFlags(), 547 flags); 548 } 549 if (entry->fonr) { 550 read_acv++; 551 uint64_t flags = MM_STAT_FONR_MASK; 552 return new DtbPageFault(req->getVaddr(), req->getFlags(), 553 flags); 554 } 555 } 556 } 557 558 if (write) 559 write_hits++; 560 else 561 read_hits++; 562 } 563 564 // check that the physical address is ok (catch bad physical addresses) 565 if (req->getPaddr() & ~PAddrImplMask) 566 return genMachineCheckFault(); 567 568 return checkCacheability(req); 569} 570 571TlbEntry & 572TLB::index(bool advance) 573{ 574 TlbEntry *entry = &table[nlu]; 575 576 if (advance) 577 nextnlu(); 578 579 return *entry; 580} 581 582Fault 583TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode) 584{ 585 if (mode == Execute) 586 return translateInst(req, tc); 587 else 588 return translateData(req, tc, mode == Write); 589} 590 591void 592TLB::translateTiming(RequestPtr req, ThreadContext *tc, 593 Translation *translation, Mode mode) 594{ 595 assert(translation); 596 translation->finish(translateAtomic(req, tc, mode), req, tc, mode); 597} 598 599} // namespace AlphaISA 600 601AlphaISA::TLB * 602AlphaTLBParams::create() 603{ 604 return new AlphaISA::TLB(this); 605} 606