tlb.cc revision 5736:426510e758ad
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Steve Reinhardt
 *          Andrew Schultz
 */

#include <string>
#include <vector>

#include "arch/alpha/pagetable.hh"
#include "arch/alpha/tlb.hh"
#include "arch/alpha/faults.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "config/alpha_tlaser.hh"
#include "cpu/thread_context.hh"

using namespace std;

namespace AlphaISA {

///////////////////////////////////////////////////////////////////////
//
//  Alpha TLB
//

#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

#define MODE2MASK(X) (1 << (X))

TLB::TLB(const Params *p)
    : BaseTLB(p), size(p->size), nlu(0)
{
    table = new TlbEntry[size];
    memset(table, 0, sizeof(TlbEntry[size]));
    flushCache();
}

TLB::~TLB()
{
    if (table)
        delete [] table;
}
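
// The lookup path below first checks EntryCache, a small cache of
// pointers to the most recently matched entries (indices 0-2), and
// only then falls back to lookupTable, a multimap from virtual page
// number to an index into the backing table[] array.  An entry matches
// when its tag equals the requested VPN and either the ASNs agree or
// the entry's address-space-match (asma) bit is set.  The translate()
// methods further down use it roughly as follows:
//
//     TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);
//     if (!entry)
//         ... return the appropriate ITB/DTB miss fault ...
//     req->setPaddr((entry->ppn << PageShift) +
//                   VAddr(req->getVaddr()).offset());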

// look up an entry in the TLB
TlbEntry *
TLB::lookup(Addr vpn, uint8_t asn)
{
    // assume not found...
    TlbEntry *retval = NULL;

    if (EntryCache[0]) {
        if (vpn == EntryCache[0]->tag &&
            (EntryCache[0]->asma || EntryCache[0]->asn == asn))
            retval = EntryCache[0];
        else if (EntryCache[1]) {
            if (vpn == EntryCache[1]->tag &&
                (EntryCache[1]->asma || EntryCache[1]->asn == asn))
                retval = EntryCache[1];
            else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
                     (EntryCache[2]->asma || EntryCache[2]->asn == asn))
                retval = EntryCache[2];
        }
    }

    if (retval == NULL) {
        PageTable::const_iterator i = lookupTable.find(vpn);
        if (i != lookupTable.end()) {
            while (i->first == vpn) {
                int index = i->second;
                TlbEntry *entry = &table[index];
                assert(entry->valid);
                if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
                    retval = updateCache(entry);
                    break;
                }

                ++i;
            }
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}

Fault
TLB::checkCacheability(RequestPtr &req, bool itb)
{
    // in Alpha, cacheability is controlled by upper-level bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit
     * 40.  The Turbolaser platform (and EV5) support having the bit
     * in 39, but Tsunami (which Linux assumes uses an EV6) generates
     * accesses with the bit in 40.  So we must check for both, but we
     * have debug flags to catch a weird case where both are used,
     * which shouldn't happen.
     */

#if ALPHA_TLASER
    if (req->getPaddr() & PAddrUncachedBit39)
#else
    if (req->getPaddr() & PAddrUncachedBit43)
#endif
    {
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return new UnimpFault("IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(Request::UNCACHEABLE);

#if !ALPHA_TLASER
            // Clear bits 42:35 of the physical address (10-2 in
            // Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
#endif
        }
        // We shouldn't be able to read from an uncachable address in Alpha as
        // we don't have a ROM and we don't want to try to fetch from a device
        // register as we destroy any data that is clear-on-read.
        if (req->isUncacheable() && itb)
            return new UnimpFault("CPU trying to fetch from uncached I/O");
    }
    return NoFault;
}
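
// Replacement in insert() targets the slot indexed by nlu, which
// nextnlu() advances after each fill.  If the victim slot held a valid
// entry, its mapping is removed from lookupTable first so the multimap
// and table[] never disagree, and flushCache() is called whenever the
// table changes so EntryCache never holds a stale pointer.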

// insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    flushCache();
    VAddr vaddr = addr;
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);

    table[nlu] = entry;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    memset(table, 0, sizeof(TlbEntry[size]));
    flushCache();
    lookupTable.clear();
    nlu = 0;
}
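
// The flush variants differ in scope: flushAll() wipes the whole table,
// flushProcesses() below drops only process-private entries (those
// without the asma address-space-match bit), and flushAddr() removes
// entries for one virtual page and ASN.  Each of them also calls
// flushCache() to invalidate the EntryCache pointers.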

void
TLB::flushProcesses()
{
    flushCache();
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!entry->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index,
                    entry->tag, entry->ppn);
            entry->valid = false;
            lookupTable.erase(cur);
        }
    }
}

void
TLB::flushAddr(Addr addr, uint8_t asn)
{
    flushCache();
    VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    entry->ppn);

            // invalidate this entry
            entry->valid = false;

            lookupTable.erase(i++);
        } else {
            ++i;
        }
    }
}

void
TLB::serialize(ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.Entry%d", name(), i));
        table[i].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        table[i].unserialize(cp, csprintf("%s.Entry%d", section, i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}

///////////////////////////////////////////////////////////////////////
//
//  Alpha ITB
//
ITB::ITB(const Params *p)
    : TLB(p)
{}

void
ITB::regStats()
{
    hits
        .name(name() + ".hits")
        .desc("ITB hits");
    misses
        .name(name() + ".misses")
        .desc("ITB misses");
    acv
        .name(name() + ".acv")
        .desc("ITB acv");
    accesses
        .name(name() + ".accesses")
        .desc("ITB accesses");

    accesses = hits + misses;
}
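
// ITB::translate resolves an instruction-fetch address.  PAL-mode PCs
// and requests flagged PHYSICAL bypass translation; addresses in the
// kernel "superpage" region (VA<47:41> == 0x7e on EV6, or the EV5 form
// under ALPHA_TLASER) map directly to physical addresses but are only
// legal in kernel mode; everything else goes through lookup() and the
// entry's per-mode read-enable bits (xre) for the current CPU mode.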

Fault
ITB::translate(RequestPtr &req, ThreadContext *tc)
{
    //If this is a pal pc, then set PHYSICAL
    if (FULL_SYSTEM && PcPAL(req->getPC()))
        req->setFlags(Request::PHYSICAL);

    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        hits++;
        return NoFault;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            acv++;
            return new ItbAcvFault(req->getVaddr());
        }

        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
#if ALPHA_TLASER
        if ((MCSR_SP(tc->readMiscRegNoEffect(IPR_MCSR)) & 2) &&
            VAddrSpaceEV5(req->getVaddr()) == 2)
#else
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e)
#endif
        {
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) !=
                mode_kernel) {
                acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

#if !ALPHA_TLASER
            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
#endif

        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(),
                                     asn);

            if (!entry) {
                misses++;
                return new ItbPageFault(req->getVaddr());
            }

            req->setPaddr((entry->ppn << PageShift) +
                          (VAddr(req->getVaddr()).offset()
                           & ~3));

            // check permissions for this access
            if (!(entry->xre &
                  (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
                // instruction access fault
                acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req, true);
}

///////////////////////////////////////////////////////////////////////
//
//  Alpha DTB
//
DTB::DTB(const Params *p)
    : TLB(p)
{}

void
DTB::regStats()
{
    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        ;

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        ;

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    acv
        .name(name() + ".acv")
        .desc("DTB access violations")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    hits = read_hits + write_hits;
    misses = read_misses + write_misses;
    acv = read_acv + write_acv;
    accesses = read_accesses + write_accesses;
}
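
// DTB::translate mirrors the ITB path (alignment check, PAL alternate
// mode, superpage mapping, then a TLB lookup), but every fault it
// returns also carries MM_STAT_* flag bits describing the failure,
// e.g. for a write denied by the entry's write-enable (xwe) bits:
//
//     uint64_t flags = MM_STAT_WR_MASK |
//         MM_STAT_ACV_MASK |
//         (entry->fonw ? MM_STAT_FONW_MASK : 0);
//     return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
//
// Read and write outcomes are counted separately (read_*/write_*) and
// summed into the aggregate hits/misses/acv/accesses formulas in
// regStats() above.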

Fault
DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
{
    Addr pc = tc->readPC();

    mode_type mode =
        (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return new DtbAlignmentFault(req->getVaddr(), req->getFlags(), flags);
    }

    if (PcPAL(pc)) {
        mode = (req->getFlags() & Request::ALTMODE) ?
            (mode_type)ALT_MODE_AM(
                tc->readMiscRegNoEffect(IPR_ALT_MODE))
            : mode_kernel;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
        }

        // Check for "superpage" mapping
#if ALPHA_TLASER
        if ((MCSR_SP(tc->readMiscRegNoEffect(IPR_MCSR)) & 2) &&
            VAddrSpaceEV5(req->getVaddr()) == 2)
#else
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e)
#endif
        {
            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
                mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                                  MM_STAT_ACV_MASK);

                return new DtbAcvFault(req->getVaddr(), req->getFlags(),
                                       flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

#if !ALPHA_TLASER
            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
#endif

        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                return (req->getFlags() & Request::VPTE) ?
                    (Fault)(new PDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags)) :
                    (Fault)(new NDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags));
            }

            req->setPaddr((entry->ppn << PageShift) +
                          VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(entry->xwe & MODE2MASK(mode))) {
                    // declare the instruction access fault
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (entry->fonw ? MM_STAT_FONW_MASK : 0);
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
                if (entry->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            } else {
                if (!(entry->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (entry->fonr ? MM_STAT_FONR_MASK : 0);
                    return new DtbAcvFault(req->getVaddr(), req->getFlags(),
                                           flags);
                }
                if (entry->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req);
}

TlbEntry &
TLB::index(bool advance)
{
    TlbEntry *entry = &table[nlu];

    if (advance)
        nextnlu();

    return *entry;
}

/* end namespace AlphaISA */ }

AlphaISA::ITB *
AlphaITBParams::create()
{
    return new AlphaISA::ITB(this);
}

AlphaISA::DTB *
AlphaDTBParams::create()
{
    return new AlphaISA::DTB(this);
}