tlb.cc revision 4989
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

#include <cstring>

#include "arch/sparc/asi.hh"
#include "arch/sparc/miscregfile.hh"
#include "arch/sparc/tlb.hh"
#include "base/bitfield.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "cpu/base.hh"
#include "mem/packet_access.hh"
#include "mem/request.hh"
#include "params/SparcDTB.hh"
#include "params/SparcITB.hh"
#include "sim/system.hh"
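
/*
 * Overview (descriptive note): this file implements the SPARC TLB model --
 * a common TLB base class plus the instruction (ITB) and data (DTB) TLB
 * front ends.  The DTB additionally decodes the ASI-mapped MMU internal
 * registers (doMmuRegRead/doMmuRegWrite) and provides the TSB pointer
 * helpers used on TLB misses.
 */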

/* @todo remove some of the magic constants. -- ali
 */
namespace SparcISA {

TLB::TLB(const std::string &name, int s)
    : SimObject(name), size(s), usedEntries(0), lastReplaced(0),
      cacheValid(false)
{
    // To make this work you'll have to change the hypervisor and OS
    if (size > 64)
        fatal("SPARC T1 TLB registers don't support more than 64 TLB entries.");

    tlb = new TlbEntry[size];
    std::memset(tlb, 0, sizeof(TlbEntry) * size);

    for (int x = 0; x < size; x++)
        freeList.push_back(&tlb[x]);
}

void
TLB::clearUsedBits()
{
    MapIter i;
    for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
        TlbEntry *t = i->second;
        if (!t->pte.locked()) {
            t->used = false;
            usedEntries--;
        }
    }
}


void
TLB::insert(Addr va, int partition_id, int context_id, bool real,
        const PageTableEntry& PTE, int entry)
{
    MapIter i;
    TlbEntry *new_entry = NULL;
//  TlbRange tr;
    int x;

    cacheValid = false;
    va &= ~(PTE.size()-1);
/*  tr.va = va;
    tr.size = PTE.size() - 1;
    tr.contextId = context_id;
    tr.partitionId = partition_id;
    tr.real = real;
*/

    DPRINTF(TLB,
        "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
        va, PTE.paddr(), partition_id, context_id, (int)real, entry);

    // Demap any entry that conflicts
    for (x = 0; x < size; x++) {
        if (tlb[x].range.real == real &&
            tlb[x].range.partitionId == partition_id &&
            tlb[x].range.va < va + PTE.size() - 1 &&
            tlb[x].range.va + tlb[x].range.size >= va &&
            (real || tlb[x].range.contextId == context_id ))
        {
            if (tlb[x].valid) {
                freeList.push_front(&tlb[x]);
                DPRINTF(TLB, "TLB: Conflicting entry %#X , deleting it\n", x);

                tlb[x].valid = false;
                if (tlb[x].used) {
                    tlb[x].used = false;
                    usedEntries--;
                }
                lookupTable.erase(tlb[x].range);
            }
        }
    }

/*
    i = lookupTable.find(tr);
    if (i != lookupTable.end()) {
        i->second->valid = false;
        if (i->second->used) {
            i->second->used = false;
            usedEntries--;
        }
        freeList.push_front(i->second);
        DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
                i->second);
        lookupTable.erase(i);
    }
*/

    if (entry != -1) {
        assert(entry < size && entry >= 0);
        new_entry = &tlb[entry];
    } else {
        if (!freeList.empty()) {
            new_entry = freeList.front();
        } else {
            x = lastReplaced;
            do {
                ++x;
                if (x == size)
                    x = 0;
                if (x == lastReplaced)
                    goto insertAllLocked;
            } while (tlb[x].pte.locked());
            lastReplaced = x;
            new_entry = &tlb[x];
        }
        /*
        for (x = 0; x < size; x++) {
            if (!tlb[x].valid || !tlb[x].used) {
                new_entry = &tlb[x];
                break;
            }
        }*/
    }

insertAllLocked:
    // Update the last entry if they're all locked
    if (!new_entry) {
        new_entry = &tlb[size-1];
    }

    freeList.remove(new_entry);
    if (new_entry->valid && new_entry->used)
        usedEntries--;
    if (new_entry->valid)
        lookupTable.erase(new_entry->range);

    assert(PTE.valid());
    new_entry->range.va = va;
    new_entry->range.size = PTE.size() - 1;
    new_entry->range.partitionId = partition_id;
    new_entry->range.contextId = context_id;
    new_entry->range.real = real;
    new_entry->pte = PTE;
    new_entry->used = true;
    new_entry->valid = true;
    usedEntries++;

    i = lookupTable.insert(new_entry->range, new_entry);
    assert(i != lookupTable.end());
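
    // Note on the victim selection above: an explicitly requested entry
    // index wins, then the free list, then a clock-style sweep starting at
    // lastReplaced that skips locked entries, and finally the last entry in
    // the table if everything is locked (insertAllLocked).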
    // If all entries have their used bit set, clear it on all of them but
    // the one we just inserted
    if (usedEntries == size) {
        clearUsedBits();
        new_entry->used = true;
        usedEntries++;
    }
}


TlbEntry*
TLB::lookup(Addr va, int partition_id, bool real, int context_id,
        bool update_used)
{
    MapIter i;
    TlbRange tr;
    TlbEntry *t;

    DPRINTF(TLB, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
            va, partition_id, context_id, real);

    // Assemble full address structure
    tr.va = va;
    tr.size = MachineBytes;
    tr.contextId = context_id;
    tr.partitionId = partition_id;
    tr.real = real;

    // Try to find the entry
    i = lookupTable.find(tr);
    if (i == lookupTable.end()) {
        DPRINTF(TLB, "TLB: No valid entry found\n");
        return NULL;
    }

    // Mark the entry's used bit and clear other used bits if needed
    t = i->second;
    DPRINTF(TLB, "TLB: Valid entry found pa: %#x size: %#x\n", t->pte.paddr(),
            t->pte.size());

    // Update the used bits only if this is a real access (not a fake one
    // from virttophys())
    if (!t->used && update_used) {
        t->used = true;
        usedEntries++;
        if (usedEntries == size) {
            clearUsedBits();
            t->used = true;
            usedEntries++;
        }
    }

    return t;
}

void
TLB::dumpAll()
{
    MapIter i;
    for (int x = 0; x < size; x++) {
        if (tlb[x].valid) {
            DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
                     x, tlb[x].range.partitionId, tlb[x].range.contextId,
                     tlb[x].range.real ? 'R' : ' ', tlb[x].range.size,
                     tlb[x].range.va, tlb[x].pte.paddr(), tlb[x].pte());
        }
    }
}

void
TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
{
    TlbRange tr;
    MapIter i;

    DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
            va, partition_id, context_id, real);

    cacheValid = false;

    // Assemble full address structure
    tr.va = va;
    tr.size = MachineBytes;
    tr.contextId = context_id;
    tr.partitionId = partition_id;
    tr.real = real;

    // Demap any entry that conflicts
    i = lookupTable.find(tr);
    if (i != lookupTable.end()) {
        DPRINTF(IPR, "TLB: Demapped page\n");
        i->second->valid = false;
        if (i->second->used) {
            i->second->used = false;
            usedEntries--;
        }
        freeList.push_front(i->second);
        lookupTable.erase(i);
    }
}

void
TLB::demapContext(int partition_id, int context_id)
{
    int x;
    DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
            partition_id, context_id);
    cacheValid = false;
    for (x = 0; x < size; x++) {
        if (tlb[x].range.contextId == context_id &&
            tlb[x].range.partitionId == partition_id) {
            if (tlb[x].valid == true) {
                freeList.push_front(&tlb[x]);
            }
            tlb[x].valid = false;
            if (tlb[x].used) {
                tlb[x].used = false;
                usedEntries--;
            }
            lookupTable.erase(tlb[x].range);
        }
    }
}

void
TLB::demapAll(int partition_id)
{
    int x;
    DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
    cacheValid = false;
    for (x = 0; x < size; x++) {
        if (!tlb[x].pte.locked() && tlb[x].range.partitionId == partition_id) {
            if (tlb[x].valid == true) {
                freeList.push_front(&tlb[x]);
            }
            tlb[x].valid = false;
            if (tlb[x].used) {
                tlb[x].used = false;
                usedEntries--;
            }
            lookupTable.erase(tlb[x].range);
        }
    }
}

void
TLB::invalidateAll()
{
    int x;
    cacheValid = false;

    freeList.clear();
    lookupTable.clear();
    for (x = 0; x < size; x++) {
        if (tlb[x].valid == true)
            freeList.push_back(&tlb[x]);
        tlb[x].valid = false;
        tlb[x].used = false;
    }
    usedEntries = 0;
}

uint64_t
TLB::TteRead(int entry)
{
    if (entry >= size)
        panic("entry: %d\n", entry);

    assert(entry < size);
    if (tlb[entry].valid)
        return tlb[entry].pte();
    else
        return (uint64_t)-1ll;
}

uint64_t
TLB::TagRead(int entry)
{
    assert(entry < size);
    uint64_t tag;
    if (!tlb[entry].valid)
        return (uint64_t)-1ll;

    tag = tlb[entry].range.contextId;
    tag |= tlb[entry].range.va;
    tag |= (uint64_t)tlb[entry].range.partitionId << 61;
    tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
    tag |= (uint64_t)~tlb[entry].pte._size() << 56;
    return tag;
}

bool
TLB::validVirtualAddress(Addr va, bool am)
{
    if (am)
        return true;
    if (va >= StartVAddrHole && va <= EndVAddrHole)
        return false;
    return true;
}

void
TLB::writeSfsr(ThreadContext *tc, int reg, bool write, ContextType ct,
        bool se, FaultTypes ft, int asi)
{
    uint64_t sfsr;
    sfsr = tc->readMiscRegNoEffect(reg);

    if (sfsr & 0x1)
        sfsr = 0x3;
    else
        sfsr = 1;

    if (write)
        sfsr |= 1 << 2;
    sfsr |= ct << 4;
    if (se)
        sfsr |= 1 << 6;
    sfsr |= ft << 7;
    sfsr |= asi << 16;
    tc->setMiscReg(reg, sfsr);
}

void
TLB::writeTagAccess(ThreadContext *tc, int reg, Addr va, int context)
{
    DPRINTF(TLB, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
            va, context, mbits(va, 63,13) | mbits(context,12,0));

    tc->setMiscReg(reg, mbits(va, 63,13) | mbits(context,12,0));
}

void
ITB::writeSfsr(ThreadContext *tc, bool write, ContextType ct,
        bool se, FaultTypes ft, int asi)
{
    DPRINTF(TLB, "TLB: ITB Fault: w=%d ct=%d ft=%d asi=%d\n",
            (int)write, ct, ft, asi);
    TLB::writeSfsr(tc, MISCREG_MMU_ITLB_SFSR, write, ct, se, ft, asi);
}

void
ITB::writeTagAccess(ThreadContext *tc, Addr va, int context)
{
    TLB::writeTagAccess(tc, MISCREG_MMU_ITLB_TAG_ACCESS, va, context);
}

void
DTB::writeSfr(ThreadContext *tc, Addr a, bool write, ContextType ct,
        bool se, FaultTypes ft, int asi)
{
    DPRINTF(TLB, "TLB: DTB Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
            a, (int)write, ct, ft, asi);
    TLB::writeSfsr(tc, MISCREG_MMU_DTLB_SFSR, write, ct, se, ft, asi);
    tc->setMiscReg(MISCREG_MMU_DTLB_SFAR, a);
}

void
DTB::writeTagAccess(ThreadContext *tc, Addr va, int context)
{
    TLB::writeTagAccess(tc, MISCREG_MMU_DTLB_TAG_ACCESS, va, context);
}



Fault
ITB::translate(RequestPtr &req, ThreadContext *tc)
{
    uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);

    Addr vaddr = req->getVaddr();
    TlbEntry *e;

    assert(req->getAsi() == ASI_IMPLICIT);

    DPRINTF(TLB, "TLB: ITB Request to translate va=%#x size=%d\n",
            vaddr, req->getSize());
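
    // Single-entry translation cache: if the MMU state (MISCREG_TLB_DATA)
    // hasn't changed since the last instruction translation, reuse the
    // previously matched TLB entry, or repeat the pass-through mapping when
    // cacheEntry is NULL (hyperprivileged/RED state), without a full lookup.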
    // Be fast if we can!
    if (cacheValid && cacheState == tlbdata) {
        if (cacheEntry) {
            if (cacheEntry->range.va < vaddr + sizeof(MachInst) &&
                cacheEntry->range.va + cacheEntry->range.size >= vaddr) {
                req->setPaddr(cacheEntry->pte.paddr() & ~(cacheEntry->pte.size()-1) |
                              vaddr & cacheEntry->pte.size()-1 );
                return NoFault;
            }
        } else {
            req->setPaddr(vaddr & PAddrImplMask);
            return NoFault;
        }
    }

    bool hpriv = bits(tlbdata,0,0);
    bool red = bits(tlbdata,1,1);
    bool priv = bits(tlbdata,2,2);
    bool addr_mask = bits(tlbdata,3,3);
    bool lsu_im = bits(tlbdata,4,4);

    int part_id = bits(tlbdata,15,8);
    int tl = bits(tlbdata,18,16);
    int pri_context = bits(tlbdata,47,32);
    int context;
    ContextType ct;
    int asi;
    bool real = false;

    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
            priv, hpriv, red, lsu_im, part_id);

    if (tl > 0) {
        asi = ASI_N;
        ct = Nucleus;
        context = 0;
    } else {
        asi = ASI_P;
        ct = Primary;
        context = pri_context;
    }

    if ( hpriv || red ) {
        cacheValid = true;
        cacheState = tlbdata;
        cacheEntry = NULL;
        req->setPaddr(vaddr & PAddrImplMask);
        return NoFault;
    }

    // If the access is unaligned trap
    if (vaddr & 0x3) {
        writeSfsr(tc, false, ct, false, OtherFault, asi);
        return new MemAddressNotAligned;
    }

    if (addr_mask)
        vaddr = vaddr & VAddrAMask;

    if (!validVirtualAddress(vaddr, addr_mask)) {
        writeSfsr(tc, false, ct, false, VaOutOfRange, asi);
        return new InstructionAccessException;
    }

    if (!lsu_im) {
        e = lookup(vaddr, part_id, true);
        real = true;
        context = 0;
    } else {
        e = lookup(vaddr, part_id, false, context);
    }

    if (e == NULL || !e->valid) {
        writeTagAccess(tc, vaddr, context);
        if (real)
            return new InstructionRealTranslationMiss;
        else
            return new FastInstructionAccessMMUMiss;
    }

    // we're not privileged but are accessing a privileged page
    if (!priv && e->pte.priv()) {
        writeTagAccess(tc, vaddr, context);
        writeSfsr(tc, false, ct, false, PrivViolation, asi);
        return new InstructionAccessException;
    }

    // cache translation data for the next translation
    cacheValid = true;
    cacheState = tlbdata;
    cacheEntry = e;

    req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
                  vaddr & e->pte.size()-1 );
    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
    return NoFault;
}



Fault
DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
{
    /* @todo this could really use some profiling and fixing to make
       it faster! */
    uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
    Addr vaddr = req->getVaddr();
    Addr size = req->getSize();
    ASI asi;
    asi = (ASI)req->getAsi();
    bool implicit = false;
    bool hpriv = bits(tlbdata,0,0);

    DPRINTF(TLB, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
            vaddr, size, asi);

    if (lookupTable.size() != 64 - freeList.size())
        panic("Lookup table size: %d tlb size: %d\n", lookupTable.size(),
              freeList.size());
    if (asi == ASI_IMPLICIT)
        implicit = true;

    if (hpriv && implicit) {
        req->setPaddr(vaddr & PAddrImplMask);
        return NoFault;
    }
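
    // The DTB keeps a two-entry, most-recently-used translation cache keyed
    // on the MMU state and the request ASI; a hit bypasses the full lookup
    // and fault checks below.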
    // Be fast if we can!
    if (cacheValid && cacheState == tlbdata) {
        if (cacheEntry[0]) {
            TlbEntry *ce = cacheEntry[0];
            Addr ce_va = ce->range.va;
            if (cacheAsi[0] == asi &&
                ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
                (!write || ce->pte.writable())) {
                req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
                if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
                    req->setFlags(req->getFlags() | UNCACHEABLE);
                DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
                return NoFault;
            } // if matched
        } // if cache entry valid
        if (cacheEntry[1]) {
            TlbEntry *ce = cacheEntry[1];
            Addr ce_va = ce->range.va;
            if (cacheAsi[1] == asi &&
                ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
                (!write || ce->pte.writable())) {
                req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
                if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
                    req->setFlags(req->getFlags() | UNCACHEABLE);
                DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
                return NoFault;
            } // if matched
        } // if cache entry valid
    }

    bool red = bits(tlbdata,1,1);
    bool priv = bits(tlbdata,2,2);
    bool addr_mask = bits(tlbdata,3,3);
    bool lsu_dm = bits(tlbdata,5,5);

    int part_id = bits(tlbdata,15,8);
    int tl = bits(tlbdata,18,16);
    int pri_context = bits(tlbdata,47,32);
    int sec_context = bits(tlbdata,63,48);

    bool real = false;
    ContextType ct = Primary;
    int context = 0;

    TlbEntry *e;

    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
            priv, hpriv, red, lsu_dm, part_id);

    if (implicit) {
        if (tl > 0) {
            asi = ASI_N;
            ct = Nucleus;
            context = 0;
        } else {
            asi = ASI_P;
            ct = Primary;
            context = pri_context;
        }
    } else {
        // We need to check for priv level/asi priv
        if (!priv && !hpriv && !AsiIsUnPriv(asi)) {
            // It appears that context should be Nucleus in these cases?
            writeSfr(tc, vaddr, write, Nucleus, false, IllegalAsi, asi);
            return new PrivilegedAction;
        }

        if (!hpriv && AsiIsHPriv(asi)) {
            writeSfr(tc, vaddr, write, Nucleus, false, IllegalAsi, asi);
            return new DataAccessException;
        }

        if (AsiIsPrimary(asi)) {
            context = pri_context;
            ct = Primary;
        } else if (AsiIsSecondary(asi)) {
            context = sec_context;
            ct = Secondary;
        } else if (AsiIsNucleus(asi)) {
            ct = Nucleus;
            context = 0;
        } else { // ????
            ct = Primary;
            context = pri_context;
        }
    }

    if (!implicit && asi != ASI_P && asi != ASI_S) {
        if (AsiIsLittle(asi))
            panic("Little Endian ASIs not supported\n");

        //XXX It's unclear from looking at the documentation how a no fault
        //load differs from a regular one, other than what happens concerning
        //nfo and e bits in the TTE
//      if (AsiIsNoFault(asi))
//          panic("No Fault ASIs not supported\n");

        if (AsiIsPartialStore(asi))
            panic("Partial Store ASIs not supported\n");

        if (AsiIsCmt(asi))
            panic("Cmt ASI registers not implemented\n");

        if (AsiIsInterrupt(asi))
            goto handleIntRegAccess;
        if (AsiIsMmu(asi))
            goto handleMmuRegAccess;
        if (AsiIsScratchPad(asi))
            goto handleScratchRegAccess;
        if (AsiIsQueue(asi))
            goto handleQueueRegAccess;
        if (AsiIsSparcError(asi))
            goto handleSparcErrorRegAccess;

        if (!AsiIsReal(asi) && !AsiIsNucleus(asi) && !AsiIsAsIfUser(asi) &&
            !AsiIsTwin(asi) && !AsiIsBlock(asi) && !AsiIsNoFault(asi))
            panic("Accessing ASI %#X. Should we?\n", asi);
    }

    // If the access is unaligned trap
    if (vaddr & size-1) {
        writeSfr(tc, vaddr, false, ct, false, OtherFault, asi);
        return new MemAddressNotAligned;
    }

    if (addr_mask)
        vaddr = vaddr & VAddrAMask;

    if (!validVirtualAddress(vaddr, addr_mask)) {
        writeSfr(tc, vaddr, false, ct, true, VaOutOfRange, asi);
        return new DataAccessException;
    }


    if ((!lsu_dm && !hpriv && !red) || AsiIsReal(asi)) {
        real = true;
        context = 0;
    }

    if (hpriv && (implicit || (!AsiIsAsIfUser(asi) && !AsiIsReal(asi)))) {
        req->setPaddr(vaddr & PAddrImplMask);
        return NoFault;
    }

    e = lookup(vaddr, part_id, real, context);

    if (e == NULL || !e->valid) {
        writeTagAccess(tc, vaddr, context);
        DPRINTF(TLB, "TLB: DTB Failed to find matching TLB entry\n");
        if (real)
            return new DataRealTranslationMiss;
        else
            return new FastDataAccessMMUMiss;
    }

    if (!priv && e->pte.priv()) {
        writeTagAccess(tc, vaddr, context);
        writeSfr(tc, vaddr, write, ct, e->pte.sideffect(), PrivViolation, asi);
        return new DataAccessException;
    }

    if (write && !e->pte.writable()) {
        writeTagAccess(tc, vaddr, context);
        writeSfr(tc, vaddr, write, ct, e->pte.sideffect(), OtherFault, asi);
        return new FastDataAccessProtection;
    }

    if (e->pte.nofault() && !AsiIsNoFault(asi)) {
        writeTagAccess(tc, vaddr, context);
        writeSfr(tc, vaddr, write, ct, e->pte.sideffect(), LoadFromNfo, asi);
        return new DataAccessException;
    }

    if (e->pte.sideffect() && AsiIsNoFault(asi)) {
        writeTagAccess(tc, vaddr, context);
        writeSfr(tc, vaddr, write, ct, e->pte.sideffect(), SideEffect, asi);
        return new DataAccessException;
    }


    if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
        req->setFlags(req->getFlags() | UNCACHEABLE);

    // cache translation data for the next translation
    cacheState = tlbdata;
    if (!cacheValid) {
        cacheEntry[1] = NULL;
        cacheEntry[0] = NULL;
    }

    if (cacheEntry[0] != e && cacheEntry[1] != e) {
        cacheEntry[1] = cacheEntry[0];
        cacheEntry[0] = e;
        cacheAsi[1] = cacheAsi[0];
        cacheAsi[0] = asi;
        if (implicit)
            cacheAsi[0] = (ASI)0;
    }
    cacheValid = true;
    req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
                  vaddr & e->pte.size()-1);
    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
    return NoFault;
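
    // The labels below service ASI accesses that target internal/MMU
    // registers rather than memory.  They are reached via goto from the ASI
    // decode above and finish by flagging the request as a memory-mapped
    // IPR access (handleMmuRegAccess).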
    /** Normal flow ends here. */
handleIntRegAccess:
    if (!hpriv) {
        writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
        if (priv)
            return new DataAccessException;
        else
            return new PrivilegedAction;
    }

    if (asi == ASI_SWVR_UDB_INTR_W && !write ||
        asi == ASI_SWVR_UDB_INTR_R && write) {
        writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
        return new DataAccessException;
    }

    goto regAccessOk;


handleScratchRegAccess:
    if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
        writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
        return new DataAccessException;
    }
    goto regAccessOk;

handleQueueRegAccess:
    if (!priv && !hpriv) {
        writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
        return new PrivilegedAction;
    }
    if (!hpriv && vaddr & 0xF || vaddr > 0x3f8 || vaddr < 0x3c0) {
        writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
        return new DataAccessException;
    }
    goto regAccessOk;

handleSparcErrorRegAccess:
    if (!hpriv) {
        writeSfr(tc, vaddr, write, Primary, true, IllegalAsi, asi);
        if (priv)
            return new DataAccessException;
        else
            return new PrivilegedAction;
    }
    goto regAccessOk;


regAccessOk:
handleMmuRegAccess:
    DPRINTF(TLB, "TLB: DTB Translating MM IPR access\n");
    req->setMmapedIpr(true);
    req->setPaddr(req->getVaddr());
    return NoFault;
}

Tick
DTB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
{
    Addr va = pkt->getAddr();
    ASI asi = (ASI)pkt->req->getAsi();
    uint64_t temp;

    DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
            (uint32_t)pkt->req->getAsi(), pkt->getAddr());

    switch (asi) {
      case ASI_LSU_CONTROL_REG:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_LSU_CTRL));
        break;
      case ASI_MMU:
        switch (va) {
          case 0x8:
            pkt->set(tc->readMiscReg(MISCREG_MMU_P_CONTEXT));
            break;
          case 0x10:
            pkt->set(tc->readMiscReg(MISCREG_MMU_S_CONTEXT));
            break;
          default:
            goto doMmuReadError;
        }
        break;
      case ASI_QUEUE:
        pkt->set(tc->readMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
                    (va >> 4) - 0x3c));
        break;
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_DTLB_C0_TSB_PS0));
        break;
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_DTLB_C0_TSB_PS1));
        break;
      case ASI_DMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_DTLB_C0_CONFIG));
        break;
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_ITLB_C0_TSB_PS0));
        break;
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_ITLB_C0_TSB_PS1));
        break;
      case ASI_IMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_ITLB_C0_CONFIG));
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_DTLB_CX_TSB_PS0));
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_DTLB_CX_TSB_PS1));
        break;
      case ASI_DMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_DTLB_CX_CONFIG));
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_ITLB_CX_TSB_PS0));
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_ITLB_CX_TSB_PS1));
        break;
      case ASI_IMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_ITLB_CX_CONFIG));
        break;
      case ASI_SPARC_ERROR_STATUS_REG:
        pkt->set((uint64_t)0);
        break;
      case ASI_HYP_SCRATCHPAD:
      case ASI_SCRATCHPAD:
        pkt->set(tc->readMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3)));
        break;
      case ASI_IMMU:
        switch (va) {
          case 0x0:
            temp = tc->readMiscReg(MISCREG_MMU_ITLB_TAG_ACCESS);
            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
            break;
          case 0x18:
            pkt->set(tc->readMiscReg(MISCREG_MMU_ITLB_SFSR));
            break;
          case 0x30:
            pkt->set(tc->readMiscReg(MISCREG_MMU_ITLB_TAG_ACCESS));
            break;
          default:
            goto doMmuReadError;
        }
        break;
      case ASI_DMMU:
        switch (va) {
          case 0x0:
            temp = tc->readMiscReg(MISCREG_MMU_DTLB_TAG_ACCESS);
            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
            break;
          case 0x18:
            pkt->set(tc->readMiscReg(MISCREG_MMU_DTLB_SFSR));
            break;
          case 0x20:
            pkt->set(tc->readMiscReg(MISCREG_MMU_DTLB_SFAR));
            break;
          case 0x30:
            pkt->set(tc->readMiscReg(MISCREG_MMU_DTLB_TAG_ACCESS));
            break;
          case 0x80:
            pkt->set(tc->readMiscReg(MISCREG_MMU_PART_ID));
            break;
          default:
            goto doMmuReadError;
        }
        break;
      case ASI_DMMU_TSB_PS0_PTR_REG:
        pkt->set(MakeTsbPtr(Ps0,
            tc->readMiscReg(MISCREG_MMU_DTLB_TAG_ACCESS),
            tc->readMiscReg(MISCREG_MMU_DTLB_C0_TSB_PS0),
            tc->readMiscReg(MISCREG_MMU_DTLB_C0_CONFIG),
            tc->readMiscReg(MISCREG_MMU_DTLB_CX_TSB_PS0),
            tc->readMiscReg(MISCREG_MMU_DTLB_CX_CONFIG)));
        break;
      case ASI_DMMU_TSB_PS1_PTR_REG:
        pkt->set(MakeTsbPtr(Ps1,
            tc->readMiscReg(MISCREG_MMU_DTLB_TAG_ACCESS),
            tc->readMiscReg(MISCREG_MMU_DTLB_C0_TSB_PS1),
            tc->readMiscReg(MISCREG_MMU_DTLB_C0_CONFIG),
            tc->readMiscReg(MISCREG_MMU_DTLB_CX_TSB_PS1),
            tc->readMiscReg(MISCREG_MMU_DTLB_CX_CONFIG)));
        break;
      case ASI_IMMU_TSB_PS0_PTR_REG:
        pkt->set(MakeTsbPtr(Ps0,
            tc->readMiscReg(MISCREG_MMU_ITLB_TAG_ACCESS),
            tc->readMiscReg(MISCREG_MMU_ITLB_C0_TSB_PS0),
            tc->readMiscReg(MISCREG_MMU_ITLB_C0_CONFIG),
            tc->readMiscReg(MISCREG_MMU_ITLB_CX_TSB_PS0),
            tc->readMiscReg(MISCREG_MMU_ITLB_CX_CONFIG)));
        break;
      case ASI_IMMU_TSB_PS1_PTR_REG:
        pkt->set(MakeTsbPtr(Ps1,
            tc->readMiscReg(MISCREG_MMU_ITLB_TAG_ACCESS),
            tc->readMiscReg(MISCREG_MMU_ITLB_C0_TSB_PS1),
            tc->readMiscReg(MISCREG_MMU_ITLB_C0_CONFIG),
            tc->readMiscReg(MISCREG_MMU_ITLB_CX_TSB_PS1),
            tc->readMiscReg(MISCREG_MMU_ITLB_CX_CONFIG)));
        break;
      case ASI_SWVR_INTR_RECEIVE:
        pkt->set(tc->getCpuPtr()->get_interrupts(IT_INT_VEC));
        break;
      case ASI_SWVR_UDB_INTR_R:
        temp = findMsbSet(tc->getCpuPtr()->get_interrupts(IT_INT_VEC));
        tc->getCpuPtr()->clear_interrupt(IT_INT_VEC, temp);
        pkt->set(temp);
        break;
      default:
doMmuReadError:
        panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
            (uint32_t)asi, va);
    }
    pkt->makeAtomicResponse();
    return tc->getCpuPtr()->cycles(1);
}

Tick
DTB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
{
    uint64_t data = gtoh(pkt->get<uint64_t>());
    Addr va = pkt->getAddr();
    ASI asi = (ASI)pkt->req->getAsi();

    Addr ta_insert;
    Addr va_insert;
    Addr ct_insert;
    int part_insert;
    int entry_insert = -1;
    bool real_insert;
    bool ignore;
    int part_id;
    int ctx_id;
    PageTableEntry pte;

    DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
            (uint32_t)asi, va, data);

    switch (asi) {
      case ASI_LSU_CONTROL_REG:
        assert(va == 0);
        tc->setMiscReg(MISCREG_MMU_LSU_CTRL, data);
        break;
      case ASI_MMU:
        switch (va) {
          case 0x8:
            tc->setMiscReg(MISCREG_MMU_P_CONTEXT, data);
            break;
          case 0x10:
            tc->setMiscReg(MISCREG_MMU_S_CONTEXT, data);
            break;
          default:
            goto doMmuWriteError;
        }
        break;
      case ASI_QUEUE:
        assert(mbits(data,13,6) == data);
        tc->setMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
                    (va >> 4) - 0x3c, data);
        break;
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        tc->setMiscReg(MISCREG_MMU_DTLB_C0_TSB_PS0, data);
        break;
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        tc->setMiscReg(MISCREG_MMU_DTLB_C0_TSB_PS1, data);
        break;
      case ASI_DMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        tc->setMiscReg(MISCREG_MMU_DTLB_C0_CONFIG, data);
        break;
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        tc->setMiscReg(MISCREG_MMU_ITLB_C0_TSB_PS0, data);
        break;
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        tc->setMiscReg(MISCREG_MMU_ITLB_C0_TSB_PS1, data);
        break;
      case ASI_IMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        tc->setMiscReg(MISCREG_MMU_ITLB_C0_CONFIG, data);
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        tc->setMiscReg(MISCREG_MMU_DTLB_CX_TSB_PS0, data);
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        tc->setMiscReg(MISCREG_MMU_DTLB_CX_TSB_PS1, data);
        break;
      case ASI_DMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        tc->setMiscReg(MISCREG_MMU_DTLB_CX_CONFIG, data);
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        tc->setMiscReg(MISCREG_MMU_ITLB_CX_TSB_PS0, data);
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        tc->setMiscReg(MISCREG_MMU_ITLB_CX_TSB_PS1, data);
        break;
      case ASI_IMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        tc->setMiscReg(MISCREG_MMU_ITLB_CX_CONFIG, data);
        break;
      case ASI_SPARC_ERROR_EN_REG:
      case ASI_SPARC_ERROR_STATUS_REG:
        warn("Ignoring write to SPARC ERROR register\n");
        break;
      case ASI_HYP_SCRATCHPAD:
      case ASI_SCRATCHPAD:
        tc->setMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3), data);
        break;
      case ASI_IMMU:
        switch (va) {
          case 0x18:
            tc->setMiscReg(MISCREG_MMU_ITLB_SFSR, data);
            break;
          case 0x30:
            sext<59>(bits(data, 59,0));
            tc->setMiscReg(MISCREG_MMU_ITLB_TAG_ACCESS, data);
            break;
          default:
            goto doMmuWriteError;
        }
        break;
      case ASI_ITLB_DATA_ACCESS_REG:
        entry_insert = bits(va, 8,3);
      case ASI_ITLB_DATA_IN_REG:
        assert(entry_insert != -1 || mbits(va,10,9) == va);
        ta_insert = tc->readMiscReg(MISCREG_MMU_ITLB_TAG_ACCESS);
        va_insert = mbits(ta_insert, 63,13);
        ct_insert = mbits(ta_insert, 12,0);
        part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
        real_insert = bits(va, 9,9);
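        // VA<10> selects the TTE format to decode (sun4v when set, sun4u
        // otherwise); VA<9> marks the mapping as a real-address translation.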
        pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
                PageTableEntry::sun4u);
        tc->getITBPtr()->insert(va_insert, part_insert, ct_insert, real_insert,
                pte, entry_insert);
        break;
      case ASI_DTLB_DATA_ACCESS_REG:
        entry_insert = bits(va, 8,3);
      case ASI_DTLB_DATA_IN_REG:
        assert(entry_insert != -1 || mbits(va,10,9) == va);
        ta_insert = tc->readMiscReg(MISCREG_MMU_DTLB_TAG_ACCESS);
        va_insert = mbits(ta_insert, 63,13);
        ct_insert = mbits(ta_insert, 12,0);
        part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
        real_insert = bits(va, 9,9);
        pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
                PageTableEntry::sun4u);
        insert(va_insert, part_insert, ct_insert, real_insert, pte, entry_insert);
        break;
      case ASI_IMMU_DEMAP:
        ignore = false;
        ctx_id = -1;
        part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
        switch (bits(va,5,4)) {
          case 0:
            ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
            break;
          case 1:
            ignore = true;
            break;
          case 3:
            ctx_id = 0;
            break;
          default:
            ignore = true;
        }

        switch (bits(va,7,6)) {
          case 0: // demap page
            if (!ignore)
                tc->getITBPtr()->demapPage(mbits(va,63,13), part_id,
                        bits(va,9,9), ctx_id);
            break;
          case 1: // demap context
            if (!ignore)
                tc->getITBPtr()->demapContext(part_id, ctx_id);
            break;
          case 2:
            tc->getITBPtr()->demapAll(part_id);
            break;
          default:
            panic("Invalid type for IMMU demap\n");
        }
        break;
      case ASI_DMMU:
        switch (va) {
          case 0x18:
            tc->setMiscReg(MISCREG_MMU_DTLB_SFSR, data);
            break;
          case 0x30:
            sext<59>(bits(data, 59,0));
            tc->setMiscReg(MISCREG_MMU_DTLB_TAG_ACCESS, data);
            break;
          case 0x80:
            tc->setMiscReg(MISCREG_MMU_PART_ID, data);
            break;
          default:
            goto doMmuWriteError;
        }
        break;
      case ASI_DMMU_DEMAP:
        ignore = false;
        ctx_id = -1;
        part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
        switch (bits(va,5,4)) {
          case 0:
            ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
            break;
          case 1:
            ctx_id = tc->readMiscReg(MISCREG_MMU_S_CONTEXT);
            break;
          case 3:
            ctx_id = 0;
            break;
          default:
            ignore = true;
        }

        switch (bits(va,7,6)) {
          case 0: // demap page
            if (!ignore)
                demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
            break;
          case 1: // demap context
            if (!ignore)
                demapContext(part_id, ctx_id);
            break;
          case 2:
            demapAll(part_id);
            break;
          default:
            panic("Invalid type for DMMU demap\n");
        }
        break;
      case ASI_SWVR_INTR_RECEIVE:
        int msb;
        // clear all the interrupts that aren't set in the write
        while (tc->getCpuPtr()->get_interrupts(IT_INT_VEC) & data) {
            msb = findMsbSet(tc->getCpuPtr()->get_interrupts(IT_INT_VEC) & data);
            tc->getCpuPtr()->clear_interrupt(IT_INT_VEC, msb);
        }
        break;
      case ASI_SWVR_UDB_INTR_W:
        tc->getSystemPtr()->threadContexts[bits(data,12,8)]->getCpuPtr()->
            post_interrupt(bits(data,5,0),0);
        break;
      default:
doMmuWriteError:
        panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
            (uint32_t)pkt->req->getAsi(), pkt->getAddr(), data);
    }
    pkt->makeAtomicResponse();
    return tc->getCpuPtr()->cycles(1);
}

void
DTB::GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs)
{
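    // Build the tag access value (VA<63:13> | context<12:0>) and compute all
    // four TSB pointers for it: ptrs[0]/[1] are the data TSB PS0/PS1
    // pointers, ptrs[2]/[3] the instruction TSB PS0/PS1 pointers.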
    uint64_t tag_access = mbits(addr,63,13) | mbits(ctx,12,0);
    ptrs[0] = MakeTsbPtr(Ps0, tag_access,
                tc->readMiscReg(MISCREG_MMU_DTLB_C0_TSB_PS0),
                tc->readMiscReg(MISCREG_MMU_DTLB_C0_CONFIG),
                tc->readMiscReg(MISCREG_MMU_DTLB_CX_TSB_PS0),
                tc->readMiscReg(MISCREG_MMU_DTLB_CX_CONFIG));
    ptrs[1] = MakeTsbPtr(Ps1, tag_access,
                tc->readMiscReg(MISCREG_MMU_DTLB_C0_TSB_PS1),
                tc->readMiscReg(MISCREG_MMU_DTLB_C0_CONFIG),
                tc->readMiscReg(MISCREG_MMU_DTLB_CX_TSB_PS1),
                tc->readMiscReg(MISCREG_MMU_DTLB_CX_CONFIG));
    ptrs[2] = MakeTsbPtr(Ps0, tag_access,
                tc->readMiscReg(MISCREG_MMU_ITLB_C0_TSB_PS0),
                tc->readMiscReg(MISCREG_MMU_ITLB_C0_CONFIG),
                tc->readMiscReg(MISCREG_MMU_ITLB_CX_TSB_PS0),
                tc->readMiscReg(MISCREG_MMU_ITLB_CX_CONFIG));
    ptrs[3] = MakeTsbPtr(Ps1, tag_access,
                tc->readMiscReg(MISCREG_MMU_ITLB_C0_TSB_PS1),
                tc->readMiscReg(MISCREG_MMU_ITLB_C0_CONFIG),
                tc->readMiscReg(MISCREG_MMU_ITLB_CX_TSB_PS1),
                tc->readMiscReg(MISCREG_MMU_ITLB_CX_CONFIG));
}



uint64_t
DTB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
        uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
{
    uint64_t tsb;
    uint64_t config;

    if (bits(tag_access, 12,0) == 0) {
        tsb = c0_tsb;
        config = c0_config;
    } else {
        tsb = cX_tsb;
        config = cX_config;
    }

    uint64_t ptr = mbits(tsb,63,13);
    bool split = bits(tsb,12,12);
    int tsb_size = bits(tsb,3,0);
    int page_size = (ps == Ps0) ? bits(config, 2,0) : bits(config,10,8);

    if (ps == Ps1 && split)
        ptr |= ULL(1) << (13 + tsb_size);
    ptr |= (tag_access >> (9 + page_size * 3)) & mask(12+tsb_size, 4);

    return ptr;
}


void
TLB::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(usedEntries);
    SERIALIZE_SCALAR(lastReplaced);

    // convert the pointer based free list into an index based one
    int *free_list = (int*)malloc(sizeof(int) * size);
    int cntr = 0;
    std::list<TlbEntry*>::iterator i;
    i = freeList.begin();
    while (i != freeList.end()) {
        free_list[cntr++] = ((size_t)*i - (size_t)tlb) / sizeof(TlbEntry);
        i++;
    }
    SERIALIZE_SCALAR(cntr);
    SERIALIZE_ARRAY(free_list, cntr);

    for (int x = 0; x < size; x++) {
        nameOut(os, csprintf("%s.PTE%d", name(), x));
        tlb[x].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const std::string &section)
{
    int oldSize;

    paramIn(cp, section, "size", oldSize);
    if (oldSize != size)
        panic("Don't support unserializing different sized TLBs\n");
    UNSERIALIZE_SCALAR(usedEntries);
    UNSERIALIZE_SCALAR(lastReplaced);

    int cntr;
    UNSERIALIZE_SCALAR(cntr);

    int *free_list = (int*)malloc(sizeof(int) * cntr);
    freeList.clear();
    UNSERIALIZE_ARRAY(free_list, cntr);
    for (int x = 0; x < cntr; x++)
        freeList.push_back(&tlb[free_list[x]]);

    lookupTable.clear();
    for (int x = 0; x < size; x++) {
        tlb[x].unserialize(cp, csprintf("%s.PTE%d", section, x));
        if (tlb[x].valid)
            lookupTable.insert(tlb[x].range, &tlb[x]);
    }
}

/* end namespace SparcISA */ }

SparcISA::ITB *
SparcITBParams::create()
{
    return new SparcISA::ITB(name, size);
}

SparcISA::DTB *
SparcDTBParams::create()
{
    return new SparcISA::DTB(name, size);
}
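
// The two create() methods above are called by the generated parameter
// classes included at the top of this file (params/SparcITB.hh and
// params/SparcDTB.hh) when the Python configuration instantiates the ITB
// and DTB SimObjects with their name and size parameters.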