/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

#include <cstring>

#include "arch/sparc/asi.hh"
#include "arch/sparc/miscregfile.hh"
#include "arch/sparc/tlb.hh"
#include "base/bitfield.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "cpu/base.hh"
#include "mem/packet_access.hh"
#include "mem/request.hh"
#include "sim/system.hh"

/* @todo remove some of the magic constants.  -- ali
 * */
namespace SparcISA {

TLB::TLB(const Params *p)
    : BaseTLB(p), size(p->size), usedEntries(0), lastReplaced(0),
      cacheValid(false)
{
    // To make this work you'll have to change the hypervisor and OS
    if (size > 64)
        fatal("SPARC T1 TLB registers don't support more than 64 TLB entries");

    tlb = new TlbEntry[size];
    std::memset(tlb, 0, sizeof(TlbEntry) * size);

    for (int x = 0; x < size; x++)
        freeList.push_back(&tlb[x]);

    c0_tsb_ps0 = 0;
    c0_tsb_ps1 = 0;
    c0_config = 0;
    cx_tsb_ps0 = 0;
    cx_tsb_ps1 = 0;
    cx_config = 0;
    sfsr = 0;
    tag_access = 0;
}

void
TLB::clearUsedBits()
{
    MapIter i;
    for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
        TlbEntry *t = i->second;
        if (!t->pte.locked()) {
            t->used = false;
            usedEntries--;
        }
    }
}

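// Insert a new mapping. Replacement policy, as implemented below: a
// caller-specified entry index wins; otherwise an entry is taken from the
// free list; otherwise we walk round-robin from lastReplaced over unlocked
// entries, falling back to the last entry if every entry is locked.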
void
TLB::insert(Addr va, int partition_id, int context_id, bool real,
        const PageTableEntry& PTE, int entry)
{
    MapIter i;
    TlbEntry *new_entry = NULL;
//    TlbRange tr;
    int x;

    cacheValid = false;
    va &= ~(PTE.size()-1);
 /*   tr.va = va;
    tr.size = PTE.size() - 1;
    tr.contextId = context_id;
    tr.partitionId = partition_id;
    tr.real = real;
*/

    DPRINTF(TLB,
        "TLB: Inserting Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
        va, PTE.paddr(), partition_id, context_id, (int)real, entry);

    // Demap any entry that conflicts
    for (x = 0; x < size; x++) {
        if (tlb[x].range.real == real &&
            tlb[x].range.partitionId == partition_id &&
            tlb[x].range.va < va + PTE.size() - 1 &&
            tlb[x].range.va + tlb[x].range.size >= va &&
            (real || tlb[x].range.contextId == context_id ))
        {
            if (tlb[x].valid) {
                freeList.push_front(&tlb[x]);
                DPRINTF(TLB, "TLB: Conflicting entry %#X , deleting it\n", x);

                tlb[x].valid = false;
                if (tlb[x].used) {
                    tlb[x].used = false;
                    usedEntries--;
                }
                lookupTable.erase(tlb[x].range);
            }
        }
    }

/*
    i = lookupTable.find(tr);
    if (i != lookupTable.end()) {
        i->second->valid = false;
        if (i->second->used) {
            i->second->used = false;
            usedEntries--;
        }
        freeList.push_front(i->second);
        DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
                i->second);
        lookupTable.erase(i);
    }
*/

    if (entry != -1) {
        assert(entry < size && entry >= 0);
        new_entry = &tlb[entry];
    } else {
        if (!freeList.empty()) {
            new_entry = freeList.front();
        } else {
            x = lastReplaced;
            do {
                ++x;
                if (x == size)
                    x = 0;
                if (x == lastReplaced)
                    goto insertAllLocked;
            } while (tlb[x].pte.locked());
            lastReplaced = x;
            new_entry = &tlb[x];
        }
        /*
        for (x = 0; x < size; x++) {
            if (!tlb[x].valid || !tlb[x].used)  {
                new_entry = &tlb[x];
                break;
            }
        }*/
    }

insertAllLocked:
    // Update the last entry if they're all locked
    if (!new_entry) {
        new_entry = &tlb[size-1];
    }

    freeList.remove(new_entry);
    if (new_entry->valid && new_entry->used)
        usedEntries--;
    if (new_entry->valid)
        lookupTable.erase(new_entry->range);


    assert(PTE.valid());
    new_entry->range.va = va;
    new_entry->range.size = PTE.size() - 1;
    new_entry->range.partitionId = partition_id;
    new_entry->range.contextId = context_id;
    new_entry->range.real = real;
    new_entry->pte = PTE;
    new_entry->used = true;
    new_entry->valid = true;
    usedEntries++;

    i = lookupTable.insert(new_entry->range, new_entry);
    assert(i != lookupTable.end());

    // If all entries have their used bit set, clear it on them all,
    // but the one we just inserted
    if (usedEntries == size) {
        clearUsedBits();
        new_entry->used = true;
        usedEntries++;
    }
}


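// Look up a translation by (va, partition, context, real). The used bits
// drive a pseudo-LRU scheme: a hit marks the entry used, and once every
// entry is marked, clearUsedBits() resets all unlocked entries so the
// usage information stays meaningful for replacement.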
TlbEntry*
TLB::lookup(Addr va, int partition_id, bool real, int context_id,
            bool update_used)
{
    MapIter i;
    TlbRange tr;
    TlbEntry *t;

    DPRINTF(TLB, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
            va, partition_id, context_id, real);
    // Assemble full address structure
    tr.va = va;
    tr.size = 1;
    tr.contextId = context_id;
    tr.partitionId = partition_id;
    tr.real = real;

    // Try to find the entry
    i = lookupTable.find(tr);
    if (i == lookupTable.end()) {
        DPRINTF(TLB, "TLB: No valid entry found\n");
        return NULL;
    }

    // Mark the entry's used bit and clear other used bits if needed
    t = i->second;
    DPRINTF(TLB, "TLB: Valid entry found pa: %#x size: %#x\n", t->pte.paddr(),
            t->pte.size());

    // Update the used bits only if this is a real access (not a fake
    // one from virttophys())
    if (!t->used && update_used) {
        t->used = true;
        usedEntries++;
        if (usedEntries == size) {
            clearUsedBits();
            t->used = true;
            usedEntries++;
        }
    }

    return t;
}

void
TLB::dumpAll()
{
    MapIter i;
    for (int x = 0; x < size; x++) {
        if (tlb[x].valid) {
           DPRINTFN("%4d:  %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
                   x, tlb[x].range.partitionId, tlb[x].range.contextId,
                   tlb[x].range.real ? 'R' : ' ', tlb[x].range.size,
                   tlb[x].range.va, tlb[x].pte.paddr(), tlb[x].pte());
        }
    }
}

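// The demap operations below invalidate matching entries and return them
// to the free list. Note that, as written, demapPage and demapContext will
// remove locked entries too; only demapAll checks pte.locked().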
void
TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
{
    TlbRange tr;
    MapIter i;

    DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
            va, partition_id, context_id, real);

    cacheValid = false;

    // Assemble full address structure
    tr.va = va;
    tr.size = 1;
    tr.contextId = context_id;
    tr.partitionId = partition_id;
    tr.real = real;

    // Demap any entry that conflicts
    i = lookupTable.find(tr);
    if (i != lookupTable.end()) {
        DPRINTF(IPR, "TLB: Demapped page\n");
        i->second->valid = false;
        if (i->second->used) {
            i->second->used = false;
            usedEntries--;
        }
        freeList.push_front(i->second);
        lookupTable.erase(i);
    }
}

void
TLB::demapContext(int partition_id, int context_id)
{
    DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
            partition_id, context_id);
    cacheValid = false;
    for (int x = 0; x < size; x++) {
        if (tlb[x].range.contextId == context_id &&
            tlb[x].range.partitionId == partition_id) {
            if (tlb[x].valid == true) {
                freeList.push_front(&tlb[x]);
            }
            tlb[x].valid = false;
            if (tlb[x].used) {
                tlb[x].used = false;
                usedEntries--;
            }
            lookupTable.erase(tlb[x].range);
        }
    }
}

void
TLB::demapAll(int partition_id)
{
    DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
    cacheValid = false;
    for (int x = 0; x < size; x++) {
        if (tlb[x].valid && !tlb[x].pte.locked() &&
                tlb[x].range.partitionId == partition_id) {
            freeList.push_front(&tlb[x]);
            tlb[x].valid = false;
            if (tlb[x].used) {
                tlb[x].used = false;
                usedEntries--;
            }
            lookupTable.erase(tlb[x].range);
        }
    }
}

void
TLB::invalidateAll()
{
    cacheValid = false;
    lookupTable.clear();

    for (int x = 0; x < size; x++) {
        if (tlb[x].valid == true)
            freeList.push_back(&tlb[x]);
        tlb[x].valid = false;
        tlb[x].used = false;
    }
    usedEntries = 0;
}

uint64_t
TLB::TteRead(int entry)
{
    if (entry >= size)
        panic("entry: %d\n", entry);

    assert(entry < size);
    if (tlb[entry].valid)
        return tlb[entry].pte();
    else
        return (uint64_t)-1ll;
}

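// TagRead reconstructs a tag-read value for diagnostic accesses from the
// fields the simulator keeps: the context id in the low bits, the VA, the
// partition id at bit 61, the real bit at bit 60, and the complemented
// page-size field at bit 56 (matching how the code below assembles it).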
uint64_t
TLB::TagRead(int entry)
{
    assert(entry < size);
    uint64_t tag;
    if (!tlb[entry].valid)
        return (uint64_t)-1ll;

    tag = tlb[entry].range.contextId;
    tag |= tlb[entry].range.va;
    tag |= (uint64_t)tlb[entry].range.partitionId << 61;
    tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
    tag |= (uint64_t)~tlb[entry].pte._size() << 56;
    return tag;
}

bool
TLB::validVirtualAddress(Addr va, bool am)
{
    if (am)
        return true;
    if (va >= StartVAddrHole && va <= EndVAddrHole)
        return false;
    return true;
}

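// writeSfsr records fault information. As encoded here: bit 0 marks the
// register valid, bit 1 is set if a previous fault was still pending,
// bit 2 is the write bit, bits 5:4 hold the context type, bit 6 the
// side-effect bit, the fault type starts at bit 7, and the ASI at bit 16.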
void
TLB::writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
{
    if (sfsr & 0x1)
        sfsr = 0x3;
    else
        sfsr = 1;

    if (write)
        sfsr |= 1 << 2;
    sfsr |= ct << 4;
    if (se)
        sfsr |= 1 << 6;
    sfsr |= ft << 7;
    sfsr |= asi << 16;
}

void
TLB::writeTagAccess(Addr va, int context)
{
    DPRINTF(TLB, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
            va, context, mbits(va, 63,13) | mbits(context,12,0));

    tag_access = mbits(va, 63,13) | mbits(context,12,0);
}

void
ITB::writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
{
    DPRINTF(TLB, "TLB: ITB Fault:  w=%d ct=%d ft=%d asi=%d\n",
             (int)write, ct, ft, asi);
    TLB::writeSfsr(write, ct, se, ft, asi);
}

void
DTB::writeSfsr(Addr a, bool write, ContextType ct,
        bool se, FaultTypes ft, int asi)
{
    DPRINTF(TLB, "TLB: DTB Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
            a, (int)write, ct, ft, asi);
    TLB::writeSfsr(write, ct, se, ft, asi);
    sfar = a;
}

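// Instruction translation. The single-entry cache (cacheEntry/cacheState)
// short-circuits repeated translations while the MMU state read from
// MISCREG_TLB_DATA is unchanged; a NULL cached entry stands for the
// hpriv/red bypass case where the VA is used as the PA directly.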
Fault
ITB::translateAtomic(RequestPtr req, ThreadContext *tc)
{
    uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);

    Addr vaddr = req->getVaddr();
    TlbEntry *e;

    assert(req->getAsi() == ASI_IMPLICIT);

    DPRINTF(TLB, "TLB: ITB Request to translate va=%#x size=%d\n",
            vaddr, req->getSize());

    // Be fast if we can!
    if (cacheValid && cacheState == tlbdata) {
        if (cacheEntry) {
            if (cacheEntry->range.va < vaddr + sizeof(MachInst) &&
                cacheEntry->range.va + cacheEntry->range.size >= vaddr) {
                req->setPaddr(cacheEntry->pte.translate(vaddr));
                return NoFault;
            }
        } else {
            req->setPaddr(vaddr & PAddrImplMask);
            return NoFault;
        }
    }

    bool hpriv = bits(tlbdata,0,0);
    bool red = bits(tlbdata,1,1);
    bool priv = bits(tlbdata,2,2);
    bool addr_mask = bits(tlbdata,3,3);
    bool lsu_im = bits(tlbdata,4,4);

    int part_id = bits(tlbdata,15,8);
    int tl = bits(tlbdata,18,16);
    int pri_context = bits(tlbdata,47,32);
    int context;
    ContextType ct;
    int asi;
    bool real = false;

    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
           priv, hpriv, red, lsu_im, part_id);

    if (tl > 0) {
        asi = ASI_N;
        ct = Nucleus;
        context = 0;
    } else {
        asi = ASI_P;
        ct = Primary;
        context = pri_context;
    }

    if ( hpriv || red ) {
        cacheValid = true;
        cacheState = tlbdata;
        cacheEntry = NULL;
        req->setPaddr(vaddr & PAddrImplMask);
        return NoFault;
    }

    // If the access is unaligned trap
    if (vaddr & 0x3) {
        writeSfsr(false, ct, false, OtherFault, asi);
        return new MemAddressNotAligned;
    }

    if (addr_mask)
        vaddr = vaddr & VAddrAMask;

    if (!validVirtualAddress(vaddr, addr_mask)) {
        writeSfsr(false, ct, false, VaOutOfRange, asi);
        return new InstructionAccessException;
    }

    if (!lsu_im) {
        e = lookup(vaddr, part_id, true);
        real = true;
        context = 0;
    } else {
        e = lookup(vaddr, part_id, false, context);
    }

    if (e == NULL || !e->valid) {
        writeTagAccess(vaddr, context);
        if (real)
            return new InstructionRealTranslationMiss;
        else
#if FULL_SYSTEM
            return new FastInstructionAccessMMUMiss;
#else
            return new FastInstructionAccessMMUMiss(req->getVaddr());
#endif
    }

    // we're not privileged but are accessing a privileged page
    if (!priv && e->pte.priv()) {
        writeTagAccess(vaddr, context);
        writeSfsr(false, ct, false, PrivViolation, asi);
        return new InstructionAccessException;
    }

    // cache translation data for the next translation
    cacheValid = true;
    cacheState = tlbdata;
    cacheEntry = e;

    req->setPaddr(e->pte.translate(vaddr));
    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
    return NoFault;
}

void
ITB::translateTiming(RequestPtr req, ThreadContext *tc,
        Translation *translation)
{
    assert(translation);
    translation->finish(translateAtomic(req, tc), req, tc, false);
}

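// Data translation. Alternate address spaces complicate this path: after
// the two-entry translation cache is checked, the ASI decides the context
// (primary/secondary/nucleus), whether the access is a real-address
// access, or whether it is actually a memory-mapped register access that
// is diverted to the handlers at the bottom of the function.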
Fault
DTB::translateAtomic(RequestPtr req, ThreadContext *tc, bool write)
{
    /*
     * @todo this could really use some profiling and fixing to make
     * it faster!
     */
    uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
    Addr vaddr = req->getVaddr();
    Addr size = req->getSize();
    ASI asi;
    asi = (ASI)req->getAsi();
    bool implicit = false;
    bool hpriv = bits(tlbdata,0,0);
    bool unaligned = vaddr & (size - 1);

    DPRINTF(TLB, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
            vaddr, size, asi);

    if (lookupTable.size() != 64 - freeList.size())
       panic("Lookup table size: %d tlb size: %d\n", lookupTable.size(),
               freeList.size());
    if (asi == ASI_IMPLICIT)
        implicit = true;

    // Only use the fast path here if there doesn't need to be an unaligned
    // trap later
    if (!unaligned) {
        if (hpriv && implicit) {
            req->setPaddr(vaddr & PAddrImplMask);
            return NoFault;
        }

        // Be fast if we can!
        if (cacheValid && cacheState == tlbdata) {
            if (cacheEntry[0]) {
                TlbEntry *ce = cacheEntry[0];
                Addr ce_va = ce->range.va;
                if (cacheAsi[0] == asi &&
                    ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
                    (!write || ce->pte.writable())) {
                    req->setPaddr(ce->pte.translate(vaddr));
                    if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
                        req->setFlags(Request::UNCACHEABLE);
                    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
                    return NoFault;
                } // if matched
            } // if cache entry valid
            if (cacheEntry[1]) {
                TlbEntry *ce = cacheEntry[1];
                Addr ce_va = ce->range.va;
                if (cacheAsi[1] == asi &&
                    ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
                    (!write || ce->pte.writable())) {
                    req->setPaddr(ce->pte.translate(vaddr));
                    if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
                        req->setFlags(Request::UNCACHEABLE);
                    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
                    return NoFault;
                } // if matched
            } // if cache entry valid
        }
    }

    bool red = bits(tlbdata,1,1);
    bool priv = bits(tlbdata,2,2);
    bool addr_mask = bits(tlbdata,3,3);
    bool lsu_dm = bits(tlbdata,5,5);

    int part_id = bits(tlbdata,15,8);
    int tl = bits(tlbdata,18,16);
    int pri_context = bits(tlbdata,47,32);
    int sec_context = bits(tlbdata,63,48);

    bool real = false;
    ContextType ct = Primary;
    int context = 0;

    TlbEntry *e;

    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
            priv, hpriv, red, lsu_dm, part_id);

    if (implicit) {
        if (tl > 0) {
            asi = ASI_N;
            ct = Nucleus;
            context = 0;
        } else {
            asi = ASI_P;
            ct = Primary;
            context = pri_context;
        }
    } else {
        // We need to check for priv level/asi priv
        if (!priv && !hpriv && !AsiIsUnPriv(asi)) {
            // It appears that context should be Nucleus in these cases?
            writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
            return new PrivilegedAction;
        }

        if (!hpriv && AsiIsHPriv(asi)) {
            writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
            return new DataAccessException;
        }

        if (AsiIsPrimary(asi)) {
            context = pri_context;
            ct = Primary;
        } else if (AsiIsSecondary(asi)) {
            context = sec_context;
            ct = Secondary;
        } else if (AsiIsNucleus(asi)) {
            ct = Nucleus;
            context = 0;
        } else {  // ????
            ct = Primary;
            context = pri_context;
        }
    }

    if (!implicit && asi != ASI_P && asi != ASI_S) {
        if (AsiIsLittle(asi))
            panic("Little Endian ASIs not supported\n");

        //XXX It's unclear from looking at the documentation how a no fault
        //load differs from a regular one, other than what happens concerning
        //nfo and e bits in the TTE
//        if (AsiIsNoFault(asi))
//            panic("No Fault ASIs not supported\n");

        if (AsiIsPartialStore(asi))
            panic("Partial Store ASIs not supported\n");

        if (AsiIsCmt(asi))
            panic("Cmt ASI registers not implemented\n");

        if (AsiIsInterrupt(asi))
            goto handleIntRegAccess;
        if (AsiIsMmu(asi))
            goto handleMmuRegAccess;
        if (AsiIsScratchPad(asi))
            goto handleScratchRegAccess;
        if (AsiIsQueue(asi))
            goto handleQueueRegAccess;
        if (AsiIsSparcError(asi))
            goto handleSparcErrorRegAccess;

        if (!AsiIsReal(asi) && !AsiIsNucleus(asi) && !AsiIsAsIfUser(asi) &&
                !AsiIsTwin(asi) && !AsiIsBlock(asi) && !AsiIsNoFault(asi))
            panic("Accessing ASI %#X. Should we?\n", asi);
    }

    // If the access is unaligned, trap
    if (unaligned) {
        writeSfsr(vaddr, false, ct, false, OtherFault, asi);
        return new MemAddressNotAligned;
    }

    if (addr_mask)
        vaddr = vaddr & VAddrAMask;

    if (!validVirtualAddress(vaddr, addr_mask)) {
        writeSfsr(vaddr, false, ct, true, VaOutOfRange, asi);
        return new DataAccessException;
    }

    if ((!lsu_dm && !hpriv && !red) || AsiIsReal(asi)) {
        real = true;
        context = 0;
    }

    if (hpriv && (implicit || (!AsiIsAsIfUser(asi) && !AsiIsReal(asi)))) {
        req->setPaddr(vaddr & PAddrImplMask);
        return NoFault;
    }

    e = lookup(vaddr, part_id, real, context);

    if (e == NULL || !e->valid) {
        writeTagAccess(vaddr, context);
        DPRINTF(TLB, "TLB: DTB Failed to find matching TLB entry\n");
        if (real)
            return new DataRealTranslationMiss;
        else
#if FULL_SYSTEM
            return new FastDataAccessMMUMiss;
#else
            return new FastDataAccessMMUMiss(req->getVaddr());
#endif

    }

    if (!priv && e->pte.priv()) {
        writeTagAccess(vaddr, context);
        writeSfsr(vaddr, write, ct, e->pte.sideffect(), PrivViolation, asi);
        return new DataAccessException;
    }

    if (write && !e->pte.writable()) {
        writeTagAccess(vaddr, context);
        writeSfsr(vaddr, write, ct, e->pte.sideffect(), OtherFault, asi);
        return new FastDataAccessProtection;
    }

    if (e->pte.nofault() && !AsiIsNoFault(asi)) {
        writeTagAccess(vaddr, context);
        writeSfsr(vaddr, write, ct, e->pte.sideffect(), LoadFromNfo, asi);
        return new DataAccessException;
    }

    if (e->pte.sideffect() && AsiIsNoFault(asi)) {
        writeTagAccess(vaddr, context);
        writeSfsr(vaddr, write, ct, e->pte.sideffect(), SideEffect, asi);
        return new DataAccessException;
    }

    if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
        req->setFlags(Request::UNCACHEABLE);

    // cache translation data for the next translation
    cacheState = tlbdata;
    if (!cacheValid) {
        cacheEntry[1] = NULL;
        cacheEntry[0] = NULL;
    }

    if (cacheEntry[0] != e && cacheEntry[1] != e) {
        cacheEntry[1] = cacheEntry[0];
        cacheEntry[0] = e;
        cacheAsi[1] = cacheAsi[0];
        cacheAsi[0] = asi;
        if (implicit)
            cacheAsi[0] = (ASI)0;
    }
    cacheValid = true;
    req->setPaddr(e->pte.translate(vaddr));
    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
    return NoFault;

    /** Normal flow ends here. */
handleIntRegAccess:
    if (!hpriv) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        if (priv)
            return new DataAccessException;
        else
            return new PrivilegedAction;
    }

    if ((asi == ASI_SWVR_UDB_INTR_W && !write) ||
        (asi == ASI_SWVR_UDB_INTR_R && write)) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        return new DataAccessException;
    }

    goto regAccessOk;


handleScratchRegAccess:
    if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        return new DataAccessException;
    }
    goto regAccessOk;

handleQueueRegAccess:
    if (!priv && !hpriv) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        return new PrivilegedAction;
    }
    if ((!hpriv && vaddr & 0xF) || vaddr > 0x3f8 || vaddr < 0x3c0) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        return new DataAccessException;
    }
    goto regAccessOk;

handleSparcErrorRegAccess:
    if (!hpriv) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        if (priv)
            return new DataAccessException;
        else
            return new PrivilegedAction;
    }
    goto regAccessOk;


regAccessOk:
handleMmuRegAccess:
    DPRINTF(TLB, "TLB: DTB Translating MM IPR access\n");
    req->setMmapedIpr(true);
    req->setPaddr(req->getVaddr());
    return NoFault;
}

void
DTB::translateTiming(RequestPtr req, ThreadContext *tc,
        Translation *translation, bool write)
{
    assert(translation);
    translation->finish(translateAtomic(req, tc, write), req, tc, write);
}

#if FULL_SYSTEM

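// Reads of memory-mapped MMU registers arrive here as packets whose ASI
// selects the register bank and whose address selects the register within
// it; the value read is returned in the packet itself.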
Tick
DTB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
{
    Addr va = pkt->getAddr();
    ASI asi = (ASI)pkt->req->getAsi();
    uint64_t temp;

    DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
         (uint32_t)pkt->req->getAsi(), pkt->getAddr());

    ITB *itb = tc->getITBPtr();

    switch (asi) {
      case ASI_LSU_CONTROL_REG:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_LSU_CTRL));
        break;
      case ASI_MMU:
        switch (va) {
          case 0x8:
            pkt->set(tc->readMiscReg(MISCREG_MMU_P_CONTEXT));
            break;
          case 0x10:
            pkt->set(tc->readMiscReg(MISCREG_MMU_S_CONTEXT));
            break;
          default:
            goto doMmuReadError;
        }
        break;
      case ASI_QUEUE:
        pkt->set(tc->readMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
                    (va >> 4) - 0x3c));
        break;
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(c0_tsb_ps0);
        break;
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(c0_tsb_ps1);
        break;
      case ASI_DMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        pkt->set(c0_config);
        break;
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(itb->c0_tsb_ps0);
        break;
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(itb->c0_tsb_ps1);
        break;
      case ASI_IMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        pkt->set(itb->c0_config);
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(cx_tsb_ps0);
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(cx_tsb_ps1);
        break;
      case ASI_DMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        pkt->set(cx_config);
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(itb->cx_tsb_ps0);
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(itb->cx_tsb_ps1);
        break;
      case ASI_IMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        pkt->set(itb->cx_config);
        break;
      case ASI_SPARC_ERROR_STATUS_REG:
        pkt->set((uint64_t)0);
        break;
      case ASI_HYP_SCRATCHPAD:
      case ASI_SCRATCHPAD:
        pkt->set(tc->readMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3)));
        break;
      case ASI_IMMU:
        switch (va) {
          case 0x0:
            temp = itb->tag_access;
            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
            break;
          case 0x18:
            pkt->set(itb->sfsr);
            break;
          case 0x30:
            pkt->set(itb->tag_access);
            break;
          default:
            goto doMmuReadError;
        }
        break;
      case ASI_DMMU:
        switch (va) {
          case 0x0:
            temp = tag_access;
            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
            break;
          case 0x18:
            pkt->set(sfsr);
            break;
          case 0x20:
            pkt->set(sfar);
            break;
          case 0x30:
            pkt->set(tag_access);
            break;
          case 0x80:
            pkt->set(tc->readMiscReg(MISCREG_MMU_PART_ID));
            break;
          default:
            goto doMmuReadError;
        }
        break;
      case ASI_DMMU_TSB_PS0_PTR_REG:
        pkt->set(MakeTsbPtr(Ps0,
            tag_access,
            c0_tsb_ps0,
            c0_config,
            cx_tsb_ps0,
            cx_config));
        break;
      case ASI_DMMU_TSB_PS1_PTR_REG:
        pkt->set(MakeTsbPtr(Ps1,
                tag_access,
                c0_tsb_ps1,
                c0_config,
                cx_tsb_ps1,
                cx_config));
        break;
      case ASI_IMMU_TSB_PS0_PTR_REG:
        pkt->set(MakeTsbPtr(Ps0,
                itb->tag_access,
                itb->c0_tsb_ps0,
                itb->c0_config,
                itb->cx_tsb_ps0,
                itb->cx_config));
        break;
      case ASI_IMMU_TSB_PS1_PTR_REG:
        pkt->set(MakeTsbPtr(Ps1,
                itb->tag_access,
                itb->c0_tsb_ps1,
                itb->c0_config,
                itb->cx_tsb_ps1,
                itb->cx_config));
        break;
      case ASI_SWVR_INTR_RECEIVE:
        {
            SparcISA::Interrupts * interrupts =
                dynamic_cast<SparcISA::Interrupts *>(
                        tc->getCpuPtr()->getInterruptController());
            pkt->set(interrupts->get_vec(IT_INT_VEC));
        }
        break;
      case ASI_SWVR_UDB_INTR_R:
        {
            SparcISA::Interrupts * interrupts =
                dynamic_cast<SparcISA::Interrupts *>(
                        tc->getCpuPtr()->getInterruptController());
            temp = findMsbSet(interrupts->get_vec(IT_INT_VEC));
            tc->getCpuPtr()->clearInterrupt(IT_INT_VEC, temp);
            pkt->set(temp);
        }
        break;
      default:
doMmuReadError:
        panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
            (uint32_t)asi, va);
    }
    pkt->makeAtomicResponse();
    return tc->getCpuPtr()->ticks(1);
}

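// Writes to memory-mapped MMU registers. Besides updating the TSB/config
// and tag access state, this is also the path that fills TLB entries (the
// *_DATA_IN and *_DATA_ACCESS ASIs) and performs demap operations.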
Tick
DTB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
{
    uint64_t data = gtoh(pkt->get<uint64_t>());
    Addr va = pkt->getAddr();
    ASI asi = (ASI)pkt->req->getAsi();

    Addr ta_insert;
    Addr va_insert;
    Addr ct_insert;
    int part_insert;
    int entry_insert = -1;
    bool real_insert;
    bool ignore;
    int part_id;
    int ctx_id;
    PageTableEntry pte;

    DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
         (uint32_t)asi, va, data);

    ITB *itb = tc->getITBPtr();

    switch (asi) {
      case ASI_LSU_CONTROL_REG:
        assert(va == 0);
        tc->setMiscReg(MISCREG_MMU_LSU_CTRL, data);
        break;
      case ASI_MMU:
        switch (va) {
          case 0x8:
            tc->setMiscReg(MISCREG_MMU_P_CONTEXT, data);
            break;
          case 0x10:
            tc->setMiscReg(MISCREG_MMU_S_CONTEXT, data);
            break;
          default:
            goto doMmuWriteError;
        }
        break;
      case ASI_QUEUE:
        assert(mbits(data,13,6) == data);
        tc->setMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
                    (va >> 4) - 0x3c, data);
        break;
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        c0_tsb_ps0 = data;
        break;
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        c0_tsb_ps1 = data;
        break;
      case ASI_DMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        c0_config = data;
        break;
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        itb->c0_tsb_ps0 = data;
        break;
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        itb->c0_tsb_ps1 = data;
        break;
      case ASI_IMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        itb->c0_config = data;
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        cx_tsb_ps0 = data;
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        cx_tsb_ps1 = data;
        break;
      case ASI_DMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        cx_config = data;
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        itb->cx_tsb_ps0 = data;
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        itb->cx_tsb_ps1 = data;
        break;
      case ASI_IMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        itb->cx_config = data;
        break;
      case ASI_SPARC_ERROR_EN_REG:
      case ASI_SPARC_ERROR_STATUS_REG:
        inform("Ignoring write to SPARC ERROR register\n");
        break;
      case ASI_HYP_SCRATCHPAD:
      case ASI_SCRATCHPAD:
        tc->setMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3), data);
        break;
      case ASI_IMMU:
        switch (va) {
          case 0x18:
            itb->sfsr = data;
            break;
          case 0x30:
            sext<59>(bits(data, 59,0));
            itb->tag_access = data;
            break;
          default:
            goto doMmuWriteError;
        }
        break;
      case ASI_ITLB_DATA_ACCESS_REG:
        entry_insert = bits(va, 8,3);
      case ASI_ITLB_DATA_IN_REG:
        assert(entry_insert != -1 || mbits(va,10,9) == va);
        ta_insert = itb->tag_access;
        va_insert = mbits(ta_insert, 63,13);
        ct_insert = mbits(ta_insert, 12,0);
        part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
        real_insert = bits(va, 9,9);
        pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
                PageTableEntry::sun4u);
        tc->getITBPtr()->insert(va_insert, part_insert, ct_insert, real_insert,
                pte, entry_insert);
        break;
      case ASI_DTLB_DATA_ACCESS_REG:
        entry_insert = bits(va, 8,3);
      case ASI_DTLB_DATA_IN_REG:
        assert(entry_insert != -1 || mbits(va,10,9) == va);
        ta_insert = tag_access;
        va_insert = mbits(ta_insert, 63,13);
        ct_insert = mbits(ta_insert, 12,0);
        part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
        real_insert = bits(va, 9,9);
        pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
                PageTableEntry::sun4u);
        insert(va_insert, part_insert, ct_insert, real_insert, pte,
               entry_insert);
        break;
      case ASI_IMMU_DEMAP:
        ignore = false;
        ctx_id = -1;
        part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
        switch (bits(va,5,4)) {
          case 0:
            ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
            break;
          case 1:
            ignore = true;
            break;
          case 3:
            ctx_id = 0;
            break;
          default:
            ignore = true;
        }

        switch(bits(va,7,6)) {
          case 0: // demap page
            if (!ignore)
                tc->getITBPtr()->demapPage(mbits(va,63,13), part_id,
                        bits(va,9,9), ctx_id);
            break;
          case 1: //demap context
            if (!ignore)
                tc->getITBPtr()->demapContext(part_id, ctx_id);
            break;
          case 2:
            tc->getITBPtr()->demapAll(part_id);
            break;
          default:
            panic("Invalid type for IMMU demap\n");
        }
        break;
      case ASI_DMMU:
        switch (va) {
          case 0x18:
            sfsr = data;
            break;
          case 0x30:
            sext<59>(bits(data, 59,0));
            tag_access = data;
            break;
          case 0x80:
            tc->setMiscReg(MISCREG_MMU_PART_ID, data);
            break;
          default:
            goto doMmuWriteError;
        }
        break;
      case ASI_DMMU_DEMAP:
        ignore = false;
        ctx_id = -1;
        part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
        switch (bits(va,5,4)) {
          case 0:
            ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
            break;
          case 1:
            ctx_id = tc->readMiscReg(MISCREG_MMU_S_CONTEXT);
            break;
          case 3:
            ctx_id = 0;
            break;
          default:
            ignore = true;
        }

        switch(bits(va,7,6)) {
          case 0: // demap page
            if (!ignore)
                demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
            break;
          case 1: //demap context
            if (!ignore)
                demapContext(part_id, ctx_id);
            break;
          case 2:
            demapAll(part_id);
            break;
          default:
            panic("Invalid type for DMMU demap\n");
        }
        break;
      case ASI_SWVR_INTR_RECEIVE:
        {
            int msb;
            // clear all the interrupts that aren't set in the write
            SparcISA::Interrupts * interrupts =
                dynamic_cast<SparcISA::Interrupts *>(
                        tc->getCpuPtr()->getInterruptController());
            while (interrupts->get_vec(IT_INT_VEC) & data) {
                msb = findMsbSet(interrupts->get_vec(IT_INT_VEC) & data);
                tc->getCpuPtr()->clearInterrupt(IT_INT_VEC, msb);
            }
        }
        break;
      case ASI_SWVR_UDB_INTR_W:
            tc->getSystemPtr()->threadContexts[bits(data,12,8)]->getCpuPtr()->
            postInterrupt(bits(data, 5, 0), 0);
        break;
      default:
doMmuWriteError:
        panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
            (uint32_t)pkt->req->getAsi(), pkt->getAddr(), data);
    }
    pkt->makeAtomicResponse();
    return tc->getCpuPtr()->ticks(1);
}

#endif

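// Compute the four TSB pointers (D-TSB PS0/PS1 followed by I-TSB PS0/PS1)
// for a given VA and context, using the same MakeTsbPtr() helper that the
// pointer-register reads above use.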
void
DTB::GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs)
{
    uint64_t tag_access = mbits(addr,63,13) | mbits(ctx,12,0);
    ITB * itb = tc->getITBPtr();
    ptrs[0] = MakeTsbPtr(Ps0, tag_access,
                c0_tsb_ps0,
                c0_config,
                cx_tsb_ps0,
                cx_config);
    ptrs[1] = MakeTsbPtr(Ps1, tag_access,
                c0_tsb_ps1,
                c0_config,
                cx_tsb_ps1,
                cx_config);
    ptrs[2] = MakeTsbPtr(Ps0, tag_access,
                itb->c0_tsb_ps0,
                itb->c0_config,
                itb->cx_tsb_ps0,
                itb->cx_config);
    ptrs[3] = MakeTsbPtr(Ps1, tag_access,
                itb->c0_tsb_ps1,
                itb->c0_config,
                itb->cx_tsb_ps1,
                itb->cx_config);
}

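// Form a TSB pointer from the tag access register and the TSB base/config
// registers: context 0 selects the c0 registers, any other context the cX
// ones; the pointer is the TSB base, OR'd with the split bit for PS1, and
// with the VPN bits selected by the configured page size and TSB size.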
uint64_t
DTB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
        uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
{
    uint64_t tsb;
    uint64_t config;

    if (bits(tag_access, 12,0) == 0) {
        tsb = c0_tsb;
        config = c0_config;
    } else {
        tsb = cX_tsb;
        config = cX_config;
    }

    uint64_t ptr = mbits(tsb,63,13);
    bool split = bits(tsb,12,12);
    int tsb_size = bits(tsb,3,0);
    int page_size = (ps == Ps0) ? bits(config, 2,0) : bits(config,10,8);

    if (ps == Ps1 && split)
        ptr |= ULL(1) << (13 + tsb_size);
    ptr |= (tag_access >> (9 + page_size * 3)) & mask(12+tsb_size, 4);

    return ptr;
}

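// Checkpointing: entries are serialized individually, and the free list
// (which holds pointers) is converted to a list of entry indices so it can
// be restored against the reallocated tlb array on unserialize.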
void
TLB::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(usedEntries);
    SERIALIZE_SCALAR(lastReplaced);

    // convert the pointer based free list into an index based one
    int *free_list = (int*)malloc(sizeof(int) * size);
    int cntr = 0;
    std::list<TlbEntry*>::iterator i;
    i = freeList.begin();
    while (i != freeList.end()) {
        free_list[cntr++] = ((size_t)*i - (size_t)tlb)/ sizeof(TlbEntry);
        i++;
    }
    SERIALIZE_SCALAR(cntr);
    SERIALIZE_ARRAY(free_list, cntr);
    free(free_list);

    SERIALIZE_SCALAR(c0_tsb_ps0);
    SERIALIZE_SCALAR(c0_tsb_ps1);
    SERIALIZE_SCALAR(c0_config);
    SERIALIZE_SCALAR(cx_tsb_ps0);
    SERIALIZE_SCALAR(cx_tsb_ps1);
    SERIALIZE_SCALAR(cx_config);
    SERIALIZE_SCALAR(sfsr);
    SERIALIZE_SCALAR(tag_access);

    for (int x = 0; x < size; x++) {
        nameOut(os, csprintf("%s.PTE%d", name(), x));
        tlb[x].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const std::string &section)
{
    int oldSize;

    paramIn(cp, section, "size", oldSize);
    if (oldSize != size)
        panic("Don't support unserializing different sized TLBs\n");
    UNSERIALIZE_SCALAR(usedEntries);
    UNSERIALIZE_SCALAR(lastReplaced);

    int cntr;
    UNSERIALIZE_SCALAR(cntr);

    int *free_list = (int*)malloc(sizeof(int) * cntr);
    freeList.clear();
    UNSERIALIZE_ARRAY(free_list, cntr);
    for (int x = 0; x < cntr; x++)
        freeList.push_back(&tlb[free_list[x]]);
    free(free_list);

    UNSERIALIZE_SCALAR(c0_tsb_ps0);
    UNSERIALIZE_SCALAR(c0_tsb_ps1);
    UNSERIALIZE_SCALAR(c0_config);
    UNSERIALIZE_SCALAR(cx_tsb_ps0);
    UNSERIALIZE_SCALAR(cx_tsb_ps1);
    UNSERIALIZE_SCALAR(cx_config);
    UNSERIALIZE_SCALAR(sfsr);
    UNSERIALIZE_SCALAR(tag_access);

    lookupTable.clear();
    for (int x = 0; x < size; x++) {
        tlb[x].unserialize(cp, csprintf("%s.PTE%d", section, x));
        if (tlb[x].valid)
            lookupTable.insert(tlb[x].range, &tlb[x]);
    }
}

void
DTB::serialize(std::ostream &os)
{
    TLB::serialize(os);
    SERIALIZE_SCALAR(sfar);
}

void
DTB::unserialize(Checkpoint *cp, const std::string &section)
{
    TLB::unserialize(cp, section);
    UNSERIALIZE_SCALAR(sfar);
}

/* end namespace SparcISA */ }

SparcISA::ITB *
SparcITBParams::create()
{
    return new SparcISA::ITB(this);
}

SparcISA::DTB *
SparcDTBParams::create()
{
    return new SparcISA::DTB(this);
}
