// tlb.cc, revision 7678
1/*
2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31#include <cstring>
32
33#include "arch/sparc/asi.hh"
34#include "arch/sparc/faults.hh"
35#include "arch/sparc/registers.hh"
36#include "arch/sparc/tlb.hh"
37#include "base/bitfield.hh"
38#include "base/trace.hh"
39#include "cpu/thread_context.hh"
40#include "cpu/base.hh"
41#include "mem/packet_access.hh"
42#include "mem/request.hh"
43#include "sim/system.hh"
44
45/* @todo remove some of the magic constants.  -- ali
46 * */
47namespace SparcISA {
48
49TLB::TLB(const Params *p)
50    : BaseTLB(p), size(p->size), usedEntries(0), lastReplaced(0),
51      cacheValid(false)
52{
53    // To make this work you'll have to change the hypervisor and OS
54    if (size > 64)
55        fatal("SPARC T1 TLB registers don't support more than 64 TLB entries");
56
57    tlb = new TlbEntry[size];
58    std::memset(tlb, 0, sizeof(TlbEntry) * size);
59
60    for (int x = 0; x < size; x++)
61        freeList.push_back(&tlb[x]);
62
63    c0_tsb_ps0 = 0;
64    c0_tsb_ps1 = 0;
65    c0_config = 0;
66    cx_tsb_ps0 = 0;
67    cx_tsb_ps1 = 0;
68    cx_config = 0;
69    sfsr = 0;
70    tag_access = 0;
71    sfar = 0;
72    cacheEntry[0] = NULL;
73    cacheEntry[1] = NULL;
74}
75
76void
77TLB::clearUsedBits()
78{
79    MapIter i;
80    for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
81        TlbEntry *t = i->second;
82        if (!t->pte.locked()) {
83            t->used = false;
84            usedEntries--;
85        }
86    }
87}
88
89
90void
91TLB::insert(Addr va, int partition_id, int context_id, bool real,
92        const PageTableEntry& PTE, int entry)
93{
94    MapIter i;
95    TlbEntry *new_entry = NULL;
96//    TlbRange tr;
97    int x;
98
99    cacheValid = false;
100    va &= ~(PTE.size()-1);
101 /*   tr.va = va;
102    tr.size = PTE.size() - 1;
103    tr.contextId = context_id;
104    tr.partitionId = partition_id;
105    tr.real = real;
106*/
107
108    DPRINTF(TLB,
109        "TLB: Inserting Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
110        va, PTE.paddr(), partition_id, context_id, (int)real, entry);
111
112    // Demap any entry that conflicts
113    for (x = 0; x < size; x++) {
114        if (tlb[x].range.real == real &&
115            tlb[x].range.partitionId == partition_id &&
116            tlb[x].range.va < va + PTE.size() - 1 &&
117            tlb[x].range.va + tlb[x].range.size >= va &&
118            (real || tlb[x].range.contextId == context_id ))
119        {
120            if (tlb[x].valid) {
121                freeList.push_front(&tlb[x]);
122                DPRINTF(TLB, "TLB: Conflicting entry %#X , deleting it\n", x);
123
124                tlb[x].valid = false;
125                if (tlb[x].used) {
126                    tlb[x].used = false;
127                    usedEntries--;
128                }
129                lookupTable.erase(tlb[x].range);
130            }
131        }
132    }
133
134/*
135    i = lookupTable.find(tr);
136    if (i != lookupTable.end()) {
137        i->second->valid = false;
138        if (i->second->used) {
139            i->second->used = false;
140            usedEntries--;
141        }
142        freeList.push_front(i->second);
143        DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
144                i->second);
145        lookupTable.erase(i);
146    }
147*/
148
149    if (entry != -1) {
150        assert(entry < size && entry >= 0);
151        new_entry = &tlb[entry];
152    } else {
153        if (!freeList.empty()) {
154            new_entry = freeList.front();
155        } else {
156            x = lastReplaced;
157            do {
158                ++x;
159                if (x == size)
160                    x = 0;
161                if (x == lastReplaced)
162                    goto insertAllLocked;
163            } while (tlb[x].pte.locked());
164            lastReplaced = x;
165            new_entry = &tlb[x];
166        }
167        /*
168        for (x = 0; x < size; x++) {
169            if (!tlb[x].valid || !tlb[x].used)  {
170                new_entry = &tlb[x];
171                break;
172            }
173        }*/
174    }
175
176insertAllLocked:
177    // Update the last ently if their all locked
178    if (!new_entry) {
179        new_entry = &tlb[size-1];
180    }
181
182    freeList.remove(new_entry);
183    if (new_entry->valid && new_entry->used)
184        usedEntries--;
185    if (new_entry->valid)
186        lookupTable.erase(new_entry->range);
187
188
189    assert(PTE.valid());
190    new_entry->range.va = va;
191    new_entry->range.size = PTE.size() - 1;
192    new_entry->range.partitionId = partition_id;
193    new_entry->range.contextId = context_id;
194    new_entry->range.real = real;
195    new_entry->pte = PTE;
196    new_entry->used = true;;
197    new_entry->valid = true;
198    usedEntries++;
199
200    i = lookupTable.insert(new_entry->range, new_entry);
201    assert(i != lookupTable.end());
202
203    // If all entries have their used bit set, clear it on them all,
204    // but the one we just inserted
205    if (usedEntries == size) {
206        clearUsedBits();
207        new_entry->used = true;
208        usedEntries++;
209    }
210}
211
212
213TlbEntry*
214TLB::lookup(Addr va, int partition_id, bool real, int context_id,
215            bool update_used)
216{
217    MapIter i;
218    TlbRange tr;
219    TlbEntry *t;
220
221    DPRINTF(TLB, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
222            va, partition_id, context_id, real);
223    // Assemble full address structure
224    tr.va = va;
225    tr.size = 1;
226    tr.contextId = context_id;
227    tr.partitionId = partition_id;
228    tr.real = real;
229
230    // Try to find the entry
231    i = lookupTable.find(tr);
232    if (i == lookupTable.end()) {
233        DPRINTF(TLB, "TLB: No valid entry found\n");
234        return NULL;
235    }
236
237    // Mark the entries used bit and clear other used bits in needed
238    t = i->second;
239    DPRINTF(TLB, "TLB: Valid entry found pa: %#x size: %#x\n", t->pte.paddr(),
240            t->pte.size());
241
242    // Update the used bits only if this is a real access (not a fake
243    // one from virttophys()
244    if (!t->used && update_used) {
245        t->used = true;
246        usedEntries++;
247        if (usedEntries == size) {
248            clearUsedBits();
249            t->used = true;
250            usedEntries++;
251        }
252    }
253
254    return t;
255}
256
257void
258TLB::dumpAll()
259{
260    MapIter i;
261    for (int x = 0; x < size; x++) {
262        if (tlb[x].valid) {
263           DPRINTFN("%4d:  %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
264                   x, tlb[x].range.partitionId, tlb[x].range.contextId,
265                   tlb[x].range.real ? 'R' : ' ', tlb[x].range.size,
266                   tlb[x].range.va, tlb[x].pte.paddr(), tlb[x].pte());
267        }
268    }
269}
270
271void
272TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
273{
274    TlbRange tr;
275    MapIter i;
276
277    DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
278            va, partition_id, context_id, real);
279
280    cacheValid = false;
281
282    // Assemble full address structure
283    tr.va = va;
284    tr.size = 1;
285    tr.contextId = context_id;
286    tr.partitionId = partition_id;
287    tr.real = real;
288
289    // Demap any entry that conflicts
290    i = lookupTable.find(tr);
291    if (i != lookupTable.end()) {
292        DPRINTF(IPR, "TLB: Demapped page\n");
293        i->second->valid = false;
294        if (i->second->used) {
295            i->second->used = false;
296            usedEntries--;
297        }
298        freeList.push_front(i->second);
299        lookupTable.erase(i);
300    }
301}
302
303void
304TLB::demapContext(int partition_id, int context_id)
305{
306    DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
307            partition_id, context_id);
308    cacheValid = false;
309    for (int x = 0; x < size; x++) {
310        if (tlb[x].range.contextId == context_id &&
311            tlb[x].range.partitionId == partition_id) {
312            if (tlb[x].valid == true) {
313                freeList.push_front(&tlb[x]);
314            }
315            tlb[x].valid = false;
316            if (tlb[x].used) {
317                tlb[x].used = false;
318                usedEntries--;
319            }
320            lookupTable.erase(tlb[x].range);
321        }
322    }
323}
324
325void
326TLB::demapAll(int partition_id)
327{
328    DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
329    cacheValid = false;
330    for (int x = 0; x < size; x++) {
331        if (tlb[x].valid && !tlb[x].pte.locked() &&
332                tlb[x].range.partitionId == partition_id) {
333            freeList.push_front(&tlb[x]);
334            tlb[x].valid = false;
335            if (tlb[x].used) {
336                tlb[x].used = false;
337                usedEntries--;
338            }
339            lookupTable.erase(tlb[x].range);
340        }
341    }
342}
343
344void
345TLB::invalidateAll()
346{
347    cacheValid = false;
348    lookupTable.clear();
349
350    for (int x = 0; x < size; x++) {
351        if (tlb[x].valid == true)
352            freeList.push_back(&tlb[x]);
353        tlb[x].valid = false;
354        tlb[x].used = false;
355    }
356    usedEntries = 0;
357}
358
359uint64_t
360TLB::TteRead(int entry)
361{
362    if (entry >= size)
363        panic("entry: %d\n", entry);
364
365    assert(entry < size);
366    if (tlb[entry].valid)
367        return tlb[entry].pte();
368    else
369        return (uint64_t)-1ll;
370}
371
372uint64_t
373TLB::TagRead(int entry)
374{
375    assert(entry < size);
376    uint64_t tag;
377    if (!tlb[entry].valid)
378        return (uint64_t)-1ll;
379
380    tag = tlb[entry].range.contextId;
381    tag |= tlb[entry].range.va;
382    tag |= (uint64_t)tlb[entry].range.partitionId << 61;
383    tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
384    tag |= (uint64_t)~tlb[entry].pte._size() << 56;
385    return tag;
386}
387
388bool
389TLB::validVirtualAddress(Addr va, bool am)
390{
391    if (am)
392        return true;
393    if (va >= StartVAddrHole && va <= EndVAddrHole)
394        return false;
395    return true;
396}
397
398void
399TLB::writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
400{
401    if (sfsr & 0x1)
402        sfsr = 0x3;
403    else
404        sfsr = 1;
405
406    if (write)
407        sfsr |= 1 << 2;
408    sfsr |= ct << 4;
409    if (se)
410        sfsr |= 1 << 6;
411    sfsr |= ft << 7;
412    sfsr |= asi << 16;
413}
414
415void
416TLB::writeTagAccess(Addr va, int context)
417{
418    DPRINTF(TLB, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
419            va, context, mbits(va, 63,13) | mbits(context,12,0));
420
421    tag_access = mbits(va, 63,13) | mbits(context,12,0);
422}
423
424void
425TLB::writeSfsr(Addr a, bool write, ContextType ct,
426        bool se, FaultTypes ft, int asi)
427{
428    DPRINTF(TLB, "TLB: Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
429            a, (int)write, ct, ft, asi);
430    TLB::writeSfsr(write, ct, se, ft, asi);
431    sfar = a;
432}
433
// Translate an instruction fetch.  Checks the cached translation first,
// then handles hyperprivileged/RED-state bypass, alignment, VA-range
// checks, and finally the TLB lookup, returning the appropriate fault
// on a miss or privilege violation.
Fault
TLB::translateInst(RequestPtr req, ThreadContext *tc)
{
    // Packed MMU state (priv/red/hpriv bits, partition, contexts, etc.).
    uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);

    Addr vaddr = req->getVaddr();
    TlbEntry *e;

    // Instruction fetches always use the implicit ASI.
    assert(req->getAsi() == ASI_IMPLICIT);

    DPRINTF(TLB, "TLB: ITB Request to translate va=%#x size=%d\n",
            vaddr, req->getSize());

    // Fast path: the cached entry is only valid while the MMU state
    // word is unchanged.  A NULL cached entry means "bypass" (hpriv or
    // RED state), in which case PA = VA masked to the implemented bits.
    if (cacheValid && cacheState == tlbdata) {
        if (cacheEntry[0]) {
            if (cacheEntry[0]->range.va < vaddr + sizeof(MachInst) &&
                cacheEntry[0]->range.va + cacheEntry[0]->range.size >= vaddr) {
                req->setPaddr(cacheEntry[0]->pte.translate(vaddr));
                return NoFault;
            }
        } else {
            req->setPaddr(vaddr & PAddrImplMask);
            return NoFault;
        }
    }

    // Unpack the fields we need from the MMU state word.
    bool hpriv = bits(tlbdata,0,0);
    bool red = bits(tlbdata,1,1);
    bool priv = bits(tlbdata,2,2);
    bool addr_mask = bits(tlbdata,3,3);
    bool lsu_im = bits(tlbdata,4,4);        // instruction MMU enabled

    int part_id = bits(tlbdata,15,8);
    int tl = bits(tlbdata,18,16);           // trap level
    int pri_context = bits(tlbdata,47,32);
    int context;
    ContextType ct;
    int asi;
    bool real = false;

    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
           priv, hpriv, red, lsu_im, part_id);

    // At trap level > 0 fetches use the nucleus context, otherwise the
    // primary context.
    if (tl > 0) {
        asi = ASI_N;
        ct = Nucleus;
        context = 0;
    } else {
        asi = ASI_P;
        ct = Primary;
        context = pri_context;
    }

    // Hyperprivileged or RED state bypasses translation entirely; cache
    // that decision (NULL entry) for the fast path above.
    if ( hpriv || red ) {
        cacheValid = true;
        cacheState = tlbdata;
        cacheEntry[0] = NULL;
        req->setPaddr(vaddr & PAddrImplMask);
        return NoFault;
    }

    // Instructions must be 4-byte aligned.
    if (vaddr & 0x3) {
        writeSfsr(false, ct, false, OtherFault, asi);
        return new MemAddressNotAligned;
    }

    if (addr_mask)
        vaddr = vaddr & VAddrAMask;

    if (!validVirtualAddress(vaddr, addr_mask)) {
        writeSfsr(false, ct, false, VaOutOfRange, asi);
        return new InstructionAccessException;
    }

    // With the instruction MMU disabled, look up a real translation
    // instead of a virtual one.
    if (!lsu_im) {
        e = lookup(vaddr, part_id, true);
        real = true;
        context = 0;
    } else {
        e = lookup(vaddr, part_id, false, context);
    }

    // Miss: record the tag access register and raise the matching miss
    // fault (real vs. virtual).
    if (e == NULL || !e->valid) {
        writeTagAccess(vaddr, context);
        if (real)
            return new InstructionRealTranslationMiss;
        else
#if FULL_SYSTEM
            return new FastInstructionAccessMMUMiss;
#else
            return new FastInstructionAccessMMUMiss(req->getVaddr());
#endif
    }

    // Not privileged but fetching from a privileged page.
    if (!priv && e->pte.priv()) {
        writeTagAccess(vaddr, context);
        writeSfsr(false, ct, false, PrivViolation, asi);
        return new InstructionAccessException;
    }

    // Cache this translation for the next fetch.
    cacheValid = true;
    cacheState = tlbdata;
    cacheEntry[0] = e;

    req->setPaddr(e->pte.translate(vaddr));
    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
    return NoFault;
}
546
547Fault
548TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
549{
550    /*
551     * @todo this could really use some profiling and fixing to make
552     * it faster!
553     */
554    uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
555    Addr vaddr = req->getVaddr();
556    Addr size = req->getSize();
557    ASI asi;
558    asi = (ASI)req->getAsi();
559    bool implicit = false;
560    bool hpriv = bits(tlbdata,0,0);
561    bool unaligned = vaddr & (size - 1);
562
563    DPRINTF(TLB, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
564            vaddr, size, asi);
565
566    if (lookupTable.size() != 64 - freeList.size())
567       panic("Lookup table size: %d tlb size: %d\n", lookupTable.size(),
568               freeList.size());
569    if (asi == ASI_IMPLICIT)
570        implicit = true;
571
572    // Only use the fast path here if there doesn't need to be an unaligned
573    // trap later
574    if (!unaligned) {
575        if (hpriv && implicit) {
576            req->setPaddr(vaddr & PAddrImplMask);
577            return NoFault;
578        }
579
580        // Be fast if we can!
581        if (cacheValid &&  cacheState == tlbdata) {
582
583
584
585            if (cacheEntry[0]) {
586                TlbEntry *ce = cacheEntry[0];
587                Addr ce_va = ce->range.va;
588                if (cacheAsi[0] == asi &&
589                    ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
590                    (!write || ce->pte.writable())) {
591                    req->setPaddr(ce->pte.translate(vaddr));
592                    if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
593                        req->setFlags(Request::UNCACHEABLE);
594                    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
595                    return NoFault;
596                } // if matched
597            } // if cache entry valid
598            if (cacheEntry[1]) {
599                TlbEntry *ce = cacheEntry[1];
600                Addr ce_va = ce->range.va;
601                if (cacheAsi[1] == asi &&
602                    ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
603                    (!write || ce->pte.writable())) {
604                    req->setPaddr(ce->pte.translate(vaddr));
605                    if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
606                        req->setFlags(Request::UNCACHEABLE);
607                    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
608                    return NoFault;
609                } // if matched
610            } // if cache entry valid
611        }
612    }
613
614    bool red = bits(tlbdata,1,1);
615    bool priv = bits(tlbdata,2,2);
616    bool addr_mask = bits(tlbdata,3,3);
617    bool lsu_dm = bits(tlbdata,5,5);
618
619    int part_id = bits(tlbdata,15,8);
620    int tl = bits(tlbdata,18,16);
621    int pri_context = bits(tlbdata,47,32);
622    int sec_context = bits(tlbdata,63,48);
623
624    bool real = false;
625    ContextType ct = Primary;
626    int context = 0;
627
628    TlbEntry *e;
629
630    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
631            priv, hpriv, red, lsu_dm, part_id);
632
633    if (implicit) {
634        if (tl > 0) {
635            asi = ASI_N;
636            ct = Nucleus;
637            context = 0;
638        } else {
639            asi = ASI_P;
640            ct = Primary;
641            context = pri_context;
642        }
643    } else {
644        // We need to check for priv level/asi priv
645        if (!priv && !hpriv && !AsiIsUnPriv(asi)) {
646            // It appears that context should be Nucleus in these cases?
647            writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
648            return new PrivilegedAction;
649        }
650
651        if (!hpriv && AsiIsHPriv(asi)) {
652            writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
653            return new DataAccessException;
654        }
655
656        if (AsiIsPrimary(asi)) {
657            context = pri_context;
658            ct = Primary;
659        } else if (AsiIsSecondary(asi)) {
660            context = sec_context;
661            ct = Secondary;
662        } else if (AsiIsNucleus(asi)) {
663            ct = Nucleus;
664            context = 0;
665        } else {  // ????
666            ct = Primary;
667            context = pri_context;
668        }
669    }
670
671    if (!implicit && asi != ASI_P && asi != ASI_S) {
672        if (AsiIsLittle(asi))
673            panic("Little Endian ASIs not supported\n");
674
675        //XXX It's unclear from looking at the documentation how a no fault
676        //load differs from a regular one, other than what happens concerning
677        //nfo and e bits in the TTE
678//        if (AsiIsNoFault(asi))
679//            panic("No Fault ASIs not supported\n");
680
681        if (AsiIsPartialStore(asi))
682            panic("Partial Store ASIs not supported\n");
683
684        if (AsiIsCmt(asi))
685            panic("Cmt ASI registers not implmented\n");
686
687        if (AsiIsInterrupt(asi))
688            goto handleIntRegAccess;
689        if (AsiIsMmu(asi))
690            goto handleMmuRegAccess;
691        if (AsiIsScratchPad(asi))
692            goto handleScratchRegAccess;
693        if (AsiIsQueue(asi))
694            goto handleQueueRegAccess;
695        if (AsiIsSparcError(asi))
696            goto handleSparcErrorRegAccess;
697
698        if (!AsiIsReal(asi) && !AsiIsNucleus(asi) && !AsiIsAsIfUser(asi) &&
699                !AsiIsTwin(asi) && !AsiIsBlock(asi) && !AsiIsNoFault(asi))
700            panic("Accessing ASI %#X. Should we?\n", asi);
701    }
702
703    // If the asi is unaligned trap
704    if (unaligned) {
705        writeSfsr(vaddr, false, ct, false, OtherFault, asi);
706        return new MemAddressNotAligned;
707    }
708
709    if (addr_mask)
710        vaddr = vaddr & VAddrAMask;
711
712    if (!validVirtualAddress(vaddr, addr_mask)) {
713        writeSfsr(vaddr, false, ct, true, VaOutOfRange, asi);
714        return new DataAccessException;
715    }
716
717    if ((!lsu_dm && !hpriv && !red) || AsiIsReal(asi)) {
718        real = true;
719        context = 0;
720    }
721
722    if (hpriv && (implicit || (!AsiIsAsIfUser(asi) && !AsiIsReal(asi)))) {
723        req->setPaddr(vaddr & PAddrImplMask);
724        return NoFault;
725    }
726
727    e = lookup(vaddr, part_id, real, context);
728
729    if (e == NULL || !e->valid) {
730        writeTagAccess(vaddr, context);
731        DPRINTF(TLB, "TLB: DTB Failed to find matching TLB entry\n");
732        if (real)
733            return new DataRealTranslationMiss;
734        else
735#if FULL_SYSTEM
736            return new FastDataAccessMMUMiss;
737#else
738            return new FastDataAccessMMUMiss(req->getVaddr());
739#endif
740
741    }
742
743    if (!priv && e->pte.priv()) {
744        writeTagAccess(vaddr, context);
745        writeSfsr(vaddr, write, ct, e->pte.sideffect(), PrivViolation, asi);
746        return new DataAccessException;
747    }
748
749    if (write && !e->pte.writable()) {
750        writeTagAccess(vaddr, context);
751        writeSfsr(vaddr, write, ct, e->pte.sideffect(), OtherFault, asi);
752        return new FastDataAccessProtection;
753    }
754
755    if (e->pte.nofault() && !AsiIsNoFault(asi)) {
756        writeTagAccess(vaddr, context);
757        writeSfsr(vaddr, write, ct, e->pte.sideffect(), LoadFromNfo, asi);
758        return new DataAccessException;
759    }
760
761    if (e->pte.sideffect() && AsiIsNoFault(asi)) {
762        writeTagAccess(vaddr, context);
763        writeSfsr(vaddr, write, ct, e->pte.sideffect(), SideEffect, asi);
764        return new DataAccessException;
765    }
766
767    if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
768        req->setFlags(Request::UNCACHEABLE);
769
770    // cache translation date for next translation
771    cacheState = tlbdata;
772    if (!cacheValid) {
773        cacheEntry[1] = NULL;
774        cacheEntry[0] = NULL;
775    }
776
777    if (cacheEntry[0] != e && cacheEntry[1] != e) {
778        cacheEntry[1] = cacheEntry[0];
779        cacheEntry[0] = e;
780        cacheAsi[1] = cacheAsi[0];
781        cacheAsi[0] = asi;
782        if (implicit)
783            cacheAsi[0] = (ASI)0;
784    }
785    cacheValid = true;
786    req->setPaddr(e->pte.translate(vaddr));
787    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
788    return NoFault;
789
790    /** Normal flow ends here. */
791handleIntRegAccess:
792    if (!hpriv) {
793        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
794        if (priv)
795            return new DataAccessException;
796         else
797            return new PrivilegedAction;
798    }
799
800    if ((asi == ASI_SWVR_UDB_INTR_W && !write) ||
801        (asi == ASI_SWVR_UDB_INTR_R && write)) {
802        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
803        return new DataAccessException;
804    }
805
806    goto regAccessOk;
807
808
809handleScratchRegAccess:
810    if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
811        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
812        return new DataAccessException;
813    }
814    goto regAccessOk;
815
816handleQueueRegAccess:
817    if (!priv  && !hpriv) {
818        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
819        return new PrivilegedAction;
820    }
821    if ((!hpriv && vaddr & 0xF) || vaddr > 0x3f8 || vaddr < 0x3c0) {
822        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
823        return new DataAccessException;
824    }
825    goto regAccessOk;
826
827handleSparcErrorRegAccess:
828    if (!hpriv) {
829        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
830        if (priv)
831            return new DataAccessException;
832         else
833            return new PrivilegedAction;
834    }
835    goto regAccessOk;
836
837
838regAccessOk:
839handleMmuRegAccess:
840    DPRINTF(TLB, "TLB: DTB Translating MM IPR access\n");
841    req->setFlags(Request::MMAPED_IPR);
842    req->setPaddr(req->getVaddr());
843    return NoFault;
844};
845
846Fault
847TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
848{
849    if (mode == Execute)
850        return translateInst(req, tc);
851    else
852        return translateData(req, tc, mode == Write);
853}
854
// Timing-mode translation.  This TLB model has no miss latency, so the
// translation is done atomically and the result is delivered to the
// caller immediately through the Translation callback.
void
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
        Translation *translation, Mode mode)
{
    assert(translation);
    translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
}
862
863#if FULL_SYSTEM
864
// Handle a read of a memory-mapped MMU register.  The ASI selects the
// register group and the packet's address selects the register within
// the group.  ITB-side registers are read through the thread context's
// ITB pointer; DTB-side registers are members of this (data) TLB.
// Returns the access latency in ticks.
Tick
TLB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
{
    Addr va = pkt->getAddr();
    ASI asi = (ASI)pkt->req->getAsi();
    uint64_t temp;

    DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
         (uint32_t)pkt->req->getAsi(), pkt->getAddr());

    TLB *itb = tc->getITBPtr();

    switch (asi) {
      case ASI_LSU_CONTROL_REG:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_LSU_CTRL));
        break;
      case ASI_MMU:
        // 0x8 = primary context, 0x10 = secondary context.
        switch (va) {
          case 0x8:
            pkt->set(tc->readMiscReg(MISCREG_MMU_P_CONTEXT));
            break;
          case 0x10:
            pkt->set(tc->readMiscReg(MISCREG_MMU_S_CONTEXT));
            break;
          default:
            goto doMmuReadError;
        }
        break;
      case ASI_QUEUE:
        // Queue head/tail registers live at 16-byte strides starting at
        // VA 0x3c0; map the VA to the right misc register.
        pkt->set(tc->readMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
                    (va >> 4) - 0x3c));
        break;
      // TSB base/config registers, DTB side (this TLB)...
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(c0_tsb_ps0);
        break;
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(c0_tsb_ps1);
        break;
      case ASI_DMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        pkt->set(c0_config);
        break;
      // ...and ITB side (via the ITB pointer).
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(itb->c0_tsb_ps0);
        break;
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(itb->c0_tsb_ps1);
        break;
      case ASI_IMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        pkt->set(itb->c0_config);
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(cx_tsb_ps0);
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(cx_tsb_ps1);
        break;
      case ASI_DMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        pkt->set(cx_config);
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(itb->cx_tsb_ps0);
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(itb->cx_tsb_ps1);
        break;
      case ASI_IMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        pkt->set(itb->cx_config);
        break;
      case ASI_SPARC_ERROR_STATUS_REG:
        // Error status is not modeled; always reads as zero.
        pkt->set((uint64_t)0);
        break;
      case ASI_HYP_SCRATCHPAD:
      case ASI_SCRATCHPAD:
        // Scratchpad registers at 8-byte strides.
        pkt->set(tc->readMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3)));
        break;
      case ASI_IMMU:
        switch (va) {
          case 0x0:
            // Tag target: tag_access repacked with the context moved to
            // the top 16 bits.
            temp = itb->tag_access;
            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
            break;
          case 0x18:
            pkt->set(itb->sfsr);
            break;
          case 0x30:
            pkt->set(itb->tag_access);
            break;
          default:
            goto doMmuReadError;
        }
        break;
      case ASI_DMMU:
        switch (va) {
          case 0x0:
            // Tag target (see ASI_IMMU case above).
            temp = tag_access;
            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
            break;
          case 0x18:
            pkt->set(sfsr);
            break;
          case 0x20:
            pkt->set(sfar);
            break;
          case 0x30:
            pkt->set(tag_access);
            break;
          case 0x80:
            pkt->set(tc->readMiscReg(MISCREG_MMU_PART_ID));
            break;
          default:
                goto doMmuReadError;
        }
        break;
      // TSB pointer registers, computed from the tag access register
      // and the TSB base/config registers.
      case ASI_DMMU_TSB_PS0_PTR_REG:
        pkt->set(MakeTsbPtr(Ps0,
            tag_access,
            c0_tsb_ps0,
            c0_config,
            cx_tsb_ps0,
            cx_config));
        break;
      case ASI_DMMU_TSB_PS1_PTR_REG:
        pkt->set(MakeTsbPtr(Ps1,
                tag_access,
                c0_tsb_ps1,
                c0_config,
                cx_tsb_ps1,
                cx_config));
        break;
      case ASI_IMMU_TSB_PS0_PTR_REG:
          pkt->set(MakeTsbPtr(Ps0,
                itb->tag_access,
                itb->c0_tsb_ps0,
                itb->c0_config,
                itb->cx_tsb_ps0,
                itb->cx_config));
        break;
      case ASI_IMMU_TSB_PS1_PTR_REG:
          pkt->set(MakeTsbPtr(Ps1,
                itb->tag_access,
                itb->c0_tsb_ps1,
                itb->c0_config,
                itb->cx_tsb_ps1,
                itb->cx_config));
        break;
      case ASI_SWVR_INTR_RECEIVE:
        {
            // Read the pending interrupt-vector bits from the CPU's
            // interrupt controller.
            SparcISA::Interrupts * interrupts =
                dynamic_cast<SparcISA::Interrupts *>(
                        tc->getCpuPtr()->getInterruptController());
            pkt->set(interrupts->get_vec(IT_INT_VEC));
        }
        break;
      case ASI_SWVR_UDB_INTR_R:
        {
            // Read-and-clear the highest pending interrupt vector.
            SparcISA::Interrupts * interrupts =
                dynamic_cast<SparcISA::Interrupts *>(
                        tc->getCpuPtr()->getInterruptController());
            temp = findMsbSet(interrupts->get_vec(IT_INT_VEC));
            tc->getCpuPtr()->clearInterrupt(IT_INT_VEC, temp);
            pkt->set(temp);
        }
        break;
      default:
doMmuReadError:
        panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
            (uint32_t)asi, va);
    }
    pkt->makeAtomicResponse();
    return tc->getCpuPtr()->ticks(1);
}
1049
1050Tick
1051TLB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
1052{
1053    uint64_t data = pkt->get<uint64_t>();
1054    Addr va = pkt->getAddr();
1055    ASI asi = (ASI)pkt->req->getAsi();
1056
1057    Addr ta_insert;
1058    Addr va_insert;
1059    Addr ct_insert;
1060    int part_insert;
1061    int entry_insert = -1;
1062    bool real_insert;
1063    bool ignore;
1064    int part_id;
1065    int ctx_id;
1066    PageTableEntry pte;
1067
1068    DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
1069         (uint32_t)asi, va, data);
1070
1071    TLB *itb = tc->getITBPtr();
1072
1073    switch (asi) {
1074      case ASI_LSU_CONTROL_REG:
1075        assert(va == 0);
1076        tc->setMiscReg(MISCREG_MMU_LSU_CTRL, data);
1077        break;
1078      case ASI_MMU:
1079        switch (va) {
1080          case 0x8:
1081            tc->setMiscReg(MISCREG_MMU_P_CONTEXT, data);
1082            break;
1083          case 0x10:
1084            tc->setMiscReg(MISCREG_MMU_S_CONTEXT, data);
1085            break;
1086          default:
1087            goto doMmuWriteError;
1088        }
1089        break;
1090      case ASI_QUEUE:
1091        assert(mbits(data,13,6) == data);
1092        tc->setMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
1093                    (va >> 4) - 0x3c, data);
1094        break;
1095      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
1096        assert(va == 0);
1097        c0_tsb_ps0 = data;
1098        break;
1099      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
1100        assert(va == 0);
1101        c0_tsb_ps1 = data;
1102        break;
1103      case ASI_DMMU_CTXT_ZERO_CONFIG:
1104        assert(va == 0);
1105        c0_config = data;
1106        break;
1107      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
1108        assert(va == 0);
1109        itb->c0_tsb_ps0 = data;
1110        break;
1111      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
1112        assert(va == 0);
1113        itb->c0_tsb_ps1 = data;
1114        break;
1115      case ASI_IMMU_CTXT_ZERO_CONFIG:
1116        assert(va == 0);
1117        itb->c0_config = data;
1118        break;
1119      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
1120        assert(va == 0);
1121        cx_tsb_ps0 = data;
1122        break;
1123      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
1124        assert(va == 0);
1125        cx_tsb_ps1 = data;
1126        break;
1127      case ASI_DMMU_CTXT_NONZERO_CONFIG:
1128        assert(va == 0);
1129        cx_config = data;
1130        break;
1131      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
1132        assert(va == 0);
1133        itb->cx_tsb_ps0 = data;
1134        break;
1135      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
1136        assert(va == 0);
1137        itb->cx_tsb_ps1 = data;
1138        break;
1139      case ASI_IMMU_CTXT_NONZERO_CONFIG:
1140        assert(va == 0);
1141        itb->cx_config = data;
1142        break;
1143      case ASI_SPARC_ERROR_EN_REG:
1144      case ASI_SPARC_ERROR_STATUS_REG:
1145        inform("Ignoring write to SPARC ERROR regsiter\n");
1146        break;
1147      case ASI_HYP_SCRATCHPAD:
1148      case ASI_SCRATCHPAD:
1149        tc->setMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3), data);
1150        break;
1151      case ASI_IMMU:
1152        switch (va) {
1153          case 0x18:
1154            itb->sfsr = data;
1155            break;
1156          case 0x30:
1157            sext<59>(bits(data, 59,0));
1158            itb->tag_access = data;
1159            break;
1160          default:
1161            goto doMmuWriteError;
1162        }
1163        break;
1164      case ASI_ITLB_DATA_ACCESS_REG:
1165        entry_insert = bits(va, 8,3);
1166      case ASI_ITLB_DATA_IN_REG:
1167        assert(entry_insert != -1 || mbits(va,10,9) == va);
1168        ta_insert = itb->tag_access;
1169        va_insert = mbits(ta_insert, 63,13);
1170        ct_insert = mbits(ta_insert, 12,0);
1171        part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1172        real_insert = bits(va, 9,9);
1173        pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1174                PageTableEntry::sun4u);
1175        tc->getITBPtr()->insert(va_insert, part_insert, ct_insert, real_insert,
1176                pte, entry_insert);
1177        break;
1178      case ASI_DTLB_DATA_ACCESS_REG:
1179        entry_insert = bits(va, 8,3);
1180      case ASI_DTLB_DATA_IN_REG:
1181        assert(entry_insert != -1 || mbits(va,10,9) == va);
1182        ta_insert = tag_access;
1183        va_insert = mbits(ta_insert, 63,13);
1184        ct_insert = mbits(ta_insert, 12,0);
1185        part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1186        real_insert = bits(va, 9,9);
1187        pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1188                PageTableEntry::sun4u);
1189        insert(va_insert, part_insert, ct_insert, real_insert, pte,
1190               entry_insert);
1191        break;
1192      case ASI_IMMU_DEMAP:
1193        ignore = false;
1194        ctx_id = -1;
1195        part_id =  tc->readMiscReg(MISCREG_MMU_PART_ID);
1196        switch (bits(va,5,4)) {
1197          case 0:
1198            ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1199            break;
1200          case 1:
1201            ignore = true;
1202            break;
1203          case 3:
1204            ctx_id = 0;
1205            break;
1206          default:
1207            ignore = true;
1208        }
1209
1210        switch(bits(va,7,6)) {
1211          case 0: // demap page
1212            if (!ignore)
1213                tc->getITBPtr()->demapPage(mbits(va,63,13), part_id,
1214                        bits(va,9,9), ctx_id);
1215            break;
1216          case 1: //demap context
1217            if (!ignore)
1218                tc->getITBPtr()->demapContext(part_id, ctx_id);
1219            break;
1220          case 2:
1221            tc->getITBPtr()->demapAll(part_id);
1222            break;
1223          default:
1224            panic("Invalid type for IMMU demap\n");
1225        }
1226        break;
1227      case ASI_DMMU:
1228        switch (va) {
1229          case 0x18:
1230            sfsr = data;
1231            break;
1232          case 0x30:
1233            sext<59>(bits(data, 59,0));
1234            tag_access = data;
1235            break;
1236          case 0x80:
1237            tc->setMiscReg(MISCREG_MMU_PART_ID, data);
1238            break;
1239          default:
1240            goto doMmuWriteError;
1241        }
1242        break;
1243      case ASI_DMMU_DEMAP:
1244        ignore = false;
1245        ctx_id = -1;
1246        part_id =  tc->readMiscReg(MISCREG_MMU_PART_ID);
1247        switch (bits(va,5,4)) {
1248          case 0:
1249            ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1250            break;
1251          case 1:
1252            ctx_id = tc->readMiscReg(MISCREG_MMU_S_CONTEXT);
1253            break;
1254          case 3:
1255            ctx_id = 0;
1256            break;
1257          default:
1258            ignore = true;
1259        }
1260
1261        switch(bits(va,7,6)) {
1262          case 0: // demap page
1263            if (!ignore)
1264                demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
1265            break;
1266          case 1: //demap context
1267            if (!ignore)
1268                demapContext(part_id, ctx_id);
1269            break;
1270          case 2:
1271            demapAll(part_id);
1272            break;
1273          default:
1274            panic("Invalid type for IMMU demap\n");
1275        }
1276        break;
1277       case ASI_SWVR_INTR_RECEIVE:
1278        {
1279            int msb;
1280            // clear all the interrupts that aren't set in the write
1281            SparcISA::Interrupts * interrupts =
1282                dynamic_cast<SparcISA::Interrupts *>(
1283                        tc->getCpuPtr()->getInterruptController());
1284            while (interrupts->get_vec(IT_INT_VEC) & data) {
1285                msb = findMsbSet(interrupts->get_vec(IT_INT_VEC) & data);
1286                tc->getCpuPtr()->clearInterrupt(IT_INT_VEC, msb);
1287            }
1288        }
1289        break;
1290      case ASI_SWVR_UDB_INTR_W:
1291            tc->getSystemPtr()->threadContexts[bits(data,12,8)]->getCpuPtr()->
1292            postInterrupt(bits(data, 5, 0), 0);
1293        break;
1294      default:
1295doMmuWriteError:
1296        panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
1297            (uint32_t)pkt->req->getAsi(), pkt->getAddr(), data);
1298    }
1299    pkt->makeAtomicResponse();
1300    return tc->getCpuPtr()->ticks(1);
1301}
1302
1303#endif
1304
1305void
1306TLB::GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs)
1307{
1308    uint64_t tag_access = mbits(addr,63,13) | mbits(ctx,12,0);
1309    TLB * itb = tc->getITBPtr();
1310    ptrs[0] = MakeTsbPtr(Ps0, tag_access,
1311                c0_tsb_ps0,
1312                c0_config,
1313                cx_tsb_ps0,
1314                cx_config);
1315    ptrs[1] = MakeTsbPtr(Ps1, tag_access,
1316                c0_tsb_ps1,
1317                c0_config,
1318                cx_tsb_ps1,
1319                cx_config);
1320    ptrs[2] = MakeTsbPtr(Ps0, tag_access,
1321                itb->c0_tsb_ps0,
1322                itb->c0_config,
1323                itb->cx_tsb_ps0,
1324                itb->cx_config);
1325    ptrs[3] = MakeTsbPtr(Ps1, tag_access,
1326                itb->c0_tsb_ps1,
1327                itb->c0_config,
1328                itb->cx_tsb_ps1,
1329                itb->cx_config);
1330}
1331
1332uint64_t
1333TLB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
1334        uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
1335{
1336    uint64_t tsb;
1337    uint64_t config;
1338
1339    if (bits(tag_access, 12,0) == 0) {
1340        tsb = c0_tsb;
1341        config = c0_config;
1342    } else {
1343        tsb = cX_tsb;
1344        config = cX_config;
1345    }
1346
1347    uint64_t ptr = mbits(tsb,63,13);
1348    bool split = bits(tsb,12,12);
1349    int tsb_size = bits(tsb,3,0);
1350    int page_size = (ps == Ps0) ? bits(config, 2,0) : bits(config,10,8);
1351
1352    if (ps == Ps1  && split)
1353        ptr |= ULL(1) << (13 + tsb_size);
1354    ptr |= (tag_access >> (9 + page_size * 3)) & mask(12+tsb_size, 4);
1355
1356    return ptr;
1357}
1358
1359void
1360TLB::serialize(std::ostream &os)
1361{
1362    SERIALIZE_SCALAR(size);
1363    SERIALIZE_SCALAR(usedEntries);
1364    SERIALIZE_SCALAR(lastReplaced);
1365
1366    // convert the pointer based free list into an index based one
1367    int *free_list = (int*)malloc(sizeof(int) * size);
1368    int cntr = 0;
1369    std::list<TlbEntry*>::iterator i;
1370    i = freeList.begin();
1371    while (i != freeList.end()) {
1372        free_list[cntr++] = ((size_t)*i - (size_t)tlb)/ sizeof(TlbEntry);
1373        i++;
1374    }
1375    SERIALIZE_SCALAR(cntr);
1376    SERIALIZE_ARRAY(free_list,  cntr);
1377
1378    SERIALIZE_SCALAR(c0_tsb_ps0);
1379    SERIALIZE_SCALAR(c0_tsb_ps1);
1380    SERIALIZE_SCALAR(c0_config);
1381    SERIALIZE_SCALAR(cx_tsb_ps0);
1382    SERIALIZE_SCALAR(cx_tsb_ps1);
1383    SERIALIZE_SCALAR(cx_config);
1384    SERIALIZE_SCALAR(sfsr);
1385    SERIALIZE_SCALAR(tag_access);
1386
1387    for (int x = 0; x < size; x++) {
1388        nameOut(os, csprintf("%s.PTE%d", name(), x));
1389        tlb[x].serialize(os);
1390    }
1391    SERIALIZE_SCALAR(sfar);
1392}
1393
1394void
1395TLB::unserialize(Checkpoint *cp, const std::string &section)
1396{
1397    int oldSize;
1398
1399    paramIn(cp, section, "size", oldSize);
1400    if (oldSize != size)
1401        panic("Don't support unserializing different sized TLBs\n");
1402    UNSERIALIZE_SCALAR(usedEntries);
1403    UNSERIALIZE_SCALAR(lastReplaced);
1404
1405    int cntr;
1406    UNSERIALIZE_SCALAR(cntr);
1407
1408    int *free_list = (int*)malloc(sizeof(int) * cntr);
1409    freeList.clear();
1410    UNSERIALIZE_ARRAY(free_list,  cntr);
1411    for (int x = 0; x < cntr; x++)
1412        freeList.push_back(&tlb[free_list[x]]);
1413
1414    UNSERIALIZE_SCALAR(c0_tsb_ps0);
1415    UNSERIALIZE_SCALAR(c0_tsb_ps1);
1416    UNSERIALIZE_SCALAR(c0_config);
1417    UNSERIALIZE_SCALAR(cx_tsb_ps0);
1418    UNSERIALIZE_SCALAR(cx_tsb_ps1);
1419    UNSERIALIZE_SCALAR(cx_config);
1420    UNSERIALIZE_SCALAR(sfsr);
1421    UNSERIALIZE_SCALAR(tag_access);
1422
1423    lookupTable.clear();
1424    for (int x = 0; x < size; x++) {
1425        tlb[x].unserialize(cp, csprintf("%s.PTE%d", section, x));
1426        if (tlb[x].valid)
1427            lookupTable.insert(tlb[x].range, &tlb[x]);
1428
1429    }
1430    UNSERIALIZE_SCALAR(sfar);
1431}
1432
1433/* end namespace SparcISA */ }
1434
1435SparcISA::TLB *
1436SparcTLBParams::create()
1437{
1438    return new SparcISA::TLB(this);
1439}
1440