tlb.cc revision 4996:e827e57a01f9
1/*
2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31#include <cstring>
32
33#include "arch/sparc/asi.hh"
34#include "arch/sparc/miscregfile.hh"
35#include "arch/sparc/tlb.hh"
36#include "base/bitfield.hh"
37#include "base/trace.hh"
38#include "cpu/thread_context.hh"
39#include "cpu/base.hh"
40#include "mem/packet_access.hh"
41#include "mem/request.hh"
42#include "params/SparcDTB.hh"
43#include "params/SparcITB.hh"
44#include "sim/system.hh"
45
46/* @todo remove some of the magic constants.  -- ali
47 */
48namespace SparcISA {
49
50TLB::TLB(const std::string &name, int s)
51    : SimObject(name), size(s), usedEntries(0), lastReplaced(0),
52      cacheValid(false)
53{
54    // To make this work you'll have to change the hypervisor and OS
55    if (size > 64)
56        fatal("SPARC T1 TLB registers don't support more than 64 TLB entries.");
57
58    tlb = new TlbEntry[size];
59    std::memset(tlb, 0, sizeof(TlbEntry) * size);
60
61    for (int x = 0; x < size; x++)
62        freeList.push_back(&tlb[x]);
63
64    c0_tsb_ps0 = 0;
65    c0_tsb_ps1 = 0;
66    c0_config = 0;
67    cx_tsb_ps0 = 0;
68    cx_tsb_ps1 = 0;
69    cx_config = 0;
70    sfsr = 0;
71    tag_access = 0;
72}
73
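// Clear the used bit on every unlocked entry. insert() and lookup() call
// this once all entries have been marked used, so the not-recently-used
// style replacement below can keep making progress.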
74void
75TLB::clearUsedBits()
76{
77    MapIter i;
78    for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
79        TlbEntry *t = i->second;
80        if (!t->pte.locked()) {
81            t->used = false;
82            usedEntries--;
83        }
84    }
85}
86
87
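// Insert a translation into the TLB. Any existing entry that overlaps the
// new mapping (same partition and real/virtual space, and the same context
// for virtual mappings) is invalidated first. The new translation goes into
// the caller-specified slot if entry != -1, otherwise into a free slot; if
// none is free, a clock-style sweep starting after lastReplaced picks the
// next unlocked entry, falling back to the last entry when everything is
// locked.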
88void
89TLB::insert(Addr va, int partition_id, int context_id, bool real,
90        const PageTableEntry& PTE, int entry)
91{
92
93
94    MapIter i;
95    TlbEntry *new_entry = NULL;
96//    TlbRange tr;
97    int x;
98
99    cacheValid = false;
100    va &= ~(PTE.size()-1);
101 /*   tr.va = va;
102    tr.size = PTE.size() - 1;
103    tr.contextId = context_id;
104    tr.partitionId = partition_id;
105    tr.real = real;
106*/
107
108    DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
109            va, PTE.paddr(), partition_id, context_id, (int)real, entry);
110
111    // Demap any entry that conflicts
112    for (x = 0; x < size; x++) {
113        if (tlb[x].range.real == real &&
114            tlb[x].range.partitionId == partition_id &&
115            tlb[x].range.va < va + PTE.size() - 1 &&
116            tlb[x].range.va + tlb[x].range.size >= va &&
117            (real || tlb[x].range.contextId == context_id ))
118        {
119            if (tlb[x].valid) {
120                freeList.push_front(&tlb[x]);
121                DPRINTF(TLB, "TLB: Conflicting entry %#X , deleting it\n", x);
122
123                tlb[x].valid = false;
124                if (tlb[x].used) {
125                    tlb[x].used = false;
126                    usedEntries--;
127                }
128                lookupTable.erase(tlb[x].range);
129            }
130        }
131    }
132
133
134/*
135    i = lookupTable.find(tr);
136    if (i != lookupTable.end()) {
137        i->second->valid = false;
138        if (i->second->used) {
139            i->second->used = false;
140            usedEntries--;
141        }
142        freeList.push_front(i->second);
143        DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
144                i->second);
145        lookupTable.erase(i);
146    }
147*/
148
149    if (entry != -1) {
150        assert(entry < size && entry >= 0);
151        new_entry = &tlb[entry];
152    } else {
153        if (!freeList.empty()) {
154            new_entry = freeList.front();
155        } else {
156            x = lastReplaced;
157            do {
158                ++x;
159                if (x == size)
160                    x = 0;
161                if (x == lastReplaced)
162                    goto insertAllLocked;
163            } while (tlb[x].pte.locked());
164            lastReplaced = x;
165            new_entry = &tlb[x];
166        }
167        /*
168        for (x = 0; x < size; x++) {
169            if (!tlb[x].valid || !tlb[x].used)  {
170                new_entry = &tlb[x];
171                break;
172            }
173        }*/
174    }
175
176insertAllLocked:
177    // Replace the last entry if they're all locked
178    if (!new_entry) {
179        new_entry = &tlb[size-1];
180    }
181
182    freeList.remove(new_entry);
183    if (new_entry->valid && new_entry->used)
184        usedEntries--;
185    if (new_entry->valid)
186        lookupTable.erase(new_entry->range);
187
188
189    assert(PTE.valid());
190    new_entry->range.va = va;
191    new_entry->range.size = PTE.size() - 1;
192    new_entry->range.partitionId = partition_id;
193    new_entry->range.contextId = context_id;
194    new_entry->range.real = real;
195    new_entry->pte = PTE;
196    new_entry->used = true;
197    new_entry->valid = true;
198    usedEntries++;
199
200
201
202    i = lookupTable.insert(new_entry->range, new_entry);
203    assert(i != lookupTable.end());
204
205    // If all entries have their used bit set, clear it on all of them except
206    // the one we just inserted
207    if (usedEntries == size) {
208        clearUsedBits();
209        new_entry->used = true;
210        usedEntries++;
211    }
212
213}
214
215
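// Look up the translation for va in the given partition and context (or as
// a real mapping). Returns NULL on a miss. On a hit the entry's used bit is
// set unless update_used is false (e.g. for debugging/functional
// translations), and the used bits are swept when the TLB becomes fully
// used.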
216TlbEntry*
217TLB::lookup(Addr va, int partition_id, bool real, int context_id, bool
218        update_used)
219{
220    MapIter i;
221    TlbRange tr;
222    TlbEntry *t;
223
224    DPRINTF(TLB, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
225            va, partition_id, context_id, real);
226    // Assemble full address structure
227    tr.va = va;
228    tr.size = MachineBytes;
229    tr.contextId = context_id;
230    tr.partitionId = partition_id;
231    tr.real = real;
232
233    // Try to find the entry
234    i = lookupTable.find(tr);
235    if (i == lookupTable.end()) {
236        DPRINTF(TLB, "TLB: No valid entry found\n");
237        return NULL;
238    }
239
240    // Mark the entry's used bit and clear the other used bits if needed
241    t = i->second;
242    DPRINTF(TLB, "TLB: Valid entry found pa: %#x size: %#x\n", t->pte.paddr(),
243            t->pte.size());
244
245    // Update the used bits only if this is a real access (not a fake one from
246    // virttophys())
247    if (!t->used && update_used) {
248        t->used = true;
249        usedEntries++;
250        if (usedEntries == size) {
251            clearUsedBits();
252            t->used = true;
253            usedEntries++;
254        }
255    }
256
257    return t;
258}
259
260void
261TLB::dumpAll()
262{
263    MapIter i;
264    for (int x = 0; x < size; x++) {
265        if (tlb[x].valid) {
266           DPRINTFN("%4d:  %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
267                   x, tlb[x].range.partitionId, tlb[x].range.contextId,
268                   tlb[x].range.real ? 'R' : ' ', tlb[x].range.size,
269                   tlb[x].range.va, tlb[x].pte.paddr(), tlb[x].pte());
270        }
271    }
272}
273
274void
275TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
276{
277    TlbRange tr;
278    MapIter i;
279
280    DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%d cid=%d r=%d\n",
281            va, partition_id, context_id, real);
282
283    cacheValid = false;
284
285    // Assemble full address structure
286    tr.va = va;
287    tr.size = MachineBytes;
288    tr.contextId = context_id;
289    tr.partitionId = partition_id;
290    tr.real = real;
291
292    // Demap any entry that conflicts
293    i = lookupTable.find(tr);
294    if (i != lookupTable.end()) {
295        DPRINTF(IPR, "TLB: Demapped page\n");
296        i->second->valid = false;
297        if (i->second->used) {
298            i->second->used = false;
299            usedEntries--;
300        }
301        freeList.push_front(i->second);
302        lookupTable.erase(i);
303    }
304}
305
306void
307TLB::demapContext(int partition_id, int context_id)
308{
309    int x;
310    DPRINTF(IPR, "TLB: Demapping Context pid=%d cid=%d\n",
311            partition_id, context_id);
312    cacheValid = false;
313    for (x = 0; x < size; x++) {
314        if (tlb[x].range.contextId == context_id &&
315            tlb[x].range.partitionId == partition_id) {
316            if (tlb[x].valid == true) {
317                freeList.push_front(&tlb[x]);
318            }
319            tlb[x].valid = false;
320            if (tlb[x].used) {
321                tlb[x].used = false;
322                usedEntries--;
323            }
324            lookupTable.erase(tlb[x].range);
325        }
326    }
327}
328
329void
330TLB::demapAll(int partition_id)
331{
332    int x;
333    DPRINTF(TLB, "TLB: Demapping All pid=%d\n", partition_id);
334    cacheValid = false;
335    for (x = 0; x < size; x++) {
336        if (!tlb[x].pte.locked() && tlb[x].range.partitionId == partition_id) {
337            if (tlb[x].valid == true){
338                freeList.push_front(&tlb[x]);
339            }
340            tlb[x].valid = false;
341            if (tlb[x].used) {
342                tlb[x].used = false;
343                usedEntries--;
344            }
345            lookupTable.erase(tlb[x].range);
346        }
347    }
348}
349
350void
351TLB::invalidateAll()
352{
353    int x;
354    cacheValid = false;
355
356    freeList.clear();
357    lookupTable.clear();
358    for (x = 0; x < size; x++) {
359        if (tlb[x].valid == true)
360            freeList.push_back(&tlb[x]);
361        tlb[x].valid = false;
362        tlb[x].used = false;
363    }
364    usedEntries = 0;
365}
366
367uint64_t
368TLB::TteRead(int entry) {
369    if (entry >= size)
370        panic("entry: %d\n", entry);
371
372    assert(entry < size);
373    if (tlb[entry].valid)
374        return tlb[entry].pte();
375    else
376        return (uint64_t)-1ll;
377}
378
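// Reconstruct a tag-read value for the given entry from its stored range:
// the context id and VA in the low bits, the partition id at bit 61, the
// real bit at bit 60, and the complemented page-size field at bit 56. This
// is intended to approximate the UltraSPARC T1 tag read format; the field
// placement below is what is actually encoded.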
379uint64_t
380TLB::TagRead(int entry) {
381    assert(entry < size);
382    uint64_t tag;
383    if (!tlb[entry].valid)
384        return (uint64_t)-1ll;
385
386    tag = tlb[entry].range.contextId;
387    tag |= tlb[entry].range.va;
388    tag |= (uint64_t)tlb[entry].range.partitionId << 61;
389    tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
390    tag |= (uint64_t)~tlb[entry].pte._size() << 56;
391    return tag;
392}
393
394bool
395TLB::validVirtualAddress(Addr va, bool am)
396{
397    if (am)
398        return true;
399    if (va >= StartVAddrHole && va <= EndVAddrHole)
400        return false;
401    return true;
402}
403
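// Build up an SFSR value for a fault: bit 0 is the fault-valid bit, bit 1
// is set when a previous fault was still pending (overwrite), bit 2 marks a
// write, bits 5:4 hold the context type, bit 6 the side-effect bit, the
// fault type starts at bit 7 and the ASI at bit 16. The field names follow
// the UltraSPARC T1 SFSR layout; the shifts below are what this model
// actually encodes.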
404void
405TLB::writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
406{
407    if (sfsr & 0x1)
408        sfsr = 0x3;
409    else
410        sfsr = 1;
411
412    if (write)
413        sfsr |= 1 << 2;
414    sfsr |= ct << 4;
415    if (se)
416        sfsr |= 1 << 6;
417    sfsr |= ft << 7;
418    sfsr |= asi << 16;
419}
420
421void
422TLB::writeTagAccess(Addr va, int context)
423{
424    DPRINTF(TLB, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
425            va, context, mbits(va, 63,13) | mbits(context,12,0));
426
427    tag_access = mbits(va, 63,13) | mbits(context,12,0);
428}
429
430void
431ITB::writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
432{
433    DPRINTF(TLB, "TLB: ITB Fault:  w=%d ct=%d ft=%d asi=%d\n",
434             (int)write, ct, ft, asi);
435    TLB::writeSfsr(write, ct, se, ft, asi);
436}
437
438void
439DTB::writeSfsr(Addr a, bool write, ContextType ct,
440        bool se, FaultTypes ft, int asi)
441{
442    DPRINTF(TLB, "TLB: DTB Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
443            a, (int)write, ct, ft, asi);
444    TLB::writeSfsr(write, ct, se, ft, asi);
445    sfar = a;
446}
447
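// Translate an instruction fetch. The single-entry translation cache is
// used when the cached MMU state (tlbdata) still matches; hyperprivileged
// and RED-state fetches bypass translation entirely. Otherwise the access
// is checked for alignment and VA-range validity, looked up in the TLB
// (as a real address when the I-MMU is disabled), and the appropriate
// miss or access-exception fault is returned.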
448Fault
449ITB::translate(RequestPtr &req, ThreadContext *tc)
450{
451    uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
452
453    Addr vaddr = req->getVaddr();
454    TlbEntry *e;
455
456    assert(req->getAsi() == ASI_IMPLICIT);
457
458    DPRINTF(TLB, "TLB: ITB Request to translate va=%#x size=%d\n",
459            vaddr, req->getSize());
460
461    // Be fast if we can!
462    if (cacheValid && cacheState == tlbdata) {
463        if (cacheEntry) {
464            if (cacheEntry->range.va < vaddr + sizeof(MachInst) &&
465                cacheEntry->range.va + cacheEntry->range.size >= vaddr) {
466                    req->setPaddr(cacheEntry->pte.paddr() & ~(cacheEntry->pte.size()-1) |
467                                  vaddr & cacheEntry->pte.size()-1 );
468                    return NoFault;
469            }
470        } else {
471            req->setPaddr(vaddr & PAddrImplMask);
472            return NoFault;
473        }
474    }
475
476    bool hpriv = bits(tlbdata,0,0);
477    bool red = bits(tlbdata,1,1);
478    bool priv = bits(tlbdata,2,2);
479    bool addr_mask = bits(tlbdata,3,3);
480    bool lsu_im = bits(tlbdata,4,4);
481
482    int part_id = bits(tlbdata,15,8);
483    int tl = bits(tlbdata,18,16);
484    int pri_context = bits(tlbdata,47,32);
485    int context;
486    ContextType ct;
487    int asi;
488    bool real = false;
489
490    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
491           priv, hpriv, red, lsu_im, part_id);
492
493    if (tl > 0) {
494        asi = ASI_N;
495        ct = Nucleus;
496        context = 0;
497    } else {
498        asi = ASI_P;
499        ct = Primary;
500        context = pri_context;
501    }
502
503    if ( hpriv || red ) {
504        cacheValid = true;
505        cacheState = tlbdata;
506        cacheEntry = NULL;
507        req->setPaddr(vaddr & PAddrImplMask);
508        return NoFault;
509    }
510
511    // If the access is unaligned, trap
512    if (vaddr & 0x3) {
513        writeSfsr(false, ct, false, OtherFault, asi);
514        return new MemAddressNotAligned;
515    }
516
517    if (addr_mask)
518        vaddr = vaddr & VAddrAMask;
519
520    if (!validVirtualAddress(vaddr, addr_mask)) {
521        writeSfsr(false, ct, false, VaOutOfRange, asi);
522        return new InstructionAccessException;
523    }
524
525    if (!lsu_im) {
526        e = lookup(vaddr, part_id, true);
527        real = true;
528        context = 0;
529    } else {
530        e = lookup(vaddr, part_id, false, context);
531    }
532
533    if (e == NULL || !e->valid) {
534        writeTagAccess(vaddr, context);
535        if (real)
536            return new InstructionRealTranslationMiss;
537        else
538            return new FastInstructionAccessMMUMiss;
539    }
540
541    // we're not privileged but are accessing a privileged page
542    if (!priv && e->pte.priv()) {
543        writeTagAccess(vaddr, context);
544        writeSfsr(false, ct, false, PrivViolation, asi);
545        return new InstructionAccessException;
546    }
547
548    // cache translation data for the next translation
549    cacheValid = true;
550    cacheState = tlbdata;
551    cacheEntry = e;
552
553    req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
554                  vaddr & e->pte.size()-1 );
555    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
556    return NoFault;
557}
558
559
560
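// Translate a data access. Aligned implicit hyperprivileged accesses and
// hits in the two-entry translation cache take the fast path at the top.
// Otherwise the ASI is decoded to pick the context, ASIs that address
// interrupt, MMU, scratchpad, queue, or error registers are redirected to
// the memory-mapped IPR path at the bottom, and normal accesses are looked
// up in the TLB with the usual miss, privilege, protection, NFO, and
// side-effect checks.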
561Fault
562DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
563{
564    /* @todo this could really use some profiling and fixing to make it faster! */
565    uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
566    Addr vaddr = req->getVaddr();
567    Addr size = req->getSize();
568    ASI asi;
569    asi = (ASI)req->getAsi();
570    bool implicit = false;
571    bool hpriv = bits(tlbdata,0,0);
572    bool unaligned = (vaddr & size-1);
573
574    DPRINTF(TLB, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
575            vaddr, size, asi);
576
577    if (lookupTable.size() != 64 - freeList.size())
578        panic("Lookup table size: %d free list size: %d\n", lookupTable.size(),
579               freeList.size());
580    if (asi == ASI_IMPLICIT)
581        implicit = true;
582
583    // Only use the fast path here if there doesn't need to be an unaligned
584    // trap later
585    if (!unaligned) {
586        if (hpriv && implicit) {
587            req->setPaddr(vaddr & PAddrImplMask);
588            return NoFault;
589        }
590
591        // Be fast if we can!
592        if (cacheValid &&  cacheState == tlbdata) {
593
594
595
596            if (cacheEntry[0]) {
597                TlbEntry *ce = cacheEntry[0];
598                Addr ce_va = ce->range.va;
599                if (cacheAsi[0] == asi &&
600                    ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
601                    (!write || ce->pte.writable())) {
602                        req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
603                        if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
604                            req->setFlags(req->getFlags() | UNCACHEABLE);
605                        DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
606                        return NoFault;
607                } // if matched
608            } // if cache entry valid
609            if (cacheEntry[1]) {
610                TlbEntry *ce = cacheEntry[1];
611                Addr ce_va = ce->range.va;
612                if (cacheAsi[1] == asi &&
613                    ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
614                    (!write || ce->pte.writable())) {
615                        req->setPaddr(ce->pte.paddrMask() | vaddr & ce->pte.sizeMask());
616                        if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
617                            req->setFlags(req->getFlags() | UNCACHEABLE);
618                        DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
619                        return NoFault;
620                } // if matched
621            } // if cache entry valid
622        }
623    }
624
625    bool red = bits(tlbdata,1,1);
626    bool priv = bits(tlbdata,2,2);
627    bool addr_mask = bits(tlbdata,3,3);
628    bool lsu_dm = bits(tlbdata,5,5);
629
630    int part_id = bits(tlbdata,15,8);
631    int tl = bits(tlbdata,18,16);
632    int pri_context = bits(tlbdata,47,32);
633    int sec_context = bits(tlbdata,63,48);
634
635    bool real = false;
636    ContextType ct = Primary;
637    int context = 0;
638
639    TlbEntry *e;
640
641    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
642           priv, hpriv, red, lsu_dm, part_id);
643
644    if (implicit) {
645        if (tl > 0) {
646            asi = ASI_N;
647            ct = Nucleus;
648            context = 0;
649        } else {
650            asi = ASI_P;
651            ct = Primary;
652            context = pri_context;
653        }
654    } else {
655        // We need to check for priv level/asi priv
656        if (!priv && !hpriv && !AsiIsUnPriv(asi)) {
657            // It appears that context should be Nucleus in these cases?
658            writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
659            return new PrivilegedAction;
660        }
661
662        if (!hpriv && AsiIsHPriv(asi)) {
663            writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
664            return new DataAccessException;
665        }
666
667        if (AsiIsPrimary(asi)) {
668            context = pri_context;
669            ct = Primary;
670        } else if (AsiIsSecondary(asi)) {
671            context = sec_context;
672            ct = Secondary;
673        } else if (AsiIsNucleus(asi)) {
674            ct = Nucleus;
675            context = 0;
676        } else {  // ????
677            ct = Primary;
678            context = pri_context;
679        }
680    }
681
682    if (!implicit && asi != ASI_P && asi != ASI_S) {
683        if (AsiIsLittle(asi))
684            panic("Little Endian ASIs not supported\n");
685
686        //XXX It's unclear from looking at the documentation how a no fault
687        //load differs from a regular one, other than what happens concerning
688        //nfo and e bits in the TTE
689//        if (AsiIsNoFault(asi))
690//            panic("No Fault ASIs not supported\n");
691
692        if (AsiIsPartialStore(asi))
693            panic("Partial Store ASIs not supported\n");
694
695        if (AsiIsCmt(asi))
696            panic("Cmt ASI registers not implemented\n");
697
698        if (AsiIsInterrupt(asi))
699            goto handleIntRegAccess;
700        if (AsiIsMmu(asi))
701            goto handleMmuRegAccess;
702        if (AsiIsScratchPad(asi))
703            goto handleScratchRegAccess;
704        if (AsiIsQueue(asi))
705            goto handleQueueRegAccess;
706        if (AsiIsSparcError(asi))
707            goto handleSparcErrorRegAccess;
708
709        if (!AsiIsReal(asi) && !AsiIsNucleus(asi) && !AsiIsAsIfUser(asi) &&
710                !AsiIsTwin(asi) && !AsiIsBlock(asi) && !AsiIsNoFault(asi))
711            panic("Accessing ASI %#X. Should we?\n", asi);
712    }
713
714    // If the access is unaligned, trap
715    if (unaligned) {
716        writeSfsr(vaddr, false, ct, false, OtherFault, asi);
717        return new MemAddressNotAligned;
718    }
719
720    if (addr_mask)
721        vaddr = vaddr & VAddrAMask;
722
723    if (!validVirtualAddress(vaddr, addr_mask)) {
724        writeSfsr(vaddr, false, ct, true, VaOutOfRange, asi);
725        return new DataAccessException;
726    }
727
728
729    if ((!lsu_dm && !hpriv && !red) || AsiIsReal(asi)) {
730        real = true;
731        context = 0;
732    }
733
734    if (hpriv && (implicit || (!AsiIsAsIfUser(asi) && !AsiIsReal(asi)))) {
735        req->setPaddr(vaddr & PAddrImplMask);
736        return NoFault;
737    }
738
739    e = lookup(vaddr, part_id, real, context);
740
741    if (e == NULL || !e->valid) {
742        writeTagAccess(vaddr, context);
743        DPRINTF(TLB, "TLB: DTB Failed to find matching TLB entry\n");
744        if (real)
745            return new DataRealTranslationMiss;
746        else
747            return new FastDataAccessMMUMiss;
748
749    }
750
751    if (!priv && e->pte.priv()) {
752        writeTagAccess(vaddr, context);
753        writeSfsr(vaddr, write, ct, e->pte.sideffect(), PrivViolation, asi);
754        return new DataAccessException;
755    }
756
757    if (write && !e->pte.writable()) {
758        writeTagAccess(vaddr, context);
759        writeSfsr(vaddr, write, ct, e->pte.sideffect(), OtherFault, asi);
760        return new FastDataAccessProtection;
761    }
762
763    if (e->pte.nofault() && !AsiIsNoFault(asi)) {
764        writeTagAccess(vaddr, context);
765        writeSfsr(vaddr, write, ct, e->pte.sideffect(), LoadFromNfo, asi);
766        return new DataAccessException;
767    }
768
769    if (e->pte.sideffect() && AsiIsNoFault(asi)) {
770        writeTagAccess(vaddr, context);
771        writeSfsr(vaddr, write, ct, e->pte.sideffect(), SideEffect, asi);
772        return new DataAccessException;
773    }
774
775
776    if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
777        req->setFlags(req->getFlags() | UNCACHEABLE);
778
779    // cache translation data for the next translation
780    cacheState = tlbdata;
781    if (!cacheValid) {
782        cacheEntry[1] = NULL;
783        cacheEntry[0] = NULL;
784    }
785
786    if (cacheEntry[0] != e && cacheEntry[1] != e) {
787        cacheEntry[1] = cacheEntry[0];
788        cacheEntry[0] = e;
789        cacheAsi[1] = cacheAsi[0];
790        cacheAsi[0] = asi;
791        if (implicit)
792            cacheAsi[0] = (ASI)0;
793    }
794    cacheValid = true;
795    req->setPaddr(e->pte.paddr() & ~(e->pte.size()-1) |
796                  vaddr & e->pte.size()-1);
797    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
798    return NoFault;
799
800    /** Normal flow ends here. */
801handleIntRegAccess:
802    if (!hpriv) {
803        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
804        if (priv)
805            return new DataAccessException;
806         else
807            return new PrivilegedAction;
808    }
809
810    if ((asi == ASI_SWVR_UDB_INTR_W && !write) ||
811        (asi == ASI_SWVR_UDB_INTR_R && write)) {
812        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
813        return new DataAccessException;
814    }
815
816    goto regAccessOk;
817
818
819handleScratchRegAccess:
820    if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
821        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
822        return new DataAccessException;
823    }
824    goto regAccessOk;
825
826handleQueueRegAccess:
827    if (!priv  && !hpriv) {
828        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
829        return new PrivilegedAction;
830    }
831    if ((!hpriv && (vaddr & 0xF)) || vaddr > 0x3f8 || vaddr < 0x3c0) {
832        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
833        return new DataAccessException;
834    }
835    goto regAccessOk;
836
837handleSparcErrorRegAccess:
838    if (!hpriv) {
839        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
840        if (priv)
841            return new DataAccessException;
842         else
843            return new PrivilegedAction;
844    }
845    goto regAccessOk;
846
847
848regAccessOk:
849handleMmuRegAccess:
850    DPRINTF(TLB, "TLB: DTB Translating MM IPR access\n");
851    req->setMmapedIpr(true);
852    req->setPaddr(req->getVaddr());
853    return NoFault;
854}
855
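// Handle a read of a memory-mapped MMU (IPR) register. translate() above
// routes register ASIs here with the VA passed through as the physical
// address; this function dispatches on the ASI/VA pair and returns the
// register contents in the packet.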
856Tick
857DTB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
858{
859    Addr va = pkt->getAddr();
860    ASI asi = (ASI)pkt->req->getAsi();
861    uint64_t temp;
862
863    DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
864         (uint32_t)pkt->req->getAsi(), pkt->getAddr());
865
866    ITB * itb = tc->getITBPtr();
867
868    switch (asi) {
869      case ASI_LSU_CONTROL_REG:
870        assert(va == 0);
871        pkt->set(tc->readMiscReg(MISCREG_MMU_LSU_CTRL));
872        break;
873      case ASI_MMU:
874        switch (va) {
875          case 0x8:
876            pkt->set(tc->readMiscReg(MISCREG_MMU_P_CONTEXT));
877            break;
878          case 0x10:
879            pkt->set(tc->readMiscReg(MISCREG_MMU_S_CONTEXT));
880            break;
881          default:
882            goto doMmuReadError;
883        }
884        break;
885      case ASI_QUEUE:
886        pkt->set(tc->readMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
887                    (va >> 4) - 0x3c));
888        break;
889      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
890        assert(va == 0);
891        pkt->set(c0_tsb_ps0);
892        break;
893      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
894        assert(va == 0);
895        pkt->set(c0_tsb_ps1);
896        break;
897      case ASI_DMMU_CTXT_ZERO_CONFIG:
898        assert(va == 0);
899        pkt->set(c0_config);
900        break;
901      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
902        assert(va == 0);
903        pkt->set(itb->c0_tsb_ps0);
904        break;
905      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
906        assert(va == 0);
907        pkt->set(itb->c0_tsb_ps1);
908        break;
909      case ASI_IMMU_CTXT_ZERO_CONFIG:
910        assert(va == 0);
911        pkt->set(itb->c0_config);
912        break;
913      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
914        assert(va == 0);
915        pkt->set(cx_tsb_ps0);
916        break;
917      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
918        assert(va == 0);
919        pkt->set(cx_tsb_ps1);
920        break;
921      case ASI_DMMU_CTXT_NONZERO_CONFIG:
922        assert(va == 0);
923        pkt->set(cx_config);
924        break;
925      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
926        assert(va == 0);
927        pkt->set(itb->cx_tsb_ps0);
928        break;
929      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
930        assert(va == 0);
931        pkt->set(itb->cx_tsb_ps1);
932        break;
933      case ASI_IMMU_CTXT_NONZERO_CONFIG:
934        assert(va == 0);
935        pkt->set(itb->cx_config);
936        break;
937      case ASI_SPARC_ERROR_STATUS_REG:
938        pkt->set((uint64_t)0);
939        break;
940      case ASI_HYP_SCRATCHPAD:
941      case ASI_SCRATCHPAD:
942        pkt->set(tc->readMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3)));
943        break;
944      case ASI_IMMU:
945        switch (va) {
946          case 0x0:
947            temp = itb->tag_access;
948            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
949            break;
950          case 0x18:
951            pkt->set(itb->sfsr);
952            break;
953          case 0x30:
954            pkt->set(itb->tag_access);
955            break;
956          default:
957            goto doMmuReadError;
958        }
959        break;
960      case ASI_DMMU:
961        switch (va) {
962          case 0x0:
963            temp = tag_access;
964            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
965            break;
966          case 0x18:
967            pkt->set(sfsr);
968            break;
969          case 0x20:
970            pkt->set(sfar);
971            break;
972          case 0x30:
973            pkt->set(tag_access);
974            break;
975          case 0x80:
976            pkt->set(tc->readMiscReg(MISCREG_MMU_PART_ID));
977            break;
978          default:
979                goto doMmuReadError;
980        }
981        break;
982      case ASI_DMMU_TSB_PS0_PTR_REG:
983        pkt->set(MakeTsbPtr(Ps0,
984            tag_access,
985            c0_tsb_ps0,
986            c0_config,
987            cx_tsb_ps0,
988            cx_config));
989        break;
990      case ASI_DMMU_TSB_PS1_PTR_REG:
991        pkt->set(MakeTsbPtr(Ps1,
992                tag_access,
993                c0_tsb_ps1,
994                c0_config,
995                cx_tsb_ps1,
996                cx_config));
997        break;
998      case ASI_IMMU_TSB_PS0_PTR_REG:
999          pkt->set(MakeTsbPtr(Ps0,
1000                itb->tag_access,
1001                itb->c0_tsb_ps0,
1002                itb->c0_config,
1003                itb->cx_tsb_ps0,
1004                itb->cx_config));
1005        break;
1006      case ASI_IMMU_TSB_PS1_PTR_REG:
1007          pkt->set(MakeTsbPtr(Ps1,
1008                itb->tag_access,
1009                itb->c0_tsb_ps1,
1010                itb->c0_config,
1011                itb->cx_tsb_ps1,
1012                itb->cx_config));
1013        break;
1014      case ASI_SWVR_INTR_RECEIVE:
1015        pkt->set(tc->getCpuPtr()->get_interrupts(IT_INT_VEC));
1016        break;
1017      case ASI_SWVR_UDB_INTR_R:
1018        temp = findMsbSet(tc->getCpuPtr()->get_interrupts(IT_INT_VEC));
1019        tc->getCpuPtr()->clear_interrupt(IT_INT_VEC, temp);
1020        pkt->set(temp);
1021        break;
1022      default:
1023doMmuReadError:
1024        panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
1025            (uint32_t)asi, va);
1026    }
1027    pkt->makeAtomicResponse();
1028    return tc->getCpuPtr()->cycles(1);
1029}
1030
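// Handle a write to a memory-mapped MMU (IPR) register: TSB base/config
// registers, tag access and SFSR, TLB data-in/data-access inserts, demap
// operations, and the interrupt receive/dispatch registers.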
1031Tick
1032DTB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
1033{
1034    uint64_t data = gtoh(pkt->get<uint64_t>());
1035    Addr va = pkt->getAddr();
1036    ASI asi = (ASI)pkt->req->getAsi();
1037
1038    Addr ta_insert;
1039    Addr va_insert;
1040    Addr ct_insert;
1041    int part_insert;
1042    int entry_insert = -1;
1043    bool real_insert;
1044    bool ignore;
1045    int part_id;
1046    int ctx_id;
1047    PageTableEntry pte;
1048
1049    DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
1050         (uint32_t)asi, va, data);
1051
1052    ITB * itb = tc->getITBPtr();
1053
1054    switch (asi) {
1055      case ASI_LSU_CONTROL_REG:
1056        assert(va == 0);
1057        tc->setMiscReg(MISCREG_MMU_LSU_CTRL, data);
1058        break;
1059      case ASI_MMU:
1060        switch (va) {
1061          case 0x8:
1062            tc->setMiscReg(MISCREG_MMU_P_CONTEXT, data);
1063            break;
1064          case 0x10:
1065            tc->setMiscReg(MISCREG_MMU_S_CONTEXT, data);
1066            break;
1067          default:
1068            goto doMmuWriteError;
1069        }
1070        break;
1071      case ASI_QUEUE:
1072        assert(mbits(data,13,6) == data);
1073        tc->setMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
1074                    (va >> 4) - 0x3c, data);
1075        break;
1076      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
1077        assert(va == 0);
1078        c0_tsb_ps0 = data;
1079        break;
1080      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
1081        assert(va == 0);
1082        c0_tsb_ps1 = data;
1083        break;
1084      case ASI_DMMU_CTXT_ZERO_CONFIG:
1085        assert(va == 0);
1086        c0_config = data;
1087        break;
1088      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
1089        assert(va == 0);
1090        itb->c0_tsb_ps0 = data;
1091        break;
1092      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
1093        assert(va == 0);
1094        itb->c0_tsb_ps1 = data;
1095        break;
1096      case ASI_IMMU_CTXT_ZERO_CONFIG:
1097        assert(va == 0);
1098        itb->c0_config = data;
1099        break;
1100      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
1101        assert(va == 0);
1102        cx_tsb_ps0 = data;
1103        break;
1104      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
1105        assert(va == 0);
1106        cx_tsb_ps1 = data;
1107        break;
1108      case ASI_DMMU_CTXT_NONZERO_CONFIG:
1109        assert(va == 0);
1110        cx_config = data;
1111        break;
1112      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
1113        assert(va == 0);
1114        itb->cx_tsb_ps0 = data;
1115        break;
1116      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
1117        assert(va == 0);
1118        itb->cx_tsb_ps1 = data;
1119        break;
1120      case ASI_IMMU_CTXT_NONZERO_CONFIG:
1121        assert(va == 0);
1122        itb->cx_config = data;
1123        break;
1124      case ASI_SPARC_ERROR_EN_REG:
1125      case ASI_SPARC_ERROR_STATUS_REG:
1126        warn("Ignoring write to SPARC ERROR register\n");
1127        break;
1128      case ASI_HYP_SCRATCHPAD:
1129      case ASI_SCRATCHPAD:
1130        tc->setMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3), data);
1131        break;
1132      case ASI_IMMU:
1133        switch (va) {
1134          case 0x18:
1135            itb->sfsr = data;
1136            break;
1137          case 0x30:
1138            sext<59>(bits(data, 59,0));
1139            itb->tag_access = data;
1140            break;
1141          default:
1142            goto doMmuWriteError;
1143        }
1144        break;
1145      case ASI_ITLB_DATA_ACCESS_REG:
1146        entry_insert = bits(va, 8,3);  // fall through
1147      case ASI_ITLB_DATA_IN_REG:
1148        assert(entry_insert != -1 || mbits(va,10,9) == va);
1149        ta_insert = itb->tag_access;
1150        va_insert = mbits(ta_insert, 63,13);
1151        ct_insert = mbits(ta_insert, 12,0);
1152        part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1153        real_insert = bits(va, 9,9);
1154        pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1155                PageTableEntry::sun4u);
1156        tc->getITBPtr()->insert(va_insert, part_insert, ct_insert, real_insert,
1157                pte, entry_insert);
1158        break;
1159      case ASI_DTLB_DATA_ACCESS_REG:
1160        entry_insert = bits(va, 8,3);  // fall through
1161      case ASI_DTLB_DATA_IN_REG:
1162        assert(entry_insert != -1 || mbits(va,10,9) == va);
1163        ta_insert = tag_access;
1164        va_insert = mbits(ta_insert, 63,13);
1165        ct_insert = mbits(ta_insert, 12,0);
1166        part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1167        real_insert = bits(va, 9,9);
1168        pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1169                PageTableEntry::sun4u);
1170        insert(va_insert, part_insert, ct_insert, real_insert, pte, entry_insert);
1171        break;
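      // Both demap ASIs encode their arguments in the VA: bits 7:6 select
      // the operation (0 = demap page, 1 = demap context, 2 = demap all),
      // bits 5:4 select the context (0 = primary, 1 = secondary, 3 =
      // nucleus), bit 9 selects a real mapping, and bits 63:13 hold the
      // page VA for a page demap.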
1172      case ASI_IMMU_DEMAP:
1173        ignore = false;
1174        ctx_id = -1;
1175        part_id =  tc->readMiscReg(MISCREG_MMU_PART_ID);
1176        switch (bits(va,5,4)) {
1177          case 0:
1178            ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1179            break;
1180          case 1:
1181            ignore = true;
1182            break;
1183          case 3:
1184            ctx_id = 0;
1185            break;
1186          default:
1187            ignore = true;
1188        }
1189
1190        switch(bits(va,7,6)) {
1191          case 0: // demap page
1192            if (!ignore)
1193                tc->getITBPtr()->demapPage(mbits(va,63,13), part_id,
1194                        bits(va,9,9), ctx_id);
1195            break;
1196          case 1: //demap context
1197            if (!ignore)
1198                tc->getITBPtr()->demapContext(part_id, ctx_id);
1199            break;
1200          case 2:
1201            tc->getITBPtr()->demapAll(part_id);
1202            break;
1203          default:
1204            panic("Invalid type for IMMU demap\n");
1205        }
1206        break;
1207      case ASI_DMMU:
1208        switch (va) {
1209          case 0x18:
1210            sfsr = data;
1211            break;
1212          case 0x30:
1213            sext<59>(bits(data, 59,0));
1214            tag_access = data;
1215            break;
1216          case 0x80:
1217            tc->setMiscReg(MISCREG_MMU_PART_ID, data);
1218            break;
1219          default:
1220            goto doMmuWriteError;
1221        }
1222        break;
1223      case ASI_DMMU_DEMAP:
1224        ignore = false;
1225        ctx_id = -1;
1226        part_id =  tc->readMiscReg(MISCREG_MMU_PART_ID);
1227        switch (bits(va,5,4)) {
1228          case 0:
1229            ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1230            break;
1231          case 1:
1232            ctx_id = tc->readMiscReg(MISCREG_MMU_S_CONTEXT);
1233            break;
1234          case 3:
1235            ctx_id = 0;
1236            break;
1237          default:
1238            ignore = true;
1239        }
1240
1241        switch(bits(va,7,6)) {
1242          case 0: // demap page
1243            if (!ignore)
1244                demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
1245            break;
1246          case 1: //demap context
1247            if (!ignore)
1248                demapContext(part_id, ctx_id);
1249            break;
1250          case 2:
1251            demapAll(part_id);
1252            break;
1253          default:
1254            panic("Invalid type for DMMU demap\n");
1255        }
1256        break;
1257      case ASI_SWVR_INTR_RECEIVE:
1258        int msb;
1259        // clear each pending interrupt whose bit is set in the written data
1260        while(tc->getCpuPtr()->get_interrupts(IT_INT_VEC) & data) {
1261            msb = findMsbSet(tc->getCpuPtr()->get_interrupts(IT_INT_VEC) & data);
1262            tc->getCpuPtr()->clear_interrupt(IT_INT_VEC, msb);
1263        }
1264        break;
1265      case ASI_SWVR_UDB_INTR_W:
1266        tc->getSystemPtr()->threadContexts[bits(data,12,8)]->getCpuPtr()->
1267            post_interrupt(bits(data,5,0),0);
1268        break;
1269      default:
1270doMmuWriteError:
1271        panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
1272            (uint32_t)pkt->req->getAsi(), pkt->getAddr(), data);
1273    }
1274    pkt->makeAtomicResponse();
1275    return tc->getCpuPtr()->cycles(1);
1276}
1277
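// Compute the four TSB pointers (D-TSB PS0/PS1 and I-TSB PS0/PS1) for the
// given address and context, mirroring what the TSB pointer registers
// would return for the corresponding tag access value.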
1278void
1279DTB::GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs)
1280{
1281    uint64_t tag_access = mbits(addr,63,13) | mbits(ctx,12,0);
1282    ITB * itb = tc->getITBPtr();
1283    ptrs[0] = MakeTsbPtr(Ps0, tag_access,
1284                c0_tsb_ps0,
1285                c0_config,
1286                cx_tsb_ps0,
1287                cx_config);
1288    ptrs[1] = MakeTsbPtr(Ps1, tag_access,
1289                c0_tsb_ps1,
1290                c0_config,
1291                cx_tsb_ps1,
1292                cx_config);
1293    ptrs[2] = MakeTsbPtr(Ps0, tag_access,
1294                itb->c0_tsb_ps0,
1295                itb->c0_config,
1296                itb->cx_tsb_ps0,
1297                itb->cx_config);
1298    ptrs[3] = MakeTsbPtr(Ps1, tag_access,
1299                itb->c0_tsb_ps1,
1300                itb->c0_config,
1301                itb->cx_tsb_ps1,
1302                itb->cx_config);
1303}
1304
1305
1306
1307
1308
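// Form a TSB pointer from a tag access value and the TSB base/config
// registers. The context-zero registers are used when the context in
// tag_access is zero, otherwise the context-nonzero ones. The pointer is
// the TSB base, plus the split bit for the PS1 half when enabled, plus the
// VPN bits selected by the configured page size and TSB size. For example,
// with tsb_size == 0, split == 0, and page_size == 0 this reduces to
// mbits(tsb,63,13) | ((tag_access >> 9) & mask(12,4)), assuming mask(hi,lo)
// builds a mask of bits hi..lo as in base/bitfield.hh.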
1309uint64_t
1310DTB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
1311        uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
1312{
1313    uint64_t tsb;
1314    uint64_t config;
1315
1316    if (bits(tag_access, 12,0) == 0) {
1317        tsb = c0_tsb;
1318        config = c0_config;
1319    } else {
1320        tsb = cX_tsb;
1321        config = cX_config;
1322    }
1323
1324    uint64_t ptr = mbits(tsb,63,13);
1325    bool split = bits(tsb,12,12);
1326    int tsb_size = bits(tsb,3,0);
1327    int page_size = (ps == Ps0) ? bits(config, 2,0) : bits(config,10,8);
1328
1329    if (ps == Ps1  && split)
1330        ptr |= ULL(1) << (13 + tsb_size);
1331    ptr |= (tag_access >> (9 + page_size * 3)) & mask(12+tsb_size, 4);
1332
1333    return ptr;
1334}
1335
1336
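// Checkpoint the TLB: the sizing/replacement state, the free list (saved
// as entry indices rather than pointers), every TLB entry, and the MMU
// TSB/config/fault registers. unserialize() below restores the same state
// and rebuilds the lookup table from the valid entries.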
1337void
1338TLB::serialize(std::ostream &os)
1339{
1340    SERIALIZE_SCALAR(size);
1341    SERIALIZE_SCALAR(usedEntries);
1342    SERIALIZE_SCALAR(lastReplaced);
1343
1344    // convert the pointer based free list into an index based one
1345    int *free_list = (int*)malloc(sizeof(int) * size);
1346    int cntr = 0;
1347    std::list<TlbEntry*>::iterator i;
1348    i = freeList.begin();
1349    while (i != freeList.end()) {
1350        free_list[cntr++] = ((size_t)*i - (size_t)tlb)/ sizeof(TlbEntry);
1351        i++;
1352    }
1353    SERIALIZE_SCALAR(cntr);
1354    SERIALIZE_ARRAY(free_list,  cntr);
1355
1356    for (int x = 0; x < size; x++) {
1357        nameOut(os, csprintf("%s.PTE%d", name(), x));
1358        tlb[x].serialize(os);
1359    }
1360
1361    SERIALIZE_SCALAR(c0_tsb_ps0);
1362    SERIALIZE_SCALAR(c0_tsb_ps1);
1363    SERIALIZE_SCALAR(c0_config);
1364    SERIALIZE_SCALAR(cx_tsb_ps0);
1365    SERIALIZE_SCALAR(cx_tsb_ps1);
1366    SERIALIZE_SCALAR(cx_config);
1367    SERIALIZE_SCALAR(sfsr);
1368    SERIALIZE_SCALAR(tag_access);
1369}
1370
1371void
1372TLB::unserialize(Checkpoint *cp, const std::string &section)
1373{
1374    int oldSize;
1375
1376    paramIn(cp, section, "size", oldSize);
1377    if (oldSize != size)
1378        panic("Don't support unserializing different sized TLBs\n");
1379    UNSERIALIZE_SCALAR(usedEntries);
1380    UNSERIALIZE_SCALAR(lastReplaced);
1381
1382    int cntr;
1383    UNSERIALIZE_SCALAR(cntr);
1384
1385    int *free_list = (int*)malloc(sizeof(int) * cntr);
1386    freeList.clear();
1387    UNSERIALIZE_ARRAY(free_list,  cntr);
1388    for (int x = 0; x < cntr; x++)
1389        freeList.push_back(&tlb[free_list[x]]);
1390
1391    lookupTable.clear();
1392    for (int x = 0; x < size; x++) {
1393        tlb[x].unserialize(cp, csprintf("%s.PTE%d", section, x));
1394        if (tlb[x].valid)
1395            lookupTable.insert(tlb[x].range, &tlb[x]);
1396
1397    }
1398
1399    UNSERIALIZE_SCALAR(c0_tsb_ps0);
1400    UNSERIALIZE_SCALAR(c0_tsb_ps1);
1401    UNSERIALIZE_SCALAR(c0_config);
1402    UNSERIALIZE_SCALAR(cx_tsb_ps0);
1403    UNSERIALIZE_SCALAR(cx_tsb_ps1);
1404    UNSERIALIZE_SCALAR(cx_config);
1405    UNSERIALIZE_SCALAR(sfsr);
1406    UNSERIALIZE_SCALAR(tag_access);
1407}
1408
1409void
1410DTB::serialize(std::ostream &os)
1411{
1412    TLB::serialize(os);
1413    SERIALIZE_SCALAR(sfar);
1414}
1415
1416void
1417DTB::unserialize(Checkpoint *cp, const std::string &section)
1418{
1419    TLB::unserialize(cp, section);
1420    UNSERIALIZE_SCALAR(sfar);
1421}
1422
1423} /* end namespace SparcISA */
1424
1425SparcISA::ITB *
1426SparcITBParams::create()
1427{
1428    return new SparcISA::ITB(name, size);
1429}
1430
1431SparcISA::DTB *
1432SparcDTBParams::create()
1433{
1434    return new SparcISA::DTB(name, size);
1435}
1436