tlb.cc revision 5034
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

#include <cstring>

#include "arch/sparc/asi.hh"
#include "arch/sparc/miscregfile.hh"
#include "arch/sparc/tlb.hh"
#include "base/bitfield.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "cpu/base.hh"
#include "mem/packet_access.hh"
#include "mem/request.hh"
#include "sim/system.hh"

/* @todo remove some of the magic constants.  -- ali
 */
namespace SparcISA {

TLB::TLB(const Params *p)
    : SimObject(p), size(p->size), usedEntries(0), lastReplaced(0),
      cacheValid(false)
{
    // To make this work you'll have to change the hypervisor and OS
    if (size > 64)
        fatal("SPARC T1 TLB registers don't support more than 64 TLB entries.");

    tlb = new TlbEntry[size];
    std::memset(tlb, 0, sizeof(TlbEntry) * size);

    for (int x = 0; x < size; x++)
        freeList.push_back(&tlb[x]);

    c0_tsb_ps0 = 0;
    c0_tsb_ps1 = 0;
    c0_config = 0;
    cx_tsb_ps0 = 0;
    cx_tsb_ps1 = 0;
    cx_config = 0;
    sfsr = 0;
    tag_access = 0;
}

void
TLB::clearUsedBits()
{
    MapIter i;
    for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
        TlbEntry *t = i->second;
        if (!t->pte.locked()) {
            t->used = false;
            usedEntries--;
        }
    }
}


void
TLB::insert(Addr va, int partition_id, int context_id, bool real,
        const PageTableEntry& PTE, int entry)
{


    MapIter i;
    TlbEntry *new_entry = NULL;
//    TlbRange tr;
    int x;

    cacheValid = false;
    va &= ~(PTE.size()-1);
 /*   tr.va = va;
    tr.size = PTE.size() - 1;
    tr.contextId = context_id;
    tr.partitionId = partition_id;
    tr.real = real;
*/

    DPRINTF(TLB, "TLB: Inserting TLB Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
            va, PTE.paddr(), partition_id, context_id, (int)real, entry);

    // Demap any entry that conflicts
    for (x = 0; x < size; x++) {
        if (tlb[x].range.real == real &&
            tlb[x].range.partitionId == partition_id &&
            tlb[x].range.va < va + PTE.size() - 1 &&
            tlb[x].range.va + tlb[x].range.size >= va &&
            (real || tlb[x].range.contextId == context_id ))
        {
            if (tlb[x].valid) {
                freeList.push_front(&tlb[x]);
                DPRINTF(TLB, "TLB: Conflicting entry %#X , deleting it\n", x);

                tlb[x].valid = false;
                if (tlb[x].used) {
                    tlb[x].used = false;
                    usedEntries--;
                }
                lookupTable.erase(tlb[x].range);
            }
        }
    }


/*
    i = lookupTable.find(tr);
    if (i != lookupTable.end()) {
        i->second->valid = false;
        if (i->second->used) {
            i->second->used = false;
            usedEntries--;
        }
        freeList.push_front(i->second);
        DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
                i->second);
        lookupTable.erase(i);
    }
*/

    if (entry != -1) {
        assert(entry < size && entry >= 0);
        new_entry = &tlb[entry];
    } else {
        if (!freeList.empty()) {
            new_entry = freeList.front();
        } else {
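            // No free entries: pick a victim round-robin, starting just past
            // the last replaced slot and skipping locked entries; if the scan
            // wraps all the way around, every entry is locked and we fall back
            // to insertAllLocked below.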
            x = lastReplaced;
            do {
                ++x;
                if (x == size)
                    x = 0;
                if (x == lastReplaced)
                    goto insertAllLocked;
            } while (tlb[x].pte.locked());
            lastReplaced = x;
            new_entry = &tlb[x];
        }
        /*
        for (x = 0; x < size; x++) {
            if (!tlb[x].valid || !tlb[x].used)  {
                new_entry = &tlb[x];
                break;
            }
        }*/
    }

insertAllLocked:
    // Use the last entry if they're all locked
    if (!new_entry) {
        new_entry = &tlb[size-1];
    }

    freeList.remove(new_entry);
    if (new_entry->valid && new_entry->used)
        usedEntries--;
    if (new_entry->valid)
        lookupTable.erase(new_entry->range);


    assert(PTE.valid());
    new_entry->range.va = va;
    new_entry->range.size = PTE.size() - 1;
    new_entry->range.partitionId = partition_id;
    new_entry->range.contextId = context_id;
    new_entry->range.real = real;
    new_entry->pte = PTE;
    new_entry->used = true;
    new_entry->valid = true;
    usedEntries++;

    i = lookupTable.insert(new_entry->range, new_entry);
    assert(i != lookupTable.end());

    // If all entries have their used bit set, clear it on all of them except
    // the one we just inserted
    if (usedEntries == size) {
        clearUsedBits();
        new_entry->used = true;
        usedEntries++;
    }

}


TlbEntry*
TLB::lookup(Addr va, int partition_id, bool real, int context_id,
        bool update_used)
{
    MapIter i;
    TlbRange tr;
    TlbEntry *t;

    DPRINTF(TLB, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
            va, partition_id, context_id, real);
    // Assemble full address structure
    tr.va = va;
    tr.size = MachineBytes;
    tr.contextId = context_id;
    tr.partitionId = partition_id;
    tr.real = real;

    // Try to find the entry
    i = lookupTable.find(tr);
    if (i == lookupTable.end()) {
        DPRINTF(TLB, "TLB: No valid entry found\n");
        return NULL;
    }

    // Mark the entry's used bit and clear other used bits if needed
    t = i->second;
    DPRINTF(TLB, "TLB: Valid entry found pa: %#x size: %#x\n", t->pte.paddr(),
            t->pte.size());

    // Update the used bits only if this is a real access (not a fake one from
    // virttophys())
    if (!t->used && update_used) {
        t->used = true;
        usedEntries++;
        if (usedEntries == size) {
            clearUsedBits();
            t->used = true;
            usedEntries++;
        }
    }

    return t;
}

void
TLB::dumpAll()
{
    MapIter i;
    for (int x = 0; x < size; x++) {
        if (tlb[x].valid) {
           DPRINTFN("%4d:  %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
                   x, tlb[x].range.partitionId, tlb[x].range.contextId,
                   tlb[x].range.real ? 'R' : ' ', tlb[x].range.size,
                   tlb[x].range.va, tlb[x].pte.paddr(), tlb[x].pte());
        }
    }
}

void
TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
{
    TlbRange tr;
    MapIter i;

    DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
            va, partition_id, context_id, real);

    cacheValid = false;

    // Assemble full address structure
    tr.va = va;
    tr.size = MachineBytes;
    tr.contextId = context_id;
    tr.partitionId = partition_id;
    tr.real = real;

    // Demap any entry that conflicts
    i = lookupTable.find(tr);
    if (i != lookupTable.end()) {
        DPRINTF(IPR, "TLB: Demapped page\n");
        i->second->valid = false;
        if (i->second->used) {
            i->second->used = false;
            usedEntries--;
        }
        freeList.push_front(i->second);
        lookupTable.erase(i);
    }
}

void
TLB::demapContext(int partition_id, int context_id)
{
    int x;
    DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
            partition_id, context_id);
    cacheValid = false;
    for (x = 0; x < size; x++) {
        if (tlb[x].range.contextId == context_id &&
            tlb[x].range.partitionId == partition_id) {
            if (tlb[x].valid == true) {
                freeList.push_front(&tlb[x]);
            }
            tlb[x].valid = false;
            if (tlb[x].used) {
                tlb[x].used = false;
                usedEntries--;
            }
            lookupTable.erase(tlb[x].range);
        }
    }
}

void
TLB::demapAll(int partition_id)
{
    int x;
    DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
    cacheValid = false;
    for (x = 0; x < size; x++) {
        if (!tlb[x].pte.locked() && tlb[x].range.partitionId == partition_id) {
            if (tlb[x].valid == true){
                freeList.push_front(&tlb[x]);
            }
            tlb[x].valid = false;
            if (tlb[x].used) {
                tlb[x].used = false;
                usedEntries--;
            }
            lookupTable.erase(tlb[x].range);
        }
    }
}

void
TLB::invalidateAll()
{
    int x;
    cacheValid = false;

    freeList.clear();
    lookupTable.clear();
    for (x = 0; x < size; x++) {
        if (tlb[x].valid == true)
            freeList.push_back(&tlb[x]);
        tlb[x].valid = false;
        tlb[x].used = false;
    }
    usedEntries = 0;
}

uint64_t
TLB::TteRead(int entry) {
    if (entry >= size)
        panic("entry: %d\n", entry);

    assert(entry < size);
    if (tlb[entry].valid)
        return tlb[entry].pte();
    else
        return (uint64_t)-1ll;
}

uint64_t
TLB::TagRead(int entry) {
    assert(entry < size);
    uint64_t tag;
    if (!tlb[entry].valid)
        return (uint64_t)-1ll;

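    // Reassemble the tag-read format: context id and VA in the low bits, the
    // inverted page-size field at bit 56, the real bit at bit 60, and the
    // partition id starting at bit 61.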
    tag = tlb[entry].range.contextId;
    tag |= tlb[entry].range.va;
    tag |= (uint64_t)tlb[entry].range.partitionId << 61;
    tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
    tag |= (uint64_t)~tlb[entry].pte._size() << 56;
    return tag;
}

bool
TLB::validVirtualAddress(Addr va, bool am)
{
    if (am)
        return true;
    if (va >= StartVAddrHole && va <= EndVAddrHole)
        return false;
    return true;
}

void
TLB::writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
{
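    // Bit 0 flags a valid fault; if one was already recorded, bit 1 is set as
    // well to note the overwrite. The remaining fields encode the write bit,
    // context type, side-effect bit, fault type, and ASI.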
    if (sfsr & 0x1)
        sfsr = 0x3;
    else
        sfsr = 1;

    if (write)
        sfsr |= 1 << 2;
    sfsr |= ct << 4;
    if (se)
        sfsr |= 1 << 6;
    sfsr |= ft << 7;
    sfsr |= asi << 16;
}

void
TLB::writeTagAccess(Addr va, int context)
{
    DPRINTF(TLB, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
            va, context, mbits(va, 63,13) | mbits(context,12,0));

    tag_access = mbits(va, 63,13) | mbits(context,12,0);
}

void
ITB::writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
{
    DPRINTF(TLB, "TLB: ITB Fault:  w=%d ct=%d ft=%d asi=%d\n",
             (int)write, ct, ft, asi);
    TLB::writeSfsr(write, ct, se, ft, asi);
}

void
DTB::writeSfsr(Addr a, bool write, ContextType ct,
        bool se, FaultTypes ft, int asi)
{
    DPRINTF(TLB, "TLB: DTB Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
            a, (int)write, ct, ft, asi);
    TLB::writeSfsr(write, ct, se, ft, asi);
    sfar = a;
}

Fault
ITB::translate(RequestPtr &req, ThreadContext *tc)
{
    uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);

    Addr vaddr = req->getVaddr();
    TlbEntry *e;

    assert(req->getAsi() == ASI_IMPLICIT);

    DPRINTF(TLB, "TLB: ITB Request to translate va=%#x size=%d\n",
            vaddr, req->getSize());

    // Be fast if we can!
    if (cacheValid && cacheState == tlbdata) {
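        // One-entry translation cache: a non-NULL cacheEntry is the entry used
        // by the previous fetch; a NULL cacheEntry means the previous fetch
        // bypassed translation (hpriv or red state).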
        if (cacheEntry) {
            if (cacheEntry->range.va < vaddr + sizeof(MachInst) &&
                cacheEntry->range.va + cacheEntry->range.size >= vaddr) {
                    req->setPaddr((cacheEntry->pte.paddr() & ~(cacheEntry->pte.size()-1)) |
                                  (vaddr & (cacheEntry->pte.size()-1)));
                    return NoFault;
            }
        } else {
            req->setPaddr(vaddr & PAddrImplMask);
            return NoFault;
        }
    }

    bool hpriv = bits(tlbdata,0,0);
    bool red = bits(tlbdata,1,1);
    bool priv = bits(tlbdata,2,2);
    bool addr_mask = bits(tlbdata,3,3);
    bool lsu_im = bits(tlbdata,4,4);

    int part_id = bits(tlbdata,15,8);
    int tl = bits(tlbdata,18,16);
    int pri_context = bits(tlbdata,47,32);
    int context;
    ContextType ct;
    int asi;
    bool real = false;

    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
           priv, hpriv, red, lsu_im, part_id);

    if (tl > 0) {
        asi = ASI_N;
        ct = Nucleus;
        context = 0;
    } else {
        asi = ASI_P;
        ct = Primary;
        context = pri_context;
    }

    if ( hpriv || red ) {
        cacheValid = true;
        cacheState = tlbdata;
        cacheEntry = NULL;
        req->setPaddr(vaddr & PAddrImplMask);
        return NoFault;
    }

    // If the access is unaligned, trap
    if (vaddr & 0x3) {
        writeSfsr(false, ct, false, OtherFault, asi);
        return new MemAddressNotAligned;
    }

    if (addr_mask)
        vaddr = vaddr & VAddrAMask;

    if (!validVirtualAddress(vaddr, addr_mask)) {
        writeSfsr(false, ct, false, VaOutOfRange, asi);
        return new InstructionAccessException;
    }

    if (!lsu_im) {
        e = lookup(vaddr, part_id, true);
        real = true;
        context = 0;
    } else {
        e = lookup(vaddr, part_id, false, context);
    }

    if (e == NULL || !e->valid) {
        writeTagAccess(vaddr, context);
        if (real)
            return new InstructionRealTranslationMiss;
        else
#if FULL_SYSTEM
            return new FastInstructionAccessMMUMiss;
#else
            return new FastInstructionAccessMMUMiss(req->getVaddr());
#endif
    }

    // we're not privileged but are accessing a privileged page
    if (!priv && e->pte.priv()) {
        writeTagAccess(vaddr, context);
        writeSfsr(false, ct, false, PrivViolation, asi);
        return new InstructionAccessException;
    }

    // cache translation data for the next translation
    cacheValid = true;
    cacheState = tlbdata;
    cacheEntry = e;

    req->setPaddr((e->pte.paddr() & ~(e->pte.size()-1)) |
                  (vaddr & (e->pte.size()-1)));
    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
    return NoFault;
}



Fault
DTB::translate(RequestPtr &req, ThreadContext *tc, bool write)
{
    /* @todo this could really use some profiling and fixing to make it faster! */
    uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
    Addr vaddr = req->getVaddr();
    Addr size = req->getSize();
    ASI asi;
    asi = (ASI)req->getAsi();
    bool implicit = false;
    bool hpriv = bits(tlbdata,0,0);
    bool unaligned = (vaddr & (size - 1));

    DPRINTF(TLB, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
            vaddr, size, asi);

    if (lookupTable.size() != 64 - freeList.size())
       panic("Lookup table size: %d free list size: %d\n", lookupTable.size(),
               freeList.size());
    if (asi == ASI_IMPLICIT)
        implicit = true;

    // Only use the fast path here if there doesn't need to be an unaligned
    // trap later
    if (!unaligned) {
        if (hpriv && implicit) {
            req->setPaddr(vaddr & PAddrImplMask);
            return NoFault;
        }

        // Be fast if we can!
        if (cacheValid && cacheState == tlbdata) {
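            // Two-entry translation cache: check the two most recently used
            // (entry, ASI) pairs before falling back to a full TLB lookup.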
            if (cacheEntry[0]) {
                TlbEntry *ce = cacheEntry[0];
                Addr ce_va = ce->range.va;
                if (cacheAsi[0] == asi &&
                    ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
                    (!write || ce->pte.writable())) {
                        req->setPaddr(ce->pte.paddrMask() | (vaddr & ce->pte.sizeMask()));
                        if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
                            req->setFlags(req->getFlags() | UNCACHEABLE);
                        DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
                        return NoFault;
                } // if matched
            } // if cache entry valid
            if (cacheEntry[1]) {
                TlbEntry *ce = cacheEntry[1];
                Addr ce_va = ce->range.va;
                if (cacheAsi[1] == asi &&
                    ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
                    (!write || ce->pte.writable())) {
                        req->setPaddr(ce->pte.paddrMask() | (vaddr & ce->pte.sizeMask()));
                        if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
                            req->setFlags(req->getFlags() | UNCACHEABLE);
                        DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
                        return NoFault;
                } // if matched
            } // if cache entry valid
        }
    }

    bool red = bits(tlbdata,1,1);
    bool priv = bits(tlbdata,2,2);
    bool addr_mask = bits(tlbdata,3,3);
    bool lsu_dm = bits(tlbdata,5,5);

    int part_id = bits(tlbdata,15,8);
    int tl = bits(tlbdata,18,16);
    int pri_context = bits(tlbdata,47,32);
    int sec_context = bits(tlbdata,63,48);

    bool real = false;
    ContextType ct = Primary;
    int context = 0;

    TlbEntry *e;

    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
           priv, hpriv, red, lsu_dm, part_id);

    if (implicit) {
        if (tl > 0) {
            asi = ASI_N;
            ct = Nucleus;
            context = 0;
        } else {
            asi = ASI_P;
            ct = Primary;
            context = pri_context;
        }
    } else {
        // We need to check for priv level/asi priv
        if (!priv && !hpriv && !AsiIsUnPriv(asi)) {
            // It appears that context should be Nucleus in these cases?
            writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
            return new PrivilegedAction;
        }

        if (!hpriv && AsiIsHPriv(asi)) {
            writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
            return new DataAccessException;
        }

        if (AsiIsPrimary(asi)) {
            context = pri_context;
            ct = Primary;
        } else if (AsiIsSecondary(asi)) {
            context = sec_context;
            ct = Secondary;
        } else if (AsiIsNucleus(asi)) {
            ct = Nucleus;
            context = 0;
        } else {  // ????
            ct = Primary;
            context = pri_context;
        }
    }

    if (!implicit && asi != ASI_P && asi != ASI_S) {
        if (AsiIsLittle(asi))
            panic("Little Endian ASIs not supported\n");

        //XXX It's unclear from looking at the documentation how a no fault
        //load differs from a regular one, other than what happens concerning
        //nfo and e bits in the TTE
//        if (AsiIsNoFault(asi))
//            panic("No Fault ASIs not supported\n");

        if (AsiIsPartialStore(asi))
            panic("Partial Store ASIs not supported\n");

        if (AsiIsCmt(asi))
            panic("Cmt ASI registers not implemented\n");

        if (AsiIsInterrupt(asi))
            goto handleIntRegAccess;
        if (AsiIsMmu(asi))
            goto handleMmuRegAccess;
        if (AsiIsScratchPad(asi))
            goto handleScratchRegAccess;
        if (AsiIsQueue(asi))
            goto handleQueueRegAccess;
        if (AsiIsSparcError(asi))
            goto handleSparcErrorRegAccess;

        if (!AsiIsReal(asi) && !AsiIsNucleus(asi) && !AsiIsAsIfUser(asi) &&
                !AsiIsTwin(asi) && !AsiIsBlock(asi) && !AsiIsNoFault(asi))
            panic("Accessing ASI %#X. Should we?\n", asi);
    }

    // If the access is unaligned, trap
    if (unaligned) {
        writeSfsr(vaddr, false, ct, false, OtherFault, asi);
        return new MemAddressNotAligned;
    }

    if (addr_mask)
        vaddr = vaddr & VAddrAMask;

    if (!validVirtualAddress(vaddr, addr_mask)) {
        writeSfsr(vaddr, false, ct, true, VaOutOfRange, asi);
        return new DataAccessException;
    }


    if ((!lsu_dm && !hpriv && !red) || AsiIsReal(asi)) {
        real = true;
        context = 0;
    }

    if (hpriv && (implicit || (!AsiIsAsIfUser(asi) && !AsiIsReal(asi)))) {
        req->setPaddr(vaddr & PAddrImplMask);
        return NoFault;
    }

    e = lookup(vaddr, part_id, real, context);

    if (e == NULL || !e->valid) {
        writeTagAccess(vaddr, context);
        DPRINTF(TLB, "TLB: DTB Failed to find matching TLB entry\n");
        if (real)
            return new DataRealTranslationMiss;
        else
#if FULL_SYSTEM
            return new FastDataAccessMMUMiss;
#else
            return new FastDataAccessMMUMiss(req->getVaddr());
#endif

    }

    if (!priv && e->pte.priv()) {
        writeTagAccess(vaddr, context);
        writeSfsr(vaddr, write, ct, e->pte.sideffect(), PrivViolation, asi);
        return new DataAccessException;
    }

    if (write && !e->pte.writable()) {
        writeTagAccess(vaddr, context);
        writeSfsr(vaddr, write, ct, e->pte.sideffect(), OtherFault, asi);
        return new FastDataAccessProtection;
    }

    if (e->pte.nofault() && !AsiIsNoFault(asi)) {
        writeTagAccess(vaddr, context);
        writeSfsr(vaddr, write, ct, e->pte.sideffect(), LoadFromNfo, asi);
        return new DataAccessException;
    }

    if (e->pte.sideffect() && AsiIsNoFault(asi)) {
        writeTagAccess(vaddr, context);
        writeSfsr(vaddr, write, ct, e->pte.sideffect(), SideEffect, asi);
        return new DataAccessException;
    }


    if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
        req->setFlags(req->getFlags() | UNCACHEABLE);

    // cache translation data for the next translation
    cacheState = tlbdata;
    if (!cacheValid) {
        cacheEntry[1] = NULL;
        cacheEntry[0] = NULL;
    }

    if (cacheEntry[0] != e && cacheEntry[1] != e) {
        cacheEntry[1] = cacheEntry[0];
        cacheEntry[0] = e;
        cacheAsi[1] = cacheAsi[0];
        cacheAsi[0] = asi;
        if (implicit)
            cacheAsi[0] = (ASI)0;
    }
    cacheValid = true;
    req->setPaddr((e->pte.paddr() & ~(e->pte.size()-1)) |
                  (vaddr & (e->pte.size()-1)));
    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
    return NoFault;

    /** Normal flow ends here. */
handleIntRegAccess:
    if (!hpriv) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        if (priv)
            return new DataAccessException;
        else
            return new PrivilegedAction;
    }

    if ((asi == ASI_SWVR_UDB_INTR_W && !write) ||
        (asi == ASI_SWVR_UDB_INTR_R && write)) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        return new DataAccessException;
    }

    goto regAccessOk;


handleScratchRegAccess:
    if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        return new DataAccessException;
    }
    goto regAccessOk;

handleQueueRegAccess:
    if (!priv  && !hpriv) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        return new PrivilegedAction;
    }
    if ((!hpriv && (vaddr & 0xF)) || vaddr > 0x3f8 || vaddr < 0x3c0) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        return new DataAccessException;
    }
    goto regAccessOk;

handleSparcErrorRegAccess:
    if (!hpriv) {
        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
        if (priv)
            return new DataAccessException;
        else
            return new PrivilegedAction;
    }
    goto regAccessOk;


regAccessOk:
handleMmuRegAccess:
    DPRINTF(TLB, "TLB: DTB Translating MM IPR access\n");
    req->setMmapedIpr(true);
    req->setPaddr(req->getVaddr());
    return NoFault;
}

#if FULL_SYSTEM

Tick
DTB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
{
    Addr va = pkt->getAddr();
    ASI asi = (ASI)pkt->req->getAsi();
    uint64_t temp;

    DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
         (uint32_t)pkt->req->getAsi(), pkt->getAddr());

    ITB * itb = tc->getITBPtr();

    switch (asi) {
      case ASI_LSU_CONTROL_REG:
        assert(va == 0);
        pkt->set(tc->readMiscReg(MISCREG_MMU_LSU_CTRL));
        break;
      case ASI_MMU:
        switch (va) {
          case 0x8:
            pkt->set(tc->readMiscReg(MISCREG_MMU_P_CONTEXT));
            break;
          case 0x10:
            pkt->set(tc->readMiscReg(MISCREG_MMU_S_CONTEXT));
            break;
          default:
            goto doMmuReadError;
        }
        break;
      case ASI_QUEUE:
        pkt->set(tc->readMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
                    (va >> 4) - 0x3c));
        break;
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(c0_tsb_ps0);
        break;
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(c0_tsb_ps1);
        break;
      case ASI_DMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        pkt->set(c0_config);
        break;
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(itb->c0_tsb_ps0);
        break;
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(itb->c0_tsb_ps1);
        break;
      case ASI_IMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        pkt->set(itb->c0_config);
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(cx_tsb_ps0);
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(cx_tsb_ps1);
        break;
      case ASI_DMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        pkt->set(cx_config);
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        pkt->set(itb->cx_tsb_ps0);
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        pkt->set(itb->cx_tsb_ps1);
        break;
      case ASI_IMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        pkt->set(itb->cx_config);
        break;
      case ASI_SPARC_ERROR_STATUS_REG:
        pkt->set((uint64_t)0);
        break;
      case ASI_HYP_SCRATCHPAD:
      case ASI_SCRATCHPAD:
        pkt->set(tc->readMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3)));
        break;
      case ASI_IMMU:
        switch (va) {
          case 0x0:
            temp = itb->tag_access;
            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
            break;
          case 0x18:
            pkt->set(itb->sfsr);
            break;
          case 0x30:
            pkt->set(itb->tag_access);
            break;
          default:
            goto doMmuReadError;
        }
        break;
      case ASI_DMMU:
        switch (va) {
          case 0x0:
            temp = tag_access;
            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
            break;
          case 0x18:
            pkt->set(sfsr);
            break;
          case 0x20:
            pkt->set(sfar);
            break;
          case 0x30:
            pkt->set(tag_access);
            break;
          case 0x80:
            pkt->set(tc->readMiscReg(MISCREG_MMU_PART_ID));
            break;
          default:
            goto doMmuReadError;
        }
        break;
      case ASI_DMMU_TSB_PS0_PTR_REG:
        pkt->set(MakeTsbPtr(Ps0,
            tag_access,
            c0_tsb_ps0,
            c0_config,
            cx_tsb_ps0,
            cx_config));
        break;
      case ASI_DMMU_TSB_PS1_PTR_REG:
        pkt->set(MakeTsbPtr(Ps1,
                tag_access,
                c0_tsb_ps1,
                c0_config,
                cx_tsb_ps1,
                cx_config));
        break;
      case ASI_IMMU_TSB_PS0_PTR_REG:
          pkt->set(MakeTsbPtr(Ps0,
                itb->tag_access,
                itb->c0_tsb_ps0,
                itb->c0_config,
                itb->cx_tsb_ps0,
                itb->cx_config));
        break;
      case ASI_IMMU_TSB_PS1_PTR_REG:
          pkt->set(MakeTsbPtr(Ps1,
                itb->tag_access,
                itb->c0_tsb_ps1,
                itb->c0_config,
                itb->cx_tsb_ps1,
                itb->cx_config));
        break;
      case ASI_SWVR_INTR_RECEIVE:
        pkt->set(tc->getCpuPtr()->get_interrupts(IT_INT_VEC));
        break;
      case ASI_SWVR_UDB_INTR_R:
        temp = findMsbSet(tc->getCpuPtr()->get_interrupts(IT_INT_VEC));
        tc->getCpuPtr()->clear_interrupt(IT_INT_VEC, temp);
        pkt->set(temp);
        break;
      default:
doMmuReadError:
        panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
            (uint32_t)asi, va);
    }
    pkt->makeAtomicResponse();
    return tc->getCpuPtr()->cycles(1);
}

Tick
DTB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
{
    uint64_t data = gtoh(pkt->get<uint64_t>());
    Addr va = pkt->getAddr();
    ASI asi = (ASI)pkt->req->getAsi();

    Addr ta_insert;
    Addr va_insert;
    Addr ct_insert;
    int part_insert;
    int entry_insert = -1;
    bool real_insert;
    bool ignore;
    int part_id;
    int ctx_id;
    PageTableEntry pte;

    DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
         (uint32_t)asi, va, data);

    ITB * itb = tc->getITBPtr();

    switch (asi) {
      case ASI_LSU_CONTROL_REG:
        assert(va == 0);
        tc->setMiscReg(MISCREG_MMU_LSU_CTRL, data);
        break;
      case ASI_MMU:
        switch (va) {
          case 0x8:
            tc->setMiscReg(MISCREG_MMU_P_CONTEXT, data);
            break;
          case 0x10:
            tc->setMiscReg(MISCREG_MMU_S_CONTEXT, data);
            break;
          default:
            goto doMmuWriteError;
        }
        break;
      case ASI_QUEUE:
        assert(mbits(data,13,6) == data);
        tc->setMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
                    (va >> 4) - 0x3c, data);
        break;
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        c0_tsb_ps0 = data;
        break;
      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        c0_tsb_ps1 = data;
        break;
      case ASI_DMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        c0_config = data;
        break;
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
        assert(va == 0);
        itb->c0_tsb_ps0 = data;
        break;
      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
        assert(va == 0);
        itb->c0_tsb_ps1 = data;
        break;
      case ASI_IMMU_CTXT_ZERO_CONFIG:
        assert(va == 0);
        itb->c0_config = data;
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        cx_tsb_ps0 = data;
        break;
      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        cx_tsb_ps1 = data;
        break;
      case ASI_DMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        cx_config = data;
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
        assert(va == 0);
        itb->cx_tsb_ps0 = data;
        break;
      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
        assert(va == 0);
        itb->cx_tsb_ps1 = data;
        break;
      case ASI_IMMU_CTXT_NONZERO_CONFIG:
        assert(va == 0);
        itb->cx_config = data;
        break;
      case ASI_SPARC_ERROR_EN_REG:
      case ASI_SPARC_ERROR_STATUS_REG:
        warn("Ignoring write to SPARC ERROR register\n");
        break;
      case ASI_HYP_SCRATCHPAD:
      case ASI_SCRATCHPAD:
        tc->setMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3), data);
        break;
      case ASI_IMMU:
        switch (va) {
          case 0x18:
            itb->sfsr = data;
            break;
          case 0x30:
            sext<59>(bits(data, 59,0));
            itb->tag_access = data;
            break;
          default:
            goto doMmuWriteError;
        }
        break;
      case ASI_ITLB_DATA_ACCESS_REG:
        entry_insert = bits(va, 8,3);
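        // fall through: a data-access write supplies the entry index, then
        // shares the insert path with data-in writes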
      case ASI_ITLB_DATA_IN_REG:
        assert(entry_insert != -1 || mbits(va,10,9) == va);
        ta_insert = itb->tag_access;
        va_insert = mbits(ta_insert, 63,13);
        ct_insert = mbits(ta_insert, 12,0);
        part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
        real_insert = bits(va, 9,9);
        pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
                PageTableEntry::sun4u);
        tc->getITBPtr()->insert(va_insert, part_insert, ct_insert, real_insert,
                pte, entry_insert);
        break;
      case ASI_DTLB_DATA_ACCESS_REG:
        entry_insert = bits(va, 8,3);
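        // fall through: same sharing of the insert path as the ITLB case above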
      case ASI_DTLB_DATA_IN_REG:
        assert(entry_insert != -1 || mbits(va,10,9) == va);
        ta_insert = tag_access;
        va_insert = mbits(ta_insert, 63,13);
        ct_insert = mbits(ta_insert, 12,0);
        part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
        real_insert = bits(va, 9,9);
        pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
                PageTableEntry::sun4u);
        insert(va_insert, part_insert, ct_insert, real_insert, pte, entry_insert);
        break;
      case ASI_IMMU_DEMAP:
        ignore = false;
        ctx_id = -1;
        part_id =  tc->readMiscReg(MISCREG_MMU_PART_ID);
        switch (bits(va,5,4)) {
          case 0:
            ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
            break;
          case 1:
            ignore = true;
            break;
          case 3:
            ctx_id = 0;
            break;
          default:
            ignore = true;
        }

        switch(bits(va,7,6)) {
          case 0: // demap page
            if (!ignore)
                tc->getITBPtr()->demapPage(mbits(va,63,13), part_id,
                        bits(va,9,9), ctx_id);
            break;
          case 1: //demap context
            if (!ignore)
                tc->getITBPtr()->demapContext(part_id, ctx_id);
            break;
          case 2:
            tc->getITBPtr()->demapAll(part_id);
            break;
          default:
            panic("Invalid type for IMMU demap\n");
        }
        break;
      case ASI_DMMU:
        switch (va) {
          case 0x18:
            sfsr = data;
            break;
          case 0x30:
            sext<59>(bits(data, 59,0));
            tag_access = data;
            break;
          case 0x80:
            tc->setMiscReg(MISCREG_MMU_PART_ID, data);
            break;
          default:
            goto doMmuWriteError;
        }
        break;
      case ASI_DMMU_DEMAP:
        ignore = false;
        ctx_id = -1;
        part_id =  tc->readMiscReg(MISCREG_MMU_PART_ID);
        switch (bits(va,5,4)) {
          case 0:
            ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
            break;
          case 1:
            ctx_id = tc->readMiscReg(MISCREG_MMU_S_CONTEXT);
            break;
          case 3:
            ctx_id = 0;
            break;
          default:
            ignore = true;
        }

        switch(bits(va,7,6)) {
          case 0: // demap page
            if (!ignore)
                demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
            break;
          case 1: //demap context
            if (!ignore)
                demapContext(part_id, ctx_id);
            break;
          case 2:
            demapAll(part_id);
            break;
          default:
            panic("Invalid type for DMMU demap\n");
        }
        break;
      case ASI_SWVR_INTR_RECEIVE:
        int msb;
        // clear all the interrupts that aren't set in the write
        while(tc->getCpuPtr()->get_interrupts(IT_INT_VEC) & data) {
            msb = findMsbSet(tc->getCpuPtr()->get_interrupts(IT_INT_VEC) & data);
            tc->getCpuPtr()->clear_interrupt(IT_INT_VEC, msb);
        }
        break;
      case ASI_SWVR_UDB_INTR_W:
            tc->getSystemPtr()->threadContexts[bits(data,12,8)]->getCpuPtr()->
            post_interrupt(bits(data,5,0),0);
        break;
      default:
doMmuWriteError:
        panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
            (uint32_t)pkt->req->getAsi(), pkt->getAddr(), data);
    }
    pkt->makeAtomicResponse();
    return tc->getCpuPtr()->cycles(1);
}

#endif

void
DTB::GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs)
{
    uint64_t tag_access = mbits(addr,63,13) | mbits(ctx,12,0);
    ITB * itb = tc->getITBPtr();
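    // Compute the four TSB pointers for this address and context: data-TLB
    // PS0 and PS1, followed by instruction-TLB PS0 and PS1.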
    ptrs[0] = MakeTsbPtr(Ps0, tag_access,
                c0_tsb_ps0,
                c0_config,
                cx_tsb_ps0,
                cx_config);
    ptrs[1] = MakeTsbPtr(Ps1, tag_access,
                c0_tsb_ps1,
                c0_config,
                cx_tsb_ps1,
                cx_config);
    ptrs[2] = MakeTsbPtr(Ps0, tag_access,
                itb->c0_tsb_ps0,
                itb->c0_config,
                itb->cx_tsb_ps0,
                itb->cx_config);
    ptrs[3] = MakeTsbPtr(Ps1, tag_access,
                itb->c0_tsb_ps1,
                itb->c0_config,
                itb->cx_tsb_ps1,
                itb->cx_config);
}

uint64_t
DTB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
        uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
{
    uint64_t tsb;
    uint64_t config;

    if (bits(tag_access, 12,0) == 0) {
        tsb = c0_tsb;
        config = c0_config;
    } else {
        tsb = cX_tsb;
        config = cX_config;
    }

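    // Form the pointer: TSB base in bits 63:13, bumped into the upper half for
    // PS1 when the TSB is split, then indexed by VA bits selected according to
    // the configured page size and TSB size.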
    uint64_t ptr = mbits(tsb,63,13);
    bool split = bits(tsb,12,12);
    int tsb_size = bits(tsb,3,0);
    int page_size = (ps == Ps0) ? bits(config, 2,0) : bits(config,10,8);

    if (ps == Ps1  && split)
        ptr |= ULL(1) << (13 + tsb_size);
    ptr |= (tag_access >> (9 + page_size * 3)) & mask(12+tsb_size, 4);

    return ptr;
}


void
TLB::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(usedEntries);
    SERIALIZE_SCALAR(lastReplaced);

    // convert the pointer based free list into an index based one
    int *free_list = (int*)malloc(sizeof(int) * size);
    int cntr = 0;
    std::list<TlbEntry*>::iterator i;
    i = freeList.begin();
    while (i != freeList.end()) {
        free_list[cntr++] = ((size_t)*i - (size_t)tlb)/ sizeof(TlbEntry);
        i++;
    }
    SERIALIZE_SCALAR(cntr);
    SERIALIZE_ARRAY(free_list,  cntr);

    for (int x = 0; x < size; x++) {
        nameOut(os, csprintf("%s.PTE%d", name(), x));
        tlb[x].serialize(os);
    }

    SERIALIZE_SCALAR(c0_tsb_ps0);
    SERIALIZE_SCALAR(c0_tsb_ps1);
    SERIALIZE_SCALAR(c0_config);
    SERIALIZE_SCALAR(cx_tsb_ps0);
    SERIALIZE_SCALAR(cx_tsb_ps1);
    SERIALIZE_SCALAR(cx_config);
    SERIALIZE_SCALAR(sfsr);
    SERIALIZE_SCALAR(tag_access);
}

void
TLB::unserialize(Checkpoint *cp, const std::string &section)
{
    int oldSize;

    paramIn(cp, section, "size", oldSize);
    if (oldSize != size)
        panic("Don't support unserializing different sized TLBs\n");
    UNSERIALIZE_SCALAR(usedEntries);
    UNSERIALIZE_SCALAR(lastReplaced);

    int cntr;
    UNSERIALIZE_SCALAR(cntr);

    int *free_list = (int*)malloc(sizeof(int) * cntr);
    freeList.clear();
    UNSERIALIZE_ARRAY(free_list,  cntr);
    for (int x = 0; x < cntr; x++)
        freeList.push_back(&tlb[free_list[x]]);

    lookupTable.clear();
    for (int x = 0; x < size; x++) {
        tlb[x].unserialize(cp, csprintf("%s.PTE%d", section, x));
        if (tlb[x].valid)
            lookupTable.insert(tlb[x].range, &tlb[x]);

    }

    UNSERIALIZE_SCALAR(c0_tsb_ps0);
    UNSERIALIZE_SCALAR(c0_tsb_ps1);
    UNSERIALIZE_SCALAR(c0_config);
    UNSERIALIZE_SCALAR(cx_tsb_ps0);
    UNSERIALIZE_SCALAR(cx_tsb_ps1);
    UNSERIALIZE_SCALAR(cx_config);
    UNSERIALIZE_SCALAR(sfsr);
    UNSERIALIZE_SCALAR(tag_access);
}

void
DTB::serialize(std::ostream &os)
{
    TLB::serialize(os);
    SERIALIZE_SCALAR(sfar);
}

void
DTB::unserialize(Checkpoint *cp, const std::string &section)
{
    TLB::unserialize(cp, section);
    UNSERIALIZE_SCALAR(sfar);
}

} /* end namespace SparcISA */

SparcISA::ITB *
SparcITBParams::create()
{
    return new SparcISA::ITB(this);
}

SparcISA::DTB *
SparcDTBParams::create()
{
    return new SparcISA::DTB(this);
}