tlb.cc revision 6023
1/*
2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31#include <cstring>
32
33#include "arch/sparc/asi.hh"
34#include "arch/sparc/miscregfile.hh"
35#include "arch/sparc/tlb.hh"
36#include "base/bitfield.hh"
37#include "base/trace.hh"
38#include "cpu/thread_context.hh"
39#include "cpu/base.hh"
40#include "mem/packet_access.hh"
41#include "mem/request.hh"
42#include "sim/system.hh"
43
44/* @todo remove some of the magic constants.  -- ali
45 */
46namespace SparcISA {
47
48TLB::TLB(const Params *p)
49    : BaseTLB(p), size(p->size), usedEntries(0), lastReplaced(0),
50      cacheValid(false)
51{
52    // To make this work you'll have to change the hypervisor and OS
53    if (size > 64)
54        fatal("SPARC T1 TLB registers don't support more than 64 TLB entries");
55
56    tlb = new TlbEntry[size];
57    std::memset(tlb, 0, sizeof(TlbEntry) * size);
58
59    for (int x = 0; x < size; x++)
60        freeList.push_back(&tlb[x]);
61
62    c0_tsb_ps0 = 0;
63    c0_tsb_ps1 = 0;
64    c0_config = 0;
65    cx_tsb_ps0 = 0;
66    cx_tsb_ps1 = 0;
67    cx_config = 0;
68    sfsr = 0;
69    tag_access = 0;
70    sfar = 0;
71    cacheEntry[0] = NULL;
72    cacheEntry[1] = NULL;
73}
74
75void
76TLB::clearUsedBits()
77{
78    MapIter i;
79    for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
80        TlbEntry *t = i->second;
81        if (!t->pte.locked()) {
82            t->used = false;
83            usedEntries--;
84        }
85    }
86}
87
88
89void
90TLB::insert(Addr va, int partition_id, int context_id, bool real,
91        const PageTableEntry& PTE, int entry)
92{
93    MapIter i;
94    TlbEntry *new_entry = NULL;
95//    TlbRange tr;
96    int x;
97
98    cacheValid = false;
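    // Align the VA down to this PTE's page size (a power of two)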
99    va &= ~(PTE.size()-1);
100 /*   tr.va = va;
101    tr.size = PTE.size() - 1;
102    tr.contextId = context_id;
103    tr.partitionId = partition_id;
104    tr.real = real;
105*/
106
107    DPRINTF(TLB,
108        "TLB: Inserting Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
109        va, PTE.paddr(), partition_id, context_id, (int)real, entry);
110
111    // Demap any entry that conflicts
112    for (x = 0; x < size; x++) {
113        if (tlb[x].range.real == real &&
114            tlb[x].range.partitionId == partition_id &&
115            tlb[x].range.va < va + PTE.size() - 1 &&
116            tlb[x].range.va + tlb[x].range.size >= va &&
117            (real || tlb[x].range.contextId == context_id ))
118        {
119            if (tlb[x].valid) {
120                freeList.push_front(&tlb[x]);
121                DPRINTF(TLB, "TLB: Conflicting entry %#X , deleting it\n", x);
122
123                tlb[x].valid = false;
124                if (tlb[x].used) {
125                    tlb[x].used = false;
126                    usedEntries--;
127                }
128                lookupTable.erase(tlb[x].range);
129            }
130        }
131    }
132
133/*
134    i = lookupTable.find(tr);
135    if (i != lookupTable.end()) {
136        i->second->valid = false;
137        if (i->second->used) {
138            i->second->used = false;
139            usedEntries--;
140        }
141        freeList.push_front(i->second);
142        DPRINTF(TLB, "TLB: Found conflicting entry %#X , deleting it\n",
143                i->second);
144        lookupTable.erase(i);
145    }
146*/
147
148    if (entry != -1) {
149        assert(entry < size && entry >= 0);
150        new_entry = &tlb[entry];
151    } else {
152        if (!freeList.empty()) {
153            new_entry = freeList.front();
154        } else {
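            // No free entries: scan forward from the last replacement point
            // for an unlocked entry (pseudo round-robin); if every entry is
            // locked, bail out to insertAllLocked below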
155            x = lastReplaced;
156            do {
157                ++x;
158                if (x == size)
159                    x = 0;
160                if (x == lastReplaced)
161                    goto insertAllLocked;
162            } while (tlb[x].pte.locked());
163            lastReplaced = x;
164            new_entry = &tlb[x];
165        }
166        /*
167        for (x = 0; x < size; x++) {
168            if (!tlb[x].valid || !tlb[x].used)  {
169                new_entry = &tlb[x];
170                break;
171            }
172        }*/
173    }
174
175insertAllLocked:
176    // If every entry was locked, fall back to replacing the last entry
177    if (!new_entry) {
178        new_entry = &tlb[size-1];
179    }
180
181    freeList.remove(new_entry);
182    if (new_entry->valid && new_entry->used)
183        usedEntries--;
184    if (new_entry->valid)
185        lookupTable.erase(new_entry->range);
186
187
188    assert(PTE.valid());
189    new_entry->range.va = va;
190    new_entry->range.size = PTE.size() - 1;
191    new_entry->range.partitionId = partition_id;
192    new_entry->range.contextId = context_id;
193    new_entry->range.real = real;
194    new_entry->pte = PTE;
195    new_entry->used = true;
196    new_entry->valid = true;
197    usedEntries++;
198
199    i = lookupTable.insert(new_entry->range, new_entry);
200    assert(i != lookupTable.end());
201
202    // If all entries have their used bit set, clear it on all of them
203    // except the one we just inserted
204    if (usedEntries == size) {
205        clearUsedBits();
206        new_entry->used = true;
207        usedEntries++;
208    }
209}
210
211
212TlbEntry*
213TLB::lookup(Addr va, int partition_id, bool real, int context_id,
214            bool update_used)
215{
216    MapIter i;
217    TlbRange tr;
218    TlbEntry *t;
219
220    DPRINTF(TLB, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
221            va, partition_id, context_id, real);
222    // Assemble full address structure
223    tr.va = va;
224    tr.size = 1;
225    tr.contextId = context_id;
226    tr.partitionId = partition_id;
227    tr.real = real;
228
229    // Try to find the entry
230    i = lookupTable.find(tr);
231    if (i == lookupTable.end()) {
232        DPRINTF(TLB, "TLB: No valid entry found\n");
233        return NULL;
234    }
235
236    // Mark the entry's used bit and clear other used bits if needed
237    t = i->second;
238    DPRINTF(TLB, "TLB: Valid entry found pa: %#x size: %#x\n", t->pte.paddr(),
239            t->pte.size());
240
241    // Update the used bits only if this is a real access (not a fake
242    // one from virttophys())
243    if (!t->used && update_used) {
244        t->used = true;
245        usedEntries++;
246        if (usedEntries == size) {
247            clearUsedBits();
248            t->used = true;
249            usedEntries++;
250        }
251    }
252
253    return t;
254}
255
256void
257TLB::dumpAll()
258{
259    MapIter i;
260    for (int x = 0; x < size; x++) {
261        if (tlb[x].valid) {
262           DPRINTFN("%4d:  %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
263                   x, tlb[x].range.partitionId, tlb[x].range.contextId,
264                   tlb[x].range.real ? 'R' : ' ', tlb[x].range.size,
265                   tlb[x].range.va, tlb[x].pte.paddr(), tlb[x].pte());
266        }
267    }
268}
269
270void
271TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
272{
273    TlbRange tr;
274    MapIter i;
275
276    DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
277            va, partition_id, context_id, real);
278
279    cacheValid = false;
280
281    // Assemble full address structure
282    tr.va = va;
283    tr.size = 1;
284    tr.contextId = context_id;
285    tr.partitionId = partition_id;
286    tr.real = real;
287
288    // Demap any entry that conflicts
289    i = lookupTable.find(tr);
290    if (i != lookupTable.end()) {
291        DPRINTF(IPR, "TLB: Demapped page\n");
292        i->second->valid = false;
293        if (i->second->used) {
294            i->second->used = false;
295            usedEntries--;
296        }
297        freeList.push_front(i->second);
298        lookupTable.erase(i);
299    }
300}
301
302void
303TLB::demapContext(int partition_id, int context_id)
304{
305    DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
306            partition_id, context_id);
307    cacheValid = false;
308    for (int x = 0; x < size; x++) {
309        if (tlb[x].range.contextId == context_id &&
310            tlb[x].range.partitionId == partition_id) {
311            if (tlb[x].valid == true) {
312                freeList.push_front(&tlb[x]);
313            }
314            tlb[x].valid = false;
315            if (tlb[x].used) {
316                tlb[x].used = false;
317                usedEntries--;
318            }
319            lookupTable.erase(tlb[x].range);
320        }
321    }
322}
323
324void
325TLB::demapAll(int partition_id)
326{
327    DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
328    cacheValid = false;
329    for (int x = 0; x < size; x++) {
330        if (tlb[x].valid && !tlb[x].pte.locked() &&
331                tlb[x].range.partitionId == partition_id) {
332            freeList.push_front(&tlb[x]);
333            tlb[x].valid = false;
334            if (tlb[x].used) {
335                tlb[x].used = false;
336                usedEntries--;
337            }
338            lookupTable.erase(tlb[x].range);
339        }
340    }
341}
342
343void
344TLB::invalidateAll()
345{
346    cacheValid = false;
347    lookupTable.clear();
348
349    for (int x = 0; x < size; x++) {
350        if (tlb[x].valid == true)
351            freeList.push_back(&tlb[x]);
352        tlb[x].valid = false;
353        tlb[x].used = false;
354    }
355    usedEntries = 0;
356}
357
358uint64_t
359TLB::TteRead(int entry)
360{
361    if (entry >= size)
362        panic("entry: %d\n", entry);
363
364    assert(entry < size);
365    if (tlb[entry].valid)
366        return tlb[entry].pte();
367    else
368        return (uint64_t)-1ll;
369}
370
371uint64_t
372TLB::TagRead(int entry)
373{
374    assert(entry < size);
375    uint64_t tag;
376    if (!tlb[entry].valid)
377        return (uint64_t)-1ll;
378
379    tag = tlb[entry].range.contextId;
380    tag |= tlb[entry].range.va;
381    tag |= (uint64_t)tlb[entry].range.partitionId << 61;
382    tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
383    tag |= (uint64_t)~tlb[entry].pte._size() << 56;
384    return tag;
385}
386
387bool
388TLB::validVirtualAddress(Addr va, bool am)
389{
390    if (am)
391        return true;
392    if (va >= StartVAddrHole && va <= EndVAddrHole)
393        return false;
394    return true;
395}
396
397void
398TLB::writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
399{
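    // Build up the SFSR: FV (bit 0) is always set and OW (bit 1) records that
    // an earlier fault was still pending; the write bit, context type,
    // side-effect bit, fault type and ASI are then packed into bits 2, 5:4,
    // 6, 7 and up, and 23:16 respectively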
400    if (sfsr & 0x1)
401        sfsr = 0x3;
402    else
403        sfsr = 1;
404
405    if (write)
406        sfsr |= 1 << 2;
407    sfsr |= ct << 4;
408    if (se)
409        sfsr |= 1 << 6;
410    sfsr |= ft << 7;
411    sfsr |= asi << 16;
412}
413
414void
415TLB::writeTagAccess(Addr va, int context)
416{
417    DPRINTF(TLB, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
418            va, context, mbits(va, 63,13) | mbits(context,12,0));
419
420    tag_access = mbits(va, 63,13) | mbits(context,12,0);
421}
422
423void
424TLB::writeSfsr(Addr a, bool write, ContextType ct,
425        bool se, FaultTypes ft, int asi)
426{
427    DPRINTF(TLB, "TLB: Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
428            a, (int)write, ct, ft, asi);
429    TLB::writeSfsr(write, ct, se, ft, asi);
430    sfar = a;
431}
432
433Fault
434TLB::translateInst(RequestPtr req, ThreadContext *tc)
435{
436    uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
437
438    Addr vaddr = req->getVaddr();
439    TlbEntry *e;
440
441    assert(req->getAsi() == ASI_IMPLICIT);
442
443    DPRINTF(TLB, "TLB: ITB Request to translate va=%#x size=%d\n",
444            vaddr, req->getSize());
445
446    // Be fast if we can!
447    if (cacheValid && cacheState == tlbdata) {
448        if (cacheEntry[0]) {
449            if (cacheEntry[0]->range.va < vaddr + sizeof(MachInst) &&
450                cacheEntry[0]->range.va + cacheEntry[0]->range.size >= vaddr) {
451                req->setPaddr(cacheEntry[0]->pte.translate(vaddr));
452                return NoFault;
453            }
454        } else {
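            // A NULL cached entry means the last fetch bypassed translation
            // (hpriv/red), so reuse the identity mapping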
455            req->setPaddr(vaddr & PAddrImplMask);
456            return NoFault;
457        }
458    }
459
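    // MISCREG_TLB_DATA packs the state the MMU needs (hpriv, red, pstate.priv,
    // the address mask, the LSU I/D enables, partition ID, TL and the
    // primary/secondary contexts) into one value so it can be read once and
    // compared against the cached state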
460    bool hpriv = bits(tlbdata,0,0);
461    bool red = bits(tlbdata,1,1);
462    bool priv = bits(tlbdata,2,2);
463    bool addr_mask = bits(tlbdata,3,3);
464    bool lsu_im = bits(tlbdata,4,4);
465
466    int part_id = bits(tlbdata,15,8);
467    int tl = bits(tlbdata,18,16);
468    int pri_context = bits(tlbdata,47,32);
469    int context;
470    ContextType ct;
471    int asi;
472    bool real = false;
473
474    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
475           priv, hpriv, red, lsu_im, part_id);
476
477    if (tl > 0) {
478        asi = ASI_N;
479        ct = Nucleus;
480        context = 0;
481    } else {
482        asi = ASI_P;
483        ct = Primary;
484        context = pri_context;
485    }
486
487    if ( hpriv || red ) {
488        cacheValid = true;
489        cacheState = tlbdata;
490        cacheEntry[0] = NULL;
491        req->setPaddr(vaddr & PAddrImplMask);
492        return NoFault;
493    }
494
495    // If the access is unaligned, trap
496    if (vaddr & 0x3) {
497        writeSfsr(false, ct, false, OtherFault, asi);
498        return new MemAddressNotAligned;
499    }
500
501    if (addr_mask)
502        vaddr = vaddr & VAddrAMask;
503
504    if (!validVirtualAddress(vaddr, addr_mask)) {
505        writeSfsr(false, ct, false, VaOutOfRange, asi);
506        return new InstructionAccessException;
507    }
508
509    if (!lsu_im) {
510        e = lookup(vaddr, part_id, true);
511        real = true;
512        context = 0;
513    } else {
514        e = lookup(vaddr, part_id, false, context);
515    }
516
517    if (e == NULL || !e->valid) {
518        writeTagAccess(vaddr, context);
519        if (real)
520            return new InstructionRealTranslationMiss;
521        else
522#if FULL_SYSTEM
523            return new FastInstructionAccessMMUMiss;
524#else
525            return new FastInstructionAccessMMUMiss(req->getVaddr());
526#endif
527    }
528
529    // we're not privileged but are accessing a privileged page
530    if (!priv && e->pte.priv()) {
531        writeTagAccess(vaddr, context);
532        writeSfsr(false, ct, false, PrivViolation, asi);
533        return new InstructionAccessException;
534    }
535
536    // cache the translation data for the next translation
537    cacheValid = true;
538    cacheState = tlbdata;
539    cacheEntry[0] = e;
540
541    req->setPaddr(e->pte.translate(vaddr));
542    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
543    return NoFault;
544}
545
546Fault
547TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
548{
549    /*
550     * @todo this could really use some profiling and fixing to make
551     * it faster!
552     */
553    uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
554    Addr vaddr = req->getVaddr();
555    Addr size = req->getSize();
556    ASI asi;
557    asi = (ASI)req->getAsi();
558    bool implicit = false;
559    bool hpriv = bits(tlbdata,0,0);
560    bool unaligned = vaddr & (size - 1);
561
562    DPRINTF(TLB, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
563            vaddr, size, asi);
564
565    if (lookupTable.size() != this->size - freeList.size())
566       panic("Lookup table size: %d in-use entries: %d\n", lookupTable.size(),
567               this->size - freeList.size());
568    if (asi == ASI_IMPLICIT)
569        implicit = true;
570
571    // Only use the fast path here if there doesn't need to be an unaligned
572    // trap later
573    if (!unaligned) {
574        if (hpriv && implicit) {
575            req->setPaddr(vaddr & PAddrImplMask);
576            return NoFault;
577        }
578
579        // Be fast if we can!
580        if (cacheValid && cacheState == tlbdata) {
584            if (cacheEntry[0]) {
585                TlbEntry *ce = cacheEntry[0];
586                Addr ce_va = ce->range.va;
587                if (cacheAsi[0] == asi &&
588                    ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
589                    (!write || ce->pte.writable())) {
590                    req->setPaddr(ce->pte.translate(vaddr));
591                    if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
592                        req->setFlags(Request::UNCACHEABLE);
593                    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
594                    return NoFault;
595                } // if matched
596            } // if cache entry valid
597            if (cacheEntry[1]) {
598                TlbEntry *ce = cacheEntry[1];
599                Addr ce_va = ce->range.va;
600                if (cacheAsi[1] == asi &&
601                    ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
602                    (!write || ce->pte.writable())) {
603                    req->setPaddr(ce->pte.translate(vaddr));
604                    if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1)
605                        req->setFlags(Request::UNCACHEABLE);
606                    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
607                    return NoFault;
608                } // if matched
609            } // if cache entry valid
610        }
611    }
612
613    bool red = bits(tlbdata,1,1);
614    bool priv = bits(tlbdata,2,2);
615    bool addr_mask = bits(tlbdata,3,3);
616    bool lsu_dm = bits(tlbdata,5,5);
617
618    int part_id = bits(tlbdata,15,8);
619    int tl = bits(tlbdata,18,16);
620    int pri_context = bits(tlbdata,47,32);
621    int sec_context = bits(tlbdata,63,48);
622
623    bool real = false;
624    ContextType ct = Primary;
625    int context = 0;
626
627    TlbEntry *e;
628
629    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
630            priv, hpriv, red, lsu_dm, part_id);
631
632    if (implicit) {
633        if (tl > 0) {
634            asi = ASI_N;
635            ct = Nucleus;
636            context = 0;
637        } else {
638            asi = ASI_P;
639            ct = Primary;
640            context = pri_context;
641        }
642    } else {
643        // We need to check for priv level/asi priv
644        if (!priv && !hpriv && !AsiIsUnPriv(asi)) {
645            // It appears that context should be Nucleus in these cases?
646            writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
647            return new PrivilegedAction;
648        }
649
650        if (!hpriv && AsiIsHPriv(asi)) {
651            writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
652            return new DataAccessException;
653        }
654
655        if (AsiIsPrimary(asi)) {
656            context = pri_context;
657            ct = Primary;
658        } else if (AsiIsSecondary(asi)) {
659            context = sec_context;
660            ct = Secondary;
661        } else if (AsiIsNucleus(asi)) {
662            ct = Nucleus;
663            context = 0;
664        } else {  // unclear which context applies; default to primary
665            ct = Primary;
666            context = pri_context;
667        }
668    }
669
670    if (!implicit && asi != ASI_P && asi != ASI_S) {
671        if (AsiIsLittle(asi))
672            panic("Little Endian ASIs not supported\n");
673
674        //XXX It's unclear from looking at the documentation how a no fault
675        //load differs from a regular one, other than what happens concerning
676        //nfo and e bits in the TTE
677//        if (AsiIsNoFault(asi))
678//            panic("No Fault ASIs not supported\n");
679
680        if (AsiIsPartialStore(asi))
681            panic("Partial Store ASIs not supported\n");
682
683        if (AsiIsCmt(asi))
684            panic("Cmt ASI registers not implmented\n");
685
686        if (AsiIsInterrupt(asi))
687            goto handleIntRegAccess;
688        if (AsiIsMmu(asi))
689            goto handleMmuRegAccess;
690        if (AsiIsScratchPad(asi))
691            goto handleScratchRegAccess;
692        if (AsiIsQueue(asi))
693            goto handleQueueRegAccess;
694        if (AsiIsSparcError(asi))
695            goto handleSparcErrorRegAccess;
696
697        if (!AsiIsReal(asi) && !AsiIsNucleus(asi) && !AsiIsAsIfUser(asi) &&
698                !AsiIsTwin(asi) && !AsiIsBlock(asi) && !AsiIsNoFault(asi))
699            panic("Accessing ASI %#X. Should we?\n", asi);
700    }
701
702    // If the access is unaligned, trap
703    if (unaligned) {
704        writeSfsr(vaddr, false, ct, false, OtherFault, asi);
705        return new MemAddressNotAligned;
706    }
707
708    if (addr_mask)
709        vaddr = vaddr & VAddrAMask;
710
711    if (!validVirtualAddress(vaddr, addr_mask)) {
712        writeSfsr(vaddr, false, ct, true, VaOutOfRange, asi);
713        return new DataAccessException;
714    }
715
716    if ((!lsu_dm && !hpriv && !red) || AsiIsReal(asi)) {
717        real = true;
718        context = 0;
719    }
720
721    if (hpriv && (implicit || (!AsiIsAsIfUser(asi) && !AsiIsReal(asi)))) {
722        req->setPaddr(vaddr & PAddrImplMask);
723        return NoFault;
724    }
725
726    e = lookup(vaddr, part_id, real, context);
727
728    if (e == NULL || !e->valid) {
729        writeTagAccess(vaddr, context);
730        DPRINTF(TLB, "TLB: DTB Failed to find matching TLB entry\n");
731        if (real)
732            return new DataRealTranslationMiss;
733        else
734#if FULL_SYSTEM
735            return new FastDataAccessMMUMiss;
736#else
737            return new FastDataAccessMMUMiss(req->getVaddr());
738#endif
739
740    }
741
742    if (!priv && e->pte.priv()) {
743        writeTagAccess(vaddr, context);
744        writeSfsr(vaddr, write, ct, e->pte.sideffect(), PrivViolation, asi);
745        return new DataAccessException;
746    }
747
748    if (write && !e->pte.writable()) {
749        writeTagAccess(vaddr, context);
750        writeSfsr(vaddr, write, ct, e->pte.sideffect(), OtherFault, asi);
751        return new FastDataAccessProtection;
752    }
753
754    if (e->pte.nofault() && !AsiIsNoFault(asi)) {
755        writeTagAccess(vaddr, context);
756        writeSfsr(vaddr, write, ct, e->pte.sideffect(), LoadFromNfo, asi);
757        return new DataAccessException;
758    }
759
760    if (e->pte.sideffect() && AsiIsNoFault(asi)) {
761        writeTagAccess(vaddr, context);
762        writeSfsr(vaddr, write, ct, e->pte.sideffect(), SideEffect, asi);
763        return new DataAccessException;
764    }
765
766    if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
767        req->setFlags(Request::UNCACHEABLE);
768
769    // cache the translation data for the next translation
770    cacheState = tlbdata;
771    if (!cacheValid) {
772        cacheEntry[1] = NULL;
773        cacheEntry[0] = NULL;
774    }
775
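    // Keep a two-entry MRU cache of recent data translations, tagged with the
    // ASI used to reach them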
776    if (cacheEntry[0] != e && cacheEntry[1] != e) {
777        cacheEntry[1] = cacheEntry[0];
778        cacheEntry[0] = e;
779        cacheAsi[1] = cacheAsi[0];
780        cacheAsi[0] = asi;
781        if (implicit)
782            cacheAsi[0] = (ASI)0;
783    }
784    cacheValid = true;
785    req->setPaddr(e->pte.translate(vaddr));
786    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
787    return NoFault;
788
789    /** Normal flow ends here. */
790handleIntRegAccess:
791    if (!hpriv) {
792        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
793        if (priv)
794            return new DataAccessException;
795         else
796            return new PrivilegedAction;
797    }
798
799    if ((asi == ASI_SWVR_UDB_INTR_W && !write) ||
800        (asi == ASI_SWVR_UDB_INTR_R && write)) {
801        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
802        return new DataAccessException;
803    }
804
805    goto regAccessOk;
806
807
808handleScratchRegAccess:
809    if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
810        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
811        return new DataAccessException;
812    }
813    goto regAccessOk;
814
815handleQueueRegAccess:
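    // The CPU/dev mondo queue head/tail registers live at VAs 0x3c0-0x3f8 in
    // 16-byte strides; only hyperprivileged code may access them unaligned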
816    if (!priv  && !hpriv) {
817        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
818        return new PrivilegedAction;
819    }
820    if ((!hpriv && vaddr & 0xF) || vaddr > 0x3f8 || vaddr < 0x3c0) {
821        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
822        return new DataAccessException;
823    }
824    goto regAccessOk;
825
826handleSparcErrorRegAccess:
827    if (!hpriv) {
828        writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
829        if (priv)
830            return new DataAccessException;
831         else
832            return new PrivilegedAction;
833    }
834    goto regAccessOk;
835
836
837regAccessOk:
838handleMmuRegAccess:
839    DPRINTF(TLB, "TLB: DTB Translating MM IPR access\n");
840    req->setMmapedIpr(true);
841    req->setPaddr(req->getVaddr());
842    return NoFault;
843}
844
845Fault
846TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
847{
848    if (mode == Execute)
849        return translateInst(req, tc);
850    else
851        return translateData(req, tc, mode == Write);
852}
853
854void
855TLB::translateTiming(RequestPtr req, ThreadContext *tc,
856        Translation *translation, Mode mode)
857{
858    assert(translation);
859    translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
860}
861
862#if FULL_SYSTEM
863
864Tick
865TLB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
866{
867    Addr va = pkt->getAddr();
868    ASI asi = (ASI)pkt->req->getAsi();
869    uint64_t temp;
870
871    DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
872         (uint32_t)pkt->req->getAsi(), pkt->getAddr());
873
874    TLB *itb = tc->getITBPtr();
875
876    switch (asi) {
877      case ASI_LSU_CONTROL_REG:
878        assert(va == 0);
879        pkt->set(tc->readMiscReg(MISCREG_MMU_LSU_CTRL));
880        break;
881      case ASI_MMU:
882        switch (va) {
883          case 0x8:
884            pkt->set(tc->readMiscReg(MISCREG_MMU_P_CONTEXT));
885            break;
886          case 0x10:
887            pkt->set(tc->readMiscReg(MISCREG_MMU_S_CONTEXT));
888            break;
889          default:
890            goto doMmuReadError;
891        }
892        break;
893      case ASI_QUEUE:
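        // Queue registers are spaced 0x10 apart starting at VA 0x3c0, so
        // (va >> 4) - 0x3c selects the corresponding misc register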
894        pkt->set(tc->readMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
895                    (va >> 4) - 0x3c));
896        break;
897      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
898        assert(va == 0);
899        pkt->set(c0_tsb_ps0);
900        break;
901      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
902        assert(va == 0);
903        pkt->set(c0_tsb_ps1);
904        break;
905      case ASI_DMMU_CTXT_ZERO_CONFIG:
906        assert(va == 0);
907        pkt->set(c0_config);
908        break;
909      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
910        assert(va == 0);
911        pkt->set(itb->c0_tsb_ps0);
912        break;
913      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
914        assert(va == 0);
915        pkt->set(itb->c0_tsb_ps1);
916        break;
917      case ASI_IMMU_CTXT_ZERO_CONFIG:
918        assert(va == 0);
919        pkt->set(itb->c0_config);
920        break;
921      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
922        assert(va == 0);
923        pkt->set(cx_tsb_ps0);
924        break;
925      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
926        assert(va == 0);
927        pkt->set(cx_tsb_ps1);
928        break;
929      case ASI_DMMU_CTXT_NONZERO_CONFIG:
930        assert(va == 0);
931        pkt->set(cx_config);
932        break;
933      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
934        assert(va == 0);
935        pkt->set(itb->cx_tsb_ps0);
936        break;
937      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
938        assert(va == 0);
939        pkt->set(itb->cx_tsb_ps1);
940        break;
941      case ASI_IMMU_CTXT_NONZERO_CONFIG:
942        assert(va == 0);
943        pkt->set(itb->cx_config);
944        break;
945      case ASI_SPARC_ERROR_STATUS_REG:
946        pkt->set((uint64_t)0);
947        break;
948      case ASI_HYP_SCRATCHPAD:
949      case ASI_SCRATCHPAD:
950        pkt->set(tc->readMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3)));
951        break;
952      case ASI_IMMU:
953        switch (va) {
954          case 0x0:
955            temp = itb->tag_access;
956            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
957            break;
958          case 0x18:
959            pkt->set(itb->sfsr);
960            break;
961          case 0x30:
962            pkt->set(itb->tag_access);
963            break;
964          default:
965            goto doMmuReadError;
966        }
967        break;
968      case ASI_DMMU:
969        switch (va) {
970          case 0x0:
971            temp = tag_access;
972            pkt->set(bits(temp,63,22) | bits(temp,12,0) << 48);
973            break;
974          case 0x18:
975            pkt->set(sfsr);
976            break;
977          case 0x20:
978            pkt->set(sfar);
979            break;
980          case 0x30:
981            pkt->set(tag_access);
982            break;
983          case 0x80:
984            pkt->set(tc->readMiscReg(MISCREG_MMU_PART_ID));
985            break;
986          default:
987                goto doMmuReadError;
988        }
989        break;
990      case ASI_DMMU_TSB_PS0_PTR_REG:
991        pkt->set(MakeTsbPtr(Ps0,
992            tag_access,
993            c0_tsb_ps0,
994            c0_config,
995            cx_tsb_ps0,
996            cx_config));
997        break;
998      case ASI_DMMU_TSB_PS1_PTR_REG:
999        pkt->set(MakeTsbPtr(Ps1,
1000                tag_access,
1001                c0_tsb_ps1,
1002                c0_config,
1003                cx_tsb_ps1,
1004                cx_config));
1005        break;
1006      case ASI_IMMU_TSB_PS0_PTR_REG:
1007          pkt->set(MakeTsbPtr(Ps0,
1008                itb->tag_access,
1009                itb->c0_tsb_ps0,
1010                itb->c0_config,
1011                itb->cx_tsb_ps0,
1012                itb->cx_config));
1013        break;
1014      case ASI_IMMU_TSB_PS1_PTR_REG:
1015          pkt->set(MakeTsbPtr(Ps1,
1016                itb->tag_access,
1017                itb->c0_tsb_ps1,
1018                itb->c0_config,
1019                itb->cx_tsb_ps1,
1020                itb->cx_config));
1021        break;
1022      case ASI_SWVR_INTR_RECEIVE:
1023        {
1024            SparcISA::Interrupts * interrupts =
1025                dynamic_cast<SparcISA::Interrupts *>(
1026                        tc->getCpuPtr()->getInterruptController());
1027            pkt->set(interrupts->get_vec(IT_INT_VEC));
1028        }
1029        break;
1030      case ASI_SWVR_UDB_INTR_R:
1031        {
1032            SparcISA::Interrupts * interrupts =
1033                dynamic_cast<SparcISA::Interrupts *>(
1034                        tc->getCpuPtr()->getInterruptController());
1035            temp = findMsbSet(interrupts->get_vec(IT_INT_VEC));
1036            tc->getCpuPtr()->clearInterrupt(IT_INT_VEC, temp);
1037            pkt->set(temp);
1038        }
1039        break;
1040      default:
1041doMmuReadError:
1042        panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
1043            (uint32_t)asi, va);
1044    }
1045    pkt->makeAtomicResponse();
1046    return tc->getCpuPtr()->ticks(1);
1047}
1048
1049Tick
1050TLB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
1051{
1052    uint64_t data = gtoh(pkt->get<uint64_t>());
1053    Addr va = pkt->getAddr();
1054    ASI asi = (ASI)pkt->req->getAsi();
1055
1056    Addr ta_insert;
1057    Addr va_insert;
1058    Addr ct_insert;
1059    int part_insert;
1060    int entry_insert = -1;
1061    bool real_insert;
1062    bool ignore;
1063    int part_id;
1064    int ctx_id;
1065    PageTableEntry pte;
1066
1067    DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
1068         (uint32_t)asi, va, data);
1069
1070    TLB *itb = tc->getITBPtr();
1071
1072    switch (asi) {
1073      case ASI_LSU_CONTROL_REG:
1074        assert(va == 0);
1075        tc->setMiscReg(MISCREG_MMU_LSU_CTRL, data);
1076        break;
1077      case ASI_MMU:
1078        switch (va) {
1079          case 0x8:
1080            tc->setMiscReg(MISCREG_MMU_P_CONTEXT, data);
1081            break;
1082          case 0x10:
1083            tc->setMiscReg(MISCREG_MMU_S_CONTEXT, data);
1084            break;
1085          default:
1086            goto doMmuWriteError;
1087        }
1088        break;
1089      case ASI_QUEUE:
1090        assert(mbits(data,13,6) == data);
1091        tc->setMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
1092                    (va >> 4) - 0x3c, data);
1093        break;
1094      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
1095        assert(va == 0);
1096        c0_tsb_ps0 = data;
1097        break;
1098      case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
1099        assert(va == 0);
1100        c0_tsb_ps1 = data;
1101        break;
1102      case ASI_DMMU_CTXT_ZERO_CONFIG:
1103        assert(va == 0);
1104        c0_config = data;
1105        break;
1106      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
1107        assert(va == 0);
1108        itb->c0_tsb_ps0 = data;
1109        break;
1110      case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
1111        assert(va == 0);
1112        itb->c0_tsb_ps1 = data;
1113        break;
1114      case ASI_IMMU_CTXT_ZERO_CONFIG:
1115        assert(va == 0);
1116        itb->c0_config = data;
1117        break;
1118      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
1119        assert(va == 0);
1120        cx_tsb_ps0 = data;
1121        break;
1122      case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
1123        assert(va == 0);
1124        cx_tsb_ps1 = data;
1125        break;
1126      case ASI_DMMU_CTXT_NONZERO_CONFIG:
1127        assert(va == 0);
1128        cx_config = data;
1129        break;
1130      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
1131        assert(va == 0);
1132        itb->cx_tsb_ps0 = data;
1133        break;
1134      case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
1135        assert(va == 0);
1136        itb->cx_tsb_ps1 = data;
1137        break;
1138      case ASI_IMMU_CTXT_NONZERO_CONFIG:
1139        assert(va == 0);
1140        itb->cx_config = data;
1141        break;
1142      case ASI_SPARC_ERROR_EN_REG:
1143      case ASI_SPARC_ERROR_STATUS_REG:
1144        inform("Ignoring write to SPARC ERROR register\n");
1145        break;
1146      case ASI_HYP_SCRATCHPAD:
1147      case ASI_SCRATCHPAD:
1148        tc->setMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3), data);
1149        break;
1150      case ASI_IMMU:
1151        switch (va) {
1152          case 0x18:
1153            itb->sfsr = data;
1154            break;
1155          case 0x30:
1156            itb->tag_access = sext<59>(bits(data, 59,0));
1158            break;
1159          default:
1160            goto doMmuWriteError;
1161        }
1162        break;
1163      case ASI_ITLB_DATA_ACCESS_REG:
1164        entry_insert = bits(va, 8,3);
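        // fall through: a data-access write performs the same insert, just at
        // the fixed entry selected above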
1165      case ASI_ITLB_DATA_IN_REG:
1166        assert(entry_insert != -1 || mbits(va,10,9) == va);
1167        ta_insert = itb->tag_access;
1168        va_insert = mbits(ta_insert, 63,13);
1169        ct_insert = mbits(ta_insert, 12,0);
1170        part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1171        real_insert = bits(va, 9,9);
1172        pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1173                PageTableEntry::sun4u);
1174        tc->getITBPtr()->insert(va_insert, part_insert, ct_insert, real_insert,
1175                pte, entry_insert);
1176        break;
1177      case ASI_DTLB_DATA_ACCESS_REG:
1178        entry_insert = bits(va, 8,3);
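        // fall through: a data-access write performs the same insert, just at
        // the fixed entry selected above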
1179      case ASI_DTLB_DATA_IN_REG:
1180        assert(entry_insert != -1 || mbits(va,10,9) == va);
1181        ta_insert = tag_access;
1182        va_insert = mbits(ta_insert, 63,13);
1183        ct_insert = mbits(ta_insert, 12,0);
1184        part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1185        real_insert = bits(va, 9,9);
1186        pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1187                PageTableEntry::sun4u);
1188        insert(va_insert, part_insert, ct_insert, real_insert, pte,
1189               entry_insert);
1190        break;
1191      case ASI_IMMU_DEMAP:
1192        ignore = false;
1193        ctx_id = -1;
1194        part_id =  tc->readMiscReg(MISCREG_MMU_PART_ID);
1195        switch (bits(va,5,4)) {
1196          case 0:
1197            ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1198            break;
1199          case 1:
1200            ignore = true;
1201            break;
1202          case 3:
1203            ctx_id = 0;
1204            break;
1205          default:
1206            ignore = true;
1207        }
1208
1209        switch(bits(va,7,6)) {
1210          case 0: // demap page
1211            if (!ignore)
1212                tc->getITBPtr()->demapPage(mbits(va,63,13), part_id,
1213                        bits(va,9,9), ctx_id);
1214            break;
1215          case 1: //demap context
1216            if (!ignore)
1217                tc->getITBPtr()->demapContext(part_id, ctx_id);
1218            break;
1219          case 2:
1220            tc->getITBPtr()->demapAll(part_id);
1221            break;
1222          default:
1223            panic("Invalid type for IMMU demap\n");
1224        }
1225        break;
1226      case ASI_DMMU:
1227        switch (va) {
1228          case 0x18:
1229            sfsr = data;
1230            break;
1231          case 0x30:
1232            tag_access = sext<59>(bits(data, 59,0));
1234            break;
1235          case 0x80:
1236            tc->setMiscReg(MISCREG_MMU_PART_ID, data);
1237            break;
1238          default:
1239            goto doMmuWriteError;
1240        }
1241        break;
1242      case ASI_DMMU_DEMAP:
1243        ignore = false;
1244        ctx_id = -1;
1245        part_id =  tc->readMiscReg(MISCREG_MMU_PART_ID);
1246        switch (bits(va,5,4)) {
1247          case 0:
1248            ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1249            break;
1250          case 1:
1251            ctx_id = tc->readMiscReg(MISCREG_MMU_S_CONTEXT);
1252            break;
1253          case 3:
1254            ctx_id = 0;
1255            break;
1256          default:
1257            ignore = true;
1258        }
1259
1260        switch(bits(va,7,6)) {
1261          case 0: // demap page
1262            if (!ignore)
1263                demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
1264            break;
1265          case 1: //demap context
1266            if (!ignore)
1267                demapContext(part_id, ctx_id);
1268            break;
1269          case 2:
1270            demapAll(part_id);
1271            break;
1272          default:
1273            panic("Invalid type for IMMU demap\n");
1274        }
1275        break;
1276       case ASI_SWVR_INTR_RECEIVE:
1277        {
1278            int msb;
1279            // clear all the interrupts that aren't set in the write
1280            SparcISA::Interrupts * interrupts =
1281                dynamic_cast<SparcISA::Interrupts *>(
1282                        tc->getCpuPtr()->getInterruptController());
1283            while (interrupts->get_vec(IT_INT_VEC) & data) {
1284                msb = findMsbSet(interrupts->get_vec(IT_INT_VEC) & data);
1285                tc->getCpuPtr()->clearInterrupt(IT_INT_VEC, msb);
1286            }
1287        }
1288        break;
1289      case ASI_SWVR_UDB_INTR_W:
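        // data<12:8> selects the target thread context, data<5:0> the
        // interrupt vector to post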
1290            tc->getSystemPtr()->threadContexts[bits(data,12,8)]->getCpuPtr()->
1291            postInterrupt(bits(data, 5, 0), 0);
1292        break;
1293      default:
1294doMmuWriteError:
1295        panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
1296            (uint32_t)pkt->req->getAsi(), pkt->getAddr(), data);
1297    }
1298    pkt->makeAtomicResponse();
1299    return tc->getCpuPtr()->ticks(1);
1300}
1301
1302#endif
1303
1304void
1305TLB::GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs)
1306{
1307    uint64_t tag_access = mbits(addr,63,13) | mbits(ctx,12,0);
1308    TLB * itb = tc->getITBPtr();
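    // Return the four TSB pointers for this tag access:
    // ptrs[0..3] = D-TSB PS0, D-TSB PS1, I-TSB PS0, I-TSB PS1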
1309    ptrs[0] = MakeTsbPtr(Ps0, tag_access,
1310                c0_tsb_ps0,
1311                c0_config,
1312                cx_tsb_ps0,
1313                cx_config);
1314    ptrs[1] = MakeTsbPtr(Ps1, tag_access,
1315                c0_tsb_ps1,
1316                c0_config,
1317                cx_tsb_ps1,
1318                cx_config);
1319    ptrs[2] = MakeTsbPtr(Ps0, tag_access,
1320                itb->c0_tsb_ps0,
1321                itb->c0_config,
1322                itb->cx_tsb_ps0,
1323                itb->cx_config);
1324    ptrs[3] = MakeTsbPtr(Ps1, tag_access,
1325                itb->c0_tsb_ps1,
1326                itb->c0_config,
1327                itb->cx_tsb_ps1,
1328                itb->cx_config);
1329}
1330
1331uint64_t
1332TLB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
1333        uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
1334{
1335    uint64_t tsb;
1336    uint64_t config;
1337
1338    if (bits(tag_access, 12,0) == 0) {
1339        tsb = c0_tsb;
1340        config = c0_config;
1341    } else {
1342        tsb = cX_tsb;
1343        config = cX_config;
1344    }
1345
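    // The pointer is the 8KB-aligned TSB base, plus half the TSB for a split
    // PS1 TSB, plus the VA-derived index scaled by the 16-byte TTE size.
    // Since tag_access already holds VA<63:13> in place, the index*16 term is
    // (tag_access >> (9 + 3*page_size)) masked down to bits <12+tsb_size:4>.
    // For example, with tsb_size=0, page_size=0, split=0, a TSB base of
    // 0x10000000 and VA 0x123458000 (context 0), the entry index is 0x2c and
    // the returned pointer is 0x100002c0.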
1346    uint64_t ptr = mbits(tsb,63,13);
1347    bool split = bits(tsb,12,12);
1348    int tsb_size = bits(tsb,3,0);
1349    int page_size = (ps == Ps0) ? bits(config, 2,0) : bits(config,10,8);
1350
1351    if (ps == Ps1  && split)
1352        ptr |= ULL(1) << (13 + tsb_size);
1353    ptr |= (tag_access >> (9 + page_size * 3)) & mask(12+tsb_size, 4);
1354
1355    return ptr;
1356}
1357
1358void
1359TLB::serialize(std::ostream &os)
1360{
1361    SERIALIZE_SCALAR(size);
1362    SERIALIZE_SCALAR(usedEntries);
1363    SERIALIZE_SCALAR(lastReplaced);
1364
1365    // convert the pointer based free list into an index based one
1366    int *free_list = (int*)malloc(sizeof(int) * size);
1367    int cntr = 0;
1368    std::list<TlbEntry*>::iterator i;
1369    i = freeList.begin();
1370    while (i != freeList.end()) {
1371        free_list[cntr++] = ((size_t)*i - (size_t)tlb)/ sizeof(TlbEntry);
1372        i++;
1373    }
1374    SERIALIZE_SCALAR(cntr);
1375    SERIALIZE_ARRAY(free_list,  cntr);
    free(free_list);
1376
1377    SERIALIZE_SCALAR(c0_tsb_ps0);
1378    SERIALIZE_SCALAR(c0_tsb_ps1);
1379    SERIALIZE_SCALAR(c0_config);
1380    SERIALIZE_SCALAR(cx_tsb_ps0);
1381    SERIALIZE_SCALAR(cx_tsb_ps1);
1382    SERIALIZE_SCALAR(cx_config);
1383    SERIALIZE_SCALAR(sfsr);
1384    SERIALIZE_SCALAR(tag_access);
1385
1386    for (int x = 0; x < size; x++) {
1387        nameOut(os, csprintf("%s.PTE%d", name(), x));
1388        tlb[x].serialize(os);
1389    }
1390    SERIALIZE_SCALAR(sfar);
1391}
1392
1393void
1394TLB::unserialize(Checkpoint *cp, const std::string &section)
1395{
1396    int oldSize;
1397
1398    paramIn(cp, section, "size", oldSize);
1399    if (oldSize != size)
1400        panic("Don't support unserializing different sized TLBs\n");
1401    UNSERIALIZE_SCALAR(usedEntries);
1402    UNSERIALIZE_SCALAR(lastReplaced);
1403
1404    int cntr;
1405    UNSERIALIZE_SCALAR(cntr);
1406
1407    int *free_list = (int*)malloc(sizeof(int) * cntr);
1408    freeList.clear();
1409    UNSERIALIZE_ARRAY(free_list,  cntr);
1410    for (int x = 0; x < cntr; x++)
1411        freeList.push_back(&tlb[free_list[x]]);
    free(free_list);
1412
1413    UNSERIALIZE_SCALAR(c0_tsb_ps0);
1414    UNSERIALIZE_SCALAR(c0_tsb_ps1);
1415    UNSERIALIZE_SCALAR(c0_config);
1416    UNSERIALIZE_SCALAR(cx_tsb_ps0);
1417    UNSERIALIZE_SCALAR(cx_tsb_ps1);
1418    UNSERIALIZE_SCALAR(cx_config);
1419    UNSERIALIZE_SCALAR(sfsr);
1420    UNSERIALIZE_SCALAR(tag_access);
1421
1422    lookupTable.clear();
1423    for (int x = 0; x < size; x++) {
1424        tlb[x].unserialize(cp, csprintf("%s.PTE%d", section, x));
1425        if (tlb[x].valid)
1426            lookupTable.insert(tlb[x].range, &tlb[x]);
1427
1428    }
1429    UNSERIALIZE_SCALAR(sfar);
1430}
1431
1432} /* end namespace SparcISA */
1433
1434SparcISA::TLB *
1435SparcTLBParams::create()
1436{
1437    return new SparcISA::TLB(this);
1438}
1439