/*
 * Copyright (c) 2001-2004 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sstream>
#include <string>
#include <vector>

#include "arch/alpha/alpha_memory.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/exec_context.hh"
#include "sim/builder.hh"

using namespace std;
using namespace EV5;

///////////////////////////////////////////////////////////////////////
//
//  Alpha TLB
//
#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

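// convert a processor mode number into the single-bit mask used by the
// PTE permission (xre/xwe) fields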
#define MODE2MASK(X)   (1 << (X))

AlphaTLB::AlphaTLB(const string &name, int s)
    : SimObject(name), size(s), nlu(0)
{
    table = new AlphaISA::PTE[size];
    // zero all entries (avoids the non-standard variable-length-array sizeof)
    memset(table, 0, size * sizeof(AlphaISA::PTE));
}

AlphaTLB::~AlphaTLB()
{
    if (table)
        delete [] table;
}

// look up an entry in the TLB
AlphaISA::PTE *
AlphaTLB::lookup(Addr vpn, uint8_t asn) const
{
    DPRINTF(TLB, "lookup %#x, asn %#x\n", vpn, (int)asn);

    PageTable::const_iterator i = lookupTable.find(vpn);
    if (i == lookupTable.end())
        return NULL;

    // scan every entry recorded for this VPN, stopping at the table's end
    while (i != lookupTable.end() && i->first == vpn) {
        int index = i->second;
        AlphaISA::PTE *pte = &table[index];
        assert(pte->valid);
        if (vpn == pte->tag && (pte->asma || pte->asn == asn))
            return pte;

        ++i;
    }

    // not found...
    return NULL;
}

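// Determine whether a request's physical address falls in uncacheable
// (I/O) space, and if so flag the request and adjust the address.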
void
AlphaTLB::checkCacheability(MemReqPtr &req)
{
    // in Alpha, cacheability is controlled by upper-level bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit 40.
     * The Turbolaser platform (and EV5) support having the bit in 39, but
     * Tsunami (which Linux assumes uses an EV6) generates accesses with
     * the bit in 40.  So we must check for both, but we have debug flags
     * to catch a weird case where both are used, which shouldn't happen.
     */


#ifdef ALPHA_TLASER
    if (req->paddr & PAddrUncachedBit39) {
#else
    if (req->paddr & PAddrUncachedBit43) {
#endif
        // IPR memory space not implemented
        if (PAddrIprSpace(req->paddr)) {
            if (!req->xc->misspeculating()) {
                switch (req->paddr) {
                  case ULL(0xFFFFF00188):
                    req->data = 0;
                    break;

                  default:
                    panic("IPR memory space not implemented! PA=%x\n",
                          req->paddr);
                }
            }
        } else {
            // mark request as uncacheable
            req->flags |= UNCACHEABLE;

#ifndef ALPHA_TLASER
            // Clear bits 42:35 of the physical address (10-2 in Tsunami manual)
            req->paddr &= PAddrUncachedMask;
#endif
        }
    }
}


// insert a new TLB entry
void
AlphaTLB::insert(Addr addr, AlphaISA::PTE &pte)
{
    AlphaISA::VAddr vaddr = addr;
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            // don't walk past the entries recorded for oldvpn
            ++i;
            if (i == lookupTable.end())
                panic("TLB entry not found in lookupTable");
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), pte.ppn);

    table[nlu] = pte;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

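// Invalidate every entry and reset the replacement (nlu) pointer.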
void
AlphaTLB::flushAll()
{
    memset(table, 0, size * sizeof(AlphaISA::PTE));
    lookupTable.clear();
    nlu = 0;
}

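// Invalidate all entries that are not marked address-space-match (ASM);
// entries shared across address spaces are kept.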
void
AlphaTLB::flushProcesses()
{
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        AlphaISA::PTE *pte = &table[index];
        assert(pte->valid);

        // advance the iterator before a possible erase so it stays valid
        PageTable::iterator cur = i++;

        if (!pte->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index, pte->tag, pte->ppn);
            pte->valid = false;
            lookupTable.erase(cur);
        }
    }
}

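// Invalidate any entry that maps the given virtual address and matches
// the supplied ASN (or is marked address-space-match).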
void
AlphaTLB::flushAddr(Addr addr, uint8_t asn)
{
    AlphaISA::VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        AlphaISA::PTE *pte = &table[index];
        assert(pte->valid);

        // advance the iterator before a possible erase so it stays valid
        PageTable::iterator cur = i++;

        if (vaddr.vpn() == pte->tag && (pte->asma || pte->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    pte->ppn);

            // invalidate this entry
            pte->valid = false;

            lookupTable.erase(cur);
        }
    }
}


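// Write the TLB contents (size, replacement pointer, and each PTE) to a
// checkpoint stream.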
void
AlphaTLB::serialize(ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.PTE%d", name(), i));
        table[i].serialize(os);
    }
}

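// Restore the TLB from a checkpoint and rebuild the VPN lookup table
// from the valid entries.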
void
AlphaTLB::unserialize(Checkpoint *cp, const string &section)
{
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        table[i].unserialize(cp, csprintf("%s.PTE%d", section, i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}


///////////////////////////////////////////////////////////////////////
//
//  Alpha ITB
//
AlphaITB::AlphaITB(const std::string &name, int size)
    : AlphaTLB(name, size)
{}

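// Register ITB statistics; total accesses are derived as hits + misses.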
void
AlphaITB::regStats()
{
    hits
        .name(name() + ".hits")
        .desc("ITB hits");
    misses
        .name(name() + ".misses")
        .desc("ITB misses");
    acv
        .name(name() + ".acv")
        .desc("ITB acv");
    accesses
        .name(name() + ".accesses")
        .desc("ITB accesses");

    accesses = hits + misses;
}

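// Record an ITB miss/fault: latch the faulting PC (ITB_TAG) and the
// formatted fault address (IFAULT_VA_FORM) into the IPRs, on the
// correct path only.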
void
AlphaITB::fault(Addr pc, ExecContext *xc) const
{
    uint64_t *ipr = xc->regs.ipr;

    if (!xc->misspeculating()) {
        ipr[AlphaISA::IPR_ITB_TAG] = pc;
        ipr[AlphaISA::IPR_IFAULT_VA_FORM] =
            ipr[AlphaISA::IPR_IVPTBR] | (AlphaISA::VAddr(pc).vpn() << 3);
    }
}

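// Translate an instruction fetch: PAL-mode and superpage addresses map
// straight to physical; everything else is looked up in the TLB and
// checked for execute permission in the current mode.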
Fault
AlphaITB::translate(MemReqPtr &req) const
{
    InternalProcReg *ipr = req->xc->regs.ipr;

    if (AlphaISA::PcPAL(req->vaddr)) {
        // strip off PAL PC marker (lsb is 1)
        req->paddr = (req->vaddr & ~3) & PAddrImplMask;
        hits++;
        return No_Fault;
    }

    if (req->flags & PHYSICAL) {
        req->paddr = req->vaddr;
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->vaddr)) {
            fault(req->vaddr, req->xc);
            acv++;
            return ITB_Acv_Fault;
        }


        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
#ifdef ALPHA_TLASER
        if ((MCSR_SP(ipr[AlphaISA::IPR_MCSR]) & 2) &&
            VAddrSpaceEV5(req->vaddr) == 2) {
#else
        if (VAddrSpaceEV6(req->vaddr) == 0x7e) {
#endif
            // only valid in kernel mode
            if (ICM_CM(ipr[AlphaISA::IPR_ICM]) !=
                AlphaISA::mode_kernel) {
                fault(req->vaddr, req->xc);
                acv++;
                return ITB_Acv_Fault;
            }

            req->paddr = req->vaddr & PAddrImplMask;

#ifndef ALPHA_TLASER
            // sign extend the physical address properly
            if (req->paddr & PAddrUncachedBit40)
                req->paddr |= ULL(0xf0000000000);
            else
                req->paddr &= ULL(0xffffffffff);
#endif

        } else {
            // not a physical address: need to look up pte
            AlphaISA::PTE *pte = lookup(AlphaISA::VAddr(req->vaddr).vpn(),
                                        DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]));

            if (!pte) {
                fault(req->vaddr, req->xc);
                misses++;
                return ITB_Fault_Fault;
            }

            req->paddr = (pte->ppn << AlphaISA::PageShift) +
                (AlphaISA::VAddr(req->vaddr).offset() & ~3);

            // check permissions for this access
            if (!(pte->xre & (1 << ICM_CM(ipr[AlphaISA::IPR_ICM])))) {
                // instruction access fault
                fault(req->vaddr, req->xc);
                acv++;
                return ITB_Acv_Fault;
            }

            hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->paddr & ~PAddrImplMask)
        return Machine_Check_Fault;

    checkCacheability(req);

    return No_Fault;
}

///////////////////////////////////////////////////////////////////////
//
//  Alpha DTB
//
AlphaDTB::AlphaDTB(const std::string &name, int size)
    : AlphaTLB(name, size)
{}

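// Register DTB statistics; the combined hit/miss/acv/access counts are
// formulas over the separate read and write counters.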
void
AlphaDTB::regStats()
{
    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        ;

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        ;

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    acv
        .name(name() + ".acv")
        .desc("DTB access violations")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    hits = read_hits + write_hits;
    misses = read_misses + write_misses;
    acv = read_acv + write_acv;
    accesses = read_accesses + write_accesses;
}

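// Record a DTB fault: set the VA, MM_STAT, and VA_FORM IPRs for the
// faulting access (skipped for misspeculated, VPTE, and NO_FAULT
// accesses, as explained below).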
void
AlphaDTB::fault(MemReqPtr &req, uint64_t flags) const
{
    ExecContext *xc = req->xc;
    AlphaISA::VAddr vaddr = req->vaddr;
    uint64_t *ipr = xc->regs.ipr;

    // Set fault address and flags.  Even though we're modeling an
    // EV5, we use the EV6 technique of not latching fault registers
    // on VPTE loads (instead of locking the registers until IPR_VA is
    // read, like the EV5).  The EV6 approach is cleaner and seems to
    // work with EV5 PAL code, but not the other way around.
    if (!xc->misspeculating()
        && !(req->flags & VPTE) && !(req->flags & NO_FAULT)) {
        // set VA register with faulting address
        ipr[AlphaISA::IPR_VA] = req->vaddr;

        // set MM_STAT register flags
        ipr[AlphaISA::IPR_MM_STAT] =
            (((Opcode(xc->getInst()) & 0x3f) << 11)
             | ((Ra(xc->getInst()) & 0x1f) << 6)
             | (flags & 0x3f));

        // set VA_FORM register with faulting formatted address
        ipr[AlphaISA::IPR_VA_FORM] =
            ipr[AlphaISA::IPR_MVPTBR] | (vaddr.vpn() << 3);
    }
}

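// Translate a data access: check alignment, handle physical and
// superpage addresses, then look up the TLB entry and enforce
// read/write permission and fault-on-read/write bits for the current
// mode.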
Fault
AlphaDTB::translate(MemReqPtr &req, bool write) const
{
    RegFile *regs = &req->xc->regs;
    Addr pc = regs->pc;
    InternalProcReg *ipr = regs->ipr;

    AlphaISA::mode_type mode =
        (AlphaISA::mode_type)DTB_CM_CM(ipr[AlphaISA::IPR_DTB_CM]);


    /**
     * Check for alignment faults
     */
    if (req->vaddr & (req->size - 1)) {
        fault(req, write ? MM_STAT_WR_MASK : 0);
        return Alignment_Fault;
    }

    if (pc & 0x1) {
        mode = (req->flags & ALTMODE) ?
            (AlphaISA::mode_type)ALT_MODE_AM(ipr[AlphaISA::IPR_ALT_MODE])
            : AlphaISA::mode_kernel;
    }

    if (req->flags & PHYSICAL) {
        req->paddr = req->vaddr;
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->vaddr)) {
            fault(req, (write ? MM_STAT_WR_MASK : 0) |
                  MM_STAT_BAD_VA_MASK |
                  MM_STAT_ACV_MASK);

            if (write) { write_acv++; } else { read_acv++; }
            return DTB_Fault_Fault;
        }

        // Check for "superpage" mapping
#ifdef ALPHA_TLASER
        if ((MCSR_SP(ipr[AlphaISA::IPR_MCSR]) & 2) &&
            VAddrSpaceEV5(req->vaddr) == 2) {
#else
        if (VAddrSpaceEV6(req->vaddr) == 0x7e) {
#endif

            // only valid in kernel mode
            if (DTB_CM_CM(ipr[AlphaISA::IPR_DTB_CM]) !=
                AlphaISA::mode_kernel) {
                fault(req, ((write ? MM_STAT_WR_MASK : 0) |
                            MM_STAT_ACV_MASK));
                if (write) { write_acv++; } else { read_acv++; }
                return DTB_Acv_Fault;
            }

            req->paddr = req->vaddr & PAddrImplMask;

#ifndef ALPHA_TLASER
            // sign extend the physical address properly
            if (req->paddr & PAddrUncachedBit40)
                req->paddr |= ULL(0xf0000000000);
            else
                req->paddr &= ULL(0xffffffffff);
#endif

        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            // not a physical address: need to look up pte
            AlphaISA::PTE *pte = lookup(AlphaISA::VAddr(req->vaddr).vpn(),
                                        DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]));

            if (!pte) {
                // page fault
                fault(req, (write ? MM_STAT_WR_MASK : 0) |
                      MM_STAT_DTB_MISS_MASK);
                if (write) { write_misses++; } else { read_misses++; }
                return (req->flags & VPTE) ? Pdtb_Miss_Fault : Ndtb_Miss_Fault;
            }

            req->paddr = (pte->ppn << AlphaISA::PageShift) +
                AlphaISA::VAddr(req->vaddr).offset();

            if (write) {
                if (!(pte->xwe & MODE2MASK(mode))) {
                    // write access violation
                    fault(req, MM_STAT_WR_MASK |
                          MM_STAT_ACV_MASK |
                          (pte->fonw ? MM_STAT_FONW_MASK : 0));
                    write_acv++;
                    return DTB_Fault_Fault;
                }
                if (pte->fonw) {
                    fault(req, MM_STAT_WR_MASK |
                          MM_STAT_FONW_MASK);
                    write_acv++;
                    return DTB_Fault_Fault;
                }
            } else {
                if (!(pte->xre & MODE2MASK(mode))) {
                    fault(req, MM_STAT_ACV_MASK |
                          (pte->fonr ? MM_STAT_FONR_MASK : 0));
                    read_acv++;
                    return DTB_Acv_Fault;
                }
                if (pte->fonr) {
                    fault(req, MM_STAT_FONR_MASK);
                    read_acv++;
                    return DTB_Fault_Fault;
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->paddr & ~PAddrImplMask)
        return Machine_Check_Fault;

    checkCacheability(req);

    return No_Fault;
}

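// Return a reference to the entry at the replacement (nlu) pointer,
// optionally advancing the pointer to the next slot.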
AlphaISA::PTE &
AlphaTLB::index(bool advance)
{
    AlphaISA::PTE *pte = &table[nlu];

    if (advance)
        nextnlu();

    return *pte;
}

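// Simulator configuration glue: parameter declarations and factory
// functions that create AlphaITB and AlphaDTB instances (default sizes
// 48 and 64 entries).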
DEFINE_SIM_OBJECT_CLASS_NAME("AlphaTLB", AlphaTLB)

BEGIN_DECLARE_SIM_OBJECT_PARAMS(AlphaITB)

    Param<int> size;

END_DECLARE_SIM_OBJECT_PARAMS(AlphaITB)

BEGIN_INIT_SIM_OBJECT_PARAMS(AlphaITB)

    INIT_PARAM_DFLT(size, "TLB size", 48)

END_INIT_SIM_OBJECT_PARAMS(AlphaITB)


CREATE_SIM_OBJECT(AlphaITB)
{
    return new AlphaITB(getInstanceName(), size);
}

REGISTER_SIM_OBJECT("AlphaITB", AlphaITB)

BEGIN_DECLARE_SIM_OBJECT_PARAMS(AlphaDTB)

    Param<int> size;

END_DECLARE_SIM_OBJECT_PARAMS(AlphaDTB)

BEGIN_INIT_SIM_OBJECT_PARAMS(AlphaDTB)

    INIT_PARAM_DFLT(size, "TLB size", 64)

END_INIT_SIM_OBJECT_PARAMS(AlphaDTB)


CREATE_SIM_OBJECT(AlphaDTB)
{
    return new AlphaDTB(getInstanceName(), size);
}

REGISTER_SIM_OBJECT("AlphaDTB", AlphaDTB)
