tlb.cc revision 1762
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cstring>
#include <sstream>
#include <string>
#include <vector>

#include "arch/alpha/alpha_memory.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/exec_context.hh"
#include "sim/builder.hh"

using namespace std;
using namespace EV5;

///////////////////////////////////////////////////////////////////////
//
//  Alpha TLB
//
#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

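// Convert a mode_type value (kernel/executive/supervisor/user) into a
// one-hot mask for testing the per-mode enable bits (xre/xwe) of a PTE.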
#define MODE2MASK(X)            (1 << (X))

AlphaTLB::AlphaTLB(const string &name, int s)
    : SimObject(name), size(s), nlu(0)
{
    table = new AlphaISA::PTE[size];
    memset(table, 0, size * sizeof(AlphaISA::PTE));
}

AlphaTLB::~AlphaTLB()
{
    if (table)
        delete [] table;
}

// look up an entry in the TLB
AlphaISA::PTE *
AlphaTLB::lookup(Addr vpn, uint8_t asn) const
{
    // assume not found...
    AlphaISA::PTE *retval = NULL;

    PageTable::const_iterator i = lookupTable.find(vpn);
    if (i != lookupTable.end()) {
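        // lookupTable is keyed on VPN and can hold several entries for the
        // same VPN (one per address space), so walk all matches and take the
        // one whose ASN matches or whose ASM (address space match) bit is set.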
        while (i != lookupTable.end() && i->first == vpn) {
            int index = i->second;
            AlphaISA::PTE *pte = &table[index];
            assert(pte->valid);
            if (vpn == pte->tag && (pte->asma || pte->asn == asn)) {
                retval = pte;
                break;
            }

            ++i;
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}


void
AlphaTLB::checkCacheability(MemReqPtr &req)
{
    // in Alpha, cacheability is controlled by the upper bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit 40.
     * The Turbolaser platform (and EV5) supports having the bit in 39, but
     * Tsunami (which Linux assumes uses an EV6) generates accesses with
     * the bit in 40.  So we must check for both, but we have debug flags
     * to catch a weird case where both are used, which shouldn't happen.
     */

#ifdef ALPHA_TLASER
    if (req->paddr & PAddrUncachedBit39) {
#else
    if (req->paddr & PAddrUncachedBit43) {
#endif
        // IPR memory space not implemented
        if (PAddrIprSpace(req->paddr)) {
            if (!req->xc->misspeculating()) {
                switch (req->paddr) {
                  case ULL(0xFFFFF00188):
                    req->data = 0;
                    break;

                  default:
                    panic("IPR memory space not implemented! PA=%x\n",
                          req->paddr);
                }
            }
        } else {
            // mark request as uncacheable
            req->flags |= UNCACHEABLE;

#ifndef ALPHA_TLASER
            // Clear bits 42:35 of the physical address (10-2 in Tsunami manual)
            req->paddr &= PAddrUncachedMask;
#endif
        }
    }
}


// insert a new TLB entry
void
AlphaTLB::insert(Addr addr, AlphaISA::PTE &pte)
{
    AlphaISA::VAddr vaddr = addr;
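    // The new entry goes into the slot selected by nlu (the next slot to
    // allocate); if that slot currently holds a valid entry, drop its old
    // VPN mapping from lookupTable before overwriting it.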
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), pte.ppn);

    table[nlu] = pte;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

void
AlphaTLB::flushAll()
{
    memset(table, 0, size * sizeof(AlphaISA::PTE));
    lookupTable.clear();
    nlu = 0;
}

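// flush all process-specific entries, i.e. every entry whose ASM (address
// space match) bit is clear; global (ASM) mappings are left intact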
void
AlphaTLB::flushProcesses()
{
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        AlphaISA::PTE *pte = &table[index];
        assert(pte->valid);

        if (!pte->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index, pte->tag, pte->ppn);
            pte->valid = false;
            // erasing invalidates i, so advance it first (post-increment)
            lookupTable.erase(i++);
        } else {
            ++i;
        }
    }
}

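// flush any entry that maps the given virtual address in the given address
// space (or whose ASM bit is set)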
void
AlphaTLB::flushAddr(Addr addr, uint8_t asn)
{
    AlphaISA::VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        AlphaISA::PTE *pte = &table[index];
        assert(pte->valid);

        if (vaddr.vpn() == pte->tag && (pte->asma || pte->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    pte->ppn);

            // invalidate this entry; erasing invalidates i, so advance it
            // first (post-increment)
            pte->valid = false;
            lookupTable.erase(i++);
        } else {
            ++i;
        }
    }
}


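// Checkpointing: each PTE is serialized into its own subsection; on restore
// the VPN lookup table is rebuilt from the valid entries.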
void
AlphaTLB::serialize(ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.PTE%d", name(), i));
        table[i].serialize(os);
    }
}

void
AlphaTLB::unserialize(Checkpoint *cp, const string &section)
{
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        table[i].unserialize(cp, csprintf("%s.PTE%d", section, i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}


///////////////////////////////////////////////////////////////////////
//
//  Alpha ITB
//
AlphaITB::AlphaITB(const std::string &name, int size)
    : AlphaTLB(name, size)
{}


void
AlphaITB::regStats()
{
    hits
        .name(name() + ".hits")
        .desc("ITB hits");
    misses
        .name(name() + ".misses")
        .desc("ITB misses");
    acv
        .name(name() + ".acv")
        .desc("ITB acv");
    accesses
        .name(name() + ".accesses")
        .desc("ITB accesses");

    accesses = hits + misses;
}

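// record the faulting PC and the formatted VPTE address in the ITB fault
// IPRs (ITB_TAG, IFAULT_VA_FORM); skipped on misspeculated accesses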
void
AlphaITB::fault(Addr pc, ExecContext *xc) const
{
    uint64_t *ipr = xc->regs.ipr;

    if (!xc->misspeculating()) {
        ipr[AlphaISA::IPR_ITB_TAG] = pc;
        ipr[AlphaISA::IPR_IFAULT_VA_FORM] =
            ipr[AlphaISA::IPR_IVPTBR] | (AlphaISA::VAddr(pc).vpn() << 3);
    }
}


Fault
AlphaITB::translate(MemReqPtr &req) const
{
    InternalProcReg *ipr = req->xc->regs.ipr;

    if (AlphaISA::PcPAL(req->vaddr)) {
        // strip off PAL PC marker (lsb is 1)
        req->paddr = (req->vaddr & ~3) & PAddrImplMask;
        hits++;
        return No_Fault;
    }

    if (req->flags & PHYSICAL) {
        req->paddr = req->vaddr;
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->vaddr)) {
            fault(req->vaddr, req->xc);
            acv++;
            return ITB_Acv_Fault;
        }


        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
#ifdef ALPHA_TLASER
        if ((MCSR_SP(ipr[AlphaISA::IPR_MCSR]) & 2) &&
            VAddrSpaceEV5(req->vaddr) == 2) {
#else
        if (VAddrSpaceEV6(req->vaddr) == 0x7e) {
#endif
            // only valid in kernel mode
            if (ICM_CM(ipr[AlphaISA::IPR_ICM]) !=
                AlphaISA::mode_kernel) {
                fault(req->vaddr, req->xc);
                acv++;
                return ITB_Acv_Fault;
            }

            req->paddr = req->vaddr & PAddrImplMask;

#ifndef ALPHA_TLASER
            // sign extend the physical address properly
            if (req->paddr & PAddrUncachedBit40)
                req->paddr |= ULL(0xf0000000000);
            else
                req->paddr &= ULL(0xffffffffff);
#endif

        } else {
            // not a physical address: need to look up pte
            AlphaISA::PTE *pte = lookup(AlphaISA::VAddr(req->vaddr).vpn(),
                                        DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]));

            if (!pte) {
                fault(req->vaddr, req->xc);
                misses++;
                return ITB_Fault_Fault;
            }

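            // form the fetch physical address; clear the low two bits so it
            // is longword (instruction) aligned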
            req->paddr = (pte->ppn << AlphaISA::PageShift) +
                (AlphaISA::VAddr(req->vaddr).offset() & ~3);

            // check permissions for this access
            if (!(pte->xre & (1 << ICM_CM(ipr[AlphaISA::IPR_ICM])))) {
                // instruction access fault
                fault(req->vaddr, req->xc);
                acv++;
                return ITB_Acv_Fault;
            }

            hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->paddr & ~PAddrImplMask)
        return Machine_Check_Fault;

    checkCacheability(req);

    return No_Fault;
}

///////////////////////////////////////////////////////////////////////
//
//  Alpha DTB
//
AlphaDTB::AlphaDTB(const std::string &name, int size)
    : AlphaTLB(name, size)
{}

void
AlphaDTB::regStats()
{
    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        ;

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        ;

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    hits
        .name(name() + ".hits")
        .desc("DTB hits")
        ;

    misses
        .name(name() + ".misses")
        .desc("DTB misses")
        ;

    acv
        .name(name() + ".acv")
        .desc("DTB access violations")
        ;

    accesses
        .name(name() + ".accesses")
        .desc("DTB accesses")
        ;

    hits = read_hits + write_hits;
    misses = read_misses + write_misses;
    acv = read_acv + write_acv;
    accesses = read_accesses + write_accesses;
}

void
AlphaDTB::fault(MemReqPtr &req, uint64_t flags) const
{
    ExecContext *xc = req->xc;
    AlphaISA::VAddr vaddr = req->vaddr;
    uint64_t *ipr = xc->regs.ipr;

    // Set fault address and flags.  Even though we're modeling an
    // EV5, we use the EV6 technique of not latching fault registers
    // on VPTE loads (instead of locking the registers until IPR_VA is
    // read, like the EV5).  The EV6 approach is cleaner and seems to
    // work with EV5 PAL code, but not the other way around.
    if (!xc->misspeculating()
        && !(req->flags & VPTE) && !(req->flags & NO_FAULT)) {
        // set VA register with faulting address
        ipr[AlphaISA::IPR_VA] = req->vaddr;

        // set MM_STAT register flags
        ipr[AlphaISA::IPR_MM_STAT] =
            (((Opcode(xc->getInst()) & 0x3f) << 11)
             | ((Ra(xc->getInst()) & 0x1f) << 6)
             | (flags & 0x3f));

        // set VA_FORM register with faulting formatted address
        ipr[AlphaISA::IPR_VA_FORM] =
            ipr[AlphaISA::IPR_MVPTBR] | (vaddr.vpn() << 3);
    }
}

Fault
AlphaDTB::translate(MemReqPtr &req, bool write) const
{
    RegFile *regs = &req->xc->regs;
    Addr pc = regs->pc;
    InternalProcReg *ipr = regs->ipr;

    AlphaISA::mode_type mode =
        (AlphaISA::mode_type)DTB_CM_CM(ipr[AlphaISA::IPR_DTB_CM]);

    /**
     * Check for alignment faults
     */
    if (req->vaddr & (req->size - 1)) {
        fault(req, write ? MM_STAT_WR_MASK : 0);
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->vaddr,
                req->size);
        return Alignment_Fault;
    }

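    // In PAL mode (PC bit 0 set) the effective protection mode comes from the
    // ALT_MODE IPR when the access carries the ALTMODE flag; all other
    // PAL-mode accesses are treated as kernel mode.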
    if (pc & 0x1) {
        mode = (req->flags & ALTMODE) ?
            (AlphaISA::mode_type)ALT_MODE_AM(ipr[AlphaISA::IPR_ALT_MODE])
            : AlphaISA::mode_kernel;
    }

    if (req->flags & PHYSICAL) {
        req->paddr = req->vaddr;
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->vaddr)) {
            fault(req, (write ? MM_STAT_WR_MASK : 0) |
                  MM_STAT_BAD_VA_MASK |
                  MM_STAT_ACV_MASK);

            if (write) { write_acv++; } else { read_acv++; }
            return DTB_Fault_Fault;
        }

        // Check for "superpage" mapping
#ifdef ALPHA_TLASER
        if ((MCSR_SP(ipr[AlphaISA::IPR_MCSR]) & 2) &&
            VAddrSpaceEV5(req->vaddr) == 2) {
#else
        if (VAddrSpaceEV6(req->vaddr) == 0x7e) {
#endif

            // only valid in kernel mode
            if (DTB_CM_CM(ipr[AlphaISA::IPR_DTB_CM]) !=
                AlphaISA::mode_kernel) {
                fault(req, ((write ? MM_STAT_WR_MASK : 0) |
                            MM_STAT_ACV_MASK));
                if (write) { write_acv++; } else { read_acv++; }
                return DTB_Acv_Fault;
            }

            req->paddr = req->vaddr & PAddrImplMask;

#ifndef ALPHA_TLASER
            // sign extend the physical address properly
            if (req->paddr & PAddrUncachedBit40)
                req->paddr |= ULL(0xf0000000000);
            else
                req->paddr &= ULL(0xffffffffff);
#endif

        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            // not a physical address: need to look up pte
            AlphaISA::PTE *pte = lookup(AlphaISA::VAddr(req->vaddr).vpn(),
                                        DTB_ASN_ASN(ipr[AlphaISA::IPR_DTB_ASN]));

            if (!pte) {
                // page fault
                fault(req, (write ? MM_STAT_WR_MASK : 0) |
                      MM_STAT_DTB_MISS_MASK);
                if (write) { write_misses++; } else { read_misses++; }
                return (req->flags & VPTE) ? Pdtb_Miss_Fault : Ndtb_Miss_Fault;
            }

            req->paddr = (pte->ppn << AlphaISA::PageShift) +
                AlphaISA::VAddr(req->vaddr).offset();

            if (write) {
                if (!(pte->xwe & MODE2MASK(mode))) {
                    // data access violation: write not enabled for this mode
                    fault(req, MM_STAT_WR_MASK |
                          MM_STAT_ACV_MASK |
                          (pte->fonw ? MM_STAT_FONW_MASK : 0));
                    write_acv++;
                    return DTB_Fault_Fault;
                }
                if (pte->fonw) {
                    fault(req, MM_STAT_WR_MASK |
                          MM_STAT_FONW_MASK);
                    write_acv++;
                    return DTB_Fault_Fault;
                }
            } else {
                if (!(pte->xre & MODE2MASK(mode))) {
                    fault(req, MM_STAT_ACV_MASK |
                          (pte->fonr ? MM_STAT_FONR_MASK : 0));
                    read_acv++;
                    return DTB_Acv_Fault;
                }
                if (pte->fonr) {
                    fault(req, MM_STAT_FONR_MASK);
                    read_acv++;
                    return DTB_Fault_Fault;
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->paddr & ~PAddrImplMask)
        return Machine_Check_Fault;

    checkCacheability(req);

    return No_Fault;
}

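// return a reference to the PTE in the current replacement (nlu) slot,
// optionally advancing the replacement pointer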
AlphaISA::PTE &
AlphaTLB::index(bool advance)
{
    AlphaISA::PTE *pte = &table[nlu];

    if (advance)
        nextnlu();

    return *pte;
}

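///////////////////////////////////////////////////////////////////////
//
//  Simulator configuration glue: register AlphaITB and AlphaDTB as
//  SimObjects, each with a single "size" parameter.
//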
DEFINE_SIM_OBJECT_CLASS_NAME("AlphaTLB", AlphaTLB)

BEGIN_DECLARE_SIM_OBJECT_PARAMS(AlphaITB)

    Param<int> size;

END_DECLARE_SIM_OBJECT_PARAMS(AlphaITB)

BEGIN_INIT_SIM_OBJECT_PARAMS(AlphaITB)

    INIT_PARAM_DFLT(size, "TLB size", 48)

END_INIT_SIM_OBJECT_PARAMS(AlphaITB)


CREATE_SIM_OBJECT(AlphaITB)
{
    return new AlphaITB(getInstanceName(), size);
}

REGISTER_SIM_OBJECT("AlphaITB", AlphaITB)

BEGIN_DECLARE_SIM_OBJECT_PARAMS(AlphaDTB)

    Param<int> size;

END_DECLARE_SIM_OBJECT_PARAMS(AlphaDTB)

BEGIN_INIT_SIM_OBJECT_PARAMS(AlphaDTB)

    INIT_PARAM_DFLT(size, "TLB size", 64)

END_INIT_SIM_OBJECT_PARAMS(AlphaDTB)


CREATE_SIM_OBJECT(AlphaDTB)
{
    return new AlphaDTB(getInstanceName(), size);
}

REGISTER_SIM_OBJECT("AlphaDTB", AlphaDTB)
