// tlb.cc revision 8229
1/*
2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Nathan Binkert
29 *          Steve Reinhardt
30 *          Andrew Schultz
31 */
32
33#include <string>
34#include <vector>
35
36#include "arch/alpha/faults.hh"
37#include "arch/alpha/pagetable.hh"
38#include "arch/alpha/tlb.hh"
39#include "base/inifile.hh"
40#include "base/str.hh"
41#include "base/trace.hh"
42#include "cpu/thread_context.hh"
43
44using namespace std;
45
46namespace AlphaISA {
47
48///////////////////////////////////////////////////////////////////////
49//
50//  Alpha TLB
51//
52
#ifdef DEBUG
// Debug-only flags; presumably intended to catch the "both uncacheable
// bit conventions used in one run" case described in
// checkCacheability() -- not referenced in this chunk, confirm usage.
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

// Convert a processor mode number into a one-hot mask, matched against
// the per-mode read/write-enable bit vectors (xre/xwe) in a TLB entry.
#define MODE2MASK(X) (1 << (X))
60TLB::TLB(const Params *p)
61    : BaseTLB(p), size(p->size), nlu(0)
62{
63    table = new TlbEntry[size];
64    memset(table, 0, sizeof(TlbEntry[size]));
65    flushCache();
66}
67
68TLB::~TLB()
69{
70    if (table)
71        delete [] table;
72}
73
74void
75TLB::regStats()
76{
77    fetch_hits
78        .name(name() + ".fetch_hits")
79        .desc("ITB hits");
80    fetch_misses
81        .name(name() + ".fetch_misses")
82        .desc("ITB misses");
83    fetch_acv
84        .name(name() + ".fetch_acv")
85        .desc("ITB acv");
86    fetch_accesses
87        .name(name() + ".fetch_accesses")
88        .desc("ITB accesses");
89
90    fetch_accesses = fetch_hits + fetch_misses;
91
92    read_hits
93        .name(name() + ".read_hits")
94        .desc("DTB read hits")
95        ;
96
97    read_misses
98        .name(name() + ".read_misses")
99        .desc("DTB read misses")
100        ;
101
102    read_acv
103        .name(name() + ".read_acv")
104        .desc("DTB read access violations")
105        ;
106
107    read_accesses
108        .name(name() + ".read_accesses")
109        .desc("DTB read accesses")
110        ;
111
112    write_hits
113        .name(name() + ".write_hits")
114        .desc("DTB write hits")
115        ;
116
117    write_misses
118        .name(name() + ".write_misses")
119        .desc("DTB write misses")
120        ;
121
122    write_acv
123        .name(name() + ".write_acv")
124        .desc("DTB write access violations")
125        ;
126
127    write_accesses
128        .name(name() + ".write_accesses")
129        .desc("DTB write accesses")
130        ;
131
132    data_hits
133        .name(name() + ".data_hits")
134        .desc("DTB hits")
135        ;
136
137    data_misses
138        .name(name() + ".data_misses")
139        .desc("DTB misses")
140        ;
141
142    data_acv
143        .name(name() + ".data_acv")
144        .desc("DTB access violations")
145        ;
146
147    data_accesses
148        .name(name() + ".data_accesses")
149        .desc("DTB accesses")
150        ;
151
152    data_hits = read_hits + write_hits;
153    data_misses = read_misses + write_misses;
154    data_acv = read_acv + write_acv;
155    data_accesses = read_accesses + write_accesses;
156}
157
158// look up an entry in the TLB
159TlbEntry *
160TLB::lookup(Addr vpn, uint8_t asn)
161{
162    // assume not found...
163    TlbEntry *retval = NULL;
164
165    if (EntryCache[0]) {
166        if (vpn == EntryCache[0]->tag &&
167            (EntryCache[0]->asma || EntryCache[0]->asn == asn))
168            retval = EntryCache[0];
169        else if (EntryCache[1]) {
170            if (vpn == EntryCache[1]->tag &&
171                (EntryCache[1]->asma || EntryCache[1]->asn == asn))
172                retval = EntryCache[1];
173            else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
174                     (EntryCache[2]->asma || EntryCache[2]->asn == asn))
175                retval = EntryCache[2];
176        }
177    }
178
179    if (retval == NULL) {
180        PageTable::const_iterator i = lookupTable.find(vpn);
181        if (i != lookupTable.end()) {
182            while (i->first == vpn) {
183                int index = i->second;
184                TlbEntry *entry = &table[index];
185                assert(entry->valid);
186                if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
187                    retval = updateCache(entry);
188                    break;
189                }
190
191                ++i;
192            }
193        }
194    }
195
196    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
197            retval ? "hit" : "miss", retval ? retval->ppn : 0);
198    return retval;
199}
200
Fault
TLB::checkCacheability(RequestPtr &req, bool itb)
{
    // in Alpha, cacheability is controlled by upper-level bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit
     * 40.  The Turbolaser platform (and EV5) support having the bit
     * in 39, but Tsunami (which Linux assumes uses an EV6) generates
     * accesses with the bit in 40.  So we must check for both, but we
     * have debug flags to catch a weird case where both are used,
     * which shouldn't happen.
     */

    // NOTE(review): despite the comment above, this code keys off
    // PAddrUncachedBit43 -- presumably the EV6 48-bit encoding from
    // pagetable.hh; confirm against that header.
    if (req->getPaddr() & PAddrUncachedBit43) {
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return new UnimpFault("IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(Request::UNCACHEABLE);

            // Clear bits 42:35 of the physical address (10-2 in
            // Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
        }
        // We shouldn't be able to read from an uncachable address in Alpha as
        // we don't have a ROM and we don't want to try to fetch from a device
        // register as we destroy any data that is clear-on-read.
        if (req->isUncacheable() && itb)
            return new UnimpFault("CPU trying to fetch from uncached I/O");

    }
    return NoFault;
}
238
239
240// insert a new TLB entry
241void
242TLB::insert(Addr addr, TlbEntry &entry)
243{
244    flushCache();
245    VAddr vaddr = addr;
246    if (table[nlu].valid) {
247        Addr oldvpn = table[nlu].tag;
248        PageTable::iterator i = lookupTable.find(oldvpn);
249
250        if (i == lookupTable.end())
251            panic("TLB entry not found in lookupTable");
252
253        int index;
254        while ((index = i->second) != nlu) {
255            if (table[index].tag != oldvpn)
256                panic("TLB entry not found in lookupTable");
257
258            ++i;
259        }
260
261        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);
262
263        lookupTable.erase(i);
264    }
265
266    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);
267
268    table[nlu] = entry;
269    table[nlu].tag = vaddr.vpn();
270    table[nlu].valid = true;
271
272    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
273    nextnlu();
274}
275
276void
277TLB::flushAll()
278{
279    DPRINTF(TLB, "flushAll\n");
280    memset(table, 0, sizeof(TlbEntry[size]));
281    flushCache();
282    lookupTable.clear();
283    nlu = 0;
284}
285
286void
287TLB::flushProcesses()
288{
289    flushCache();
290    PageTable::iterator i = lookupTable.begin();
291    PageTable::iterator end = lookupTable.end();
292    while (i != end) {
293        int index = i->second;
294        TlbEntry *entry = &table[index];
295        assert(entry->valid);
296
297        // we can't increment i after we erase it, so save a copy and
298        // increment it to get the next entry now
299        PageTable::iterator cur = i;
300        ++i;
301
302        if (!entry->asma) {
303            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index,
304                    entry->tag, entry->ppn);
305            entry->valid = false;
306            lookupTable.erase(cur);
307        }
308    }
309}
310
311void
312TLB::flushAddr(Addr addr, uint8_t asn)
313{
314    flushCache();
315    VAddr vaddr = addr;
316
317    PageTable::iterator i = lookupTable.find(vaddr.vpn());
318    if (i == lookupTable.end())
319        return;
320
321    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
322        int index = i->second;
323        TlbEntry *entry = &table[index];
324        assert(entry->valid);
325
326        if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
327            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
328                    entry->ppn);
329
330            // invalidate this entry
331            entry->valid = false;
332
333            lookupTable.erase(i++);
334        } else {
335            ++i;
336        }
337    }
338}
339
340
341void
342TLB::serialize(ostream &os)
343{
344    SERIALIZE_SCALAR(size);
345    SERIALIZE_SCALAR(nlu);
346
347    for (int i = 0; i < size; i++) {
348        nameOut(os, csprintf("%s.Entry%d", name(), i));
349        table[i].serialize(os);
350    }
351}
352
353void
354TLB::unserialize(Checkpoint *cp, const string &section)
355{
356    UNSERIALIZE_SCALAR(size);
357    UNSERIALIZE_SCALAR(nlu);
358
359    for (int i = 0; i < size; i++) {
360        table[i].unserialize(cp, csprintf("%s.Entry%d", section, i));
361        if (table[i].valid) {
362            lookupTable.insert(make_pair(table[i].tag, i));
363        }
364    }
365}
366
/**
 * Translate an instruction fetch.  Handles PAL-mode physical fetches,
 * the EV6 kernel "superpage" region, and normal ITB lookups; updates
 * the fetch_* statistics and returns the appropriate ITB fault (or a
 * machine check for a bad physical address) on failure.
 */
Fault
TLB::translateInst(RequestPtr req, ThreadContext *tc)
{
    //If this is a pal pc, then set PHYSICAL
    if (FULL_SYSTEM && PcPAL(req->getPC()))
        req->setFlags(Request::PHYSICAL);

    // NOTE(review): PcPAL is tested again here without the FULL_SYSTEM
    // qualifier, so PAL PCs take this early-exit path in either mode --
    // confirm this is intentional.
    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        fetch_hits++;
        return NoFault;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        // Physical request: the "virtual" address is already physical.
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            fetch_acv++;
            return new ItbAcvFault(req->getVaddr());
        }


        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) !=
                mode_kernel) {
                fetch_acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(),
                              asn);

            if (!entry) {
                fetch_misses++;
                return new ItbPageFault(req->getVaddr());
            }

            // Physical address = frame base + page offset; the & ~3
            // forces 4-byte instruction alignment.
            req->setPaddr((entry->ppn << PageShift) +
                          (VAddr(req->getVaddr()).offset()
                           & ~3));

            // check permissions for this access
            if (!(entry->xre &
                  (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
                // instruction access fault
                fetch_acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            fetch_hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req, true);

}
442
/**
 * Translate a data access.  Checks alignment, PAL alternate-mode
 * selection, the EV6 kernel "superpage" region, and otherwise performs
 * a DTB lookup with permission and fault-on-read/write checks; updates
 * the read_*/write_* statistics and returns the appropriate DTB fault
 * on failure.
 *
 * @param write true for a store, false for a load; selects which
 *              statistics, permission bits, and fault flags apply.
 */
Fault
TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
{
    mode_type mode =
        (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return new DtbAlignmentFault(req->getVaddr(), req->getFlags(), flags);
    }

    // In PAL mode the effective mode comes from the ALT_MODE IPR when
    // the request asks for it; otherwise PAL code runs as kernel.
    if (PcPAL(tc->pcState().pc())) {
        mode = (req->getFlags() & Request::ALTMODE) ?
            (mode_type)ALT_MODE_AM(
                tc->readMiscRegNoEffect(IPR_ALT_MODE))
            : mode_kernel;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
        }

        // Check for "superpage" mapping
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
                mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                                  MM_STAT_ACV_MASK);

                return new DtbAcvFault(req->getVaddr(), req->getFlags(),
                                       flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                // A miss on a VPTE (virtual page-table) access raises
                // the PDtbMiss variant rather than the normal one.
                return (req->getFlags() & Request::VPTE) ?
                    (Fault)(new PDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags)) :
                    (Fault)(new NDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags));
            }

            req->setPaddr((entry->ppn << PageShift) +
                          VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(entry->xwe & MODE2MASK(mode))) {
                    // declare the data write access violation fault
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (entry->fonw ? MM_STAT_FONW_MASK : 0);
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
                // Fault-on-write bit set: fault even though the
                // mapping is otherwise writable.
                if (entry->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            } else {
                if (!(entry->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (entry->fonr ? MM_STAT_FONR_MASK : 0);
                    return new DtbAcvFault(req->getVaddr(), req->getFlags(),
                                           flags);
                }
                // Fault-on-read bit set: fault even though the mapping
                // is otherwise readable.
                if (entry->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask)
        return genMachineCheckFault();

    return checkCacheability(req);
}
569
570TlbEntry &
571TLB::index(bool advance)
572{
573    TlbEntry *entry = &table[nlu];
574
575    if (advance)
576        nextnlu();
577
578    return *entry;
579}
580
581Fault
582TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
583{
584    if (mode == Execute)
585        return translateInst(req, tc);
586    else
587        return translateData(req, tc, mode == Write);
588}
589
590void
591TLB::translateTiming(RequestPtr req, ThreadContext *tc,
592        Translation *translation, Mode mode)
593{
594    assert(translation);
595    translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
596}
597
598} // namespace AlphaISA
599
// Factory hook invoked by the configuration system: construct the TLB
// SimObject from its generated parameter object.
AlphaISA::TLB *
AlphaTLBParams::create()
{
    return new AlphaISA::TLB(this);
}
605