tlb.cc revision 10474:799c8ee4ecba
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Steve Reinhardt
 *          Andrew Schultz
 */

#include <memory>
#include <string>
#include <vector>

#include "arch/alpha/faults.hh"
#include "arch/alpha/pagetable.hh"
#include "arch/alpha/tlb.hh"
#include "arch/generic/debugfaults.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"
#include "sim/full_system.hh"

using namespace std;

namespace AlphaISA {

///////////////////////////////////////////////////////////////////////
//
//  Alpha TLB
//

#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

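// MODE2MASK turns a processor mode number into a one-hot mask so it can
// be tested against the per-entry read/write permission bit vectors
// (xre/xwe) used in the translation routines below.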
#define MODE2MASK(X) (1 << (X))

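// The TLB proper is a flat array of 'size' entries.  Replacement is a
// simple round-robin scheme driven by the nlu pointer, and lookupTable
// is a multimap from virtual page number to table index that lets
// lookup() find candidate entries without scanning the whole array.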
TLB::TLB(const Params *p)
    : BaseTLB(p), size(p->size), nlu(0)
{
    table = new TlbEntry[size];
    memset(table, 0, sizeof(TlbEntry) * size);
    flushCache();
}

TLB::~TLB()
{
    if (table)
        delete [] table;
}

void
TLB::regStats()
{
    fetch_hits
        .name(name() + ".fetch_hits")
        .desc("ITB hits");
    fetch_misses
        .name(name() + ".fetch_misses")
        .desc("ITB misses");
    fetch_acv
        .name(name() + ".fetch_acv")
        .desc("ITB acv");
    fetch_accesses
        .name(name() + ".fetch_accesses")
        .desc("ITB accesses");

    fetch_accesses = fetch_hits + fetch_misses;

    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        ;

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        ;

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    data_hits
        .name(name() + ".data_hits")
        .desc("DTB hits")
        ;

    data_misses
        .name(name() + ".data_misses")
        .desc("DTB misses")
        ;

    data_acv
        .name(name() + ".data_acv")
        .desc("DTB access violations")
        ;

    data_accesses
        .name(name() + ".data_accesses")
        .desc("DTB accesses")
        ;

    data_hits = read_hits + write_hits;
    data_misses = read_misses + write_misses;
    data_acv = read_acv + write_acv;
    data_accesses = read_accesses + write_accesses;
}

// look up an entry in the TLB
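// A three-entry MRU cache (EntryCache) sits in front of the main table;
// on a cache miss we fall back to the VPN-indexed lookupTable, which may
// hold several entries for the same VPN that differ only in ASN.  An
// entry matches when its ASN equals the requested one or its
// address-space-match (asma) bit is set.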
TlbEntry *
TLB::lookup(Addr vpn, uint8_t asn)
{
    // assume not found...
    TlbEntry *retval = NULL;

    if (EntryCache[0]) {
        if (vpn == EntryCache[0]->tag &&
            (EntryCache[0]->asma || EntryCache[0]->asn == asn))
            retval = EntryCache[0];
        else if (EntryCache[1]) {
            if (vpn == EntryCache[1]->tag &&
                (EntryCache[1]->asma || EntryCache[1]->asn == asn))
                retval = EntryCache[1];
            else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
                     (EntryCache[2]->asma || EntryCache[2]->asn == asn))
                retval = EntryCache[2];
        }
    }

    if (retval == NULL) {
        PageTable::const_iterator i = lookupTable.find(vpn);
        if (i != lookupTable.end()) {
            while (i->first == vpn) {
                int index = i->second;
                TlbEntry *entry = &table[index];
                assert(entry->valid);
                if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
                    retval = updateCache(entry);
                    break;
                }

                ++i;
            }
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}

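// Decide whether a request targets uncacheable I/O space based on the
// upper physical address bits, flag the request accordingly, and reject
// accesses the model cannot handle (IPR space, instruction fetches from
// uncached I/O).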
Fault
TLB::checkCacheability(RequestPtr &req, bool itb)
{
    // in Alpha, cacheability is controlled by upper-level bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit
     * 40.  The Turbolaser platform (and EV5) support having the bit
     * in 39, but Tsunami (which Linux assumes uses an EV6) generates
     * accesses with the bit in 40.  So we must check for both, but we
     * have debug flags to catch a weird case where both are used,
     * which shouldn't happen.
     */


    if (req->getPaddr() & PAddrUncachedBit43) {
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return std::make_shared<UnimpFault>(
                "IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(Request::UNCACHEABLE);

            // Clear bits 42:35 of the physical address (10-2 in
            // Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
        }
        // We shouldn't be able to read from an uncachable address in Alpha as
        // we don't have a ROM and we don't want to try to fetch from a device
        // register as we destroy any data that is clear-on-read.
        if (req->isUncacheable() && itb)
            return std::make_shared<UnimpFault>(
                "CPU trying to fetch from uncached I/O");

    }
    return NoFault;
}


// insert a new TLB entry
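// The victim is whatever currently occupies the slot at the replacement
// pointer: its mapping is removed from lookupTable, the new entry is
// copied in with its tag set to the VPN, and the pointer advances.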
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    flushCache();
    VAddr vaddr = addr;
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);

    table[nlu] = entry;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    memset(table, 0, sizeof(TlbEntry) * size);
    flushCache();
    lookupTable.clear();
    nlu = 0;
}

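// Invalidate every entry that is not marked address-space-match (asma);
// asma entries are shared across address spaces and survive the flush.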
void
TLB::flushProcesses()
{
    flushCache();
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!entry->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index,
                    entry->tag, entry->ppn);
            entry->valid = false;
            lookupTable.erase(cur);
        }
    }
}

void
TLB::flushAddr(Addr addr, uint8_t asn)
{
    flushCache();
    VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    entry->ppn);

            // invalidate this entry
            entry->valid = false;

            lookupTable.erase(i++);
        } else {
            ++i;
        }
    }
}


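// Checkpointing: each TLB entry is serialized into its own subsection,
// and on restore lookupTable is rebuilt from the entries that are still
// valid.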
void
TLB::serialize(ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.Entry%d", name(), i));
        table[i].serialize(os);
    }
}

void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        table[i].unserialize(cp, csprintf("%s.Entry%d", section, i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}

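// Instruction translation: PAL-mode PCs are treated as physical (with
// the low PAL marker bits stripped), the EV6 superpage region maps
// directly to physical addresses in kernel mode, and everything else
// goes through a TLB lookup plus an execute-permission (xre) check
// against the current mode.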
Fault
TLB::translateInst(RequestPtr req, ThreadContext *tc)
{
    //If this is a pal pc, then set PHYSICAL
    if (FullSystem && PcPAL(req->getPC()))
        req->setFlags(Request::PHYSICAL);

    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        fetch_hits++;
        return NoFault;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            fetch_acv++;
            return std::make_shared<ItbAcvFault>(req->getVaddr());
        }


        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) !=
                mode_kernel) {
                fetch_acv++;
                return std::make_shared<ItbAcvFault>(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(),
                              asn);

            if (!entry) {
                fetch_misses++;
                return std::make_shared<ItbPageFault>(req->getVaddr());
            }

            req->setPaddr((entry->ppn << PageShift) +
                          (VAddr(req->getVaddr()).offset()
                           & ~3));

            // check permissions for this access
            if (!(entry->xre &
                  (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
                // instruction access fault
                fetch_acv++;
                return std::make_shared<ItbAcvFault>(req->getVaddr());
            }

            fetch_hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask) {
        return std::make_shared<MachineCheckFault>();
    }

    return checkCacheability(req, true);

}

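// Data translation: check alignment first, let PAL code override the
// effective mode (via ALT_MODE when requested), handle the kernel-only
// superpage region, and otherwise look the VPN up in the TLB and check
// the read/write permission (xre/xwe) and fault-on-read/write bits for
// the current mode.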
Fault
TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
{
    mode_type mode =
        (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return std::make_shared<DtbAlignmentFault>(req->getVaddr(),
                                                   req->getFlags(),
                                                   flags);
    }

    if (PcPAL(req->getPC())) {
        mode = (req->getFlags() & Request::ALTMODE) ?
            (mode_type)ALT_MODE_AM(
                tc->readMiscRegNoEffect(IPR_ALT_MODE))
            : mode_kernel;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                  req->getFlags(),
                                                  flags);
        }

        // Check for "superpage" mapping
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
                mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                                  MM_STAT_ACV_MASK);

                return std::make_shared<DtbAcvFault>(req->getVaddr(),
                                                     req->getFlags(),
                                                     flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                return (req->getFlags() & Request::VPTE) ?
                    (Fault)(std::make_shared<PDtbMissFault>(req->getVaddr(),
                                                            req->getFlags(),
                                                            flags)) :
                    (Fault)(std::make_shared<NDtbMissFault>(req->getVaddr(),
                                                            req->getFlags(),
                                                            flags));
            }

            req->setPaddr((entry->ppn << PageShift) +
                          VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(entry->xwe & MODE2MASK(mode))) {
                    // write access violation: no write permission in
                    // the current mode
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (entry->fonw ? MM_STAT_FONW_MASK : 0);
                    return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                          req->getFlags(),
                                                          flags);
                }
                if (entry->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK;
                    return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                          req->getFlags(),
                                                          flags);
                }
            } else {
                if (!(entry->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (entry->fonr ? MM_STAT_FONR_MASK : 0);
                    return std::make_shared<DtbAcvFault>(req->getVaddr(),
                                                         req->getFlags(),
                                                         flags);
                }
                if (entry->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                          req->getFlags(),
                                                          flags);
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask) {
        return std::make_shared<MachineCheckFault>();
    }

    return checkCacheability(req);
}

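// Return a reference to the entry at the replacement pointer; callers
// that pass advance == true also bump the pointer to the next slot.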
TlbEntry &
TLB::index(bool advance)
{
    TlbEntry *entry = &table[nlu];

    if (advance)
        nextnlu();

    return *entry;
}

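// Atomic translation dispatches to the instruction or data path based on
// the access mode; the timing interface below performs the same atomic
// translation and immediately reports the result through the Translation
// callback.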
Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
{
    if (mode == Execute)
        return translateInst(req, tc);
    else
        return translateData(req, tc, mode == Write);
}

void
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
        Translation *translation, Mode mode)
{
    assert(translation);
    translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
}

Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode)
{
    panic("Not implemented\n");
    return NoFault;
}

Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    return NoFault;
}

} // namespace AlphaISA

AlphaISA::TLB *
AlphaTLBParams::create()
{
    return new AlphaISA::TLB(this);
}
