tlb.cc revision 8737:770ccf3af571
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Steve Reinhardt
 *          Andrew Schultz
 */

#include <string>
#include <vector>

#include "arch/alpha/faults.hh"
#include "arch/alpha/pagetable.hh"
#include "arch/alpha/tlb.hh"
#include "arch/generic/debugfaults.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"

using namespace std;

namespace AlphaISA {

///////////////////////////////////////////////////////////////////////
//
//  Alpha TLB
//

#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

#define MODE2MASK(X) (1 << (X))

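// Allocate the array of TLB entries, zero it so every slot starts out
// invalid, and clear the small entry-lookup cache.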
TLB::TLB(const Params *p)
    : BaseTLB(p), size(p->size), nlu(0)
{
    table = new TlbEntry[size];
    memset(table, 0, sizeof(TlbEntry) * size);
    flushCache();
}

TLB::~TLB()
{
    if (table)
        delete [] table;
}

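// Register ITB and DTB statistics. The aggregate fetch and data counters
// are formulas computed from the corresponding hit/miss/acv statistics.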
void
TLB::regStats()
{
    fetch_hits
        .name(name() + ".fetch_hits")
        .desc("ITB hits");
    fetch_misses
        .name(name() + ".fetch_misses")
        .desc("ITB misses");
    fetch_acv
        .name(name() + ".fetch_acv")
        .desc("ITB acv");
    fetch_accesses
        .name(name() + ".fetch_accesses")
        .desc("ITB accesses");

    fetch_accesses = fetch_hits + fetch_misses;

    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        ;

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        ;

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    data_hits
        .name(name() + ".data_hits")
        .desc("DTB hits")
        ;

    data_misses
        .name(name() + ".data_misses")
        .desc("DTB misses")
        ;

    data_acv
        .name(name() + ".data_acv")
        .desc("DTB access violations")
        ;

    data_accesses
        .name(name() + ".data_accesses")
        .desc("DTB accesses")
        ;

    data_hits = read_hits + write_hits;
    data_misses = read_misses + write_misses;
    data_acv = read_acv + write_acv;
    data_accesses = read_accesses + write_accesses;
}

// look up an entry in the TLB
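// The three most recently used translations are cached in EntryCache and
// probed first; on a miss there, the lookupTable multimap is searched for
// an entry whose tag matches the VPN and whose ASN matches (or which has
// the address-space-match bit set).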
TlbEntry *
TLB::lookup(Addr vpn, uint8_t asn)
{
    // assume not found...
    TlbEntry *retval = NULL;

    if (EntryCache[0]) {
        if (vpn == EntryCache[0]->tag &&
            (EntryCache[0]->asma || EntryCache[0]->asn == asn))
            retval = EntryCache[0];
        else if (EntryCache[1]) {
            if (vpn == EntryCache[1]->tag &&
                (EntryCache[1]->asma || EntryCache[1]->asn == asn))
                retval = EntryCache[1];
            else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
                     (EntryCache[2]->asma || EntryCache[2]->asn == asn))
                retval = EntryCache[2];
        }
    }

    if (retval == NULL) {
        PageTable::const_iterator i = lookupTable.find(vpn);
        if (i != lookupTable.end()) {
            while (i->first == vpn) {
                int index = i->second;
                TlbEntry *entry = &table[index];
                assert(entry->valid);
                if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
                    retval = updateCache(entry);
                    break;
                }

                ++i;
            }
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}

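// Determine whether the physical address targets uncacheable (I/O) space;
// if so, mark the request uncacheable and strip the uncached-space bits
// from the address.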
Fault
TLB::checkCacheability(RequestPtr &req, bool itb)
{
    // in Alpha, cacheability is controlled by upper-level bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit
     * 40.  The Turbolaser platform (and EV5) support having the bit
     * in 39, but Tsunami (which Linux assumes uses an EV6) generates
     * accesses with the bit in 40.  So we must check for both, but we
     * have debug flags to catch a weird case where both are used,
     * which shouldn't happen.
     */


    if (req->getPaddr() & PAddrUncachedBit43) {
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return new UnimpFault("IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(Request::UNCACHEABLE);

            // Clear bits 42:35 of the physical address (10-2 in
            // Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
        }
        // We shouldn't be able to read from an uncacheable address in Alpha
        // as we don't have a ROM and we don't want to try to fetch from a
        // device register as we destroy any data that is clear-on-read.
        if (req->isUncacheable() && itb)
            return new UnimpFault("CPU trying to fetch from uncached I/O");

    }
    return NoFault;
}


// insert a new TLB entry
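// The victim slot is chosen by the round-robin "not last used" (nlu)
// pointer; any valid mapping already in that slot is removed from the
// lookup table first.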
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    flushCache();
    VAddr vaddr = addr;
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);

    table[nlu] = entry;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

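// Invalidate every entry, clear the lookup structures, and reset the
// replacement pointer.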
void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    memset(table, 0, sizeof(TlbEntry) * size);
    flushCache();
    lookupTable.clear();
    nlu = 0;
}

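// Invalidate all process-specific entries; entries with the
// address-space-match (ASM) bit set are left intact.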
void
TLB::flushProcesses()
{
    flushCache();
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!entry->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index,
                    entry->tag, entry->ppn);
            entry->valid = false;
            lookupTable.erase(cur);
        }
    }
}

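// Invalidate any entry that maps the given virtual address for the given
// ASN (entries with the ASM bit set match regardless of ASN).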
void
TLB::flushAddr(Addr addr, uint8_t asn)
{
    flushCache();
    VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    entry->ppn);

            // invalidate this entry
            entry->valid = false;

            lookupTable.erase(i++);
        } else {
            ++i;
        }
    }
}


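// Checkpointing: write out the size and replacement pointer, then each
// table entry under its own "<name>.EntryN" section.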
void
TLB::serialize(ostream &os)
{
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        nameOut(os, csprintf("%s.Entry%d", name(), i));
        table[i].serialize(os);
    }
}

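// Restore the TLB from a checkpoint and rebuild the VPN lookup table from
// the valid entries.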
void
TLB::unserialize(Checkpoint *cp, const string &section)
{
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++) {
        table[i].unserialize(cp, csprintf("%s.Entry%d", section, i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}

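// Instruction-fetch translation: PAL-mode PCs and physical requests bypass
// the TLB, the EV6 superpage region maps directly to physical memory in
// kernel mode, and everything else requires a TLB lookup plus an
// execute-permission check.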
Fault
TLB::translateInst(RequestPtr req, ThreadContext *tc)
{
    // If this is a pal pc, then set PHYSICAL
    if (FULL_SYSTEM && PcPAL(req->getPC()))
        req->setFlags(Request::PHYSICAL);

    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        fetch_hits++;
        return NoFault;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            fetch_acv++;
            return new ItbAcvFault(req->getVaddr());
        }


        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) !=
                mode_kernel) {
                fetch_acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(),
                              asn);

            if (!entry) {
                fetch_misses++;
                return new ItbPageFault(req->getVaddr());
            }

            req->setPaddr((entry->ppn << PageShift) +
                          (VAddr(req->getVaddr()).offset()
                           & ~3));

            // check permissions for this access
            if (!(entry->xre &
                  (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
                // instruction access fault
                fetch_acv++;
                return new ItbAcvFault(req->getVaddr());
            }

            fetch_hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask) {
        return new MachineCheckFault();
    }

    return checkCacheability(req, true);

}

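// Data translation: check alignment first, honor PAL alternate-mode
// accesses, handle physical and superpage requests, and otherwise look up
// the PTE and enforce read/write permission and fault-on-read/write bits.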
Fault
TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
{
    mode_type mode =
        (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return new DtbAlignmentFault(req->getVaddr(), req->getFlags(), flags);
    }

    if (PcPAL(req->getPC())) {
        mode = (req->getFlags() & Request::ALTMODE) ?
            (mode_type)ALT_MODE_AM(
                tc->readMiscRegNoEffect(IPR_ALT_MODE))
            : mode_kernel;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return new DtbPageFault(req->getVaddr(), req->getFlags(), flags);
        }

        // Check for "superpage" mapping
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
                mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                                  MM_STAT_ACV_MASK);

                return new DtbAcvFault(req->getVaddr(), req->getFlags(),
                                       flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                return (req->getFlags() & Request::VPTE) ?
                    (Fault)(new PDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags)) :
                    (Fault)(new NDtbMissFault(req->getVaddr(), req->getFlags(),
                                              flags));
            }

            req->setPaddr((entry->ppn << PageShift) +
                          VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(entry->xwe & MODE2MASK(mode))) {
                    // no write permission in this mode: access violation
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (entry->fonw ? MM_STAT_FONW_MASK : 0);
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
                if (entry->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            } else {
                if (!(entry->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (entry->fonr ? MM_STAT_FONR_MASK : 0);
                    return new DtbAcvFault(req->getVaddr(), req->getFlags(),
                                           flags);
                }
                if (entry->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return new DtbPageFault(req->getVaddr(), req->getFlags(),
                                            flags);
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask) {
        return new MachineCheckFault();
    }

    return checkCacheability(req);
}

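// Return a reference to the entry at the current not-last-used slot,
// optionally advancing the replacement pointer.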
TlbEntry &
TLB::index(bool advance)
{
    TlbEntry *entry = &table[nlu];

    if (advance)
        nextnlu();

    return *entry;
}

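// Dispatch an atomic translation to the instruction or data path based on
// the access mode.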
Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
{
    if (mode == Execute)
        return translateInst(req, tc);
    else
        return translateData(req, tc, mode == Write);
}

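// Timing translations are completed immediately by reusing the atomic
// path and then invoking the caller's completion callback.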
void
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
        Translation *translation, Mode mode)
{
    assert(translation);
    translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
}

} // namespace AlphaISA

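// Factory method: construct the simulated TLB from its parameter object.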
AlphaISA::TLB *
AlphaTLBParams::create()
{
    return new AlphaISA::TLB(this);
}