tlb.cc revision 11320:42ecb523c64a
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 *          Steve Reinhardt
 *          Andrew Schultz
 */

#include "arch/alpha/tlb.hh"

#include <algorithm>
#include <memory>
#include <string>
#include <vector>

#include "arch/alpha/faults.hh"
#include "arch/alpha/pagetable.hh"
#include "arch/generic/debugfaults.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"
#include "sim/full_system.hh"

using namespace std;

namespace AlphaISA {

///////////////////////////////////////////////////////////////////////
//
//  Alpha TLB
//

#ifdef DEBUG
bool uncacheBit39 = false;
bool uncacheBit40 = false;
#endif

#define MODE2MASK(X) (1 << (X))

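// Construct the fully associative TLB with p->size entries. nlu indexes the
// next entry to be replaced; flushCache() (declared in tlb.hh) resets the
// small EntryCache that lookup() consults before searching the table.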
TLB::TLB(const Params *p)
    : BaseTLB(p), table(p->size), nlu(0)
{
    flushCache();
}

TLB::~TLB()
{
}

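// Register ITB and DTB statistics. The aggregate counters (fetch_accesses
// and the data_* stats) are defined as sums of the per-type counters below.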
void
TLB::regStats()
{
    fetch_hits
        .name(name() + ".fetch_hits")
        .desc("ITB hits");
    fetch_misses
        .name(name() + ".fetch_misses")
        .desc("ITB misses");
    fetch_acv
        .name(name() + ".fetch_acv")
        .desc("ITB acv");
    fetch_accesses
        .name(name() + ".fetch_accesses")
        .desc("ITB accesses");

    fetch_accesses = fetch_hits + fetch_misses;

    read_hits
        .name(name() + ".read_hits")
        .desc("DTB read hits")
        ;

    read_misses
        .name(name() + ".read_misses")
        .desc("DTB read misses")
        ;

    read_acv
        .name(name() + ".read_acv")
        .desc("DTB read access violations")
        ;

    read_accesses
        .name(name() + ".read_accesses")
        .desc("DTB read accesses")
        ;

    write_hits
        .name(name() + ".write_hits")
        .desc("DTB write hits")
        ;

    write_misses
        .name(name() + ".write_misses")
        .desc("DTB write misses")
        ;

    write_acv
        .name(name() + ".write_acv")
        .desc("DTB write access violations")
        ;

    write_accesses
        .name(name() + ".write_accesses")
        .desc("DTB write accesses")
        ;

    data_hits
        .name(name() + ".data_hits")
        .desc("DTB hits")
        ;

    data_misses
        .name(name() + ".data_misses")
        .desc("DTB misses")
        ;

    data_acv
        .name(name() + ".data_acv")
        .desc("DTB access violations")
        ;

    data_accesses
        .name(name() + ".data_accesses")
        .desc("DTB accesses")
        ;

    data_hits = read_hits + write_hits;
    data_misses = read_misses + write_misses;
    data_acv = read_acv + write_acv;
    data_accesses = read_accesses + write_accesses;
}

// look up an entry in the TLB
TlbEntry *
TLB::lookup(Addr vpn, uint8_t asn)
{
    // assume not found...
    TlbEntry *retval = NULL;

    if (EntryCache[0]) {
        if (vpn == EntryCache[0]->tag &&
            (EntryCache[0]->asma || EntryCache[0]->asn == asn))
            retval = EntryCache[0];
        else if (EntryCache[1]) {
            if (vpn == EntryCache[1]->tag &&
                (EntryCache[1]->asma || EntryCache[1]->asn == asn))
                retval = EntryCache[1];
            else if (EntryCache[2] && vpn == EntryCache[2]->tag &&
                     (EntryCache[2]->asma || EntryCache[2]->asn == asn))
                retval = EntryCache[2];
        }
    }

    if (retval == NULL) {
        PageTable::const_iterator i = lookupTable.find(vpn);
        if (i != lookupTable.end()) {
            while (i->first == vpn) {
                int index = i->second;
                TlbEntry *entry = &table[index];
                assert(entry->valid);
                if (vpn == entry->tag && (entry->asma || entry->asn == asn)) {
                    retval = updateCache(entry);
                    break;
                }

                ++i;
            }
        }
    }

    DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
            retval ? "hit" : "miss", retval ? retval->ppn : 0);
    return retval;
}

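// Decide whether a physical access targets uncached space. On Alpha the
// upper physical address bits select uncached/IPR space; matching requests
// are flagged uncacheable and the space bits are stripped from the address.
// Instruction fetches from uncached space are reported as unimplemented.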
Fault
TLB::checkCacheability(RequestPtr &req, bool itb)
{
    // in Alpha, cacheability is controlled by upper-level bits of the
    // physical address

    /*
     * We support having the uncacheable bit in either bit 39 or bit
     * 40.  The Turbolaser platform (and EV5) support having the bit
     * in 39, but Tsunami (which Linux assumes uses an EV6) generates
     * accesses with the bit in 40.  So we must check for both, but we
     * have debug flags to catch a weird case where both are used,
     * which shouldn't happen.
     */


    if (req->getPaddr() & PAddrUncachedBit43) {
        // IPR memory space not implemented
        if (PAddrIprSpace(req->getPaddr())) {
            return std::make_shared<UnimpFault>(
                "IPR memory space not implemented!");
        } else {
            // mark request as uncacheable
            req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);

            // Clear bits 42:35 of the physical address (10-2 in
            // Tsunami manual)
            req->setPaddr(req->getPaddr() & PAddrUncachedMask);
        }
        // We shouldn't be able to read from an uncacheable address in Alpha
        // as we don't have a ROM and we don't want to try to fetch from a
        // device register as we destroy any data that is clear-on-read.
        if (req->isUncacheable() && itb)
            return std::make_shared<UnimpFault>(
                "CPU trying to fetch from uncached I/O");

    }
    return NoFault;
}


// insert a new TLB entry
void
TLB::insert(Addr addr, TlbEntry &entry)
{
    flushCache();
    VAddr vaddr = addr;
    if (table[nlu].valid) {
        Addr oldvpn = table[nlu].tag;
        PageTable::iterator i = lookupTable.find(oldvpn);

        if (i == lookupTable.end())
            panic("TLB entry not found in lookupTable");

        int index;
        while ((index = i->second) != nlu) {
            if (table[index].tag != oldvpn)
                panic("TLB entry not found in lookupTable");

            ++i;
        }

        DPRINTF(TLB, "remove @%d: %#x -> %#x\n", nlu, oldvpn, table[nlu].ppn);

        lookupTable.erase(i);
    }

    DPRINTF(TLB, "insert @%d: %#x -> %#x\n", nlu, vaddr.vpn(), entry.ppn);

    table[nlu] = entry;
    table[nlu].tag = vaddr.vpn();
    table[nlu].valid = true;

    lookupTable.insert(make_pair(vaddr.vpn(), nlu));
    nextnlu();
}

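// Invalidate every entry, clear the lookup table and entry cache, and reset
// the replacement pointer.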
void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll\n");
    std::fill(table.begin(), table.end(), TlbEntry());
    flushCache();
    lookupTable.clear();
    nlu = 0;
}

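// Invalidate all process-private entries; entries with the
// address-space-match (asma) bit set are global and survive the flush.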
void
TLB::flushProcesses()
{
    flushCache();
    PageTable::iterator i = lookupTable.begin();
    PageTable::iterator end = lookupTable.end();
    while (i != end) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        // we can't increment i after we erase it, so save a copy and
        // increment it to get the next entry now
        PageTable::iterator cur = i;
        ++i;

        if (!entry->asma) {
            DPRINTF(TLB, "flush @%d: %#x -> %#x\n", index,
                    entry->tag, entry->ppn);
            entry->valid = false;
            lookupTable.erase(cur);
        }
    }
}

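// Invalidate any entry mapping the given virtual address for the given ASN
// (entries with the asma bit set match every ASN).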
void
TLB::flushAddr(Addr addr, uint8_t asn)
{
    flushCache();
    VAddr vaddr = addr;

    PageTable::iterator i = lookupTable.find(vaddr.vpn());
    if (i == lookupTable.end())
        return;

    while (i != lookupTable.end() && i->first == vaddr.vpn()) {
        int index = i->second;
        TlbEntry *entry = &table[index];
        assert(entry->valid);

        if (vaddr.vpn() == entry->tag && (entry->asma || entry->asn == asn)) {
            DPRINTF(TLB, "flushaddr @%d: %#x -> %#x\n", index, vaddr.vpn(),
                    entry->ppn);

            // invalidate this entry
            entry->valid = false;

            lookupTable.erase(i++);
        } else {
            ++i;
        }
    }
}


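// Checkpoint the TLB: table size, replacement pointer, and each entry.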
void
TLB::serialize(CheckpointOut &cp) const
{
    const unsigned size(table.size());
    SERIALIZE_SCALAR(size);
    SERIALIZE_SCALAR(nlu);

    for (int i = 0; i < size; i++)
        table[i].serializeSection(cp, csprintf("Entry%d", i));
}

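// Restore the TLB from a checkpoint and rebuild the vpn -> index lookup
// table from the valid entries.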
void
TLB::unserialize(CheckpointIn &cp)
{
    unsigned size(0);
    UNSERIALIZE_SCALAR(size);
    UNSERIALIZE_SCALAR(nlu);

    table.resize(size);
    for (int i = 0; i < size; i++) {
        table[i].unserializeSection(cp, csprintf("Entry%d", i));
        if (table[i].valid) {
            lookupTable.insert(make_pair(table[i].tag, i));
        }
    }
}

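// Instruction-side (ITB) translation: PAL PCs and physical requests bypass
// the TLB, EV6 superpage addresses map directly to physical addresses in
// kernel mode, and everything else goes through a TLB lookup plus an
// execute-permission check.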
Fault
TLB::translateInst(RequestPtr req, ThreadContext *tc)
{
    //If this is a pal pc, then set PHYSICAL
    if (FullSystem && PcPAL(req->getPC()))
        req->setFlags(Request::PHYSICAL);

    if (PcPAL(req->getPC())) {
        // strip off PAL PC marker (lsb is 1)
        req->setPaddr((req->getVaddr() & ~3) & PAddrImplMask);
        fetch_hits++;
        return NoFault;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            fetch_acv++;
            return std::make_shared<ItbAcvFault>(req->getVaddr());
        }


        // VA<42:41> == 2, VA<39:13> maps directly to PA<39:13> for EV5
        // VA<47:41> == 0x7e, VA<40:13> maps directly to PA<40:13> for EV6
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (ICM_CM(tc->readMiscRegNoEffect(IPR_ICM)) !=
                mode_kernel) {
                fetch_acv++;
                return std::make_shared<ItbAcvFault>(req->getVaddr());
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            // not a physical address: need to look up pte
            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(),
                              asn);

            if (!entry) {
                fetch_misses++;
                return std::make_shared<ItbPageFault>(req->getVaddr());
            }

            req->setPaddr((entry->ppn << PageShift) +
                          (VAddr(req->getVaddr()).offset()
                           & ~3));

            // check permissions for this access
            if (!(entry->xre &
                  (1 << ICM_CM(tc->readMiscRegNoEffect(IPR_ICM))))) {
                // instruction access fault
                fetch_acv++;
                return std::make_shared<ItbAcvFault>(req->getVaddr());
            }

            fetch_hits++;
        }
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask) {
        return std::make_shared<MachineCheckFault>();
    }

    return checkCacheability(req, true);

}

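// Data-side (DTB) translation: checks alignment, honors the PAL alternate
// access mode, handles physical and superpage accesses, and otherwise
// performs a TLB lookup followed by read/write permission and
// fault-on-read/write checks.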
Fault
TLB::translateData(RequestPtr req, ThreadContext *tc, bool write)
{
    mode_type mode =
        (mode_type)DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM));

    /**
     * Check for alignment faults
     */
    if (req->getVaddr() & (req->getSize() - 1)) {
        DPRINTF(TLB, "Alignment Fault on %#x, size = %d\n", req->getVaddr(),
                req->getSize());
        uint64_t flags = write ? MM_STAT_WR_MASK : 0;
        return std::make_shared<DtbAlignmentFault>(req->getVaddr(),
                                                   req->getFlags(),
                                                   flags);
    }

    if (PcPAL(req->getPC())) {
        mode = (req->getFlags() & AlphaRequestFlags::ALTMODE) ?
            (mode_type)ALT_MODE_AM(
                tc->readMiscRegNoEffect(IPR_ALT_MODE))
            : mode_kernel;
    }

    if (req->getFlags() & Request::PHYSICAL) {
        req->setPaddr(req->getVaddr());
    } else {
        // verify that this is a good virtual address
        if (!validVirtualAddress(req->getVaddr())) {
            if (write) { write_acv++; } else { read_acv++; }
            uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                MM_STAT_BAD_VA_MASK |
                MM_STAT_ACV_MASK;
            return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                  req->getFlags(),
                                                  flags);
        }

        // Check for "superpage" mapping
        if (VAddrSpaceEV6(req->getVaddr()) == 0x7e) {
            // only valid in kernel mode
            if (DTB_CM_CM(tc->readMiscRegNoEffect(IPR_DTB_CM)) !=
                mode_kernel) {
                if (write) { write_acv++; } else { read_acv++; }
                uint64_t flags = ((write ? MM_STAT_WR_MASK : 0) |
                                  MM_STAT_ACV_MASK);

                return std::make_shared<DtbAcvFault>(req->getVaddr(),
                                                     req->getFlags(),
                                                     flags);
            }

            req->setPaddr(req->getVaddr() & PAddrImplMask);

            // sign extend the physical address properly
            if (req->getPaddr() & PAddrUncachedBit40)
                req->setPaddr(req->getPaddr() | ULL(0xf0000000000));
            else
                req->setPaddr(req->getPaddr() & ULL(0xffffffffff));
        } else {
            if (write)
                write_accesses++;
            else
                read_accesses++;

            int asn = DTB_ASN_ASN(tc->readMiscRegNoEffect(IPR_DTB_ASN));

            // not a physical address: need to look up pte
            TlbEntry *entry = lookup(VAddr(req->getVaddr()).vpn(), asn);

            if (!entry) {
                // page fault
                if (write) { write_misses++; } else { read_misses++; }
                uint64_t flags = (write ? MM_STAT_WR_MASK : 0) |
                    MM_STAT_DTB_MISS_MASK;
                return (req->getFlags() & AlphaRequestFlags::VPTE) ?
                    (Fault)(std::make_shared<PDtbMissFault>(req->getVaddr(),
                                                            req->getFlags(),
                                                            flags)) :
                    (Fault)(std::make_shared<NDtbMissFault>(req->getVaddr(),
                                                            req->getFlags(),
                                                            flags));
            }

            req->setPaddr((entry->ppn << PageShift) +
                          VAddr(req->getVaddr()).offset());

            if (write) {
                if (!(entry->xwe & MODE2MASK(mode))) {
                    // write permission violation: fault on this data access
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK |
                        MM_STAT_ACV_MASK |
                        (entry->fonw ? MM_STAT_FONW_MASK : 0);
                    return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                          req->getFlags(),
                                                          flags);
                }
                if (entry->fonw) {
                    write_acv++;
                    uint64_t flags = MM_STAT_WR_MASK | MM_STAT_FONW_MASK;
                    return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                          req->getFlags(),
                                                          flags);
                }
            } else {
                if (!(entry->xre & MODE2MASK(mode))) {
                    read_acv++;
                    uint64_t flags = MM_STAT_ACV_MASK |
                        (entry->fonr ? MM_STAT_FONR_MASK : 0);
                    return std::make_shared<DtbAcvFault>(req->getVaddr(),
                                                         req->getFlags(),
                                                         flags);
                }
                if (entry->fonr) {
                    read_acv++;
                    uint64_t flags = MM_STAT_FONR_MASK;
                    return std::make_shared<DtbPageFault>(req->getVaddr(),
                                                          req->getFlags(),
                                                          flags);
                }
            }
        }

        if (write)
            write_hits++;
        else
            read_hits++;
    }

    // check that the physical address is ok (catch bad physical addresses)
    if (req->getPaddr() & ~PAddrImplMask) {
        return std::make_shared<MachineCheckFault>();
    }

    return checkCacheability(req);
}

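// Return the entry at the current replacement (nlu) slot, optionally
// advancing the pointer to the next slot.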
TlbEntry &
TLB::index(bool advance)
{
    TlbEntry *entry = &table[nlu];

    if (advance)
        nextnlu();

    return *entry;
}

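// Atomic-mode translation: dispatch to the instruction or data path based on
// the access mode.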
Fault
TLB::translateAtomic(RequestPtr req, ThreadContext *tc, Mode mode)
{
    if (mode == Execute)
        return translateInst(req, tc);
    else
        return translateData(req, tc, mode == Write);
}

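// Timing-mode translation: this implementation never defers, so perform the
// atomic translation and signal completion immediately.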
void
TLB::translateTiming(RequestPtr req, ThreadContext *tc,
        Translation *translation, Mode mode)
{
    assert(translation);
    translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
}

Fault
TLB::translateFunctional(RequestPtr req, ThreadContext *tc, Mode mode)
{
    panic("Not implemented\n");
    return NoFault;
}

Fault
TLB::finalizePhysical(RequestPtr req, ThreadContext *tc, Mode mode) const
{
    return NoFault;
}

} // namespace AlphaISA

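// Factory hook: builds the TLB SimObject from its generated parameter object.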
AlphaISA::TLB *
AlphaTLBParams::create()
{
    return new AlphaISA::TLB(this);
}