// page_table.hh revision 11294:a368064a2ab5
/*
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * Copyright (c) 2003 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

/**
 * @file
 * Declarations of a non-full system Page Table.
 */

#ifndef __MEM_PAGE_TABLE_HH__
#define __MEM_PAGE_TABLE_HH__

#include <string>
#include <unordered_map>

#include "arch/isa_traits.hh"
#include "arch/tlb.hh"
#include "base/types.hh"
#include "config/the_isa.hh"
#include "mem/request.hh"
#include "sim/serialize.hh"
#include "sim/system.hh"

class ThreadContext;

53/**
54 * Declaration of base class for page table
55 */
56class PageTableBase : public Serializable
57{
58  protected:
59    struct cacheElement {
60        bool valid;
61        Addr vaddr;
62        TheISA::TlbEntry entry;
63    };
64
65    struct cacheElement pTableCache[3];
66
67    const Addr pageSize;
68    const Addr offsetMask;
69
70    const uint64_t pid;
71    const std::string _name;
72
73  public:
74
75    PageTableBase(const std::string &__name, uint64_t _pid,
76              Addr _pageSize = TheISA::PageBytes)
77            : pageSize(_pageSize), offsetMask(mask(floorLog2(_pageSize))),
78              pid(_pid), _name(__name)
79    {
80        assert(isPowerOf2(pageSize));
81        pTableCache[0].valid = false;
82        pTableCache[1].valid = false;
83        pTableCache[2].valid = false;
84    }
85
86    virtual ~PageTableBase() {};
87
88    /* generic page table mapping flags
89     *              unset | set
90     * bit 0 - no-clobber | clobber
91     * bit 1 - present    | not-present
92     * bit 2 - cacheable  | uncacheable
93     * bit 3 - read-write | read-only
94     */
95    enum MappingFlags : uint32_t {
96        Zero        = 0,
97        Clobber     = 1,
98        NotPresent  = 2,
99        Uncacheable = 4,
100        ReadOnly    = 8,
101    };
102
103    virtual void initState(ThreadContext* tc) = 0;
104
105    // for DPRINTF compatibility
106    const std::string name() const { return _name; }
107
108    Addr pageAlign(Addr a)  { return (a & ~offsetMask); }
109    Addr pageOffset(Addr a) { return (a &  offsetMask); }
110
111    /**
112     * Maps a virtual memory region to a physical memory region.
113     * @param vaddr The starting virtual address of the region.
114     * @param paddr The starting physical address where the region is mapped.
115     * @param size The length of the region.
116     * @param flags Generic mapping flags that can be set by or-ing values
117     *              from MappingFlags enum.
118     */
119    virtual void map(Addr vaddr, Addr paddr, int64_t size,
120                     uint64_t flags = 0) = 0;
121    virtual void remap(Addr vaddr, int64_t size, Addr new_vaddr) = 0;
122    virtual void unmap(Addr vaddr, int64_t size) = 0;
123
124    /**
125     * Check if any pages in a region are already allocated
126     * @param vaddr The starting virtual address of the region.
127     * @param size The length of the region.
128     * @return True if no pages in the region are mapped.
129     */
130    virtual bool isUnmapped(Addr vaddr, int64_t size) = 0;
131
132    /**
133     * Lookup function
134     * @param vaddr The virtual address.
135     * @return entry The page table entry corresponding to vaddr.
136     */
137    virtual bool lookup(Addr vaddr, TheISA::TlbEntry &entry) = 0;
138
139    /**
140     * Translate function
141     * @param vaddr The virtual address.
142     * @param paddr Physical address from translation.
143     * @return True if translation exists
144     */
145    bool translate(Addr vaddr, Addr &paddr);
146
147    /**
148     * Simplified translate function (just check for translation)
149     * @param vaddr The virtual address.
150     * @return True if translation exists
151     */
152    bool translate(Addr vaddr) { Addr dummy; return translate(vaddr, dummy); }
153
154    /**
155     * Perform a translation on the memory request, fills in paddr
156     * field of req.
157     * @param req The memory request.
158     */
159    Fault translate(RequestPtr req);
160
161    /**
162     * Update the page table cache.
163     * @param vaddr virtual address (page aligned) to check
164     * @param pte page table entry to return
165     */
166    inline void updateCache(Addr vaddr, TheISA::TlbEntry entry)
167    {
168        pTableCache[2].entry = pTableCache[1].entry;
169        pTableCache[2].vaddr = pTableCache[1].vaddr;
170        pTableCache[2].valid = pTableCache[1].valid;
171
172        pTableCache[1].entry = pTableCache[0].entry;
173        pTableCache[1].vaddr = pTableCache[0].vaddr;
174        pTableCache[1].valid = pTableCache[0].valid;
175
176        pTableCache[0].entry = entry;
177        pTableCache[0].vaddr = vaddr;
178        pTableCache[0].valid = true;
179    }
180
181    /**
182     * Erase an entry from the page table cache.
183     * @param vaddr virtual address (page aligned) to check
184     */
185    inline void eraseCacheEntry(Addr vaddr)
186    {
187        // Invalidate cached entries if necessary
188        if (pTableCache[0].valid && pTableCache[0].vaddr == vaddr) {
189            pTableCache[0].valid = false;
190        } else if (pTableCache[1].valid && pTableCache[1].vaddr == vaddr) {
191            pTableCache[1].valid = false;
192        } else if (pTableCache[2].valid && pTableCache[2].vaddr == vaddr) {
193            pTableCache[2].valid = false;
194        }
195    }
196};
197
/**
 * Declaration of functional page table.
 */
class FuncPageTable : public PageTableBase
{
  private:
    // Backing store: hash map from page-aligned virtual address to its
    // translation entry.
    typedef std::unordered_map<Addr, TheISA::TlbEntry> PTable;
    typedef PTable::iterator PTableItr;
    PTable pTable;

  public:

    FuncPageTable(const std::string &__name, uint64_t _pid,
                  Addr _pageSize = TheISA::PageBytes);

    ~FuncPageTable();

    // A purely functional page table needs no per-thread architectural
    // state, so initState is intentionally a no-op.
    void initState(ThreadContext* tc) override
    {
    }

    void map(Addr vaddr, Addr paddr, int64_t size,
             uint64_t flags = 0) override;
    void remap(Addr vaddr, int64_t size, Addr new_vaddr) override;
    void unmap(Addr vaddr, int64_t size) override;

    /**
     * Check if any pages in a region are already allocated
     * @param vaddr The starting virtual address of the region.
     * @param size The length of the region.
     * @return True if no pages in the region are mapped.
     */
    bool isUnmapped(Addr vaddr, int64_t size) override;

    /**
     * Lookup function
     * @param vaddr The virtual address.
     * @return entry The page table entry corresponding to vaddr.
     */
    bool lookup(Addr vaddr, TheISA::TlbEntry &entry) override;

    // Checkpointing support: dump/restore the vaddr -> TlbEntry map.
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

243/**
244 * Faux page table class indended to stop the usage of
245 * an architectural page table, when there is none defined
246 * for a particular ISA.
247 */
248class NoArchPageTable : public FuncPageTable
249{
250  public:
251    NoArchPageTable(const std::string &__name, uint64_t _pid, System *_sys,
252              Addr _pageSize = TheISA::PageBytes) : FuncPageTable(__name, _pid)
253    {
254        fatal("No architectural page table defined for this ISA.\n");
255    }
256};
257
#endif // __MEM_PAGE_TABLE_HH__