page_table.hh revision 11886:43b882cada33
1/*
2 * Copyright (c) 2014 Advanced Micro Devices, Inc.
3 * Copyright (c) 2003 The Regents of The University of Michigan
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * Authors: Steve Reinhardt
30 */
31
32/**
33 * @file
34 * Declarations of a non-full system Page Table.
35 */
36
37#ifndef __MEM_PAGE_TABLE_HH__
38#define __MEM_PAGE_TABLE_HH__
39
#include <cstdint>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "arch/isa_traits.hh"
#include "arch/tlb.hh"
#include "base/intmath.hh"
#include "base/types.hh"
#include "config/the_isa.hh"
#include "mem/request.hh"
#include "sim/serialize.hh"
50
51class ThreadContext;
52class System;
53
54/**
55 * Declaration of base class for page table
56 */
57class PageTableBase : public Serializable
58{
59  protected:
60    struct cacheElement {
61        bool valid;
62        Addr vaddr;
63        TheISA::TlbEntry entry;
64    };
65
66    struct cacheElement pTableCache[3];
67
68    const Addr pageSize;
69    const Addr offsetMask;
70
71    const uint64_t pid;
72    const std::string _name;
73
74  public:
75
76    PageTableBase(const std::string &__name, uint64_t _pid,
77              Addr _pageSize = TheISA::PageBytes)
78            : pageSize(_pageSize), offsetMask(mask(floorLog2(_pageSize))),
79              pid(_pid), _name(__name)
80    {
81        assert(isPowerOf2(pageSize));
82        pTableCache[0].valid = false;
83        pTableCache[1].valid = false;
84        pTableCache[2].valid = false;
85    }
86
87    virtual ~PageTableBase() {};
88
89    /* generic page table mapping flags
90     *              unset | set
91     * bit 0 - no-clobber | clobber
92     * bit 1 - present    | not-present
93     * bit 2 - cacheable  | uncacheable
94     * bit 3 - read-write | read-only
95     */
96    enum MappingFlags : uint32_t {
97        Zero        = 0,
98        Clobber     = 1,
99        NotPresent  = 2,
100        Uncacheable = 4,
101        ReadOnly    = 8,
102    };
103
104    virtual void initState(ThreadContext* tc) = 0;
105
106    // for DPRINTF compatibility
107    const std::string name() const { return _name; }
108
109    Addr pageAlign(Addr a)  { return (a & ~offsetMask); }
110    Addr pageOffset(Addr a) { return (a &  offsetMask); }
111
112    /**
113     * Maps a virtual memory region to a physical memory region.
114     * @param vaddr The starting virtual address of the region.
115     * @param paddr The starting physical address where the region is mapped.
116     * @param size The length of the region.
117     * @param flags Generic mapping flags that can be set by or-ing values
118     *              from MappingFlags enum.
119     */
120    virtual void map(Addr vaddr, Addr paddr, int64_t size,
121                     uint64_t flags = 0) = 0;
122    virtual void remap(Addr vaddr, int64_t size, Addr new_vaddr) = 0;
123    virtual void unmap(Addr vaddr, int64_t size) = 0;
124
125    /**
126     * Check if any pages in a region are already allocated
127     * @param vaddr The starting virtual address of the region.
128     * @param size The length of the region.
129     * @return True if no pages in the region are mapped.
130     */
131    virtual bool isUnmapped(Addr vaddr, int64_t size) = 0;
132
133    /**
134     * Lookup function
135     * @param vaddr The virtual address.
136     * @return entry The page table entry corresponding to vaddr.
137     */
138    virtual bool lookup(Addr vaddr, TheISA::TlbEntry &entry) = 0;
139
140    /**
141     * Translate function
142     * @param vaddr The virtual address.
143     * @param paddr Physical address from translation.
144     * @return True if translation exists
145     */
146    bool translate(Addr vaddr, Addr &paddr);
147
148    /**
149     * Simplified translate function (just check for translation)
150     * @param vaddr The virtual address.
151     * @return True if translation exists
152     */
153    bool translate(Addr vaddr) { Addr dummy; return translate(vaddr, dummy); }
154
155    /**
156     * Perform a translation on the memory request, fills in paddr
157     * field of req.
158     * @param req The memory request.
159     */
160    Fault translate(RequestPtr req);
161
162    /**
163     * Update the page table cache.
164     * @param vaddr virtual address (page aligned) to check
165     * @param pte page table entry to return
166     */
167    inline void updateCache(Addr vaddr, TheISA::TlbEntry entry)
168    {
169        pTableCache[2].entry = pTableCache[1].entry;
170        pTableCache[2].vaddr = pTableCache[1].vaddr;
171        pTableCache[2].valid = pTableCache[1].valid;
172
173        pTableCache[1].entry = pTableCache[0].entry;
174        pTableCache[1].vaddr = pTableCache[0].vaddr;
175        pTableCache[1].valid = pTableCache[0].valid;
176
177        pTableCache[0].entry = entry;
178        pTableCache[0].vaddr = vaddr;
179        pTableCache[0].valid = true;
180    }
181
182    /**
183     * Erase an entry from the page table cache.
184     * @param vaddr virtual address (page aligned) to check
185     */
186    inline void eraseCacheEntry(Addr vaddr)
187    {
188        // Invalidate cached entries if necessary
189        if (pTableCache[0].valid && pTableCache[0].vaddr == vaddr) {
190            pTableCache[0].valid = false;
191        } else if (pTableCache[1].valid && pTableCache[1].vaddr == vaddr) {
192            pTableCache[1].valid = false;
193        } else if (pTableCache[2].valid && pTableCache[2].vaddr == vaddr) {
194            pTableCache[2].valid = false;
195        }
196    }
197
198    virtual void getMappings(std::vector<std::pair<Addr, Addr>>
199                             *addr_mappings) {};
200};
201
202/**
203 * Declaration of functional page table.
204 */
205class FuncPageTable : public PageTableBase
206{
207  private:
208    typedef std::unordered_map<Addr, TheISA::TlbEntry> PTable;
209    typedef PTable::iterator PTableItr;
210    PTable pTable;
211
212  public:
213
214    FuncPageTable(const std::string &__name, uint64_t _pid,
215                  Addr _pageSize = TheISA::PageBytes);
216
217    ~FuncPageTable();
218
219    void initState(ThreadContext* tc) override
220    {
221    }
222
223    void map(Addr vaddr, Addr paddr, int64_t size,
224             uint64_t flags = 0) override;
225    void remap(Addr vaddr, int64_t size, Addr new_vaddr) override;
226    void unmap(Addr vaddr, int64_t size) override;
227
228    /**
229     * Check if any pages in a region are already allocated
230     * @param vaddr The starting virtual address of the region.
231     * @param size The length of the region.
232     * @return True if no pages in the region are mapped.
233     */
234    bool isUnmapped(Addr vaddr, int64_t size) override;
235
236    /**
237     * Lookup function
238     * @param vaddr The virtual address.
239     * @return entry The page table entry corresponding to vaddr.
240     */
241    bool lookup(Addr vaddr, TheISA::TlbEntry &entry) override;
242
243    void serialize(CheckpointOut &cp) const override;
244    void unserialize(CheckpointIn &cp) override;
245
246    void getMappings(std::vector<std::pair<Addr, Addr>> *addr_maps) override;
247};
248
249/**
250 * Faux page table class indended to stop the usage of
251 * an architectural page table, when there is none defined
252 * for a particular ISA.
253 */
254class NoArchPageTable : public FuncPageTable
255{
256  public:
257    NoArchPageTable(const std::string &__name, uint64_t _pid, System *_sys,
258              Addr _pageSize = TheISA::PageBytes) : FuncPageTable(__name, _pid)
259    {
260        fatal("No architectural page table defined for this ISA.\n");
261    }
262};
263
264#endif // __MEM_PAGE_TABLE_HH__
265