// page_table.hh revision 10558:426665ec11a9
/*
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * Copyright (c) 2003 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */
317506Stjones1@inf.ed.ac.uk
/**
 * @file
 * Declarations of a non-full system Page Table.
 */

#ifndef __MEM_PAGE_TABLE_HH__
#define __MEM_PAGE_TABLE_HH__

#include <cassert>
#include <cstdint>
#include <string>

#include "arch/isa_traits.hh"
#include "arch/tlb.hh"
#include "base/hashmap.hh"
#include "base/types.hh"
#include "config/the_isa.hh"
#include "mem/request.hh"
#include "sim/serialize.hh"
#include "sim/system.hh"

519920Syasuko.eckert@amd.comclass ThreadContext;
529920Syasuko.eckert@amd.com
539920Syasuko.eckert@amd.com/**
547506Stjones1@inf.ed.ac.uk * Declaration of base class for page table
557506Stjones1@inf.ed.ac.uk */
567506Stjones1@inf.ed.ac.ukclass PageTableBase
577506Stjones1@inf.ed.ac.uk{
587720Sgblack@eecs.umich.edu  protected:
597506Stjones1@inf.ed.ac.uk    struct cacheElement {
607506Stjones1@inf.ed.ac.uk        bool valid;
618787Sgblack@eecs.umich.edu        Addr vaddr;
628787Sgblack@eecs.umich.edu        TheISA::TlbEntry entry;
638787Sgblack@eecs.umich.edu    };
648787Sgblack@eecs.umich.edu
658787Sgblack@eecs.umich.edu    struct cacheElement pTableCache[3];
668787Sgblack@eecs.umich.edu
678787Sgblack@eecs.umich.edu    const Addr pageSize;
687693SAli.Saidi@ARM.com    const Addr offsetMask;
697693SAli.Saidi@ARM.com
707693SAli.Saidi@ARM.com    const uint64_t pid;
717693SAli.Saidi@ARM.com    const std::string _name;
727693SAli.Saidi@ARM.com
737693SAli.Saidi@ARM.com  public:
748791Sgblack@eecs.umich.edu
758791Sgblack@eecs.umich.edu    PageTableBase(const std::string &__name, uint64_t _pid,
768791Sgblack@eecs.umich.edu              Addr _pageSize = TheISA::PageBytes)
778791Sgblack@eecs.umich.edu            : pageSize(_pageSize), offsetMask(mask(floorLog2(_pageSize))),
788791Sgblack@eecs.umich.edu              pid(_pid), _name(__name)
798791Sgblack@eecs.umich.edu    {
807693SAli.Saidi@ARM.com        assert(isPowerOf2(pageSize));
817811Ssteve.reinhardt@amd.com        pTableCache[0].valid = false;
82        pTableCache[1].valid = false;
83        pTableCache[2].valid = false;
84    }
85
86    virtual ~PageTableBase() {};
87
88    /* generic page table mapping flags
89     *              unset | set
90     * bit 0 - no-clobber | clobber
91     * bit 1 - present    | not-present
92     * bit 2 - cacheable  | uncacheable
93     * bit 3 - read-write | read-only
94     */
95    enum MappingFlags : uint32_t {
96        Clobber     = 1,
97        NotPresent  = 2,
98        Uncacheable = 4,
99        ReadOnly    = 8,
100    };
101
102    virtual void initState(ThreadContext* tc) = 0;
103
104    // for DPRINTF compatibility
105    const std::string name() const { return _name; }
106
107    Addr pageAlign(Addr a)  { return (a & ~offsetMask); }
108    Addr pageOffset(Addr a) { return (a &  offsetMask); }
109
110    /**
111     * Maps a virtual memory region to a physical memory region.
112     * @param vaddr The starting virtual address of the region.
113     * @param paddr The starting physical address where the region is mapped.
114     * @param size The length of the region.
115     * @param flags Generic mapping flags that can be set by or-ing values
116     *              from MappingFlags enum.
117     */
118    virtual void map(Addr vaddr, Addr paddr, int64_t size,
119                     uint64_t flags = 0) = 0;
120    virtual void remap(Addr vaddr, int64_t size, Addr new_vaddr) = 0;
121    virtual void unmap(Addr vaddr, int64_t size) = 0;
122
123    /**
124     * Check if any pages in a region are already allocated
125     * @param vaddr The starting virtual address of the region.
126     * @param size The length of the region.
127     * @return True if no pages in the region are mapped.
128     */
129    virtual bool isUnmapped(Addr vaddr, int64_t size) = 0;
130
131    /**
132     * Lookup function
133     * @param vaddr The virtual address.
134     * @return entry The page table entry corresponding to vaddr.
135     */
136    virtual bool lookup(Addr vaddr, TheISA::TlbEntry &entry) = 0;
137
138    /**
139     * Translate function
140     * @param vaddr The virtual address.
141     * @param paddr Physical address from translation.
142     * @return True if translation exists
143     */
144    bool translate(Addr vaddr, Addr &paddr);
145
146    /**
147     * Simplified translate function (just check for translation)
148     * @param vaddr The virtual address.
149     * @return True if translation exists
150     */
151    bool translate(Addr vaddr) { Addr dummy; return translate(vaddr, dummy); }
152
153    /**
154     * Perform a translation on the memory request, fills in paddr
155     * field of req.
156     * @param req The memory request.
157     */
158    Fault translate(RequestPtr req);
159
160    /**
161     * Update the page table cache.
162     * @param vaddr virtual address (page aligned) to check
163     * @param pte page table entry to return
164     */
165    inline void updateCache(Addr vaddr, TheISA::TlbEntry entry)
166    {
167        pTableCache[2].entry = pTableCache[1].entry;
168        pTableCache[2].vaddr = pTableCache[1].vaddr;
169        pTableCache[2].valid = pTableCache[1].valid;
170
171        pTableCache[1].entry = pTableCache[0].entry;
172        pTableCache[1].vaddr = pTableCache[0].vaddr;
173        pTableCache[1].valid = pTableCache[0].valid;
174
175        pTableCache[0].entry = entry;
176        pTableCache[0].vaddr = vaddr;
177        pTableCache[0].valid = true;
178    }
179
180    /**
181     * Erase an entry from the page table cache.
182     * @param vaddr virtual address (page aligned) to check
183     */
184    inline void eraseCacheEntry(Addr vaddr)
185    {
186        // Invalidate cached entries if necessary
187        if (pTableCache[0].valid && pTableCache[0].vaddr == vaddr) {
188            pTableCache[0].valid = false;
189        } else if (pTableCache[1].valid && pTableCache[1].vaddr == vaddr) {
190            pTableCache[1].valid = false;
191        } else if (pTableCache[2].valid && pTableCache[2].vaddr == vaddr) {
192            pTableCache[2].valid = false;
193        }
194    }
195
196    virtual void serialize(std::ostream &os) = 0;
197
198    virtual void unserialize(Checkpoint *cp, const std::string &section) = 0;
199};
200
201/**
202 * Declaration of functional page table.
203 */
204class FuncPageTable : public PageTableBase
205{
206  private:
207    typedef m5::hash_map<Addr, TheISA::TlbEntry> PTable;
208    typedef PTable::iterator PTableItr;
209    PTable pTable;
210
211  public:
212
213    FuncPageTable(const std::string &__name, uint64_t _pid,
214                  Addr _pageSize = TheISA::PageBytes);
215
216    ~FuncPageTable();
217
218    void initState(ThreadContext* tc)
219    {
220    }
221
222    void map(Addr vaddr, Addr paddr, int64_t size,
223             uint64_t flags = 0);
224    void remap(Addr vaddr, int64_t size, Addr new_vaddr);
225    void unmap(Addr vaddr, int64_t size);
226
227    /**
228     * Check if any pages in a region are already allocated
229     * @param vaddr The starting virtual address of the region.
230     * @param size The length of the region.
231     * @return True if no pages in the region are mapped.
232     */
233    bool isUnmapped(Addr vaddr, int64_t size);
234
235    /**
236     * Lookup function
237     * @param vaddr The virtual address.
238     * @return entry The page table entry corresponding to vaddr.
239     */
240    bool lookup(Addr vaddr, TheISA::TlbEntry &entry);
241
242    void serialize(std::ostream &os);
243
244    void unserialize(Checkpoint *cp, const std::string &section);
245};
246
247/**
248 * Faux page table class indended to stop the usage of
249 * an architectural page table, when there is none defined
250 * for a particular ISA.
251 */
252class NoArchPageTable : public FuncPageTable
253{
254  public:
255    NoArchPageTable(const std::string &__name, uint64_t _pid, System *_sys,
256              Addr _pageSize = TheISA::PageBytes) : FuncPageTable(__name, _pid)
257    {
258        fatal("No architectural page table defined for this ISA.\n");
259    }
260};
261
262#endif // __MEM_PAGE_TABLE_HH__
263