/*
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * Copyright (c) 2003 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

/**
 * @file
 * Declarations of a non-full system Page Table.
 */
37#ifndef __MEM_PAGE_TABLE_HH__
38#define __MEM_PAGE_TABLE_HH__
39
40#include <string>
41
42#include "arch/isa_traits.hh"
43#include "arch/tlb.hh"
44#include "base/hashmap.hh"
45#include "base/types.hh"
46#include "config/the_isa.hh"
47#include "mem/request.hh"
48#include "sim/serialize.hh"
49#include "sim/system.hh"
50
51class ThreadContext;
52
53/**
54 * Declaration of base class for page table
55 */
56class PageTableBase
57{
58 protected:
59 struct cacheElement {
60 bool valid;
61 Addr vaddr;
62 TheISA::TlbEntry entry;
63 };
64
65 struct cacheElement pTableCache[3];
66
67 const Addr pageSize;
68 const Addr offsetMask;
69
70 const uint64_t pid;
71 const std::string _name;
72
73 public:
74
75 PageTableBase(const std::string &__name, uint64_t _pid,
76 Addr _pageSize = TheISA::PageBytes)
77 : pageSize(_pageSize), offsetMask(mask(floorLog2(_pageSize))),
78 pid(_pid), _name(__name)
79 {
80 assert(isPowerOf2(pageSize));
81 pTableCache[0].valid = false;
82 pTableCache[1].valid = false;
83 pTableCache[2].valid = false;
84 }
85
86 virtual ~PageTableBase() {};
87
1/*
2 * Copyright (c) 2014 Advanced Micro Devices, Inc.
3 * Copyright (c) 2003 The Regents of The University of Michigan
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * Authors: Steve Reinhardt
30 */
31
32/**
33 * @file
34 * Declarations of a non-full system Page Table.
35 */
36
37#ifndef __MEM_PAGE_TABLE_HH__
38#define __MEM_PAGE_TABLE_HH__
39
40#include <string>
41
42#include "arch/isa_traits.hh"
43#include "arch/tlb.hh"
44#include "base/hashmap.hh"
45#include "base/types.hh"
46#include "config/the_isa.hh"
47#include "mem/request.hh"
48#include "sim/serialize.hh"
49#include "sim/system.hh"
50
51class ThreadContext;
52
53/**
54 * Declaration of base class for page table
55 */
56class PageTableBase
57{
58 protected:
59 struct cacheElement {
60 bool valid;
61 Addr vaddr;
62 TheISA::TlbEntry entry;
63 };
64
65 struct cacheElement pTableCache[3];
66
67 const Addr pageSize;
68 const Addr offsetMask;
69
70 const uint64_t pid;
71 const std::string _name;
72
73 public:
74
75 PageTableBase(const std::string &__name, uint64_t _pid,
76 Addr _pageSize = TheISA::PageBytes)
77 : pageSize(_pageSize), offsetMask(mask(floorLog2(_pageSize))),
78 pid(_pid), _name(__name)
79 {
80 assert(isPowerOf2(pageSize));
81 pTableCache[0].valid = false;
82 pTableCache[1].valid = false;
83 pTableCache[2].valid = false;
84 }
85
86 virtual ~PageTableBase() {};
87
88 /* generic page table mapping flags
89 * unset | set
90 * bit 0 - no-clobber | clobber
91 * bit 1 - present | not-present
92 * bit 2 - cacheable | uncacheable
93 * bit 3 - read-write | read-only
94 */
95 enum MappingFlags : uint32_t {
96 Clobber = 1,
97 NotPresent = 2,
98 Uncacheable = 4,
99 ReadOnly = 8,
100 };
101
88 virtual void initState(ThreadContext* tc) = 0;
89
90 // for DPRINTF compatibility
91 const std::string name() const { return _name; }
92
93 Addr pageAlign(Addr a) { return (a & ~offsetMask); }
94 Addr pageOffset(Addr a) { return (a & offsetMask); }
95
102 virtual void initState(ThreadContext* tc) = 0;
103
104 // for DPRINTF compatibility
105 const std::string name() const { return _name; }
106
107 Addr pageAlign(Addr a) { return (a & ~offsetMask); }
108 Addr pageOffset(Addr a) { return (a & offsetMask); }
109
110 /**
111 * Maps a virtual memory region to a physical memory region.
112 * @param vaddr The starting virtual address of the region.
113 * @param paddr The starting physical address where the region is mapped.
114 * @param size The length of the region.
115 * @param flags Generic mapping flags that can be set by or-ing values
116 * from MappingFlags enum.
117 */
96 virtual void map(Addr vaddr, Addr paddr, int64_t size,
118 virtual void map(Addr vaddr, Addr paddr, int64_t size,
97 bool clobber = false) = 0;
119 uint64_t flags = 0) = 0;
98 virtual void remap(Addr vaddr, int64_t size, Addr new_vaddr) = 0;
99 virtual void unmap(Addr vaddr, int64_t size) = 0;
100
101 /**
102 * Check if any pages in a region are already allocated
103 * @param vaddr The starting virtual address of the region.
104 * @param size The length of the region.
105 * @return True if no pages in the region are mapped.
106 */
107 virtual bool isUnmapped(Addr vaddr, int64_t size) = 0;
108
109 /**
110 * Lookup function
111 * @param vaddr The virtual address.
112 * @return entry The page table entry corresponding to vaddr.
113 */
114 virtual bool lookup(Addr vaddr, TheISA::TlbEntry &entry) = 0;
115
116 /**
117 * Translate function
118 * @param vaddr The virtual address.
119 * @param paddr Physical address from translation.
120 * @return True if translation exists
121 */
122 bool translate(Addr vaddr, Addr &paddr);
123
124 /**
125 * Simplified translate function (just check for translation)
126 * @param vaddr The virtual address.
127 * @return True if translation exists
128 */
129 bool translate(Addr vaddr) { Addr dummy; return translate(vaddr, dummy); }
130
131 /**
132 * Perform a translation on the memory request, fills in paddr
133 * field of req.
134 * @param req The memory request.
135 */
136 Fault translate(RequestPtr req);
137
138 /**
139 * Update the page table cache.
140 * @param vaddr virtual address (page aligned) to check
141 * @param pte page table entry to return
142 */
143 inline void updateCache(Addr vaddr, TheISA::TlbEntry entry)
144 {
145 pTableCache[2].entry = pTableCache[1].entry;
146 pTableCache[2].vaddr = pTableCache[1].vaddr;
147 pTableCache[2].valid = pTableCache[1].valid;
148
149 pTableCache[1].entry = pTableCache[0].entry;
150 pTableCache[1].vaddr = pTableCache[0].vaddr;
151 pTableCache[1].valid = pTableCache[0].valid;
152
153 pTableCache[0].entry = entry;
154 pTableCache[0].vaddr = vaddr;
155 pTableCache[0].valid = true;
156 }
157
158 /**
159 * Erase an entry from the page table cache.
160 * @param vaddr virtual address (page aligned) to check
161 */
162 inline void eraseCacheEntry(Addr vaddr)
163 {
164 // Invalidate cached entries if necessary
165 if (pTableCache[0].valid && pTableCache[0].vaddr == vaddr) {
166 pTableCache[0].valid = false;
167 } else if (pTableCache[1].valid && pTableCache[1].vaddr == vaddr) {
168 pTableCache[1].valid = false;
169 } else if (pTableCache[2].valid && pTableCache[2].vaddr == vaddr) {
170 pTableCache[2].valid = false;
171 }
172 }
173
174 virtual void serialize(std::ostream &os) = 0;
175
176 virtual void unserialize(Checkpoint *cp, const std::string &section) = 0;
177};
178
179/**
180 * Declaration of functional page table.
181 */
182class FuncPageTable : public PageTableBase
183{
184 private:
185 typedef m5::hash_map<Addr, TheISA::TlbEntry> PTable;
186 typedef PTable::iterator PTableItr;
187 PTable pTable;
188
189 public:
190
191 FuncPageTable(const std::string &__name, uint64_t _pid,
192 Addr _pageSize = TheISA::PageBytes);
193
194 ~FuncPageTable();
195
196 void initState(ThreadContext* tc)
197 {
198 }
199
120 virtual void remap(Addr vaddr, int64_t size, Addr new_vaddr) = 0;
121 virtual void unmap(Addr vaddr, int64_t size) = 0;
122
123 /**
124 * Check if any pages in a region are already allocated
125 * @param vaddr The starting virtual address of the region.
126 * @param size The length of the region.
127 * @return True if no pages in the region are mapped.
128 */
129 virtual bool isUnmapped(Addr vaddr, int64_t size) = 0;
130
131 /**
132 * Lookup function
133 * @param vaddr The virtual address.
134 * @return entry The page table entry corresponding to vaddr.
135 */
136 virtual bool lookup(Addr vaddr, TheISA::TlbEntry &entry) = 0;
137
138 /**
139 * Translate function
140 * @param vaddr The virtual address.
141 * @param paddr Physical address from translation.
142 * @return True if translation exists
143 */
144 bool translate(Addr vaddr, Addr &paddr);
145
146 /**
147 * Simplified translate function (just check for translation)
148 * @param vaddr The virtual address.
149 * @return True if translation exists
150 */
151 bool translate(Addr vaddr) { Addr dummy; return translate(vaddr, dummy); }
152
153 /**
154 * Perform a translation on the memory request, fills in paddr
155 * field of req.
156 * @param req The memory request.
157 */
158 Fault translate(RequestPtr req);
159
160 /**
161 * Update the page table cache.
162 * @param vaddr virtual address (page aligned) to check
163 * @param pte page table entry to return
164 */
165 inline void updateCache(Addr vaddr, TheISA::TlbEntry entry)
166 {
167 pTableCache[2].entry = pTableCache[1].entry;
168 pTableCache[2].vaddr = pTableCache[1].vaddr;
169 pTableCache[2].valid = pTableCache[1].valid;
170
171 pTableCache[1].entry = pTableCache[0].entry;
172 pTableCache[1].vaddr = pTableCache[0].vaddr;
173 pTableCache[1].valid = pTableCache[0].valid;
174
175 pTableCache[0].entry = entry;
176 pTableCache[0].vaddr = vaddr;
177 pTableCache[0].valid = true;
178 }
179
180 /**
181 * Erase an entry from the page table cache.
182 * @param vaddr virtual address (page aligned) to check
183 */
184 inline void eraseCacheEntry(Addr vaddr)
185 {
186 // Invalidate cached entries if necessary
187 if (pTableCache[0].valid && pTableCache[0].vaddr == vaddr) {
188 pTableCache[0].valid = false;
189 } else if (pTableCache[1].valid && pTableCache[1].vaddr == vaddr) {
190 pTableCache[1].valid = false;
191 } else if (pTableCache[2].valid && pTableCache[2].vaddr == vaddr) {
192 pTableCache[2].valid = false;
193 }
194 }
195
196 virtual void serialize(std::ostream &os) = 0;
197
198 virtual void unserialize(Checkpoint *cp, const std::string &section) = 0;
199};
200
201/**
202 * Declaration of functional page table.
203 */
204class FuncPageTable : public PageTableBase
205{
206 private:
207 typedef m5::hash_map<Addr, TheISA::TlbEntry> PTable;
208 typedef PTable::iterator PTableItr;
209 PTable pTable;
210
211 public:
212
213 FuncPageTable(const std::string &__name, uint64_t _pid,
214 Addr _pageSize = TheISA::PageBytes);
215
216 ~FuncPageTable();
217
218 void initState(ThreadContext* tc)
219 {
220 }
221
200 void map(Addr vaddr, Addr paddr, int64_t size, bool clobber = false);
222 void map(Addr vaddr, Addr paddr, int64_t size,
223 uint64_t flags = 0);
201 void remap(Addr vaddr, int64_t size, Addr new_vaddr);
202 void unmap(Addr vaddr, int64_t size);
203
204 /**
205 * Check if any pages in a region are already allocated
206 * @param vaddr The starting virtual address of the region.
207 * @param size The length of the region.
208 * @return True if no pages in the region are mapped.
209 */
210 bool isUnmapped(Addr vaddr, int64_t size);
211
212 /**
213 * Lookup function
214 * @param vaddr The virtual address.
215 * @return entry The page table entry corresponding to vaddr.
216 */
217 bool lookup(Addr vaddr, TheISA::TlbEntry &entry);
218
219 void serialize(std::ostream &os);
220
221 void unserialize(Checkpoint *cp, const std::string &section);
222};
223
224/**
225 * Faux page table class indended to stop the usage of
226 * an architectural page table, when there is none defined
227 * for a particular ISA.
228 */
229class NoArchPageTable : public FuncPageTable
230{
231 public:
232 NoArchPageTable(const std::string &__name, uint64_t _pid, System *_sys,
233 Addr _pageSize = TheISA::PageBytes) : FuncPageTable(__name, _pid)
234 {
235 fatal("No architectural page table defined for this ISA.\n");
236 }
237};
238
239#endif // __MEM_PAGE_TABLE_HH__
224 void remap(Addr vaddr, int64_t size, Addr new_vaddr);
225 void unmap(Addr vaddr, int64_t size);
226
227 /**
228 * Check if any pages in a region are already allocated
229 * @param vaddr The starting virtual address of the region.
230 * @param size The length of the region.
231 * @return True if no pages in the region are mapped.
232 */
233 bool isUnmapped(Addr vaddr, int64_t size);
234
235 /**
236 * Lookup function
237 * @param vaddr The virtual address.
238 * @return entry The page table entry corresponding to vaddr.
239 */
240 bool lookup(Addr vaddr, TheISA::TlbEntry &entry);
241
242 void serialize(std::ostream &os);
243
244 void unserialize(Checkpoint *cp, const std::string &section);
245};
246
247/**
248 * Faux page table class indended to stop the usage of
249 * an architectural page table, when there is none defined
250 * for a particular ISA.
251 */
252class NoArchPageTable : public FuncPageTable
253{
254 public:
255 NoArchPageTable(const std::string &__name, uint64_t _pid, System *_sys,
256 Addr _pageSize = TheISA::PageBytes) : FuncPageTable(__name, _pid)
257 {
258 fatal("No architectural page table defined for this ISA.\n");
259 }
260};
261
262#endif // __MEM_PAGE_TABLE_HH__