page_table.hh (11168:f98eb2da15a4)
/*
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * Copyright (c) 2003 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

/**
 * @file
 * Declarations of a non-full system Page Table.
 */

#ifndef __MEM_PAGE_TABLE_HH__
#define __MEM_PAGE_TABLE_HH__

#include <string>
#include <unordered_map>

#include "arch/isa_traits.hh"
#include "arch/tlb.hh"
45#include "base/types.hh"
46#include "config/the_isa.hh"
47#include "mem/request.hh"
48#include "sim/serialize.hh"
49#include "sim/system.hh"
50
class ThreadContext;

/**
 * Declaration of base class for page table
 */
class PageTableBase : public Serializable
{
  protected:
    struct cacheElement {
        bool valid;
        Addr vaddr;
        TheISA::TlbEntry entry;
    };

    // Three-entry cache of recently used translations, most recent first.
    struct cacheElement pTableCache[3];

    const Addr pageSize;
    const Addr offsetMask;

    const uint64_t pid;
    const std::string _name;

  public:

    PageTableBase(const std::string &__name, uint64_t _pid,
                  Addr _pageSize = TheISA::PageBytes)
            : pageSize(_pageSize), offsetMask(mask(floorLog2(_pageSize))),
              pid(_pid), _name(__name)
    {
        assert(isPowerOf2(pageSize));
        pTableCache[0].valid = false;
        pTableCache[1].valid = false;
        pTableCache[2].valid = false;
    }
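
    // Worked example of the mask above (an illustration, assuming 4 KB
    // pages): floorLog2(0x1000) == 12, so offsetMask == mask(12) == 0xfff;
    // pageAlign() below clears those low bits and pageOffset() keeps them.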

    virtual ~PageTableBase() {}

    /* generic page table mapping flags
     *              unset | set
     * bit 0 - no-clobber | clobber
     * bit 1 -    present | not-present
     * bit 2 -  cacheable | uncacheable
     * bit 3 - read-write | read-only
     */
    enum MappingFlags : uint32_t {
        Clobber     = 1,
        NotPresent  = 2,
        Uncacheable = 4,
        ReadOnly    = 8,
    };
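
    // Illustrative use (hypothetical page table instance 'pt'): these
    // flags are or-ed together and passed to map() below, e.g.
    //   pt.map(vaddr, paddr, size, Clobber | ReadOnly);
    // requests a read-only mapping that may replace an existing one.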

    virtual void initState(ThreadContext* tc) = 0;

    // for DPRINTF compatibility
    const std::string name() const { return _name; }

    Addr pageAlign(Addr a) { return (a & ~offsetMask); }
    Addr pageOffset(Addr a) { return (a & offsetMask); }

    /**
     * Maps a virtual memory region to a physical memory region.
     * @param vaddr The starting virtual address of the region.
     * @param paddr The starting physical address where the region is mapped.
     * @param size The length of the region.
     * @param flags Generic mapping flags that can be set by or-ing values
     *              from MappingFlags enum.
     */
    virtual void map(Addr vaddr, Addr paddr, int64_t size,
                     uint64_t flags = 0) = 0;
    virtual void remap(Addr vaddr, int64_t size, Addr new_vaddr) = 0;
    virtual void unmap(Addr vaddr, int64_t size) = 0;
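
    // Illustrative call sequence (hypothetical addresses and instance
    // 'pt'): map a two-page region, move it to a new virtual address
    // with its physical backing intact, then tear it down:
    //   pt.map(0x4000, 0x90000, 2 * TheISA::PageBytes);
    //   pt.remap(0x4000, 2 * TheISA::PageBytes, 0x8000);
    //   pt.unmap(0x8000, 2 * TheISA::PageBytes);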

    /**
     * Check if any pages in a region are already allocated
     * @param vaddr The starting virtual address of the region.
     * @param size The length of the region.
     * @return True if no pages in the region are mapped.
     */
    virtual bool isUnmapped(Addr vaddr, int64_t size) = 0;

    /**
     * Lookup function
     * @param vaddr The virtual address.
     * @param entry The page table entry corresponding to vaddr, if found.
     * @return True if a mapping for vaddr exists.
     */
    virtual bool lookup(Addr vaddr, TheISA::TlbEntry &entry) = 0;

    /**
     * Translate function
     * @param vaddr The virtual address.
     * @param paddr Physical address from translation.
     * @return True if a translation exists.
     */
    bool translate(Addr vaddr, Addr &paddr);

    /**
     * Simplified translate function (just check for translation)
     * @param vaddr The virtual address.
     * @return True if a translation exists.
     */
    bool translate(Addr vaddr) { Addr dummy; return translate(vaddr, dummy); }

    /**
     * Perform a translation on the memory request and fill in the
     * paddr field of req.
     * @param req The memory request.
     */
    Fault translate(RequestPtr req);

    /**
     * Update the page table cache.
     * @param vaddr virtual address (page aligned) of the entry to cache
     * @param entry page table entry to cache for vaddr
     */
    inline void updateCache(Addr vaddr, TheISA::TlbEntry entry)
    {
        // Age the existing entries by one slot, dropping the oldest...
        pTableCache[2].entry = pTableCache[1].entry;
        pTableCache[2].vaddr = pTableCache[1].vaddr;
        pTableCache[2].valid = pTableCache[1].valid;

        pTableCache[1].entry = pTableCache[0].entry;
        pTableCache[1].vaddr = pTableCache[0].vaddr;
        pTableCache[1].valid = pTableCache[0].valid;

        // ...and install the new translation in the most-recent slot.
        pTableCache[0].entry = entry;
        pTableCache[0].vaddr = vaddr;
        pTableCache[0].valid = true;
    }

    /**
     * Erase an entry from the page table cache.
     * @param vaddr virtual address (page aligned) of the entry to erase
     */
    inline void eraseCacheEntry(Addr vaddr)
    {
        // Invalidate cached entries if necessary
        if (pTableCache[0].valid && pTableCache[0].vaddr == vaddr) {
            pTableCache[0].valid = false;
        } else if (pTableCache[1].valid && pTableCache[1].vaddr == vaddr) {
            pTableCache[1].valid = false;
        } else if (pTableCache[2].valid && pTableCache[2].vaddr == vaddr) {
            pTableCache[2].valid = false;
        }
    }
};
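
// Minimal usage sketch (illustrative only, assuming 4 KB pages and a
// concrete subclass such as the FuncPageTable declared below):
//
//   FuncPageTable pt("ptable", /* pid */ 100);
//   pt.map(0x1000, 0x40000, TheISA::PageBytes);  // one page, default flags
//   Addr paddr;
//   if (pt.translate(0x1234, paddr)) {
//       // paddr == 0x40234: frame base 0x40000 plus pageOffset(0x1234)
//   }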

/**
 * Declaration of functional page table.
 */
class FuncPageTable : public PageTableBase
{
  private:
45#include "base/types.hh"
46#include "config/the_isa.hh"
47#include "mem/request.hh"
48#include "sim/serialize.hh"
49#include "sim/system.hh"
50
51class ThreadContext;
52
53/**
54 * Declaration of base class for page table
55 */
56class PageTableBase : public Serializable
57{
58 protected:
59 struct cacheElement {
60 bool valid;
61 Addr vaddr;
62 TheISA::TlbEntry entry;
63 };
64
65 struct cacheElement pTableCache[3];
66
67 const Addr pageSize;
68 const Addr offsetMask;
69
70 const uint64_t pid;
71 const std::string _name;
72
73 public:
74
75 PageTableBase(const std::string &__name, uint64_t _pid,
76 Addr _pageSize = TheISA::PageBytes)
77 : pageSize(_pageSize), offsetMask(mask(floorLog2(_pageSize))),
78 pid(_pid), _name(__name)
79 {
80 assert(isPowerOf2(pageSize));
81 pTableCache[0].valid = false;
82 pTableCache[1].valid = false;
83 pTableCache[2].valid = false;
84 }
85
86 virtual ~PageTableBase() {};
87
88 /* generic page table mapping flags
89 * unset | set
90 * bit 0 - no-clobber | clobber
91 * bit 1 - present | not-present
92 * bit 2 - cacheable | uncacheable
93 * bit 3 - read-write | read-only
94 */
95 enum MappingFlags : uint32_t {
96 Clobber = 1,
97 NotPresent = 2,
98 Uncacheable = 4,
99 ReadOnly = 8,
100 };
101
102 virtual void initState(ThreadContext* tc) = 0;
103
104 // for DPRINTF compatibility
105 const std::string name() const { return _name; }
106
107 Addr pageAlign(Addr a) { return (a & ~offsetMask); }
108 Addr pageOffset(Addr a) { return (a & offsetMask); }
109
110 /**
111 * Maps a virtual memory region to a physical memory region.
112 * @param vaddr The starting virtual address of the region.
113 * @param paddr The starting physical address where the region is mapped.
114 * @param size The length of the region.
115 * @param flags Generic mapping flags that can be set by or-ing values
116 * from MappingFlags enum.
117 */
118 virtual void map(Addr vaddr, Addr paddr, int64_t size,
119 uint64_t flags = 0) = 0;
120 virtual void remap(Addr vaddr, int64_t size, Addr new_vaddr) = 0;
121 virtual void unmap(Addr vaddr, int64_t size) = 0;
122
123 /**
124 * Check if any pages in a region are already allocated
125 * @param vaddr The starting virtual address of the region.
126 * @param size The length of the region.
127 * @return True if no pages in the region are mapped.
128 */
129 virtual bool isUnmapped(Addr vaddr, int64_t size) = 0;
130
131 /**
132 * Lookup function
133 * @param vaddr The virtual address.
134 * @return entry The page table entry corresponding to vaddr.
135 */
136 virtual bool lookup(Addr vaddr, TheISA::TlbEntry &entry) = 0;
137
138 /**
139 * Translate function
140 * @param vaddr The virtual address.
141 * @param paddr Physical address from translation.
142 * @return True if translation exists
143 */
144 bool translate(Addr vaddr, Addr &paddr);
145
146 /**
147 * Simplified translate function (just check for translation)
148 * @param vaddr The virtual address.
149 * @return True if translation exists
150 */
151 bool translate(Addr vaddr) { Addr dummy; return translate(vaddr, dummy); }
152
153 /**
154 * Perform a translation on the memory request, fills in paddr
155 * field of req.
156 * @param req The memory request.
157 */
158 Fault translate(RequestPtr req);
159
160 /**
161 * Update the page table cache.
162 * @param vaddr virtual address (page aligned) to check
163 * @param pte page table entry to return
164 */
165 inline void updateCache(Addr vaddr, TheISA::TlbEntry entry)
166 {
167 pTableCache[2].entry = pTableCache[1].entry;
168 pTableCache[2].vaddr = pTableCache[1].vaddr;
169 pTableCache[2].valid = pTableCache[1].valid;
170
171 pTableCache[1].entry = pTableCache[0].entry;
172 pTableCache[1].vaddr = pTableCache[0].vaddr;
173 pTableCache[1].valid = pTableCache[0].valid;
174
175 pTableCache[0].entry = entry;
176 pTableCache[0].vaddr = vaddr;
177 pTableCache[0].valid = true;
178 }
179
180 /**
181 * Erase an entry from the page table cache.
182 * @param vaddr virtual address (page aligned) to check
183 */
184 inline void eraseCacheEntry(Addr vaddr)
185 {
186 // Invalidate cached entries if necessary
187 if (pTableCache[0].valid && pTableCache[0].vaddr == vaddr) {
188 pTableCache[0].valid = false;
189 } else if (pTableCache[1].valid && pTableCache[1].vaddr == vaddr) {
190 pTableCache[1].valid = false;
191 } else if (pTableCache[2].valid && pTableCache[2].vaddr == vaddr) {
192 pTableCache[2].valid = false;
193 }
194 }
195};
196
197/**
198 * Declaration of functional page table.
199 */
200class FuncPageTable : public PageTableBase
201{
202 private:
    typedef std::unordered_map<Addr, TheISA::TlbEntry> PTable;
    typedef PTable::iterator PTableItr;
    PTable pTable;

  public:

    FuncPageTable(const std::string &__name, uint64_t _pid,
                  Addr _pageSize = TheISA::PageBytes);

    ~FuncPageTable();

    void initState(ThreadContext* tc)
    {
    }

    void map(Addr vaddr, Addr paddr, int64_t size,
             uint64_t flags = 0);
    void remap(Addr vaddr, int64_t size, Addr new_vaddr);
    void unmap(Addr vaddr, int64_t size);

    /**
     * Check if any pages in a region are already allocated
     * @param vaddr The starting virtual address of the region.
     * @param size The length of the region.
     * @return True if no pages in the region are mapped.
     */
    bool isUnmapped(Addr vaddr, int64_t size);

    /**
     * Lookup function
     * @param vaddr The virtual address.
     * @param entry The page table entry corresponding to vaddr, if found.
     * @return True if a mapping for vaddr exists.
     */
    bool lookup(Addr vaddr, TheISA::TlbEntry &entry);

    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

/**
 * Faux page table class intended to prevent the use of an
 * architectural page table when none is defined for a
 * particular ISA.
 */
class NoArchPageTable : public FuncPageTable
{
  public:
    NoArchPageTable(const std::string &__name, uint64_t _pid, System *_sys,
                    Addr _pageSize = TheISA::PageBytes)
        : FuncPageTable(__name, _pid)
    {
        fatal("No architectural page table defined for this ISA.\n");
    }
};

#endif // __MEM_PAGE_TABLE_HH__