page_table.hh revision 12442:e003b72b46ac
1/* 2 * Copyright (c) 2014 Advanced Micro Devices, Inc. 3 * Copyright (c) 2003 The Regents of The University of Michigan 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer; 10 * redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution; 13 * neither the name of the copyright holders nor the names of its 14 * contributors may be used to endorse or promote products derived from 15 * this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 * 29 * Authors: Steve Reinhardt 30 */ 31 32/** 33 * @file 34 * Declarations of a non-full system Page Table. 
 */

#ifndef __MEM_PAGE_TABLE_HH__
#define __MEM_PAGE_TABLE_HH__

#include <string>
#include <unordered_map>

#include "arch/isa_traits.hh"
#include "arch/tlb.hh"
#include "base/intmath.hh"
#include "base/types.hh"
#include "config/the_isa.hh"
#include "mem/request.hh"
#include "sim/serialize.hh"

class ThreadContext;
class System;

/**
 * Declaration of base class for page table
 */
class PageTableBase : public Serializable
{
  protected:
    /** One slot of the small translation cache below. */
    struct cacheElement {
        Addr vaddr;
        TheISA::TlbEntry *entry;
    };

    /**
     * Three-entry, most-recently-used-first translation cache.
     * Slot 0 holds the most recent insertion; updateCache() shifts
     * older entries towards slot 2 and returns whatever falls off the
     * end. Unused slots are marked with a null entry pointer.
     */
    struct cacheElement pTableCache[3];

    const Addr pageSize;    // page size in bytes; asserted power of two
    const Addr offsetMask;  // mask selecting the offset-within-page bits

    const uint64_t pid;         // id of the process owning this table
    const std::string _name;    // object name, reported via name()

  public:

    /**
     * @param __name Name reported by name() (for DPRINTF compatibility).
     * @param _pid Id of the owning process.
     * @param _pageSize Page size in bytes; must be a power of two.
     */
    PageTableBase(const std::string &__name, uint64_t _pid, Addr _pageSize)
        : pageSize(_pageSize), offsetMask(mask(floorLog2(_pageSize))),
          pid(_pid), _name(__name)
    {
        assert(isPowerOf2(pageSize));
        // Start with an empty translation cache.
        pTableCache[0].entry = nullptr;
        pTableCache[1].entry = nullptr;
        pTableCache[2].entry = nullptr;
    }

    virtual ~PageTableBase() {};

    /* generic page table mapping flags
     *              unset | set
     * bit 0 - no-clobber | clobber
     * bit 1 -    present | not-present
     * bit 2 -  cacheable | uncacheable
     * bit 3 - read-write | read-only
     */
    enum MappingFlags : uint32_t {
        Zero        = 0,
        Clobber     = 1,
        NotPresent  = 2,
        Uncacheable = 4,
        ReadOnly    = 8,
    };

    /** Per-ISA initialization hook; implemented by subclasses. */
    virtual void initState(ThreadContext* tc) = 0;

    // for DPRINTF compatibility
    const std::string name() const { return _name; }

    /** Strip the offset bits, returning the page-aligned base of a. */
    Addr pageAlign(Addr a)  { return (a & ~offsetMask); }
    /** Return only the offset-within-page bits of a. */
    Addr pageOffset(Addr a) { return (a & offsetMask); }

    /**
     * Maps a virtual memory region to a physical memory region.
     * @param vaddr The starting virtual address of the region.
     * @param paddr The starting physical address where the region is mapped.
     * @param size The length of the region.
     * @param flags Generic mapping flags that can be set by or-ing values
     *              from MappingFlags enum.
     */
    virtual void map(Addr vaddr, Addr paddr, int64_t size,
                     uint64_t flags = 0) = 0;
    virtual void remap(Addr vaddr, int64_t size, Addr new_vaddr) = 0;
    virtual void unmap(Addr vaddr, int64_t size) = 0;

    /**
     * Check if any pages in a region are already allocated
     * @param vaddr The starting virtual address of the region.
     * @param size The length of the region.
     * @return True if no pages in the region are mapped.
     */
    virtual bool isUnmapped(Addr vaddr, int64_t size) = 0;

    /**
     * Lookup function
     * @param vaddr The virtual address.
     * @param entry Output: on success, filled with the page table entry
     *              corresponding to vaddr.
     * @return True if a translation for vaddr exists.
     */
    virtual bool lookup(Addr vaddr, TheISA::TlbEntry &entry) = 0;

    /**
     * Translate function
     * @param vaddr The virtual address.
     * @param paddr Physical address from translation.
     * @return True if translation exists
     */
    bool translate(Addr vaddr, Addr &paddr);

    /**
     * Simplified translate function (just check for translation)
     * @param vaddr The virtual address.
     * @return True if translation exists
     */
    bool translate(Addr vaddr) { Addr dummy; return translate(vaddr, dummy); }

    /**
     * Perform a translation on the memory request, fills in paddr
     * field of req.
     * @param req The memory request.
     */
    Fault translate(RequestPtr req);

    /**
     * Update the page table cache.
     * Inserts the pair at the most-recently-used slot (index 0) and
     * shifts the existing entries down one slot each.
     * @param vaddr virtual address (page aligned) to insert
     * @param entry page table entry to cache for vaddr
     * @return A pointer to any entry which is displaced from the cache.
     */
    TheISA::TlbEntry *
    updateCache(Addr vaddr, TheISA::TlbEntry *entry)
    {
        // The entry in the last slot is pushed out of the cache.
        TheISA::TlbEntry *evicted = pTableCache[2].entry;

        pTableCache[2].entry = pTableCache[1].entry;
        pTableCache[2].vaddr = pTableCache[1].vaddr;

        pTableCache[1].entry = pTableCache[0].entry;
        pTableCache[1].vaddr = pTableCache[0].vaddr;

        pTableCache[0].entry = entry;
        pTableCache[0].vaddr = vaddr;

        return evicted;
    }

    /**
     * Erase an entry from the page table cache.
     * Only the first slot whose vaddr matches (if any) is invalidated;
     * the remaining slots are left in place.
     * @param vaddr virtual address (page aligned) to check
     * @return A pointer to the entry (if any) which is kicked out.
     */
    TheISA::TlbEntry *
    eraseCacheEntry(Addr vaddr)
    {
        TheISA::TlbEntry *evicted = nullptr;
        // Invalidate cached entries if necessary
        if (pTableCache[0].entry && pTableCache[0].vaddr == vaddr) {
            evicted = pTableCache[0].entry;
            pTableCache[0].entry = nullptr;
        } else if (pTableCache[1].entry && pTableCache[1].vaddr == vaddr) {
            evicted = pTableCache[1].entry;
            pTableCache[1].entry = nullptr;
        } else if (pTableCache[2].entry && pTableCache[2].vaddr == vaddr) {
            evicted = pTableCache[2].entry;
            pTableCache[2].entry = nullptr;
        }
        return evicted;
    }

    /**
     * Report the mappings held by this table; base implementation is a
     * no-op, subclasses may override.
     * @param addr_mappings Output vector — presumably (vaddr, paddr)
     *                      pairs; confirm against subclass overrides.
     */
    virtual void getMappings(std::vector<std::pair<Addr, Addr>>
                             *addr_mappings) {};
};

/**
 * Declaration of functional page table.
212 */ 213class FuncPageTable : public PageTableBase 214{ 215 private: 216 typedef std::unordered_map<Addr, TheISA::TlbEntry *> PTable; 217 typedef PTable::iterator PTableItr; 218 PTable pTable; 219 220 public: 221 222 FuncPageTable(const std::string &__name, uint64_t _pid, Addr _pageSize); 223 224 ~FuncPageTable(); 225 226 void initState(ThreadContext* tc) override 227 { 228 } 229 230 void map(Addr vaddr, Addr paddr, int64_t size, 231 uint64_t flags = 0) override; 232 void remap(Addr vaddr, int64_t size, Addr new_vaddr) override; 233 void unmap(Addr vaddr, int64_t size) override; 234 235 /** 236 * Check if any pages in a region are already allocated 237 * @param vaddr The starting virtual address of the region. 238 * @param size The length of the region. 239 * @return True if no pages in the region are mapped. 240 */ 241 bool isUnmapped(Addr vaddr, int64_t size) override; 242 243 /** 244 * Lookup function 245 * @param vaddr The virtual address. 246 * @return entry The page table entry corresponding to vaddr. 247 */ 248 bool lookup(Addr vaddr, TheISA::TlbEntry &entry) override; 249 250 void serialize(CheckpointOut &cp) const override; 251 void unserialize(CheckpointIn &cp) override; 252 253 void getMappings(std::vector<std::pair<Addr, Addr>> *addr_maps) override; 254}; 255 256#endif // __MEM_PAGE_TABLE_HH__ 257