page_table.cc revision 11800:54436a1784dc
/*
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * Copyright (c) 2003 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 *          Ron Dreslinski
 *          Ali Saidi
 */

/**
 * @file
 * Definitions of the functional page table.
 */
#include "mem/page_table.hh"

#include <string>

#include "base/trace.hh"
#include "config/the_isa.hh"
#include "debug/MMU.hh"
#include "sim/faults.hh"
#include "sim/serialize.hh"

using namespace std;
using namespace TheISA;

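// FuncPageTable is a purely functional (no timing) page table: a hash map
// from page-aligned virtual addresses to TLB entries for the owning
// process (identified by _pid), managed at _pageSize granularity.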
FuncPageTable::FuncPageTable(const std::string &__name,
                             uint64_t _pid, Addr _pageSize)
        : PageTableBase(__name, _pid, _pageSize)
{
}

FuncPageTable::~FuncPageTable()
{
}

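// Install translations for [vaddr, vaddr + size), one page at a time.
// Unless the Clobber flag is set, mapping a page that is already present
// is a fatal error. Each new entry also refreshes the small lookup cache.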
void
FuncPageTable::map(Addr vaddr, Addr paddr, int64_t size, uint64_t flags)
{
    bool clobber = flags & Clobber;
    // starting address must be page aligned
    assert(pageOffset(vaddr) == 0);

    DPRINTF(MMU, "Allocating Page: %#x-%#x\n", vaddr, vaddr + size);

    for (; size > 0; size -= pageSize, vaddr += pageSize, paddr += pageSize) {
        if (!clobber && (pTable.find(vaddr) != pTable.end())) {
            // already mapped
            fatal("FuncPageTable::allocate: addr 0x%x already mapped", vaddr);
        }

        pTable[vaddr] = TheISA::TlbEntry(pid, vaddr, paddr,
                                         flags & Uncacheable,
                                         flags & ReadOnly);
        eraseCacheEntry(vaddr);
        updateCache(vaddr, pTable[vaddr]);
    }
}

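// Move existing translations from [vaddr, vaddr + size) to the same-sized
// range starting at new_vaddr. Every source page must already be mapped;
// the physical pages backing the entries are left unchanged, only the
// virtual addresses of the entries are rewritten.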
void
FuncPageTable::remap(Addr vaddr, int64_t size, Addr new_vaddr)
{
    assert(pageOffset(vaddr) == 0);
    assert(pageOffset(new_vaddr) == 0);

    DPRINTF(MMU, "moving pages from vaddr %08p to %08p, size = %d\n", vaddr,
            new_vaddr, size);

    for (; size > 0;
         size -= pageSize, vaddr += pageSize, new_vaddr += pageSize)
    {
        assert(pTable.find(vaddr) != pTable.end());

        pTable[new_vaddr] = pTable[vaddr];
        pTable.erase(vaddr);
        eraseCacheEntry(vaddr);
        pTable[new_vaddr].updateVaddr(new_vaddr);
        updateCache(new_vaddr, pTable[new_vaddr]);
    }
}

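// Remove the translations covering [vaddr, vaddr + size); every page in
// the range must currently be mapped.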
void
FuncPageTable::unmap(Addr vaddr, int64_t size)
{
    assert(pageOffset(vaddr) == 0);

    DPRINTF(MMU, "Unmapping page: %#x-%#x\n", vaddr, vaddr + size);

    for (; size > 0; size -= pageSize, vaddr += pageSize) {
        assert(pTable.find(vaddr) != pTable.end());
        pTable.erase(vaddr);
        eraseCacheEntry(vaddr);
    }
}

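// Return true only if no page in [vaddr, vaddr + size) currently has a
// translation.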
bool
FuncPageTable::isUnmapped(Addr vaddr, int64_t size)
{
    // starting address must be page aligned
    assert(pageOffset(vaddr) == 0);

    for (; size > 0; size -= pageSize, vaddr += pageSize) {
        if (pTable.find(vaddr) != pTable.end()) {
            return false;
        }
    }

    return true;
}

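// Look up the translation for vaddr. The three most recently used entries
// live in a small software cache (pTableCache) and are checked first; on a
// miss the backing map is consulted and, if the page is found, the cache
// is refreshed with that entry.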
bool
FuncPageTable::lookup(Addr vaddr, TheISA::TlbEntry &entry)
{
    Addr page_addr = pageAlign(vaddr);

    if (pTableCache[0].valid && pTableCache[0].vaddr == page_addr) {
        entry = pTableCache[0].entry;
        return true;
    }
    if (pTableCache[1].valid && pTableCache[1].vaddr == page_addr) {
        entry = pTableCache[1].entry;
        return true;
    }
    if (pTableCache[2].valid && pTableCache[2].vaddr == page_addr) {
        entry = pTableCache[2].entry;
        return true;
    }

    PTableItr iter = pTable.find(page_addr);

    if (iter == pTable.end()) {
        return false;
    }

    updateCache(page_addr, iter->second);
    entry = iter->second;
    return true;
}

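// Translate a virtual address to a physical address by adding the offset
// within the page to the physical page base of the matching entry.
// Returns false when no translation exists. A hypothetical caller:
//
//     Addr paddr;
//     if (pt->translate(vaddr, paddr)) {
//         // use paddr for a functional access
//     }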
bool
PageTableBase::translate(Addr vaddr, Addr &paddr)
{
    TheISA::TlbEntry entry;
    if (!lookup(vaddr, entry)) {
        DPRINTF(MMU, "Couldn't Translate: %#x\n", vaddr);
        return false;
    }
    paddr = pageOffset(vaddr) + entry.pageStart();
    DPRINTF(MMU, "Translating: %#x->%#x\n", vaddr, paddr);
    return true;
}

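// Translate the virtual address of a memory request and record the result
// via req->setPaddr(). The request must not cross a page boundary; a
// failed lookup returns a GenericPageTableFault for the faulting address.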
Fault
PageTableBase::translate(RequestPtr req)
{
    Addr paddr;
    assert(pageAlign(req->getVaddr() + req->getSize() - 1)
           == pageAlign(req->getVaddr()));
    if (!translate(req->getVaddr(), paddr)) {
        return Fault(new GenericPageTableFault(req->getVaddr()));
    }
    req->setPaddr(paddr);
    if ((paddr & (pageSize - 1)) + req->getSize() > pageSize) {
        panic("Request spans page boundaries!\n");
        return NoFault;
    }
    return NoFault;
}

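// Checkpoint the page table: write out the number of entries, then each
// mapping as its own "Entry<N>" section containing the virtual address and
// the serialized TLB entry.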
void
FuncPageTable::serialize(CheckpointOut &cp) const
{
    paramOut(cp, "ptable.size", pTable.size());

    PTable::size_type count = 0;
    for (auto &pte : pTable) {
        ScopedCheckpointSection sec(cp, csprintf("Entry%d", count++));

        paramOut(cp, "vaddr", pte.first);
        pte.second.serialize(cp);
    }
    assert(count == pTable.size());
}

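// Restore the page table from a checkpoint by reading back each "Entry<N>"
// section written by serialize() and re-inserting it into pTable.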
void
FuncPageTable::unserialize(CheckpointIn &cp)
{
    int count;
    paramIn(cp, "ptable.size", count);

    for (int i = 0; i < count; ++i) {
        ScopedCheckpointSection sec(cp, csprintf("Entry%d", i));

        std::unique_ptr<TheISA::TlbEntry> entry;
        Addr vaddr;

        paramIn(cp, "vaddr", vaddr);
        entry.reset(new TheISA::TlbEntry());
        entry->unserialize(cp);

        pTable[vaddr] = *entry;
    }
}