page_table.cc (10298:77af86f37337 -> 10556:1e3b3c7a0cba)
/*
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * Copyright (c) 2003 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright

--- 37 unchanged lines hidden ---

#include "debug/MMU.hh"
#include "mem/page_table.hh"
#include "sim/faults.hh"
#include "sim/sim_object.hh"

using namespace std;
using namespace TheISA;
-FuncPageTable::FuncPageTable(const std::string &__name, uint64_t _pid, Addr _pageSize)
+FuncPageTable::FuncPageTable(const std::string &__name,
+                             uint64_t _pid, Addr _pageSize)
    : PageTableBase(__name, _pid, _pageSize)
{
}

FuncPageTable::~FuncPageTable()
{
}

void
FuncPageTable::map(Addr vaddr, Addr paddr, int64_t size, bool clobber)
{
    // starting address must be page aligned
    assert(pageOffset(vaddr) == 0);

    DPRINTF(MMU, "Allocating Page: %#x-%#x\n", vaddr, vaddr + size);

    for (; size > 0; size -= pageSize, vaddr += pageSize, paddr += pageSize) {
        if (!clobber && (pTable.find(vaddr) != pTable.end())) {
            // already mapped
-            fatal("FuncPageTable::allocate: address 0x%x already mapped", vaddr);
+            fatal("FuncPageTable::allocate: addr 0x%x already mapped", vaddr);
        }

        pTable[vaddr] = TheISA::TlbEntry(pid, vaddr, paddr);
        eraseCacheEntry(vaddr);
        updateCache(vaddr, pTable[vaddr]);
    }
}

void
FuncPageTable::remap(Addr vaddr, int64_t size, Addr new_vaddr)
{
    assert(pageOffset(vaddr) == 0);
    assert(pageOffset(new_vaddr) == 0);

    DPRINTF(MMU, "moving pages from vaddr %08p to %08p, size = %d\n", vaddr,
            new_vaddr, size);

-    for (; size > 0; size -= pageSize, vaddr += pageSize, new_vaddr += pageSize) {
+    for (; size > 0;
+         size -= pageSize, vaddr += pageSize, new_vaddr += pageSize)
+    {
        assert(pTable.find(vaddr) != pTable.end());

        pTable[new_vaddr] = pTable[vaddr];
        pTable.erase(vaddr);
        eraseCacheEntry(vaddr);
        pTable[new_vaddr].updateVaddr(new_vaddr);
        updateCache(new_vaddr, pTable[new_vaddr]);
    }

--- 133 unchanged lines hidden ---