blob: a8f2c4034d582c88bb79bec41a32070aef04df63 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
|
#include "arch/memory/paging.hpp"
#include "arch/exception_handling/assert.hpp"
namespace teachos::arch::memory
{
/// An entry is unused when no flag or address bit is set at all.
auto entry::is_unused() const -> bool { return flags.none(); }
/// Clear every bit of the entry, marking it as not mapping anything.
auto entry::set_unused() -> void { flags.reset(); }
/// Resolve the physical frame this entry maps, if any.
/// @return the frame containing the entry's physical address, or
///         std::nullopt when the entry is not marked PRESENT.
auto entry::calculate_pointed_to_frame() const -> std::optional<physical_frame>
{
    // Only entries the MMU considers mapped carry a meaningful address.
    if (!contains_flags(PRESENT))
    {
        return std::nullopt;
    }
    return physical_frame::containing_address(calculate_physical_address());
}
/// Extract the address bits (bits [12, 52) of the entry), shifted down so
/// that bit 12 of the entry lands in bit 0 of the returned value.
/// NOTE(review): the caller feeds this into physical_frame::containing_address;
/// confirm that function expects the shifted-down value rather than the raw
/// masked address.
auto entry::calculate_physical_address() const -> std::size_t
{
    constexpr std::size_t start_bit = 12U;
    constexpr std::size_t end_bit = 52U;
    size_t value = 0U;
    for (auto i = start_bit; i < end_bit; i++)
    {
        // Shift a 64-bit one, not an int: the previous `1 << (i - start_bit)`
        // shifted a 32-bit int by up to 39 positions, which is undefined
        // behaviour and silently discarded every address bit above bit 31.
        value |= (flags[i] ? (std::size_t{1} << (i - start_bit)) : 0U);
    }
    return value;
}
/// True when every bit set in `other` is also set in this entry,
/// i.e. `other` is a subset of the entry's flags.
auto entry::contains_flags(std::bitset<64U> other) const -> bool { return (other & ~flags).none(); }
/// Merge the frame's start address into the entry, keeping the current flags.
/// Asserts that the address fits the page-aligned window 0x000fffff'fffff000
/// (the low 12 and high 12 bits of an entry are flag/reserved space).
auto entry::set_address(physical_frame frame) -> void
{
    auto const start = frame.start_address();
    arch::exception_handling::assert((start & ~0x000fffff'fffff000) == 0,
                                     "Start address is not aligned with Page");
    flags |= std::bitset<64U>(start);
}
// Construct a page table with all entries value-initialized. `p4` is pointed
// at the fixed virtual address 0xfffffffffffff000 — under the x86-64
// recursive-mapping convention this is where the active level-4 table maps
// itself (all index fields select the self-referencing P4 slot).
// NOTE(review): this assumes recursive mapping was installed in the last P4
// entry by earlier setup code — confirm against the paging initialization.
page_table::page_table()
: entries()
, p4(reinterpret_cast<page_table *>(0xfffffffffffff000))
{
// Nothing to do: all work happens in the member initializer list.
}
/// Mark every entry in this table as unused, so the table maps nothing.
auto page_table::zero_entries() -> void
{
    constexpr size_t entry_amount = sizeof(entries) / sizeof(entries[0]);
    for (size_t i = 0; i < entry_amount; ++i)
    {
        // Operate on the entry in place. The previous code did
        // `auto entry = this->operator[](i);`, which copied the entry by
        // value — set_unused() then cleared the copy and the actual table
        // was left untouched.
        this->operator[](i).set_unused();
    }
}
/// Typed wrapper around next_table_address(): return a pointer to the
/// next-level table for `index`, or std::nullopt when no such table exists.
auto page_table::next_table(std::size_t index) const -> std::optional<page_table const *>
{
    auto const address = next_table_address(index);
    if (!address.has_value())
    {
        return std::nullopt;
    }
    return reinterpret_cast<page_table const *>(address.value());
}
/// Bounds-checked mutable access to an entry.
auto page_table::operator[](std::size_t index) -> entry &
{
    // The backing C array performs no bounds checking of its own, so fail
    // loudly here rather than allow a silent out-of-bounds access that would
    // be very hard to track down later.
    arch::exception_handling::assert(index < PAGE_TABLE_ENTRY_COUNT, "[Page Table] index out of bounds");
    return entries[index];
}
/// Bounds-checked read-only access to an entry.
auto page_table::operator[](std::size_t index) const -> entry const &
{
    // The backing C array performs no bounds checking of its own, so fail
    // loudly here rather than allow a silent out-of-bounds read that would
    // be very hard to track down later.
    arch::exception_handling::assert(index < PAGE_TABLE_ENTRY_COUNT, "[Page Table] index out of bounds");
    return entries[index];
}
auto page_table::next_table_address(std::size_t index) const -> std::optional<std::size_t>
{
auto entry = this->operator[](index);
if (entry.contains_flags(entry::PRESENT) && !entry.contains_flags(entry::HUGE_PAGE))
{
std::size_t const table_address = reinterpret_cast<std::size_t>(this);
return (table_address << 9) | (index << 12);
}
// TODO: Implement behaviour for huge pages currently not done
return std::nullopt;
}
} // namespace teachos::arch::memory
|