aboutsummaryrefslogtreecommitdiff
path: root/arch/x86_64/src/memory/paging
diff options
context:
space:
mode:
authorMatteo Gmür <matteo.gmuer1@ost.ch>2024-10-21 09:31:58 +0000
committerMatteo Gmür <matteo.gmuer1@ost.ch>2024-10-21 09:31:58 +0000
commitf171efed99684bf03c315405efda34e36d7db82c (patch)
tree9935c48f6030f4a438ebd9fd1bcf58d4d4c8a233 /arch/x86_64/src/memory/paging
parent49ae81912f3a440f1958e86296d468ec669f71a2 (diff)
downloadteachos-f171efed99684bf03c315405efda34e36d7db82c.tar.xz
teachos-f171efed99684bf03c315405efda34e36d7db82c.zip
Ensure only one instance of global page table can exist
Diffstat (limited to 'arch/x86_64/src/memory/paging')
-rw-r--r--arch/x86_64/src/memory/paging/active_page_table.cpp21
-rw-r--r--arch/x86_64/src/memory/paging/page_mapper.cpp91
-rw-r--r--arch/x86_64/src/memory/paging/page_table.cpp28
3 files changed, 65 insertions, 75 deletions
diff --git a/arch/x86_64/src/memory/paging/active_page_table.cpp b/arch/x86_64/src/memory/paging/active_page_table.cpp
deleted file mode 100644
index ec89e0e..0000000
--- a/arch/x86_64/src/memory/paging/active_page_table.cpp
+++ /dev/null
@@ -1,21 +0,0 @@
-#include "arch/memory/paging/active_page_table.hpp"
-
-namespace teachos::arch::memory::paging
-{
- auto active_page_table::create(page_table * level4_page_table) -> active_page_table *
- {
- if (instantiated)
- {
- return this;
- }
-
- instantiated = true;
- return &active_page_table(level4_page_table);
- }
-
- active_page_table::active_page_table(page_table * level4_page_table)
- : level4_page_table(level4_page_table)
- {
- // Nothing to do
- }
-} // namespace teachos::arch::memory::paging
diff --git a/arch/x86_64/src/memory/paging/page_mapper.cpp b/arch/x86_64/src/memory/paging/page_mapper.cpp
index 5b72a63..cedda54 100644
--- a/arch/x86_64/src/memory/paging/page_mapper.cpp
+++ b/arch/x86_64/src/memory/paging/page_mapper.cpp
@@ -2,60 +2,79 @@
namespace teachos::arch::memory::paging
{
+ namespace
+ {
+ constexpr size_t PAGE_TABLE_LEVEL_4_ADDRESS = 0xfffffffffffff000;
+ }
+
+ auto create_or_get() -> page_table *
+ {
+ static bool instantiated = false;
+ static page_table * active_page = nullptr;
+
+ if (instantiated)
+ {
+ return active_page;
+ }
+
+ instantiated = true;
+ active_page = reinterpret_cast<page_table *>(PAGE_TABLE_LEVEL_4_ADDRESS);
+ return active_page;
+ }
+
auto translate_page(virtual_page page) -> std::optional<allocator::physical_frame>
{
- page_table page_table{};
- bool is_valid = false;
+ page_table * current_page_table = create_or_get();
for (auto level = page_table::LEVEL4; level != page_table::LEVEL1; level--)
{
- is_valid = page_table.next_table(page.get_level_index(level));
- if (!is_valid)
+ auto next_page_table = current_page_table->next_table(page.get_level_index(level));
+ // If next_table failed, the entry is most likely a huge page, which cannot be
+ // walked like a normal table; retry translation via the huge-page path instead.
+ if (!next_page_table)
{
- break;
+ return translate_huge_page(page);
}
+ current_page_table = next_page_table.value();
}
- if (is_valid)
- {
- auto level1_index = page.get_level_index(page_table::LEVEL1);
- auto level1_frame = page_table[level1_index].calculate_pointed_to_frame();
- return level1_frame;
- }
-
- return translate_huge_page(page);
+ auto level1_index = page.get_level_index(page_table::LEVEL1);
+ auto level1_frame = current_page_table->operator[](level1_index).calculate_pointed_to_frame();
+ return level1_frame;
}
auto translate_huge_page(virtual_page page) -> std::optional<allocator::physical_frame>
{
- page_table page_table{};
- bool is_valid = page_table.next_table(page.get_level_index(page_table::LEVEL3));
+ page_table * current_page_table = create_or_get();
+ auto level3_page_table = current_page_table->next_table(page.get_level_index(page_table::LEVEL4));
- if (is_valid)
+ if (!level3_page_table)
{
- auto level3_entry = page_table[page.get_level_index(page_table::LEVEL3)];
- auto level3_optional_frame = level3_entry.calculate_pointed_to_frame();
- if (level3_optional_frame.has_value() && level3_entry.contains_flags(entry::HUGE_PAGE))
- {
- auto level3_frame = level3_optional_frame.value();
- exception_handling::assert(level3_frame.frame_number % (PAGE_TABLE_ENTRY_COUNT * PAGE_TABLE_ENTRY_COUNT) == 0U,
- "[Page Mapper] Physical address must be 1 GiB aligned");
- return allocator::physical_frame{level3_frame.frame_number +
- page.get_level_index(page_table::LEVEL2) * PAGE_TABLE_ENTRY_COUNT +
- page.get_level_index(page_table::LEVEL1)};
- }
+ return std::nullopt;
}
- is_valid = page_table.next_table(page.get_level_index(page_table::LEVEL3));
- if (is_valid)
+
+ auto level3_entry = level3_page_table.value()->operator[](page.get_level_index(page_table::LEVEL3));
+ auto level3_frame = level3_entry.calculate_pointed_to_frame();
+ if (level3_frame && level3_entry.contains_flags(entry::HUGE_PAGE))
+ {
+ exception_handling::assert(
+ level3_frame.value().frame_number % (PAGE_TABLE_ENTRY_COUNT * PAGE_TABLE_ENTRY_COUNT) == 0U,
+ "[Page Mapper] Physical address must be 1 GiB aligned");
+ return allocator::physical_frame{level3_frame.value().frame_number +
+ page.get_level_index(page_table::LEVEL2) * PAGE_TABLE_ENTRY_COUNT +
+ page.get_level_index(page_table::LEVEL1)};
+ }
+
+ auto level2_page_table = level3_page_table.value()->next_table(page.get_level_index(page_table::LEVEL3));
+ if (level2_page_table)
{
- auto level2_entry = page_table[page.get_level_index(page_table::LEVEL2)];
- auto level2_optional_frame = level2_entry.calculate_pointed_to_frame();
- if (level2_optional_frame.has_value() && level2_entry.contains_flags(entry::HUGE_PAGE))
+ auto level2_entry = level2_page_table.value()->operator[](page.get_level_index(page_table::LEVEL2));
+ auto level2_frame = level2_entry.calculate_pointed_to_frame();
+ if (level2_frame && level2_entry.contains_flags(entry::HUGE_PAGE))
{
- auto level2_frame = level2_optional_frame.value();
- exception_handling::assert(level2_frame.frame_number % PAGE_TABLE_ENTRY_COUNT == 0U,
+ exception_handling::assert(level2_frame.value().frame_number % PAGE_TABLE_ENTRY_COUNT == 0U,
"[Page Mapper] Physical address must be 2 MiB aligned");
- return allocator::physical_frame{level2_frame.frame_number + page.get_level_index(page_table::LEVEL1)};
+ return allocator::physical_frame{level2_frame.value().frame_number + page.get_level_index(page_table::LEVEL1)};
}
}
return std::nullopt;
@@ -67,7 +86,7 @@ namespace teachos::arch::memory::paging
virtual_page page = virtual_page::containing_address(virtual_address);
std::optional<allocator::physical_frame> frame = translate_page(page);
- if (frame.has_value())
+ if (frame)
{
return frame.value().frame_number * allocator::PAGE_FRAME_SIZE + offset;
}
diff --git a/arch/x86_64/src/memory/paging/page_table.cpp b/arch/x86_64/src/memory/paging/page_table.cpp
index ea2e9c2..5daf8bb 100644
--- a/arch/x86_64/src/memory/paging/page_table.cpp
+++ b/arch/x86_64/src/memory/paging/page_table.cpp
@@ -4,16 +4,9 @@
namespace teachos::arch::memory::paging
{
- page_table::page_table()
- : current_level(LEVEL4)
- , current_table(reinterpret_cast<table_content *>(0xfffffffffffff000))
- {
- // Nothing to do
- }
-
auto page_table::zero_entries() -> void
{
- constexpr size_t entry_amount = sizeof(current_table->entries) / sizeof(current_table->entries[0]);
+ constexpr size_t entry_amount = sizeof(entries) / sizeof(entries[0]);
for (size_t i = 0; i < entry_amount; ++i)
{
auto entry = this->operator[](i);
@@ -21,18 +14,17 @@ namespace teachos::arch::memory::paging
}
}
- auto page_table::next_table(std::size_t table_index) -> bool
+ auto page_table::next_table(std::size_t table_index) -> std::optional<page_table *>
{
- exception_handling::assert(current_level != LEVEL1,
- "[Page Table] Attempted to call next_table on level 1 page table");
+ // TODO: Find another way to ensure the current page table is not LEVEL1
+ // exception_handling::assert(current_level != LEVEL1, "[Page Table] Attempted to call next_table on level 1 page
+ // table");
auto address = next_table_address(table_index);
- bool const success = address.has_value();
- if (success)
+ if (address)
{
- current_table = reinterpret_cast<table_content *>(address.value());
- current_level = static_cast<level>(current_level - 1U);
+ return reinterpret_cast<page_table *>(address.value());
}
- return success;
+ return std::nullopt;
}
auto page_table::operator[](std::size_t index) -> entry &
@@ -40,7 +32,7 @@ namespace teachos::arch::memory::paging
// C array is not bounds checked, therefore we have to check ourselves, to ensure no out of bounds reads, which
// could be incredibly hard to debug later.
exception_handling::assert(index < PAGE_TABLE_ENTRY_COUNT, "[Page Table] Index out of bounds");
- return current_table->entries[index];
+ return entries[index];
}
auto page_table::next_table_address(std::size_t table_index) -> std::optional<std::size_t>
@@ -49,7 +41,7 @@ namespace teachos::arch::memory::paging
if (entry.contains_flags(entry::PRESENT) && !entry.contains_flags(entry::HUGE_PAGE))
{
- std::size_t const table_address = reinterpret_cast<std::size_t>(current_table);
+ std::size_t const table_address = reinterpret_cast<std::size_t>(this);
return ((table_address << 9) | (table_index << 12));
}
return std::nullopt;