aboutsummaryrefslogtreecommitdiff
path: root/arch/x86_64
diff options
context:
space:
mode:
authorMatteo Gmür <matteo.gmuer1@ost.ch>2024-10-21 09:31:58 +0000
committerMatteo Gmür <matteo.gmuer1@ost.ch>2024-10-21 09:31:58 +0000
commitf171efed99684bf03c315405efda34e36d7db82c (patch)
tree9935c48f6030f4a438ebd9fd1bcf58d4d4c8a233 /arch/x86_64
parent49ae81912f3a440f1958e86296d468ec669f71a2 (diff)
downloadteachos-f171efed99684bf03c315405efda34e36d7db82c.tar.xz
teachos-f171efed99684bf03c315405efda34e36d7db82c.zip
Ensure only one instance of global page table can exist
Diffstat (limited to 'arch/x86_64')
-rw-r--r--arch/x86_64/include/arch/memory/paging/active_page_table.hpp32
-rw-r--r--arch/x86_64/include/arch/memory/paging/page_mapper.hpp41
-rw-r--r--arch/x86_64/include/arch/memory/paging/page_table.hpp28
-rw-r--r--arch/x86_64/src/kernel/main.cpp2
-rw-r--r--arch/x86_64/src/memory/paging/active_page_table.cpp21
-rw-r--r--arch/x86_64/src/memory/paging/page_mapper.cpp91
-rw-r--r--arch/x86_64/src/memory/paging/page_table.cpp28
7 files changed, 104 insertions, 139 deletions
diff --git a/arch/x86_64/include/arch/memory/paging/active_page_table.hpp b/arch/x86_64/include/arch/memory/paging/active_page_table.hpp
deleted file mode 100644
index 3933d5a..0000000
--- a/arch/x86_64/include/arch/memory/paging/active_page_table.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef TEACHOS_ARCH_X86_64_MEMORY_PAGING_VIRTUAL_PAGE_HPP
-#define TEACHOS_ARCH_X86_64_MEMORY_PAGING_VIRTUAL_PAGE_HPP
-
-#include "arch/memory/paging/page_table.hpp"
-
-namespace teachos::arch::memory::paging
-{
-
- struct active_page_table
- {
- /**
- * @brief Ensures only one instance of active_page_table exists.
- *
- * @param level4_page_table A pointer to the level 4 page table.
- * @return The only instance of active_page_table.
- */
- auto create(page_table * level4_page_table) -> active_page_table *;
-
- private:
- /**
- * @brief Construct a new active page table object.
- *
- * @param level4_page_table A pointer to the level 4 page table.
- */
- active_page_table(page_table * level4_page_table);
-
- bool instantiated = false; ///< Indicates wether an instance already exists.
- page_table * level4_page_table; ///< The active level4 page table.
- };
-} // namespace teachos::arch::memory::paging
-
-#endif // TEACHOS_ARCH_X86_64_MEMORY_PAGING_VIRTUAL_PAGE_HPP
diff --git a/arch/x86_64/include/arch/memory/paging/page_mapper.hpp b/arch/x86_64/include/arch/memory/paging/page_mapper.hpp
index ae3502e..ebd044a 100644
--- a/arch/x86_64/include/arch/memory/paging/page_mapper.hpp
+++ b/arch/x86_64/include/arch/memory/paging/page_mapper.hpp
@@ -10,6 +10,15 @@
namespace teachos::arch::memory::paging
{
/**
   * @brief Creates a single instance of the level 4 page table and returns it or alternatively returns the
+ * previously created instance. The instance is owned by this method and is static, meaning it lives on for the
+ * complete lifetime of the program.
+ *
+ * @return Single unique instance of the level 4 page table.
+ */
+ auto create_or_get() -> page_table *;
+
+ /**
* @brief Translates page into physical frame, will first attempt to parse normally using default page size and if it
* failed attempt to parse using huge pages.
*
@@ -49,27 +58,35 @@ namespace teachos::arch::memory::paging
auto map_page_to_frame(T & allocator, virtual_page page, allocator::physical_frame frame,
std::bitset<64U> flags) -> void
{
- page_table page_table{};
- bool table_exists = false;
+ page_table * current_page_table = create_or_get();
for (auto level = page_table::LEVEL4; level != page_table::LEVEL1; level--)
{
- std::size_t level_index = page.get_level_index(level);
- table_exists = page_table.next_table(level_index);
-
- if (!table_exists)
+ auto level_index = page.get_level_index(level);
+ auto next_page_table = current_page_table->next_table(level_index);
      // If the next table method failed then it means that the page level of the frame we want to allocate has not yet
+ // been created itself. So we have to do that before we are able to allocate the wanted frame. This has to be done
      // for every level, meaning we potentially create a level 4, level 3 and level 2 page entry, each pointing to a
+ // page table one level below.
+ if (!next_page_table)
{
auto allocated_frame = allocator.allocate_frame();
- exception_handling::assert(!allocated_frame.has_value(), "[Page mapper]: Unable to allocate frame");
- page_table[level_index].set_entry(allocated_frame.value(), entry::PRESENT | entry::WRITABLE);
- page_table.zero_entries();
+ exception_handling::assert(!allocated_frame.has_value(), "[Page mapper] Unable to allocate frame");
+ current_page_table->operator[](level_index)
+ .set_entry(allocated_frame.value(), entry::PRESENT | entry::WRITABLE);
        // There should now be an entry at the previously nonexistent index, therefore we can simply access it again.
+ next_page_table = current_page_table->next_table(page.get_level_index(level));
+ exception_handling::assert(!next_page_table.has_value(),
+ "[Page mapper] Unable to create new entry into page table");
+ next_page_table.value()->zero_entries();
}
+ current_page_table = next_page_table.value();
}
- auto level1_entry = page_table[page.get_level_index(page_table::LEVEL1)];
+ auto level1_entry = current_page_table->operator[](page.get_level_index(page_table::LEVEL1));
arch::exception_handling::assert(!level1_entry.contains_flags(entry::HUGE_PAGE),
- "[Page Mapper]: Unable to map huge pages");
- arch::exception_handling::assert(!level1_entry.is_unused(), "[Page Mapper]: Page table entry is already used");
+ "[Page Mapper] Unable to map huge pages");
+ arch::exception_handling::assert(!level1_entry.is_unused(), "[Page Mapper] Page table entry is already used");
level1_entry.set_entry(frame, flags | std::bitset<64U>{entry::PRESENT});
}
} // namespace teachos::arch::memory::paging
diff --git a/arch/x86_64/include/arch/memory/paging/page_table.hpp b/arch/x86_64/include/arch/memory/paging/page_table.hpp
index 0fe667c..3439127 100644
--- a/arch/x86_64/include/arch/memory/paging/page_table.hpp
+++ b/arch/x86_64/include/arch/memory/paging/page_table.hpp
@@ -11,16 +11,6 @@ namespace teachos::arch::memory::paging
}
/**
- * @brief Actual data that is contained in every page table, this is the structure we cast a specific address too,
- * because it consists of x amount os entries, which is a simple address.
- */
- struct table_content
- {
- entry entries[PAGE_TABLE_ENTRY_COUNT]; ///< Entries containing addresses to page tables of a level below or actual
- ///< virtual addresses for the level 1 page table.
- };
-
- /**
* @brief A Page table containing 512 entries.
*/
struct page_table
@@ -38,9 +28,10 @@ namespace teachos::arch::memory::paging
};
/**
- * @brief Constructor. Automatically starts on the fixed address of the Level 4 page table.
+ * @brief Deleted constructor. Object can only be created by casting from the fixed Level 4
+ * page table address `reinterpret_cast<page_table *>(0xfffffffffffff000)`.
*/
- page_table();
+ page_table() = delete;
/**
* @brief Set every entry of the page to unused.
@@ -48,13 +39,13 @@ namespace teachos::arch::memory::paging
auto zero_entries() -> void;
/**
- * @brief Turn this page table into the next page table level from the given page table index. Meaning we
- * use an index into a Level 4 page table to get the according Level 3 page table. When using this on an a level 1
- * page table it will cause an assertion.
+ * @brief Returns the next page table level from the given page table index. Meaning we
+ * use an index into a Level 4 page table to get the according Level 3 page table. This method should not be called
+ * on a Level 1 page table or it will return invalid addresses and cause hard to debug issues.
*
* @param table_index Index of this page table in the page table one level higher.
*/
- auto next_table(std::size_t table_index) -> bool;
+ auto next_table(std::size_t table_index) -> std::optional<page_table *>;
/**
* @brief Index operator overload to access specific mutable entry directy.
@@ -83,9 +74,8 @@ namespace teachos::arch::memory::paging
*/
auto next_table_address(std::size_t table_index) -> std::optional<std::size_t>;
- level current_level; ///< Current level of the page table, used to ensure next_table() is never called with a level
- ///< 1 page table
- table_content * current_table; ///< Current table we are accessing and indexing.
+ entry entries[PAGE_TABLE_ENTRY_COUNT]; ///< Entries containing addresses to page tables of a level below or actual
+ ///< virtual addresses for the level 1 page table.
};
auto operator--(page_table::level & level, int) -> page_table::level;
diff --git a/arch/x86_64/src/kernel/main.cpp b/arch/x86_64/src/kernel/main.cpp
index 40dd117..db0a9ef 100644
--- a/arch/x86_64/src/kernel/main.cpp
+++ b/arch/x86_64/src/kernel/main.cpp
@@ -26,7 +26,7 @@ namespace teachos::arch::kernel
{
last_allocated = allocated;
allocated = allocator.allocate_frame();
- } while (allocated.has_value());
+ } while (allocated);
video::vga::text::write("Allocated Frames", video::vga::text::common_attributes::green_on_black);
video::vga::text::write_number(last_allocated.value().frame_number,
video::vga::text::common_attributes::green_on_black);
diff --git a/arch/x86_64/src/memory/paging/active_page_table.cpp b/arch/x86_64/src/memory/paging/active_page_table.cpp
deleted file mode 100644
index ec89e0e..0000000
--- a/arch/x86_64/src/memory/paging/active_page_table.cpp
+++ /dev/null
@@ -1,21 +0,0 @@
-#include "arch/memory/paging/active_page_table.hpp"
-
-namespace teachos::arch::memory::paging
-{
- auto active_page_table::create(page_table * level4_page_table) -> active_page_table *
- {
- if (instantiated)
- {
- return this;
- }
-
- instantiated = true;
- return &active_page_table(level4_page_table);
- }
-
- active_page_table::active_page_table(page_table * level4_page_table)
- : level4_page_table(level4_page_table)
- {
- // Nothing to do
- }
-} // namespace teachos::arch::memory::paging
diff --git a/arch/x86_64/src/memory/paging/page_mapper.cpp b/arch/x86_64/src/memory/paging/page_mapper.cpp
index 5b72a63..cedda54 100644
--- a/arch/x86_64/src/memory/paging/page_mapper.cpp
+++ b/arch/x86_64/src/memory/paging/page_mapper.cpp
@@ -2,60 +2,79 @@
namespace teachos::arch::memory::paging
{
+ namespace
+ {
+ constexpr size_t PAGE_TABLE_LEVEL_4_ADDRESS = 0xfffffffffffff000;
+ }
+
+ auto create_or_get() -> page_table *
+ {
+ static bool instantiated = false;
+ static page_table * active_page = nullptr;
+
+ if (instantiated)
+ {
+ return active_page;
+ }
+
+ instantiated = true;
+ active_page = reinterpret_cast<page_table *>(PAGE_TABLE_LEVEL_4_ADDRESS);
+ return active_page;
+ }
+
auto translate_page(virtual_page page) -> std::optional<allocator::physical_frame>
{
- page_table page_table{};
- bool is_valid = false;
+ page_table * current_page_table = create_or_get();
for (auto level = page_table::LEVEL4; level != page_table::LEVEL1; level--)
{
- is_valid = page_table.next_table(page.get_level_index(level));
- if (!is_valid)
+ auto next_page_table = current_page_table->next_table(page.get_level_index(level));
+ // If the next table method failed then it is highly likely that it was a huge page and we therefore have to parse
      // the table differently. We thus attempt to parse it using the method required by huge pages.
+ if (!next_page_table)
{
- break;
+ return translate_huge_page(page);
}
+ current_page_table = next_page_table.value();
}
- if (is_valid)
- {
- auto level1_index = page.get_level_index(page_table::LEVEL1);
- auto level1_frame = page_table[level1_index].calculate_pointed_to_frame();
- return level1_frame;
- }
-
- return translate_huge_page(page);
+ auto level1_index = page.get_level_index(page_table::LEVEL1);
+ auto level1_frame = current_page_table->operator[](level1_index).calculate_pointed_to_frame();
+ return level1_frame;
}
auto translate_huge_page(virtual_page page) -> std::optional<allocator::physical_frame>
{
- page_table page_table{};
- bool is_valid = page_table.next_table(page.get_level_index(page_table::LEVEL3));
+ page_table * current_page_table = create_or_get();
+ auto level3_page_table = current_page_table->next_table(page.get_level_index(page_table::LEVEL4));
- if (is_valid)
+ if (!level3_page_table)
{
- auto level3_entry = page_table[page.get_level_index(page_table::LEVEL3)];
- auto level3_optional_frame = level3_entry.calculate_pointed_to_frame();
- if (level3_optional_frame.has_value() && level3_entry.contains_flags(entry::HUGE_PAGE))
- {
- auto level3_frame = level3_optional_frame.value();
- exception_handling::assert(level3_frame.frame_number % (PAGE_TABLE_ENTRY_COUNT * PAGE_TABLE_ENTRY_COUNT) == 0U,
- "[Page Mapper] Physical address must be 1 GiB aligned");
- return allocator::physical_frame{level3_frame.frame_number +
- page.get_level_index(page_table::LEVEL2) * PAGE_TABLE_ENTRY_COUNT +
- page.get_level_index(page_table::LEVEL1)};
- }
+ return std::nullopt;
}
- is_valid = page_table.next_table(page.get_level_index(page_table::LEVEL3));
- if (is_valid)
+
+ auto level3_entry = level3_page_table.value()->operator[](page.get_level_index(page_table::LEVEL3));
+ auto level3_frame = level3_entry.calculate_pointed_to_frame();
+ if (level3_frame && level3_entry.contains_flags(entry::HUGE_PAGE))
+ {
+ exception_handling::assert(
+ level3_frame.value().frame_number % (PAGE_TABLE_ENTRY_COUNT * PAGE_TABLE_ENTRY_COUNT) == 0U,
+ "[Page Mapper] Physical address must be 1 GiB aligned");
+ return allocator::physical_frame{level3_frame.value().frame_number +
+ page.get_level_index(page_table::LEVEL2) * PAGE_TABLE_ENTRY_COUNT +
+ page.get_level_index(page_table::LEVEL1)};
+ }
+
+ auto level2_page_table = level3_page_table.value()->next_table(page.get_level_index(page_table::LEVEL3));
+ if (level2_page_table)
{
- auto level2_entry = page_table[page.get_level_index(page_table::LEVEL2)];
- auto level2_optional_frame = level2_entry.calculate_pointed_to_frame();
- if (level2_optional_frame.has_value() && level2_entry.contains_flags(entry::HUGE_PAGE))
+ auto level2_entry = level2_page_table.value()->operator[](page.get_level_index(page_table::LEVEL2));
+ auto level2_frame = level2_entry.calculate_pointed_to_frame();
+ if (level2_frame && level2_entry.contains_flags(entry::HUGE_PAGE))
{
- auto level2_frame = level2_optional_frame.value();
- exception_handling::assert(level2_frame.frame_number % PAGE_TABLE_ENTRY_COUNT == 0U,
+ exception_handling::assert(level2_frame.value().frame_number % PAGE_TABLE_ENTRY_COUNT == 0U,
"[Page Mapper] Physical address must be 2 MiB aligned");
- return allocator::physical_frame{level2_frame.frame_number + page.get_level_index(page_table::LEVEL1)};
+ return allocator::physical_frame{level2_frame.value().frame_number + page.get_level_index(page_table::LEVEL1)};
}
}
return std::nullopt;
@@ -67,7 +86,7 @@ namespace teachos::arch::memory::paging
virtual_page page = virtual_page::containing_address(virtual_address);
std::optional<allocator::physical_frame> frame = translate_page(page);
- if (frame.has_value())
+ if (frame)
{
return frame.value().frame_number * allocator::PAGE_FRAME_SIZE + offset;
}
diff --git a/arch/x86_64/src/memory/paging/page_table.cpp b/arch/x86_64/src/memory/paging/page_table.cpp
index ea2e9c2..5daf8bb 100644
--- a/arch/x86_64/src/memory/paging/page_table.cpp
+++ b/arch/x86_64/src/memory/paging/page_table.cpp
@@ -4,16 +4,9 @@
namespace teachos::arch::memory::paging
{
- page_table::page_table()
- : current_level(LEVEL4)
- , current_table(reinterpret_cast<table_content *>(0xfffffffffffff000))
- {
- // Nothing to do
- }
-
auto page_table::zero_entries() -> void
{
- constexpr size_t entry_amount = sizeof(current_table->entries) / sizeof(current_table->entries[0]);
+ constexpr size_t entry_amount = sizeof(entries) / sizeof(entries[0]);
for (size_t i = 0; i < entry_amount; ++i)
{
auto entry = this->operator[](i);
@@ -21,18 +14,17 @@ namespace teachos::arch::memory::paging
}
}
- auto page_table::next_table(std::size_t table_index) -> bool
+ auto page_table::next_table(std::size_t table_index) -> std::optional<page_table *>
{
- exception_handling::assert(current_level != LEVEL1,
- "[Page Table] Attempted to call next_table on level 1 page table");
+ // TODO: Find another way to ensure the current page table is not LEVEL1
+ // exception_handling::assert(current_level != LEVEL1, "[Page Table] Attempted to call next_table on level 1 page
+ // table");
auto address = next_table_address(table_index);
- bool const success = address.has_value();
- if (success)
+ if (address)
{
- current_table = reinterpret_cast<table_content *>(address.value());
- current_level = static_cast<level>(current_level - 1U);
+ return reinterpret_cast<page_table *>(address.value());
}
- return success;
+ return std::nullopt;
}
auto page_table::operator[](std::size_t index) -> entry &
@@ -40,7 +32,7 @@ namespace teachos::arch::memory::paging
// C array is not bounds checked, therefore we have to check ourselves, to ensure no out of bounds reads, which
// could be incredibly hard to debug later.
exception_handling::assert(index < PAGE_TABLE_ENTRY_COUNT, "[Page Table] Index out of bounds");
- return current_table->entries[index];
+ return entries[index];
}
auto page_table::next_table_address(std::size_t table_index) -> std::optional<std::size_t>
@@ -49,7 +41,7 @@ namespace teachos::arch::memory::paging
if (entry.contains_flags(entry::PRESENT) && !entry.contains_flags(entry::HUGE_PAGE))
{
- std::size_t const table_address = reinterpret_cast<std::size_t>(current_table);
+ std::size_t const table_address = reinterpret_cast<std::size_t>(this);
return ((table_address << 9) | (table_index << 12));
}
return std::nullopt;