aboutsummaryrefslogtreecommitdiff
path: root/arch/x86_64/src/memory/paging
diff options
context:
space:
mode:
authorMatteo Gmür <matteo.gmuer1@ost.ch>2025-02-18 10:59:05 +0100
committerMatteo Gmür <matteo.gmuer1@ost.ch>2025-02-18 10:59:05 +0100
commitcd42c21f2460751428b3e1b4ae07ea0b924967bc (patch)
treee3e410f399c3eead444f2a242a19448571fd979a /arch/x86_64/src/memory/paging
parent47879f42d70755fcf5473ffb82798b515cb2e21b (diff)
parent3d488e53a1d15fcc01a7b1d23b9585ca7a724864 (diff)
downloadteachos-cd42c21f2460751428b3e1b4ae07ea0b924967bc.tar.xz
teachos-cd42c21f2460751428b3e1b4ae07ea0b924967bc.zip
Merge branch 'feat_memory_manager' into 'develop_sa'
Finish initial draft of Memory Manager See merge request teachos/kernel!3
Diffstat (limited to 'arch/x86_64/src/memory/paging')
-rw-r--r--arch/x86_64/src/memory/paging/active_page_table.cpp98
-rw-r--r--arch/x86_64/src/memory/paging/inactive_page_table.cpp20
-rw-r--r--arch/x86_64/src/memory/paging/page_entry.cpp58
-rw-r--r--arch/x86_64/src/memory/paging/page_table.cpp128
-rw-r--r--arch/x86_64/src/memory/paging/temporary_page.cpp29
-rw-r--r--arch/x86_64/src/memory/paging/virtual_page.cpp33
6 files changed, 366 insertions, 0 deletions
diff --git a/arch/x86_64/src/memory/paging/active_page_table.cpp b/arch/x86_64/src/memory/paging/active_page_table.cpp
new file mode 100644
index 0000000..0113869
--- /dev/null
+++ b/arch/x86_64/src/memory/paging/active_page_table.cpp
@@ -0,0 +1,98 @@
+#include "arch/memory/paging/active_page_table.hpp"
+
+namespace teachos::arch::memory::paging
+{
    namespace
    {
        /// Virtual address through which the level-4 page table is reachable.
        /// NOTE(review): presumably the recursive-mapping address — the last
        /// level-4 entry points back at the level-4 table itself (see the
        /// inactive_page_table constructor, which installs such an entry) —
        /// confirm the bootstrap code sets this up for the active table.
        paging::virtual_address constexpr PAGE_TABLE_LEVEL_4_ADDRESS = 0xffffffff'fffff000;
    }
+
+ auto active_page_table::create_or_get() -> active_page_table &
+ {
+ static page_table_handle active_handle{reinterpret_cast<page_table *>(PAGE_TABLE_LEVEL_4_ADDRESS),
+ page_table_handle::LEVEL4};
+ static active_page_table active_page{active_handle};
+ return active_page;
+ }
+
+ auto active_page_table::operator[](std::size_t index) -> entry & { return active_handle[index]; }
+
+ auto active_page_table::translate_address(virtual_address address) -> std::optional<allocator::physical_address>
+ {
+ auto const offset = address % allocator::PAGE_FRAME_SIZE;
+ auto const page = virtual_page::containing_address(address);
+ auto const frame = translate_page(page);
+
+ if (frame.has_value())
+ {
+ return frame.value().frame_number * allocator::PAGE_FRAME_SIZE + offset;
+ }
+
+ return std::nullopt;
+ }
+
    /**
     * @brief Resolves a virtual page to the physical frame backing it.
     *
     * Walks the table hierarchy from level 4 down to level 2 inside the loop;
     * the final level-1 lookup happens after the loop. Falls back to the
     * huge-page walk when an intermediate table is missing.
     */
    auto active_page_table::translate_page(virtual_page page) -> std::optional<allocator::physical_frame>
    {
        auto current_handle = active_handle;

        // Visits LEVEL4, LEVEL3 and LEVEL2; relies on operator-- defined for
        // page_table_handle::level (asserts on decrementing past LEVEL1).
        for (auto level = page_table_handle::LEVEL4; level != page_table_handle::LEVEL1; --level)
        {
            auto const next_handle = current_handle.next_table(page.get_level_index(level));
            // If the next table method failed then it is highly likely that it was a huge page and we therefore have to
            // parse the table differently. Therefore, we attempt to parse it using the method required by huge pages.
            if (!next_handle.has_value())
            {
                return translate_huge_page(page);
            }
            current_handle = next_handle.value();
        }

        // current_handle is now the level-1 table; its entry holds the frame.
        auto const level1_index = page.get_level_index(page_table_handle::LEVEL1);
        auto const level1_entry = current_handle[level1_index];
        return level1_entry.calculate_pointed_to_frame();
    }
+
    /**
     * @brief Fallback translation for pages mapped through huge pages.
     *
     * Checks the level-3 entry for a 1 GiB huge page and, failing that, the
     * level-2 entry for a 2 MiB huge page. The concrete 4 KiB frame inside the
     * huge mapping is computed from the remaining page-table index bits.
     */
    auto active_page_table::translate_huge_page(virtual_page page) -> std::optional<allocator::physical_frame>
    {
        auto current_handle = active_handle;
        auto level3_handle = current_handle.next_table(page.get_level_index(page_table_handle::LEVEL4));

        if (!level3_handle.has_value())
        {
            // No level-3 table exists at all, so the page is simply unmapped.
            return std::nullopt;
        }

        auto const level3_entry = level3_handle.value()[page.get_level_index(page_table_handle::LEVEL3)];
        auto const level3_frame = level3_entry.calculate_pointed_to_frame();
        if (level3_frame.has_value() && level3_entry.contains_flags(entry::HUGE_PAGE))
        {
            // A 1 GiB huge page spans 512 * 512 regular frames, so its start
            // frame must be aligned to that count.
            exception_handling::assert(
                level3_frame.value().frame_number % (PAGE_TABLE_ENTRY_COUNT * PAGE_TABLE_ENTRY_COUNT) == 0U,
                "[Page Mapper] Physical address must be 1 GiB aligned");
            // Offset into the huge page comes from the level-2 and level-1 index bits.
            return allocator::physical_frame{level3_frame.value().frame_number +
                                             page.get_level_index(page_table_handle::LEVEL2) * PAGE_TABLE_ENTRY_COUNT +
                                             page.get_level_index(page_table_handle::LEVEL1)};
        }

        auto level2_handle = level3_handle.value().next_table(page.get_level_index(page_table_handle::LEVEL3));
        if (level2_handle.has_value())
        {
            auto const level2_entry = level2_handle.value()[page.get_level_index(page_table_handle::LEVEL2)];
            auto const level2_frame = level2_entry.calculate_pointed_to_frame();
            if (level2_frame.has_value() && level2_entry.contains_flags(entry::HUGE_PAGE))
            {
                // A 2 MiB huge page spans 512 regular frames.
                exception_handling::assert(level2_frame.value().frame_number % PAGE_TABLE_ENTRY_COUNT == 0U,
                                           "[Page Mapper] Physical address must be 2 MiB aligned");
                return allocator::physical_frame{level2_frame.value().frame_number +
                                                 page.get_level_index(page_table_handle::LEVEL1)};
            }
        }
        // Neither a huge-page mapping nor a regular one was found.
        return std::nullopt;
    }
+
+ active_page_table::active_page_table(page_table_handle active_handle)
+ : active_handle(active_handle)
+ {
+ // Nothing to do
+ }
+} // namespace teachos::arch::memory::paging
diff --git a/arch/x86_64/src/memory/paging/inactive_page_table.cpp b/arch/x86_64/src/memory/paging/inactive_page_table.cpp
new file mode 100644
index 0000000..4e0610e
--- /dev/null
+++ b/arch/x86_64/src/memory/paging/inactive_page_table.cpp
@@ -0,0 +1,20 @@
+#include "arch/memory/paging/inactive_page_table.hpp"
+
+namespace teachos::arch::memory::paging
+{
+ inactive_page_table::inactive_page_table(allocator::physical_frame frame)
+ : page_table_level_4_frame{frame}
+ {
+ // Nothing to do
+ }
+
+ inactive_page_table::inactive_page_table(allocator::physical_frame frame, active_page_table & active_page_table,
+ temporary_page & temporary_page)
+ : page_table_level_4_frame{frame}
+ {
+ auto table = temporary_page.map_table_frame(page_table_level_4_frame, active_page_table);
+ table.zero_entries();
+ table[511].set_entry(page_table_level_4_frame, entry::PRESENT | entry::WRITABLE);
+ temporary_page.unmap_page(active_page_table);
+ }
+} // namespace teachos::arch::memory::paging
diff --git a/arch/x86_64/src/memory/paging/page_entry.cpp b/arch/x86_64/src/memory/paging/page_entry.cpp
new file mode 100644
index 0000000..5aa0982
--- /dev/null
+++ b/arch/x86_64/src/memory/paging/page_entry.cpp
@@ -0,0 +1,58 @@
+#include "arch/memory/paging/page_entry.hpp"
+
+#include "arch/exception_handling/assert.hpp"
+
+namespace teachos::arch::memory::paging
+{
    namespace
    {
        /// Bits 12-51 of a page-table entry hold the physical frame address;
        /// the remaining bits carry entry flags (see entry::get_flags, which
        /// returns exactly the complement of this mask).
        std::size_t constexpr PHYSICAL_ADDRESS_MASK = 0x000fffff'fffff000;
    } // namespace
+
    /// @brief Constructs an entry directly from a raw 64-bit flag/address pattern.
    entry::entry(uint64_t flags)
        : flags(flags)
    {
        // Nothing to do.
    }
+
    /**
     * @brief Derives page-table entry flags from an ELF section's flags so the
     *        section can be mapped with matching permissions.
     *
     * NOTE(review): unset bits rely on `flags` having a zero in-class
     * initializer — confirm in the header; otherwise they are indeterminate.
     */
    entry::entry(multiboot::elf_section_flags elf_flags)
    {
        // Only sections that occupy memory need to be present in the tables.
        if (elf_flags.contains_flags(multiboot::elf_section_flags::OCCUPIES_MEMORY))
        {
            flags |= entry::PRESENT;
        }
        if (elf_flags.contains_flags(multiboot::elf_section_flags::WRITABLE))
        {
            flags |= entry::WRITABLE;
        }
        // Anything not marked as executable code gets the no-execute flag.
        if (!elf_flags.contains_flags(multiboot::elf_section_flags::EXECUTABLE_CODE))
        {
            flags |= entry::EXECUTING_CODE_FORBIDDEN;
        }
    }
+
+ auto entry::is_unused() const -> bool { return flags == 0U; }
+
+ auto entry::set_unused() -> void { flags = 0U; }
+
    /**
     * @brief Extracts the physical frame this entry points to.
     * @return The frame if the entry is present; std::nullopt otherwise.
     */
    auto entry::calculate_pointed_to_frame() const -> std::optional<allocator::physical_frame>
    {
        if (contains_flags(PRESENT))
        {
            // Mask away the flag bits, leaving only the physical address bits.
            auto const address = flags.to_ulong() & PHYSICAL_ADDRESS_MASK;
            return allocator::physical_frame::containing_address(address);
        }
        return std::nullopt;
    }
+
+ auto entry::contains_flags(std::bitset<64U> other) const -> bool { return (flags & other) == other; }
+
    /**
     * @brief Points this entry at @p frame and applies the given flag bits.
     * @param frame Physical frame the entry should reference; must be page aligned.
     * @param additional_flags Flag bits (e.g. PRESENT, WRITABLE) to set on the entry.
     */
    auto entry::set_entry(allocator::physical_frame frame, std::bitset<64U> additional_flags) -> void
    {
        // A page-aligned start address has no bits outside the address mask.
        exception_handling::assert((frame.start_address() & ~PHYSICAL_ADDRESS_MASK) == 0,
                                   "[Paging Entry] Start address is not aligned with page");
        flags = frame.start_address() | additional_flags.to_ulong();
    }
+
+ auto entry::get_flags() const -> std::bitset<64U> { return flags.to_ulong() & ~PHYSICAL_ADDRESS_MASK; }
+} // namespace teachos::arch::memory::paging
diff --git a/arch/x86_64/src/memory/paging/page_table.cpp b/arch/x86_64/src/memory/paging/page_table.cpp
new file mode 100644
index 0000000..eb11810
--- /dev/null
+++ b/arch/x86_64/src/memory/paging/page_table.cpp
@@ -0,0 +1,128 @@
+#include "arch/memory/paging/page_table.hpp"
+
#include <algorithm>
#include <array>
#include <memory>
#include <type_traits>
+
/*
 * Linker-provided symbol marking the end of the kernel image. The reference
 * cannot reside inside a namespace, because the compiler would then try to
 * find a mangled arch::memory::paging::_end_of_image symbol in the ELF file.
 * NOTE(review): not referenced anywhere in this translation unit — confirm it
 * is still needed here.
 */
extern char _end_of_image;
+
+namespace teachos::arch::memory::paging
+{
+ /**
+ * @brief A Page table containing 512 entries.
+ */
+ struct page_table
+ {
+ auto zero_entries() -> void;
+
+ auto is_empty() const -> bool;
+
+ auto next_table(std::size_t table_index) const -> std::optional<page_table *>;
+
+ auto operator[](std::size_t index) -> entry &;
+
+ auto operator[](std::size_t index) const -> entry const &;
+
+ private:
+ /**
+ * @brief Calculates the address of the next page table level for the given table index.
+ *
+ * @note The next page table address is only valid if the corresponding entry is present and not a huge page.
+ * Meaning we use an index into a Level 4 page table to get the according Level 3 page table address.
+ *
+ * @param table_index Index of this page table in the page table one level higher.
+ * @return An optional of the address of the next page table or null.
+ */
+ auto next_table_address(std::size_t table_index) const -> std::optional<std::size_t>;
+
+ std::array<entry, PAGE_TABLE_ENTRY_COUNT> entries =
+ {}; ///< Entries containing addresses to page tables of a level below or
+ ///< actual virtual addresses for the level 1 page table.
+ };
+
+ auto page_table::zero_entries() -> void
+ {
+ std::ranges::for_each(entries, [](auto & entry) { entry.set_unused(); });
+ }
+
+ auto page_table::is_empty() const -> bool
+ {
+ return std::all_of(entries.begin(), entries.end(), [](entry const & entry) { return entry.is_unused(); });
+ }
+
+ auto page_table::next_table(std::size_t table_index) const -> std::optional<page_table *>
+ {
+ auto const address = next_table_address(table_index);
+ if (address.has_value())
+ {
+ return reinterpret_cast<page_table *>(address.value());
+ }
+ return std::nullopt;
+ }
+
    /// @brief Bounds-checked mutable access to the entry at @p index.
    auto page_table::operator[](std::size_t index) -> entry &
    {
        exception_handling::assert(index < PAGE_TABLE_ENTRY_COUNT, "[Page Table] Index out of bounds");
        return entries[index];
    }
+
    /// @brief Bounds-checked read-only access to the entry at @p index.
    auto page_table::operator[](std::size_t index) const -> entry const &
    {
        exception_handling::assert(index < PAGE_TABLE_ENTRY_COUNT, "[Page Table] Index out of bounds");
        return entries[index];
    }
+
+ auto page_table::next_table_address(std::size_t table_index) const -> std::optional<std::size_t>
+ {
+ auto const entry = this->operator[](table_index);
+
+ if (entry.contains_flags(entry::PRESENT) && !entry.contains_flags(entry::HUGE_PAGE))
+ {
+ auto const table_address = reinterpret_cast<std::size_t>(this);
+ return ((table_address << 9) | (table_index << 12));
+ }
+ return std::nullopt;
+ }
+
+ page_table_handle::page_table_handle(page_table * table, page_table_handle::level table_level)
+ : table(table)
+ , table_level(table_level)
+ {
+ exception_handling::assert(table != nullptr,
+ "[Page Table] Attempted to pass nullptr as table to page table table method");
+ }
+
+ auto page_table_handle::zero_entries() -> void { table->zero_entries(); }
+
+ auto page_table_handle::is_empty() const -> bool { return table->is_empty(); }
+
+ auto page_table_handle::next_table(std::size_t table_index) const -> std::optional<page_table_handle>
+ {
+ exception_handling::assert(table_level != page_table_handle::LEVEL1,
+ "[Page Table] Attempted to call next_table on level 1 page table");
+ auto const next_table = table->next_table(table_index);
+ if (next_table.has_value())
+ {
+ auto const new_level = static_cast<page_table_handle::level>(table_level - 1);
+ return page_table_handle{next_table.value(), new_level};
+ }
+ return std::nullopt;
+ }
+
+ auto page_table_handle::get_level() const -> page_table_handle::level { return table_level; }
+
+ auto page_table_handle::operator[](std::size_t index) -> entry & { return table->operator[](index); }
+
+ auto operator--(page_table_handle::level & value) -> page_table_handle::level &
+ {
+ exception_handling::assert(value != page_table_handle::LEVEL1,
+ "[Page table] Attempted to decrement enum to value outside of range");
+ auto new_value = static_cast<std::underlying_type<page_table_handle::level>::type>(value);
+ value = static_cast<page_table_handle::level>(--new_value);
+ return value;
+ }
+} // namespace teachos::arch::memory::paging
diff --git a/arch/x86_64/src/memory/paging/temporary_page.cpp b/arch/x86_64/src/memory/paging/temporary_page.cpp
new file mode 100644
index 0000000..152241d
--- /dev/null
+++ b/arch/x86_64/src/memory/paging/temporary_page.cpp
@@ -0,0 +1,29 @@
+#include "arch/memory/paging/temporary_page.hpp"
+
+#include "arch/memory/paging/page_entry.hpp"
+
+namespace teachos::arch::memory::paging
+{
    /**
     * @brief Temporarily maps @p frame and exposes it as a page table handle.
     *
     * NOTE(review): the handle is created as LEVEL1 regardless of which level
     * the frame's table actually belongs to — presumably to forbid calling
     * next_table() on temporary mappings (the LEVEL1 assert fires); confirm
     * this is intended.
     */
    auto temporary_page::map_table_frame(allocator::physical_frame frame,
                                         active_page_table & active_table) -> page_table_handle
    {
        page_table_handle handle{reinterpret_cast<page_table *>(map_to_frame(frame, active_table)),
                                 page_table_handle::LEVEL1};
        return handle;
    }
+
+ auto temporary_page::map_to_frame(allocator::physical_frame frame,
+ active_page_table & active_table) -> virtual_address
+ {
+ exception_handling::assert(!active_table.translate_page(page).has_value(),
+ "[Temporary page] Page is already mapped");
+
+ active_table.map_page_to_frame(allocator, page, frame, entry::WRITABLE);
+ return page.start_address();
+ }
+
+ auto temporary_page::unmap_page(active_page_table & active_table) -> void
+ {
+ active_table.unmap_page(allocator, page);
+ }
+} // namespace teachos::arch::memory::paging
diff --git a/arch/x86_64/src/memory/paging/virtual_page.cpp b/arch/x86_64/src/memory/paging/virtual_page.cpp
new file mode 100644
index 0000000..d374156
--- /dev/null
+++ b/arch/x86_64/src/memory/paging/virtual_page.cpp
@@ -0,0 +1,33 @@
+#include "arch/memory/paging/virtual_page.hpp"
+
+#include "arch/exception_handling/assert.hpp"
+
+namespace teachos::arch::memory::paging
+{
+ auto virtual_page::containing_address(virtual_address address) -> virtual_page
+ {
+ exception_handling::assert(address < 0x00008000'00000000 || address >= 0xffff8000'00000000,
+ "[Virtual Page] Attempted to create virtual page from invalid address");
+ return virtual_page{address / allocator::PAGE_FRAME_SIZE};
+ }
+
+ auto virtual_page::start_address() const -> virtual_address { return page_number * allocator::PAGE_FRAME_SIZE; }
+
    /// @brief Extracts this page's 9-bit table index for the given level.
    /// NOTE(review): assumes 4 KiB pages (9 index bits per level) and that the
    /// level enum's underlying values run 0 (LEVEL1) through 3 (LEVEL4) — the
    /// enum definition is not visible here; confirm against the header.
    auto virtual_page::get_level_index(page_table_handle::level level) const -> size_t
    {
        return (page_number >> (level * 9U)) & 0x1FF;
    }
+
+ auto virtual_page::operator++(int) -> virtual_page
+ {
+ virtual_page const old_value = *this;
+ ++page_number;
+ return old_value;
+ }
+
+ auto virtual_page::operator++() -> virtual_page &
+ {
+ ++page_number;
+ return *this;
+ }
+} // namespace teachos::arch::memory::paging