aboutsummaryrefslogtreecommitdiff
path: root/arch
diff options
context:
space:
mode:
authorFelix Morgner <felix.morgner@ost.ch>2026-03-17 14:19:02 +0100
committerFelix Morgner <felix.morgner@ost.ch>2026-03-17 14:19:02 +0100
commit541670e49812b5b07079cc86367247402ace331a (patch)
tree1276c3664c73f8d44c5c028b5c88ac2decaa9b66 /arch
parent796ce76185b00feb86f6b4f738ac6f953c247116 (diff)
downloadteachos-541670e49812b5b07079cc86367247402ace331a.tar.xz
teachos-541670e49812b5b07079cc86367247402ace331a.zip
x86_64/memory: finish HHDM-based mapper
Diffstat (limited to 'arch')
-rw-r--r--arch/x86_64/include/arch/memory/higher_half_mapper.hpp25
-rw-r--r--arch/x86_64/include/arch/memory/page_utilities.hpp15
-rw-r--r--arch/x86_64/kapi/memory.cpp109
-rw-r--r--arch/x86_64/src/boot/boot32.S2
-rw-r--r--arch/x86_64/src/memory/higher_half_mapper.cpp104
5 files changed, 191 insertions, 64 deletions
diff --git a/arch/x86_64/include/arch/memory/higher_half_mapper.hpp b/arch/x86_64/include/arch/memory/higher_half_mapper.hpp
index 66fb58b..c8df40c 100644
--- a/arch/x86_64/include/arch/memory/higher_half_mapper.hpp
+++ b/arch/x86_64/include/arch/memory/higher_half_mapper.hpp
@@ -2,16 +2,41 @@
#define TEACHOS_X86_64_HIGHER_HALF_MAPPER_HPP
#include "kapi/memory.hpp"
+
+#include "arch/memory/page_table.hpp"
+
+#include <cstddef>
+
namespace arch::memory
{
+ //! A simple page mapper making use of a Higher Half Direct Map (HHDM) to access and modify page tables.
struct higher_half_mapper : kapi::memory::page_mapper
{
+ //! Construct a new mapper for a hierarchy rooted in the given PML.
+ //!
+ //! @param root The root of the hierarchy to operate on.
+ explicit higher_half_mapper(page_table * root);
+
+ //! @copydoc kapi::memory::page_mapper::map
auto map(kapi::memory::page page, kapi::memory::frame frame, flags flags) -> std::byte * override;
+ //! @copydoc kapi::memory::page_mapper::unmap
auto unmap(kapi::memory::page page) -> void override;
+ //! @copydoc kapi::memory::page_mapper::try_unmap
auto try_unmap(kapi::memory::page page) noexcept -> bool override;
+
+ private:
+ //! Try to retrieve the PML1 responsible for mapping this page, creating one if necessary.
+ //!
+ //! This function will create a page table hierarchy leading to the target PML1 if it doesn't exist.
+ //!
+ //! @param page The page to get the PML1 for.
+ //! @return The PML1 that manages the given page, nullptr if the system runs out of memory.
+ auto get_or_create_page_table(kapi::memory::page page) noexcept -> page_table *;
+
+ page_table * m_root;
};
} // namespace arch::memory
diff --git a/arch/x86_64/include/arch/memory/page_utilities.hpp b/arch/x86_64/include/arch/memory/page_utilities.hpp
index 8c25af3..c48e74f 100644
--- a/arch/x86_64/include/arch/memory/page_utilities.hpp
+++ b/arch/x86_64/include/arch/memory/page_utilities.hpp
@@ -8,15 +8,22 @@
namespace arch::memory
{
- template<std::size_t Level>
- requires(Level > 0uz && Level < 5uz)
- constexpr auto pml_index(kapi::memory::page page) noexcept -> std::size_t
+ constexpr auto inline pml_index(std::size_t index, kapi::memory::page page) noexcept -> std::size_t
{
- constexpr auto shift_width = (Level - 1) * 9;
+ constexpr auto bits_per_level = 9;
+ auto shift_width = (index - 1) * bits_per_level;
constexpr auto index_mask = 0x1ffuz;
return page.number() >> shift_width & index_mask;
}
+ template<typename ValueType = void>
+ [[nodiscard]] constexpr auto to_higher_half_pointer(kapi::memory::physical_address address) -> ValueType *
+ {
+ using namespace kapi::memory;
+ auto const higher_half_address = higher_half_direct_map_base + address.raw();
+ return static_cast<ValueType *>(higher_half_address);
+ }
+
} // namespace arch::memory
#endif \ No newline at end of file
diff --git a/arch/x86_64/kapi/memory.cpp b/arch/x86_64/kapi/memory.cpp
index f74eea6..a9e1216 100644
--- a/arch/x86_64/kapi/memory.cpp
+++ b/arch/x86_64/kapi/memory.cpp
@@ -6,9 +6,10 @@
#include "arch/boot/boot.hpp"
#include "arch/boot/ld.hpp"
#include "arch/cpu/registers.hpp"
+#include "arch/memory/higher_half_mapper.hpp"
#include "arch/memory/kernel_mapper.hpp"
-#include "arch/memory/mmu.hpp"
#include "arch/memory/page_table.hpp"
+#include "arch/memory/page_utilities.hpp"
#include "arch/memory/region_allocator.hpp"
#include <kstd/print>
@@ -21,6 +22,7 @@
#include <bit>
#include <cstddef>
#include <cstdint>
+#include <memory>
#include <optional>
#include <ranges>
#include <span>
@@ -32,6 +34,7 @@ namespace kapi::memory
namespace
{
auto constinit region_based_allocator = std::optional<arch::memory::region_allocator>{};
+ auto constinit higher_half_mapper = std::optional<arch::memory::higher_half_mapper>{};
//! Instantiate a basic, memory region based, early frame allocator for remapping.
auto collect_memory_information()
@@ -53,6 +56,27 @@ namespace kapi::memory
};
}
+ auto establish_higher_half_direct_mapping() -> void
+ {
+ auto pml3_frame = kapi::memory::allocate_frame();
+ auto pml3 = static_cast<arch::memory::page_table *>(pml3_frame->start_address());
+ pml3->clear();
+
+ std::ranges::for_each(std::views::iota(0uz, 512uz), [&](auto index) {
+ auto frame = kapi::memory::frame{(1024uz * 1024uz * 1024uz * index)};
+ auto & entry = (*pml3)[index];
+ entry.frame(frame, arch::memory::page_table::entry::flags::present |
+ arch::memory::page_table::entry::flags::writable |
+ arch::memory::page_table::entry::flags::huge_page);
+ });
+
+ auto current_cr3 = arch::cpu::cr3::read();
+ auto pml4 = static_cast<arch::memory::page_table *>(current_cr3.address());
+ (*pml4)[256].frame(*pml3_frame, arch::memory::page_table::entry::flags::present |
+ arch::memory::page_table::entry::flags::writable |
+ arch::memory::page_table::entry::flags::global);
+ }
+
//! Enable additional CPU protection features, required during later stages of the kernel.
auto enable_cpu_protections() -> void
{
@@ -62,7 +86,8 @@ namespace kapi::memory
[[maybe_unused]] auto remap_kernel(page_mapper & mapper) -> void
{
- auto kernel_mapper = arch::memory::kernel_mapper{boot::bootstrap_information.mbi};
+ auto mbi_pointer = boot::bootstrap_information.mbi;
+ auto kernel_mapper = arch::memory::kernel_mapper{mbi_pointer};
kernel_mapper.remap_kernel(mapper);
}
@@ -133,39 +158,6 @@ namespace kapi::memory
std::ranges::for_each(std::views::iota(mbi_start, mbi_end), [&](auto frame) { new_allocator.mark_used(frame); });
}
- auto establish_higher_half_direct_mapping() -> void
- {
- auto hhdm_frame = kapi::memory::allocate_frame();
- auto hhdm_pml3 = static_cast<arch::memory::page_table *>(hhdm_frame->start_address());
- hhdm_pml3->clear();
-
- std::ranges::for_each(std::views::iota(0uz, 512uz), [&](auto index) {
- auto frame = kapi::memory::frame{(1024uz * 1024uz * 1024uz * index)};
- auto & entry = (*hhdm_pml3)[index];
- entry.frame(frame, arch::memory::page_table::entry::flags::present |
- arch::memory::page_table::entry::flags::writable |
- arch::memory::page_table::entry::flags::huge_page);
- });
-
- auto current_cr3 = arch::cpu::cr3::read();
- auto pml4_address = linear_address{current_cr3.address().raw()};
- auto pml4 = static_cast<arch::memory::page_table *>(pml4_address);
- (*pml4)[256].frame(*hhdm_frame, arch::memory::page_table::entry::flags::present |
- arch::memory::page_table::entry::flags::writable |
- arch::memory::page_table::entry::flags::global);
- }
-
- auto clear_lower_address_space() -> void
- {
- auto current_cr3 = arch::cpu::cr3::read();
- auto pml4_address = memory::higher_half_direct_map_base + current_cr3.address().raw();
- auto pml4 = static_cast<arch::memory::page_table *>(pml4_address);
-
- std::ranges::for_each(std::views::iota(0uz, 1uz), [&](auto index) { (*pml4)[index].clear(); });
-
- arch::memory::tlb_flush_all();
- }
-
} // namespace
auto init() -> void
@@ -187,31 +179,44 @@ namespace kapi::memory
kstd::println("[x86_64:MEM] Establishing higher-half direct mapping.");
establish_higher_half_direct_mapping();
- clear_lower_address_space();
kstd::println("[x86_64:MEM] Preparing new paging hierarchy.");
- // remap_kernel(*recursive_page_mapper);
- // remap_vga_text_mode_buffer(*recursive_page_mapper);
- // remap_multiboot_information(*recursive_page_mapper);
+ auto new_pml4_frame = kapi::memory::allocate_frame();
+ if (!new_pml4_frame)
+ {
+ system::panic("[x86_64:MEM] Failed to allocate new PML4!");
+ }
+ auto new_pml4 = arch::memory::to_higher_half_pointer<arch::memory::page_table>(new_pml4_frame->start_address());
+ std::construct_at(new_pml4);
+
+ higher_half_mapper.emplace(new_pml4);
+ set_page_mapper(*higher_half_mapper);
+
+ remap_kernel(*higher_half_mapper);
+ remap_vga_text_mode_buffer(*higher_half_mapper);
+ remap_multiboot_information(*higher_half_mapper);
+
+ auto current_cr3 = arch::cpu::cr3::read();
+ auto old_pml4 = static_cast<arch::memory::page_table *>(current_cr3.address());
+ (*new_pml4)[256] = (*old_pml4)[256];
- // kstd::println("[x86_64:MEM] Switching to new paging hierarchy.");
+ kstd::println("[x86_64:MEM] Switching to new paging hierarchy.");
- // auto cr3 = arch::cpu::cr3::read();
- // cr3.frame(new_pml4_frame);
- // arch::cpu::cr3::write(cr3);
+ auto cr3 = arch::cpu::cr3::read();
+ cr3.frame(*new_pml4_frame);
+ arch::cpu::cr3::write(cr3);
- // auto memory_map = boot::bootstrap_information.mbi->memory_map();
- // auto highest_byte = std::ranges::max(std::views::transform(
- // std::views::filter(memory_map.regions(),
- // [](auto const & region) { return region.type == multiboot2::memory_type::available; }),
- // [](auto const & region) { return region.base + region.size_in_B; }));
+ auto memory_map = boot::bootstrap_information.mbi->memory_map();
+ auto highest_byte = std::ranges::max(std::views::transform(
+ std::views::filter(memory_map.regions(),
+ [](auto const & region) { return region.type == multiboot2::memory_type::available; }),
+ [](auto const & region) { return region.base + region.size_in_B; }));
- // init_pmm(frame::containing(physical_address{highest_byte}).number() + 1, handoff_to_kernel_pmm);
+ init_pmm(frame::containing(physical_address{highest_byte}).number() + 1, handoff_to_kernel_pmm);
- // kstd::println("[x86_64:MEM] Releasing bootstrap memory allocators.");
- // allocation_buffer.reset();
- // region_based_allocator.reset();
+ kstd::println("[x86_64:MEM] Releasing bootstrap memory allocators.");
+ region_based_allocator.reset();
}
} // namespace kapi::memory
diff --git a/arch/x86_64/src/boot/boot32.S b/arch/x86_64/src/boot/boot32.S
index 694b8b7..1c2fdaf 100644
--- a/arch/x86_64/src/boot/boot32.S
+++ b/arch/x86_64/src/boot/boot32.S
@@ -306,7 +306,7 @@ _assert_cpu_supports_long_mode:
pie_function_end
/**
- * @brief Prepare a recursive page map hierarchy
+ * @brief Prepare a basic page map hierarchy
*
* @param ebp+8 The number of huge pages to map
* @return void
diff --git a/arch/x86_64/src/memory/higher_half_mapper.cpp b/arch/x86_64/src/memory/higher_half_mapper.cpp
index 9fe3c89..abb54a3 100644
--- a/arch/x86_64/src/memory/higher_half_mapper.cpp
+++ b/arch/x86_64/src/memory/higher_half_mapper.cpp
@@ -1,29 +1,119 @@
#include "arch/memory/higher_half_mapper.hpp"
#include "kapi/memory.hpp"
+#include "kapi/system.hpp"
+#include "arch/memory/page_table.hpp"
+#include "arch/memory/page_utilities.hpp"
+
+#include <algorithm>
+#include <array>
#include <cstddef>
+#include <memory>
+#include <ranges>
+#include <utility>
namespace arch::memory
{
+ higher_half_mapper::higher_half_mapper(page_table * root)
+ : m_root{root}
+ {}
+
auto higher_half_mapper::map(kapi::memory::page page, kapi::memory::frame frame, flags flags) -> std::byte *
{
- static_cast<void>(page);
- static_cast<void>(frame);
- static_cast<void>(flags);
- return nullptr;
+ auto table = get_or_create_page_table(page);
+ if (!table)
+ {
+ return nullptr;
+ }
+
+ auto const index = pml_index(1, page);
+ auto & entry = (*table)[index];
+
+ if (entry.present())
+ {
+ kapi::system::panic("[x86_64:MEM] Tried to map a page that is already mapped!");
+ }
+
+ entry.frame(frame, to_table_flags(flags) | page_table::entry::flags::present);
+
+ return static_cast<std::byte *>(page.start_address());
}
auto higher_half_mapper::unmap(kapi::memory::page page) -> void
{
- static_cast<void>(page);
+ if (!try_unmap(page))
+ {
+ kapi::system::panic("[x86_64:MEM] Tried to unmap a page that is not mapped!");
+ }
}
auto higher_half_mapper::try_unmap(kapi::memory::page page) noexcept -> bool
{
- static_cast<void>(page);
- return false;
+ auto table_path = std::array<std::pair<page_table *, std::size_t>, 4>{};
+ table_path[0] = std::pair{m_root, pml_index(4, page)};
+
+ for (auto level = 4uz; level > 1uz; --level)
+ {
+ auto [table, index] = table_path[4 - level];
+ auto & entry = (*table)[index];
+
+ if (!entry.present())
+ {
+ return false;
+ }
+
+ auto next_table = to_higher_half_pointer<page_table>(entry.frame()->start_address());
+ auto next_index = pml_index(4 - level - 1, page);
+ table_path[4 - level - 1] = std::pair{next_table, next_index};
+ }
+
+ std::ranges::for_each(std::views::reverse(table_path), [previous_was_empty = true](auto & step) mutable {
+ auto [table, index] = step;
+ auto & entry = (*table)[index];
+ auto frame = entry.frame();
+
+ if (previous_was_empty)
+ {
+ entry.clear();
+ previous_was_empty = table->empty();
+ kapi::memory::get_frame_allocator().release(*frame);
+ }
+ });
+
+ return true;
+ }
+
+ auto higher_half_mapper::get_or_create_page_table(kapi::memory::page page) noexcept -> page_table *
+ {
+ auto table = m_root;
+
+ for (auto level = 4uz; level > 1uz; --level)
+ {
+ auto index = pml_index(level, page);
+ auto & entry = (*table)[index];
+
+ if (!entry.present())
+ {
+ auto table_frame = kapi::memory::allocate_frame();
+ if (!table_frame)
+ {
+ return nullptr;
+ }
+
+ auto new_table = to_higher_half_pointer<page_table>(table_frame->start_address());
+ std::construct_at(new_table);
+
+ auto const flags = page_table::entry::flags::present | page_table::entry::flags::writable |
+ page_table::entry::flags::user_accessible;
+ entry.frame(*table_frame, flags);
+ }
+
+ table = to_higher_half_pointer<page_table>(entry.frame()->start_address());
+ }
+
+ return table;
}
} // namespace arch::memory \ No newline at end of file