#include "kapi/memory.hpp"
#include "kapi/boot.hpp"
#include "kapi/system.hpp"
#include "x86_64/boot/boot.hpp"
#include "x86_64/boot/ld.hpp"
#include "x86_64/cpu/registers.hpp"
#include "x86_64/memory/buffered_allocator.hpp"
#include "x86_64/memory/kernel_mapper.hpp"
#include "x86_64/memory/mmu.hpp"
#include "x86_64/memory/page_table.hpp"
#include "x86_64/memory/page_utilities.hpp"
#include "x86_64/memory/paging_root.hpp"
#include "x86_64/memory/recursive_page_mapper.hpp"
#include "x86_64/memory/region_allocator.hpp"
#include "x86_64/memory/scoped_mapping.hpp"

// NOTE(review): the source arrived with ten bare `#include` lines — the
// bracketed header names were stripped in transit. The list below is
// reconstructed from usage in this file; confirm against the original.
#include <kstd/print.hpp> // kstd::println — presumably a bracketed project include; verify

#include <atomic>   // std::atomic_flag
#include <bit>      // std::bit_cast
#include <cstddef>  // std::byte
#include <cstdint>  // std::uintptr_t
#include <memory>   // std::construct_at
#include <optional> // std::optional
#include <span>     // std::span
#include <utility>  // std::make_pair

namespace teachos::memory {

namespace {

//! Linear address of a scratch page used as a temporary mapping window while
//! grafting the new PML4; assumed unmapped at this point in boot.
constexpr auto static unused_page_address = linear_address{0x0000'7fff'cafe'faceuz};

//! PML4 slot reserved for the recursive (self-referencing) page-map entry.
constexpr auto static recursive_page_map_index = x86_64::page_table::entry_count - 2;

//! Instantiate a basic, memory region based, early frame allocator for remapping.
//!
//! Panics when the bootloader handed us no memory map, since the early
//! allocator cannot operate without one.
auto collect_memory_information()
{
    auto memory_map = boot::bootstrap_information.mbi->maybe_memory_map();
    if (!memory_map) {
        system::panic("[x86_64] Failed to create early allocator, no memory map available.");
    }

    auto const & mbi = boot::bootstrap_information.mbi;
    // View the multiboot information structure as raw bytes so its extent can
    // be reported to the region allocator alongside the kernel image extent.
    // NOTE(review): the std::bit_cast destination type was stripped in transit;
    // a byte pointer matches the size_bytes() element count — confirm.
    auto mbi_span = std::span{std::bit_cast<std::byte const *>(mbi), mbi->size_bytes()};
    auto image_span = std::span{&boot::x86_64::_start_physical, &boot::x86_64::_end_physical};

    return x86_64::region_allocator::memory_information{
        .image_range = std::make_pair(physical_address{&image_span.front()}, physical_address{&image_span.back()}),
        .mbi_range = std::make_pair(physical_address{&mbi_span.front()}, physical_address{&mbi_span.back()}),
        .memory_map = *memory_map,
    };
}

//! Enable additional CPU protection features, required during later stages of the kernel.
auto enable_cpu_protections() -> void
{
    // CR0.WP: enforce read-only pages for supervisor-mode accesses as well.
    cpu::x86_64::cr0::set(cpu::x86_64::cr0::flags::write_protect);
    // EFER.NXE: honour the execute-disable bit in page-table entries.
    cpu::x86_64::i32_efer::set(cpu::x86_64::i32_efer::flags::execute_disable_bit_enable);
}
Inject, or graft, a faux recursive PML4 into the active page mapping structure. auto inject_faux_pml4(frame_allocator & allocator, page_mapper & mapper) { using namespace x86_64; using entry_flags = page_table::entry::flags; auto page = page::containing(unused_page_address); auto temporary_mapper = scoped_mapping{page, mapper}; auto new_pml4_frame = allocator.allocate(); auto pml4 = std::construct_at(temporary_mapper.map_as(*new_pml4_frame, entry_flags::writable)); (*pml4)[recursive_page_map_index].frame(new_pml4_frame.value(), entry_flags::present | entry_flags::writable); auto pml4_index = pml_index<4>(page); auto old_pml4 = paging_root::get(); auto pml4_entry = (*old_pml4)[pml4_index]; auto pml3_index = pml_index<3>(page); auto old_pml3 = old_pml4->next(pml4_index); auto pml3_entry = (**old_pml3)[pml3_index]; auto pml2_index = pml_index<2>(page); auto old_pml2 = (**old_pml3).next(pml3_index); auto pml2_entry = (**old_pml2)[pml2_index]; auto pml1_index = pml_index<1>(page); auto old_pml1 = (**old_pml2).next(pml2_index); auto pml1_entry = (**old_pml1)[pml1_index]; (*paging_root::get())[recursive_page_map_index].frame(new_pml4_frame.value(), entry_flags::present | entry_flags::writable); tlb_flush_all(); auto new_pml4 = paging_root::get(); (*new_pml4)[pml4_index] = pml4_entry; auto new_pml3 = new_pml4->next(pml4_index); (**new_pml3)[pml3_index] = pml3_entry; auto new_pml2 = (**new_pml3).next(pml3_index); (**new_pml2)[pml2_index] = pml2_entry; auto new_pml1 = (**new_pml2).next(pml2_index); (**new_pml1)[pml1_index] = pml1_entry; return *new_pml4_frame; } auto remap_kernel(page_mapper & mapper) -> void { auto kernel_mapper = x86_64::kernel_mapper{boot::bootstrap_information.mbi}; kernel_mapper.remap_kernel(mapper); } auto remap_vga_text_mode_buffer(page_mapper & mapper) -> void { constexpr auto vga_base = std::uintptr_t{0xb8000}; auto vga_physical_start = physical_address{vga_base}; auto vga_virtual_start = linear_address{vga_base + 
std::bit_cast(&boot::x86_64::TEACHOS_VMA)}; auto page = page::containing(vga_virtual_start); auto frame = frame::containing(vga_physical_start); mapper.map(page, frame, page_mapper::flags::writable); } auto remap_multiboot_information(page_mapper & mapper) -> void { auto mbi_base = std::bit_cast(boot::bootstrap_information.mbi); auto mbi_size = boot::bootstrap_information.mbi->size_bytes(); auto mbi_physical_start = physical_address{mbi_base & ~std::bit_cast(&boot::x86_64::TEACHOS_VMA)}; auto mbi_virtual_start = linear_address{mbi_base}; auto mbi_block_count = (mbi_size + PLATFORM_FRAME_SIZE - 1) / PLATFORM_FRAME_SIZE; for (auto i = 0uz; i < mbi_block_count; ++i) { auto page = page::containing(mbi_virtual_start) + i; auto frame = frame::containing(mbi_physical_start) + i; mapper.map(page, frame, page_mapper::flags::empty); } } auto constinit region_based_allocator = std::optional{}; auto constinit buffered_allocator = std::optional>{}; auto constinit recursive_page_mapper = std::optional{}; } // namespace auto init() -> void { auto static constinit is_initialized = std::atomic_flag{}; if (is_initialized.test_and_set()) { system::panic("[x86_64] Memory management has already been initialized."); } kstd::println("[x86_64:MEM] Enabling additional CPU protection features."); enable_cpu_protections(); region_based_allocator.emplace(collect_memory_information()); buffered_allocator.emplace(&*region_based_allocator); recursive_page_mapper.emplace(*buffered_allocator); kstd::println("[x86_64:MEM] Preparing new paging hierarchy."); auto new_pml4_frame = inject_faux_pml4(*buffered_allocator, *recursive_page_mapper); remap_kernel(*recursive_page_mapper); remap_vga_text_mode_buffer(*recursive_page_mapper); remap_multiboot_information(*recursive_page_mapper); kstd::println("[x86_64:MEM] Switching to new paging hierarchy."); auto cr3 = cpu::x86_64::cr3::read(); cr3.frame(new_pml4_frame); cpu::x86_64::cr3::write(cr3); set_frame_allocator(*buffered_allocator); 
set_page_mapper(*recursive_page_mapper); } } // namespace teachos::memory