#include "kapi/memory.hpp" #include "kapi/boot.hpp" #include "kapi/memory/buffered_allocator.hpp" #include "kapi/system.hpp" #include "arch/boot/boot.hpp" #include "arch/boot/ld.hpp" #include "arch/cpu/registers.hpp" #include "arch/memory/kernel_mapper.hpp" #include "arch/memory/mmu.hpp" #include "arch/memory/page_table.hpp" #include "arch/memory/page_utilities.hpp" #include "arch/memory/paging_root.hpp" #include "arch/memory/recursive_page_mapper.hpp" #include "arch/memory/region_allocator.hpp" #include "arch/memory/scoped_mapping.hpp" #include #include #include #include #include #include #include #include #include #include #include #include #include namespace kapi::memory { namespace { constexpr auto static unused_page_address = linear_address{0x0000'7fff'cafe'faceuz}; constexpr auto static recursive_page_map_index = arch::memory::page_table::entry_count - 2; auto constinit region_based_allocator = std::optional{}; auto constinit allocation_buffer = std::optional>{}; auto constinit recursive_page_mapper = std::optional{}; //! Instantiate a basic, memory region based, early frame allocator for remapping. auto collect_memory_information() { auto memory_map = boot::bootstrap_information.mbi->maybe_memory_map(); if (!memory_map) { system::panic("[x86_64] Failed to create early allocator, no memory map available."); } auto const & mbi = boot::bootstrap_information.mbi; auto mbi_span = std::span{std::bit_cast(mbi), mbi->size_bytes()}; auto image_span = std::span{&arch::boot::_start_physical, &arch::boot::_end_physical}; return arch::memory::region_allocator::memory_information{ .image_range = std::make_pair(physical_address{&image_span.front()}, physical_address{&image_span.back()}), .mbi_range = std::make_pair(physical_address{&mbi_span.front()}, physical_address{&mbi_span.back()}), .memory_map = *memory_map, }; } //! Enable additional CPU protection features, required during later stages of the kernel. auto enable_cpu_protections() -> void { arch::cpu::cr0::set(arch::cpu::cr0::flags::write_protect); arch::cpu::i32_efer::set(arch::cpu::i32_efer::flags::execute_disable_bit_enable); } //! Inject, or graft, a faux recursive PML4 into the active page mapping structure. 
auto inject_faux_pml4(frame_allocator & allocator, page_mapper & mapper)
{
    using arch::memory::page_table;
    using arch::memory::paging_root;
    using arch::memory::pml_index;
    using entry_flags = arch::memory::page_table::entry::flags;

    auto page = page::containing(unused_page_address);
    auto temporary_mapper = arch::memory::scoped_mapping{page, mapper};

    // Allocate the new root table and make it recursive by pointing its
    // second-to-last entry back at its own frame.
    auto new_pml4_frame = allocator.allocate();
    auto pml4 = std::construct_at(temporary_mapper.map_as<page_table>(*new_pml4_frame, entry_flags::writable));
    (*pml4)[recursive_page_map_index].frame(new_pml4_frame.value(), entry_flags::present | entry_flags::writable);

    // Remember the old translation of the scratch page, level by level.
    auto pml4_index = pml_index<4>(page);
    auto old_pml4 = paging_root::get();
    auto pml4_entry = (*old_pml4)[pml4_index];
    auto pml3_index = pml_index<3>(page);
    auto old_pml3 = old_pml4->next(pml4_index);
    auto pml3_entry = (**old_pml3)[pml3_index];
    auto pml2_index = pml_index<2>(page);
    auto old_pml2 = (**old_pml3).next(pml3_index);
    auto pml2_entry = (**old_pml2)[pml2_index];
    auto pml1_index = pml_index<1>(page);
    auto old_pml1 = (**old_pml2).next(pml2_index);
    auto pml1_entry = (**old_pml1)[pml1_index];

    // Graft the new PML4 into the recursive slot of the active root.
    (*paging_root::get())[recursive_page_map_index].frame(new_pml4_frame.value(), entry_flags::present | entry_flags::writable);
    arch::memory::tlb_flush_all();

    // Replay the scratch page's translation inside the new hierarchy.
    auto new_pml4 = paging_root::get();
    (*new_pml4)[pml4_index] = pml4_entry;
    auto new_pml3 = new_pml4->next(pml4_index);
    (**new_pml3)[pml3_index] = pml3_entry;
    auto new_pml2 = (**new_pml3).next(pml3_index);
    (**new_pml2)[pml2_index] = pml2_entry;
    auto new_pml1 = (**new_pml2).next(pml2_index);
    (**new_pml1)[pml1_index] = pml1_entry;

    return *new_pml4_frame;
}

auto remap_kernel(page_mapper & mapper) -> void
{
    auto kernel_mapper = arch::memory::kernel_mapper{boot::bootstrap_information.mbi};
    kernel_mapper.remap_kernel(mapper);
}

auto remap_vga_text_mode_buffer(page_mapper & mapper) -> void
{
    constexpr auto vga_base = std::uintptr_t{0xb8000};
    auto vga_physical_start = physical_address{vga_base};
    auto vga_virtual_start = linear_address{vga_base + std::bit_cast<std::uintptr_t>(&arch::boot::TEACHOS_VMA)};
    auto page = page::containing(vga_virtual_start);
    auto frame = frame::containing(vga_physical_start);
    mapper.map(page, frame, page_mapper::flags::writable | page_mapper::flags::supervisor_only);
}

auto remap_multiboot_information(page_mapper & mapper) -> void
{
    auto mbi_base = std::bit_cast<std::uintptr_t>(boot::bootstrap_information.mbi);
    auto mbi_size = boot::bootstrap_information.mbi->size_bytes();
    auto mbi_physical_start = physical_address{mbi_base & ~std::bit_cast<std::uintptr_t>(&arch::boot::TEACHOS_VMA)};
    auto mbi_virtual_start = linear_address{mbi_base};
    auto mbi_block_count = (mbi_size + PLATFORM_FRAME_SIZE - 1) / PLATFORM_FRAME_SIZE;

    for (auto i = 0uz; i < mbi_block_count; ++i) {
        auto page = page::containing(mbi_virtual_start) + i;
        auto frame = frame::containing(mbi_physical_start) + i;
        mapper.map(page, frame, page_mapper::flags::supervisor_only);
    }
}

auto handoff_to_kernel_pmm(frame_allocator & new_allocator) -> void
{
    auto memory_map = boot::bootstrap_information.mbi->memory_map();
    for (auto const & region : memory_map.regions()
             | std::views::filter([](auto const & region) { return region.type == multiboot2::memory_type::available; })) {
        auto start = frame::containing(physical_address{region.base});
        auto count = region.size_in_B / page::size;
        new_allocator.release_many({start, count});
    }

    auto next_free_frame = region_based_allocator->next_free_frame();
    if (!next_free_frame) {
        system::panic("[x86_64:MEM] No more free memory!");
    }
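
    // Every frame below the bootstrap allocator's high-water mark was handed out
    // during remapping (page tables among other things), so withhold the whole
    // range from the new physical memory manager.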
    std::ranges::for_each(std::views::iota(kapi::memory::frame{}, *next_free_frame),
                          [&](auto frame) { new_allocator.mark_used(frame); });

    // Protect the kernel image itself.
    auto image_start = frame::containing(physical_address{&arch::boot::_start_physical});
    auto image_end = frame::containing(physical_address{&arch::boot::_end_physical}) + 1;
    std::ranges::for_each(std::views::iota(image_start, image_end),
                          [&](auto frame) { new_allocator.mark_used(frame); });

    // Protect the multiboot information structure.
    auto mbi_base = std::bit_cast<std::uintptr_t>(boot::bootstrap_information.mbi);
    auto mbi_size = boot::bootstrap_information.mbi->size_bytes();
    auto mbi_address = physical_address{mbi_base & ~std::bit_cast<std::uintptr_t>(&arch::boot::TEACHOS_VMA)};
    auto mbi_start = frame::containing(mbi_address);
    auto mbi_end = frame::containing(mbi_address + mbi_size) + 1;
    // TODO BA-FS26: Protect MB2 boot modules
    std::ranges::for_each(std::views::iota(mbi_start, mbi_end),
                          [&](auto frame) { new_allocator.mark_used(frame); });
}

} // namespace

auto init() -> void
{
    auto static constinit is_initialized = std::atomic_flag{};
    if (is_initialized.test_and_set()) {
        system::panic("[x86_64] Memory management has already been initialized.");
    }

    kstd::println("[x86_64:MEM] Enabling additional CPU protection features.");
    enable_cpu_protections();

    region_based_allocator.emplace(collect_memory_information());
    allocation_buffer.emplace(&*region_based_allocator);
    set_frame_allocator(*allocation_buffer);
    recursive_page_mapper.emplace();
    set_page_mapper(*recursive_page_mapper);

    kstd::println("[x86_64:MEM] Preparing new paging hierarchy.");
    auto new_pml4_frame = inject_faux_pml4(*allocation_buffer, *recursive_page_mapper);
    remap_kernel(*recursive_page_mapper);
    remap_vga_text_mode_buffer(*recursive_page_mapper);
    remap_multiboot_information(*recursive_page_mapper);

    kstd::println("[x86_64:MEM] Switching to new paging hierarchy.");
    auto cr3 = arch::cpu::cr3::read();
    cr3.frame(new_pml4_frame);
    arch::cpu::cr3::write(cr3);

    auto memory_map = boot::bootstrap_information.mbi->memory_map();
    auto highest_byte = std::ranges::max(std::views::transform(
        std::views::filter(memory_map.regions(),
                           [](auto const & region) { return region.type == multiboot2::memory_type::available; }),
        [](auto const & region) { return region.base + region.size_in_B; }));
    init_pmm(frame::containing(physical_address{highest_byte}).number() + 1, handoff_to_kernel_pmm);

    kstd::println("[x86_64:MEM] Releasing bootstrap memory allocators.");
    allocation_buffer.reset();
    region_based_allocator.reset();
}

} // namespace kapi::memory