// NOTE(review): this file was recovered from a collapsed/extracted form and all
// angle-bracket contents appear to have been stripped: the bare `#include`
// directives below lost their <header> targets, and every `std::optional{}`,
// `static_cast(...)`, `std::bit_cast(...)` lost its explicit template-argument
// list. Tokens are preserved exactly as found; the missing arguments must be
// restored from the original sources before this translation unit compiles.
#include "kapi/memory.hpp"
#include "kapi/boot.hpp"
#include "kapi/system.hpp"
#include "arch/boot/boot.hpp"
#include "arch/boot/ld.hpp"
#include "arch/cpu/registers.hpp"
#include "arch/memory/higher_half_mapper.hpp"
#include "arch/memory/kernel_mapper.hpp"
#include "arch/memory/page_table.hpp"
#include "arch/memory/page_utilities.hpp"
#include "arch/memory/region_allocator.hpp"
// NOTE(review): the fourteen includes below lost their <...> targets (see note above).
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

// Brings unit literals such as `1_B` (used for frame-size arithmetic below) into scope.
using namespace kstd::units_literals;

namespace kapi::memory {

namespace {

// Bootstrap-time allocator/mapper. Held in optionals so they can be constructed
// late (in init()) and the region allocator torn down again once the permanent
// PMM has taken over (see region_based_allocator.reset() at the end of init()).
auto constinit region_based_allocator = std::optional{};
auto constinit higher_half_mapper = std::optional{};

//! Instantiate a basic, memory region based, early frame allocator for remapping.
//! Panics if the bootloader did not hand over a memory map. Returns a
//! region_allocator::memory_information describing the kernel image range, the
//! multiboot information (MBI) range, and the memory map itself, so the early
//! allocator can avoid handing out frames that are already occupied.
auto collect_memory_information() {
    auto memory_map = boot::bootstrap_information.mbi->maybe_memory_map();
    if (!memory_map) {
        system::panic("[x86_64] Failed to create early allocator, no memory map available.");
    }
    auto const & mbi = boot::bootstrap_information.mbi;
    // Span over the raw MBI bytes; template arguments lost in extraction (see file note).
    auto mbi_span = std::span{std::bit_cast(mbi), static_cast(mbi->size())};
    // Physical extent of the kernel image, taken from linker-script symbols.
    auto image_span = std::span{&arch::boot::_start_physical, &arch::boot::_end_physical};
    return arch::memory::region_allocator::memory_information{
        .image_range = std::make_pair(physical_address{&image_span.front()}, physical_address{&image_span.back()}),
        .mbi_range = std::make_pair(physical_address{&mbi_span.front()}, physical_address{&mbi_span.back()}),
        .memory_map = *memory_map,
        .mbi = mbi,
    };
}

//! Build a direct mapping of physical memory in the higher half: one fresh PML3
//! whose 512 entries each map a 1 GiB huge page (512 GiB total, identity offsets),
//! hooked into PML4 slot 256 — the first higher-half slot — of the *current*
//! paging hierarchy.
auto establish_higher_half_direct_mapping() -> void {
    auto pml3_frame = kapi::memory::allocate_frame();
    // NOTE(review): pml3_frame is dereferenced without checking for allocation
    // failure (init() does check its own PML4 allocation) — confirm intent.
    auto pml3 = static_cast(pml3_frame->start_address());
    pml3->clear();
    // Entry i maps physical address i * 1 GiB as a writable huge page.
    std::ranges::for_each(std::views::iota(0uz, 512uz), [&](auto index) {
        auto frame = kapi::memory::frame{(1024uz * 1024uz * 1024uz * index)};
        auto & entry = (*pml3)[index];
        entry.frame(frame, arch::memory::page_table::entry::flags::present | arch::memory::page_table::entry::flags::writable |
            arch::memory::page_table::entry::flags::huge_page);
    });
    auto current_cr3 = arch::cpu::cr3::read();
    auto pml4 = static_cast(current_cr3.address());
    // Slot 256 is the first PML4 entry covering the higher half of the canonical
    // address space; marked global so it survives address-space switches.
    (*pml4)[256].frame(*pml3_frame, arch::memory::page_table::entry::flags::present | arch::memory::page_table::entry::flags::writable | arch::memory::page_table::entry::flags::global);
}

//! Enable additional CPU protection features, required during later stages of the kernel.
//! CR0.WP makes supervisor writes honor page write-protection; EFER's
//! execute-disable bit enables the NX page-table flag.
auto enable_cpu_protections() -> void {
    arch::cpu::cr0::set(arch::cpu::cr0::flags::write_protect);
    arch::cpu::i32_efer::set(arch::cpu::i32_efer::flags::execute_disable_bit_enable);
}

//! Remap the kernel image into the new paging hierarchy via the arch-specific
//! kernel_mapper, driven by the bootloader's MBI.
[[maybe_unused]] auto remap_kernel(page_mapper & mapper) -> void {
    auto mbi_pointer = boot::bootstrap_information.mbi;
    auto kernel_mapper = arch::memory::kernel_mapper{mbi_pointer};
    kernel_mapper.remap_kernel(mapper);
}

//! Map the legacy VGA text-mode buffer (physical 0xb8000) at its kernel virtual
//! address (physical base + TEACHOS_VMA offset), writable and supervisor-only.
[[maybe_unused]] auto remap_vga_text_mode_buffer(page_mapper & mapper) -> void {
    constexpr auto vga_base = std::uintptr_t{0xb8000};
    auto vga_physical_start = physical_address{vga_base};
    auto vga_virtual_start = linear_address{vga_base + std::bit_cast(&arch::boot::TEACHOS_VMA)};
    auto page = page::containing(vga_virtual_start);
    auto frame = frame::containing(vga_physical_start);
    mapper.map(page, frame, page_mapper::flags::writable | page_mapper::flags::supervisor_only);
}

//! Map the multiboot information structure (read-only: no writable flag) at the
//! virtual address the bootstrap code already uses. The MBI pointer is a
//! VMA-offset address, so masking off TEACHOS_VMA recovers its physical base.
[[maybe_unused]] auto remap_multiboot_information(page_mapper & mapper) -> void {
    auto mbi_base = std::bit_cast(boot::bootstrap_information.mbi);
    auto mbi_size = boot::bootstrap_information.mbi->size();
    auto mbi_physical_start = physical_address{mbi_base & ~std::bit_cast(&arch::boot::TEACHOS_VMA)};
    auto mbi_virtual_start = linear_address{mbi_base};
    // Round the byte size up to whole frames.
    auto mbi_block_count = (mbi_size + frame::size - 1_B) / frame::size;
    for (auto i = 0uz; i < mbi_block_count; ++i) {
        auto page = page::containing(mbi_virtual_start) + i;
        auto frame = frame::containing(mbi_physical_start) + i;
        mapper.map(page, frame, page_mapper::flags::supervisor_only);
    }
}

//! Map every bootloader-provided module at (physical base + TEACHOS_VMA),
//! writable and supervisor-only, rounding each module up to whole frames.
[[maybe_unused]] auto remap_bootloader_modules(page_mapper & mapper) -> void {
    std::ranges::for_each(boot::bootstrap_information.mbi->modules(), [&mapper](auto const & module) {
        auto module_physical_start = physical_address{module.start_address};
        auto module_virtual_start = linear_address{module.start_address + std::bit_cast(&arch::boot::TEACHOS_VMA)};
        auto module_size = static_cast(module.end_address - module.start_address);
        auto module_block_count = (module_size + frame::size - 1_B) / frame::size;
        for (auto i = 0uz; i < module_block_count; ++i) {
            auto page = page::containing(module_virtual_start) + i;
            auto frame = frame::containing(module_physical_start) + i;
            mapper.map(page, frame, page_mapper::flags::writable | page_mapper::flags::supervisor_only);
        }
    });
}

//! Seed the permanent kernel PMM: release every frame of every `available`
//! memory-map region, then re-mark as used (a) everything the early region
//! allocator already handed out (all frames below its next free frame),
//! (b) the kernel image, (c) the MBI, and (d) each bootloader module.
//! Passed as a callback to init_pmm() below.
[[maybe_unused]] auto handoff_to_kernel_pmm(frame_allocator & new_allocator) -> void {
    auto memory_map = boot::bootstrap_information.mbi->memory_map();
    // Step 1: everything marked available by the bootloader is initially free.
    for (auto const & region : memory_map.regions() | std::views::filter([](auto const & region) { return region.type == multiboot2::memory_type::available; })) {
        auto start = frame::containing(physical_address{region.base});
        auto count = kstd::units::bytes{region.size_in_B} / page::size;
        new_allocator.release_many({start, count});
    }
    // Step 2: frames below the bootstrap allocator's watermark are in use.
    auto next_free_frame = region_based_allocator->next_free_frame();
    if (!next_free_frame) {
        system::panic("[x86_64:MEM] No more free memory!");
    }
    std::ranges::for_each(std::views::iota(kapi::memory::frame{}, *next_free_frame), [&](auto frame) { new_allocator.mark_used(frame); });
    // Step 3: the kernel image itself.
    auto image_start = frame::containing(physical_address{&arch::boot::_start_physical});
    auto image_end = frame::containing(physical_address{&arch::boot::_end_physical}) + 1;
    std::ranges::for_each(std::views::iota(image_start, image_end), [&](auto frame) { new_allocator.mark_used(frame); });
    // Step 4: the multiboot information structure (VMA offset masked off, as in
    // remap_multiboot_information above).
    auto mbi_base = std::bit_cast(boot::bootstrap_information.mbi);
    auto mbi_size = boot::bootstrap_information.mbi->size();
    auto mbi_address = physical_address{mbi_base &
        ~std::bit_cast(&arch::boot::TEACHOS_VMA)};
    auto mbi_start = frame::containing(mbi_address);
    auto mbi_end = frame::containing(mbi_address + mbi_size) + 1;
    std::ranges::for_each(std::views::iota(mbi_start, mbi_end), [&](auto frame) { new_allocator.mark_used(frame); });
    // Step 5: every bootloader module.
    std::ranges::for_each(boot::bootstrap_information.mbi->modules(), [&](auto const & module) {
        auto module_physical_start = physical_address{module.start_address};
        auto module_size = module.end_address - module.start_address;
        auto module_start = frame::containing(module_physical_start);
        auto module_end = frame::containing(module_physical_start + module_size) + 1;
        std::ranges::for_each(std::views::iota(module_start, module_end), [&](auto frame) { new_allocator.mark_used(frame); });
    });
}

} // namespace

//! One-shot memory-management initialization. Panics on re-entry (atomic_flag
//! guard). Sequence: enable CPU protections; bring up the early region-based
//! frame allocator; establish the higher-half direct map; build a fresh PML4
//! and remap kernel, VGA buffer, MBI and modules into it; copy the direct-map
//! PML4 slot 256 from the old hierarchy; switch CR3; hand off to the permanent
//! PMM; release the bootstrap allocator.
auto init() -> void {
    auto static constinit is_initialized = std::atomic_flag{};
    if (is_initialized.test_and_set()) {
        system::panic("[x86_64] Memory management has already been initialized.");
    }
    kstd::println("[x86_64:MEM] Enabling additional CPU protection features.");
    enable_cpu_protections();
    // Early frame allocator, fed with the kernel-image / MBI / memory-map info.
    region_based_allocator.emplace(collect_memory_information());
    set_frame_allocator(*region_based_allocator);
    kstd::println("[x86_64:MEM] Establishing higher-half direct mapping.");
    establish_higher_half_direct_mapping();
    kstd::println("[x86_64:MEM] Preparing new paging hierarchy.");
    auto new_pml4_frame = kapi::memory::allocate_frame();
    if (!new_pml4_frame) {
        system::panic("[x86_64:MEM] Failed to allocate new PML4!");
    }
    // Access the new PML4 through the just-established higher-half direct map.
    auto new_pml4 = arch::memory::to_higher_half_pointer(new_pml4_frame->start_address());
    std::construct_at(new_pml4);
    higher_half_mapper.emplace(new_pml4);
    set_page_mapper(*higher_half_mapper);
    remap_kernel(*higher_half_mapper);
    remap_vga_text_mode_buffer(*higher_half_mapper);
    remap_multiboot_information(*higher_half_mapper);
    remap_bootloader_modules(*higher_half_mapper);
    // Carry the higher-half direct-map slot (256) over into the new hierarchy.
    auto current_cr3 = arch::cpu::cr3::read();
    auto old_pml4 = static_cast(current_cr3.address());
    (*new_pml4)[256] = (*old_pml4)[256];
    kstd::println("[x86_64:MEM] Switching to new paging hierarchy.");
    auto cr3 = arch::cpu::cr3::read();
    cr3.frame(*new_pml4_frame);
    arch::cpu::cr3::write(cr3);
    // Size the permanent PMM by the highest byte of any available region:
    // frame number of that byte + 1 == total frame count to track.
    auto memory_map = boot::bootstrap_information.mbi->memory_map();
    auto highest_byte = std::ranges::max(std::views::transform(
        std::views::filter(memory_map.regions(), [](auto const & region) { return region.type == multiboot2::memory_type::available; }),
        [](auto const & region) { return region.base + region.size_in_B; }));
    init_pmm(frame::containing(physical_address{highest_byte}).number() + 1, handoff_to_kernel_pmm);
    kstd::println("[x86_64:MEM] Releasing bootstrap memory allocators.");
    // The early region allocator is no longer needed once the PMM owns memory.
    region_based_allocator.reset();
}

} // namespace kapi::memory