#include "kapi/memory.hpp"

#include "kapi/boot.hpp"
#include "kapi/system.hpp"

#include "arch/boot/boot.hpp"
#include "arch/boot/ld.hpp"
#include "arch/cpu/registers.hpp"
#include "arch/memory/kernel_mapper.hpp"
#include "arch/memory/mmu.hpp"
#include "arch/memory/page_table.hpp"
#include "arch/memory/region_allocator.hpp"

// NOTE(review): the original standard-library #include targets were lost
// (angle-bracket content stripped). The list below is reconstructed from the
// names used in this translation unit — confirm against the original file.
// Headers providing kstd::println and multiboot2::memory_type are project /
// third-party and must also be restored here.
#include <algorithm>
#include <atomic>
#include <bit>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <ranges>
#include <span>
#include <utility>

namespace kapi::memory {

namespace {

//! Early, memory-region based frame allocator used until the permanent kernel
//! PMM takes over. Lazily constructed in init().
// NOTE(review): the optional's template argument was stripped during
// extraction; arch::memory::region_allocator is inferred from the
// collect_memory_information() return type and the later
// set_frame_allocator(*region_based_allocator) call — confirm.
auto constinit region_based_allocator = std::optional<arch::memory::region_allocator>{};

//! Instantiate a basic, memory region based, early frame allocator for remapping.
//! Panics when the Multiboot2 information carries no memory map, since no
//! allocation is possible without one.
auto collect_memory_information() {
    auto memory_map = boot::bootstrap_information.mbi->maybe_memory_map();
    if (!memory_map) {
        system::panic("[x86_64] Failed to create early allocator, no memory map available.");
    }

    auto const & mbi = boot::bootstrap_information.mbi;
    // View the MBI structure and the kernel image as contiguous byte ranges so
    // their physical extents can be handed to the allocator for protection.
    // NOTE(review): the bit_cast target type was stripped during extraction;
    // a byte pointer is inferred from the std::span construction — confirm.
    auto mbi_span = std::span{std::bit_cast<std::byte const *>(mbi), mbi->size_bytes()};
    auto image_span = std::span{&arch::boot::_start_physical, &arch::boot::_end_physical};

    return arch::memory::region_allocator::memory_information{
        .image_range = std::make_pair(physical_address{&image_span.front()}, physical_address{&image_span.back()}),
        .mbi_range = std::make_pair(physical_address{&mbi_span.front()}, physical_address{&mbi_span.back()}),
        .memory_map = *memory_map,
    };
}

//! Enable additional CPU protection features, required during later stages of the kernel.
auto enable_cpu_protections() -> void { arch::cpu::cr0::set(arch::cpu::cr0::flags::write_protect); arch::cpu::i32_efer::set(arch::cpu::i32_efer::flags::execute_disable_bit_enable); } [[maybe_unused]] auto remap_kernel(page_mapper & mapper) -> void { auto kernel_mapper = arch::memory::kernel_mapper{boot::bootstrap_information.mbi}; kernel_mapper.remap_kernel(mapper); } [[maybe_unused]] auto remap_vga_text_mode_buffer(page_mapper & mapper) -> void { constexpr auto vga_base = std::uintptr_t{0xb8000}; auto vga_physical_start = physical_address{vga_base}; auto vga_virtual_start = linear_address{vga_base + std::bit_cast(&arch::boot::TEACHOS_VMA)}; auto page = page::containing(vga_virtual_start); auto frame = frame::containing(vga_physical_start); mapper.map(page, frame, page_mapper::flags::writable | page_mapper::flags::supervisor_only); } [[maybe_unused]] auto remap_multiboot_information(page_mapper & mapper) -> void { auto mbi_base = std::bit_cast(boot::bootstrap_information.mbi); auto mbi_size = boot::bootstrap_information.mbi->size_bytes(); auto mbi_physical_start = physical_address{mbi_base & ~std::bit_cast(&arch::boot::TEACHOS_VMA)}; auto mbi_virtual_start = linear_address{mbi_base}; auto mbi_block_count = (mbi_size + PLATFORM_FRAME_SIZE - 1) / PLATFORM_FRAME_SIZE; for (auto i = 0uz; i < mbi_block_count; ++i) { auto page = page::containing(mbi_virtual_start) + i; auto frame = frame::containing(mbi_physical_start) + i; mapper.map(page, frame, page_mapper::flags::supervisor_only); } } [[maybe_unused]] auto handoff_to_kernel_pmm(frame_allocator & new_allocator) -> void { auto memory_map = boot::bootstrap_information.mbi->memory_map(); for (auto const & region : memory_map.regions() | std::views::filter([](auto const & region) { return region.type == multiboot2::memory_type::available; })) { auto start = frame::containing(physical_address{region.base}); auto count = region.size_in_B / page::size; new_allocator.release_many({start, count}); } auto next_free_frame = 
region_based_allocator->next_free_frame(); if (!next_free_frame) { system::panic("[x86_64:MEM] No more free memory!"); } std::ranges::for_each(std::views::iota(kapi::memory::frame{}, *next_free_frame), [&](auto frame) { new_allocator.mark_used(frame); }); auto image_start = frame::containing(physical_address{&arch::boot::_start_physical}); auto image_end = frame::containing(physical_address{&arch::boot::_end_physical}) + 1; std::ranges::for_each(std::views::iota(image_start, image_end), [&](auto frame) { new_allocator.mark_used(frame); }); auto mbi_base = std::bit_cast(boot::bootstrap_information.mbi); auto mbi_size = boot::bootstrap_information.mbi->size_bytes(); auto mbi_address = physical_address{mbi_base & ~std::bit_cast(&arch::boot::TEACHOS_VMA)}; auto mbi_start = frame::containing(mbi_address); auto mbi_end = frame::containing(mbi_address + mbi_size) + 1; // TODO BA-FS26: Protect MB2 boot modules std::ranges::for_each(std::views::iota(mbi_start, mbi_end), [&](auto frame) { new_allocator.mark_used(frame); }); } auto establish_higher_half_direct_mapping() -> void { auto hhdm_frame = kapi::memory::allocate_frame(); auto hhdm_pml3 = static_cast(hhdm_frame->start_address()); hhdm_pml3->clear(); std::ranges::for_each(std::views::iota(0uz, 512uz), [&](auto index) { auto frame = kapi::memory::frame{(1024uz * 1024uz * 1024uz * index)}; auto & entry = (*hhdm_pml3)[index]; entry.frame(frame, arch::memory::page_table::entry::flags::present | arch::memory::page_table::entry::flags::writable | arch::memory::page_table::entry::flags::huge_page); }); auto current_cr3 = arch::cpu::cr3::read(); auto pml4_address = linear_address{current_cr3.address().raw()}; auto pml4 = static_cast(pml4_address); (*pml4)[256].frame(*hhdm_frame, arch::memory::page_table::entry::flags::present | arch::memory::page_table::entry::flags::writable | arch::memory::page_table::entry::flags::global); } auto clear_lower_address_space() -> void { auto current_cr3 = arch::cpu::cr3::read(); auto 
pml4_address = memory::higher_half_direct_map_base + current_cr3.address().raw(); auto pml4 = static_cast(pml4_address); std::ranges::for_each(std::views::iota(0uz, 1uz), [&](auto index) { (*pml4)[index].clear(); }); arch::memory::tlb_flush_all(); } } // namespace auto init() -> void { auto static constinit is_initialized = std::atomic_flag{}; if (is_initialized.test_and_set()) { system::panic("[x86_64] Memory management has already been initialized."); } kstd::println("[x86_64:MEM] Enabling additional CPU protection features."); enable_cpu_protections(); region_based_allocator.emplace(collect_memory_information()); set_frame_allocator(*region_based_allocator); kstd::println("[x86_64:MEM] Establishing higher-half direct mapping."); establish_higher_half_direct_mapping(); clear_lower_address_space(); kstd::println("[x86_64:MEM] Preparing new paging hierarchy."); // remap_kernel(*recursive_page_mapper); // remap_vga_text_mode_buffer(*recursive_page_mapper); // remap_multiboot_information(*recursive_page_mapper); // kstd::println("[x86_64:MEM] Switching to new paging hierarchy."); // auto cr3 = arch::cpu::cr3::read(); // cr3.frame(new_pml4_frame); // arch::cpu::cr3::write(cr3); // auto memory_map = boot::bootstrap_information.mbi->memory_map(); // auto highest_byte = std::ranges::max(std::views::transform( // std::views::filter(memory_map.regions(), // [](auto const & region) { return region.type == multiboot2::memory_type::available; }), // [](auto const & region) { return region.base + region.size_in_B; })); // init_pmm(frame::containing(physical_address{highest_byte}).number() + 1, handoff_to_kernel_pmm); // kstd::println("[x86_64:MEM] Releasing bootstrap memory allocators."); // allocation_buffer.reset(); // region_based_allocator.reset(); } } // namespace kapi::memory