Diffstat (limited to 'arch/x86_64/src/memory')
18 files changed, 1062 insertions, 0 deletions
diff --git a/arch/x86_64/src/memory/allocator/area_frame_allocator.cpp b/arch/x86_64/src/memory/allocator/area_frame_allocator.cpp
new file mode 100644
index 0000000..cb4fefa
--- /dev/null
+++ b/arch/x86_64/src/memory/allocator/area_frame_allocator.cpp
@@ -0,0 +1,85 @@
+#include "arch/memory/allocator/area_frame_allocator.hpp"
+
+#include "arch/exception_handling/assert.hpp"
+
+#include <algorithm>
+#include <array>
+#include <ranges>
+
+namespace teachos::arch::memory::allocator
+{
+    area_frame_allocator::area_frame_allocator(multiboot::memory_information const & mem_info)
+        : next_free_frame(0U)
+        , current_area(std::nullopt)
+        , memory_areas(mem_info.areas)
+        , kernel_start(physical_frame::containing_address(mem_info.kernel_start))
+        , kernel_end(physical_frame::containing_address(mem_info.kernel_end))
+        , multiboot_start(physical_frame::containing_address(mem_info.multiboot_start))
+        , multiboot_end(physical_frame::containing_address(mem_info.multiboot_end))
+    {
+        choose_next_area();
+    }
+
+    auto area_frame_allocator::choose_next_area() -> void
+    {
+        current_area = std::nullopt;
+        auto next_area_with_free_frames = memory_areas | std::views::filter([this](auto const & area) {
+            auto address = area.base_address + area.area_length - 1;
+            return physical_frame::containing_address(address) >= next_free_frame;
+        });
+
+        auto const lowest_area_with_free_frames = std::ranges::min_element(
+            next_area_with_free_frames, [](auto const & a, auto const & b) { return a.base_address < b.base_address; });
+
+        if (lowest_area_with_free_frames != next_area_with_free_frames.end())
+        {
+            current_area = *lowest_area_with_free_frames;
+            // Update `next_free_frame` according to the new memory area.
+            auto const start_frame = physical_frame::containing_address(current_area.value().base_address);
+            if (next_free_frame < start_frame)
+            {
+                next_free_frame = start_frame;
+            }
+        }
+    }
+
+    auto area_frame_allocator::allocate_frame() -> std::optional<physical_frame>
+    {
+        // Only try to allocate if `current_area` holds a value; it is empty
+        // once no memory area with free frames is left.
+        if (!current_area.has_value())
+        {
+            return std::nullopt;
+        }
+
+        auto const address = current_area.value().base_address + current_area.value().area_length - 1;
+        physical_frame current_area_last_frame = physical_frame::containing_address(address);
+
+        if (next_free_frame > current_area_last_frame)
+        {
+            // All frames of the current area are used, switch to the next area.
+            choose_next_area();
+        }
+        else if (next_free_frame >= kernel_start && next_free_frame <= kernel_end)
+        {
+            // `next_free_frame` is used by the kernel, skip past the kernel image.
+            next_free_frame = allocator::physical_frame{kernel_end.frame_number + 1};
+        }
+        else if (next_free_frame >= multiboot_start && next_free_frame <= multiboot_end)
+        {
+            // `next_free_frame` is used by the multiboot information structure, skip past it.
+            next_free_frame = allocator::physical_frame{multiboot_end.frame_number + 1};
+        }
+        else
+        {
+            // Frame is unused: hand it out and advance `next_free_frame`.
+            auto const frame = next_free_frame;
+            next_free_frame.frame_number += 1;
+            return frame;
+        }
+
+        // `next_free_frame` was not usable, try again with the updated value.
+        return allocate_frame();
+    }
+
+    auto area_frame_allocator::deallocate_frame(physical_frame const & physical_frame) -> void { (void)physical_frame; }
+} // namespace teachos::arch::memory::allocator
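As an aside (not part of the commit): the allocator's contract is easiest to see from the caller's side. A minimal sketch against the interface above, using the types from this patch series; count_usable_frames is a hypothetical helper.

    #include "arch/memory/allocator/area_frame_allocator.hpp"

    #include <cstddef>

    // Hypothetical helper, not in the commit: drains the allocator to count how
    // many frames are usable once the kernel image and the multiboot structure
    // have been skipped.
    auto count_usable_frames(teachos::arch::memory::multiboot::memory_information const & mem_info) -> std::size_t
    {
        teachos::arch::memory::allocator::area_frame_allocator allocator(mem_info);
        std::size_t frames = 0;
        // allocate_frame() yields std::nullopt once choose_next_area() finds no
        // area with frames at or above next_free_frame.
        while (allocator.allocate_frame().has_value())
        {
            ++frames;
        }
        return frames;
    }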
diff --git a/arch/x86_64/src/memory/allocator/physical_frame.cpp b/arch/x86_64/src/memory/allocator/physical_frame.cpp
new file mode 100644
index 0000000..ec387a1
--- /dev/null
+++ b/arch/x86_64/src/memory/allocator/physical_frame.cpp
@@ -0,0 +1,24 @@
+#include "arch/memory/allocator/physical_frame.hpp"
+
+namespace teachos::arch::memory::allocator
+{
+    auto physical_frame::containing_address(physical_address address) -> physical_frame
+    {
+        return physical_frame{address / PAGE_FRAME_SIZE};
+    }
+
+    auto physical_frame::start_address() const -> physical_address { return frame_number * PAGE_FRAME_SIZE; }
+
+    auto physical_frame::operator++(int) -> physical_frame
+    {
+        physical_frame const old_value = *this;
+        ++frame_number;
+        return old_value;
+    }
+
+    auto physical_frame::operator++() -> physical_frame &
+    {
+        ++frame_number;
+        return *this;
+    }
+} // namespace teachos::arch::memory::allocator
diff --git a/arch/x86_64/src/memory/allocator/tiny_frame_allocator.cpp b/arch/x86_64/src/memory/allocator/tiny_frame_allocator.cpp
new file mode 100644
index 0000000..3cdf9c7
--- /dev/null
+++ b/arch/x86_64/src/memory/allocator/tiny_frame_allocator.cpp
@@ -0,0 +1,34 @@
+#include "arch/memory/allocator/tiny_frame_allocator.hpp"
+
+#include "arch/exception_handling/panic.hpp"
+
+namespace teachos::arch::memory::allocator
+{
+    auto tiny_frame_allocator::allocate_frame() -> std::optional<physical_frame>
+    {
+        for (auto & frame_option : frames)
+        {
+            if (frame_option.has_value())
+            {
+                auto value = frame_option;
+                frame_option.reset();
+                return value;
+            }
+        }
+        return std::nullopt;
+    }
+
+    auto tiny_frame_allocator::deallocate_frame(physical_frame const & physical_frame) -> void
+    {
+        for (auto & frame_option : frames)
+        {
+            if (!frame_option.has_value())
+            {
+                frame_option.emplace(physical_frame);
+                return;
+            }
+        }
+        exception_handling::panic(
+            "[Tiny Frame Allocator] Attempted to deallocate more than the 3 frames that can be held");
+    }
+} // namespace teachos::arch::memory::allocator
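The frame arithmetic above is plain integer division and multiplication. Assuming the usual 4 KiB frames (PAGE_FRAME_SIZE == 4096; its definition is not part of this diff), the mapping works out as follows:

    // containing_address(0x1ABC) -> frame_number 1   (0x1ABC / 0x1000)
    // start_address() of frame 1 -> 0x1000           (1 * 0x1000)
    // The round trip truncates: every address in [0x1000, 0x2000) lands in
    // frame 1, so start_address(containing_address(a)) == a only holds for
    // frame-aligned addresses.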
"r"(new_value) + : "memory"); + break; + case control_register::CR2: + asm volatile("mov %[input], %%cr2" + : /* no output from call */ + : [input] "r"(new_value) + : "memory"); + break; + case control_register::CR3: + asm volatile("mov %[input], %%cr3" + : /* no output from call */ + : [input] "r"(new_value) + : "memory"); + break; + case control_register::CR4: + asm volatile("mov %[input], %%cr4" + : /* no output from call */ + : [input] "r"(new_value) + : "memory"); + break; + default: + exception_handling::assert(false, + "[Control Register] Attempted to write non-existent or reserved control register"); + break; + } + } + + auto set_cr0_bit(cr0_flags flag) -> void + { + auto const cr0 = read_control_register(control_register::CR0); + write_control_register(control_register::CR0, static_cast<std::underlying_type<cr0_flags>::type>(flag) | cr0); + } +} // namespace teachos::arch::memory::cpu diff --git a/arch/x86_64/src/memory/cpu/msr.cpp b/arch/x86_64/src/memory/cpu/msr.cpp new file mode 100644 index 0000000..b83f902 --- /dev/null +++ b/arch/x86_64/src/memory/cpu/msr.cpp @@ -0,0 +1,31 @@ +#include "arch/memory/cpu/msr.hpp" + +namespace teachos::arch::memory::cpu +{ + namespace + { + auto constexpr IA32_EFER_ADDRESS = 0xC0000080; + } + + auto read_msr(uint32_t msr) -> uint64_t + { + uint32_t low, high; + asm volatile("rdmsr" : "=a"(low), "=d"(high) : "c"(msr)); + return (static_cast<uint64_t>(high) << 32) | low; + } + + auto write_msr(uint32_t msr, uint64_t value) -> void + { + uint32_t low = value & 0xFFFFFFFF; + uint32_t high = value >> 32; + asm volatile("wrmsr" + : /* no output from call */ + : "c"(msr), "a"(low), "d"(high)); + } + + auto set_efer_bit(efer_flags flag) -> void + { + auto const efer = read_msr(IA32_EFER_ADDRESS); + write_msr(IA32_EFER_ADDRESS, static_cast<std::underlying_type<efer_flags>::type>(flag) | efer); + } +} // namespace teachos::arch::memory::cpu diff --git a/arch/x86_64/src/memory/cpu/tlb.cpp b/arch/x86_64/src/memory/cpu/tlb.cpp new file mode 100644 index 0000000..591d9fc --- /dev/null +++ b/arch/x86_64/src/memory/cpu/tlb.cpp @@ -0,0 +1,16 @@ +#include "arch/memory/cpu/tlb.hpp" + +#include "arch/memory/cpu/control_register.hpp" + +namespace teachos::arch::memory::cpu +{ + auto tlb_flush(paging::virtual_address address) -> void + { + asm volatile("invlpg (%[input])" : /* no output from call */ : [input] "r"(address) : "memory"); + } + + auto tlb_flush_all() -> void + { + write_control_register(cpu::control_register::CR3, read_control_register(cpu::control_register::CR3)); + } +} // namespace teachos::arch::memory::cpu diff --git a/arch/x86_64/src/memory/heap/bump_allocator.cpp b/arch/x86_64/src/memory/heap/bump_allocator.cpp new file mode 100644 index 0000000..bbf2021 --- /dev/null +++ b/arch/x86_64/src/memory/heap/bump_allocator.cpp @@ -0,0 +1,52 @@ +#include "arch/memory/heap/bump_allocator.hpp" + +#include "arch/exception_handling/assert.hpp" + +#include <limits> +#include <type_traits> + +namespace teachos::arch::memory::heap +{ + namespace + { + template<typename T> + auto saturating_add(T x, T y) -> T + requires std::is_unsigned_v<T> + { + if (x > std::numeric_limits<T>::max() - y) + { + return std::numeric_limits<T>::max(); + } + T result = x + y; + return result; + } + } // namespace + + auto bump_allocator::allocate(std::size_t size) -> void * + { + // Repeat allocation until it succeeds, has to be done, because another allocator could overtake it at any time + // causing the value to differ and the calculation to have to be redone. 
diff --git a/arch/x86_64/src/memory/heap/bump_allocator.cpp b/arch/x86_64/src/memory/heap/bump_allocator.cpp
new file mode 100644
index 0000000..bbf2021
--- /dev/null
+++ b/arch/x86_64/src/memory/heap/bump_allocator.cpp
@@ -0,0 +1,52 @@
+#include "arch/memory/heap/bump_allocator.hpp"
+
+#include "arch/exception_handling/assert.hpp"
+
+#include <limits>
+#include <type_traits>
+
+namespace teachos::arch::memory::heap
+{
+    namespace
+    {
+        template<typename T>
+        auto saturating_add(T x, T y) -> T
+            requires std::is_unsigned_v<T>
+        {
+            if (x > std::numeric_limits<T>::max() - y)
+            {
+                return std::numeric_limits<T>::max();
+            }
+            T result = x + y;
+            return result;
+        }
+    } // namespace
+
+    auto bump_allocator::allocate(std::size_t size) -> void *
+    {
+        // Retry the allocation until it succeeds. This is necessary because another
+        // thread can update `next` at any time, invalidating the loaded value and
+        // forcing the bounds calculation to be redone.
+        for (;;)
+        {
+            auto alloc_start = next.load(std::memory_order::relaxed);
+            auto const alloc_end = saturating_add(alloc_start, size);
+            arch::exception_handling::assert(alloc_end <= heap_end, "[Heap Allocator] Out of memory");
+            // Check that the atomic value is still the one initially loaded; if it is
+            // not, another thread has overtaken us and the calculation must be redone.
+            // Spurious failures of the weak exchange are harmless: the surrounding
+            // infinite loop simply retries until the exchange succeeds.
+            auto const updated = next.compare_exchange_weak(alloc_start, alloc_end, std::memory_order::relaxed);
+            if (updated)
+            {
+                return reinterpret_cast<void *>(alloc_start);
+            }
+        }
+    }
+
+    auto bump_allocator::deallocate(void * pointer, std::size_t size) -> void
+    {
+        // A bump allocator never reclaims individual allocations; the parameters
+        // are consumed only to silence unused-parameter warnings.
+        (void)pointer;
+        (void)size;
+    }
+
+} // namespace teachos::arch::memory::heap
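The compare-exchange loop above is the standard lock-free bump scheme. A host-side analogue (illustrative sketch, not part of the commit) makes the invariant explicit: a successful CAS proves the cursor did not move between the load and the exchange, so no two callers can receive overlapping ranges.

    #include <atomic>
    #include <cstddef>

    std::atomic<std::size_t> cursor{0};

    auto bump(std::size_t size) -> std::size_t
    {
        for (;;)
        {
            auto start = cursor.load(std::memory_order::relaxed);
            // On success the caller owns [start, start + size) exclusively.
            if (cursor.compare_exchange_weak(start, start + size, std::memory_order::relaxed))
            {
                return start;
            }
            // Failed (possibly spuriously): reload and retry, as in allocate() above.
        }
    }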
diff --git a/arch/x86_64/src/memory/heap/linked_list_allocator.cpp b/arch/x86_64/src/memory/heap/linked_list_allocator.cpp
new file mode 100644
index 0000000..e5bae21
--- /dev/null
+++ b/arch/x86_64/src/memory/heap/linked_list_allocator.cpp
@@ -0,0 +1,168 @@
+#include "arch/memory/heap/linked_list_allocator.hpp"
+
+#include "arch/exception_handling/assert.hpp"
+#include "arch/exception_handling/panic.hpp"
+
+namespace teachos::arch::memory::heap
+{
+    linked_list_allocator::linked_list_allocator(std::size_t heap_start, std::size_t heap_end)
+        : heap_start(heap_start)
+        , heap_end(heap_end)
+        , first(nullptr)
+        , mutex{shared::mutex{}}
+    {
+        auto const heap_size = heap_end - heap_start;
+        exception_handling::assert(
+            heap_size > min_allocatable_size(),
+            "[Linked List Allocator] Total heap size cannot be smaller than the minimum of 16 bytes needed to "
+            "hold at least one memory hole entry");
+        first = new (reinterpret_cast<void *>(heap_start)) memory_block(heap_size, nullptr);
+    }
+
+    auto linked_list_allocator::allocate(std::size_t size) -> void *
+    {
+        exception_handling::assert(size > min_allocatable_size(),
+                                   "[Linked List Allocator] Allocated memory cannot be smaller than 16 bytes");
+        mutex.lock();
+
+        memory_block * previous = nullptr;
+        auto current = first;
+
+        while (current != nullptr)
+        {
+            if (current->size == size)
+            {
+                auto const memory_address = remove_free_memory_block(previous, current);
+                mutex.unlock();
+                return memory_address;
+            }
+            else if (current->size >= size + min_allocatable_size())
+            {
+                auto const memory_address = split_free_memory_block(previous, current, size);
+                mutex.unlock();
+                return memory_address;
+            }
+
+            previous = current;
+            current = current->next;
+        }
+
+        exception_handling::panic("[Linked List Allocator] Out of memory");
+    }
+
+    auto linked_list_allocator::deallocate(void * pointer, std::size_t size) -> void
+    {
+        exception_handling::assert(size > min_allocatable_size(),
+                                   "[Linked List Allocator] Deallocated memory cannot be smaller than 16 bytes");
+        mutex.lock();
+
+        auto const start_address = reinterpret_cast<std::size_t>(pointer);
+        auto const end_address = start_address + size;
+
+        memory_block * previous = nullptr;
+        auto current = first;
+
+        while (current != nullptr)
+        {
+            // The current free memory block starts at an address past the end of the
+            // block to deallocate, so the insertion position has been found.
+            if (reinterpret_cast<std::size_t>(current) >= end_address)
+            {
+                break;
+            }
+
+            previous = current;
+            current = current->next;
+        }
+
+        coalesce_free_memory_block(previous, current, pointer, size);
+        mutex.unlock();
+    }
+
+    auto linked_list_allocator::remove_free_memory_block(memory_block * previous_block,
+                                                         memory_block * current_block) -> void *
+    {
+        return replace_free_memory_block(previous_block, current_block, current_block->next);
+    }
+
+    auto linked_list_allocator::split_free_memory_block(memory_block * previous_block, memory_block * current_block,
+                                                        std::size_t size) -> void *
+    {
+        auto const end_address = reinterpret_cast<std::size_t>(current_block) + size;
+        auto const new_block =
+            new (reinterpret_cast<void *>(end_address)) memory_block(current_block->size - size, current_block->next);
+        return replace_free_memory_block(previous_block, current_block, new_block);
+    }
+
+    auto linked_list_allocator::replace_free_memory_block(memory_block * previous_block, memory_block * current_block,
+                                                          memory_block * new_block) -> void *
+    {
+        auto const start_address = reinterpret_cast<std::size_t>(current_block);
+        // If the allocation goes into the first block, before any other free block, there is no
+        // previous free block (nullptr). In that case `first` has to be overwritten instead of a
+        // `next` pointer.
+        if (previous_block == nullptr)
+        {
+            first = new_block;
+        }
+        else
+        {
+            previous_block->next = new_block;
+        }
+        current_block->~memory_block();
+        return reinterpret_cast<void *>(start_address);
+    }
+
+    auto linked_list_allocator::coalesce_free_memory_block(memory_block * previous_block, memory_block * current_block,
+                                                           void * pointer, std::size_t size) -> void
+    {
+        auto const start_address = reinterpret_cast<std::size_t>(pointer);
+        auto const end_address = start_address + size;
+
+        // Initial values for the case that no adjacent free block exists on either side: a new free
+        // memory block is simply created in between the previous and next block.
+        auto block_size = size;
+        auto next_block = current_block;
+
+        // The block to deallocate sits directly before another free block, so both can be combined
+        // into one. This is done by deleting the current free block and creating a new block at the
+        // start address of the block to deallocate, sized to cover both the deallocated block and
+        // the free block next to it.
+        if (end_address == reinterpret_cast<std::size_t>(current_block))
+        {
+            block_size += current_block->size;
+            next_block = current_block->next;
+            current_block->~memory_block();
+        }
+
+        // The block to deallocate sits directly behind another free block, so both can be combined
+        // into one. This is done by simply growing the previous block to include the size of the
+        // block to deallocate, because the previous block might still be referenced by the `next`
+        // field of other memory blocks.
+        if (previous_block != nullptr &&
+            start_address == (reinterpret_cast<std::size_t>(previous_block) + previous_block->size))
+        {
+            block_size += previous_block->size;
+
+            previous_block->size = block_size;
+            previous_block->next = next_block;
+            return;
+        }
+
+        // If the block to deallocate is contained in the previous block, it can only mean that the
+        // block has already been freed and this is an attempted double free.
+        exception_handling::assert(previous_block == nullptr ||
+                                       start_address >=
+                                           (reinterpret_cast<std::size_t>(previous_block) + previous_block->size),
+                                   "[Linked List Allocator] Attempted double free detected");
+
+        auto const new_block = new (pointer) memory_block(block_size, next_block);
+        // If the deallocated block lies before any other free block, there is no previous free
+        // block (nullptr). In that case `first` has to be overwritten instead of a `next` pointer.
+        if (previous_block == nullptr)
+        {
+            first = new_block;
+            return;
+        }
+        previous_block->next = new_block;
+    }
+
+} // namespace teachos::arch::memory::heap
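The three coalescing cases are easiest to check with concrete numbers; a comment-only trace with invented addresses:

    // Free list before: [0x1000, size 32] -> [0x2000, size 64].
    // Deallocating 32 bytes at 0x1020:
    //   0x1020 == 0x1000 + 32, adjacent to its predecessor: the previous block
    //   simply grows to size 64, no new header is written.
    // Deallocating 32 bytes at 0x1FE0 instead:
    //   0x1FE0 + 32 == 0x2000, adjacent to its successor: the successor's header
    //   is destroyed and a new 96-byte block is placed at 0x1FE0.
    // Deallocating 32 bytes at 0x1800:
    //   no neighbour touches [0x1800, 0x1820), so a fresh 32-byte hole is linked
    //   in between the two existing blocks.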
diff --git a/arch/x86_64/src/memory/heap/memory_block.cpp b/arch/x86_64/src/memory/heap/memory_block.cpp
new file mode 100644
index 0000000..446cd96
--- /dev/null
+++ b/arch/x86_64/src/memory/heap/memory_block.cpp
@@ -0,0 +1,15 @@
+#include "arch/memory/heap/memory_block.hpp"
+
+#include <string.h>
+
+namespace teachos::arch::memory::heap
+{
+    memory_block::memory_block(std::size_t size, memory_block * next)
+    {
+        memset(static_cast<void *>(this), 0, size);
+        this->size = size;
+        this->next = next;
+    }
+
+    memory_block::~memory_block() { memset(static_cast<void *>(this), 0, sizeof(memory_block)); }
+} // namespace teachos::arch::memory::heap
diff --git a/arch/x86_64/src/memory/main.cpp b/arch/x86_64/src/memory/main.cpp
new file mode 100644
index 0000000..b978319
--- /dev/null
+++ b/arch/x86_64/src/memory/main.cpp
@@ -0,0 +1,53 @@
+#include "arch/memory/main.hpp"
+
+#include "arch/exception_handling/assert.hpp"
+#include "arch/memory/allocator/area_frame_allocator.hpp"
+#include "arch/memory/cpu/control_register.hpp"
+#include "arch/memory/cpu/msr.hpp"
+#include "arch/memory/heap/concept.hpp"
+#include "arch/memory/paging/active_page_table.hpp"
+#include "arch/memory/paging/kernel_mapper.hpp"
+
+namespace teachos::arch::memory
+{
+    namespace
+    {
+        auto remap_heap(allocator::area_frame_allocator & allocator, paging::active_page_table & active_table) -> void
+        {
+            auto const start_page = paging::virtual_page::containing_address(memory::heap::HEAP_START);
+            auto const end_page =
+                ++(paging::virtual_page::containing_address(memory::heap::HEAP_START + memory::heap::HEAP_SIZE - 1));
+            paging::page_container::iterator const begin{start_page};
+            paging::page_container::iterator const end{end_page};
+            paging::page_container const pages{begin, end};
+
+            for (auto const & page : pages)
+            {
+                active_table.map_page_to_next_free_frame(allocator, page, paging::entry::WRITABLE);
+            }
+        }
+    } // namespace
+
+    auto initialize_memory_management() -> void
+    {
+        static bool has_been_called = false;
+        arch::exception_handling::assert(!has_been_called,
+                                         "[Initialization] Memory management has already been initialized");
+        has_been_called = true;
+
+        auto const memory_information = multiboot::read_multiboot2();
+        allocator::area_frame_allocator allocator(memory_information);
+
+        cpu::set_cr0_bit(memory::cpu::cr0_flags::WRITE_PROTECT);
+        cpu::set_efer_bit(memory::cpu::efer_flags::NXE);
+
+        paging::kernel_mapper kernel(allocator, memory_information);
+        auto & active_table = kernel.remap_kernel();
+        video::vga::text::write("Kernel remapping successful", video::vga::text::common_attributes::green_on_black);
+        video::vga::text::newline();
+
+        remap_heap(allocator, active_table);
+        video::vga::text::write("Heap remapping successful", video::vga::text::common_attributes::green_on_black);
+        video::vga::text::newline();
+    }
+} // namespace teachos::arch::memory
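On the caller's side, initialize_memory_management() is a run-once entry point. A hypothetical call site (kernel_main is an assumption, not shown in this series):

    #include "arch/memory/main.hpp"

    extern "C" auto kernel_main() -> void
    {
        // Must run exactly once: the guard above asserts on a second call.
        teachos::arch::memory::initialize_memory_management();
        // From here on, [HEAP_START, HEAP_START + HEAP_SIZE) is mapped writable
        // and the heap allocators from this series are usable.
    }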
diff --git a/arch/x86_64/src/memory/multiboot/elf_symbols_section.cpp b/arch/x86_64/src/memory/multiboot/elf_symbols_section.cpp
new file mode 100644
index 0000000..f5d126b
--- /dev/null
+++ b/arch/x86_64/src/memory/multiboot/elf_symbols_section.cpp
@@ -0,0 +1,13 @@
+#include "arch/memory/multiboot/elf_symbols_section.hpp"
+
+namespace teachos::arch::memory::multiboot
+{
+    auto elf_section_flags::contains_flags(std::bitset<64U> other) const -> bool { return (flags & other) == other; }
+
+    auto elf_section_header::is_null() const -> bool
+    {
+        return name_table_index == 0U && type == elf_section_type::INACTIVE && flags == elf_section_flags(0U) &&
+               physical_address == 0U && file_offset == 0U && additional_information == 0U &&
+               address_alignment == 0U && fixed_table_entry_size == 0U;
+    }
+} // namespace teachos::arch::memory::multiboot
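contains_flags() above is a bit-subset test: every bit set in `other` must also be set in `flags`. With invented values:

    // (0b0110 & 0b0010) == 0b0010  ->  true:  0b0010 is a subset of 0b0110
    // (0b0110 & 0b1010) == 0b1010  ->  false: bit 3 is missing from 0b0110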
diff --git a/arch/x86_64/src/memory/multiboot/reader.cpp b/arch/x86_64/src/memory/multiboot/reader.cpp
new file mode 100644
index 0000000..2bf5b25
--- /dev/null
+++ b/arch/x86_64/src/memory/multiboot/reader.cpp
@@ -0,0 +1,131 @@
+#include "arch/memory/multiboot/reader.hpp"
+
+#include "arch/boot/pointers.hpp"
+#include "arch/exception_handling/assert.hpp"
+#include "arch/memory/multiboot/elf_symbols_section.hpp"
+#include "arch/memory/multiboot/info.hpp"
+
+#include <algorithm>
+#include <ranges>
+
+namespace teachos::arch::memory::multiboot
+{
+    namespace
+    {
+        template<typename T>
+            requires std::is_pointer<T>::value
+        auto align_to_8_byte_boundary(T ptr, uint32_t size) -> T
+        {
+            return reinterpret_cast<T>(reinterpret_cast<uint8_t *>(ptr) + ((size + 7) & ~7));
+        }
+
+        auto process_memory_map(memory_map_header * mminfo) -> memory_area_container
+        {
+            auto const expected_entry_size = mminfo->entry_size;
+            auto constexpr actual_entry_size = sizeof(memory_area);
+            exception_handling::assert(expected_entry_size == actual_entry_size,
+                                       "[Multiboot Reader] Unexpected memory area entry size");
+
+            auto const total_size = mminfo->info.size;
+            auto const total_entries_size = total_size - sizeof(memory_map_header) + actual_entry_size;
+            auto const number_of_entries = total_entries_size / actual_entry_size;
+
+            auto const begin = memory_area_container::iterator{&mminfo->entries};
+            auto const end = begin + number_of_entries;
+            return memory_area_container{begin, end};
+        }
+
+        auto process_elf_sections(elf_symbols_section_header * symbol, std::size_t & kernel_start,
+                                  std::size_t & kernel_end) -> elf_section_header_container
+        {
+            auto const expected_entry_size = symbol->entry_size;
+            auto constexpr actual_entry_size = sizeof(elf_section_header);
+            exception_handling::assert(expected_entry_size == actual_entry_size,
+                                       "[Multiboot Reader] Unexpected ELF section header entry size");
+
+            auto const expected_total_size = symbol->info.size;
+            auto const actual_total_entry_size = actual_entry_size * symbol->number_of_sections;
+            auto constexpr actual_total_section_size = sizeof(elf_symbols_section_header) - sizeof(uint32_t);
+            auto const actual_total_size = actual_total_entry_size + actual_total_section_size;
+            exception_handling::assert(expected_total_size == actual_total_size,
+                                       "[Multiboot Reader] Unexpected ELF symbols section header total size");
+
+            auto const begin =
+                elf_section_header_container::iterator{reinterpret_cast<elf_section_header *>(&symbol->end)};
+            auto const end = begin + symbol->number_of_sections;
+            exception_handling::assert(begin->is_null(),
+                                       "[Multiboot Reader] ELF symbols section does not start with a SHT_NULL section");
+
+            elf_section_header_container sections{begin, end};
+
+            auto allocated_sections = sections | std::views::filter([](auto const & section) {
+                return section.flags.contains_flags(elf_section_flags::OCCUPIES_MEMORY);
+            });
+
+            auto const elf_section_with_lowest_physical_address = std::ranges::min_element(
+                allocated_sections,
+                [](auto const & a, auto const & b) { return a.physical_address < b.physical_address; });
+
+            auto const elf_section_with_highest_physical_address =
+                std::ranges::max_element(allocated_sections, [](auto const & a, auto const & b) {
+                    auto a_physical_address_end = a.physical_address + a.section_size;
+                    auto b_physical_address_end = b.physical_address + b.section_size;
+                    return a_physical_address_end < b_physical_address_end;
+                });
+
+            auto const symbol_table_section_count = std::ranges::count_if(sections, [](auto const & section) {
+                return section.type == elf_section_type::DYNAMIC_SYMBOL_TABLE ||
+                       section.type == elf_section_type::SYMBOL_TABLE;
+            });
+            auto const dynamic_section_count = std::ranges::count_if(
+                sections, [](auto const & section) { return section.type == elf_section_type::DYNAMIC; });
+
+            exception_handling::assert(
+                symbol_table_section_count == 1U,
+                "[Multiboot Reader] The ELF specification allows exactly one symbol table section");
+            exception_handling::assert(
+                dynamic_section_count <= 1U,
+                "[Multiboot Reader] The ELF specification allows at most one dynamic section, but got more");
+
+            auto const lowest_elf_section = *elf_section_with_lowest_physical_address;
+            kernel_start = lowest_elf_section.physical_address;
+
+            auto const highest_elf_section = *elf_section_with_highest_physical_address;
+            kernel_end = highest_elf_section.physical_address + highest_elf_section.section_size;
+
+            return sections;
+        }
+    } // namespace
+
+    auto read_multiboot2() -> memory_information
+    {
+        memory_information mem_info{UINT64_MAX,
+                                    0U,
+                                    elf_section_header_container{},
+                                    boot::multiboot_information_pointer,
+                                    0U,
+                                    memory_area_container{}};
+
+        auto const multiboot_information_pointer = reinterpret_cast<info_header *>(boot::multiboot_information_pointer);
+        auto const multiboot_tag = &multiboot_information_pointer->tags;
+        mem_info.multiboot_end = mem_info.multiboot_start + multiboot_information_pointer->total_size;
+
+        for (auto tag = multiboot_tag; tag->type != tag_type::END; tag = align_to_8_byte_boundary(tag, tag->size))
+        {
+            switch (tag->type)
+            {
+                case tag_type::ELF_SECTIONS: {
+                    auto const symbol = reinterpret_cast<elf_symbols_section_header *>(tag);
+                    mem_info.sections = process_elf_sections(symbol, mem_info.kernel_start, mem_info.kernel_end);
+                    break;
+                }
+                case tag_type::MEMORY_MAP: {
+                    auto const mminfo = reinterpret_cast<memory_map_header *>(tag);
+                    mem_info.areas = process_memory_map(mminfo);
+                    break;
+                }
+                default:
+                    // All other tag types are not needed here and can be ignored.
+                    break;
+            }
+        }
+        return mem_info;
+    }
+} // namespace teachos::arch::memory::multiboot
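The tag iteration relies on align_to_8_byte_boundary() rounding the advance up to the next multiple of 8, as multiboot2 requires; the arithmetic can be spot-checked in isolation:

    static_assert(((21U + 7U) & ~7U) == 24U); // unaligned sizes round up...
    static_assert(((24U + 7U) & ~7U) == 24U); // ...already aligned sizes are unchanged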
diff --git a/arch/x86_64/src/memory/paging/active_page_table.cpp b/arch/x86_64/src/memory/paging/active_page_table.cpp
new file mode 100644
index 0000000..0113869
--- /dev/null
+++ b/arch/x86_64/src/memory/paging/active_page_table.cpp
@@ -0,0 +1,98 @@
+#include "arch/memory/paging/active_page_table.hpp"
+
+namespace teachos::arch::memory::paging
+{
+    namespace
+    {
+        paging::virtual_address constexpr PAGE_TABLE_LEVEL_4_ADDRESS = 0xffffffff'fffff000;
+    }
+
+    auto active_page_table::create_or_get() -> active_page_table &
+    {
+        static page_table_handle active_handle{reinterpret_cast<page_table *>(PAGE_TABLE_LEVEL_4_ADDRESS),
+                                               page_table_handle::LEVEL4};
+        static active_page_table active_page{active_handle};
+        return active_page;
+    }
+
+    auto active_page_table::operator[](std::size_t index) -> entry & { return active_handle[index]; }
+
+    auto active_page_table::translate_address(virtual_address address) -> std::optional<allocator::physical_address>
+    {
+        auto const offset = address % allocator::PAGE_FRAME_SIZE;
+        auto const page = virtual_page::containing_address(address);
+        auto const frame = translate_page(page);
+
+        if (frame.has_value())
+        {
+            return frame.value().frame_number * allocator::PAGE_FRAME_SIZE + offset;
+        }
+
+        return std::nullopt;
+    }
+
+    auto active_page_table::translate_page(virtual_page page) -> std::optional<allocator::physical_frame>
+    {
+        auto current_handle = active_handle;
+
+        for (auto level = page_table_handle::LEVEL4; level != page_table_handle::LEVEL1; --level)
+        {
+            auto const next_handle = current_handle.next_table(page.get_level_index(level));
+            // If next_table() failed, the entry most likely maps a huge page, which has to be
+            // decoded differently. Attempt to resolve it with the huge-page translation path.
+            if (!next_handle.has_value())
+            {
+                return translate_huge_page(page);
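For translate_address() above, the split into frame and offset is plain modular arithmetic; a comment-only example assuming the usual 4 KiB frames:

    // address 0x201ABC: offset = 0x201ABC % 0x1000 = 0xABC
    // if translate_page() resolves the containing page to physical frame 0x5,
    // the physical address is 0x5 * 0x1000 + 0xABC = 0x5ABC.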
