diff options
Diffstat (limited to 'arch/x86_64')
5 files changed, 71 insertions, 56 deletions
diff --git a/arch/x86_64/include/arch/memory/heap/user_heap_allocator.hpp b/arch/x86_64/include/arch/memory/heap/user_heap_allocator.hpp index 42af23f..6b1b7bb 100644 --- a/arch/x86_64/include/arch/memory/heap/user_heap_allocator.hpp +++ b/arch/x86_64/include/arch/memory/heap/user_heap_allocator.hpp @@ -4,6 +4,8 @@ #include "arch/memory/heap/memory_block.hpp" #include "arch/stl/mutex.hpp" +#include <optional> + namespace teachos::arch::memory::heap { /** @@ -47,6 +49,19 @@ namespace teachos::arch::memory::heap [[gnu::section(".user_text")]] auto constexpr min_allocatable_size() -> std::size_t { return sizeof(memory_block); } /** + * @brief Checks if the given memory block is big enough and, if it is, allocates into the current block. + * + * @note Adjusts the link of the previous memory block to the new smaller remaining block. If the allocation used + * the complete block instead the previous block will point to the next block of the current memory block that was + * used for the allocation. + * + * @return Allocated usable memory area. + */ + [[gnu::section(".user_text")]] auto + allocate_into_memory_block_if_big_enough(memory_block * current, memory_block * previous, std::size_t total_size) + -> std::optional<void *>; + + /** * @brief Special functionality fo the user heap allocator. Which will result in it being expanded by a syscall with * addtionally 100 KiB, which are mapped into the page table. Will always work until there is no physical memory * left. 
diff --git a/arch/x86_64/include/arch/memory/paging/kernel_mapper.hpp b/arch/x86_64/include/arch/memory/paging/kernel_mapper.hpp index 977b40d..b211b8b 100644 --- a/arch/x86_64/include/arch/memory/paging/kernel_mapper.hpp +++ b/arch/x86_64/include/arch/memory/paging/kernel_mapper.hpp @@ -124,6 +124,13 @@ namespace teachos::arch::memory::paging auto map_elf_kernel_sections(active_page_table & active_table) -> void { exception_handling::assert(!mem_info.sections.empty(), "[Kernel Mapper] Kernel elf sections empty"); + std::array<uint64_t, 6U> constexpr USER_SECTION_BASES = { + 0x102000, // .boot_bss (Contains statically allocated variables) + 0x209000, // .stl_text (Contains code for custom std implementations and standard library code) + 0x218000, // .user_text (Contains the actual user code executed) + 0x21F000, // .user_data (Contains static user variables) + }; + for (auto const & section : mem_info.sections) { if (!section.flags.contains_flags(multiboot::elf_section_flags::OCCUPIES_MEMORY)) @@ -144,18 +151,7 @@ namespace teachos::arch::memory::paging allocator::frame_container const frames{begin, end}; entry entry{section.flags}; - // Required to be accessible in User Mode: - constexpr std::array<uint64_t, 6> user_section_bases = { - 0x102000, // .boot_bss (Contains statically allocated variables) - 0x209000, // .stl_text (Contains code for custom std implementations and standard library code) - 0x218000, // .user_text (Contains the actual user code executed) - 0x21E000, // .user_data (Contains static user variables) - - 0x20A000 // .text (Necessary, because symbols for standard library are placed there) - }; - - if (std::find(user_section_bases.begin(), user_section_bases.end(), section.physical_address) != - user_section_bases.end()) + if (std::ranges::find(USER_SECTION_BASES, section.physical_address) != USER_SECTION_BASES.end()) { entry.set_user_accessible(); } diff --git a/arch/x86_64/scripts/kernel.ld b/arch/x86_64/scripts/kernel.ld index 3d9a7ae..df9d7e7 
100644 --- a/arch/x86_64/scripts/kernel.ld +++ b/arch/x86_64/scripts/kernel.ld @@ -89,6 +89,12 @@ SECTIONS { *(.stl_text .stl_text*) KEEP(*libstdc++.a:*(.text .text.*)) + KEEP(*libubsan.a:*(.text .text.*)) /* TODO: Include atomic_base into stl_text / Print where code lies: objdump -t build/bin/Debug/_kernel >> test.txt */ + KEEP(*liblsan.a:*(.text .text.*)) + KEEP(*libtsan.a:*(.text .text.*)) + KEEP(*libasan.a:*(.text .text.*)) + KEEP(*libgcc.a:*(.text .text.*)) + KEEP(*libatomic.a:*(.text .text.*)) } .text ALIGN(4K) : AT(ADDR (.text)) diff --git a/arch/x86_64/src/context_switching/syscall/syscall_handler.cpp b/arch/x86_64/src/context_switching/syscall/syscall_handler.cpp index cd1c8a2..af6d911 100644 --- a/arch/x86_64/src/context_switching/syscall/syscall_handler.cpp +++ b/arch/x86_64/src/context_switching/syscall/syscall_handler.cpp @@ -1,6 +1,7 @@ #include "arch/context_switching/syscall/syscall_handler.hpp" #include "arch/context_switching/syscall/main.hpp" +#include "arch/exception_handling/assert.hpp" #include "arch/exception_handling/panic.hpp" #include "arch/memory/heap/global_heap_allocator.hpp" #include "arch/memory/main.hpp" @@ -61,10 +62,7 @@ namespace teachos::arch::context_switching::syscall result = expand_user_heap(); break; case type::ASSERT: - if (!arg_0) - { - teachos::arch::exception_handling::panic(reinterpret_cast<const char *>(arg_1)); - } + teachos::arch::exception_handling::assert(arg_0, reinterpret_cast<const char *>(arg_1)); break; default: teachos::arch::exception_handling::panic("[Syscall Handler] Invalid syscall number"); diff --git a/arch/x86_64/src/memory/heap/user_heap_allocator.cpp b/arch/x86_64/src/memory/heap/user_heap_allocator.cpp index f3fe1c2..427a68a 100644 --- a/arch/x86_64/src/memory/heap/user_heap_allocator.cpp +++ b/arch/x86_64/src/memory/heap/user_heap_allocator.cpp @@ -1,8 +1,6 @@ #include "arch/memory/heap/user_heap_allocator.hpp" #include "arch/context_switching/syscall/main.hpp" -#include 
"arch/exception_handling/assert.hpp" -#include "arch/exception_handling/panic.hpp" #include <algorithm> @@ -20,24 +18,10 @@ namespace teachos::arch::memory::heap while (current != nullptr) { - // TODO: Can not access current pointer. Results in a General Protection Fault? - if (current->size == total_size) + auto memory = allocate_into_memory_block_if_big_enough(current, previous, total_size); + if (memory.has_value()) { - auto const memory_address = remove_free_memory_block(previous, current); - new (memory_address) std::size_t(total_size); - mutex.unlock(); - return reinterpret_cast<void *>(reinterpret_cast<std::size_t>(memory_address) + sizeof(std::size_t)); - } - else if (current->size >= total_size + min_allocatable_size()) - { - // Ensure that the allocated size block is atleast 16 bytes (required because if we free the hole afterwards - // there needs to be enough space for a memory block). Therefore we allocate more than is actually required if - // the total size was less and simply deallocate it as well - auto const max_size = std::max(total_size, min_allocatable_size()); - auto const memory_address = split_free_memory_block(previous, current, max_size); - new (memory_address) std::size_t(max_size); - mutex.unlock(); - return reinterpret_cast<void *>(reinterpret_cast<std::size_t>(memory_address) + sizeof(std::size_t)); + return memory.value(); } previous = current; @@ -48,27 +32,17 @@ namespace teachos::arch::memory::heap if (current != nullptr) { - if (current->size == total_size) - { - auto const memory_address = remove_free_memory_block(previous, current); - new (memory_address) std::size_t(total_size); - mutex.unlock(); - return reinterpret_cast<void *>(reinterpret_cast<std::size_t>(memory_address) + sizeof(std::size_t)); - } - else if (current->size >= total_size + min_allocatable_size()) + auto memory = allocate_into_memory_block_if_big_enough(current, previous, total_size); + if (memory.has_value()) { - // Ensure that the allocated size block is 
atleast 16 bytes (required because if we free the hole afterwards - // there needs to be enough space for a memory block). Therefore we allocate more than is actually required if - // the total size was less and simply deallocate it as well - auto const max_size = std::max(total_size, min_allocatable_size()); - auto const memory_address = split_free_memory_block(previous, current, max_size); - new (memory_address) std::size_t(max_size); - mutex.unlock(); - return reinterpret_cast<void *>(reinterpret_cast<std::size_t>(memory_address) + sizeof(std::size_t)); + return memory.value(); } } - exception_handling::panic("[Linked List Allocator] Out of memory"); + char constexpr OUT_OF_MEMORY_ERROR_MESSAGE[] = "[Linked List Allocator] Out of memory"; + context_switching::syscall::syscall(context_switching::syscall::type::ASSERT, + {false, reinterpret_cast<uint64_t>(&OUT_OF_MEMORY_ERROR_MESSAGE)}); + return nullptr; } auto user_heap_allocator::deallocate(void * pointer) noexcept -> void @@ -102,6 +76,30 @@ namespace teachos::arch::memory::heap mutex.unlock(); } + auto user_heap_allocator::allocate_into_memory_block_if_big_enough(memory_block * current, memory_block * previous, + std::size_t total_size) -> std::optional<void *> + { + if (current->size == total_size) + { + auto const memory_address = remove_free_memory_block(previous, current); + new (memory_address) std::size_t(total_size); + mutex.unlock(); + return reinterpret_cast<void *>(reinterpret_cast<std::size_t>(memory_address) + sizeof(std::size_t)); + } + else if (current->size >= total_size + min_allocatable_size()) + { + // Ensure that the allocated size block is at least 16 bytes (required because if we free the hole afterwards + // there needs to be enough space for a memory block). 
Therefore we allocate more than is actually required if + // the total size was less and simply deallocate it as well + auto const max_size = std::max(total_size, min_allocatable_size()); + auto const memory_address = split_free_memory_block(previous, current, max_size); + new (memory_address) std::size_t(max_size); + mutex.unlock(); + return reinterpret_cast<void *>(reinterpret_cast<std::size_t>(memory_address) + sizeof(std::size_t)); + } + return std::nullopt; + } + auto user_heap_allocator::expand_heap_if_full() -> memory_block * { auto const result = context_switching::syscall::syscall(context_switching::syscall::type::EXPAND_HEAP); @@ -180,10 +178,12 @@ namespace teachos::arch::memory::heap // Check if the block we want to deallocate is contained in the previous block, because if it is it can only mean // that the block has already been deallocated and we therefore attempted a double free. - exception_handling::assert(previous_block == nullptr || - start_address >= - (reinterpret_cast<std::size_t>(previous_block) + previous_block->size), - "[Linked List Allocator] Attempted double free detected"); + char constexpr DOUBLE_FREE_ERROR_MESSAGE[] = "[Linked List Allocator] Attempted double free detected"; + context_switching::syscall::syscall( + context_switching::syscall::type::ASSERT, + {previous_block == nullptr || + start_address >= (reinterpret_cast<std::size_t>(previous_block) + previous_block->size), + reinterpret_cast<uint64_t>(&DOUBLE_FREE_ERROR_MESSAGE)}); auto const new_block = new (pointer) memory_block(block_size, next_block); // If we want to deallocate the first block that is before any other free block, then there exists no previous free |
