| author | Matteo Gmür <matteo.gmuer1@ost.ch> | 2025-05-09 13:15:58 +0000 |
|---|---|---|
| committer | Matteo Gmür <matteo.gmuer1@ost.ch> | 2025-05-09 13:15:58 +0000 |
| commit | a5812be4fa8ecc208219682204b14969d7b718b0 (patch) | |
| tree | 2365468add76cdb604ede4f751bcf3791263f312 /arch/x86_64/src/memory | |
| parent | d80ecf29baada6242c5181adaec0d1500707cad0 (diff) | |
| download | teachos-a5812be4fa8ecc208219682204b14969d7b718b0.tar.xz teachos-a5812be4fa8ecc208219682204b14969d7b718b0.zip | |
Switch user heap to linked list allocator
Diffstat (limited to 'arch/x86_64/src/memory')
| -rw-r--r-- | arch/x86_64/src/memory/heap/user_heap_allocator.cpp | 186 |
1 file changed, 154 insertions, 32 deletions
diff --git a/arch/x86_64/src/memory/heap/user_heap_allocator.cpp b/arch/x86_64/src/memory/heap/user_heap_allocator.cpp
index eefcab5..6843d66 100644
--- a/arch/x86_64/src/memory/heap/user_heap_allocator.cpp
+++ b/arch/x86_64/src/memory/heap/user_heap_allocator.cpp
@@ -1,58 +1,180 @@
 #include "arch/memory/heap/user_heap_allocator.hpp"
 #include "arch/exception_handling/assert.hpp"
+#include "arch/exception_handling/panic.hpp"
 
-#include <limits>
-#include <type_traits>
+#include <algorithm>
 
 namespace teachos::arch::memory::heap
 {
-  namespace
+  user_heap_allocator::user_heap_allocator(std::size_t heap_start, std::size_t heap_end)
+    : heap_start(heap_start)
+    , heap_end(heap_end)
+    , first(nullptr)
+    , mutex{stl::mutex{}}
   {
-    template<typename T>
-    [[gnu::section(".user_text")]]
-    auto saturating_add(T x, T y) -> T
-      requires std::is_unsigned_v<T>
+    auto const heap_size = heap_end - heap_start;
+    exception_handling::assert(
+      heap_size > min_allocatable_size(),
+      "[Linked List Allocator] Total heap size cannot be smaller than the minimum of 16 bytes needed to hold "
+      "at least one memory hole entry");
+    first = new (reinterpret_cast<void *>(heap_start)) memory_block(heap_size, nullptr);
+  }
+
+  auto user_heap_allocator::allocate(std::size_t size) -> void *
+  {
+    // Add sizeof(std::size_t) to the total allocated size, because we prepend a header that stores the size of the
+    // allocated block; this allows deallocation without having to pass the corresponding size.
+    auto const total_size = size + sizeof(std::size_t);
+    mutex.lock();
+
+    memory_block * previous = nullptr;
+    auto current = first;
+
+    while (current != nullptr)
     {
-      if (x > std::numeric_limits<T>::max() - y)
+      // TODO: Cannot access the current pointer. Results in a General Protection Fault?
+      if (current->size == total_size)
       {
-        return std::numeric_limits<T>::max();
+        auto const memory_address = remove_free_memory_block(previous, current);
+        new (memory_address) std::size_t(total_size);
+        mutex.unlock();
+        return reinterpret_cast<void *>(reinterpret_cast<std::size_t>(memory_address) + sizeof(std::size_t));
       }
-      T result = x + y;
-      return result;
+      else if (current->size >= total_size + min_allocatable_size())
+      {
+        // Ensure that the allocated block is at least 16 bytes (required because, if we free the hole afterwards,
+        // there needs to be enough space for a memory block). We therefore allocate more than is actually required
+        // when the total size was smaller, and simply deallocate the extra space along with it.
+        auto const max_size = std::max(total_size, min_allocatable_size());
+        auto const memory_address = split_free_memory_block(previous, current, max_size);
+        new (memory_address) std::size_t(max_size);
+        mutex.unlock();
+        return reinterpret_cast<void *>(reinterpret_cast<std::size_t>(memory_address) + sizeof(std::size_t));
+      }
+
+      previous = current;
+      current = current->next;
     }
-  } // namespace
 
-  auto user_heap_allocator::allocate(std::size_t size) -> void *
+    exception_handling::panic("[Linked List Allocator] Out of memory");
+  }
+
+  auto user_heap_allocator::deallocate(void * pointer) noexcept -> void
   {
-    // TODO: .load() crashes because it is only in the kernel .text section and not the user one? How would you fix
-    // that? Reading the value only has to be done once, because compare_exchange_weak updates the value as well if the
-    // exchange failed, because the value was not the expected one.
-    // Similarly, assert causes a crash here; it is easier to simply create a syscall for the assert method.
-    auto alloc_start = next.load(std::memory_order::relaxed);
-    // Repeat the allocation until it succeeds. This has to be done because another allocator could overtake it at any
-    // time, causing the value to differ and the calculation to have to be redone.
-    for (;;)
+    mutex.lock();
+
+    // Read the stored total size from the header of the allocated block
+    auto const header_pointer = reinterpret_cast<void *>(reinterpret_cast<std::size_t>(pointer) - sizeof(std::size_t));
+    auto const total_size = *reinterpret_cast<std::size_t *>(header_pointer);
+
+    auto const start_address = reinterpret_cast<std::size_t>(header_pointer);
+    auto const end_address = start_address + total_size;
+
+    memory_block * previous = nullptr;
+    auto current = first;
+
+    while (current != nullptr)
     {
-      auto const alloc_end = saturating_add(alloc_start, size);
-      arch::exception_handling::assert(alloc_end <= heap_end, "[Heap Allocator] Out of memory");
-      // Check if the atomic value is still the one initially loaded; if it isn't, we have been overtaken by another
-      // thread and need to redo the calculation. Spurious failure by weak can be ignored, because the whole allocation
-      // is wrapped in an infinite for loop, so a failure that wasn't actually one will simply be retried until it works.
-      auto const updated = next.compare_exchange_weak(alloc_start, alloc_end, std::memory_order::relaxed);
-      if (updated)
+      // The current free memory block now lies at an address after our block to deallocate in heap memory space.
+      if (reinterpret_cast<std::size_t>(current) >= end_address)
       {
-        return reinterpret_cast<void *>(alloc_start);
+        break;
       }
+
+      previous = current;
+      current = current->next;
     }
+
+    coalesce_free_memory_block(previous, current, header_pointer, total_size);
+    mutex.unlock();
   }
 
-  auto user_heap_allocator::deallocate(void * pointer) noexcept -> void
+  auto user_heap_allocator::remove_free_memory_block(memory_block * previous_block, memory_block * current_block)
+    -> void *
   {
-    if (pointer)
+    return replace_free_memory_block(previous_block, current_block, current_block->next);
+  }
+
+  auto user_heap_allocator::split_free_memory_block(memory_block * previous_block, memory_block * current_block,
+                                                    std::size_t size) -> void *
+  {
+    auto const end_address = reinterpret_cast<std::size_t>(current_block) + size;
+    auto const new_block =
+      new (reinterpret_cast<void *>(end_address)) memory_block(current_block->size - size, current_block->next);
+    return replace_free_memory_block(previous_block, current_block, new_block);
+  }
+
+  auto user_heap_allocator::replace_free_memory_block(memory_block * previous_block, memory_block * current_block,
+                                                      memory_block * new_block) -> void *
+  {
+    auto const start_address = reinterpret_cast<std::size_t>(current_block);
+    // If we want to allocate into the first block, which lies before any other free block, then there is no previous
+    // free block (nullptr). We therefore have to overwrite the first pointer instead of overwriting a next value.
+    if (previous_block == nullptr)
+    {
+      first = new_block;
+    }
+    else
+    {
+      previous_block->next = new_block;
+    }
+    current_block->~memory_block();
+    return reinterpret_cast<void *>(start_address);
+  }
+
+  auto user_heap_allocator::coalesce_free_memory_block(memory_block * previous_block, memory_block * current_block,
+                                                       void * pointer, std::size_t size) -> void
+  {
+    auto const start_address = reinterpret_cast<std::size_t>(pointer);
+    auto const end_address = start_address + size;
+
+    // Initial values if there are no adjacent free blocks either before or after, meaning we simply create a free
+    // memory block that is placed in between the previous and next block.
+    auto block_size = size;
+    auto next_block = current_block;
+
+    // The block we want to deallocate sits directly before another free block, so we can combine both into one.
+    // This is done by deleting the current free block and creating a new block at the start address of the block to
+    // deallocate, with the combined size of the block to deallocate and the free block next to it.
+    if (end_address == reinterpret_cast<std::size_t>(current_block))
+    {
+      block_size += current_block->size;
+      next_block = current_block->next;
+      current_block->~memory_block();
+    }
+
+    // The block we want to deallocate sits directly behind another free block, so we can combine both into one.
+    // This is done by simply growing the size of the previous block to include the size of the block to deallocate.
+    // The previous block is modified in place because it might still be referenced by the next field of other
+    // memory blocks.
+    if (previous_block != nullptr &&
+        start_address == (reinterpret_cast<std::size_t>(previous_block) + previous_block->size))
+    {
+      block_size += previous_block->size;
+
+      previous_block->size = block_size;
+      previous_block->next = next_block;
+      return;
+    }
+
+    // Check whether the block we want to deallocate is contained in the previous block, because if it is, it can only
+    // mean that the block has already been deallocated and we therefore attempted a double free.
+    exception_handling::assert(previous_block == nullptr ||
+                                 start_address >=
+                                   (reinterpret_cast<std::size_t>(previous_block) + previous_block->size),
+                               "[Linked List Allocator] Attempted double free detected");
+
+    auto const new_block = new (pointer) memory_block(block_size, next_block);
+    // If we want to deallocate the first block, which lies before any other free block, then there is no previous
+    // free block (nullptr). We therefore have to overwrite the first pointer instead of overwriting a next value.
+    if (previous_block == nullptr)
     {
+      first = new_block;
+      return;
     }
+    previous_block->next = new_block;
   }
 } // namespace teachos::arch::memory::heap
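The commit uses `memory_block` and `min_allocatable_size()` from `user_heap_allocator.hpp`, which is not part of this diff. The following is only a minimal sketch of what such a free-list node could look like on x86_64, assuming the node stores the size of the hole and a pointer to the next hole in address order (16 bytes total, matching the minimum mentioned in the constructor assertion); the exact layout and names in the real header may differ.

```cpp
#include <cstddef>

// Hypothetical sketch; the real declarations live in user_heap_allocator.hpp,
// which this commit does not touch.
struct memory_block
{
  std::size_t size;     // size of this free hole in bytes, including the node itself
  memory_block * next;  // next free hole in address order, or nullptr

  memory_block(std::size_t size, memory_block * next) : size(size), next(next) {}
};

// Smallest chunk the allocator can hand out or keep as a hole: a freed chunk must
// be able to hold a memory_block node again (8 + 8 bytes on x86_64).
constexpr auto min_allocatable_size() -> std::size_t
{
  return sizeof(memory_block);
}
```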
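To make the size-header bookkeeping concrete, here is a hedged usage sketch. It assumes the allocator class from this commit is reachable from user code and uses a plain static buffer in place of the real user heap range; the buffer, its size, and the `example` function are illustrative only, not part of the project.

```cpp
#include "arch/memory/heap/user_heap_allocator.hpp"

#include <cstddef>

// Illustrative only: a 4 KiB region standing in for the user heap range that the
// kernel would normally hand to the allocator.
alignas(16) static std::byte user_heap[4096];

void example()
{
  auto const heap_start = reinterpret_cast<std::size_t>(user_heap);
  auto const heap_end = heap_start + sizeof(user_heap);

  // The constructor creates one free block spanning the whole range.
  teachos::arch::memory::heap::user_heap_allocator allocator{heap_start, heap_end};

  // allocate() reserves size + sizeof(std::size_t) bytes, writes the total size
  // into the first word, and returns the address just past that header.
  auto * payload = allocator.allocate(32);

  // The header therefore sits sizeof(std::size_t) bytes before the returned pointer
  // and holds 32 + sizeof(std::size_t), since no padding to min_allocatable_size()
  // was needed for this request.
  auto const stored_size =
    *reinterpret_cast<std::size_t const *>(reinterpret_cast<std::size_t>(payload) - sizeof(std::size_t));
  (void)stored_size;

  // deallocate() reads that header back and re-inserts the block into the free list,
  // coalescing with neighbouring holes where the addresses line up.
  allocator.deallocate(payload);
}
```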
