diff options
| author | Matteo Gmür <matteo.gmuer1@ost.ch> | 2025-05-09 13:15:58 +0000 |
|---|---|---|
| committer | Matteo Gmür <matteo.gmuer1@ost.ch> | 2025-05-09 13:15:58 +0000 |
| commit | a5812be4fa8ecc208219682204b14969d7b718b0 (patch) | |
| tree | 2365468add76cdb604ede4f751bcf3791263f312 | |
| parent | d80ecf29baada6242c5181adaec0d1500707cad0 (diff) | |
| download | teachos-a5812be4fa8ecc208219682204b14969d7b718b0.tar.xz teachos-a5812be4fa8ecc208219682204b14969d7b718b0.zip | |
Switch user heap to linked list allocator
| -rw-r--r-- | arch/x86_64/include/arch/memory/heap/user_heap_allocator.hpp | 118 | ||||
| -rw-r--r-- | arch/x86_64/src/memory/heap/user_heap_allocator.cpp | 186 |
2 files changed, 251 insertions, 53 deletions
diff --git a/arch/x86_64/include/arch/memory/heap/user_heap_allocator.hpp b/arch/x86_64/include/arch/memory/heap/user_heap_allocator.hpp index 9d00c19..d698888 100644 --- a/arch/x86_64/include/arch/memory/heap/user_heap_allocator.hpp +++ b/arch/x86_64/include/arch/memory/heap/user_heap_allocator.hpp @@ -1,16 +1,14 @@ #ifndef TEACHOS_ARCH_X86_64_MEMORY_HEAP_USER_HEAP_ALLOCATOR_HPP #define TEACHOS_ARCH_X86_64_MEMORY_HEAP_USER_HEAP_ALLOCATOR_HPP -#include "arch/memory/heap/heap_allocator.hpp" - -#include <atomic> -#include <cstdint> +#include "arch/memory/heap/memory_block.hpp" +#include "arch/stl/mutex.hpp" namespace teachos::arch::memory::heap { /** - * @brief Simple heap allocator, which allocates linearly and leaks all allocated memory, because it does not really - * deallocate anything. + * @brief Sorted by address list of memory holes (free memory). Uses free holes itself to save the information, + * containing the size and pointer to the next hole. Resulting in a singly linked list. */ struct user_heap_allocator { @@ -20,31 +18,109 @@ namespace teachos::arch::memory::heap * @param heap_start Start of the allocatable heap area * @param heap_end End of the allocatable heap area (Start + Size) */ - user_heap_allocator(std::size_t heap_start, std::size_t heap_end) - : heap_start{heap_start} - , heap_end{heap_end} - , next{heap_start} - { - // Nothing to do - } + user_heap_allocator(std::size_t heap_start, std::size_t heap_end); + /** + * @copybrief heap_allocator::allocate + * + * @note The specified size is used to find a free memory block with the exact same size, meaning we can remove that + * free memory block from the free list and simply return its address. Or it has to be big enough to hold the size + * and alteast enough memory for another free memory block entry (16 bytes). 
If the amount of memory of that free + * memory block is in between we cannot use it for our allocation, because we could only return it to the user, but + * the additional bytes, could not be used to create a free memory block. Additionaly the user couldn't know + * they received more memory than wanted. Therefore the memory would simply be unused and because it is neither + * allocated nor deallocated would never be indexed by the free memory list. We would therefore permanently loose + * that memory, to prevent that allocation into free memory blocks like that are impossible. + */ [[gnu::section(".user_text")]] auto allocate(std::size_t size) -> void *; + [[gnu::section(".user_text")]] + auto deallocate(void * pointer) noexcept -> void; + + private: /** - * @copybrief heap_allocator::deallocate + * @brief Returns the smallest allocatable block of heap memory. * - * @note Simply does nothing, because this allocator leaks all memory + * @return Smallest allocatable block of heap memory. + */ + [[gnu::section(".user_text")]] auto constexpr min_allocatable_size() -> std::size_t { return sizeof(memory_block); } + + /** + * @brief Removes a free memory block from the free list and returns its address so the caller can allocate into it. + * + * @param previous_block Free memory block before the block to allocate in our heap memory. Was to small to + * allocate the required size into. + * @param current_block Free memory block we want to remove from the free list and return for the allocation. + * + * @return Previous start address of the memory block we removed, because it can now be used for the allocation. 
*/ [[gnu::section(".user_text")]] - auto deallocate(void * pointer) noexcept -> void; + auto remove_free_memory_block(memory_block * previous_block, memory_block * current_block) -> void *; - private: - std::size_t heap_start; ///< Start of the allocatable heap area - std::size_t heap_end; ///< End of the allocatable heap area - std::atomic_uint64_t next; ///< Current address, which is the start of still unused allocatable heap area - }; + /** + * @brief Splits the given free memory block into two, where the latter block keeps being free and the first + * part will be used for the allocation. + * + * @param previous_block Free memory block before the block to allocate in our heap memory. Was to small to + * allocate the required size into. + * @param current_block Free memory block we want to split into a size part for the allocation and the rest for + * future allocations. + * @param size Size we want to allocate at the start of the free memory block. + * + * @return Previous start address of the memory block we just split, because it can now be used for the allocation. + */ + [[gnu::section(".user_text")]] + auto split_free_memory_block(memory_block * previous_block, memory_block * current_block, std::size_t size) + -> void *; + /** + * @brief Removes a free memory block from the free list and returns its address so the caller can allocate into it. + * + * @param previous_block Free memory block before the block to allocate in our heap memory. Was to small to + * allocate the required size into. + * @param current_block Free memory block we want to remove from the free list and return for the allocation. + * @param new_block Replaces the current block with the given new block can be nullptr, meaning the free list will + * end here. + * + * @return Previous start address of the memory block we removed, because it can now be used for the allocation. 
+ */ + [[gnu::section(".user_text")]] + auto replace_free_memory_block(memory_block * previous_block, memory_block * current_block, + memory_block * new_block) -> void *; + + /** + * @brief Combines multiple free memory blocks into one if they are adjacent. + * + * @note The internal algorithm for recombination functions like this: + * 1. Check if there is even any memory left, if not the first entry of our linked list should be a nullptr and + * we can therefore set the first entry to our newly created entry. This entry is created in the now deallocated + * memory area. + * 2. If there are more blocks but neither the previous nor the current block are adjacent, we simply create a + * new free memory block of the given size and set the previous next to our block and the next of our block to + * the current block. + * 3. If the current block is adjacent the start address of the newly created block stays the same, but the size + * increases by the amount in the current memory block header. After reading it we also clear the header. + * 4. If the previous block is adjacent the size of the previous block simply increases to include the given + * size as well. + * 5. If the previous block is directly in our start address, so they overlap then it has to mean some or all of + * the region we are trying to deallocate has been freed before. Which would result in a double free therefore + * we halt the execution of the program. + * + * @param previous_block Free memory block before the block to deallocate in our heap memory. + * @param current_block Free memory block after the block to deallocate in our heap memory. + * @param pointer Block to deallocate. + * @param size Size of the block we want to deallocate. + */ + [[gnu::section(".user_text")]] + auto coalesce_free_memory_block(memory_block * previous_block, memory_block * current_block, void * pointer, + std::size_t size) -> void; + + std::size_t heap_start; ///< Start of the allocatable heap area. 
+ std::size_t heap_end; ///< End of the allocatable heap area. + memory_block * first; ///< First free entry in our memory. + stl::mutex mutex; ///< Mutex to ensure only one thread calls allocate or deallocate at once. + }; } // namespace teachos::arch::memory::heap #endif // TEACHOS_ARCH_X86_64_MEMORY_HEAP_USER_HEAP_ALLOCATOR_HPP diff --git a/arch/x86_64/src/memory/heap/user_heap_allocator.cpp b/arch/x86_64/src/memory/heap/user_heap_allocator.cpp index eefcab5..6843d66 100644 --- a/arch/x86_64/src/memory/heap/user_heap_allocator.cpp +++ b/arch/x86_64/src/memory/heap/user_heap_allocator.cpp @@ -1,58 +1,180 @@ #include "arch/memory/heap/user_heap_allocator.hpp" #include "arch/exception_handling/assert.hpp" +#include "arch/exception_handling/panic.hpp" -#include <limits> -#include <type_traits> +#include <algorithm> namespace teachos::arch::memory::heap { - namespace + user_heap_allocator::user_heap_allocator(std::size_t heap_start, std::size_t heap_end) + : heap_start(heap_start) + , heap_end(heap_end) + , first(nullptr) + , mutex{stl::mutex{}} { - template<typename T> - [[gnu::section(".user_text")]] - auto saturating_add(T x, T y) -> T - requires std::is_unsigned_v<T> + auto const heap_size = heap_end - heap_start; + exception_handling::assert( + heap_size > min_allocatable_size(), + "[Linked List Allocator] Total heap size can not be smaller than minimum of 16 bytes to hold " + "atleast one memory hole entry"); + first = new (reinterpret_cast<void *>(heap_start)) memory_block(heap_size, nullptr); + } + + auto user_heap_allocator::allocate(std::size_t size) -> void * + { + // Add size of size_t to the total allocated size, because we add a header that includes the size of the allocated + // block, to allow for deallocation without the need to call with the corresponding size + auto const total_size = size + sizeof(std::size_t); + mutex.lock(); + + memory_block * previous = nullptr; + auto current = first; + + while (current != nullptr) { - if (x > 
std::numeric_limits<T>::max() - y) + // TODO: Can not access current pointer. Results in a General Protection Fault? + if (current->size == total_size) { - return std::numeric_limits<T>::max(); + auto const memory_address = remove_free_memory_block(previous, current); + new (memory_address) std::size_t(total_size); + mutex.unlock(); + return reinterpret_cast<void *>(reinterpret_cast<std::size_t>(memory_address) + sizeof(std::size_t)); } - T result = x + y; - return result; + else if (current->size >= total_size + min_allocatable_size()) + { + // Ensure that the allocated size block is atleast 16 bytes (required because if we free the hole afterwards + // there needs to be enough space for a memory block). Therefore we allocate more than is actually required if + // the total size was less and simply deallocate it as well + auto const max_size = std::max(total_size, min_allocatable_size()); + auto const memory_address = split_free_memory_block(previous, current, max_size); + new (memory_address) std::size_t(max_size); + mutex.unlock(); + return reinterpret_cast<void *>(reinterpret_cast<std::size_t>(memory_address) + sizeof(std::size_t)); + } + + previous = current; + current = current->next; } - } // namespace - auto user_heap_allocator::allocate(std::size_t size) -> void * + exception_handling::panic("[Linked List Allocator] Out of memory"); + } + + auto user_heap_allocator::deallocate(void * pointer) noexcept -> void { - // TODO: .load() crashes because it is only in the kernel .text section and not the user one? How would you fix - // Similarly assert cause a crash here it is easier though simply create a syscall for the assert method. - - // that? Reading the value only has to be done once, because compare_exchange_weak updates the value as well if the - // exchange failed, becuase the value was not the expected one. 
- auto alloc_start = next.load(std::memory_order::relaxed); - // Repeat allocation until it succeeds, has to be done, because another allocator could overtake it at any time - // causing the value to differ and the calculation to have to be redone. - for (;;) + mutex.lock(); + + // Read configured header size of the complete allocated block + auto const header_pointer = reinterpret_cast<void *>(reinterpret_cast<std::size_t>(pointer) - sizeof(std::size_t)); + auto const total_size = *reinterpret_cast<std::size_t *>(header_pointer); + + auto const start_address = reinterpret_cast<std::size_t>(header_pointer); + auto const end_address = start_address + total_size; + + memory_block * previous = nullptr; + auto current = first; + + while (current != nullptr) { - auto const alloc_end = saturating_add(alloc_start, size); - arch::exception_handling::assert(alloc_end <= heap_end, "[Heap Allocator] Out of memory"); - // Check if the atomic value is still the one initally loaded, if it isn't we have been overtaken by another - // thread and need to redo the calculation. Spurious failure by weak can be ignored, because the whole allocation - // is wrapped in an infinite for loop so a failure that wasn't actually one will simply be retried until it works. - auto const updated = next.compare_exchange_weak(alloc_start, alloc_end, std::memory_order::relaxed); - if (updated) + // Current address of the free memory block now points to an address that is after our block to deallocate in heap + // memory space. 
+ if (reinterpret_cast<std::size_t>(current) >= end_address) { - return reinterpret_cast<void *>(alloc_start); + break; } + + previous = current; + current = current->next; } + + coalesce_free_memory_block(previous, current, header_pointer, total_size); + mutex.unlock(); } - auto user_heap_allocator::deallocate(void * pointer) noexcept -> void + auto user_heap_allocator::remove_free_memory_block(memory_block * previous_block, memory_block * current_block) + -> void * { - if (pointer) + return replace_free_memory_block(previous_block, current_block, current_block->next); + } + + auto user_heap_allocator::split_free_memory_block(memory_block * previous_block, memory_block * current_block, + std::size_t size) -> void * + { + auto const end_address = reinterpret_cast<std::size_t>(current_block) + size; + auto const new_block = + new (reinterpret_cast<void *>(end_address)) memory_block(current_block->size - size, current_block->next); + return replace_free_memory_block(previous_block, current_block, new_block); + } + + auto user_heap_allocator::replace_free_memory_block(memory_block * previous_block, memory_block * current_block, + memory_block * new_block) -> void * + { + auto const start_address = reinterpret_cast<std::size_t>(current_block); + // If we want to allocate into the first block that is before any other free block, then there exists no previous + // free block (nullptr). Therefore we have to overwrite the first block instead of overwriting its next value. 
+ if (previous_block == nullptr) + { + first = new_block; + } + else + { + previous_block->next = new_block; + } + current_block->~memory_block(); + return reinterpret_cast<void *>(start_address); + } + + auto user_heap_allocator::coalesce_free_memory_block(memory_block * previous_block, memory_block * current_block, + void * pointer, std::size_t size) -> void + { + auto const start_address = reinterpret_cast<std::size_t>(pointer); + auto const end_address = start_address + size; + + // Inital values if there are no adjacent blocks either before or after, meaning we have to simply create a free + // memory block that is placed in between the previous and next block. + auto block_size = size; + auto next_block = current_block; + + // If the block we want to deallocate is before another free block and we can therefore combine both into one. + // This is done by deleting the current free block and creating a new block at the start address of the block to + // deallocate with both the size of the block to deallcoate and the free block next to it. + if (end_address == reinterpret_cast<std::size_t>(current_block)) + { + block_size += current_block->size; + next_block = current_block->next; + current_block->~memory_block(); + } + + // If the block we want to deallocate is behind another free block and we can therefore combine both into one. + // This is done by simply changin the size of the previous block to include the size of the block to deallocate. + // This is done, because the previous block might still be referencered by the next field of other memory blocks. 
+ if (previous_block != nullptr && + start_address == (reinterpret_cast<std::size_t>(previous_block) + previous_block->size)) + { + block_size += previous_block->size; + + previous_block->size = block_size; + previous_block->next = next_block; + return; + } + + // Check if the block we want to deallocate is contained in the previous block, because if it is it can only mean + // that the block has already been deallocated and we therefore attempted a double free. + exception_handling::assert(previous_block == nullptr || + start_address >= + (reinterpret_cast<std::size_t>(previous_block) + previous_block->size), + "[Linked List Allocator] Attempted double free detected"); + + auto const new_block = new (pointer) memory_block(block_size, next_block); + // If we want to deallocate the first block that is before any other free block, then there exists no previous free + // block (nullptr). Therefore we have to overwrite the first block instead of overwriting its + // next value. + if (previous_block == nullptr) { + first = new_block; + return; } + previous_block->next = new_block; } } // namespace teachos::arch::memory::heap |
