aboutsummaryrefslogtreecommitdiff
path: root/arch/x86_64/src/memory/heap
diff options
context:
space:
mode:
authorMatteo Gmür <matteo.gmuer1@ost.ch>2025-02-18 10:59:05 +0100
committerMatteo Gmür <matteo.gmuer1@ost.ch>2025-02-18 10:59:05 +0100
commitcd42c21f2460751428b3e1b4ae07ea0b924967bc (patch)
treee3e410f399c3eead444f2a242a19448571fd979a /arch/x86_64/src/memory/heap
parent47879f42d70755fcf5473ffb82798b515cb2e21b (diff)
parent3d488e53a1d15fcc01a7b1d23b9585ca7a724864 (diff)
downloadteachos-cd42c21f2460751428b3e1b4ae07ea0b924967bc.tar.xz
teachos-cd42c21f2460751428b3e1b4ae07ea0b924967bc.zip
Merge branch 'feat_memory_manager' into 'develop_sa'
Finish initial draft of Memory Manager See merge request teachos/kernel!3
Diffstat (limited to 'arch/x86_64/src/memory/heap')
-rw-r--r--arch/x86_64/src/memory/heap/bump_allocator.cpp52
-rw-r--r--arch/x86_64/src/memory/heap/linked_list_allocator.cpp168
-rw-r--r--arch/x86_64/src/memory/heap/memory_block.cpp15
3 files changed, 235 insertions, 0 deletions
diff --git a/arch/x86_64/src/memory/heap/bump_allocator.cpp b/arch/x86_64/src/memory/heap/bump_allocator.cpp
new file mode 100644
index 0000000..bbf2021
--- /dev/null
+++ b/arch/x86_64/src/memory/heap/bump_allocator.cpp
@@ -0,0 +1,52 @@
+#include "arch/memory/heap/bump_allocator.hpp"
+
+#include "arch/exception_handling/assert.hpp"
+
+#include <limits>
+#include <type_traits>
+
+namespace teachos::arch::memory::heap
+{
+ namespace
+ {
+ template<typename T>
+ auto saturating_add(T x, T y) -> T
+ requires std::is_unsigned_v<T>
+ {
+ if (x > std::numeric_limits<T>::max() - y)
+ {
+ return std::numeric_limits<T>::max();
+ }
+ T result = x + y;
+ return result;
+ }
+ } // namespace
+
  // Allocate `size` bytes by advancing the shared `next` cursor and return the
  // previous cursor value as the allocation's start address. Lock-free: racing
  // threads are resolved by retrying the compare-exchange. Asserts (panics)
  // when the request would move the cursor past `heap_end`.
  // NOTE(review): the returned address carries no alignment guarantee beyond
  // whatever `next` currently holds - confirm callers do not rely on alignment.
  auto bump_allocator::allocate(std::size_t size) -> void *
  {
    // Repeat allocation until it succeeds, has to be done, because another allocator could overtake it at any time
    // causing the value to differ and the calculation to have to be redone.
    for (;;)
    {
      auto alloc_start = next.load(std::memory_order::relaxed);
      // Saturate instead of wrapping so a huge `size` cannot overflow past heap_end and falsely pass the check below.
      auto const alloc_end = saturating_add(alloc_start, size);
      arch::exception_handling::assert(alloc_end <= heap_end, "[Heap Allocator] Out of memory");
      // Check if the atomic value is still the one initially loaded, if it isn't we have been overtaken by another
      // thread and need to redo the calculation. Spurious failure by weak can be ignored, because the whole allocation
      // is wrapped in an infinite for loop so a failure that wasn't actually one will simply be retried until it works.
      auto const updated = next.compare_exchange_weak(alloc_start, alloc_end, std::memory_order::relaxed);
      if (updated)
      {
        return reinterpret_cast<void *>(alloc_start);
      }
    }
  }
+
+ auto bump_allocator::deallocate(void * pointer, std::size_t size) -> void
+ {
+ if (pointer || size)
+ {
+ }
+ }
+
+} // namespace teachos::arch::memory::heap
diff --git a/arch/x86_64/src/memory/heap/linked_list_allocator.cpp b/arch/x86_64/src/memory/heap/linked_list_allocator.cpp
new file mode 100644
index 0000000..e5bae21
--- /dev/null
+++ b/arch/x86_64/src/memory/heap/linked_list_allocator.cpp
@@ -0,0 +1,168 @@
+#include "arch/memory/heap/linked_list_allocator.hpp"
+
+#include "arch/exception_handling/assert.hpp"
+#include "arch/exception_handling/panic.hpp"
+
+namespace teachos::arch::memory::heap
+{
+ linked_list_allocator::linked_list_allocator(std::size_t heap_start, std::size_t heap_end)
+ : heap_start(heap_start)
+ , heap_end(heap_end)
+ , first(nullptr)
+ , mutex{shared::mutex{}}
+ {
+ auto const heap_size = heap_end - heap_start;
+ exception_handling::assert(
+ heap_size > min_allocatable_size(),
+ "[Linked List Allocator] Total heap size can not be smaller than minimum of 16 bytes to hold "
+ "atleast one memory hole entry");
+ first = new (reinterpret_cast<void *>(heap_start)) memory_block(heap_size, nullptr);
+ }
+
+ auto linked_list_allocator::allocate(std::size_t size) -> void *
+ {
+ exception_handling::assert(size > min_allocatable_size(),
+ "[Linked List Allocator] Allocated memory cannot be smaller than 16 bytes");
+ mutex.lock();
+
+ memory_block * previous = nullptr;
+ auto current = first;
+
+ while (current != nullptr)
+ {
+ if (current->size == size)
+ {
+ auto const memory_address = remove_free_memory_block(previous, current);
+ mutex.unlock();
+ return memory_address;
+ }
+ else if (current->size >= size + min_allocatable_size())
+ {
+ auto const memory_address = split_free_memory_block(previous, current, size);
+ mutex.unlock();
+ return memory_address;
+ }
+
+ previous = current;
+ current = current->next;
+ }
+
+ exception_handling::panic("[Linked List Allocator] Out of memory");
+ }
+
+ auto linked_list_allocator::deallocate(void * pointer, std::size_t size) -> void
+ {
+ exception_handling::assert(size > min_allocatable_size(),
+ "[Linked List Allocator] Allocated memory cannot be smaller than 16 bytes");
+ mutex.lock();
+
+ auto const start_address = reinterpret_cast<std::size_t>(pointer);
+ auto const end_address = start_address + size;
+
+ memory_block * previous = nullptr;
+ auto current = first;
+
+ while (current != nullptr)
+ {
+ // Current address of the free memory block now points to an address that is after our block to deallocate in heap
+ // memory space.
+ if (reinterpret_cast<std::size_t>(current) >= end_address)
+ {
+ break;
+ }
+
+ previous = current;
+ current = current->next;
+ }
+
+ coalesce_free_memory_block(previous, current, pointer, size);
+ mutex.unlock();
+ }
+
  // Unlink `current_block` from the free list entirely (used for exact-fit
  // allocations): its predecessor is rewired straight to its successor and the
  // block's start address is returned as the allocation.
  auto linked_list_allocator::remove_free_memory_block(memory_block * previous_block,
                                                       memory_block * current_block) -> void *
  {
    return replace_free_memory_block(previous_block, current_block, current_block->next);
  }
+
+ auto linked_list_allocator::split_free_memory_block(memory_block * previous_block, memory_block * current_block,
+ std::size_t size) -> void *
+ {
+ auto const end_address = reinterpret_cast<std::size_t>(current_block) + size;
+ auto const new_block =
+ new (reinterpret_cast<void *>(end_address)) memory_block(current_block->size - size, current_block->next);
+ return replace_free_memory_block(previous_block, current_block, new_block);
+ }
+
+ auto linked_list_allocator::replace_free_memory_block(memory_block * previous_block, memory_block * current_block,
+ memory_block * new_block) -> void *
+ {
+ auto const start_address = reinterpret_cast<std::size_t>(current_block);
+ // If we want to allocate into the first block that is before any other free block, then there exists no previous
+ // free block (nullptr). Therefore we have to overwrite the first block instead of overwriting its next value.
+ if (previous_block == nullptr)
+ {
+ first = new_block;
+ }
+ else
+ {
+ previous_block->next = new_block;
+ }
+ current_block->~memory_block();
+ return reinterpret_cast<void *>(start_address);
+ }
+
  // Insert the freed region [pointer, pointer + size) into the free list,
  // merging it with the free block immediately after it (`current_block`)
  // and/or the one immediately before it (`previous_block`) when they are
  // byte-adjacent, so neighbouring holes collapse into a single larger hole.
  //
  // Preconditions (established by deallocate()): the free list is sorted by
  // address, `current_block` is the first free block at or after the region's
  // end (may be nullptr), and `previous_block` is its predecessor (nullptr when
  // the region precedes every free block).
  auto linked_list_allocator::coalesce_free_memory_block(memory_block * previous_block, memory_block * current_block,
                                                         void * pointer, std::size_t size) -> void
  {
    auto const start_address = reinterpret_cast<std::size_t>(pointer);
    auto const end_address = start_address + size;

    // Initial values if there are no adjacent blocks either before or after, meaning we have to simply create a free
    // memory block that is placed in between the previous and next block.
    auto block_size = size;
    auto next_block = current_block;

    // If the block we want to deallocate is before another free block and we can therefore combine both into one.
    // This is done by deleting the current free block and creating a new block at the start address of the block to
    // deallocate with both the size of the block to deallocate and the free block next to it.
    if (end_address == reinterpret_cast<std::size_t>(current_block))
    {
      block_size += current_block->size;
      next_block = current_block->next;
      current_block->~memory_block();
    }

    // If the block we want to deallocate is behind another free block and we can therefore combine both into one.
    // This is done by simply changing the size of the previous block to include the size of the block to deallocate.
    // This is done, because the previous block might still be referenced by the next field of other memory blocks.
    if (previous_block != nullptr &&
        start_address == (reinterpret_cast<std::size_t>(previous_block) + previous_block->size))
    {
      // block_size may already include a forward merge, so this can fuse three
      // holes (previous + freed region + current) into one.
      block_size += previous_block->size;

      previous_block->size = block_size;
      previous_block->next = next_block;
      return;
    }

    // Check if the block we want to deallocate is contained in the previous block, because if it is it can only mean
    // that the block has already been deallocated and we therefore attempted a double free.
    exception_handling::assert(previous_block == nullptr ||
                               start_address >=
                                 (reinterpret_cast<std::size_t>(previous_block) + previous_block->size),
                               "[Linked List Allocator] Attempted double free detected");

    // No backward merge possible: build a fresh header at the freed address.
    auto const new_block = new (pointer) memory_block(block_size, next_block);
    // If we want to deallocate the first block that is before any other free block, then there exists no previous free
    // block (nullptr). Therefore we have to overwrite the first block instead of overwriting its
    // next value.
    if (previous_block == nullptr)
    {
      first = new_block;
      return;
    }
    previous_block->next = new_block;
  }
+
+} // namespace teachos::arch::memory::heap
diff --git a/arch/x86_64/src/memory/heap/memory_block.cpp b/arch/x86_64/src/memory/heap/memory_block.cpp
new file mode 100644
index 0000000..446cd96
--- /dev/null
+++ b/arch/x86_64/src/memory/heap/memory_block.cpp
@@ -0,0 +1,15 @@
+#include "arch/memory/heap/memory_block.hpp"
+
+#include <string.h>
+
namespace teachos::arch::memory::heap
{
  // Construct a free-list header in place and scrub the memory it describes.
  //
  // Zeroes `size` bytes starting at `this` - i.e. the entire free region the
  // header fronts, not just the header itself - before recording the region's
  // size and its successor in the free list. Assumes the caller placement-news
  // this header at the start of a region of at least `size` bytes and that
  // size >= sizeof(memory_block) - TODO confirm callers guarantee this.
  memory_block::memory_block(std::size_t size, memory_block * next)
  {
    // memset first, then assign: the scrub would otherwise wipe the members.
    memset(static_cast<void *>(this), 0, size);
    this->size = size;
    this->next = next;
  }

  // Scrub only the header on destruction so stale size/next values cannot be
  // mistaken for a live free-list entry.
  memory_block::~memory_block() { memset(static_cast<void *>(this), 0, sizeof(memory_block)); }
} // namespace teachos::arch::memory::heap