aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFabian Imhof <fabian.imhof@ost.ch>2024-12-03 08:00:26 +0000
committerFabian Imhof <fabian.imhof@ost.ch>2024-12-03 08:00:26 +0000
commitb4962c8c7b94fce2e67a00671de87fa96fdbb659 (patch)
tree1c1429f12d282002c928cc410f22e76a8d3aeae2
parentdcd83b71c833e86c7e00e2b8f75ab6208b5d360d (diff)
downloadteachos-b4962c8c7b94fce2e67a00671de87fa96fdbb659.tar.xz
teachos-b4962c8c7b94fce2e67a00671de87fa96fdbb659.zip
add mutex to linked_list_allocator
-rw-r--r--arch/x86_64/include/arch/memory/heap/linked_list_allocator.hpp8
-rw-r--r--arch/x86_64/include/arch/memory/heap/memory_block.hpp3
-rw-r--r--arch/x86_64/include/arch/shared/mutex.hpp46
-rw-r--r--arch/x86_64/src/memory/heap/linked_list_allocator.cpp14
4 files changed, 62 insertions, 9 deletions
diff --git a/arch/x86_64/include/arch/memory/heap/linked_list_allocator.hpp b/arch/x86_64/include/arch/memory/heap/linked_list_allocator.hpp
index e77602c..49217d5 100644
--- a/arch/x86_64/include/arch/memory/heap/linked_list_allocator.hpp
+++ b/arch/x86_64/include/arch/memory/heap/linked_list_allocator.hpp
@@ -2,6 +2,7 @@
#define TEACHOS_ARCH_X86_64_MEMORY_HEAP_LINKED_LIST_ALLOCATOR_HPP
#include "arch/memory/heap/memory_block.hpp"
+#include "arch/shared/mutex.hpp"
namespace teachos::arch::memory::heap
{
@@ -96,9 +97,10 @@ namespace teachos::arch::memory::heap
*/
auto clear_memory_block_header(void * pointer) -> void;
- std::size_t heap_start; ///< Start of the allocatable heap area
- std::size_t heap_end; ///< End of the allocatable heap area
- std::atomic<memory_block *> first; ///< First free entry in our memory
+ std::size_t heap_start; ///< Start of the allocatable heap area
+ std::size_t heap_end; ///< End of the allocatable heap area
+ memory_block * first; ///< First free entry in our memory
+ shared::mutex mutex;
};
} // namespace teachos::arch::memory::heap
diff --git a/arch/x86_64/include/arch/memory/heap/memory_block.hpp b/arch/x86_64/include/arch/memory/heap/memory_block.hpp
index 1fbbfd5..c48d0cd 100644
--- a/arch/x86_64/include/arch/memory/heap/memory_block.hpp
+++ b/arch/x86_64/include/arch/memory/heap/memory_block.hpp
@@ -1,7 +1,6 @@
#ifndef TEACHOS_ARCH_X86_64_MEMORY_HEAP_MEMORY_BLOCK_HPP
#define TEACHOS_ARCH_X86_64_MEMORY_HEAP_MEMORY_BLOCK_HPP
-#include <atomic>
#include <cstdint>
namespace teachos::arch::memory::heap
@@ -22,7 +21,7 @@ namespace teachos::arch::memory::heap
std::size_t size; ///< Amount of free memory this hole contains, has to always be atleast 16 bytes to hold the size
///< variable and the pointer to the next hole.
- std::atomic<memory_block *> next; ///< Optional pointer to the next free memory, holds nullptr if there is none.
+ memory_block * next; ///< Optional pointer to the next free memory, holds nullptr if there is none.
};
} // namespace teachos::arch::memory::heap
diff --git a/arch/x86_64/include/arch/shared/mutex.hpp b/arch/x86_64/include/arch/shared/mutex.hpp
new file mode 100644
index 0000000..d874dd8
--- /dev/null
+++ b/arch/x86_64/include/arch/shared/mutex.hpp
@@ -0,0 +1,46 @@
#ifndef TEACHOS_ARCH_X86_64_MUTEX_HPP
#define TEACHOS_ARCH_X86_64_MUTEX_HPP

#include <atomic>

namespace teachos::arch::shared
{
    /**
     * @brief Minimal spinlock-style mutex for kernel code.
     *
     * There is no scheduler/blocking primitive to fall back on here, so
     * lock() busy-waits. The interface (lock / try_lock / unlock) follows
     * the standard Lockable requirements so it composes with guard types.
     */
    struct mutex
    {
        mutex() = default;
        ~mutex() = default;

        // A lock must never be duplicated or transferred while potentially
        // held, so copy AND move operations are explicitly deleted.
        mutex(const mutex &) = delete;
        mutex & operator=(const mutex &) = delete;
        mutex(mutex &&) = delete;
        mutex & operator=(mutex &&) = delete;

        /**
         * @brief Lock the mutex (spins until available)
         */
        void lock()
        {
            // Test-and-test-and-set: only retry the (write-invalidating)
            // exchange once a cheap relaxed read sees the lock released.
            // Spinning on exchange alone would ping-pong the cache line
            // between cores while the lock is contended.
            while (locked.exchange(true, std::memory_order_acquire))
            {
                while (locked.load(std::memory_order_relaxed))
                {
                }
            }
        }

        /**
         * @brief Try to lock the mutex (non-blocking)
         *
         * @return true if lock has been acquired and false otherwise
         */
        bool try_lock() { return !locked.exchange(true, std::memory_order_acquire); }

        /**
         * @brief Unlock the mutex
         *
         * Must only be called by the owner that previously acquired the lock.
         */
        void unlock() { locked.store(false, std::memory_order_release); }

    private:
        std::atomic<bool> locked{false}; ///< true while the lock is held
    };
} // namespace teachos::arch::shared
#endif // TEACHOS_ARCH_X86_64_MUTEX_HPP
diff --git a/arch/x86_64/src/memory/heap/linked_list_allocator.cpp b/arch/x86_64/src/memory/heap/linked_list_allocator.cpp
index 22b5757..01838f9 100644
--- a/arch/x86_64/src/memory/heap/linked_list_allocator.cpp
+++ b/arch/x86_64/src/memory/heap/linked_list_allocator.cpp
@@ -13,6 +13,7 @@ namespace teachos::arch::memory::heap
: heap_start(heap_start)
, heap_end(heap_end)
, first(nullptr)
+ , mutex{shared::mutex{}}
{
auto const heap_size = heap_end - heap_start;
exception_handling::assert(
@@ -26,9 +27,10 @@ namespace teachos::arch::memory::heap
{
exception_handling::assert(size > min_allocatable_size(),
"[Linked List Allocator] Allocated memory cannot be smaller than 16 bytes");
+        mutex.lock(); // NOTE(review): unlock() is only visible on the panic path below — verify the successful return inside the loop (via split_free_memory_block) also unlocks, or use a scope guard
memory_block * previous = nullptr;
- auto current = first.load(std::memory_order::relaxed);
+ auto current = first;
while (current != nullptr)
{
@@ -40,6 +42,8 @@ namespace teachos::arch::memory::heap
previous = current;
current = current->next;
}
+
+ mutex.unlock();
exception_handling::panic("[Linked List Allocator] Out of memory");
}
@@ -47,12 +51,13 @@ namespace teachos::arch::memory::heap
{
exception_handling::assert(size > min_allocatable_size(),
"[Linked List Allocator] Allocated memory cannot be smaller than 16 bytes");
+ mutex.lock();
auto const start_address = reinterpret_cast<std::size_t>(pointer);
auto const end_address = start_address + size;
memory_block * previous = nullptr;
- auto current = first.load(std::memory_order::relaxed);
+ auto current = first;
while (current != nullptr)
{
@@ -68,6 +73,7 @@ namespace teachos::arch::memory::heap
}
coalesce_free_memory_block(previous, current, pointer, size);
+ mutex.unlock();
}
auto linked_list_allocator::split_free_memory_block(memory_block * previous_block, memory_block * current_block,
@@ -81,11 +87,11 @@ namespace teachos::arch::memory::heap
// free block (nullptr). Therefore we have to overwrite the first block instead of overwriting its next value.
if (previous_block == nullptr)
{
- first.compare_exchange_weak(previous_block, new_block, std::memory_order::relaxed);
+ first = new_block;
}
else
{
- previous_block->next.compare_exchange_weak(current_block, new_block, std::memory_order::relaxed);
+            previous_block->next = new_block;
}
clear_memory_block_header(current_block);
return reinterpret_cast<void *>(start_address);