aboutsummaryrefslogtreecommitdiff
path: root/kernel/src/memory
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/src/memory')
-rw-r--r--kernel/src/memory/block_list_allocator.tests.cpp80
1 file changed, 80 insertions, 0 deletions
diff --git a/kernel/src/memory/block_list_allocator.tests.cpp b/kernel/src/memory/block_list_allocator.tests.cpp
new file mode 100644
index 0000000..5f6f382
--- /dev/null
+++ b/kernel/src/memory/block_list_allocator.tests.cpp
@@ -0,0 +1,80 @@
+#include "kernel/memory/block_list_allocator.hpp"
+
+#include "kernel/test_support/memory.hpp"
+
+#include <kstd/units>
+
+#include <catch2/catch_test_macros.hpp>
+
+#include <cstddef>
+
+using namespace kstd::units_literals;
+
+SCENARIO("Block List Allocator Operations", "[memory][allocator]") // BDD coverage for kernel::memory::block_list_allocator
+{
+    GIVEN("A newly initialized block list allocator mapped via the test sandbox")
+    {
+        auto sandbox_base = kernel::tests::memory::heap_base();
+        kernel::memory::block_list_allocator allocator{sandbox_base};
+
+        WHEN("a basic allocation request is made")
+        {
+            void * ptr = allocator.allocate(128_B, 8_B);
+
+            THEN("a valid, non-null pointer is returned")
+            {
+                REQUIRE(ptr != nullptr);
+            }
+            AND_THEN("the returned memory is writeable without causing segmentation faults")
+            {
+                REQUIRE(ptr != nullptr); // sibling sections run in separate passes; the THEN's check did not run here
+                auto byte_ptr = static_cast<std::byte *>(ptr);
+                byte_ptr[0] = std::byte{0xDE};
+                byte_ptr[127] = std::byte{0xAD};
+                REQUIRE(byte_ptr[0] == std::byte{0xDE});
+                REQUIRE(byte_ptr[127] == std::byte{0xAD});
+            }
+
+            allocator.deallocate(ptr);
+        }
+
+        WHEN("multiple allocations are made sequentially")
+        {
+            void * ptr1 = allocator.allocate(64_B, 8_B);
+            void * ptr2 = allocator.allocate(64_B, 8_B);
+            void * ptr3 = allocator.allocate(1_KiB, 16_B);
+
+            THEN("they return distinct, non-overlapping memory blocks")
+            {
+                REQUIRE(ptr1 != nullptr);
+                REQUIRE(ptr2 != nullptr);
+                REQUIRE(ptr3 != nullptr);
+                REQUIRE(ptr1 != ptr2); // NOTE(review): inequality alone does not prove non-overlap; distance checks would be stronger
+                REQUIRE(ptr2 != ptr3);
+                REQUIRE(ptr1 != ptr3);
+            }
+
+            allocator.deallocate(ptr1);
+            allocator.deallocate(ptr2);
+            allocator.deallocate(ptr3);
+        }
+
+        WHEN("a block is allocated and then completely freed")
+        {
+            void * original_ptr = allocator.allocate(512_B, 16_B);
+            REQUIRE(original_ptr != nullptr); // guard: the reuse comparison below is meaningless on a failed allocation
+            allocator.deallocate(original_ptr);
+            AND_WHEN("a new allocation of equal or smaller size is requested")
+            {
+                void * new_ptr = allocator.allocate(128_B, 16_B);
+
+                THEN("the allocator actively reuses the coalesced space")
+                {
+                    REQUIRE(new_ptr == original_ptr); // assumes first-fit reuse of the exact freed address at equal alignment
+                }
+
+                allocator.deallocate(new_ptr);
+            }
+        }
+    }
+}