diff options
| author | Matteo Gmür <matteo.gmuer1@ost.ch> | 2025-02-18 10:59:05 +0100 |
|---|---|---|
| committer | Matteo Gmür <matteo.gmuer1@ost.ch> | 2025-02-18 10:59:05 +0100 |
| commit | cd42c21f2460751428b3e1b4ae07ea0b924967bc (patch) | |
| tree | e3e410f399c3eead444f2a242a19448571fd979a /arch/x86_64/src | |
| parent | 47879f42d70755fcf5473ffb82798b515cb2e21b (diff) | |
| parent | 3d488e53a1d15fcc01a7b1d23b9585ca7a724864 (diff) | |
| download | teachos-cd42c21f2460751428b3e1b4ae07ea0b924967bc.tar.xz teachos-cd42c21f2460751428b3e1b4ae07ea0b924967bc.zip | |
Merge branch 'feat_memory_manager' into 'develop_sa'
Finish initial draft of Memory Manager
See merge request teachos/kernel!3
Diffstat (limited to 'arch/x86_64/src')
26 files changed, 1265 insertions, 57 deletions
diff --git a/arch/x86_64/src/boot/boot.s b/arch/x86_64/src/boot/boot.s index 7b4e193..8d27ea1 100644 --- a/arch/x86_64/src/boot/boot.s +++ b/arch/x86_64/src/boot/boot.s @@ -7,38 +7,38 @@ * Uninitialized data for the bootstrapping process. */ .section .boot_bss, "aw", @nobits + +/** + * Reserve some space for the Multiboot 2 information pointer. + */ +.global multiboot_information_pointer +multiboot_information_pointer: .skip 4 + +/** + * Align page maps to 4 KiB or the assembler code, will cause crashes when attempting to enable paging. + */ .align 4096 /** - * Reserve space for the page maps we are going to used during startup. + * Reserve space for the page maps we are going to use during startup. * * Note: We are going to use large pages to make the initial mapping code * simpler. * * We need: * - A single PML 4 (since we will only use 4-level paging) - * - 2 PML 3s (since we need to map high (-2GiB) and low (1+MiB) memory) - * - 2 PML 2s (since we need to map high (-2GiB) and low (1+MiB) memory) + * - 1 PML 3 + * - 1 PML 2 */ .global page_map_level_4 page_map_level_4: .skip 512 * 8 -.global page_map_level_3_low -page_map_level_3_low: .skip 512 * 8 -.global page_map_level_3_high -page_map_level_3_high: .skip 512 * 8 - -.global page_map_level_2_low -page_map_level_2_low: .skip 512 * 8 -.global page_map_level_2_high -page_map_level_2_high: .skip 512 * 8 +.global page_map_level_3 +page_map_level_3: .skip 512 * 8 -/** - * Reserve some space for the Multiboot 2 information pointer. - */ -.global multiboot_information_pointer -multiboot_information_pointer: .skip 4 +.global page_map_level_2 +page_map_level_2: .skip 512 * 8 /** * Stack space for the bootstrapping process. 
@@ -86,6 +86,7 @@ global_descriptor_table_pointer: * We are going to print some messages in case we panic during boot, so we are * going to store them here as well */ +.global message_prefix_panic message_prefix_panic: .string "TeachOS Panic: " message_not_loaded_by_multiboot2: @@ -113,6 +114,12 @@ vga_buffer_pointer: .long 0xb8000 .align 16 .code32 +.global halt +halt: +1: + hlt + jmp 1b + /** * Print a given panic message and then halt the machine. * @@ -133,7 +140,7 @@ _panic: call _print add $8, %esp - hlt + call halt /** * Print a message via the VGA buffer. @@ -193,7 +200,7 @@ _start: lgdt (global_descriptor_table_pointer) jmp $global_descriptor_table_code,$_transition_to_long_mode - hlt + call halt /** * Assert that the CPU supports going into long mode. @@ -306,31 +313,23 @@ enable_sse: * * We map all physical memory we were loaded in plus one additional page. The * mapping is done in terms of huge pages (2 MiB per page) to save on required - * page map entries. Note that we also map memory both in the low and high - * virtual address ranges, giving us two ways of accessing it. We need to do - * this, because the bootstrapping code lives in low memory, while the rest of - * the kernel will reside on the high end. + * page map entries. 
*/ prepare_page_maps: - /* Add an entry to the PML4, pointing to the low PML3 */ - mov $page_map_level_3_low, %eax - or $0x3, %eax - mov %eax, (page_map_level_4 + ((0x0000000000100000 >> 39) & 0x1ff) * 8) - - /* Add an entry to the PML4, pointing to the high PML3 */ - mov $page_map_level_3_high, %eax - or $0x3, %eax - mov %eax, (page_map_level_4 + ((0xffffffff80100000 >> 39) & 0x1ff) * 8) + /* Map the P4 table recursively */ + mov $page_map_level_4, %eax + or $0b11, %eax /* Write present + writable flags into eax register */ + mov %eax, (page_map_level_4 + 511 * 8) - /* Add an entry to the low PML3, pointing to the low PML2 */ - mov $page_map_level_2_low, %eax + /* Add an entry to the PML4, pointing to the PML3 */ + mov $page_map_level_3, %eax or $0x3, %eax - mov %eax, (page_map_level_3_low + ((0x0000000000100000 >> 30) & 0x1ff) * 8) + mov %eax, (page_map_level_4 + ((0x0000000000100000 >> 39) & 0x1ff) * 8) - /* Add an entry to the high PML3, pointing to the high PML2 */ - mov $page_map_level_2_high, %eax + /* Add an entry to the PML3, pointing to the PML2 */ + mov $page_map_level_2, %eax or $0x3, %eax - mov %eax, (page_map_level_3_high + ((0xffffffff80100000 >> 30) & 0x1ff) * 8) + mov %eax, (page_map_level_3 + ((0x0000000000100000 >> 30) & 0x1ff) * 8) xor %ecx, %ecx @@ -342,8 +341,7 @@ prepare_page_maps: mov $(1 << 21), %eax mul %ecx or $((1 << 0) | (1 << 1) | (1 << 7)), %eax - mov %eax, page_map_level_2_low(,%ecx,8) - mov %eax, page_map_level_2_high(,%ecx,8) + mov %eax, page_map_level_2(,%ecx,8) inc %ecx cmp %esi, %ecx @@ -367,4 +365,4 @@ _transition_to_long_mode: call _init call kernel_main - hlt + call halt diff --git a/arch/x86_64/src/exception_handling/abort.cpp b/arch/x86_64/src/exception_handling/abort.cpp new file mode 100644 index 0000000..e12e4cb --- /dev/null +++ b/arch/x86_64/src/exception_handling/abort.cpp @@ -0,0 +1,15 @@ +#include "arch/exception_handling/panic.hpp" + +#include <cstdlib> + +namespace teachos::arch::exception_handling +{ + /** + * 
@brief Override for the newlib abort function. + * + * @note newlib defines @p ::abort as a weak symbol, thus allowing implementations to override it by simply providing + * a matching implementation. Since the default implemenatation calls a number of functions the kernel does not + * currently implement, @p ::abort gets overridden to simply panic. + */ + extern "C" auto abort() -> void { panic("Terminate was called, possibly due to an unhandled exception"); } +} // namespace teachos::arch::exception_handling diff --git a/arch/x86_64/src/exception_handling/assert.cpp b/arch/x86_64/src/exception_handling/assert.cpp new file mode 100644 index 0000000..b2963de --- /dev/null +++ b/arch/x86_64/src/exception_handling/assert.cpp @@ -0,0 +1,15 @@ +#include "arch/exception_handling/assert.hpp" + +#include "arch/exception_handling/panic.hpp" + +namespace teachos::arch::exception_handling +{ + auto assert(bool condition, char const * message) -> void + { + if (condition) + { + return; + } + panic("Assertion Violation: ", message); + } +} // namespace teachos::arch::exception_handling diff --git a/arch/x86_64/src/exception_handling/panic.cpp b/arch/x86_64/src/exception_handling/panic.cpp new file mode 100644 index 0000000..8e3802a --- /dev/null +++ b/arch/x86_64/src/exception_handling/panic.cpp @@ -0,0 +1,22 @@ +#include "arch/exception_handling/panic.hpp" + +#include "arch/kernel/halt.hpp" +#include "arch/video/vga/text.hpp" + +namespace teachos::arch::exception_handling +{ + extern "C" char const message_prefix_panic[]; + + auto panic(char const * reason) -> void { panic(message_prefix_panic, reason); } + + auto panic(char const * prefix, char const * reason) -> void + { + using video::vga::text::common_attributes::white_on_red; + + video::vga::text::newline(); + video::vga::text::write(prefix, white_on_red); + video::vga::text::write(reason, white_on_red); + + kernel::halt(); + }; +} // namespace teachos::arch::exception_handling diff --git 
a/arch/x86_64/src/exception_handling/pure_virtual.cpp b/arch/x86_64/src/exception_handling/pure_virtual.cpp new file mode 100644 index 0000000..67772f7 --- /dev/null +++ b/arch/x86_64/src/exception_handling/pure_virtual.cpp @@ -0,0 +1,6 @@ +#include "arch/exception_handling/panic.hpp" + +extern "C" auto __cxa_pure_virtual() -> void +{ + teachos::arch::exception_handling::panic("Runtime", "Tried to call a pure virtual function!"); +} diff --git a/arch/x86_64/src/kernel/main.cpp b/arch/x86_64/src/kernel/main.cpp index 0e90264..681f960 100644 --- a/arch/x86_64/src/kernel/main.cpp +++ b/arch/x86_64/src/kernel/main.cpp @@ -1,15 +1,71 @@ #include "arch/kernel/main.hpp" +#include "arch/memory/heap/bump_allocator.hpp" +#include "arch/memory/heap/concept.hpp" +#include "arch/memory/heap/linked_list_allocator.hpp" +#include "arch/memory/main.hpp" +#include "arch/memory/multiboot/reader.hpp" #include "arch/video/vga/text.hpp" namespace teachos::arch::kernel { + auto stack_overflow_test(int count) -> int + { + int test[5000] = {}; + if (test[0] == 0xFFFF) + { + return count; + } + count = stack_overflow_test(count); + return count++; + } + + auto heap_test() -> void + { + memory::heap::linked_list_allocator heap_allocator{memory::heap::HEAP_START, + memory::heap::HEAP_START + memory::heap::HEAP_SIZE}; + auto test = heap_allocator.allocate(1024); + auto test2 = new (test) memory::multiboot::memory_information{}; + auto test3 = new (static_cast<void *>(static_cast<memory::multiboot::memory_information *>(test) + 1)) + memory::multiboot::memory_information{}; + auto test4 = *test2; + auto test5 = *test3; + test4.kernel_end = 5000; + test5.kernel_end = 3000; + auto test6 = test4.kernel_end; + auto test7 = test5.kernel_end; + auto test8 = memory::multiboot::read_multiboot2(); + if (test6 && test7 && test8.kernel_end) + { + video::vga::text::write("Heap test successful", video::vga::text::common_attributes::green_on_black); + } + test2->kernel_end = 2000; + test2->kernel_start = 
1000; + test2->multiboot_start = 2000; + heap_allocator.deallocate(test, 1024); + + auto test9 = heap_allocator.allocate(1024); + auto test10 = heap_allocator.allocate(1024); + auto test11 = heap_allocator.allocate(1024); + heap_allocator.deallocate(test9, 1024); + auto test12 = heap_allocator.allocate(1024); + auto test13 = heap_allocator.allocate(1024); + heap_allocator.deallocate(test11, 1024); + heap_allocator.deallocate(test10, 1024); + heap_allocator.deallocate(test13, 1024); + heap_allocator.deallocate(test12, 1024); + } + auto main() -> void { - using namespace video::vga; + video::vga::text::clear(); + video::vga::text::cursor(false); + video::vga::text::write("TeachOS is starting up...", video::vga::text::common_attributes::green_on_black); + video::vga::text::newline(); + + memory::initialize_memory_management(); - text::clear(); - text::cursor(false); - text::write("TeachOS is starting up...", text::common_attributes::green_on_black); + // stack_overflow_test(0); + heap_test(); } } // namespace teachos::arch::kernel diff --git a/arch/x86_64/src/memory/allocator/area_frame_allocator.cpp b/arch/x86_64/src/memory/allocator/area_frame_allocator.cpp new file mode 100644 index 0000000..cb4fefa --- /dev/null +++ b/arch/x86_64/src/memory/allocator/area_frame_allocator.cpp @@ -0,0 +1,85 @@ +#include "arch/memory/allocator/area_frame_allocator.hpp" + +#include "arch/exception_handling/assert.hpp" + +#include <algorithm> +#include <array> +#include <ranges> + +namespace teachos::arch::memory::allocator +{ + area_frame_allocator::area_frame_allocator(multiboot::memory_information const & mem_info) + : next_free_frame(0U) + , current_area(std::nullopt) + , memory_areas(mem_info.areas) + , kernel_start(physical_frame::containing_address(mem_info.kernel_start)) + , kernel_end(physical_frame::containing_address(mem_info.kernel_end)) + , multiboot_start(physical_frame::containing_address(mem_info.multiboot_start)) + , 
multiboot_end(physical_frame::containing_address(mem_info.multiboot_end)) + { + choose_next_area(); + } + + auto area_frame_allocator::choose_next_area() -> void + { + current_area = std::nullopt; + auto next_area_with_free_frames = memory_areas | std::views::filter([this](auto const & area) { + auto address = area.base_address + area.area_length - 1; + return physical_frame::containing_address(address) >= next_free_frame; + }); + + auto const lowest_area_with_free_frames = std::ranges::min_element( + next_area_with_free_frames, [](auto const & a, auto const & b) { return a.base_address < b.base_address; }); + + if (lowest_area_with_free_frames != next_area_with_free_frames.end()) + { + current_area = *lowest_area_with_free_frames; + // Update the `next_free_frame` according to the new memory area + auto const start_frame = physical_frame::containing_address(current_area.value().base_address); + if (next_free_frame < start_frame) + { + next_free_frame = start_frame; + } + } + } + + auto area_frame_allocator::allocate_frame() -> std::optional<physical_frame> + { + // Only try to allocate memory if current_area is not null, because + // the current_area is null if there is no more available memory. + if (!current_area.has_value()) + { + return std::nullopt; + } + + auto const address = current_area.value().base_address + current_area.value().area_length - 1; + physical_frame current_area_last_frame = physical_frame::containing_address(address); + + if (next_free_frame > current_area_last_frame) + { + // All frames of current area are used, switch to next area. + choose_next_area(); + } + else if (next_free_frame >= kernel_start && next_free_frame <= kernel_end) + { + // `physical_frame` is used by the kernel or multiboot information structure. 
+ next_free_frame = allocator::physical_frame{kernel_end.frame_number + 1}; + } + else if (next_free_frame >= multiboot_start && next_free_frame <= multiboot_end) + { + // `physical_frame` is used by the kernel or multiboot information structure. + next_free_frame = allocator::physical_frame{multiboot_end.frame_number + 1}; + } + else + { + // Frame is unused, increment `next_free_frame` and return it. + next_free_frame.frame_number += 1; + return next_free_frame; + } + + // `physical_frame` was not valid, try it again with the updated `next_free_frame`. + return allocate_frame(); + } + + auto area_frame_allocator::deallocate_frame(physical_frame const & physical_frame) -> void { (void)physical_frame; } +} // namespace teachos::arch::memory::allocator diff --git a/arch/x86_64/src/memory/allocator/physical_frame.cpp b/arch/x86_64/src/memory/allocator/physical_frame.cpp new file mode 100644 index 0000000..ec387a1 --- /dev/null +++ b/arch/x86_64/src/memory/allocator/physical_frame.cpp @@ -0,0 +1,24 @@ +#include "arch/memory/allocator/physical_frame.hpp" + +namespace teachos::arch::memory::allocator +{ + auto physical_frame::containing_address(physical_address address) -> physical_frame + { + return physical_frame{address / PAGE_FRAME_SIZE}; + } + + auto physical_frame::start_address() const -> physical_address { return frame_number * PAGE_FRAME_SIZE; } + + auto physical_frame::operator++(int) -> physical_frame + { + physical_frame const old_value = *this; + ++frame_number; + return old_value; + } + + auto physical_frame::operator++() -> physical_frame & + { + ++frame_number; + return *this; + } +} // namespace teachos::arch::memory::allocator diff --git a/arch/x86_64/src/memory/allocator/tiny_frame_allocator.cpp b/arch/x86_64/src/memory/allocator/tiny_frame_allocator.cpp new file mode 100644 index 0000000..3cdf9c7 --- /dev/null +++ b/arch/x86_64/src/memory/allocator/tiny_frame_allocator.cpp @@ -0,0 +1,34 @@ +#include "arch/memory/allocator/tiny_frame_allocator.hpp" + 
+#include "arch/exception_handling/panic.hpp" + +namespace teachos::arch::memory::allocator +{ + auto tiny_frame_allocator::allocate_frame() -> std::optional<physical_frame> + { + for (auto & frame_option : frames) + { + if (frame_option.has_value()) + { + auto value = frame_option; + frame_option.reset(); + return value; + } + } + return std::nullopt; + } + + auto tiny_frame_allocator::deallocate_frame(physical_frame const & physical_frame) -> void + { + for (auto & frame_option : frames) + { + if (!frame_option.has_value()) + { + frame_option.emplace(physical_frame); + return; + } + } + exception_handling::panic( + "[Tiny Frame Allocator] Attempted to deallocate more than the 3 frames, that can be held"); + } +} // namespace teachos::arch::memory::allocator diff --git a/arch/x86_64/src/memory/cpu/control_register.cpp b/arch/x86_64/src/memory/cpu/control_register.cpp new file mode 100644 index 0000000..298874f --- /dev/null +++ b/arch/x86_64/src/memory/cpu/control_register.cpp @@ -0,0 +1,74 @@ +#include "arch/memory/cpu/control_register.hpp" + +#include "arch/exception_handling/assert.hpp" + +#include <type_traits> + +namespace teachos::arch::memory::cpu +{ + auto read_control_register(control_register cr) -> uint64_t + { + uint64_t current_value; + switch (cr) + { + case control_register::CR0: + asm volatile("mov %%cr0, %[output]" : [output] "=r"(current_value)); + break; + case control_register::CR2: + asm volatile("mov %%cr2, %[output]" : [output] "=r"(current_value)); + break; + case control_register::CR3: + asm volatile("mov %%cr3, %[output]" : [output] "=r"(current_value)); + break; + case control_register::CR4: + asm volatile("mov %%cr4, %[output]" : [output] "=r"(current_value)); + break; + default: + exception_handling::assert(false, + "[Control Register] Attempted to read non-existent or reserved control register"); + break; + } + return current_value; + } + + auto write_control_register(control_register cr, uint64_t new_value) -> void + { + switch (cr) 
+ { + case control_register::CR0: + asm volatile("mov %[input], %%cr0" + : /* no output from call */ + : [input] "r"(new_value) + : "memory"); + break; + case control_register::CR2: + asm volatile("mov %[input], %%cr2" + : /* no output from call */ + : [input] "r"(new_value) + : "memory"); + break; + case control_register::CR3: + asm volatile("mov %[input], %%cr3" + : /* no output from call */ + : [input] "r"(new_value) + : "memory"); + break; + case control_register::CR4: + asm volatile("mov %[input], %%cr4" + : /* no output from call */ + : [input] "r"(new_value) + : "memory"); + break; + default: + exception_handling::assert(false, + "[Control Register] Attempted to write non-existent or reserved control register"); + break; + } + } + + auto set_cr0_bit(cr0_flags flag) -> void + { + auto const cr0 = read_control_register(control_register::CR0); + write_control_register(control_register::CR0, static_cast<std::underlying_type<cr0_flags>::type>(flag) | cr0); + } +} // namespace teachos::arch::memory::cpu diff --git a/arch/x86_64/src/memory/cpu/msr.cpp b/arch/x86_64/src/memory/cpu/msr.cpp new file mode 100644 index 0000000..b83f902 --- /dev/null +++ b/arch/x86_64/src/memory/cpu/msr.cpp @@ -0,0 +1,31 @@ +#include "arch/memory/cpu/msr.hpp" + +namespace teachos::arch::memory::cpu +{ + namespace + { + auto constexpr IA32_EFER_ADDRESS = 0xC0000080; + } + + auto read_msr(uint32_t msr) -> uint64_t + { + uint32_t low, high; + asm volatile("rdmsr" : "=a"(low), "=d"(high) : "c"(msr)); + return (static_cast<uint64_t>(high) << 32) | low; + } + + auto write_msr(uint32_t msr, uint64_t value) -> void + { + uint32_t low = value & 0xFFFFFFFF; + uint32_t high = value >> 32; + asm volatile("wrmsr" + : /* no output from call */ + : "c"(msr), "a"(low), "d"(high)); + } + + auto set_efer_bit(efer_flags flag) -> void + { + auto const efer = read_msr(IA32_EFER_ADDRESS); + write_msr(IA32_EFER_ADDRESS, static_cast<std::underlying_type<efer_flags>::type>(flag) | efer); + } +} // namespace 
teachos::arch::memory::cpu diff --git a/arch/x86_64/src/memory/cpu/tlb.cpp b/arch/x86_64/src/memory/cpu/tlb.cpp new file mode 100644 index 0000000..591d9fc --- /dev/null +++ b/arch/x86_64/src/memory/cpu/tlb.cpp @@ -0,0 +1,16 @@ +#include "arch/memory/cpu/tlb.hpp" + +#include "arch/memory/cpu/control_register.hpp" + +namespace teachos::arch::memory::cpu +{ + auto tlb_flush(paging::virtual_address address) -> void + { + asm volatile("invlpg (%[input])" : /* no output from call */ : [input] "r"(address) : "memory"); + } + + auto tlb_flush_all() -> void + { + write_control_register(cpu::control_register::CR3, read_control_register(cpu::control_register::CR3)); + } +} // namespace teachos::arch::memory::cpu diff --git a/arch/x86_64/src/memory/heap/bump_allocator.cpp b/arch/x86_64/src/memory/heap/bump_allocator.cpp new file mode 100644 index 0000000..bbf2021 --- /dev/null +++ b/arch/x86_64/src/memory/heap/bump_allocator.cpp @@ -0,0 +1,52 @@ +#include "arch/memory/heap/bump_allocator.hpp" + +#include "arch/exception_handling/assert.hpp" + +#include <limits> +#include <type_traits> + +namespace teachos::arch::memory::heap +{ + namespace + { + template<typename T> + auto saturating_add(T x, T y) -> T + requires std::is_unsigned_v<T> + { + if (x > std::numeric_limits<T>::max() - y) + { + return std::numeric_limits<T>::max(); + } + T result = x + y; + return result; + } + } // namespace + + auto bump_allocator::allocate(std::size_t size) -> void * + { + // Repeat allocation until it succeeds, has to be done, because another allocator could overtake it at any time + // causing the value to differ and the calculation to have to be redone. 
+ for (;;) + { + auto alloc_start = next.load(std::memory_order::relaxed); + auto const alloc_end = saturating_add(alloc_start, size); + arch::exception_handling::assert(alloc_end <= heap_end, "[Heap Allocator] Out of memory"); + // Check if the atomic value is still the one initally loaded, if it isn't we have been overtaken by another + // thread and need to redo the calculation. Spurious failure by weak can be ignored, because the whole allocation + // is wrapped in an infinite for loop so a failure that wasn't actually one will simply be retried until it works. + auto const updated = next.compare_exchange_weak(alloc_start, alloc_end, std::memory_order::relaxed); + if (updated) + { + return reinterpret_cast<void *>(alloc_start); + } + } + } + + auto bump_allocator::deallocate(void * pointer, std::size_t size) -> void + { + if (pointer || size) + { + } + } + +} // namespace teachos::arch::memory::heap diff --git a/arch/x86_64/src/memory/heap/linked_list_allocator.cpp b/arch/x86_64/src/memory/heap/linked_list_allocator.cpp new file mode 100644 index 0000000..e5bae21 --- /dev/null +++ b/arch/x86_64/src/memory/heap/linked_list_allocator.cpp @@ -0,0 +1,168 @@ +#include "arch/memory/heap/linked_list_allocator.hpp" + +#include "arch/exception_handling/assert.hpp" +#include "arch/exception_handling/panic.hpp" + +namespace teachos::arch::memory::heap +{ + linked_list_allocator::linked_list_allocator(std::size_t heap_start, std::size_t heap_end) + : heap_start(heap_start) + , heap_end(heap_end) + , first(nullptr) + , mutex{shared::mutex{}} + { + auto const heap_size = heap_end - heap_start; + exception_handling::assert( + heap_size > min_allocatable_size(), + "[Linked List Allocator] Total heap size can not be smaller than minimum of 16 bytes to hold " + "atleast one memory hole entry"); + first = new (reinterpret_cast<void *>(heap_start)) memory_block(heap_size, nullptr); + } + + auto linked_list_allocator::allocate(std::size_t size) -> void * + { + 
exception_handling::assert(size > min_allocatable_size(), + "[Linked List Allocator] Allocated memory cannot be smaller than 16 bytes"); + mutex.lock(); + + memory_block * previous = nullptr; + auto current = first; + + while (current != nullptr) + { + if (current->size == size) + { + auto const memory_address = remove_free_memory_block(previous, current); + mutex.unlock(); + return memory_address; + } + else if (current->size >= size + min_allocatable_size()) + { + auto const memory_address = split_free_memory_block(previous, current, size); + mutex.unlock(); + return memory_address; + } + + previous = current; + current = current->next; + } + + exception_handling::panic("[Linked List Allocator] Out of memory"); + } + + auto linked_list_allocator::deallocate(void * pointer, std::size_t size) -> void + { + exception_handling::assert(size > min_allocatable_size(), + "[Linked List Allocator] Allocated memory cannot be smaller than 16 bytes"); + mutex.lock(); + + auto const start_address = reinterpret_cast<std::size_t>(pointer); + auto const end_address = start_address + size; + + memory_block * previous = nullptr; + auto current = first; + + while (current != nullptr) + { + // Current address of the free memory block now points to an address that is after our block to deallocate in heap + // memory space. 
+ if (reinterpret_cast<std::size_t>(current) >= end_address) + { + break; + } + + previous = current; + current = current->next; + } + + coalesce_free_memory_block(previous, current, pointer, size); + mutex.unlock(); + } + + auto linked_list_allocator::remove_free_memory_block(memory_block * previous_block, + memory_block * current_block) -> void * + { + return replace_free_memory_block(previous_block, current_block, current_block->next); + } + + auto linked_list_allocator::split_free_memory_block(memory_block * previous_block, memory_block * current_block, + std::size_t size) -> void * + { + auto const end_address = reinterpret_cast<std::size_t>(current_block) + size; + auto const new_block = + new (reinterpret_cast<void *>(end_address)) memory_block(current_block->size - size, current_block->next); + return replace_free_memory_block(previous_block, current_block, new_block); + } + + auto linked_list_allocator::replace_free_memory_block(memory_block * previous_block, memory_block * current_block, + memory_block * new_block) -> void * + { + auto const start_address = reinterpret_cast<std::size_t>(current_block); + // If we want to allocate into the first block that is before any other free block, then there exists no previous + // free block (nullptr). Therefore we have to overwrite t |
