aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFelix Morgner <felix.morgner@ost.ch>2026-03-16 08:34:13 +0100
committerFelix Morgner <felix.morgner@ost.ch>2026-03-16 08:34:13 +0100
commit64bf7fcf58ced023be1701ed4508e38f746d40b8 (patch)
tree023637c060d169e5a72576f62c9bd616b8b5b937
parent1e23bfc850f0ca126bff3c56c86419ab1570c96e (diff)
downloadteachos-64bf7fcf58ced023be1701ed4508e38f746d40b8.tar.xz
teachos-64bf7fcf58ced023be1701ed4508e38f746d40b8.zip
kernel/memory: implement basic free-list heap
-rw-r--r--arch/x86_64/include/arch/memory/page_table.hpp10
-rw-r--r--arch/x86_64/include/arch/memory/recursive_page_mapper.hpp5
-rw-r--r--arch/x86_64/kapi/memory.cpp10
-rw-r--r--arch/x86_64/src/memory/recursive_page_mapper.cpp18
-rw-r--r--kapi/include/kapi/memory.hpp3
-rw-r--r--kapi/include/kapi/memory/address.hpp19
-rw-r--r--kapi/include/kapi/memory/page.hpp8
-rw-r--r--kapi/include/kapi/memory/page_mapper.hpp3
-rw-r--r--kernel/CMakeLists.txt3
-rw-r--r--kernel/include/kernel/memory.hpp19
-rw-r--r--kernel/include/kernel/memory/free_list_allocator.hpp80
-rw-r--r--kernel/include/kernel/memory/heap_allocator.hpp32
-rw-r--r--kernel/kapi/memory.cpp9
-rw-r--r--kernel/src/main.cpp3
-rw-r--r--kernel/src/memory.cpp55
-rw-r--r--kernel/src/memory/free_list_allocator.cpp187
-rw-r--r--kernel/src/memory/operators.cpp66
-rw-r--r--libs/kstd/include/kstd/mutex3
18 files changed, 498 insertions, 35 deletions
diff --git a/arch/x86_64/include/arch/memory/page_table.hpp b/arch/x86_64/include/arch/memory/page_table.hpp
index a82d9e0..003dbf3 100644
--- a/arch/x86_64/include/arch/memory/page_table.hpp
+++ b/arch/x86_64/include/arch/memory/page_table.hpp
@@ -304,6 +304,11 @@ namespace arch::memory
result |= mapper_flags::supervisor_only;
}
+ if ((flags & table_flags::global) != table_flags::empty)
+ {
+ result |= mapper_flags::global;
+ }
+
return result;
}
@@ -334,6 +339,11 @@ namespace arch::memory
result |= table_flags::user_accessible;
}
+ if ((flags & mapper_flags::global) != mapper_flags::empty)
+ {
+ result |= table_flags::global;
+ }
+
return result;
}
diff --git a/arch/x86_64/include/arch/memory/recursive_page_mapper.hpp b/arch/x86_64/include/arch/memory/recursive_page_mapper.hpp
index e278a6d..a0c290a 100644
--- a/arch/x86_64/include/arch/memory/recursive_page_mapper.hpp
+++ b/arch/x86_64/include/arch/memory/recursive_page_mapper.hpp
@@ -10,14 +10,11 @@ namespace arch::memory
struct recursive_page_mapper : kapi::memory::page_mapper
{
- explicit recursive_page_mapper(kapi::memory::frame_allocator & allocator);
+ explicit recursive_page_mapper();
auto map(kapi::memory::page page, kapi::memory::frame frame, flags flags) -> std::byte * override;
auto unmap(kapi::memory::page page) -> void override;
auto try_unmap(kapi::memory::page page) noexcept -> bool override;
-
- private:
- kapi::memory::frame_allocator * m_allocator;
};
} // namespace arch::memory
diff --git a/arch/x86_64/kapi/memory.cpp b/arch/x86_64/kapi/memory.cpp
index 48aab9f..e704408 100644
--- a/arch/x86_64/kapi/memory.cpp
+++ b/arch/x86_64/kapi/memory.cpp
@@ -189,7 +189,7 @@ namespace kapi::memory
auto mbi_size = boot::bootstrap_information.mbi->size_bytes();
auto mbi_address = physical_address{mbi_base & ~std::bit_cast<std::uintptr_t>(&arch::boot::TEACHOS_VMA)};
auto mbi_start = frame::containing(mbi_address);
- auto mbi_end = frame::containing(physical_address{mbi_address.raw() + mbi_size}) + 1;
+ auto mbi_end = frame::containing(mbi_address + mbi_size) + 1;
std::ranges::for_each(std::views::iota(mbi_start, mbi_end), [&](auto frame) { new_allocator.mark_used(frame); });
}
@@ -211,7 +211,10 @@ namespace kapi::memory
region_based_allocator.emplace(collect_memory_information());
allocation_buffer.emplace(&*region_based_allocator);
- recursive_page_mapper.emplace(*allocation_buffer);
+ set_frame_allocator(*allocation_buffer);
+
+ recursive_page_mapper.emplace();
+ set_page_mapper(*recursive_page_mapper);
kstd::println("[x86_64:MEM] Preparing new paging hierarchy.");
@@ -227,9 +230,6 @@ namespace kapi::memory
cr3.frame(new_pml4_frame);
arch::cpu::cr3::write(cr3);
- set_frame_allocator(*allocation_buffer);
- set_page_mapper(*recursive_page_mapper);
-
auto memory_map = boot::bootstrap_information.mbi->memory_map();
auto highest_byte = std::ranges::max(std::views::transform(
std::views::filter(memory_map.regions(),
diff --git a/arch/x86_64/src/memory/recursive_page_mapper.cpp b/arch/x86_64/src/memory/recursive_page_mapper.cpp
index d8273e1..c7c5341 100644
--- a/arch/x86_64/src/memory/recursive_page_mapper.cpp
+++ b/arch/x86_64/src/memory/recursive_page_mapper.cpp
@@ -56,18 +56,17 @@ namespace arch::memory
} // namespace
- recursive_page_mapper::recursive_page_mapper(kapi::memory::frame_allocator & allocator)
- : m_allocator{&allocator}
- {}
+ recursive_page_mapper::recursive_page_mapper() {}
auto recursive_page_mapper::map(kapi::memory::page page, kapi::memory::frame frame, flags flags) -> std::byte *
{
auto pml4 = static_cast<recursive_page_table<4> *>((paging_root::get()));
+ auto & allocator = kapi::memory::get_frame_allocator();
return std::optional{pml4}
- .and_then([&](auto pml) -> auto { return do_map(pml, page, *m_allocator, flags); })
- .and_then([&](auto pml) -> auto { return do_map(pml, page, *m_allocator, flags); })
- .and_then([&](auto pml) -> auto { return do_map(pml, page, *m_allocator, flags); })
+ .and_then([&](auto pml) -> auto { return do_map(pml, page, allocator, flags); })
+ .and_then([&](auto pml) -> auto { return do_map(pml, page, allocator, flags); })
+ .and_then([&](auto pml) -> auto { return do_map(pml, page, allocator, flags); })
.and_then([&](auto pml) -> auto { return do_map(pml, page, frame, flags); })
.value_or(nullptr);
}
@@ -91,27 +90,28 @@ namespace arch::memory
auto pml3 = pml4->next(pml_index<4>(page)).value();
auto pml2 = pml3->next(pml_index<3>(page)).value();
auto pml1 = pml2->next(pml_index<2>(page)).value();
+ auto & allocator = kapi::memory::get_frame_allocator();
(*pml1)[pml_index<1>(page)].clear();
if (pml1->empty())
{
auto pml1_frame = (*pml2)[pml_index<2>(page)].frame().value();
- m_allocator->release(pml1_frame);
+ allocator.release(pml1_frame);
(*pml2)[pml_index<2>(page)].clear();
}
if (pml2->empty())
{
auto pml2_frame = (*pml3)[pml_index<3>(page)].frame().value();
- m_allocator->release(pml2_frame);
+ allocator.release(pml2_frame);
(*pml3)[pml_index<3>(page)].clear();
}
if (pml3->empty())
{
auto pml3_frame = (*pml4)[pml_index<4>(page)].frame().value();
- m_allocator->release(pml3_frame);
+ allocator.release(pml3_frame);
(*pml4)[pml_index<4>(page)].clear();
}
diff --git a/kapi/include/kapi/memory.hpp b/kapi/include/kapi/memory.hpp
index b234cde..9ef2cd7 100644
--- a/kapi/include/kapi/memory.hpp
+++ b/kapi/include/kapi/memory.hpp
@@ -78,8 +78,9 @@ namespace kapi::memory
//!
//! @param page The page to map.
//! @param frame The frame to map the page into.
+ //! @param flags The flags to apply to this mapping.
//! @return A pointer to the first byte of the mapped page.
- auto map(page page, frame frame) -> std::byte *;
+ auto map(page page, frame frame, page_mapper::flags flags = page_mapper::flags::empty) -> std::byte *;
//! @qualifier kernel-defined
//! Unmap a page.
diff --git a/kapi/include/kapi/memory/address.hpp b/kapi/include/kapi/memory/address.hpp
index 39eb1ee..e5cf50b 100644
--- a/kapi/include/kapi/memory/address.hpp
+++ b/kapi/include/kapi/memory/address.hpp
@@ -55,6 +55,25 @@ namespace kapi::memory
return std::bit_cast<std::byte *>(m_value);
}
+ //! Create a new address n beyond this one.
+ //!
+ //! @param n The amount to increase the address by.
+ //! @return A new address, n further than this one.
+ constexpr auto operator+(std::uintptr_t n) const noexcept -> address
+ {
+ return address{m_value + n};
+ }
+
+ //! Increase this address by a given amount.
+ //!
+ //! @param n The amount to increase the address by.
+ //! @return A reference to this address.
+ constexpr auto operator+=(std::uintptr_t n) noexcept -> address &
+ {
+ m_value += n;
+ return *this;
+ }
+
//! Shift this address n bits to the right.
//!
//! @param n The width of the shift.
diff --git a/kapi/include/kapi/memory/page.hpp b/kapi/include/kapi/memory/page.hpp
index 3846bd8..aa161ee 100644
--- a/kapi/include/kapi/memory/page.hpp
+++ b/kapi/include/kapi/memory/page.hpp
@@ -24,14 +24,6 @@ namespace kapi::memory
: chunk{number}
{}
- //! @copydoc chunk::containing
- //!
- //! @note This factory shadows the base factory to aid in type deduction.
- constexpr auto static containing(linear_address address) noexcept -> page
- {
- return page{chunk::containing(address)};
- }
-
//! Convert a base chunk into a page.
//!
//! This constructor allows for conversion from chunk<linear_address, PLATFORM_PAGE_SIZE> to a page for convenience.
diff --git a/kapi/include/kapi/memory/page_mapper.hpp b/kapi/include/kapi/memory/page_mapper.hpp
index 07eabcf..c6052e9 100644
--- a/kapi/include/kapi/memory/page_mapper.hpp
+++ b/kapi/include/kapi/memory/page_mapper.hpp
@@ -8,6 +8,8 @@
#include <kstd/ext/bitfield_enum>
+#include <cstddef>
+#include <cstdint>
#include <type_traits>
namespace kapi::memory
@@ -30,6 +32,7 @@ namespace kapi::memory
executable = 1 << 1, //! The page contains executable instructions.
uncached = 1 << 2, //! The page contents must not be cached.
supervisor_only = 1 << 3, //! The page is only accessible in supervisor mode.
+ global = 1 << 4, //! The page translation persists across context switches.
};
virtual ~page_mapper() = default;
diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt
index ae2325a..566ab9e 100644
--- a/kernel/CMakeLists.txt
+++ b/kernel/CMakeLists.txt
@@ -11,6 +11,9 @@ add_executable("kernel"
# Kernel Implementation
"src/main.cpp"
"src/memory/bitmap_allocator.cpp"
+ "src/memory/free_list_allocator.cpp"
+ "src/memory/operators.cpp"
+ "src/memory.cpp"
)
target_include_directories("kernel" PRIVATE
diff --git a/kernel/include/kernel/memory.hpp b/kernel/include/kernel/memory.hpp
new file mode 100644
index 0000000..f09c519
--- /dev/null
+++ b/kernel/include/kernel/memory.hpp
@@ -0,0 +1,19 @@
+#ifndef TEACHOS_KERNEL_MEMORY_HPP
+#define TEACHOS_KERNEL_MEMORY_HPP
+
+#include "kapi/memory.hpp"
+
+#include "kernel/memory/heap_allocator.hpp" // IWYU pragma: export
+
+namespace kernel::memory
+{
+
+ //! Get a handle to the global heap allocator.
+ auto get_heap_allocator() -> heap_allocator &;
+
+ //! Initialize the kernel heap.
+ auto init_heap(kapi::memory::linear_address base) -> void;
+
+} // namespace kernel::memory
+
+#endif \ No newline at end of file
diff --git a/kernel/include/kernel/memory/free_list_allocator.hpp b/kernel/include/kernel/memory/free_list_allocator.hpp
new file mode 100644
index 0000000..53a7b61
--- /dev/null
+++ b/kernel/include/kernel/memory/free_list_allocator.hpp
@@ -0,0 +1,80 @@
+#ifndef TEACHOS_KERNEL_MEMORY_FREE_LIST_ALLOCATOR_HPP
+#define TEACHOS_KERNEL_MEMORY_FREE_LIST_ALLOCATOR_HPP
+
+#include "kapi/memory.hpp"
+
+#include "kernel/memory/heap_allocator.hpp"
+
+#include <kstd/mutex>
+
+#include <cstddef>
+#include <new>
+
+namespace kernel::memory
+{
+
+ //! A simple, dynamically sized heap allocator.
+ //!
+ //! This allocator is designed to perform first-fit allocation using an intrusive doubly-linked list. The heap is
+ //! expanded as needed.
+ struct free_list_allocator final : heap_allocator
+ {
+ //! Construct a new free list allocator with the given base address.
+ explicit free_list_allocator(kapi::memory::linear_address base) noexcept;
+
+ free_list_allocator(free_list_allocator const &) = delete;
+ free_list_allocator(free_list_allocator &&) = delete;
+ auto operator=(free_list_allocator const &) -> free_list_allocator & = delete;
+ auto operator=(free_list_allocator &&) -> free_list_allocator & = delete;
+
+ //! Allocate a block of memory with the given alignment.
+ //!
+ //! @param size The size of the block to allocate
+ //! @param alignment The desired alignment of the allocated block
+ //! @return A pointer to the beginning of the block on success, @p nullptr otherwise.
+ [[nodiscard]] auto allocate(std::size_t size, std::align_val_t alignment) noexcept -> void * override;
+
+ //! Deallocate a block of memory previously allocated.
+ //!
+ //! @param block The pointer to the block to free.
+ auto deallocate(void * block) noexcept -> void override;
+
+ private:
+ struct block_header final
+ {
+ std::size_t size;
+ bool free;
+ block_header * next;
+ block_header * prev;
+ };
+
+ //! Try to expand the heap to accommodate the given size.
+ //!
+ //! @param delta The size to expand the heap by.
+ //! @return @p true if the heap was expanded, @p false otherwise.
+ auto expand(std::size_t delta) noexcept -> bool;
+
+ //! Split a given free block to accommodate an allocation.
+ //!
+ //! The newly split free block is inserted into the free list.
+ //!
+ //! @param block The block to split.
+ //! @param size The size of the allocation.
+ //! @param padding The amount of padding to apply.
+ auto split(block_header * block, std::size_t size, std::size_t padding) noexcept -> void;
+
+ //! Try to coalesce a given block with its preceding and/or following block.
+ //!
+ //! @param block The block to coalesce.
+ auto coalesce(block_header * block) noexcept -> void;
+
+ kapi::memory::linear_address m_base;
+ kapi::memory::linear_address m_frontier;
+ block_header * m_free_list;
+
+ kstd::mutex m_lock;
+ };
+
+} // namespace kernel::memory
+
+#endif \ No newline at end of file
diff --git a/kernel/include/kernel/memory/heap_allocator.hpp b/kernel/include/kernel/memory/heap_allocator.hpp
new file mode 100644
index 0000000..bc771e3
--- /dev/null
+++ b/kernel/include/kernel/memory/heap_allocator.hpp
@@ -0,0 +1,32 @@
+#ifndef TEACHOS_KERNEL_MEMORY_HEAP_ALLOCATOR_HPP
+#define TEACHOS_KERNEL_MEMORY_HEAP_ALLOCATOR_HPP
+
+#include <kstd/mutex>
+
+#include <cstddef>
+#include <new>
+
+namespace kernel::memory
+{
+
+ //! The interface for all kernel heap allocators.
+ struct heap_allocator
+ {
+ virtual ~heap_allocator() = default;
+
+ //! Allocate a block of memory with the given alignment.
+ //!
+ //! @param size The size of the block to allocate
+ //! @param alignment The desired alignment of the allocated block
+ //! @return A pointer to the beginning of the block on success, @p nullptr otherwise.
+ [[nodiscard]] virtual auto allocate(std::size_t size, std::align_val_t alignment) noexcept -> void * = 0;
+
+ //! Deallocate a block of memory previously allocated.
+ //!
+ //! @param block The pointer to the block to free.
+ virtual auto deallocate(void * block) noexcept -> void = 0;
+ };
+
+} // namespace kernel::memory
+
+#endif \ No newline at end of file
diff --git a/kernel/kapi/memory.cpp b/kernel/kapi/memory.cpp
index d657527..767bcd7 100644
--- a/kernel/kapi/memory.cpp
+++ b/kernel/kapi/memory.cpp
@@ -101,9 +101,9 @@ namespace kapi::memory
return get_frame_allocator().allocate_many(count);
}
- auto map(page page, frame frame) -> std::byte *
+ auto map(page page, frame frame, page_mapper::flags flags) -> std::byte *
{
- return active_page_mapper->map(page, frame, page_mapper::flags::empty);
+ return active_page_mapper->map(page, frame, flags);
}
auto unmap(page page) -> void
@@ -122,16 +122,15 @@ namespace kapi::memory
system::panic("[OS:MEM] Not enough memory for bitmap allocator!");
}
- auto const base_address = pmm_metadata_base.raw();
auto const flags = page_mapper::flags::writable | page_mapper::flags::supervisor_only;
std::ranges::for_each(std::views::iota(0uz, bitmap_pages), [&](auto index) {
- auto page = page::containing(linear_address{base_address + index * page::size});
+ auto page = page::containing(pmm_metadata_base + index * page::size);
auto frame = memory::frame(bitmap_frames->first.number() + index);
active_page_mapper->map(page, frame, flags);
});
- auto bitmap_ptr = std::bit_cast<std::uint64_t *>(base_address);
+ auto bitmap_ptr = std::bit_cast<std::uint64_t *>(pmm_metadata_base.raw());
auto bitmap = std::span{bitmap_ptr, (bitmap_bytes + sizeof(std::uint64_t) - 1uz) / sizeof(std::uint64_t)};
allocator.emplace(bitmap, frame_count);
diff --git a/kernel/src/main.cpp b/kernel/src/main.cpp
index d69903d..01bcbb0 100644
--- a/kernel/src/main.cpp
+++ b/kernel/src/main.cpp
@@ -2,6 +2,8 @@
#include "kapi/memory.hpp"
#include "kapi/system.hpp"
+#include "kernel/memory.hpp"
+
#include <kstd/print>
auto main() -> int
@@ -10,6 +12,7 @@ auto main() -> int
kstd::println("[OS] IO subsystem initialized.");
kapi::memory::init();
+ kernel::memory::init_heap(kapi::memory::heap_base);
kstd::println("[OS] Memory subsystem initialized.");
kapi::system::memory_initialized();
diff --git a/kernel/src/memory.cpp b/kernel/src/memory.cpp
new file mode 100644
index 0000000..14bedf1
--- /dev/null
+++ b/kernel/src/memory.cpp
@@ -0,0 +1,55 @@
+#include "kernel/memory.hpp"
+
+#include "kapi/memory.hpp"
+#include "kapi/system.hpp"
+
+#include "kernel/memory/free_list_allocator.hpp"
+#include "kernel/memory/heap_allocator.hpp"
+
+#include <atomic>
+#include <cstddef>
+#include <new>
+#include <optional>
+
+namespace kernel::memory
+{
+
+ namespace
+ {
+ struct null_allocator : heap_allocator
+ {
+ null_allocator static instance;
+
+ [[nodiscard]] auto allocate(std::size_t, std::align_val_t) noexcept -> void * override
+ {
+ return nullptr;
+ }
+
+ auto deallocate(void *) noexcept -> void override {}
+ };
+
+ constinit null_allocator null_allocator::instance{};
+
+ auto constinit active_heap_allocator = std::optional<heap_allocator *>{&null_allocator::instance};
+ auto constinit basic_allocator = std::optional<free_list_allocator>{};
+ } // namespace
+
+ auto get_heap_allocator() -> heap_allocator &
+ {
+ return *active_heap_allocator.value();
+ }
+
+ auto init_heap(kapi::memory::linear_address base) -> void
+ {
+ auto static constinit is_initialized = std::atomic_flag{};
+
+ if (is_initialized.test_and_set())
+ {
+ kapi::system::panic("[OS] The heap has already been initialized.");
+ }
+
+ auto & instance = basic_allocator.emplace(base);
+ active_heap_allocator = &instance;
+ }
+
+} // namespace kernel::memory \ No newline at end of file
diff --git a/kernel/src/memory/free_list_allocator.cpp b/kernel/src/memory/free_list_allocator.cpp
new file mode 100644
index 0000000..255e643
--- /dev/null
+++ b/kernel/src/memory/free_list_allocator.cpp
@@ -0,0 +1,187 @@
+#include "kernel/memory/free_list_allocator.hpp"
+
+#include "kapi/memory.hpp"
+#include "kapi/system.hpp"
+
+#include "kernel/memory/heap_allocator.hpp"
+
+#include <kstd/mutex>
+
+#include <bit>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <new>
+
+namespace kernel::memory
+{
+
+ namespace
+ {
+ [[nodiscard]] constexpr auto align_up(std::uintptr_t value, std::size_t alignment) noexcept -> std::uintptr_t
+ {
+ auto const remainder = value % alignment;
+ return remainder == 0 ? value : value + alignment - remainder;
+ }
+ } // namespace
+
+ free_list_allocator::free_list_allocator(kapi::memory::linear_address base) noexcept
+ : heap_allocator{}
+ , m_base{base}
+ , m_frontier{base}
+ , m_free_list{}
+ , m_lock{}
+ {}
+
+ auto free_list_allocator::allocate(std::size_t size, std::align_val_t alignment) noexcept -> void *
+ {
+ kstd::lock_guard guard{m_lock};
+
+ auto const alignment_value = static_cast<std::size_t>(alignment);
+ auto const search_size = size + alignment_value + sizeof(block_header *);
+
+ for (auto attempt = 0uz; attempt < 2uz; ++attempt)
+ {
+ auto current = m_free_list;
+ while (current != nullptr)
+ {
+ if (current->free && current->size >= search_size)
+ {
+ auto const payload_start = std::bit_cast<std::uintptr_t>(current) + sizeof(block_header);
+ auto const aligned_pointer = align_up(payload_start + sizeof(block_header *), alignment_value);
+ auto const padding = aligned_pointer - payload_start;
+
+ split(current, size, padding);
+ current->free = false;
+
+ auto back_pointer = std::bit_cast<block_header **>(aligned_pointer - sizeof(block_header *));
+ *back_pointer = current;
+
+ return std::bit_cast<void *>(aligned_pointer);
+ }
+ current = current->next;
+ }
+
+ if (attempt == 0uz && !expand(search_size))
+ {
+ return nullptr;
+ }
+ }
+
+ return nullptr;
+ }
+
+ auto free_list_allocator::deallocate(void * ptr) noexcept -> void
+ {
+ if (!ptr)
+ {
+ return;
+ }
+
+ kstd::lock_guard guard{m_lock};
+
+ auto const aligned_pointer = std::bit_cast<std::uintptr_t>(ptr);
+ auto back_pointer = std::bit_cast<block_header **>(aligned_pointer - sizeof(block_header *));
+ auto block = *back_pointer;
+
+ block->free = true;
+ coalesce(block);
+ }
+
+ auto free_list_allocator::expand(std::size_t size) noexcept -> bool
+ {
+ auto const total_required_size = size + sizeof(block_header);
+ auto const frames_needed = (total_required_size + kapi::memory::frame::size - 1) / kapi::memory::frame::size;
+
+ auto const flags = kapi::memory::page_mapper::flags::writable | kapi::memory::page_mapper::flags::supervisor_only;
+
+ for (auto i = 0uz; i < frames_needed; ++i)
+ {
+ auto frame = kapi::memory::allocate_frame();
+ if (!frame)
+ {
+ kapi::system::panic("[OS:Heap] OOM when expanding heap.");
+ return false;
+ }
+
+ auto page = kapi::memory::page::containing(m_frontier + i * kapi::memory::page::size);
+ kapi::memory::map(page, *frame, flags);
+ }
+
+ auto block = std::bit_cast<block_header *>(m_frontier.raw());
+ // std::construct_at(block, frames_needed * kapi::memory::frame::size - sizeof(block_header), true, nullptr,
+ // nullptr);
+ block->size = frames_needed * kapi::memory::frame::size - sizeof(block_header);
+ block->free = true;
+ block->next = nullptr;
+ block->prev = nullptr;
+
+ m_frontier += frames_needed * kapi::memory::frame::size;
+
+ if (!m_free_list)
+ {
+ m_free_list = block;
+ }
+ else
+ {
+ auto current = m_free_list;
+ while (current->next)
+ {
+ current = current->next;
+ }
+ current->next = block;
+ block->prev = current;
+ }
+
+ coalesce(block);
+ return true;
+ }
+
+ auto free_list_allocator::coalesce(block_header * block) noexcept -> void
+ {
+ if (block->next && block->next->free)
+ {
+ auto follower = block->next;
+ block->size += follower->size + sizeof(block_header);
+ block->next = follower->next;
+ if (block->next)
+ {
+ block->next->prev = block;
+ }
+ std::destroy_at(follower);
+ }
+
+ if (block->prev && block->prev->free)
+ {
+ auto leader = block->prev;
+ leader->size += block->size + sizeof(block_header);
+ leader->next = block->next;
+ if (block->next)
+ {
+ block->next->prev = leader;
+ }
+ std::destroy_at(block);
+ }
+ }
+
+ auto free_list_allocator::split(block_header * block, std::size_t size, std::size_t padding) noexcept -> void
+ {
+ auto const allocation_size = size + padding;
+
+ if (block->size > allocation_size + sizeof(block_header) + 16uz)
+ {
+ auto new_block =
+ std::bit_cast<block_header *>(std::bit_cast<std::uintptr_t>(block) + sizeof(block_header) + allocation_size);
+ std::construct_at(new_block, block->size - allocation_size - sizeof(block_header), true, block->next, block);
+
+ if (new_block->next)
+ {
+ new_block->next->prev = new_block;
+ }
+
+ block->size = allocation_size;
+ block->next = new_block;
+ }
+ }
+
+} // namespace kernel::memory \ No newline at end of file
diff --git a/kernel/src/memory/operators.cpp b/kernel/src/memory/operators.cpp
new file mode 100644
index 0000000..1422133
--- /dev/null
+++ b/kernel/src/memory/operators.cpp
@@ -0,0 +1,66 @@
+#include "kapi/system.hpp"
+
+#include "kernel/memory.hpp"
+
+#include <cstddef>
+#include <new>
+
+[[nodiscard]] auto operator new(std::size_t size) -> void *
+{
+ auto & allocator = kernel::memory::get_heap_allocator();
+ auto pointer = allocator.allocate(size, std::align_val_t{__STDCPP_DEFAULT_NEW_ALIGNMENT__});
+
+ if (pointer == nullptr)
+ {
+ kapi::system::panic("[OS:Heap] Out of memory!");
+ }
+
+ return pointer;
+}
+
+[[nodiscard]] auto operator new[](std::size_t size) -> void *
+{
+ return ::operator new(size);
+}
+
+[[nodiscard]] auto operator new(std::size_t size, std::align_val_t alignment) -> void *
+{
+ auto & allocator = kernel::memory::get_heap_allocator();
+ auto pointer = allocator.allocate(size, alignment);
+
+ if (pointer == nullptr)
+ {
+ kapi::system::panic("[OS:Heap] Out of memory!");
+ }
+
+ return pointer;
+}
+
+auto operator delete(void * pointer) noexcept -> void
+{
+ if (pointer != nullptr)
+ {
+ auto & allocator = kernel::memory::get_heap_allocator();
+ allocator.deallocate(pointer);
+ }
+}
+
+auto operator delete(void * pointer, [[maybe_unused]] std::size_t size) noexcept -> void
+{
+ ::operator delete(pointer);
+}
+
+auto operator delete[](void * pointer) noexcept -> void
+{
+ ::operator delete(pointer);
+}
+
+auto operator delete[](void * pointer, [[maybe_unused]] std::size_t size) noexcept -> void
+{
+ ::operator delete(pointer);
+}
+
+auto operator delete(void * pointer, [[maybe_unused]] std::align_val_t alignment) noexcept -> void
+{
+ ::operator delete(pointer);
+} \ No newline at end of file
diff --git a/libs/kstd/include/kstd/mutex b/libs/kstd/include/kstd/mutex
index 7795558..b2a31aa 100644
--- a/libs/kstd/include/kstd/mutex
+++ b/libs/kstd/include/kstd/mutex
@@ -66,7 +66,6 @@ namespace kstd
//!
//! @note This function will block until the mutex was successfully locked.
//! @param mutex The mutex to lock.
- [[gnu::section(".stl_text")]]
lock_guard(MutexType & mutex) noexcept
: m_mutex(mutex)
{
@@ -77,13 +76,11 @@ namespace kstd
//!
//! @note The behavior is undefined if the mutex is not owned by the current thread.
//! @param mutex The mutex to take ownership of.
- [[gnu::section(".stl_text")]]
lock_guard(MutexType & mutex, adopt_lock_t) noexcept
: m_mutex(mutex)
{}
//! Destroy this lock_guard and release the owned mutex.
- [[gnu::section(".stl_text")]]
~lock_guard()
{
m_mutex.unlock();