From 641f20fd782deb6d9e1e1b9996005ad893028744 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matteo=20Gm=C3=BCr?= Date: Sun, 24 Nov 2024 09:22:23 +0000 Subject: Adjust notes on actual unmap implementation --- arch/x86_64/include/arch/memory/paging/active_page_table.hpp | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch/x86_64/include') diff --git a/arch/x86_64/include/arch/memory/paging/active_page_table.hpp b/arch/x86_64/include/arch/memory/paging/active_page_table.hpp index 88c1c82..4687209 100644 --- a/arch/x86_64/include/arch/memory/paging/active_page_table.hpp +++ b/arch/x86_64/include/arch/memory/paging/active_page_table.hpp @@ -121,8 +121,12 @@ namespace teachos::arch::memory::paging /** * @brief Unmaps the virtual page from the previously mapped to physical frame and resets the flags. * - * @note Deallocates and unmaps the entry in every page level if this page was the last one up to level 4 and - * ensures to clear the Translation Lookaside Buffer, so that the unmapped value is removed from cache as well. + * @note For the unmap function to deallocate and unmap correctly, the entry in every page level, if this page was + * the last one, should be unmapped up to level 4, and the Translation Lookaside Buffer should be cleared, so that + * the unmapped value is removed from cache as well. This is currently not done; instead we only deallocate and + * unmap the level 1 page table entry. This is the case because it conflicts with our recursive mapping for the + * temporary page, which requires the other page table entries to walk to the actual level 4 page table. If we + * remove all page table entries beforehand, we therefore cannot remap the kernel anymore. * * @tparam T Type constraint of the allocator, being that is follows the given concept and contains an allocate and * deallocate method. -- cgit v1.2.3