aboutsummaryrefslogtreecommitdiff
path: root/arch/x86_64/include
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86_64/include')
-rw-r--r--arch/x86_64/include/arch/memory/multiboot/reader.hpp17
-rw-r--r--arch/x86_64/include/arch/memory/paging/active_page_table.hpp4
-rw-r--r--arch/x86_64/include/arch/memory/paging/inactive_page_table.hpp14
-rw-r--r--arch/x86_64/include/arch/memory/paging/kernel_mapper.hpp63
4 files changed, 73 insertions, 25 deletions
diff --git a/arch/x86_64/include/arch/memory/multiboot/reader.hpp b/arch/x86_64/include/arch/memory/multiboot/reader.hpp
index a5c4872..9707757 100644
--- a/arch/x86_64/include/arch/memory/multiboot/reader.hpp
+++ b/arch/x86_64/include/arch/memory/multiboot/reader.hpp
@@ -14,17 +14,12 @@ namespace teachos::arch::memory::multiboot
*/
struct memory_information
{
- std::size_t kernel_start; ///< Start address of the kernel code in memory.
- std::size_t kernel_end; ///< End address of the kernel code in memory.
- elf_section_header_container::iterator
- begin_kernel; ///< Iterator containing non-owning pointer to the first element of all kernel sections.
- elf_section_header_container::iterator
- end_kernel; ///< Iterator pointing to one past the last element of all kernel sections.
- std::size_t multiboot_start; ///< Start address of the multiboot code in memory.
- std::size_t multiboot_end; ///< End address of the multiboot code in memory.
- memory_area_container::iterator
- begin_area; ///< Iterator containing non-owning pointer to the first element of all memory areas.
- memory_area_container::iterator end_area; ///< Iterator pointing to one past the last element of all memory areas.
+ std::size_t kernel_start; ///< Start address of the kernel code in memory.
+ std::size_t kernel_end; ///< End address of the kernel code in memory.
+ elf_section_header_container sections; ///< Contains non-owning pointers to all kernel sections.
+ std::size_t multiboot_start; ///< Start address of the multiboot code in memory.
+ std::size_t multiboot_end; ///< End address of the multiboot code in memory.
+ memory_area_container areas; ///< Contains non-owning pointers to all memory areas.
};
/**
diff --git a/arch/x86_64/include/arch/memory/paging/active_page_table.hpp b/arch/x86_64/include/arch/memory/paging/active_page_table.hpp
index c183aff..71f70b5 100644
--- a/arch/x86_64/include/arch/memory/paging/active_page_table.hpp
+++ b/arch/x86_64/include/arch/memory/paging/active_page_table.hpp
@@ -12,8 +12,8 @@
namespace teachos::arch::memory::paging
{
/**
- * @brief Currently active level 4 page table, is used to ensure there is only ever one valid instance and it cannot
- * be copied or constructed again.
+ * @brief Level 4 page table currently in use by the CPU; this type ensures there is only ever one valid
+ * instance and it cannot be copied or constructed again.
*/
struct active_page_table
{
diff --git a/arch/x86_64/include/arch/memory/paging/inactive_page_table.hpp b/arch/x86_64/include/arch/memory/paging/inactive_page_table.hpp
index df3ba00..54a53f4 100644
--- a/arch/x86_64/include/arch/memory/paging/inactive_page_table.hpp
+++ b/arch/x86_64/include/arch/memory/paging/inactive_page_table.hpp
@@ -7,12 +7,24 @@
namespace teachos::arch::memory::paging
{
+ /**
+ * @brief Level 4 page table that is not currently in use by the CPU.
+ */
struct inactive_page_table
{
+ /**
+ * @brief Constructor.
+ *
+ * @param frame Frame that should be mapped as the level 4 page table.
+ * @param active_page_table Currently active page table that should be unmapped so we can map a new level 4
+ * page table.
+ * @param temporary_page Temporary page that should be used to map the given frame as the new level 4 page
+ * table.
+ */
inactive_page_table(allocator::physical_frame frame, active_page_table & active_page_table,
temporary_page temporary_page);
- allocator::physical_frame page_table_level_4_frame;
+ allocator::physical_frame page_table_level_4_frame; ///< Temporary level 4 page table
};
} // namespace teachos::arch::memory::paging
diff --git a/arch/x86_64/include/arch/memory/paging/kernel_mapper.hpp b/arch/x86_64/include/arch/memory/paging/kernel_mapper.hpp
index 0e2411a..5ee4ea8 100644
--- a/arch/x86_64/include/arch/memory/paging/kernel_mapper.hpp
+++ b/arch/x86_64/include/arch/memory/paging/kernel_mapper.hpp
@@ -14,9 +14,20 @@ namespace teachos::arch::memory::paging
typedef shared::container<allocator::physical_frame_iterator> frame_container;
+ /**
+ * @brief Kernel mapper that allows remapping the kernel ELF sections in C++.
+ *
+ * @tparam T Contract the allocator that should be used to allocate frames for the remapping process has to fulfill.
+ */
template<allocator::FrameAllocator T>
struct kernel_mapper
{
+ /**
+ * @brief Constructor.
+ *
+ * @param allocator Allocator that should be used to allocate frames for the remapping process.
+ * @param mem_info Information about the ELF kernel sections required for the remapping process.
+ */
kernel_mapper(T & allocator, multiboot::memory_information const & mem_info)
: allocator(allocator)
, mem_info(mem_info)
@@ -24,6 +35,14 @@ namespace teachos::arch::memory::paging
// Nothing to do
}
+ /**
+ * @brief Remap the kernel, meaning we map the entire kernel and all of its ELF sections with the correct flags
+ * into memory and then replace the current mapping with the newly created one.
+ *
+ * @note We have to use a workaround with an inactive page table that is not used by the CPU, to ensure we are
+ * not changing memory that we are currently using, because remapping active kernel memory from within the
+ * kernel wouldn't work.
+ */
auto remap_kernel() -> void
{
temporary_page temp_page{virtual_page{UNUSED_VIRTUAL_ADDRESS}, allocator};
@@ -32,30 +51,51 @@ namespace teachos::arch::memory::paging
exception_handling::assert(frame.has_value(),
"[Kernel Mapper] Frame could not be allocated and therefore kernel not mapped");
auto const inactive_table = inactive_page_table{frame.value(), active_table, temp_page};
- map_elf_sections(inactive_table, temp_page, active_table);
+ remap_elf_kernel_sections(inactive_table, temp_page, active_table);
}
private:
- auto map_elf_sections(inactive_page_table inactive_page_table, temporary_page temporary_page,
- active_page_table & active_table) -> void
+ /**
+ * @brief Remaps the kernel ELF sections. This is done by switching the current level 4 page table's recursive
+ * mapping to an unmapped address in memory and then actually mapping the level 4 page table at that address.
+ * Once the remapping process is done we can restore the original recursive mapping with the completely remapped
+ * kernel.
+ *
+ * @note Because we change the entries we also have to ensure we flush the translation lookaside buffer before we
+ * map the entries.
+ *
+ * @param inactive_table Level 4 page table we temporarily map the kernel into.
+ * @param temporary_page Temporary page that should be used for the mapping process and then
+ * unmapped once finished.
+ * @param active_table Active level 4 page table that has its recursive mapping overwritten temporarily and then
+ * restored once the process is finished.
+ */
+ auto remap_elf_kernel_sections(inactive_page_table inactive_table, temporary_page & temporary_page,
+ active_page_table & active_table) -> void
{
auto const backup = allocator::physical_frame::containing_address(PAGE_TABLE_LEVEL_4_ADDRESS);
auto page_table_level4 = temporary_page.map_table_frame(backup, active_table);
- active_table.active_handle[511].set_entry(inactive_page_table.page_table_level_4_frame,
+ active_table.active_handle[511].set_entry(inactive_table.page_table_level_4_frame,
entry::PRESENT | entry::WRITABLE);
tlb_flush_all();
- map_kernel_sections(active_table);
+ map_elf_kernel_sections(active_table);
page_table_level4[511].set_entry(backup, entry::PRESENT | entry::WRITABLE);
tlb_flush_all();
temporary_page.unmap(active_table);
}
- auto map_kernel_sections(active_page_table & active_table) -> void
+ /**
+ * @brief Maps the required entries according to every ELF section and its contained frames. Additionally each of
+ * those frames gets the correct entry flags according to the ELF section flags.
+ *
+ * @param active_table Active level 4 page table that should be used to map the required elf sections into entries.
+ * Has had its recursive mapping temporarily replaced and points to unmapped place in memory.
+ */
+ auto map_elf_kernel_sections(active_page_table & active_table) -> void
{
- multiboot::elf_section_header_container sections{mem_info.begin_kernel, mem_info.end_kernel};
- for (auto const & section : sections)
+ for (auto const & section : mem_info.sections)
{
if (!section.flags.contains_flags(multiboot::elf_section_flags::OCCUPIES_MEMORY))
{
@@ -71,7 +111,7 @@ namespace teachos::arch::memory::paging
allocator::physical_frame_iterator const end{end_frame};
frame_container frames{begin, end};
- for (auto frame : frames)
+ for (auto const & frame : frames)
{
// TODO: Use actual elf section flags, convert from one to the other flag type.
active_table.identity_map(allocator, frame, entry::WRITABLE);
@@ -79,8 +119,9 @@ namespace teachos::arch::memory::paging
}
}
- T & allocator;
- multiboot::memory_information const & mem_info;
+ T & allocator; ///< Allocator that should be used to allocate frames for the mapping process.
+ multiboot::memory_information const &
+ mem_info; ///< Information about the ELF kernel sections required for the remapping process.
};
} // namespace teachos::arch::memory::paging