diffstat:

 arch/x86_64/include/x86_64/memory/kernel_mapper.hpp  |   6 -
 arch/x86_64/include/x86_64/memory/page_table.hpp     |  54 ----
 arch/x86_64/include/x86_64/memory/paging_root.hpp    |  22 --
 arch/x86_64/include/x86_64/memory/scoped_mapping.hpp |   4 -
 arch/x86_64/src/kapi/memory.cpp                      |  36 ---
 arch/x86_64/src/memory/kernel_mapper.cpp             |  25 --
 arch/x86_64/src/memory/paging_root.cpp               | 149 ----------
 arch/x86_64/src/memory/recursive_page_mapper.cpp     |  85 ------
 arch/x86_64/src/memory/scoped_mapping.cpp            |  11 -

 9 files changed, 180 insertions(+), 212 deletions(-)
diff --git a/arch/x86_64/include/x86_64/memory/kernel_mapper.hpp b/arch/x86_64/include/x86_64/memory/kernel_mapper.hpp index 1f217ae..5b9c2fd 100644 --- a/arch/x86_64/include/x86_64/memory/kernel_mapper.hpp +++ b/arch/x86_64/include/x86_64/memory/kernel_mapper.hpp @@ -1,6 +1,8 @@ #ifndef TEACHOS_X86_64_KERNEL_MAPPER_HPP #define TEACHOS_X86_64_KERNEL_MAPPER_HPP +#include "kapi/memory.hpp" + #include <elf/format.hpp> #include <elf/section_header.hpp> #include <multiboot2/information.hpp> @@ -16,10 +18,10 @@ namespace teachos::memory::x86_64 explicit kernel_mapper(multiboot2::information_view const * mbi); - auto remap_kernel() -> void; + auto remap_kernel(page_mapper & mapper) -> void; private: - auto map_section(section_header_type const & section, std::string_view name) -> void; + auto map_section(section_header_type const & section, std::string_view name, page_mapper & mapper) -> void; multiboot2::information_view const * m_mbi; std::uintptr_t m_kernel_load_base; diff --git a/arch/x86_64/include/x86_64/memory/page_table.hpp b/arch/x86_64/include/x86_64/memory/page_table.hpp index 3bc2a2b..71ba5b7 100644 --- a/arch/x86_64/include/x86_64/memory/page_table.hpp +++ b/arch/x86_64/include/x86_64/memory/page_table.hpp @@ -3,6 +3,8 @@ #include "kapi/memory.hpp" +#include "x86_64/memory/page_utilities.hpp" + #include <kstd/ext/bitfield_enum> #include <array> @@ -183,6 +185,58 @@ namespace teachos::memory::x86_64 }); } + [[nodiscard]] auto translate(linear_address address) const -> std::optional<physical_address> + requires(Level == 4) + { + auto offset = address.raw() % page::size; + return translate(page::containing(address)).transform([offset](auto frame) -> auto { + return physical_address{frame.start_address().raw() + offset}; + }); + } + + [[nodiscard]] auto translate(page page) const -> std::optional<frame> + requires(Level == 4) + { + auto pml3 = next(pml_index<4>(page)); + + if (!pml3) + { + return std::nullopt; + } + + auto handle_huge_page = [&] -> 
std::optional<frame> { + auto pml3_entry = pml3.transform([&](auto pml3) -> auto { return (*pml3)[pml_index<3>(page)]; }); + if (!pml3_entry) + { + return std::nullopt; + } + else if (pml3_entry->huge()) + { + auto pml3_entry_frame = *pml3_entry->frame(); + return frame{pml3_entry_frame.number() + pml_index<2>(page) * entry_count + pml_index<1>(page)}; + } + + auto pml2 = (*pml3)->next(pml_index<3>(page)); + auto pml2_entry = pml2.transform([&](auto pml2) -> auto { return (*pml2)[pml_index<2>(page)]; }); + if (!pml2_entry) + { + return std::nullopt; + } + else if (pml2_entry->huge()) + { + auto pml2_entry_frame = *pml2_entry->frame(); + return frame{pml2_entry_frame.number() + pml_index<1>(page)}; + } + + return std::nullopt; + }; + + return pml3.and_then([&](auto pml3) -> auto { return pml3->next(pml_index<3>(page)); }) + .and_then([&](auto pml2) -> auto { return pml2->next(pml_index<2>(page)); }) + .and_then([&](auto pml1) -> auto { return (*pml1)[pml_index<1>(page)].frame(); }) + .or_else(handle_huge_page); + } + private: //! The number of address bits used to represent the page index per level. constexpr auto static level_bits = 9; diff --git a/arch/x86_64/include/x86_64/memory/paging_root.hpp b/arch/x86_64/include/x86_64/memory/paging_root.hpp index 75ba120..47ee2f9 100644 --- a/arch/x86_64/include/x86_64/memory/paging_root.hpp +++ b/arch/x86_64/include/x86_64/memory/paging_root.hpp @@ -1,19 +1,15 @@ #ifndef TEACHOS_X86_64_PAGING_ROOT_HPP #define TEACHOS_X86_64_PAGING_ROOT_HPP -#include "kapi/memory.hpp" - #include "x86_64/memory/page_table.hpp" -#include <optional> - namespace teachos::memory::x86_64 { //! The active, recursively mapped, root map (e.g. 
PML4) struct paging_root : recursive_page_table<4> { - auto static get() -> paging_root &; + auto static get() -> paging_root *; paging_root(paging_root const &) = delete; paging_root(paging_root &&) = delete; @@ -22,22 +18,6 @@ namespace teachos::memory::x86_64 ~paging_root() = delete; - [[nodiscard]] auto translate(linear_address address) const -> std::optional<physical_address>; - [[nodiscard]] auto translate(page page) const -> std::optional<frame>; - - //! Map the given page into the given frame using the given flags. - //! - //! @param page A page to map. - //! @param frame The frame into which to map the page. - //! @param flags The flags to apply to the mapping. - auto map(page page, frame frame, entry::flags flags) -> std::optional<std::byte *>; - - //! Unmap the given page from virtual memory. - //! - //! @warning If the page has not previously been mapped, this function will panic. - //! @param page The page to unmap - auto unmap(page page) -> void; - private: paging_root() = default; }; diff --git a/arch/x86_64/include/x86_64/memory/scoped_mapping.hpp b/arch/x86_64/include/x86_64/memory/scoped_mapping.hpp index 415ea8e..835e2df 100644 --- a/arch/x86_64/include/x86_64/memory/scoped_mapping.hpp +++ b/arch/x86_64/include/x86_64/memory/scoped_mapping.hpp @@ -22,7 +22,8 @@ namespace teachos::memory::x86_64 //! Construct a new scoped mapping, which can be used to map a frame to the given unused page. //! @param page An unused page. If the page is already mapped, this constructor will panic. - explicit scoped_mapping(page page); + //! @param mapper The page mapper to use for mapping and unmapping of the page. + explicit scoped_mapping(page page, page_mapper & mapper); //! Unmap the mapped frame if one was mapped. //! @note Any page tables that were allocated to support the mapping will be released. 
@@ -56,6 +57,7 @@ namespace teachos::memory::x86_64 private: page m_page; + page_mapper * m_mapper; bool m_mapped; }; diff --git a/arch/x86_64/src/kapi/memory.cpp b/arch/x86_64/src/kapi/memory.cpp index abc0526..ae0401e 100644 --- a/arch/x86_64/src/kapi/memory.cpp +++ b/arch/x86_64/src/kapi/memory.cpp @@ -67,25 +67,25 @@ namespace teachos::memory } //! Inject, or graft, a faux recursive PML4 into the active page mapping structure. - auto inject_faux_pml4(frame_allocator & allocator) + auto inject_faux_pml4(frame_allocator & allocator, page_mapper & mapper) { using namespace x86_64; using entry_flags = page_table::entry::flags; auto page = page::containing(unused_page_address); - auto temporary_mapper = scoped_mapping{page}; + auto temporary_mapper = scoped_mapping{page, mapper}; auto new_pml4_frame = allocator.allocate(); auto pml4 = std::construct_at(temporary_mapper.map_as<page_table>(*new_pml4_frame, entry_flags::writable)); (*pml4)[recursive_page_map_index].frame(new_pml4_frame.value(), entry_flags::present | entry_flags::writable); auto pml4_index = pml_index<4>(page); - auto & old_pml4 = paging_root::get(); - auto pml4_entry = old_pml4[pml4_index]; + auto old_pml4 = paging_root::get(); + auto pml4_entry = (*old_pml4)[pml4_index]; auto pml3_index = pml_index<3>(page); - auto old_pml3 = old_pml4.next(pml4_index); + auto old_pml3 = old_pml4->next(pml4_index); auto pml3_entry = (**old_pml3)[pml3_index]; auto pml2_index = pml_index<2>(page); @@ -96,15 +96,15 @@ namespace teachos::memory auto old_pml1 = (**old_pml2).next(pml2_index); auto pml1_entry = (**old_pml1)[pml1_index]; - paging_root::get()[recursive_page_map_index].frame(new_pml4_frame.value(), - entry_flags::present | entry_flags::writable); + (*paging_root::get())[recursive_page_map_index].frame(new_pml4_frame.value(), + entry_flags::present | entry_flags::writable); tlb_flush_all(); - auto & new_pml4 = paging_root::get(); - new_pml4[pml4_index] = pml4_entry; + auto new_pml4 = paging_root::get(); + 
(*new_pml4)[pml4_index] = pml4_entry; - auto new_pml3 = new_pml4.next(pml4_index); + auto new_pml3 = new_pml4->next(pml4_index); (**new_pml3)[pml3_index] = pml3_entry; auto new_pml2 = (**new_pml3).next(pml3_index); @@ -116,10 +116,10 @@ namespace teachos::memory return *new_pml4_frame; } - auto remap_kernel() -> void + auto remap_kernel(page_mapper & mapper) -> void { auto kernel_mapper = x86_64::kernel_mapper{boot::bootstrap_information.mbi}; - kernel_mapper.remap_kernel(); + kernel_mapper.remap_kernel(mapper); } auto remap_vga_text_mode_buffer(page_mapper & mapper) -> void @@ -188,16 +188,13 @@ namespace teachos::memory auto early_allocator = create_early_frame_allocator(); auto allocation_buffer = x86_64::buffered_allocator<4>{&early_allocator}; - allocator = &allocation_buffer; - auto recursive_mapper = x86_64::recursive_page_mapper{allocation_buffer}; - mapper = &recursive_mapper; - - auto new_pml4_frame = inject_faux_pml4(allocation_buffer); cio::println("[x86_64:MEM] Preparing new paging hierarchy."); - remap_kernel(); + auto new_pml4_frame = inject_faux_pml4(allocation_buffer, recursive_mapper); + + remap_kernel(recursive_mapper); remap_vga_text_mode_buffer(recursive_mapper); remap_multiboot_information(recursive_mapper); @@ -206,9 +203,6 @@ namespace teachos::memory auto cr3 = cpu::x86_64::cr3::read(); cr3.address(new_pml4_frame.start_address()); cpu::x86_64::cr3::write(cr3); - - mapper = nullptr; - allocator = nullptr; } } // namespace teachos::memory diff --git a/arch/x86_64/src/memory/kernel_mapper.cpp b/arch/x86_64/src/memory/kernel_mapper.cpp index f46b5b5..a28cf00 100644 --- a/arch/x86_64/src/memory/kernel_mapper.cpp +++ b/arch/x86_64/src/memory/kernel_mapper.cpp @@ -5,8 +5,6 @@ #include "kapi/system.hpp" #include "x86_64/boot/ld.hpp" -#include "x86_64/memory/page_table.hpp" -#include "x86_64/memory/paging_root.hpp" #include <elf/format.hpp> #include <elf/section_header.hpp> @@ -17,7 +15,7 @@ namespace teachos::memory::x86_64 { - inline namespace + 
namespace { using namespace std::string_view_literals; @@ -37,7 +35,7 @@ namespace teachos::memory::x86_64 , m_kernel_load_base{std::bit_cast<std::uintptr_t>(&boot::x86_64::TEACHOS_VMA)} {} - auto kernel_mapper::remap_kernel() -> void + auto kernel_mapper::remap_kernel(page_mapper & mapper) -> void { auto elf_information = m_mbi->maybe_elf_symbols<elf::format::elf64>(); if (!elf_information) @@ -60,10 +58,11 @@ namespace teachos::memory::x86_64 } std::ranges::for_each(allocated_sections, - [&](auto const & section) -> auto { map_section(section, sections.name(section)); }); + [&](auto const & section) -> auto { map_section(section, sections.name(section), mapper); }); } - auto kernel_mapper::map_section(section_header_type const & section, std::string_view name) -> void + auto kernel_mapper::map_section(section_header_type const & section, std::string_view name, page_mapper & mapper) + -> void { cio::print("[x86_64:MEM] mapping "); cio::println(name); @@ -76,30 +75,30 @@ namespace teachos::memory::x86_64 auto first_page = page::containing(linear_start_address); auto first_frame = frame::containing(physical_start_address); - auto page_flags = page_table::entry::flags::empty; + auto page_flags = page_mapper::flags::empty; if (section.writable()) { - page_flags |= page_table::entry::flags::writable; + page_flags |= page_mapper::flags::writable; } - if (!section.executable()) + if (section.executable()) { - page_flags |= page_table::entry::flags::no_execute; + page_flags |= page_mapper::flags::executable; } auto is_prefix_of_name = [=](auto prefix) -> bool { return name.starts_with(prefix); }; - if (std::ranges::any_of(user_accessible_prefixes, is_prefix_of_name)) + if (!std::ranges::any_of(user_accessible_prefixes, is_prefix_of_name)) { - page_flags |= page_table::entry::flags::user_accessible; + page_flags |= page_mapper::flags::supervisor_only; } for (auto i = 0uz; i < number_of_pages; ++i) { - paging_root::get().map(page{first_page.number() + i}, 
frame{first_frame.number() + i}, page_flags); + mapper.map(first_page + i, first_frame + i, page_flags); } } diff --git a/arch/x86_64/src/memory/paging_root.cpp b/arch/x86_64/src/memory/paging_root.cpp index 078686b..4f88657 100644 --- a/arch/x86_64/src/memory/paging_root.cpp +++ b/arch/x86_64/src/memory/paging_root.cpp @@ -1,161 +1,18 @@ #include "x86_64/memory/paging_root.hpp" -#include "kapi/memory.hpp" -#include "kapi/system.hpp" - -#include "x86_64/memory/page_table.hpp" -#include "x86_64/memory/page_utilities.hpp" -#include "x86_64/memory/scoped_mapping.hpp" - -#include <cstddef> #include <cstdint> -#include <memory> -#include <optional> namespace teachos::memory::x86_64 { namespace { - constexpr auto PML_RECURSIVE_BASE = std::uintptr_t{0177777'776'776'776'776'0000uz}; - - //! Perform the actual mapping of the page, via the recursive page map. - //! - //! On any level above PML1, the entries need to not be no_execute, because the image is densely packed. The entries - //! also need to be writable, since the mapping is being performed through the recursive page map hierarchy. When - //! setting the final entry in the PML1, the actually desired flags are set as is, with the present bit added, thus - //! still enforcing non-writability and non-execution of the affected page. 
- template<std::size_t Level> - requires(Level > 1uz && Level < 5uz) - auto do_map(recursive_page_table<Level> * pml, page page, page_table::entry::flags flags) - { - auto index = pml_index<Level>(page); - flags = flags & ~page_table::entry::flags::no_execute; - flags = flags | page_table::entry::flags::writable; - if (!(*pml)[index].present()) - { - auto new_table_frame = active_frame_allocator().allocate(); - auto mapping = scoped_mapping{page}; - (*pml)[index].frame(new_table_frame.value(), page_table::entry::flags::present | flags); - auto new_table = std::optional{std::construct_at(*pml->next(index))}; - return new_table; - } - (*pml)[index] |= flags; - return pml->next(index); - } - - //! Perform the actual PML1 update. - auto do_map(page_table * pml, page page, frame frame, page_table::entry::flags flags) -> std::optional<std::byte *> - { - auto index = pml_index<1>(page); - if ((*pml)[index].present()) - { - system::panic("[x86_64:MEM] Tried to map a page that is already mapped"); - } - (*pml)[index].frame(frame, page_table::entry::flags::present | flags); - return std::optional{static_cast<std::byte *>(page.start_address())}; - } + constexpr auto recursive_base = std::uintptr_t{0177777'776'776'776'776'0000uz}; } // namespace - auto paging_root::get() -> paging_root & - { - auto pml4_address = std::bit_cast<paging_root *>(PML_RECURSIVE_BASE); - return *pml4_address; - } - - auto paging_root::translate(linear_address address) const -> std::optional<physical_address> - { - auto offset = address.raw() % page::size; - return translate(page::containing(address)).transform([offset](auto frame) -> auto { - return physical_address{frame.start_address().raw() + offset}; - }); - } - - auto paging_root::translate(page page) const -> std::optional<frame> - { - auto pml3 = next(pml_index<4>(page)); - - if (!pml3) - { - return std::nullopt; - } - - auto handle_huge_page = [&] -> std::optional<frame> { - auto pml3_entry = pml3.transform([&](auto pml3) -> auto { return 
(*pml3)[pml_index<3>(page)]; }); - if (!pml3_entry) - { - return std::nullopt; - } - else if (pml3_entry->huge()) - { - auto pml3_entry_frame = *pml3_entry->frame(); - return frame{pml3_entry_frame.number() + pml_index<2>(page) * entry_count + pml_index<1>(page)}; - } - - auto pml2 = (*pml3)->next(pml_index<3>(page)); - auto pml2_entry = pml2.transform([&](auto pml2) -> auto { return (*pml2)[pml_index<2>(page)]; }); - if (!pml2_entry) - { - return std::nullopt; - } - else if (pml2_entry->huge()) - { - auto pml2_entry_frame = *pml2_entry->frame(); - return frame{pml2_entry_frame.number() + pml_index<1>(page)}; - } - - return std::nullopt; - }; - - return pml3.and_then([&](auto pml3) -> auto { return pml3->next(pml_index<3>(page)); }) - .and_then([&](auto pml2) -> auto { return pml2->next(pml_index<2>(page)); }) - .and_then([&](auto pml1) -> auto { return (*pml1)[pml_index<1>(page)].frame(); }) - .or_else(handle_huge_page); - } - - auto paging_root::map(page page, frame frame, page_table::entry::flags flags) -> std::optional<std::byte *> + auto paging_root::get() -> paging_root * { - return std::optional{this} - .and_then([&](auto pml) -> auto { return do_map(pml, page, flags); }) - .and_then([&](auto pml) -> auto { return do_map(pml, page, flags); }) - .and_then([&](auto pml) -> auto { return do_map(pml, page, flags); }) - .and_then([&](auto pml) -> auto { return do_map(pml, page, frame, flags); }); - } - - auto paging_root::unmap(page page) -> void - { - if (!this->translate(page)) - { - system::panic("[x86_64:MEM] Tried to unmap a page that was not mapped."); - } - - auto pml4 = this; - auto pml3 = pml4->next(pml_index<4>(page)).value(); - auto pml2 = pml3->next(pml_index<3>(page)).value(); - auto pml1 = pml2->next(pml_index<2>(page)).value(); - - (*pml1)[pml_index<1>(page)].clear(); - - if (pml1->empty()) - { - auto pml1_frame = (*pml2)[pml_index<2>(page)].frame().value(); - active_frame_allocator().release(pml1_frame); - (*pml2)[pml_index<2>(page)].clear(); - } 
- - if (pml2->empty()) - { - auto pml2_frame = (*pml3)[pml_index<3>(page)].frame().value(); - active_frame_allocator().release(pml2_frame); - (*pml3)[pml_index<3>(page)].clear(); - } - - if (pml3->empty()) - { - auto pml3_frame = (*pml4)[pml_index<4>(page)].frame().value(); - active_frame_allocator().release(pml3_frame); - (*pml4)[pml_index<4>(page)].clear(); - } + return std::bit_cast<paging_root *>(recursive_base); } } // namespace teachos::memory::x86_64
\ No newline at end of file diff --git a/arch/x86_64/src/memory/recursive_page_mapper.cpp b/arch/x86_64/src/memory/recursive_page_mapper.cpp index 47148f0..fe4fd50 100644 --- a/arch/x86_64/src/memory/recursive_page_mapper.cpp +++ b/arch/x86_64/src/memory/recursive_page_mapper.cpp @@ -7,13 +7,63 @@ namespace teachos::memory::x86_64 { + namespace + { + //! Perform the actual mapping of the page, via the recursive page map. + //! + //! On any level above PML1, the entries need to not be no_execute, because the image is densely packed. The entries + //! also need to be writable, since the mapping is being performed through the recursive page map hierarchy. When + //! setting the final entry in the PML1, the actually desired flags are set as is, with the present bit + //! added, thus + //! still enforcing non-writability and non-execution of the affected page. + template<std::size_t Level> + requires(Level > 1uz && Level < 5uz) + auto do_map(recursive_page_table<Level> * pml, page page, frame_allocator & allocator, page_mapper::flags flags) + { + auto index = pml_index<Level>(page); + auto entry_flags = to_table_flags(flags); + + entry_flags = entry_flags & ~page_table::entry::flags::no_execute; + entry_flags = entry_flags | page_table::entry::flags::writable; + if (!(*pml)[index].present()) + { + auto new_table_frame = allocator.allocate(); + (*pml)[index].frame(new_table_frame.value(), page_table::entry::flags::present | entry_flags); + auto new_table = std::optional{std::construct_at(*pml->next(index))}; + return new_table; + } + (*pml)[index] |= entry_flags; + return pml->next(index); + } + + //! Perform the actual PML1 update. 
+ auto do_map(page_table * pml, page page, frame frame, page_mapper::flags flags) -> std::optional<std::byte *> + { + auto index = pml_index<1>(page); + if ((*pml)[index].present()) + { + system::panic("[x86_64:MEM] Tried to map a page that is already mapped"); + } + (*pml)[index].frame(frame, page_table::entry::flags::present | to_table_flags(flags)); + return std::optional{static_cast<std::byte *>(page.start_address())}; + } + + } // namespace + recursive_page_mapper::recursive_page_mapper(frame_allocator & allocator) : m_allocator{&allocator} {} auto recursive_page_mapper::map(page page, frame frame, flags flags) -> std::byte * { - return paging_root::get().map(page, frame, to_table_flags(flags)).value_or(nullptr); + auto pml4 = static_cast<recursive_page_table<4> *>((paging_root::get())); + + return std::optional{pml4} + .and_then([&](auto pml) -> auto { return do_map(pml, page, *m_allocator, flags); }) + .and_then([&](auto pml) -> auto { return do_map(pml, page, *m_allocator, flags); }) + .and_then([&](auto pml) -> auto { return do_map(pml, page, *m_allocator, flags); }) + .and_then([&](auto pml) -> auto { return do_map(pml, page, frame, flags); }) + .value_or(nullptr); } auto recursive_page_mapper::unmap(page page) -> void @@ -26,12 +76,39 @@ namespace teachos::memory::x86_64 auto recursive_page_mapper::try_unmap(page page) noexcept -> bool { - auto & root = paging_root::get(); - if (!root.translate(page)) + if (!paging_root::get()->translate(page)) { return false; } - root.unmap(page); + + auto pml4 = paging_root::get(); + auto pml3 = pml4->next(pml_index<4>(page)).value(); + auto pml2 = pml3->next(pml_index<3>(page)).value(); + auto pml1 = pml2->next(pml_index<2>(page)).value(); + + (*pml1)[pml_index<1>(page)].clear(); + + if (pml1->empty()) + { + auto pml1_frame = (*pml2)[pml_index<2>(page)].frame().value(); + m_allocator->release(pml1_frame); + (*pml2)[pml_index<2>(page)].clear(); + } + + if (pml2->empty()) + { + auto pml2_frame = 
(*pml3)[pml_index<3>(page)].frame().value(); + m_allocator->release(pml2_frame); + (*pml3)[pml_index<3>(page)].clear(); + } + + if (pml3->empty()) + { + auto pml3_frame = (*pml4)[pml_index<4>(page)].frame().value(); + m_allocator->release(pml3_frame); + (*pml4)[pml_index<4>(page)].clear(); + } + return true; } diff --git a/arch/x86_64/src/memory/scoped_mapping.cpp b/arch/x86_64/src/memory/scoped_mapping.cpp index e243dc9..fa68387 100644 --- a/arch/x86_64/src/memory/scoped_mapping.cpp +++ b/arch/x86_64/src/memory/scoped_mapping.cpp @@ -14,14 +14,16 @@ namespace teachos::memory::x86_64 scoped_mapping::scoped_mapping(scoped_mapping && other) noexcept : m_page{std::exchange(other.m_page, page{})} + , m_mapper{std::exchange(other.m_mapper, nullptr)} , m_mapped{std::exchange(other.m_mapped, false)} {} - scoped_mapping::scoped_mapping(page page) + scoped_mapping::scoped_mapping(page page, page_mapper & mapper) : m_page{page} + , m_mapper{&mapper} , m_mapped{false} { - if (paging_root::get().translate(page)) + if (paging_root::get()->translate(page)) { system::panic("[MEM] Tried to map a page that is already mapped!"); } @@ -44,14 +46,14 @@ namespace teachos::memory::x86_64 auto scoped_mapping::map(frame frame, page_table::entry::flags flags) -> std::byte * { - auto result = active_page_mapper().map(m_page, frame, to_mapper_flags(flags)); + auto result = m_mapper->map(m_page, frame, to_mapper_flags(flags)); m_mapped = true; return result; } auto scoped_mapping::unmap() -> void { - active_page_mapper().unmap(m_page); + m_mapper->unmap(m_page); m_mapped = false; } @@ -59,6 +61,7 @@ namespace teachos::memory::x86_64 { using std::swap; swap(lhs.m_page, rhs.m_page); + swap(lhs.m_mapper, rhs.m_mapper); swap(lhs.m_mapped, rhs.m_mapped); } |
