author    Lioncash <mathew1800@gmail.com>    2019-11-26 13:09:12 -0500
committer Lioncash <mathew1800@gmail.com>    2019-11-26 21:53:34 -0500
commit    323680e5ad3ca0e27f2dd1de26816741b3243bed (patch)
tree      ac7a9e683831493f0f14c8b9566c0d570807ad62 /src/core/memory.cpp
parent    4c2ed2706e3579ec1304907dad0d45673768e1fc (diff)
core/memory: Migrate over memory mapping functions to the new Memory class
Migrates all of the direct mapping facilities over to the new Memory class. In the process, this also eliminates the need for memory_setup.h, so it can be removed from the project entirely.
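For context, the migration follows the pimpl-style delegation the Memory class already uses: the public member functions stay thin wrappers that forward to a private Impl owning the Core::System reference. Below is a minimal, self-contained sketch of that pattern with simplified stand-in types (System, PageTable, etc. are placeholders, not the actual yuzu declarations):

// Illustrative sketch only; stand-in types replace the real yuzu ones.
#include <cstdint>
#include <memory>

struct System {};          // stand-in for Core::System
struct PageTable {};       // stand-in for Common::PageTable
using VAddr = std::uint64_t;
using u64 = std::uint64_t;
using u8 = std::uint8_t;

class Memory {
public:
    explicit Memory(System& system);
    ~Memory();

    // Public member functions stay thin and forward to the private Impl,
    // mirroring how MapMemoryRegion and friends are migrated in this diff.
    void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target);

private:
    struct Impl;
    std::unique_ptr<Impl> impl;
};

struct Memory::Impl {
    explicit Impl(System& system_) : system{system_} {}

    void MapMemoryRegion(PageTable&, VAddr, u64, u8*) {
        // The real implementation asserts page alignment and calls MapPages().
    }

    System& system;
};

Memory::Memory(System& system) : impl{std::make_unique<Impl>(system)} {}
Memory::~Memory() = default;

void Memory::MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target) {
    impl->MapMemoryRegion(page_table, base, size, target);
}

int main() {
    System system;
    Memory memory{system};
    PageTable page_table;
    memory.MapMemoryRegion(page_table, 0, 0, nullptr);
}

Once the former free functions become members declared in core/memory.h, the separate declarations in memory_setup.h have no remaining users, which is why the commit can delete that header.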
Diffstat (limited to 'src/core/memory.cpp')
-rw-r--r--   src/core/memory.cpp   177
1 file changed, 106 insertions(+), 71 deletions(-)
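For readers unfamiliar with the special_regions bookkeeping that appears in the diff below: it is a boost::icl::interval_map keyed by address intervals whose values are sets of regions, so overlapping hooks accumulate on add() and can be removed individually with subtract(), while erase() drops whole intervals. A standalone sketch of that behaviour, using plain int values as stand-ins for Common::SpecialRegion (illustrative only, not yuzu code):

#include <iostream>
#include <set>
#include <utility>
#include <boost/icl/interval_map.hpp>

int main() {
    using VAddr = unsigned long long;
    // Map from address intervals to the set of hooks active over them.
    boost::icl::interval_map<VAddr, std::set<int>> special_regions;

    const auto interval_a = boost::icl::discrete_interval<VAddr>::closed(0x1000, 0x2FFF);
    const auto interval_b = boost::icl::discrete_interval<VAddr>::closed(0x2000, 0x3FFF);

    // Adding overlapping regions merges their hook sets on the overlap,
    // as MapIoRegion/AddDebugHook do in the diff.
    special_regions.add(std::make_pair(interval_a, std::set<int>{1}));
    special_regions.add(std::make_pair(interval_b, std::set<int>{2}));

    // subtract() removes only the given hook from the affected intervals
    // (how RemoveDebugHook works); erase() would drop whole intervals
    // (how UnmapRegion works).
    special_regions.subtract(std::make_pair(interval_a, std::set<int>{1}));

    for (const auto& [interval, hooks] : special_regions) {
        std::cout << interval << " -> " << hooks.size() << " hook(s)\n";
    }
}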
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 2098f13f75..28b65ca5eb 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -17,7 +17,6 @@
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"
-#include "core/memory_setup.h"
#include "video_core/gpu.h"
namespace Memory {
@@ -30,99 +29,135 @@ static Common::PageTable* current_page_table = nullptr;
struct Memory::Impl {
explicit Impl(Core::System& system_) : system{system_} {}
- Core::System& system;
-};
+ void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
+ ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
+ ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+ MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
+ }
-Memory::Memory(Core::System& system) : impl{std::make_unique<Impl>(system)} {}
-Memory::~Memory() = default;
+ void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
+ Common::MemoryHookPointer mmio_handler) {
+ ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
+ ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+ MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr,
+ Common::PageType::Special);
+
+ const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+ const Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice,
+ std::move(mmio_handler)};
+ page_table.special_regions.add(
+ std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
+ }
-void SetCurrentPageTable(Kernel::Process& process) {
- current_page_table = &process.VMManager().page_table;
+ void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
+ ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
+ ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+ MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr,
+ Common::PageType::Unmapped);
- const std::size_t address_space_width = process.VMManager().GetAddressSpaceWidth();
+ const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+ page_table.special_regions.erase(interval);
+ }
- auto& system = Core::System::GetInstance();
- system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width);
- system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width);
- system.ArmInterface(2).PageTableChanged(*current_page_table, address_space_width);
- system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width);
-}
+ void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+ Common::MemoryHookPointer hook) {
+ const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+ const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
+ page_table.special_regions.add(
+ std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
+ }
+
+ void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+ Common::MemoryHookPointer hook) {
+ const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+ const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
+ page_table.special_regions.subtract(
+ std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
+ }
-static void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory,
- Common::PageType type) {
- LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE,
- (base + size) * PAGE_SIZE);
-
- // During boot, current_page_table might not be set yet, in which case we need not flush
- if (Core::System::GetInstance().IsPoweredOn()) {
- auto& gpu = Core::System::GetInstance().GPU();
- for (u64 i = 0; i < size; i++) {
- const auto page = base + i;
- if (page_table.attributes[page] == Common::PageType::RasterizerCachedMemory) {
- gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
+ /**
+ * Maps a region of pages as a specific type.
+ *
+ * @param page_table The page table to use to perform the mapping.
+ * @param base The base address to begin mapping at.
+ * @param size The total size of the range in bytes.
+ * @param memory The memory to map.
+ * @param type The page type to map the memory as.
+ */
+ void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory,
+ Common::PageType type) {
+ LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE,
+ (base + size) * PAGE_SIZE);
+
+ // During boot, current_page_table might not be set yet, in which case we need not flush
+ if (system.IsPoweredOn()) {
+ auto& gpu = system.GPU();
+ for (u64 i = 0; i < size; i++) {
+ const auto page = base + i;
+ if (page_table.attributes[page] == Common::PageType::RasterizerCachedMemory) {
+ gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
+ }
}
}
- }
- VAddr end = base + size;
- ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
- base + page_table.pointers.size());
+ const VAddr end = base + size;
+ ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
+ base + page_table.pointers.size());
- std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type);
+ std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type);
- if (memory == nullptr) {
- std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, memory);
- } else {
- while (base != end) {
- page_table.pointers[base] = memory;
+ if (memory == nullptr) {
+ std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end,
+ memory);
+ } else {
+ while (base != end) {
+ page_table.pointers[base] = memory;
- base += 1;
- memory += PAGE_SIZE;
+ base += 1;
+ memory += PAGE_SIZE;
+ }
}
}
-}
-void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
- ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
- ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
- MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
-}
+ Core::System& system;
+};
-void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
- Common::MemoryHookPointer mmio_handler) {
- ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
- ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
- MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Special);
+Memory::Memory(Core::System& system) : impl{std::make_unique<Impl>(system)} {}
+Memory::~Memory() = default;
- auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
- Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice, std::move(mmio_handler)};
- page_table.special_regions.add(
- std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
+void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
+ impl->MapMemoryRegion(page_table, base, size, target);
}
-void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
- ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
- ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
- MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Unmapped);
+void Memory::MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
+ Common::MemoryHookPointer mmio_handler) {
+ impl->MapIoRegion(page_table, base, size, std::move(mmio_handler));
+}
+
+void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
+ impl->UnmapRegion(page_table, base, size);
+}
- auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
- page_table.special_regions.erase(interval);
+void Memory::AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+ Common::MemoryHookPointer hook) {
+ impl->AddDebugHook(page_table, base, size, std::move(hook));
}
-void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
- Common::MemoryHookPointer hook) {
- auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
- Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
- page_table.special_regions.add(
- std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
+void Memory::RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+ Common::MemoryHookPointer hook) {
+ impl->RemoveDebugHook(page_table, base, size, std::move(hook));
}
-void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
- Common::MemoryHookPointer hook) {
- auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
- Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
- page_table.special_regions.subtract(
- std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
+void SetCurrentPageTable(Kernel::Process& process) {
+ current_page_table = &process.VMManager().page_table;
+
+ const std::size_t address_space_width = process.VMManager().GetAddressSpaceWidth();
+
+ auto& system = Core::System::GetInstance();
+ system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width);
+ system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width);
+ system.ArmInterface(2).PageTableChanged(*current_page_table, address_space_width);
+ system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width);
}
/**