23 files changed, 273 insertions, 119 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d1ad55c9cc..d44d67562d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -229,7 +229,7 @@ elseif (${CMAKE_SYSTEM_NAME} STREQUAL "Linux" OR YUZU_USE_BUNDLED_BOOST)
         include_directories(SYSTEM "${Boost_INCLUDE_DIRS}")
     else()
         message(STATUS "Boost 1.73.0 or newer not found, falling back to Conan")
-        list(APPEND CONAN_REQUIRED_LIBS "boost/1.73.0")
+        list(APPEND CONAN_REQUIRED_LIBS "boost/1.78.0")
     endif()
 
 # Attempt to locate any packages that are required and report the missing ones in CONAN_REQUIRED_LIBS
diff --git a/externals/dynarmic b/externals/dynarmic
-Subproject cce7e4ee5d7b07a4609c73c053fbf57dc8c7845
+Subproject 28714ee75aa079cbb706e38bdabc8ee1f6c6951
diff --git a/src/core/frontend/applets/controller.cpp b/src/core/frontend/applets/controller.cpp
index 6dbd38ffae..e1033b6345 100644
--- a/src/core/frontend/applets/controller.cpp
+++ b/src/core/frontend/applets/controller.cpp
@@ -45,26 +45,26 @@ void DefaultControllerApplet::ReconfigureControllers(std::function<void()> callb
         // Pro Controller -> Dual Joycons -> Left Joycon/Right Joycon -> Handheld
         if (parameters.allow_pro_controller) {
             controller->SetNpadStyleIndex(Core::HID::NpadStyleIndex::ProController);
-            controller->Connect();
+            controller->Connect(true);
         } else if (parameters.allow_dual_joycons) {
             controller->SetNpadStyleIndex(Core::HID::NpadStyleIndex::JoyconDual);
-            controller->Connect();
+            controller->Connect(true);
         } else if (parameters.allow_left_joycon && parameters.allow_right_joycon) {
             // Assign left joycons to even player indices and right joycons to odd player indices.
             // We do this since Captain Toad Treasure Tracker expects a left joycon for Player 1 and
             // a right Joycon for Player 2 in 2 Player Assist mode.
             if (index % 2 == 0) {
                 controller->SetNpadStyleIndex(Core::HID::NpadStyleIndex::JoyconLeft);
-                controller->Connect();
+                controller->Connect(true);
             } else {
                 controller->SetNpadStyleIndex(Core::HID::NpadStyleIndex::JoyconRight);
-                controller->Connect();
+                controller->Connect(true);
             }
         } else if (index == 0 && parameters.enable_single_mode && parameters.allow_handheld &&
                    !Settings::values.use_docked_mode.GetValue()) {
             // We should *never* reach here under any normal circumstances.
             controller->SetNpadStyleIndex(Core::HID::NpadStyleIndex::Handheld);
-            controller->Connect();
+            controller->Connect(true);
         } else {
             UNREACHABLE_MSG("Unable to add a new controller based on the given parameters!");
         }
diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp
index ff9d7a7e36..2d3fce2761 100644
--- a/src/core/hid/emulated_controller.cpp
+++ b/src/core/hid/emulated_controller.cpp
@@ -886,8 +886,9 @@ void EmulatedController::SetSupportedNpadStyleTag(NpadStyleTag supported_styles)
     }
 }
 
-bool EmulatedController::IsControllerSupported() const {
-    switch (npad_type) {
+bool EmulatedController::IsControllerSupported(bool use_temporary_value) const {
+    const auto type = is_configuring && use_temporary_value ? tmp_npad_type : npad_type;
+    switch (type) {
     case NpadStyleIndex::ProController:
         return supported_style_tag.fullkey;
     case NpadStyleIndex::Handheld:
@@ -915,9 +916,10 @@ bool EmulatedController::IsControllerSupported() const {
     }
 }
 
-void EmulatedController::Connect() {
-    if (!IsControllerSupported()) {
-        LOG_ERROR(Service_HID, "Controller type {} is not supported", npad_type);
+void EmulatedController::Connect(bool use_temporary_value) {
+    if (!IsControllerSupported(use_temporary_value)) {
+        const auto type = is_configuring && use_temporary_value ? tmp_npad_type : npad_type;
+        LOG_ERROR(Service_HID, "Controller type {} is not supported", type);
         return;
     }
     {
diff --git a/src/core/hid/emulated_controller.h b/src/core/hid/emulated_controller.h
index e42aafebc4..d887eca870 100644
--- a/src/core/hid/emulated_controller.h
+++ b/src/core/hid/emulated_controller.h
@@ -167,8 +167,11 @@ public:
      */
    void SetSupportedNpadStyleTag(NpadStyleTag supported_styles);
 
-    /// Sets the connected status to true
-    void Connect();
+    /**
+     * Sets the connected status to true
+     * @param use_temporary_value If true tmp_npad_type will be used
+     */
+    void Connect(bool use_temporary_value = false);
 
     /// Sets the connected status to false
     void Disconnect();
@@ -319,9 +322,10 @@ private:
     /**
      * Checks the current controller type against the supported_style_tag
+     * @param use_temporary_value If true tmp_npad_type will be used
      * @return true if the controller is supported
      */
-    bool IsControllerSupported() const;
+    bool IsControllerSupported(bool use_temporary_value = false) const;
 
     /**
      * Updates the button status of the controller
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index fd491146fe..9e51c33ce0 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -120,7 +120,7 @@ static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x00402015);
 
 enum class KMemoryPermission : u8 {
     None = 0,
-    Mask = static_cast<u8>(~None),
+    All = static_cast<u8>(~None),
 
     Read = 1 << 0,
     Write = 1 << 1,
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 99982e5a3b..4da509224d 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -264,9 +264,9 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
     ASSERT(heap_last < stack_start || stack_last < heap_start);
     ASSERT(heap_last < kmap_start || kmap_last < heap_start);
 
-    current_heap_addr = heap_region_start;
-    heap_capacity = 0;
-    physical_memory_usage = 0;
+    current_heap_end = heap_region_start;
+    max_heap_size = 0;
+    mapped_physical_memory_size = 0;
     memory_pool = pool;
 
     page_table_impl.Resize(address_space_width, PageBits);
@@ -306,7 +306,7 @@ ResultCode KPageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std:
     KMemoryState state{};
     KMemoryPermission perm{};
     CASCADE_CODE(CheckMemoryState(&state, &perm, nullptr, src_addr, size, KMemoryState::All,
-                                  KMemoryState::Normal, KMemoryPermission::Mask,
+                                  KMemoryState::Normal, KMemoryPermission::All,
                                   KMemoryPermission::ReadAndWrite, KMemoryAttribute::Mask,
                                   KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
 
@@ -465,7 +465,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
 
     MapPhysicalMemory(page_linked_list, addr, end_addr);
 
-    physical_memory_usage += remaining_size;
+    mapped_physical_memory_size += remaining_size;
 
     const std::size_t num_pages{size / PageSize};
     block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
@@ -507,7 +507,7 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
     auto process{system.Kernel().CurrentProcess()};
     process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
-    physical_memory_usage -= mapped_size;
+    mapped_physical_memory_size -= mapped_size;
 
     return ResultSuccess;
 }
@@ -554,7 +554,7 @@ ResultCode KPageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
     KMemoryState src_state{};
     CASCADE_CODE(CheckMemoryState(
         &src_state, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
-        KMemoryState::FlagCanAlias, KMemoryPermission::Mask, KMemoryPermission::ReadAndWrite,
+        KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::ReadAndWrite,
         KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped));
 
     if (IsRegionMapped(dst_addr, size)) {
@@ -593,7 +593,7 @@ ResultCode KPageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) {
     KMemoryState src_state{};
     CASCADE_CODE(CheckMemoryState(
         &src_state, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias,
-        KMemoryState::FlagCanAlias, KMemoryPermission::Mask, KMemoryPermission::None,
+        KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::None,
         KMemoryAttribute::Mask, KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
 
     KMemoryPermission dst_perm{};
@@ -784,7 +784,7 @@ ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemo
     CASCADE_CODE(CheckMemoryState(
         &state, nullptr, &attribute, addr, size,
         KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
-        KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, KMemoryPermission::Mask,
+        KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, KMemoryPermission::All,
         KMemoryPermission::ReadAndWrite, KMemoryAttribute::Mask, KMemoryAttribute::None,
         KMemoryAttribute::IpcAndDeviceMapped));
 
@@ -806,6 +806,33 @@ ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
                      KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
 
     block_manager->Update(addr, size / PageSize, state, KMemoryPermission::ReadAndWrite);
+    return ResultSuccess;
+}
+
+ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
+                                           Svc::MemoryPermission svc_perm) {
+    const size_t num_pages = size / PageSize;
+
+    // Lock the table.
+    std::lock_guard lock{page_table_lock};
+
+    // Verify we can change the memory permission.
+    KMemoryState old_state;
+    KMemoryPermission old_perm;
+    R_TRY(this->CheckMemoryState(
+        std::addressof(old_state), std::addressof(old_perm), nullptr, addr, size,
+        KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, KMemoryPermission::None,
+        KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None));
+
+    // Determine new perm.
+    const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
+    R_SUCCEED_IF(old_perm == new_perm);
+
+    // Perform mapping operation.
+    R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
+
+    // Update the blocks.
+    block_manager->Update(addr, num_pages, old_state, new_perm, KMemoryAttribute::None);
     return ResultSuccess;
 }
 
@@ -832,61 +859,125 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, KMemoryA
     return ResultSuccess;
 }
 
-ResultCode KPageTable::SetHeapCapacity(std::size_t new_heap_capacity) {
+ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
+    // Lock the table.
     std::lock_guard lock{page_table_lock};
-    heap_capacity = new_heap_capacity;
-    return ResultSuccess;
-}
 
-ResultVal<VAddr> KPageTable::SetHeapSize(std::size_t size) {
+    // Only process page tables are allowed to set heap size.
+    ASSERT(!this->IsKernel());
 
-    if (size > heap_region_end - heap_region_start) {
-        return ResultOutOfMemory;
-    }
+    max_heap_size = size;
 
-    const u64 previous_heap_size{GetHeapSize()};
-
-    UNIMPLEMENTED_IF_MSG(previous_heap_size > size, "Heap shrink is unimplemented");
+    return ResultSuccess;
+}
 
-    // Increase the heap size
+ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
+    // Try to perform a reduction in heap, instead of an extension.
+    VAddr cur_address{};
+    std::size_t allocation_size{};
     {
-        std::lock_guard lock{page_table_lock};
-
-        const u64 delta{size - previous_heap_size};
-
-        // Reserve memory for the heap extension.
-        KScopedResourceReservation memory_reservation(
-            system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
-            delta);
-
-        if (!memory_reservation.Succeeded()) {
-            LOG_ERROR(Kernel, "Could not reserve heap extension of size {:X} bytes", delta);
-            return ResultLimitReached;
+        // Lock the table.
+        std::lock_guard lk(page_table_lock);
+
+        // Validate that setting heap size is possible at all.
+        R_UNLESS(!is_kernel, ResultOutOfMemory);
+        R_UNLESS(size <= static_cast<std::size_t>(heap_region_end - heap_region_start),
+                 ResultOutOfMemory);
+        R_UNLESS(size <= max_heap_size, ResultOutOfMemory);
+
+        if (size < GetHeapSize()) {
+            // The size being requested is less than the current size, so we need to free the end of
+            // the heap.
+
+            // Validate memory state.
+            std::size_t num_allocator_blocks;
+            R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
+                                         heap_region_start + size, GetHeapSize() - size,
+                                         KMemoryState::All, KMemoryState::Normal,
+                                         KMemoryPermission::All, KMemoryPermission::ReadAndWrite,
+                                         KMemoryAttribute::All, KMemoryAttribute::None));
+
+            // Unmap the end of the heap.
+            const auto num_pages = (GetHeapSize() - size) / PageSize;
+            R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None,
+                          OperationType::Unmap));
+
+            // Release the memory from the resource limit.
+            system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
+                LimitableResource::PhysicalMemory, num_pages * PageSize);
+
+            // Apply the memory block update.
+            block_manager->Update(heap_region_start + size, num_pages, KMemoryState::Free,
+                                  KMemoryPermission::None, KMemoryAttribute::None);
+
+            // Update the current heap end.
+            current_heap_end = heap_region_start + size;
+
+            // Set the output.
+            *out = heap_region_start;
+            return ResultSuccess;
+        } else if (size == GetHeapSize()) {
+            // The size requested is exactly the current size.
+            *out = heap_region_start;
+            return ResultSuccess;
+        } else {
+            // We have to allocate memory. Determine how much to allocate and where while the table
+            // is locked.
+            cur_address = current_heap_end;
+            allocation_size = size - GetHeapSize();
         }
+    }
 
-        KPageLinkedList page_linked_list;
-        const std::size_t num_pages{delta / PageSize};
+    // Reserve memory for the heap extension.
+    KScopedResourceReservation memory_reservation(
+        system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
+        allocation_size);
+    R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
 
-        CASCADE_CODE(
-            system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool));
+    // Allocate pages for the heap extension.
+    KPageLinkedList page_linked_list;
+    R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, allocation_size / PageSize,
+                                                   memory_pool));
 
-        if (IsRegionMapped(current_heap_addr, delta)) {
-            return ResultInvalidCurrentMemory;
+    // Map the pages.
+    {
+        // Lock the table.
+        std::lock_guard lk(page_table_lock);
+
+        // Ensure that the heap hasn't changed since we began executing.
+        ASSERT(cur_address == current_heap_end);
+
+        // Check the memory state.
+        std::size_t num_allocator_blocks{};
+        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), current_heap_end,
+                                     allocation_size, KMemoryState::All, KMemoryState::Free,
+                                     KMemoryPermission::None, KMemoryPermission::None,
+                                     KMemoryAttribute::None, KMemoryAttribute::None));
+
+        // Map the pages.
+        const auto num_pages = allocation_size / PageSize;
+        R_TRY(Operate(current_heap_end, num_pages, page_linked_list, OperationType::MapGroup));
+
+        // Clear all the newly allocated pages.
+        for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) {
+            std::memset(system.Memory().GetPointer(current_heap_end + (cur_page * PageSize)), 0,
+                        PageSize);
         }
 
-        CASCADE_CODE(
-            Operate(current_heap_addr, num_pages, page_linked_list, OperationType::MapGroup));
-
-        // Succeeded in allocation, commit the resource reservation
+        // We succeeded, so commit our memory reservation.
         memory_reservation.Commit();
 
-        block_manager->Update(current_heap_addr, num_pages, KMemoryState::Normal,
-                              KMemoryPermission::ReadAndWrite);
+        // Apply the memory block update.
+        block_manager->Update(current_heap_end, num_pages, KMemoryState::Normal,
+                              KMemoryPermission::ReadAndWrite, KMemoryAttribute::None);
 
-        current_heap_addr = heap_region_start + size;
-    }
+        // Update the current heap end.
+        current_heap_end = heap_region_start + size;
 
-    return heap_region_start;
+        // Set the output.
+        *out = heap_region_start;
+        return ResultSuccess;
+    }
 }
 
 ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
@@ -978,7 +1069,7 @@ ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
     if (const ResultCode result{CheckMemoryState(
             nullptr, &old_perm, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
-            KMemoryState::FlagCanCodeMemory, KMemoryPermission::Mask,
+            KMemoryState::FlagCanCodeMemory, KMemoryPermission::All,
             KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)};
         result.IsError()) {
         return result;
     }
@@ -1031,9 +1122,8 @@ ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
 
 bool KPageTable::IsRegionMapped(VAddr address, u64 size) {
     return CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
-                            KMemoryPermission::Mask, KMemoryPermission::None,
-                            KMemoryAttribute::Mask, KMemoryAttribute::None,
-                            KMemoryAttribute::IpcAndDeviceMapped)
+                            KMemoryPermission::All, KMemoryPermission::None, KMemoryAttribute::Mask,
+                            KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)
         .IsError();
 }
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index d784aa67e3..564410dca2 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -47,10 +47,11 @@ public:
     KMemoryInfo QueryInfo(VAddr addr);
     ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
     ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
+    ResultCode SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
     ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, KMemoryAttribute mask,
                                   KMemoryAttribute value);
-    ResultCode SetHeapCapacity(std::size_t new_heap_capacity);
-    ResultVal<VAddr> SetHeapSize(std::size_t size);
+    ResultCode SetMaxHeapSize(std::size_t size);
+    ResultCode SetHeapSize(VAddr* out, std::size_t size);
     ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
                                           bool is_map_only, VAddr region_start,
                                           std::size_t region_num_pages, KMemoryState state,
@@ -182,14 +183,15 @@ public:
     constexpr VAddr GetAliasCodeRegionSize() const {
         return alias_code_region_end - alias_code_region_start;
     }
+    size_t GetNormalMemorySize() {
+        std::lock_guard lk(page_table_lock);
+        return GetHeapSize() + mapped_physical_memory_size;
+    }
     constexpr std::size_t GetAddressSpaceWidth() const {
         return address_space_width;
     }
-    constexpr std::size_t GetHeapSize() {
-        return current_heap_addr - heap_region_start;
-    }
-    constexpr std::size_t GetTotalHeapSize() {
-        return GetHeapSize() + physical_memory_usage;
+    constexpr std::size_t GetHeapSize() const {
+        return current_heap_end - heap_region_start;
     }
     constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
         return address_space_start <= address && address + size - 1 <= address_space_end - 1;
@@ -269,10 +271,8 @@ private:
     VAddr code_region_end{};
     VAddr alias_code_region_start{};
     VAddr alias_code_region_end{};
-    VAddr current_heap_addr{};
-    std::size_t heap_capacity{};
-    std::size_t physical_memory_usage{};
 
+    std::size_t mapped_physical_memory_size{};
     std::size_t max_heap_size{};
     std::size_t max_physical_memory_size{};
     std::size_t address_space_width{};
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index aee3139957..73f8bc4fe7 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -172,7 +172,7 @@ void KProcess::DecrementThreadCount() {
 
 u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
     const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
-                       page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
+                       page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size +
                        main_thread_stack_size};
     if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
         capacity != pool_size) {
@@ -189,7 +189,7 @@ u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
 }
 
 u64 KProcess::GetTotalPhysicalMemoryUsed() const {
-    return image_size + main_thread_stack_size + page_table->GetTotalHeapSize() +
+    return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() +
           GetSystemResourceSize();
 }
 
@@ -410,8 +410,8 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
     resource_limit->Reserve(LimitableResource::Threads, 1);
     resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
 
-    const std::size_t heap_capacity{memory_usage_capacity - main_thread_stack_size - image_size};
-    ASSERT(!page_table->SetHeapCapacity(heap_capacity).IsError());
+    const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
+    ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError());
 
     ChangeStatus(ProcessStatus::Running);
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 752592e2ef..b8c993748f 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -26,6 +26,7 @@
 #include "core/hle/kernel/k_resource_limit.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_system_control.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/k_thread_queue.h"
 #include "core/hle/kernel/kernel.h"
@@ -50,6 +51,7 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
                                  VAddr entry_point, u64 arg) {
     context = {};
     context.cpu_registers[0] = arg;
+    context.cpu_registers[18] = Kernel::KSystemControl::GenerateRandomU64() | 1;
     context.pc = entry_point;
     context.sp = stack_top;
     // TODO(merry): Perform a hardware test to determine the below value.
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 37d67b72e8..63e2dff19f 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -135,24 +135,15 @@ enum class ResourceLimitValueType {
 } // Anonymous namespace
 
 /// Set the process heap to a given Size. It can both extend and shrink the heap.
-static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_size) {
-    LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size);
+static ResultCode SetHeapSize(Core::System& system, VAddr* out_address, u64 size) {
+    LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", size);
 
-    // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 8GB.
-    if ((heap_size % 0x200000) != 0) {
-        LOG_ERROR(Kernel_SVC, "The heap size is not a multiple of 2MB, heap_size=0x{:016X}",
-                  heap_size);
-        return ResultInvalidSize;
-    }
-
-    if (heap_size >= 0x200000000) {
-        LOG_ERROR(Kernel_SVC, "The heap size is not less than 8GB, heap_size=0x{:016X}", heap_size);
-        return ResultInvalidSize;
-    }
-
-    auto& page_table{system.Kernel().CurrentProcess()->PageTable()};
+    // Validate size.
+    R_UNLESS(Common::IsAligned(size, HeapSizeAlignment), ResultInvalidSize);
+    R_UNLESS(size < MainMemorySizeMax, ResultInvalidSize);
 
-    CASCADE_RESULT(*heap_addr, page_table.SetHeapSize(heap_size));
+    // Set the heap size.
+    R_TRY(system.Kernel().CurrentProcess()->PageTable().SetHeapSize(out_address, size));
 
     return ResultSuccess;
 }
@@ -164,6 +155,36 @@ static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_s
     return result;
 }
 
+constexpr bool IsValidSetMemoryPermission(MemoryPermission perm) {
+    switch (perm) {
+    case MemoryPermission::None:
+    case MemoryPermission::Read:
+    case MemoryPermission::ReadWrite:
+        return true;
+    default:
+        return false;
+    }
+}
+
+static ResultCode SetMemoryPermission(Core::System& system, VAddr address, u64 size,
+                                      MemoryPermission perm) {
+    // Validate address / size.
+    R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
+    R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
+    R_UNLESS(size > 0, ResultInvalidSize);
+    R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
+
+    // Validate the permission.
+    R_UNLESS(IsValidSetMemoryPermission(perm), ResultInvalidNewMemoryPermission);
+
+    // Validate that the region is in range for the current process.
+    auto& page_table = system.Kernel().CurrentProcess()->PageTable();
+    R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Set the memory attribute.
+    return page_table.SetMemoryPermission(address, size, perm);
+}
+
 static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask,
                                      u32 attribute) {
     LOG_DEBUG(Kernel_SVC,
@@ -2724,7 +2745,7 @@ static const FunctionDef SVC_Table_32[] = {
 
 static const FunctionDef SVC_Table_64[] = {
     {0x00, nullptr, "Unknown"},
     {0x01, SvcWrap64<SetHeapSize>, "SetHeapSize"},
-    {0x02, nullptr, "SetMemoryPermission"},
+    {0x02, SvcWrap64<SetMemoryPermission>, "SetMemoryPermission"},
     {0x03, SvcWrap64<SetMemoryAttribute>, "SetMemoryAttribute"},
     {0x04, SvcWrap64<MapMemory>, "MapMemory"},
     {0x05, SvcWrap64<UnmapMemory>, "UnmapMemory"},
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h
index 60ea2c4055..25de6e4378 100644
--- a/src/core/hle/kernel/svc_common.h
+++ b/src/core/hle/kernel/svc_common.h
@@ -5,6 +5,7 @@
 #pragma once
 
 #include "common/common_types.h"
+#include "common/literals.h"
 
 namespace Kernel {
 using Handle = u32;
@@ -12,9 +13,13 @@ using Handle = u32;
 
 namespace Kernel::Svc {
 
+using namespace Common::Literals;
+
 constexpr s32 ArgumentHandleCountMax = 0x40;
 constexpr u32 HandleWaitMask{1u << 30};
 
+constexpr inline std::size_t HeapSizeAlignment = 2_MiB;
+
 constexpr inline Handle InvalidHandle = Handle(0);
 
 enum PseudoHandle : Handle {
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 86255fe6d3..a60adfcabf 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -249,6 +249,14 @@ void SvcWrap64(Core::System& system) {
         func(system, Param(system, 0), Param(system, 1), static_cast<u32>(Param(system, 2))).raw);
 }
 
+// Used by SetMemoryPermission
+template <ResultCode func(Core::System&, u64, u64, Svc::MemoryPermission)>
+void SvcWrap64(Core::System& system) {
+    FuncReturn(system, func(system, Param(system, 0), Param(system, 1),
+                            static_cast<Svc::MemoryPermission>(Param(system, 2)))
+                           .raw);
+}
+
 // Used by MapSharedMemory
 template <ResultCode func(Core::System&, Handle, u64, u64, Svc::MemoryPermission)>
 void SvcWrap64(Core::System& system) {
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
index c0f5fc4024..7434a1f92f 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
@@ -86,7 +86,7 @@ void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr, Scal
     }
     switch (attr) {
     case IR::Attribute::PrimitiveId:
-        ctx.Add("MOV.S {}.x,primitive.id;", inst);
+        ctx.Add("MOV.F {}.x,primitive.id;", inst);
         break;
     case IR::Attribute::PositionX:
     case IR::Attribute::PositionY:
@@ -113,13 +113,13 @@ void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr, Scal
         ctx.Add("MOV.F {}.x,vertex.tesscoord.{};", inst, swizzle);
         break;
     case IR::Attribute::InstanceId:
-        ctx.Add("MOV.S {}.x,{}.instance;", inst, ctx.attrib_name);
+        ctx.Add("MOV.F {}.x,{}.instance;", inst, ctx.attrib_name);
         break;
     case IR::Attribute::VertexId:
-        ctx.Add("MOV.S {}.x,{}.id;", inst, ctx.attrib_name);
+        ctx.Add("MOV.F {}.x,{}.id;", inst, ctx.attrib_name);
         break;
     case IR::Attribute::FrontFace:
-        ctx.Add("CMP.S {}.x,{}.facing.x,0,-1;", inst, ctx.attrib_name);
+        ctx.Add("CMP.F {}.x,{}.facing.x,0,-1;", inst, ctx.attrib_name);
         break;
     default:
         throw NotImplementedException("Get attribute {}", attr);
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.cpp b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
index 6ce7ed12a8..50918317f9 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
@@ -30,11 +30,20 @@ struct FuncTraits<ReturnType_ (*)(Args...)> {
     using ArgType = std::tuple_element_t<I, std::tuple<Args...>>;
 };
 
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4702) // Ignore unreachable code warning
+#endif
+
 template <auto func, typename... Args>
 void SetDefinition(EmitContext& ctx, IR::Inst* inst, Args... args) {
     inst->SetDefinition<Id>(func(ctx, std::forward<Args>(args)...));
 }
 
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
 template <typename ArgType>
 ArgType Arg(EmitContext& ctx, const IR::Value& arg) {
     if constexpr (std::is_same_v<ArgType, Id>) {
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
index 14e6522f2e..3c1f79a271 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -1047,7 +1047,7 @@ bool Image::ScaleDown(bool ignore) {
 }
 
 ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewInfo& info,
-                     ImageId image_id_, Image& image)
+                     ImageId image_id_, Image& image, const SlotVector<Image>&)
     : VideoCommon::ImageViewBase{info, image.info, image_id_}, views{runtime.null_image_views} {
     const Device& device = runtime.device;
     if (True(image.flags & ImageFlagBits::Converted)) {
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h
index dbf1df79cd..7f425631f5 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.h
+++ b/src/video_core/renderer_opengl/gl_texture_cache.h
@@ -36,6 +36,7 @@ using VideoCommon::ImageViewType;
 using VideoCommon::NUM_RT;
 using VideoCommon::Region2D;
 using VideoCommon::RenderTargets;
+using VideoCommon::SlotVector;
 
 struct ImageBufferMap {
     ~ImageBufferMap();
@@ -234,7 +235,8 @@ class ImageView : public VideoCommon::ImageViewBase {
     friend Image;
 
 public:
-    explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageViewInfo&, ImageId, Image&);
+    explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageViewInfo&, ImageId, Image&,
+                       const SlotVector<Image>&);
     explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo&,
                        const VideoCommon::ImageViewInfo&, GPUVAddr);
     explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo& info,
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 1941170cb3..c3050887cf 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -1473,8 +1473,7 @@ bool Image::BlitScaleHelper(bool scale_up) {
 ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewInfo& info,
                      ImageId image_id_, Image& image)
     : VideoCommon::ImageViewBase{info, image.info, image_id_}, device{&runtime.device},
-      src_image{&image}, image_handle{image.Handle()},
-      samples(ConvertSampleCount(image.info.num_samples)) {
+      image_handle{image.Handle()}, samples(ConvertSampleCount(image.info.num_samples)) {
     using Shader::TextureType;
     const VkImageAspectFlags aspect_mask = ImageViewAspectMask(info);
@@ -1557,6 +1556,12 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
     }
 }
 
+ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewInfo& info,
+                     ImageId image_id_, Image& image, const SlotVector<Image>& slot_imgs)
+    : ImageView{runtime, info, image_id_, image} {
+    slot_images = &slot_imgs;
+}
+
 ImageView::ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo& info,
                      const VideoCommon::ImageViewInfo& view_info, GPUVAddr gpu_addr_)
     : VideoCommon::ImageViewBase{info, view_info}, gpu_addr{gpu_addr_},
@@ -1613,10 +1618,12 @@ VkImageView ImageView::StorageView(Shader::TextureType texture_type,
 }
 
 bool ImageView::IsRescaled() const noexcept {
-    if (!src_image) {
+    if (!slot_images) {
         return false;
     }
-    return src_image->IsRescaled();
+    const auto& slots = *slot_images;
+    const auto& src_image = slots[image_id];
+    return src_image.IsRescaled();
 }
 
 vk::ImageView ImageView::MakeView(VkFormat vk_format, VkImageAspectFlags aspect_mask) {
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index c592f2666d..2f12be78b3 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -23,6 +23,7 @@ using VideoCommon::ImageId;
 using VideoCommon::NUM_RT;
 using VideoCommon::Region2D;
 using VideoCommon::RenderTargets;
+using VideoCommon::SlotVector;
 using VideoCore::Surface::PixelFormat;
 
 class ASTCDecoderPass;
@@ -170,6 +171,8 @@ private:
 class ImageView : public VideoCommon::ImageViewBase {
 public:
     explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageViewInfo&, ImageId, Image&);
+    explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageViewInfo&, ImageId, Image&,
+                       const SlotVector<Image>&);
     explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo&,
                        const VideoCommon::ImageViewInfo&, GPUVAddr);
     explicit ImageView(TextureCacheRuntime&, const VideoCommon::NullImageViewParams&);
@@ -226,7 +229,7 @@ private:
     [[nodiscard]] vk::ImageView MakeView(VkFormat vk_format, VkImageAspectFlags aspect_mask);
 
     const Device* device = nullptr;
-    const Image* src_image{};
+    const SlotVector<Image>* slot_images = nullptr;
 
     std::array<vk::ImageView, Shader::NUM_TEXTURE_TYPES> image_views;
     std::unique_ptr<StorageViews> storage_views;
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 2e19fced24..b494152b83 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -1397,7 +1397,8 @@ ImageViewId TextureCache<P>::FindOrEmplaceImageView(ImageId image_id, const Imag
     if (const ImageViewId image_view_id = image.FindView(info); image_view_id) {
         return image_view_id;
     }
-    const ImageViewId image_view_id = slot_image_views.insert(runtime, info, image_id, image);
+    const ImageViewId image_view_id =
+        slot_image_views.insert(runtime, info, image_id, image, slot_images);
     image.InsertView(info, image_view_id);
     return image_view_id;
 }
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index 7bd31b2111..d8e19cb2fe 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -364,14 +364,14 @@ template <u32 GOB_EXTENT>
 [[nodiscard]] std::optional<SubresourceExtent> ResolveOverlapRightAddress2D(
     const ImageInfo& new_info, GPUVAddr gpu_addr, const ImageBase& overlap, bool strict_size) {
-    const u32 layer_stride = new_info.layer_stride;
-    const s32 new_size = layer_stride * new_info.resources.layers;
-    const s32 diff = static_cast<s32>(overlap.gpu_addr - gpu_addr);
+    const u64 layer_stride = new_info.layer_stride;
+    const u64 new_size = layer_stride * new_info.resources.layers;
+    const u64 diff = overlap.gpu_addr - gpu_addr;
     if (diff > new_size) {
         return std::nullopt;
     }
-    const s32 base_layer = diff / layer_stride;
-    const s32 mip_offset = diff % layer_stride;
+    const s32 base_layer = static_cast<s32>(diff / layer_stride);
+    const s32 mip_offset = static_cast<s32>(diff % layer_stride);
     const std::array offsets = CalculateMipLevelOffsets(new_info);
     const auto end = offsets.begin() + new_info.resources.levels;
     const auto it = std::find(offsets.begin(), end, static_cast<u32>(mip_offset));
diff --git a/src/yuzu/applets/qt_controller.cpp b/src/yuzu/applets/qt_controller.cpp
index c6222b571f..d63193131d 100644
--- a/src/yuzu/applets/qt_controller.cpp
+++ b/src/yuzu/applets/qt_controller.cpp
@@ -33,7 +33,7 @@ void UpdateController(Core::HID::EmulatedController* controller,
     }
     controller->SetNpadStyleIndex(controller_type);
     if (connected) {
-        controller->Connect();
+        controller->Connect(true);
     }
 }
diff --git a/src/yuzu/configuration/configure_input_player.cpp b/src/yuzu/configuration/configure_input_player.cpp
index 8a8be8e406..cb61637020 100644
--- a/src/yuzu/configuration/configure_input_player.cpp
+++ b/src/yuzu/configuration/configure_input_player.cpp
@@ -599,11 +599,11 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i
     if (is_connected) {
         if (type == Core::HID::NpadStyleIndex::Handheld) {
             emulated_controller_p1->Disconnect();
-            emulated_controller_handheld->Connect();
+            emulated_controller_handheld->Connect(true);
             emulated_controller = emulated_controller_handheld;
         } else {
             emulated_controller_handheld->Disconnect();
-            emulated_controller_p1->Connect();
+            emulated_controller_p1->Connect(true);
             emulated_controller = emulated_controller_p1;
         }
     }
@@ -718,7 +718,7 @@ void ConfigureInputPlayer::LoadConfiguration() {
 void ConfigureInputPlayer::ConnectPlayer(bool connected) {
     ui->groupConnectedController->setChecked(connected);
     if (connected) {
-        emulated_controller->Connect();
+        emulated_controller->Connect(true);
     } else {
         emulated_controller->Disconnect();
     }
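
The reworked svcSetHeapSize in src/core/hle/kernel/svc.cpp above replaces the old inline 2 MB / 8 GB checks with R_UNLESS guards against Svc::HeapSizeAlignment and MainMemorySizeMax before the page table is touched. The short, self-contained C++ sketch below only mirrors that validation order for illustration; it is not part of the commit, and the 8 GiB cap here is a stand-in taken from the removed 0x200000000 check, since MainMemorySizeMax's actual value is not shown in this diff.

#include <cstddef>
#include <cstdint>

// Illustrative restatement of the size checks in the rewritten SetHeapSize SVC.
// kHeapSizeAlignment mirrors Svc::HeapSizeAlignment (2 MiB); kMainMemorySizeMax is an
// assumed stand-in matching the 0x200000000 (8 GiB) bound that the old code enforced.
constexpr std::size_t kHeapSizeAlignment = std::size_t{2} * 1024 * 1024;
constexpr std::uint64_t kMainMemorySizeMax = 0x200000000ULL;

constexpr bool IsAligned(std::uint64_t value, std::size_t alignment) {
    return (value & (alignment - 1)) == 0;
}

// A requested heap size passes only if it is 2 MiB aligned and below the cap; anything
// else is rejected with ResultInvalidSize before KPageTable::SetHeapSize is reached.
constexpr bool IsValidHeapSize(std::uint64_t size) {
    return IsAligned(size, kHeapSizeAlignment) && size < kMainMemorySizeMax;
}

static_assert(IsValidHeapSize(6 * 1024 * 1024));  // multiple of 2 MiB, below the cap
static_assert(!IsValidHeapSize(3 * 1024 * 1024)); // not 2 MiB aligned
static_assert(!IsValidHeapSize(0x200000000ULL));  // at the cap, rejected

Sizes that pass these checks are then handed to KPageTable::SetHeapSize, whose new implementation in the diff handles both shrinking (unmap and release) and extension (reserve, allocate, map, zero-fill).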