Diffstat (limited to 'src')
238 files changed, 7375 insertions, 2762 deletions
diff --git a/src/audio_core/audio_renderer.cpp b/src/audio_core/audio_renderer.cpp index 9a0939883c..da50a0bbcc 100644 --- a/src/audio_core/audio_renderer.cpp +++ b/src/audio_core/audio_renderer.cpp @@ -73,13 +73,15 @@ private: EffectInStatus info{}; }; AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, AudioRendererParameter params, - Kernel::SharedPtr<Kernel::WritableEvent> buffer_event) + Kernel::SharedPtr<Kernel::WritableEvent> buffer_event, + std::size_t instance_number) : worker_params{params}, buffer_event{buffer_event}, voices(params.voice_count), effects(params.effect_count) { audio_out = std::make_unique<AudioCore::AudioOut>(); stream = audio_out->OpenStream(core_timing, STREAM_SAMPLE_RATE, STREAM_NUM_CHANNELS, - "AudioRenderer", [=]() { buffer_event->Signal(); }); + fmt::format("AudioRenderer-Instance{}", instance_number), + [=]() { buffer_event->Signal(); }); audio_out->StartStream(stream); QueueMixedBuffer(0); @@ -217,13 +219,15 @@ std::vector<s16> AudioRenderer::VoiceState::DequeueSamples(std::size_t sample_co if (offset == samples.size()) { offset = 0; - if (!wave_buffer.is_looping) { + if (!wave_buffer.is_looping && wave_buffer.buffer_sz) { SetWaveIndex(wave_index + 1); } - out_status.wave_buffer_consumed++; + if (wave_buffer.buffer_sz) { + out_status.wave_buffer_consumed++; + } - if (wave_buffer.end_of_stream) { + if (wave_buffer.end_of_stream || wave_buffer.buffer_sz == 0) { info.play_state = PlayState::Paused; } } diff --git a/src/audio_core/audio_renderer.h b/src/audio_core/audio_renderer.h index b2e5d336cf..45afbe7598 100644 --- a/src/audio_core/audio_renderer.h +++ b/src/audio_core/audio_renderer.h @@ -215,7 +215,8 @@ static_assert(sizeof(UpdateDataHeader) == 0x40, "UpdateDataHeader has wrong size class AudioRenderer { public: AudioRenderer(Core::Timing::CoreTiming& core_timing, AudioRendererParameter params, - Kernel::SharedPtr<Kernel::WritableEvent> buffer_event); + Kernel::SharedPtr<Kernel::WritableEvent> buffer_event, + std::size_t instance_number); ~AudioRenderer(); std::vector<u8> UpdateAudioRenderer(const std::vector<u8>& input_params); diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 2554add28e..01abdb3bb5 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -55,7 +55,10 @@ add_custom_command(OUTPUT scm_rev.cpp "${VIDEO_CORE}/shader/decode/register_set_predicate.cpp" "${VIDEO_CORE}/shader/decode/shift.cpp" "${VIDEO_CORE}/shader/decode/video.cpp" + "${VIDEO_CORE}/shader/decode/warp.cpp" "${VIDEO_CORE}/shader/decode/xmad.cpp" + "${VIDEO_CORE}/shader/control_flow.cpp" + "${VIDEO_CORE}/shader/control_flow.h" "${VIDEO_CORE}/shader/decode.cpp" "${VIDEO_CORE}/shader/node.h" "${VIDEO_CORE}/shader/node_helper.cpp" diff --git a/src/common/alignment.h b/src/common/alignment.h index 617b14d9b7..88d5d3a65f 100644 --- a/src/common/alignment.h +++ b/src/common/alignment.h @@ -3,6 +3,7 @@ #pragma once #include <cstddef> +#include <memory> #include <type_traits> namespace Common { @@ -37,4 +38,63 @@ constexpr bool IsWordAligned(T value) { return (value & 0b11) == 0; } +template <typename T, std::size_t Align = 16> +class AlignmentAllocator { +public: + using value_type = T; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + + using pointer = T*; + using const_pointer = const T*; + + using reference = T&; + using const_reference = const T&; + +public: + pointer address(reference r) noexcept { + return std::addressof(r); + } + + const_pointer address(const_reference r) const noexcept { + 
return std::addressof(r); + } + + pointer allocate(size_type n) { + return static_cast<pointer>(::operator new (n, std::align_val_t{Align})); + } + + void deallocate(pointer p, size_type) { + ::operator delete (p, std::align_val_t{Align}); + } + + void construct(pointer p, const value_type& wert) { + new (p) value_type(wert); + } + + void destroy(pointer p) { + p->~value_type(); + } + + size_type max_size() const noexcept { + return size_type(-1) / sizeof(value_type); + } + + template <typename T2> + struct rebind { + using other = AlignmentAllocator<T2, Align>; + }; + + bool operator!=(const AlignmentAllocator<T, Align>& other) const noexcept { + return !(*this == other); + } + + // Returns true if and only if storage allocated from *this + // can be deallocated from other, and vice versa. + // Always returns true for stateless allocators. + bool operator==(const AlignmentAllocator<T, Align>& other) const noexcept { + return true; + } +}; + } // namespace Common diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 30eb9d82e3..877a9e3530 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -70,6 +70,8 @@ add_library(core STATIC file_sys/sdmc_factory.h file_sys/submission_package.cpp file_sys/submission_package.h + file_sys/system_archive/mii_model.cpp + file_sys/system_archive/mii_model.h file_sys/system_archive/ng_word.cpp file_sys/system_archive/ng_word.h file_sys/system_archive/system_archive.cpp @@ -111,6 +113,8 @@ add_library(core STATIC frontend/scope_acquire_window_context.h gdbstub/gdbstub.cpp gdbstub/gdbstub.h + hardware_interrupt_manager.cpp + hardware_interrupt_manager.h hle/ipc.h hle/ipc_helpers.h hle/kernel/address_arbiter.cpp @@ -208,6 +212,8 @@ add_library(core STATIC hle/service/aoc/aoc_u.h hle/service/apm/apm.cpp hle/service/apm/apm.h + hle/service/apm/controller.cpp + hle/service/apm/controller.h hle/service/apm/interface.cpp hle/service/apm/interface.h hle/service/audio/audctl.cpp @@ -293,6 +299,7 @@ add_library(core STATIC hle/service/hid/irs.h hle/service/hid/xcd.cpp hle/service/hid/xcd.h + hle/service/hid/errors.h hle/service/hid/controllers/controller_base.cpp hle/service/hid/controllers/controller_base.h hle/service/hid/controllers/debug_pad.cpp @@ -369,6 +376,7 @@ add_library(core STATIC hle/service/nvdrv/devices/nvmap.h hle/service/nvdrv/interface.cpp hle/service/nvdrv/interface.h + hle/service/nvdrv/nvdata.h hle/service/nvdrv/nvdrv.cpp hle/service/nvdrv/nvdrv.h hle/service/nvdrv/nvmemp.cpp diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h index c6691a8e19..45e94e6257 100644 --- a/src/core/arm/arm_interface.h +++ b/src/core/arm/arm_interface.h @@ -44,13 +44,6 @@ public: /// Step CPU by one instruction virtual void Step() = 0; - /// Maps a backing memory region for the CPU - virtual void MapBackingMemory(VAddr address, std::size_t size, u8* memory, - Kernel::VMAPermission perms) = 0; - - /// Unmaps a region of memory that was previously mapped using MapBackingMemory - virtual void UnmapMemory(VAddr address, std::size_t size) = 0; - /// Clear all instruction cache virtual void ClearInstructionCache() = 0; diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp index 44307fa19d..f1506b3728 100644 --- a/src/core/arm/dynarmic/arm_dynarmic.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic.cpp @@ -177,15 +177,6 @@ ARM_Dynarmic::ARM_Dynarmic(System& system, ExclusiveMonitor& exclusive_monitor, ARM_Dynarmic::~ARM_Dynarmic() = default; -void ARM_Dynarmic::MapBackingMemory(u64 address, 
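A minimal usage sketch of the new Common::AlignmentAllocator introduced above in common/alignment.h; the AlignedBuffer alias and CheckAlignment function are illustrative names rather than part of the patch, and a C++17 toolchain with aligned operator new is assumed:

    #include <cassert>
    #include <cstdint>
    #include <vector>
    #include "common/alignment.h"

    // A byte vector whose backing storage is obtained from
    // ::operator new(size, std::align_val_t{256}), i.e. aligned to 256 bytes.
    using AlignedBuffer = std::vector<std::uint8_t, Common::AlignmentAllocator<std::uint8_t, 256>>;

    void CheckAlignment() {
        AlignedBuffer buffer(0x1000); // one page of zero-initialized bytes
        assert(reinterpret_cast<std::uintptr_t>(buffer.data()) % 256 == 0);
    }

This is the same pattern the patch later uses for Kernel::PhysicalMemory, which aliases std::vector<u8, Common::AlignmentAllocator<u8, 256>> so that host backing memory meets the 256-byte alignment the GPU requires.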
std::size_t size, u8* memory, - Kernel::VMAPermission perms) { - inner_unicorn.MapBackingMemory(address, size, memory, perms); -} - -void ARM_Dynarmic::UnmapMemory(u64 address, std::size_t size) { - inner_unicorn.UnmapMemory(address, size); -} - void ARM_Dynarmic::SetPC(u64 pc) { jit->SetPC(pc); } diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h index b701e97a32..504d46c689 100644 --- a/src/core/arm/dynarmic/arm_dynarmic.h +++ b/src/core/arm/dynarmic/arm_dynarmic.h @@ -23,9 +23,6 @@ public: ARM_Dynarmic(System& system, ExclusiveMonitor& exclusive_monitor, std::size_t core_index); ~ARM_Dynarmic() override; - void MapBackingMemory(VAddr address, std::size_t size, u8* memory, - Kernel::VMAPermission perms) override; - void UnmapMemory(u64 address, std::size_t size) override; void SetPC(u64 pc) override; u64 GetPC() const override; u64 GetReg(int index) const override; diff --git a/src/core/arm/unicorn/arm_unicorn.cpp b/src/core/arm/unicorn/arm_unicorn.cpp index 4e07fe8b5c..97d5c2a8ad 100644 --- a/src/core/arm/unicorn/arm_unicorn.cpp +++ b/src/core/arm/unicorn/arm_unicorn.cpp @@ -50,11 +50,14 @@ static void CodeHook(uc_engine* uc, uint64_t address, uint32_t size, void* user_ static bool UnmappedMemoryHook(uc_engine* uc, uc_mem_type type, u64 addr, int size, u64 value, void* user_data) { + auto* const system = static_cast<System*>(user_data); + ARM_Interface::ThreadContext ctx{}; - Core::CurrentArmInterface().SaveContext(ctx); + system->CurrentArmInterface().SaveContext(ctx); ASSERT_MSG(false, "Attempted to read from unmapped memory: 0x{:X}, pc=0x{:X}, lr=0x{:X}", addr, ctx.pc, ctx.cpu_registers[30]); - return {}; + + return false; } ARM_Unicorn::ARM_Unicorn(System& system) : system{system} { @@ -65,7 +68,7 @@ ARM_Unicorn::ARM_Unicorn(System& system) : system{system} { uc_hook hook{}; CHECKED(uc_hook_add(uc, &hook, UC_HOOK_INTR, (void*)InterruptHook, this, 0, -1)); - CHECKED(uc_hook_add(uc, &hook, UC_HOOK_MEM_INVALID, (void*)UnmappedMemoryHook, this, 0, -1)); + CHECKED(uc_hook_add(uc, &hook, UC_HOOK_MEM_INVALID, (void*)UnmappedMemoryHook, &system, 0, -1)); if (GDBStub::IsServerEnabled()) { CHECKED(uc_hook_add(uc, &hook, UC_HOOK_CODE, (void*)CodeHook, this, 0, -1)); last_bkpt_hit = false; @@ -76,15 +79,6 @@ ARM_Unicorn::~ARM_Unicorn() { CHECKED(uc_close(uc)); } -void ARM_Unicorn::MapBackingMemory(VAddr address, std::size_t size, u8* memory, - Kernel::VMAPermission perms) { - CHECKED(uc_mem_map_ptr(uc, address, size, static_cast<u32>(perms), memory)); -} - -void ARM_Unicorn::UnmapMemory(VAddr address, std::size_t size) { - CHECKED(uc_mem_unmap(uc, address, size)); -} - void ARM_Unicorn::SetPC(u64 pc) { CHECKED(uc_reg_write(uc, UC_ARM64_REG_PC, &pc)); } diff --git a/src/core/arm/unicorn/arm_unicorn.h b/src/core/arm/unicorn/arm_unicorn.h index 34e974b4db..fe2ffd70ca 100644 --- a/src/core/arm/unicorn/arm_unicorn.h +++ b/src/core/arm/unicorn/arm_unicorn.h @@ -18,9 +18,6 @@ public: explicit ARM_Unicorn(System& system); ~ARM_Unicorn() override; - void MapBackingMemory(VAddr address, std::size_t size, u8* memory, - Kernel::VMAPermission perms) override; - void UnmapMemory(VAddr address, std::size_t size) override; void SetPC(u64 pc) override; u64 GetPC() const override; u64 GetReg(int index) const override; diff --git a/src/core/core.cpp b/src/core/core.cpp index 262411db85..20d64f3b07 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -19,12 +19,14 @@ #include "core/file_sys/vfs_concat.h" #include "core/file_sys/vfs_real.h" #include 
"core/gdbstub/gdbstub.h" +#include "core/hardware_interrupt_manager.h" #include "core/hle/kernel/client_port.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/service/am/applets/applets.h" +#include "core/hle/service/apm/controller.h" #include "core/hle/service/glue/manager.h" #include "core/hle/service/service.h" #include "core/hle/service/sm/sm.h" @@ -143,14 +145,14 @@ struct System::Impl { telemetry_session = std::make_unique<Core::TelemetrySession>(); service_manager = std::make_shared<Service::SM::ServiceManager>(); - Service::Init(service_manager, system, *virtual_filesystem); + Service::Init(service_manager, system); GDBStub::Init(); renderer = VideoCore::CreateRenderer(emu_window, system); if (!renderer->Init()) { return ResultStatus::ErrorVideoCore; } - + interrupt_manager = std::make_unique<Core::Hardware::InterruptManager>(system); gpu_core = VideoCore::CreateGPU(system); is_powered_on = true; @@ -297,6 +299,7 @@ struct System::Impl { std::unique_ptr<VideoCore::RendererBase> renderer; std::unique_ptr<Tegra::GPU> gpu_core; std::shared_ptr<Tegra::DebugContext> debug_context; + std::unique_ptr<Core::Hardware::InterruptManager> interrupt_manager; CpuCoreManager cpu_core_manager; bool is_powered_on = false; @@ -306,6 +309,9 @@ struct System::Impl { /// Frontend applets Service::AM::Applets::AppletManager applet_manager; + /// APM (Performance) services + Service::APM::Controller apm_controller{core_timing}; + /// Glue services Service::Glue::ARPManager arp_manager; @@ -440,6 +446,14 @@ const Tegra::GPU& System::GPU() const { return *impl->gpu_core; } +Core::Hardware::InterruptManager& System::InterruptManager() { + return *impl->interrupt_manager; +} + +const Core::Hardware::InterruptManager& System::InterruptManager() const { + return *impl->interrupt_manager; +} + VideoCore::RendererBase& System::Renderer() { return *impl->renderer; } @@ -568,6 +582,14 @@ const Service::Glue::ARPManager& System::GetARPManager() const { return impl->arp_manager; } +Service::APM::Controller& System::GetAPMController() { + return impl->apm_controller; +} + +const Service::APM::Controller& System::GetAPMController() const { + return impl->apm_controller; +} + System::ResultStatus System::Init(Frontend::EmuWindow& emu_window) { return impl->Init(*this, emu_window); } diff --git a/src/core/core.h b/src/core/core.h index 70adb7af96..0138d93b07 100644 --- a/src/core/core.h +++ b/src/core/core.h @@ -43,6 +43,10 @@ struct AppletFrontendSet; class AppletManager; } // namespace AM::Applets +namespace APM { +class Controller; +} + namespace Glue { class ARPManager; } @@ -66,6 +70,10 @@ namespace Core::Timing { class CoreTiming; } +namespace Core::Hardware { +class InterruptManager; +} + namespace Core { class ARM_Interface; @@ -230,6 +238,12 @@ public: /// Provides a constant reference to the core timing instance. const Timing::CoreTiming& CoreTiming() const; + /// Provides a reference to the interrupt manager instance. + Core::Hardware::InterruptManager& InterruptManager(); + + /// Provides a constant reference to the interrupt manager instance. + const Core::Hardware::InterruptManager& InterruptManager() const; + /// Provides a reference to the kernel instance. 
Kernel::KernelCore& Kernel(); @@ -296,6 +310,10 @@ public: const Service::Glue::ARPManager& GetARPManager() const; + Service::APM::Controller& GetAPMController(); + + const Service::APM::Controller& GetAPMController() const; + private: System(); @@ -319,10 +337,6 @@ private: static System s_instance; }; -inline ARM_Interface& CurrentArmInterface() { - return System::GetInstance().CurrentArmInterface(); -} - inline Kernel::Process* CurrentProcess() { return System::GetInstance().CurrentProcess(); } diff --git a/src/core/core_cpu.cpp b/src/core/core_cpu.cpp index 99b7d387df..21c410e342 100644 --- a/src/core/core_cpu.cpp +++ b/src/core/core_cpu.cpp @@ -53,16 +53,12 @@ bool CpuBarrier::Rendezvous() { Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier, std::size_t core_index) : cpu_barrier{cpu_barrier}, core_timing{system.CoreTiming()}, core_index{core_index} { - if (Settings::values.cpu_jit_enabled) { #ifdef ARCHITECTURE_x86_64 - arm_interface = std::make_unique<ARM_Dynarmic>(system, exclusive_monitor, core_index); + arm_interface = std::make_unique<ARM_Dynarmic>(system, exclusive_monitor, core_index); #else - arm_interface = std::make_unique<ARM_Unicorn>(system); - LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available"); + arm_interface = std::make_unique<ARM_Unicorn>(system); + LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available"); #endif - } else { - arm_interface = std::make_unique<ARM_Unicorn>(system); - } scheduler = std::make_unique<Kernel::Scheduler>(system, *arm_interface); } @@ -70,15 +66,12 @@ Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_ba Cpu::~Cpu() = default; std::unique_ptr<ExclusiveMonitor> Cpu::MakeExclusiveMonitor(std::size_t num_cores) { - if (Settings::values.cpu_jit_enabled) { #ifdef ARCHITECTURE_x86_64 - return std::make_unique<DynarmicExclusiveMonitor>(num_cores); + return std::make_unique<DynarmicExclusiveMonitor>(num_cores); #else - return nullptr; // TODO(merry): Passthrough exclusive monitor + // TODO(merry): Passthrough exclusive monitor + return nullptr; #endif - } else { - return nullptr; // TODO(merry): Passthrough exclusive monitor - } } void Cpu::RunLoop(bool tight_loop) { diff --git a/src/core/file_sys/program_metadata.cpp b/src/core/file_sys/program_metadata.cpp index eb76174c5b..7310b36026 100644 --- a/src/core/file_sys/program_metadata.cpp +++ b/src/core/file_sys/program_metadata.cpp @@ -94,6 +94,10 @@ u64 ProgramMetadata::GetFilesystemPermissions() const { return aci_file_access.permissions; } +u32 ProgramMetadata::GetSystemResourceSize() const { + return npdm_header.system_resource_size; +} + const ProgramMetadata::KernelCapabilityDescriptors& ProgramMetadata::GetKernelCapabilities() const { return aci_kernel_capabilities; } diff --git a/src/core/file_sys/program_metadata.h b/src/core/file_sys/program_metadata.h index 43bf2820a9..88ec97d85f 100644 --- a/src/core/file_sys/program_metadata.h +++ b/src/core/file_sys/program_metadata.h @@ -58,6 +58,7 @@ public: u32 GetMainThreadStackSize() const; u64 GetTitleID() const; u64 GetFilesystemPermissions() const; + u32 GetSystemResourceSize() const; const KernelCapabilityDescriptors& GetKernelCapabilities() const; void Print() const; @@ -76,7 +77,8 @@ private: u8 reserved_3; u8 main_thread_priority; u8 main_thread_cpu; - std::array<u8, 8> reserved_4; + std::array<u8, 4> reserved_4; + u32_le system_resource_size; u32_le process_category; u32_le main_stack_size; std::array<u8, 0x10> application_name; diff --git 
a/src/core/file_sys/system_archive/mii_model.cpp b/src/core/file_sys/system_archive/mii_model.cpp new file mode 100644 index 0000000000..6a9add87c6 --- /dev/null +++ b/src/core/file_sys/system_archive/mii_model.cpp @@ -0,0 +1,46 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/file_sys/system_archive/mii_model.h" +#include "core/file_sys/vfs_vector.h" + +namespace FileSys::SystemArchive { + +namespace MiiModelData { + +constexpr std::array<u8, 0x10> NFTR_STANDARD{'N', 'F', 'T', 'R', 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; +constexpr std::array<u8, 0x10> NFSR_STANDARD{'N', 'F', 'S', 'R', 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + +constexpr auto TEXTURE_LOW_LINEAR = NFTR_STANDARD; +constexpr auto TEXTURE_LOW_SRGB = NFTR_STANDARD; +constexpr auto TEXTURE_MID_LINEAR = NFTR_STANDARD; +constexpr auto TEXTURE_MID_SRGB = NFTR_STANDARD; +constexpr auto SHAPE_HIGH = NFSR_STANDARD; +constexpr auto SHAPE_MID = NFSR_STANDARD; + +} // namespace MiiModelData + +VirtualDir MiiModel() { + auto out = std::make_shared<VectorVfsDirectory>(std::vector<VirtualFile>{}, + std::vector<VirtualDir>{}, "data"); + + out->AddFile(std::make_shared<ArrayVfsFile<MiiModelData::TEXTURE_LOW_LINEAR.size()>>( + MiiModelData::TEXTURE_LOW_LINEAR, "NXTextureLowLinear.dat")); + out->AddFile(std::make_shared<ArrayVfsFile<MiiModelData::TEXTURE_LOW_SRGB.size()>>( + MiiModelData::TEXTURE_LOW_SRGB, "NXTextureLowSRGB.dat")); + out->AddFile(std::make_shared<ArrayVfsFile<MiiModelData::TEXTURE_MID_LINEAR.size()>>( + MiiModelData::TEXTURE_MID_LINEAR, "NXTextureMidLinear.dat")); + out->AddFile(std::make_shared<ArrayVfsFile<MiiModelData::TEXTURE_MID_SRGB.size()>>( + MiiModelData::TEXTURE_MID_SRGB, "NXTextureMidSRGB.dat")); + out->AddFile(std::make_shared<ArrayVfsFile<MiiModelData::SHAPE_HIGH.size()>>( + MiiModelData::SHAPE_HIGH, "ShapeHigh.dat")); + out->AddFile(std::make_shared<ArrayVfsFile<MiiModelData::SHAPE_MID.size()>>( + MiiModelData::SHAPE_MID, "ShapeMid.dat")); + + return std::move(out); +} + +} // namespace FileSys::SystemArchive diff --git a/src/core/file_sys/system_archive/mii_model.h b/src/core/file_sys/system_archive/mii_model.h new file mode 100644 index 0000000000..6c2d9398b1 --- /dev/null +++ b/src/core/file_sys/system_archive/mii_model.h @@ -0,0 +1,13 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include "core/file_sys/vfs_types.h" + +namespace FileSys::SystemArchive { + +VirtualDir MiiModel(); + +} // namespace FileSys::SystemArchive diff --git a/src/core/file_sys/system_archive/system_archive.cpp b/src/core/file_sys/system_archive/system_archive.cpp index c9722ed77e..6d84453834 100644 --- a/src/core/file_sys/system_archive/system_archive.cpp +++ b/src/core/file_sys/system_archive/system_archive.cpp @@ -4,6 +4,7 @@ #include "common/logging/log.h" #include "core/file_sys/romfs.h" +#include "core/file_sys/system_archive/mii_model.h" #include "core/file_sys/system_archive/ng_word.h" #include "core/file_sys/system_archive/system_archive.h" #include "core/file_sys/system_archive/system_version.h" @@ -24,7 +25,7 @@ struct SystemArchiveDescriptor { constexpr std::array<SystemArchiveDescriptor, SYSTEM_ARCHIVE_COUNT> SYSTEM_ARCHIVES{{ {0x0100000000000800, "CertStore", nullptr}, {0x0100000000000801, "ErrorMessage", nullptr}, - {0x0100000000000802, "MiiModel", nullptr}, + {0x0100000000000802, "MiiModel", &MiiModel}, {0x0100000000000803, "BrowserDll", nullptr}, {0x0100000000000804, "Help", nullptr}, {0x0100000000000805, "SharedFont", nullptr}, diff --git a/src/core/hardware_interrupt_manager.cpp b/src/core/hardware_interrupt_manager.cpp new file mode 100644 index 0000000000..c2115db2de --- /dev/null +++ b/src/core/hardware_interrupt_manager.cpp @@ -0,0 +1,30 @@ +// Copyright 2019 Yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/core.h" +#include "core/core_timing.h" +#include "core/hardware_interrupt_manager.h" +#include "core/hle/service/nvdrv/interface.h" +#include "core/hle/service/sm/sm.h" + +namespace Core::Hardware { + +InterruptManager::InterruptManager(Core::System& system_in) : system(system_in) { + gpu_interrupt_event = + system.CoreTiming().RegisterEvent("GPUInterrupt", [this](u64 message, s64) { + auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv"); + const u32 syncpt = static_cast<u32>(message >> 32); + const u32 value = static_cast<u32>(message); + nvdrv->SignalGPUInterruptSyncpt(syncpt, value); + }); +} + +InterruptManager::~InterruptManager() = default; + +void InterruptManager::GPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) { + const u64 msg = (static_cast<u64>(syncpoint_id) << 32ULL) | value; + system.CoreTiming().ScheduleEvent(10, gpu_interrupt_event, msg); +} + +} // namespace Core::Hardware diff --git a/src/core/hardware_interrupt_manager.h b/src/core/hardware_interrupt_manager.h new file mode 100644 index 0000000000..494db883ad --- /dev/null +++ b/src/core/hardware_interrupt_manager.h @@ -0,0 +1,31 @@ +// Copyright 2019 Yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
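The interrupt manager above folds the (syncpoint_id, value) pair into the 64-bit userdata of a CoreTiming event and unpacks it again in the event callback. A round-trip sketch of that encoding, where Pack, UnpackSyncpt, and UnpackValue are illustrative names rather than part of the patch:

    #include <cstdint>

    constexpr std::uint64_t Pack(std::uint32_t syncpoint_id, std::uint32_t value) {
        return (static_cast<std::uint64_t>(syncpoint_id) << 32) | value;
    }
    constexpr std::uint32_t UnpackSyncpt(std::uint64_t message) {
        return static_cast<std::uint32_t>(message >> 32);
    }
    constexpr std::uint32_t UnpackValue(std::uint64_t message) {
        return static_cast<std::uint32_t>(message);
    }

    static_assert(UnpackSyncpt(Pack(3, 0xDEADBEEF)) == 3);
    static_assert(UnpackValue(Pack(3, 0xDEADBEEF)) == 0xDEADBEEF);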
+ +#pragma once + +#include "common/common_types.h" + +namespace Core { +class System; +} + +namespace Core::Timing { +struct EventType; +} + +namespace Core::Hardware { + +class InterruptManager { +public: + explicit InterruptManager(Core::System& system); + ~InterruptManager(); + + void GPUInterruptSyncpt(u32 syncpoint_id, u32 value); + +private: + Core::System& system; + Core::Timing::EventType* gpu_interrupt_event{}; +}; + +} // namespace Core::Hardware diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h index 879957dcb1..d8ad540309 100644 --- a/src/core/hle/kernel/code_set.h +++ b/src/core/hle/kernel/code_set.h @@ -8,6 +8,7 @@ #include <vector> #include "common/common_types.h" +#include "core/hle/kernel/physical_memory.h" namespace Kernel { @@ -77,7 +78,7 @@ struct CodeSet final { } /// The overall data that backs this code set. - std::vector<u8> memory; + Kernel::PhysicalMemory memory; /// The segments that comprise this code set. std::array<Segment, 3> segments; diff --git a/src/core/hle/kernel/physical_memory.h b/src/core/hle/kernel/physical_memory.h new file mode 100644 index 0000000000..0905653102 --- /dev/null +++ b/src/core/hle/kernel/physical_memory.h @@ -0,0 +1,19 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/alignment.h" + +namespace Kernel { + +// This encapsulation serves 2 purposes: +// - First, to encapsulate host physical memory under a single type and set an +// standard for managing it. +// - Second to ensure all host backing memory used is aligned to 256 bytes due +// to strict alignment restrictions on GPU memory. + +using PhysicalMemory = std::vector<u8, Common::AlignmentAllocator<u8, 256>>; + +} // namespace Kernel diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index f45ef05f69..e80a12ac35 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -129,20 +129,17 @@ u64 Process::GetTotalPhysicalMemoryAvailable() const { return vm_manager.GetTotalPhysicalMemoryAvailable(); } -u64 Process::GetTotalPhysicalMemoryAvailableWithoutMmHeap() const { - // TODO: Subtract the personal heap size from this when the - // personal heap is implemented. - return GetTotalPhysicalMemoryAvailable(); +u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const { + return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize(); } u64 Process::GetTotalPhysicalMemoryUsed() const { - return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size; + return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size + + GetSystemResourceUsage(); } -u64 Process::GetTotalPhysicalMemoryUsedWithoutMmHeap() const { - // TODO: Subtract the personal heap size from this when the - // personal heap is implemented. 
- return GetTotalPhysicalMemoryUsed(); +u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { + return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); } void Process::RegisterThread(const Thread* thread) { @@ -172,6 +169,7 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) { program_id = metadata.GetTitleID(); ideal_core = metadata.GetMainThreadCore(); is_64bit_process = metadata.Is64BitProgram(); + system_resource_size = metadata.GetSystemResourceSize(); vm_manager.Reset(metadata.GetAddressSpaceType()); @@ -186,19 +184,11 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) { } void Process::Run(s32 main_thread_priority, u64 stack_size) { - // The kernel always ensures that the given stack size is page aligned. - main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE); - - // Allocate and map the main thread stack - // TODO(bunnei): This is heap area that should be allocated by the kernel and not mapped as part - // of the user address space. - const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size; - vm_manager - .MapMemoryBlock(mapping_address, std::make_shared<std::vector<u8>>(main_thread_stack_size), - 0, main_thread_stack_size, MemoryState::Stack) - .Unwrap(); + AllocateMainThreadStack(stack_size); + tls_region_address = CreateTLSRegion(); vm_manager.LogLayout(); + ChangeStatus(ProcessStatus::Running); SetupMainThread(*this, kernel, main_thread_priority); @@ -228,6 +218,9 @@ void Process::PrepareForTermination() { stop_threads(system.Scheduler(2).GetThreadList()); stop_threads(system.Scheduler(3).GetThreadList()); + FreeTLSRegion(tls_region_address); + tls_region_address = 0; + ChangeStatus(ProcessStatus::Exited); } @@ -254,7 +247,7 @@ VAddr Process::CreateTLSRegion() { ASSERT(region_address.Succeeded()); const auto map_result = vm_manager.MapMemoryBlock( - *region_address, std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE), 0, + *region_address, std::make_shared<PhysicalMemory>(Memory::PAGE_SIZE), 0, Memory::PAGE_SIZE, MemoryState::ThreadLocal); ASSERT(map_result.Succeeded()); @@ -284,7 +277,7 @@ void Process::FreeTLSRegion(VAddr tls_address) { } void Process::LoadModule(CodeSet module_, VAddr base_addr) { - const auto memory = std::make_shared<std::vector<u8>>(std::move(module_.memory)); + const auto memory = std::make_shared<PhysicalMemory>(std::move(module_.memory)); const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions, MemoryState memory_state) { @@ -327,4 +320,16 @@ void Process::ChangeStatus(ProcessStatus new_status) { WakeupAllWaitingThreads(); } +void Process::AllocateMainThreadStack(u64 stack_size) { + // The kernel always ensures that the given stack size is page aligned. + main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE); + + // Allocate and map the main thread stack + const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size; + vm_manager + .MapMemoryBlock(mapping_address, std::make_shared<PhysicalMemory>(main_thread_stack_size), + 0, main_thread_stack_size, MemoryState::Stack) + .Unwrap(); +} + } // namespace Kernel diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index 83ea02beec..c2df451f3d 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h @@ -135,6 +135,11 @@ public: return mutex; } + /// Gets the address to the process' dedicated TLS region. 
+ VAddr GetTLSRegionAddress() const { + return tls_region_address; + } + /// Gets the current status of the process ProcessStatus GetStatus() const { return status; @@ -168,8 +173,24 @@ public: return capabilities.GetPriorityMask(); } - u32 IsVirtualMemoryEnabled() const { - return is_virtual_address_memory_enabled; + /// Gets the amount of secure memory to allocate for memory management. + u32 GetSystemResourceSize() const { + return system_resource_size; + } + + /// Gets the amount of secure memory currently in use for memory management. + u32 GetSystemResourceUsage() const { + // On hardware, this returns the amount of system resource memory that has + // been used by the kernel. This is problematic for Yuzu to emulate, because + // system resource memory is used for page tables -- and yuzu doesn't really + // have a way to calculate how much memory is required for page tables for + // the current process at any given time. + // TODO: Is this even worth implementing? Games may retrieve this value via + // an SDK function that gets used + available system resource size for debug + // or diagnostic purposes. However, it seems unlikely that a game would make + // decisions based on how much system memory is dedicated to its page tables. + // Is returning a value other than zero wise? + return 0; } /// Whether this process is an AArch64 or AArch32 process. @@ -196,15 +217,15 @@ public: u64 GetTotalPhysicalMemoryAvailable() const; /// Retrieves the total physical memory available to this process in bytes, - /// without the size of the personal heap added to it. - u64 GetTotalPhysicalMemoryAvailableWithoutMmHeap() const; + /// without the size of the personal system resource heap added to it. + u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const; /// Retrieves the total physical memory used by this process in bytes. u64 GetTotalPhysicalMemoryUsed() const; /// Retrieves the total physical memory used by this process in bytes, - /// without the size of the personal heap added to it. - u64 GetTotalPhysicalMemoryUsedWithoutMmHeap() const; + /// without the size of the personal system resource heap added to it. + u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const; /// Gets the list of all threads created with this process as their owner. const std::list<const Thread*>& GetThreadList() const { @@ -280,6 +301,9 @@ private: /// a process signal. void ChangeStatus(ProcessStatus new_status); + /// Allocates the main thread stack for the process, given the stack size in bytes. + void AllocateMainThreadStack(u64 stack_size); + /// Memory manager for this process. Kernel::VMManager vm_manager; @@ -298,12 +322,16 @@ private: /// Title ID corresponding to the process u64 program_id = 0; + /// Specifies additional memory to be reserved for the process's memory management by the + /// system. When this is non-zero, secure memory is allocated and used for page table allocation + /// instead of using the normal global page tables/memory block management. + u32 system_resource_size = 0; + /// Resource limit descriptor for this process SharedPtr<ResourceLimit> resource_limit; /// The ideal CPU core for this process, threads are scheduled on this core by default. u8 ideal_core = 0; - u32 is_virtual_address_memory_enabled = 0; /// The Thread Local Storage area is allocated as processes create threads, /// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part @@ -338,6 +366,9 @@ private: /// variable related facilities. 
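The TLS comment above describes the slot layout: one 0x1000-byte page is carved into 0x1000 / 0x200 == 8 slots of 0x200 bytes each. A sketch of that arithmetic, with TlsSlotAddress as a hypothetical helper that is not part of the patch:

    #include <cstdint>

    // Hypothetical helper: address of TLS slot `slot` within a page starting at `page_base`.
    constexpr std::uint64_t TlsSlotAddress(std::uint64_t page_base, std::uint64_t slot) {
        return page_base + slot * 0x200;
    }

    static_assert(0x1000 / 0x200 == 8);                   // eight slots per page
    static_assert(TlsSlotAddress(0x10000, 7) == 0x10E00); // last slot of the page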
Mutex mutex; + /// Address indicating the location of the process' dedicated TLS region. + VAddr tls_region_address = 0; + /// Random values for svcGetInfo RandomEntropy std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{}; diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp index f15c5ee362..a815c4eea3 100644 --- a/src/core/hle/kernel/shared_memory.cpp +++ b/src/core/hle/kernel/shared_memory.cpp @@ -28,7 +28,7 @@ SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_ shared_memory->other_permissions = other_permissions; if (address == 0) { - shared_memory->backing_block = std::make_shared<std::vector<u8>>(size); + shared_memory->backing_block = std::make_shared<Kernel::PhysicalMemory>(size); shared_memory->backing_block_offset = 0; // Refresh the address mappings for the current process. @@ -59,8 +59,8 @@ SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_ } SharedPtr<SharedMemory> SharedMemory::CreateForApplet( - KernelCore& kernel, std::shared_ptr<std::vector<u8>> heap_block, std::size_t offset, u64 size, - MemoryPermission permissions, MemoryPermission other_permissions, std::string name) { + KernelCore& kernel, std::shared_ptr<Kernel::PhysicalMemory> heap_block, std::size_t offset, + u64 size, MemoryPermission permissions, MemoryPermission other_permissions, std::string name) { SharedPtr<SharedMemory> shared_memory(new SharedMemory(kernel)); shared_memory->owner_process = nullptr; diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h index c2b6155e18..01ca6dcd22 100644 --- a/src/core/hle/kernel/shared_memory.h +++ b/src/core/hle/kernel/shared_memory.h @@ -10,6 +10,7 @@ #include "common/common_types.h" #include "core/hle/kernel/object.h" +#include "core/hle/kernel/physical_memory.h" #include "core/hle/kernel/process.h" #include "core/hle/result.h" @@ -62,12 +63,10 @@ public: * block. * @param name Optional object name, used for debugging purposes. */ - static SharedPtr<SharedMemory> CreateForApplet(KernelCore& kernel, - std::shared_ptr<std::vector<u8>> heap_block, - std::size_t offset, u64 size, - MemoryPermission permissions, - MemoryPermission other_permissions, - std::string name = "Unknown Applet"); + static SharedPtr<SharedMemory> CreateForApplet( + KernelCore& kernel, std::shared_ptr<Kernel::PhysicalMemory> heap_block, std::size_t offset, + u64 size, MemoryPermission permissions, MemoryPermission other_permissions, + std::string name = "Unknown Applet"); std::string GetTypeName() const override { return "SharedMemory"; @@ -135,7 +134,7 @@ private: ~SharedMemory() override; /// Backing memory for this shared memory block. - std::shared_ptr<std::vector<u8>> backing_block; + std::shared_ptr<PhysicalMemory> backing_block; /// Offset into the backing block for this shared memory. std::size_t backing_block_offset = 0; /// Size of the memory block. Page-aligned. 
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 332573a955..1fd1a732a9 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -318,7 +318,14 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad return result; } - return vm_manager.UnmapRange(dst_addr, size); + const auto unmap_res = vm_manager.UnmapRange(dst_addr, size); + + // Reprotect the source mapping on success + if (unmap_res.IsSuccess()) { + ASSERT(vm_manager.ReprotectRange(src_addr, size, VMAPermission::ReadWrite).IsSuccess()); + } + + return unmap_res; } /// Connect to an OS service given the port name, returns the handle to the port to out @@ -729,16 +736,16 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha StackRegionBaseAddr = 14, StackRegionSize = 15, // 3.0.0+ - IsVirtualAddressMemoryEnabled = 16, - PersonalMmHeapUsage = 17, + SystemResourceSize = 16, + SystemResourceUsage = 17, TitleId = 18, // 4.0.0+ PrivilegedProcessId = 19, // 5.0.0+ UserExceptionContextAddr = 20, // 6.0.0+ - TotalPhysicalMemoryAvailableWithoutMmHeap = 21, - TotalPhysicalMemoryUsedWithoutMmHeap = 22, + TotalPhysicalMemoryAvailableWithoutSystemResource = 21, + TotalPhysicalMemoryUsedWithoutSystemResource = 22, }; const auto info_id_type = static_cast<GetInfoType>(info_id); @@ -756,12 +763,12 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha case GetInfoType::StackRegionSize: case GetInfoType::TotalPhysicalMemoryAvailable: case GetInfoType::TotalPhysicalMemoryUsed: - case GetInfoType::IsVirtualAddressMemoryEnabled: - case GetInfoType::PersonalMmHeapUsage: + case GetInfoType::SystemResourceSize: + case GetInfoType::SystemResourceUsage: case GetInfoType::TitleId: case GetInfoType::UserExceptionContextAddr: - case GetInfoType::TotalPhysicalMemoryAvailableWithoutMmHeap: - case GetInfoType::TotalPhysicalMemoryUsedWithoutMmHeap: { + case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource: + case GetInfoType::TotalPhysicalMemoryUsedWithoutSystemResource: { if (info_sub_id != 0) { return ERR_INVALID_ENUM_VALUE; } @@ -822,8 +829,13 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha *result = process->GetTotalPhysicalMemoryUsed(); return RESULT_SUCCESS; - case GetInfoType::IsVirtualAddressMemoryEnabled: - *result = process->IsVirtualMemoryEnabled(); + case GetInfoType::SystemResourceSize: + *result = process->GetSystemResourceSize(); + return RESULT_SUCCESS; + + case GetInfoType::SystemResourceUsage: + LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage"); + *result = process->GetSystemResourceUsage(); return RESULT_SUCCESS; case GetInfoType::TitleId: @@ -831,17 +843,15 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha return RESULT_SUCCESS; case GetInfoType::UserExceptionContextAddr: - LOG_WARNING(Kernel_SVC, - "(STUBBED) Attempted to query user exception context address, returned 0"); - *result = 0; + *result = process->GetTLSRegionAddress(); return RESULT_SUCCESS; - case GetInfoType::TotalPhysicalMemoryAvailableWithoutMmHeap: - *result = process->GetTotalPhysicalMemoryAvailable(); + case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource: + *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource(); return RESULT_SUCCESS; - case GetInfoType::TotalPhysicalMemoryUsedWithoutMmHeap: - *result = process->GetTotalPhysicalMemoryUsedWithoutMmHeap(); + case 
GetInfoType::TotalPhysicalMemoryUsedWithoutSystemResource: + *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource(); return RESULT_SUCCESS; default: @@ -946,6 +956,86 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha } } +/// Maps memory at a desired address +static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { + LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size); + + if (!Common::Is4KBAligned(addr)) { + LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr); + return ERR_INVALID_ADDRESS; + } + + if (!Common::Is4KBAligned(size)) { + LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size); + return ERR_INVALID_SIZE; + } + + if (size == 0) { + LOG_ERROR(Kernel_SVC, "Size is zero"); + return ERR_INVALID_SIZE; + } + + if (!(addr < addr + size)) { + LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address"); + return ERR_INVALID_MEMORY_RANGE; + } + + Process* const current_process = system.Kernel().CurrentProcess(); + auto& vm_manager = current_process->VMManager(); + + if (current_process->GetSystemResourceSize() == 0) { + LOG_ERROR(Kernel_SVC, "System Resource Size is zero"); + return ERR_INVALID_STATE; + } + + if (!vm_manager.IsWithinMapRegion(addr, size)) { + LOG_ERROR(Kernel_SVC, "Range not within map region"); + return ERR_INVALID_MEMORY_RANGE; + } + + return vm_manager.MapPhysicalMemory(addr, size); +} + +/// Unmaps memory previously mapped via MapPhysicalMemory +static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { + LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size); + + if (!Common::Is4KBAligned(addr)) { + LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr); + return ERR_INVALID_ADDRESS; + } + + if (!Common::Is4KBAligned(size)) { + LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size); + return ERR_INVALID_SIZE; + } + + if (size == 0) { + LOG_ERROR(Kernel_SVC, "Size is zero"); + return ERR_INVALID_SIZE; + } + + if (!(addr < addr + size)) { + LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address"); + return ERR_INVALID_MEMORY_RANGE; + } + + Process* const current_process = system.Kernel().CurrentProcess(); + auto& vm_manager = current_process->VMManager(); + + if (current_process->GetSystemResourceSize() == 0) { + LOG_ERROR(Kernel_SVC, "System Resource Size is zero"); + return ERR_INVALID_STATE; + } + + if (!vm_manager.IsWithinMapRegion(addr, size)) { + LOG_ERROR(Kernel_SVC, "Range not within map region"); + return ERR_INVALID_MEMORY_RANGE; + } + + return vm_manager.UnmapPhysicalMemory(addr, size); +} + /// Sets the thread activity static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) { LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity); @@ -1647,8 +1737,8 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var // Wait for an address (via Address Arbiter) static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, s32 value, s64 timeout) { - LOG_WARNING(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}", - address, type, value, timeout); + LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}", address, + type, value, timeout); // If the passed address is a kernel virtual address, return invalid memory state. 
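Both new SVCs above reject ranges whose end wraps around the 64-bit address space using the check !(addr < addr + size). A sketch of why that works under unsigned wraparound; RangeOverflows is an illustrative name rather than part of the patch, and size > 0 is assumed, which the preceding size check already guarantees:

    #include <cstdint>

    constexpr bool RangeOverflows(std::uint64_t addr, std::uint64_t size) {
        // With unsigned arithmetic, addr + size wraps past zero exactly when the
        // range [addr, addr + size) does not fit below 2^64.
        return !(addr < addr + size);
    }

    static_assert(!RangeOverflows(0x1000, 0x2000));               // normal range
    static_assert(RangeOverflows(0xFFFFFFFFFFFFF000ULL, 0x2000)); // wraps past the top of the address space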
if (Memory::IsKernelVirtualAddress(address)) { @@ -1670,8 +1760,8 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, // Signals to an address (via Address Arbiter) static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value, s32 num_to_wake) { - LOG_WARNING(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}", - address, type, value, num_to_wake); + LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}", + address, type, value, num_to_wake); // If the passed address is a kernel virtual address, return invalid memory state. if (Memory::IsKernelVirtualAddress(address)) { @@ -2303,8 +2393,8 @@ static const FunctionDef SVC_Table[] = { {0x29, SvcWrap<GetInfo>, "GetInfo"}, {0x2A, nullptr, "FlushEntireDataCache"}, {0x2B, nullptr, "FlushDataCache"}, - {0x2C, nullptr, "MapPhysicalMemory"}, - {0x2D, nullptr, "UnmapPhysicalMemory"}, + {0x2C, SvcWrap<MapPhysicalMemory>, "MapPhysicalMemory"}, + {0x2D, SvcWrap<UnmapPhysicalMemory>, "UnmapPhysicalMemory"}, {0x2E, nullptr, "GetFutureThreadInfo"}, {0x2F, nullptr, "GetLastThreadInfo"}, {0x30, SvcWrap<GetResourceLimitLimitValue>, "GetResourceLimitLimitValue"}, diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index 865473c6fa..c2d8d0dc30 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h @@ -32,6 +32,11 @@ void SvcWrap(Core::System& system) { FuncReturn(system, func(system, Param(system, 0)).raw); } +template <ResultCode func(Core::System&, u64, u64)> +void SvcWrap(Core::System& system) { + FuncReturn(system, func(system, Param(system, 0), Param(system, 1)).raw); +} + template <ResultCode func(Core::System&, u32)> void SvcWrap(Core::System& system) { FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw); diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp index 26c4e5e674..1113c815ec 100644 --- a/src/core/hle/kernel/transfer_memory.cpp +++ b/src/core/hle/kernel/transfer_memory.cpp @@ -47,7 +47,7 @@ ResultCode TransferMemory::MapMemory(VAddr address, u64 size, MemoryPermission p return ERR_INVALID_STATE; } - backing_block = std::make_shared<std::vector<u8>>(size); + backing_block = std::make_shared<PhysicalMemory>(size); const auto map_state = owner_permissions == MemoryPermission::None ? MemoryState::TransferMemoryIsolated diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h index a140b1e2bb..6be9dc0946 100644 --- a/src/core/hle/kernel/transfer_memory.h +++ b/src/core/hle/kernel/transfer_memory.h @@ -8,6 +8,7 @@ #include <vector> #include "core/hle/kernel/object.h" +#include "core/hle/kernel/physical_memory.h" union ResultCode; @@ -82,7 +83,7 @@ private: ~TransferMemory() override; /// Memory block backing this instance. - std::shared_ptr<std::vector<u8>> backing_block; + std::shared_ptr<PhysicalMemory> backing_block; /// The base address for the memory managed by this instance. 
VAddr base_address = 0; diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp index 501544090a..40cea1e7cc 100644 --- a/src/core/hle/kernel/vm_manager.cpp +++ b/src/core/hle/kernel/vm_manager.cpp @@ -5,13 +5,15 @@ #include <algorithm> #include <iterator> #include <utility> +#include "common/alignment.h" #include "common/assert.h" #include "common/logging/log.h" #include "common/memory_hook.h" -#include "core/arm/arm_interface.h" #include "core/core.h" #include "core/file_sys/program_metadata.h" #include "core/hle/kernel/errors.h" +#include "core/hle/kernel/process.h" +#include "core/hle/kernel/resource_limit.h" #include "core/hle/kernel/vm_manager.h" #include "core/memory.h" #include "core/memory_setup.h" @@ -49,10 +51,14 @@ bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const { type != next.type) { return false; } - if (type == VMAType::AllocatedMemoryBlock && - (backing_block != next.backing_block || offset + size != next.offset)) { + if ((attribute & MemoryAttribute::DeviceMapped) == MemoryAttribute::DeviceMapped) { + // TODO: Can device mapped memory be merged sanely? + // Not merging it may cause inaccuracies versus hardware when memory layout is queried. return false; } + if (type == VMAType::AllocatedMemoryBlock) { + return true; + } if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) { return false; } @@ -98,9 +104,9 @@ bool VMManager::IsValidHandle(VMAHandle handle) const { } ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target, - std::shared_ptr<std::vector<u8>> block, + std::shared_ptr<PhysicalMemory> block, std::size_t offset, u64 size, - MemoryState state) { + MemoryState state, VMAPermission perm) { ASSERT(block != nullptr); ASSERT(offset + size <= block->size()); @@ -109,17 +115,8 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target, VirtualMemoryArea& final_vma = vma_handle->second; ASSERT(final_vma.size == size); - system.ArmInterface(0).MapBackingMemory(target, size, block->data() + offset, - VMAPermission::ReadWriteExecute); - system.ArmInterface(1).MapBackingMemory(target, size, block->data() + offset, - VMAPermission::ReadWriteExecute); - system.ArmInterface(2).MapBackingMemory(target, size, block->data() + offset, - VMAPermission::ReadWriteExecute); - system.ArmInterface(3).MapBackingMemory(target, size, block->data() + offset, - VMAPermission::ReadWriteExecute); - final_vma.type = VMAType::AllocatedMemoryBlock; - final_vma.permissions = VMAPermission::ReadWrite; + final_vma.permissions = perm; final_vma.state = state; final_vma.backing_block = std::move(block); final_vma.offset = offset; @@ -137,11 +134,6 @@ ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* me VirtualMemoryArea& final_vma = vma_handle->second; ASSERT(final_vma.size == size); - system.ArmInterface(0).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute); - system.ArmInterface(1).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute); - system.ArmInterface(2).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute); - system.ArmInterface(3).MapBackingMemory(target, size, memory, VMAPermission::ReadWriteExecute); - final_vma.type = VMAType::BackingMemory; final_vma.permissions = VMAPermission::ReadWrite; final_vma.state = state; @@ -230,11 +222,6 @@ ResultCode VMManager::UnmapRange(VAddr target, u64 size) { ASSERT(FindVMA(target)->second.size >= size); - system.ArmInterface(0).UnmapMemory(target, size); 
- system.ArmInterface(1).UnmapMemory(target, size); - system.ArmInterface(2).UnmapMemory(target, size); - system.ArmInterface(3).UnmapMemory(target, size); - return RESULT_SUCCESS; } @@ -274,7 +261,7 @@ ResultVal<VAddr> VMManager::SetHeapSize(u64 size) { if (heap_memory == nullptr) { // Initialize heap - heap_memory = std::make_shared<std::vector<u8>>(size); + heap_memory = std::make_shared<PhysicalMemory>(size); heap_end = heap_region_base + size; } else { UnmapRange(heap_region_base, GetCurrentHeapSize()); @@ -308,6 +295,166 @@ ResultVal<VAddr> VMManager::SetHeapSize(u64 size) { return MakeResult<VAddr>(heap_region_base); } +ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) { + const auto end_addr = target + size; + const auto last_addr = end_addr - 1; + VAddr cur_addr = target; + + ResultCode result = RESULT_SUCCESS; + + // Check how much memory we've already mapped. + const auto mapped_size_result = SizeOfAllocatedVMAsInRange(target, size); + if (mapped_size_result.Failed()) { + return mapped_size_result.Code(); + } + + // If we've already mapped the desired amount, return early. + const std::size_t mapped_size = *mapped_size_result; + if (mapped_size == size) { + return RESULT_SUCCESS; + } + + // Check that we can map the memory we want. + const auto res_limit = system.CurrentProcess()->GetResourceLimit(); + const u64 physmem_remaining = res_limit->GetMaxResourceValue(ResourceType::PhysicalMemory) - + res_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory); + if (physmem_remaining < (size - mapped_size)) { + return ERR_RESOURCE_LIMIT_EXCEEDED; + } + + // Keep track of the memory regions we unmap. + std::vector<std::pair<u64, u64>> mapped_regions; + + // Iterate, trying to map memory. + { + cur_addr = target; + + auto iter = FindVMA(target); + ASSERT_MSG(iter != vma_map.end(), "MapPhysicalMemory iter != end"); + + while (true) { + const auto& vma = iter->second; + const auto vma_start = vma.base; + const auto vma_end = vma_start + vma.size; + const auto vma_last = vma_end - 1; + + // Map the memory block + const auto map_size = std::min(end_addr - cur_addr, vma_end - cur_addr); + if (vma.state == MemoryState::Unmapped) { + const auto map_res = + MapMemoryBlock(cur_addr, std::make_shared<PhysicalMemory>(map_size, 0), 0, + map_size, MemoryState::Heap, VMAPermission::ReadWrite); + result = map_res.Code(); + if (result.IsError()) { + break; + } + + mapped_regions.emplace_back(cur_addr, map_size); + } + + // Break once we hit the end of the range. + if (last_addr <= vma_last) { + break; + } + + // Advance to the next block. + cur_addr = vma_end; + iter = FindVMA(cur_addr); + ASSERT_MSG(iter != vma_map.end(), "MapPhysicalMemory iter != end"); + } + } + + // If we failed, unmap memory. + if (result.IsError()) { + for (const auto [unmap_address, unmap_size] : mapped_regions) { + ASSERT_MSG(UnmapRange(unmap_address, unmap_size).IsSuccess(), + "MapPhysicalMemory un-map on error"); + } + + return result; + } + + // Update amount of mapped physical memory. + physical_memory_mapped += size - mapped_size; + + return RESULT_SUCCESS; +} + +ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) { + const auto end_addr = target + size; + const auto last_addr = end_addr - 1; + VAddr cur_addr = target; + + ResultCode result = RESULT_SUCCESS; + + // Check how much memory is currently mapped. 
+ const auto mapped_size_result = SizeOfUnmappablePhysicalMemoryInRange(target, size); + if (mapped_size_result.Failed()) { + return mapped_size_result.Code(); + } + + // If we've already unmapped all the memory, return early. + const std::size_t mapped_size = *mapped_size_result; + if (mapped_size == 0) { + return RESULT_SUCCESS; + } + + // Keep track of the memory regions we unmap. + std::vector<std::pair<u64, u64>> unmapped_regions; + + // Try to unmap regions. + { + cur_addr = target; + + auto iter = FindVMA(target); + ASSERT_MSG(iter != vma_map.end(), "UnmapPhysicalMemory iter != end"); + + while (true) { + const auto& vma = iter->second; + const auto vma_start = vma.base; + const auto vma_end = vma_start + vma.size; + const auto vma_last = vma_end - 1; + + // Unmap the memory block + const auto unmap_size = std::min(end_addr - cur_addr, vma_end - cur_addr); + if (vma.state == MemoryState::Heap) { + result = UnmapRange(cur_addr, unmap_size); + if (result.IsError()) { + break; + } + + unmapped_regions.emplace_back(cur_addr, unmap_size); + } + + // Break once we hit the end of the range. + if (last_addr <= vma_last) { + break; + } + + // Advance to the next block. + cur_addr = vma_end; + iter = FindVMA(cur_addr); + ASSERT_MSG(iter != vma_map.end(), "UnmapPhysicalMemory iter != end"); + } + } + + // If we failed, re-map regions. + // TODO: Preserve memory contents? + if (result.IsError()) { + for (const auto [map_address, map_size] : unmapped_regions) { + const auto remap_res = + MapMemoryBlock(map_address, std::make_shared<PhysicalMemory>(map_size, 0), 0, + map_size, MemoryState::Heap, VMAPermission::None); + ASSERT_MSG(remap_res.Succeeded(), "UnmapPhysicalMemory re-map on error"); + } + } + + // Update mapped amount + physical_memory_mapped -= mapped_size; + + return RESULT_SUCCESS; +} + ResultCode VMManager::MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) { constexpr auto ignore_attribute = MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped; const auto src_check_result = CheckRangeState( @@ -447,7 +594,7 @@ ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, Mem ASSERT_MSG(vma_offset + size <= vma->second.size, "Shared memory exceeds bounds of mapped block"); - const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block; + const std::shared_ptr<PhysicalMemory>& backing_block = vma->second.backing_block; const std::size_t backing_block_offset = vma->second.offset + vma_offset; CASCADE_RESULT(auto new_vma, @@ -455,12 +602,12 @@ ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, Mem // Protect mirror with permissions from old region Reprotect(new_vma, vma->second.permissions); // Remove permissions from old region - Reprotect(vma, VMAPermission::None); + ReprotectRange(src_addr, size, VMAPermission::None); return RESULT_SUCCESS; } -void VMManager::RefreshMemoryBlockMappings(const std::vector<u8>* block) { +void VMManager::RefreshMemoryBlockMappings(const PhysicalMemory* block) { // If this ever proves to have a noticeable performance impact, allow users of the function to // specify a specific range of addresses to limit the scan to. 
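The MapPhysicalMemory and UnmapPhysicalMemory walks above clamp the portion handled per iteration to whatever is left of both the request and the current block, via std::min(end_addr - cur_addr, vma_end - cur_addr). A small sketch of that clamping, where ChunkSize is an illustrative name rather than part of the patch:

    #include <algorithm>
    #include <cstdint>

    constexpr std::uint64_t ChunkSize(std::uint64_t cur_addr, std::uint64_t end_addr,
                                      std::uint64_t vma_end) {
        return std::min(end_addr - cur_addr, vma_end - cur_addr);
    }

    static_assert(ChunkSize(0x1000, 0x4000, 0x2000) == 0x1000); // request continues past this VMA
    static_assert(ChunkSize(0x2000, 0x4000, 0x6000) == 0x2000); // request ends inside this VMA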
for (const auto& p : vma_map) { @@ -588,14 +735,14 @@ VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) { VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) { const VMAIter next_vma = std::next(iter); if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) { - iter->second.size += next_vma->second.size; + MergeAdjacentVMA(iter->second, next_vma->second); vma_map.erase(next_vma); } if (iter != vma_map.begin()) { VMAIter prev_vma = std::prev(iter); if (prev_vma->second.CanBeMergedWith(iter->second)) { - prev_vma->second.size += iter->second.size; + MergeAdjacentVMA(prev_vma->second, iter->second); vma_map.erase(iter); iter = prev_vma; } @@ -604,6 +751,38 @@ VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) { return iter; } +void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right) { + ASSERT(left.CanBeMergedWith(right)); + + // Always merge allocated memory blocks, even when they don't share the same backing block. + if (left.type == VMAType::AllocatedMemoryBlock && + (left.backing_block != right.backing_block || left.offset + left.size != right.offset)) { + // Check if we can save work. + if (left.offset == 0 && left.size == left.backing_block->size()) { + // Fast case: left is an entire backing block. + left.backing_block->insert(left.backing_block->end(), + right.backing_block->begin() + right.offset, + right.backing_block->begin() + right.offset + right.size); + } else { + // Slow case: make a new memory block for left and right. + auto new_memory = std::make_shared<PhysicalMemory>(); + new_memory->insert(new_memory->end(), left.backing_block->begin() + left.offset, + left.backing_block->begin() + left.offset + left.size); + new_memory->insert(new_memory->end(), right.backing_block->begin() + right.offset, + right.backing_block->begin() + right.offset + right.size); + left.backing_block = new_memory; + left.offset = 0; + } + + // Page table update is needed, because backing memory changed. + left.size += right.size; + UpdatePageTableForVMA(left); + } else { + // Just update the size. + left.size += right.size; + } +} + void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) { switch (vma.type) { case VMAType::Free: @@ -778,6 +957,84 @@ VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, Memo std::make_tuple(initial_state, initial_permissions, initial_attributes & ~ignore_mask)); } +ResultVal<std::size_t> VMManager::SizeOfAllocatedVMAsInRange(VAddr address, + std::size_t size) const { + const VAddr end_addr = address + size; + const VAddr last_addr = end_addr - 1; + std::size_t mapped_size = 0; + + VAddr cur_addr = address; + auto iter = FindVMA(cur_addr); + ASSERT_MSG(iter != vma_map.end(), "SizeOfAllocatedVMAsInRange iter != end"); + + while (true) { + const auto& vma = iter->second; + const VAddr vma_start = vma.base; + const VAddr vma_end = vma_start + vma.size; + const VAddr vma_last = vma_end - 1; + + // Add size if relevant. + if (vma.state != MemoryState::Unmapped) { + mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr); + } + + // Break once we hit the end of the range. + if (last_addr <= vma_last) { + break; + } + + // Advance to the next block. 
+ cur_addr = vma_end; + iter = std::next(iter); + ASSERT_MSG(iter != vma_map.end(), "SizeOfAllocatedVMAsInRange iter != end"); + } + + return MakeResult(mapped_size); +} + +ResultVal<std::size_t> VMManager::SizeOfUnmappablePhysicalMemoryInRange(VAddr address, + std::size_t size) const { + const VAddr end_addr = address + size; + const VAddr last_addr = end_addr - 1; + std::size_t mapped_size = 0; + + VAddr cur_addr = address; + auto iter = FindVMA(cur_addr); + ASSERT_MSG(iter != vma_map.end(), "SizeOfUnmappablePhysicalMemoryInRange iter != end"); + + while (true) { + const auto& vma = iter->second; + const auto vma_start = vma.base; + const auto vma_end = vma_start + vma.size; + const auto vma_last = vma_end - 1; + const auto state = vma.state; + const auto attr = vma.attribute; + + // Memory within region must be free or mapped heap. + if (!((state == MemoryState::Heap && attr == MemoryAttribute::None) || + (state == MemoryState::Unmapped))) { + return ERR_INVALID_ADDRESS_STATE; + } + + // Add size if relevant. + if (state != MemoryState::Unmapped) { + mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr); + } + + // Break once we hit the end of the range. + if (last_addr <= vma_last) { + break; + } + + // Advance to the next block. + cur_addr = vma_end; + iter = std::next(iter); + ASSERT_MSG(iter != vma_map.end(), "SizeOfUnmappablePhysicalMemoryInRange iter != end"); + } + + return MakeResult(mapped_size); +} + u64 VMManager::GetTotalPhysicalMemoryAvailable() const { LOG_WARNING(Kernel, "(STUBBED) called"); return 0xF8000000; diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h index 9fe6ac3f46..b18cde6197 100644 --- a/src/core/hle/kernel/vm_manager.h +++ b/src/core/hle/kernel/vm_manager.h @@ -11,6 +11,7 @@ #include "common/common_types.h" #include "common/memory_hook.h" #include "common/page_table.h" +#include "core/hle/kernel/physical_memory.h" #include "core/hle/result.h" #include "core/memory.h" @@ -290,7 +291,7 @@ struct VirtualMemoryArea { // Settings for type = AllocatedMemoryBlock /// Memory block backing this VMA. - std::shared_ptr<std::vector<u8>> backing_block = nullptr; + std::shared_ptr<PhysicalMemory> backing_block = nullptr; /// Offset into the backing_memory the mapping starts from. std::size_t offset = 0; @@ -348,8 +349,9 @@ public: * @param size Size of the mapping. * @param state MemoryState tag to attach to the VMA. */ - ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block, - std::size_t offset, u64 size, MemoryState state); + ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<PhysicalMemory> block, + std::size_t offset, u64 size, MemoryState state, + VMAPermission perm = VMAPermission::ReadWrite); /** * Maps an unmanaged host memory pointer at a given address. @@ -450,6 +452,34 @@ public: /// ResultVal<VAddr> SetHeapSize(u64 size); + /// Maps memory at a given address. + /// + /// @param addr The virtual address to map memory at. + /// @param size The amount of memory to map. + /// + /// @note The destination address must lie within the Map region. + /// + /// @note This function requires that SystemResourceSize be non-zero, + /// however, this is just because if it were not then the + /// resulting page tables could be exploited on hardware by + /// a malicious program. SystemResource usage does not need + /// to be explicitly checked or updated here. + ResultCode MapPhysicalMemory(VAddr target, u64 size); + + /// Unmaps memory at a given address. 
+ /// + /// @param addr The virtual address to unmap memory at. + /// @param size The amount of memory to unmap. + /// + /// @note The destination address must lie within the Map region. + /// + /// @note This function requires that SystemResourceSize be non-zero, + /// however, this is just because if it were not then the + /// resulting page tables could be exploited on hardware by + /// a malicious program. SystemResource usage does not need + /// to be explicitly checked or updated here. + ResultCode UnmapPhysicalMemory(VAddr target, u64 size); + /// Maps a region of memory as code memory. /// /// @param dst_address The base address of the region to create the aliasing memory region. @@ -518,7 +548,7 @@ public: * Scans all VMAs and updates the page table range of any that use the given vector as backing * memory. This should be called after any operation that causes reallocation of the vector. */ - void RefreshMemoryBlockMappings(const std::vector<u8>* block); + void RefreshMemoryBlockMappings(const PhysicalMemory* block); /// Dumps the address space layout to the log, for debugging void LogLayout() const; @@ -657,6 +687,11 @@ private: */ VMAIter MergeAdjacent(VMAIter vma); + /** + * Merges two adjacent VMAs. + */ + void MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right); + /// Updates the pages corresponding to this VMA so they match the VMA's attributes. void UpdatePageTableForVMA(const VirtualMemoryArea& vma); @@ -701,6 +736,13 @@ private: MemoryAttribute attribute_mask, MemoryAttribute attribute, MemoryAttribute ignore_mask) const; + /// Gets the amount of memory currently mapped (state != Unmapped) in a range. + ResultVal<std::size_t> SizeOfAllocatedVMAsInRange(VAddr address, std::size_t size) const; + + /// Gets the amount of memory unmappable by UnmapPhysicalMemory in a range. + ResultVal<std::size_t> SizeOfUnmappablePhysicalMemoryInRange(VAddr address, + std::size_t size) const; + /** * A map covering the entirety of the managed address space, keyed by the `base` field of each * VMA. It must always be modified by splitting or merging VMAs, so that the invariant @@ -736,12 +778,17 @@ private: // the entire virtual address space extents that bound the allocations, including any holes. // This makes deallocation and reallocation of holes fast and keeps process memory contiguous // in the emulator address space, allowing Memory::GetPointer to be reasonably safe. - std::shared_ptr<std::vector<u8>> heap_memory; + std::shared_ptr<PhysicalMemory> heap_memory; // The end of the currently allocated heap. This is not an inclusive // end of the range. This is essentially 'base_address + current_size'. VAddr heap_end = 0; + // The current amount of memory mapped via MapPhysicalMemory. + // This is used here (and in Nintendo's kernel) only for debugging, and does not impact + // any behavior. 
+ u64 physical_memory_mapped = 0; + Core::System& system; }; } // namespace Kernel diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp index eced380013..111633ba3e 100644 --- a/src/core/hle/service/am/am.cpp +++ b/src/core/hle/service/am/am.cpp @@ -29,7 +29,8 @@ #include "core/hle/service/am/omm.h" #include "core/hle/service/am/spsm.h" #include "core/hle/service/am/tcap.h" -#include "core/hle/service/apm/apm.h" +#include "core/hle/service/apm/controller.h" +#include "core/hle/service/apm/interface.h" #include "core/hle/service/filesystem/filesystem.h" #include "core/hle/service/ns/ns.h" #include "core/hle/service/nvflinger/nvflinger.h" @@ -265,8 +266,8 @@ ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger {65, nullptr, "ReportUserIsActive"}, {66, nullptr, "GetCurrentIlluminance"}, {67, nullptr, "IsIlluminanceAvailable"}, - {68, nullptr, "SetAutoSleepDisabled"}, - {69, nullptr, "IsAutoSleepDisabled"}, + {68, &ISelfController::SetAutoSleepDisabled, "SetAutoSleepDisabled"}, + {69, &ISelfController::IsAutoSleepDisabled, "IsAutoSleepDisabled"}, {70, nullptr, "ReportMultimediaError"}, {71, nullptr, "GetCurrentIlluminanceEx"}, {80, nullptr, "SetWirelessPriorityMode"}, @@ -453,6 +454,34 @@ void ISelfController::GetIdleTimeDetectionExtension(Kernel::HLERequestContext& c rb.Push<u32>(idle_time_detection_extension); } +void ISelfController::SetAutoSleepDisabled(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + is_auto_sleep_disabled = rp.Pop<bool>(); + + // On the system itself, if the previous state of is_auto_sleep_disabled + // differed from the current value passed in, it would signal the internal + // window manager to update (and also increment some statistics, like update counts). + // + // It'd also indicate this change to an idle handling context. + // + // However, given we're emulating this behavior, most of this can be ignored + // and it's sufficient to simply set the member variable for querying via + // IsAutoSleepDisabled(). + + LOG_DEBUG(Service_AM, "called. 
is_auto_sleep_disabled={}", is_auto_sleep_disabled); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(RESULT_SUCCESS); +} + +void ISelfController::IsAutoSleepDisabled(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_AM, "called."); + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(RESULT_SUCCESS); + rb.Push(is_auto_sleep_disabled); +} + void ISelfController::GetAccumulatedSuspendedTickValue(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_AM, "called."); @@ -520,8 +549,9 @@ void AppletMessageQueue::OperationModeChanged() { on_operation_mode_changed.writable->Signal(); } -ICommonStateGetter::ICommonStateGetter(std::shared_ptr<AppletMessageQueue> msg_queue) - : ServiceFramework("ICommonStateGetter"), msg_queue(std::move(msg_queue)) { +ICommonStateGetter::ICommonStateGetter(Core::System& system, + std::shared_ptr<AppletMessageQueue> msg_queue) + : ServiceFramework("ICommonStateGetter"), system(system), msg_queue(std::move(msg_queue)) { // clang-format off static const FunctionInfo functions[] = { {0, &ICommonStateGetter::GetEventHandle, "GetEventHandle"}, @@ -554,7 +584,7 @@ ICommonStateGetter::ICommonStateGetter(std::shared_ptr<AppletMessageQueue> msg_q {63, nullptr, "GetHdcpAuthenticationStateChangeEvent"}, {64, nullptr, "SetTvPowerStateMatchingMode"}, {65, nullptr, "GetApplicationIdByContentActionName"}, - {66, nullptr, "SetCpuBoostMode"}, + {66, &ICommonStateGetter::SetCpuBoostMode, "SetCpuBoostMode"}, {80, nullptr, "PerformSystemButtonPressingIfInFocus"}, {90, nullptr, "SetPerformanceConfigurationChangedNotification"}, {91, nullptr, "GetCurrentPerformanceConfiguration"}, @@ -635,6 +665,16 @@ void ICommonStateGetter::GetDefaultDisplayResolution(Kernel::HLERequestContext& } } +void ICommonStateGetter::SetCpuBoostMode(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_AM, "called, forwarding to APM:SYS"); + + const auto& sm = system.ServiceManager(); + const auto apm_sys = sm.GetService<APM::APM_Sys>("apm:sys"); + ASSERT(apm_sys != nullptr); + + apm_sys->SetCpuBoostMode(ctx); +} + IStorage::IStorage(std::vector<u8> buffer) : ServiceFramework("IStorage"), buffer(std::move(buffer)) { // clang-format off @@ -663,13 +703,11 @@ void ICommonStateGetter::GetOperationMode(Kernel::HLERequestContext& ctx) { } void ICommonStateGetter::GetPerformanceMode(Kernel::HLERequestContext& ctx) { - const bool use_docked_mode{Settings::values.use_docked_mode}; - LOG_DEBUG(Service_AM, "called, use_docked_mode={}", use_docked_mode); + LOG_DEBUG(Service_AM, "called"); IPC::ResponseBuilder rb{ctx, 3}; rb.Push(RESULT_SUCCESS); - rb.Push(static_cast<u32>(use_docked_mode ? 
APM::PerformanceMode::Docked - : APM::PerformanceMode::Handheld)); + rb.PushEnum(system.GetAPMController().GetCurrentPerformanceMode()); } class ILibraryAppletAccessor final : public ServiceFramework<ILibraryAppletAccessor> { @@ -1019,6 +1057,7 @@ IApplicationFunctions::IApplicationFunctions() : ServiceFramework("IApplicationF {120, nullptr, "ExecuteProgram"}, {121, nullptr, "ClearUserChannel"}, {122, nullptr, "UnpopToUserChannel"}, + {130, &IApplicationFunctions::GetGpuErrorDetectedSystemEvent, "GetGpuErrorDetectedSystemEvent"}, {500, nullptr, "StartContinuousRecordingFlushForDebug"}, {1000, nullptr, "CreateMovieMaker"}, {1001, nullptr, "PrepareForJit"}, @@ -1026,6 +1065,10 @@ IApplicationFunctions::IApplicationFunctions() : ServiceFramework("IApplicationF // clang-format on RegisterHandlers(functions); + + auto& kernel = Core::System::GetInstance().Kernel(); + gpu_error_detected_event = Kernel::WritableEvent::CreateEventPair( + kernel, Kernel::ResetType::Manual, "IApplicationFunctions:GpuErrorDetectedSystemEvent"); } IApplicationFunctions::~IApplicationFunctions() = default; @@ -1247,6 +1290,14 @@ void IApplicationFunctions::GetSaveDataSize(Kernel::HLERequestContext& ctx) { rb.Push(size.journal); } +void IApplicationFunctions::GetGpuErrorDetectedSystemEvent(Kernel::HLERequestContext& ctx) { + LOG_WARNING(Service_AM, "(STUBBED) called"); + + IPC::ResponseBuilder rb{ctx, 2, 1}; + rb.Push(RESULT_SUCCESS); + rb.PushCopyObjects(gpu_error_detected_event.readable); +} + void InstallInterfaces(SM::ServiceManager& service_manager, std::shared_ptr<NVFlinger::NVFlinger> nvflinger, Core::System& system) { auto message_queue = std::make_shared<AppletMessageQueue>(); diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h index 49ff209593..cbc9da7b6d 100644 --- a/src/core/hle/service/am/am.h +++ b/src/core/hle/service/am/am.h @@ -133,6 +133,8 @@ private: void SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx); void SetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx); void GetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx); + void SetAutoSleepDisabled(Kernel::HLERequestContext& ctx); + void IsAutoSleepDisabled(Kernel::HLERequestContext& ctx); void GetAccumulatedSuspendedTickValue(Kernel::HLERequestContext& ctx); void GetAccumulatedSuspendedTickChangedEvent(Kernel::HLERequestContext& ctx); @@ -142,11 +144,13 @@ private: u32 idle_time_detection_extension = 0; u64 num_fatal_sections_entered = 0; + bool is_auto_sleep_disabled = false; }; class ICommonStateGetter final : public ServiceFramework<ICommonStateGetter> { public: - explicit ICommonStateGetter(std::shared_ptr<AppletMessageQueue> msg_queue); + explicit ICommonStateGetter(Core::System& system, + std::shared_ptr<AppletMessageQueue> msg_queue); ~ICommonStateGetter() override; private: @@ -168,7 +172,9 @@ private: void GetPerformanceMode(Kernel::HLERequestContext& ctx); void GetBootMode(Kernel::HLERequestContext& ctx); void GetDefaultDisplayResolution(Kernel::HLERequestContext& ctx); + void SetCpuBoostMode(Kernel::HLERequestContext& ctx); + Core::System& system; std::shared_ptr<AppletMessageQueue> msg_queue; }; @@ -236,6 +242,9 @@ private: void BeginBlockingHomeButton(Kernel::HLERequestContext& ctx); void EndBlockingHomeButton(Kernel::HLERequestContext& ctx); void EnableApplicationCrashReport(Kernel::HLERequestContext& ctx); + void GetGpuErrorDetectedSystemEvent(Kernel::HLERequestContext& ctx); + + Kernel::EventPair gpu_error_detected_event; }; class IHomeMenuFunctions final : public 
ServiceFramework<IHomeMenuFunctions> { diff --git a/src/core/hle/service/am/applet_ae.cpp b/src/core/hle/service/am/applet_ae.cpp index fe5beb8f9a..a34368c8b9 100644 --- a/src/core/hle/service/am/applet_ae.cpp +++ b/src/core/hle/service/am/applet_ae.cpp @@ -42,7 +42,7 @@ private: IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(RESULT_SUCCESS); - rb.PushIpcInterface<ICommonStateGetter>(msg_queue); + rb.PushIpcInterface<ICommonStateGetter>(system, msg_queue); } void GetSelfController(Kernel::HLERequestContext& ctx) { @@ -146,7 +146,7 @@ private: IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(RESULT_SUCCESS); - rb.PushIpcInterface<ICommonStateGetter>(msg_queue); + rb.PushIpcInterface<ICommonStateGetter>(system, msg_queue); } void GetSelfController(Kernel::HLERequestContext& ctx) { diff --git a/src/core/hle/service/am/applet_oe.cpp b/src/core/hle/service/am/applet_oe.cpp index 6e255fe95b..5d53ef113b 100644 --- a/src/core/hle/service/am/applet_oe.cpp +++ b/src/core/hle/service/am/applet_oe.cpp @@ -80,7 +80,7 @@ private: IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(RESULT_SUCCESS); - rb.PushIpcInterface<ICommonStateGetter>(msg_queue); + rb.PushIpcInterface<ICommonStateGetter>(system, msg_queue); } void GetLibraryAppletCreator(Kernel::HLERequestContext& ctx) { diff --git a/src/core/hle/service/apm/apm.cpp b/src/core/hle/service/apm/apm.cpp index f3c09bbb1a..85bbf59887 100644 --- a/src/core/hle/service/apm/apm.cpp +++ b/src/core/hle/service/apm/apm.cpp @@ -2,7 +2,6 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. -#include "common/logging/log.h" #include "core/hle/ipc_helpers.h" #include "core/hle/service/apm/apm.h" #include "core/hle/service/apm/interface.h" @@ -12,11 +11,15 @@ namespace Service::APM { Module::Module() = default; Module::~Module() = default; -void InstallInterfaces(SM::ServiceManager& service_manager) { +void InstallInterfaces(Core::System& system) { auto module_ = std::make_shared<Module>(); - std::make_shared<APM>(module_, "apm")->InstallAsService(service_manager); - std::make_shared<APM>(module_, "apm:p")->InstallAsService(service_manager); - std::make_shared<APM_Sys>()->InstallAsService(service_manager); + std::make_shared<APM>(module_, system.GetAPMController(), "apm") + ->InstallAsService(system.ServiceManager()); + std::make_shared<APM>(module_, system.GetAPMController(), "apm:p") + ->InstallAsService(system.ServiceManager()); + std::make_shared<APM>(module_, system.GetAPMController(), "apm:am") + ->InstallAsService(system.ServiceManager()); + std::make_shared<APM_Sys>(system.GetAPMController())->InstallAsService(system.ServiceManager()); } } // namespace Service::APM diff --git a/src/core/hle/service/apm/apm.h b/src/core/hle/service/apm/apm.h index 4d7d5bb7c3..cf4c2bb111 100644 --- a/src/core/hle/service/apm/apm.h +++ b/src/core/hle/service/apm/apm.h @@ -8,11 +8,6 @@ namespace Service::APM { -enum class PerformanceMode : u8 { - Handheld = 0, - Docked = 1, -}; - class Module final { public: Module(); @@ -20,6 +15,6 @@ public: }; /// Registers all AM services with the specified service manager. 
-void InstallInterfaces(SM::ServiceManager& service_manager); +void InstallInterfaces(Core::System& system); } // namespace Service::APM diff --git a/src/core/hle/service/apm/controller.cpp b/src/core/hle/service/apm/controller.cpp new file mode 100644 index 0000000000..4376612ebe --- /dev/null +++ b/src/core/hle/service/apm/controller.cpp @@ -0,0 +1,68 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/logging/log.h" +#include "core/core_timing.h" +#include "core/hle/service/apm/controller.h" +#include "core/settings.h" + +namespace Service::APM { + +constexpr PerformanceConfiguration DEFAULT_PERFORMANCE_CONFIGURATION = + PerformanceConfiguration::Config7; + +Controller::Controller(Core::Timing::CoreTiming& core_timing) + : core_timing(core_timing), configs{ + {PerformanceMode::Handheld, DEFAULT_PERFORMANCE_CONFIGURATION}, + {PerformanceMode::Docked, DEFAULT_PERFORMANCE_CONFIGURATION}, + } {} + +Controller::~Controller() = default; + +void Controller::SetPerformanceConfiguration(PerformanceMode mode, + PerformanceConfiguration config) { + static const std::map<PerformanceConfiguration, u32> PCONFIG_TO_SPEED_MAP{ + {PerformanceConfiguration::Config1, 1020}, {PerformanceConfiguration::Config2, 1020}, + {PerformanceConfiguration::Config3, 1224}, {PerformanceConfiguration::Config4, 1020}, + {PerformanceConfiguration::Config5, 1020}, {PerformanceConfiguration::Config6, 1224}, + {PerformanceConfiguration::Config7, 1020}, {PerformanceConfiguration::Config8, 1020}, + {PerformanceConfiguration::Config9, 1020}, {PerformanceConfiguration::Config10, 1020}, + {PerformanceConfiguration::Config11, 1020}, {PerformanceConfiguration::Config12, 1020}, + {PerformanceConfiguration::Config13, 1785}, {PerformanceConfiguration::Config14, 1785}, + {PerformanceConfiguration::Config15, 1020}, {PerformanceConfiguration::Config16, 1020}, + }; + + SetClockSpeed(PCONFIG_TO_SPEED_MAP.find(config)->second); + configs.insert_or_assign(mode, config); +} + +void Controller::SetFromCpuBoostMode(CpuBoostMode mode) { + constexpr std::array<PerformanceConfiguration, 3> BOOST_MODE_TO_CONFIG_MAP{{ + PerformanceConfiguration::Config7, + PerformanceConfiguration::Config13, + PerformanceConfiguration::Config15, + }}; + + SetPerformanceConfiguration(PerformanceMode::Docked, + BOOST_MODE_TO_CONFIG_MAP.at(static_cast<u32>(mode))); +} + +PerformanceMode Controller::GetCurrentPerformanceMode() { + return Settings::values.use_docked_mode ? PerformanceMode::Docked : PerformanceMode::Handheld; +} + +PerformanceConfiguration Controller::GetCurrentPerformanceConfiguration(PerformanceMode mode) { + if (configs.find(mode) == configs.end()) { + configs.insert_or_assign(mode, DEFAULT_PERFORMANCE_CONFIGURATION); + } + + return configs[mode]; +} + +void Controller::SetClockSpeed(u32 mhz) { + LOG_INFO(Service_APM, "called, mhz={:08X}", mhz); + // TODO(DarkLordZach): Actually signal core_timing to change clock speed. +} + +} // namespace Service::APM diff --git a/src/core/hle/service/apm/controller.h b/src/core/hle/service/apm/controller.h new file mode 100644 index 0000000000..8ac80eaea1 --- /dev/null +++ b/src/core/hle/service/apm/controller.h @@ -0,0 +1,70 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include <map> +#include "common/common_types.h" + +namespace Core::Timing { +class CoreTiming; +} + +namespace Service::APM { + +enum class PerformanceConfiguration : u32 { + Config1 = 0x00010000, + Config2 = 0x00010001, + Config3 = 0x00010002, + Config4 = 0x00020000, + Config5 = 0x00020001, + Config6 = 0x00020002, + Config7 = 0x00020003, + Config8 = 0x00020004, + Config9 = 0x00020005, + Config10 = 0x00020006, + Config11 = 0x92220007, + Config12 = 0x92220008, + Config13 = 0x92220009, + Config14 = 0x9222000A, + Config15 = 0x9222000B, + Config16 = 0x9222000C, +}; + +enum class CpuBoostMode : u32 { + Disabled = 0, + Full = 1, // CPU + GPU -> Config 13, 14, 15, or 16 + Partial = 2, // GPU Only -> Config 15 or 16 +}; + +enum class PerformanceMode : u8 { + Handheld = 0, + Docked = 1, +}; + +// Class to manage the state and change of the emulated system performance. +// Specifically, this deals with PerformanceMode, which corresponds to the system being docked or +// undocked, and PerformanceConfig which specifies the exact CPU, GPU, and Memory clocks to operate +// at. Additionally, this manages 'Boost Mode', which allows games to temporarily overclock the +// system during times of high load -- this simply maps to different PerformanceConfigs to use. +class Controller { +public: + Controller(Core::Timing::CoreTiming& core_timing); + ~Controller(); + + void SetPerformanceConfiguration(PerformanceMode mode, PerformanceConfiguration config); + void SetFromCpuBoostMode(CpuBoostMode mode); + + PerformanceMode GetCurrentPerformanceMode(); + PerformanceConfiguration GetCurrentPerformanceConfiguration(PerformanceMode mode); + +private: + void SetClockSpeed(u32 mhz); + + std::map<PerformanceMode, PerformanceConfiguration> configs; + + Core::Timing::CoreTiming& core_timing; +}; + +} // namespace Service::APM diff --git a/src/core/hle/service/apm/interface.cpp b/src/core/hle/service/apm/interface.cpp index d058c0245a..06f0f8edd7 100644 --- a/src/core/hle/service/apm/interface.cpp +++ b/src/core/hle/service/apm/interface.cpp @@ -5,43 +5,32 @@ #include "common/logging/log.h" #include "core/hle/ipc_helpers.h" #include "core/hle/service/apm/apm.h" +#include "core/hle/service/apm/controller.h" #include "core/hle/service/apm/interface.h" namespace Service::APM { class ISession final : public ServiceFramework<ISession> { public: - ISession() : ServiceFramework("ISession") { + ISession(Controller& controller) : ServiceFramework("ISession"), controller(controller) { static const FunctionInfo functions[] = { {0, &ISession::SetPerformanceConfiguration, "SetPerformanceConfiguration"}, {1, &ISession::GetPerformanceConfiguration, "GetPerformanceConfiguration"}, + {2, nullptr, "SetCpuOverclockEnabled"}, }; RegisterHandlers(functions); } private: - enum class PerformanceConfiguration : u32 { - Config1 = 0x00010000, - Config2 = 0x00010001, - Config3 = 0x00010002, - Config4 = 0x00020000, - Config5 = 0x00020001, - Config6 = 0x00020002, - Config7 = 0x00020003, - Config8 = 0x00020004, - Config9 = 0x00020005, - Config10 = 0x00020006, - Config11 = 0x92220007, - Config12 = 0x92220008, - }; - void SetPerformanceConfiguration(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; - auto mode = static_cast<PerformanceMode>(rp.Pop<u32>()); - u32 config = rp.Pop<u32>(); - LOG_WARNING(Service_APM, "(STUBBED) called mode={} config={}", static_cast<u32>(mode), - config); + const auto mode = rp.PopEnum<PerformanceMode>(); + const auto config = rp.PopEnum<PerformanceConfiguration>(); + LOG_DEBUG(Service_APM, 
"called mode={} config={}", static_cast<u32>(mode), + static_cast<u32>(config)); + + controller.SetPerformanceConfiguration(mode, config); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(RESULT_SUCCESS); @@ -50,20 +39,23 @@ private: void GetPerformanceConfiguration(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; - auto mode = static_cast<PerformanceMode>(rp.Pop<u32>()); - LOG_WARNING(Service_APM, "(STUBBED) called mode={}", static_cast<u32>(mode)); + const auto mode = rp.PopEnum<PerformanceMode>(); + LOG_DEBUG(Service_APM, "called mode={}", static_cast<u32>(mode)); IPC::ResponseBuilder rb{ctx, 3}; rb.Push(RESULT_SUCCESS); - rb.Push<u32>(static_cast<u32>(PerformanceConfiguration::Config1)); + rb.PushEnum(controller.GetCurrentPerformanceConfiguration(mode)); } + + Controller& controller; }; -APM::APM(std::shared_ptr<Module> apm, const char* name) - : ServiceFramework(name), apm(std::move(apm)) { +APM::APM(std::shared_ptr<Module> apm, Controller& controller, const char* name) + : ServiceFramework(name), apm(std::move(apm)), controller(controller) { static const FunctionInfo functions[] = { {0, &APM::OpenSession, "OpenSession"}, - {1, nullptr, "GetPerformanceMode"}, + {1, &APM::GetPerformanceMode, "GetPerformanceMode"}, + {6, nullptr, "IsCpuOverclockEnabled"}, }; RegisterHandlers(functions); } @@ -75,10 +67,17 @@ void APM::OpenSession(Kernel::HLERequestContext& ctx) { IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(RESULT_SUCCESS); - rb.PushIpcInterface<ISession>(); + rb.PushIpcInterface<ISession>(controller); +} + +void APM::GetPerformanceMode(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_APM, "called"); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.PushEnum(controller.GetCurrentPerformanceMode()); } -APM_Sys::APM_Sys() : ServiceFramework{"apm:sys"} { +APM_Sys::APM_Sys(Controller& controller) : ServiceFramework{"apm:sys"}, controller(controller) { // clang-format off static const FunctionInfo functions[] = { {0, nullptr, "RequestPerformanceMode"}, @@ -87,8 +86,8 @@ APM_Sys::APM_Sys() : ServiceFramework{"apm:sys"} { {3, nullptr, "GetLastThrottlingState"}, {4, nullptr, "ClearLastThrottlingState"}, {5, nullptr, "LoadAndApplySettings"}, - {6, nullptr, "SetCpuBoostMode"}, - {7, nullptr, "GetCurrentPerformanceConfiguration"}, + {6, &APM_Sys::SetCpuBoostMode, "SetCpuBoostMode"}, + {7, &APM_Sys::GetCurrentPerformanceConfiguration, "GetCurrentPerformanceConfiguration"}, }; // clang-format on @@ -102,7 +101,28 @@ void APM_Sys::GetPerformanceEvent(Kernel::HLERequestContext& ctx) { IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(RESULT_SUCCESS); - rb.PushIpcInterface<ISession>(); + rb.PushIpcInterface<ISession>(controller); +} + +void APM_Sys::SetCpuBoostMode(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto mode = rp.PopEnum<CpuBoostMode>(); + + LOG_DEBUG(Service_APM, "called, mode={:08X}", static_cast<u32>(mode)); + + controller.SetFromCpuBoostMode(mode); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(RESULT_SUCCESS); +} + +void APM_Sys::GetCurrentPerformanceConfiguration(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_APM, "called"); + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(RESULT_SUCCESS); + rb.PushEnum( + controller.GetCurrentPerformanceConfiguration(controller.GetCurrentPerformanceMode())); } } // namespace Service::APM diff --git a/src/core/hle/service/apm/interface.h b/src/core/hle/service/apm/interface.h index 773541aa4b..de1b894379 100644 --- a/src/core/hle/service/apm/interface.h +++ b/src/core/hle/service/apm/interface.h @@ -8,24 +8,34 @@ 
namespace Service::APM { +class Controller; +class Module; + class APM final : public ServiceFramework<APM> { public: - explicit APM(std::shared_ptr<Module> apm, const char* name); + explicit APM(std::shared_ptr<Module> apm, Controller& controller, const char* name); ~APM() override; private: void OpenSession(Kernel::HLERequestContext& ctx); + void GetPerformanceMode(Kernel::HLERequestContext& ctx); std::shared_ptr<Module> apm; + Controller& controller; }; class APM_Sys final : public ServiceFramework<APM_Sys> { public: - explicit APM_Sys(); + explicit APM_Sys(Controller& controller); ~APM_Sys() override; + void SetCpuBoostMode(Kernel::HLERequestContext& ctx); + private: void GetPerformanceEvent(Kernel::HLERequestContext& ctx); + void GetCurrentPerformanceConfiguration(Kernel::HLERequestContext& ctx); + + Controller& controller; }; } // namespace Service::APM diff --git a/src/core/hle/service/audio/audio.cpp b/src/core/hle/service/audio/audio.cpp index 128df7db5a..1781bec835 100644 --- a/src/core/hle/service/audio/audio.cpp +++ b/src/core/hle/service/audio/audio.cpp @@ -19,16 +19,16 @@ namespace Service::Audio { -void InstallInterfaces(SM::ServiceManager& service_manager) { +void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system) { std::make_shared<AudCtl>()->InstallAsService(service_manager); std::make_shared<AudOutA>()->InstallAsService(service_manager); - std::make_shared<AudOutU>()->InstallAsService(service_manager); + std::make_shared<AudOutU>(system)->InstallAsService(service_manager); std::make_shared<AudInA>()->InstallAsService(service_manager); std::make_shared<AudInU>()->InstallAsService(service_manager); std::make_shared<AudRecA>()->InstallAsService(service_manager); std::make_shared<AudRecU>()->InstallAsService(service_manager); std::make_shared<AudRenA>()->InstallAsService(service_manager); - std::make_shared<AudRenU>()->InstallAsService(service_manager); + std::make_shared<AudRenU>(system)->InstallAsService(service_manager); std::make_shared<CodecCtl>()->InstallAsService(service_manager); std::make_shared<HwOpus>()->InstallAsService(service_manager); diff --git a/src/core/hle/service/audio/audio.h b/src/core/hle/service/audio/audio.h index f5bd3bf5fb..b6d13912e7 100644 --- a/src/core/hle/service/audio/audio.h +++ b/src/core/hle/service/audio/audio.h @@ -4,6 +4,10 @@ #pragma once +namespace Core { +class System; +} + namespace Service::SM { class ServiceManager; } @@ -11,6 +15,6 @@ class ServiceManager; namespace Service::Audio { /// Registers all Audio services with the specified service manager. 
-void InstallInterfaces(SM::ServiceManager& service_manager); +void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system); } // namespace Service::Audio diff --git a/src/core/hle/service/audio/audout_u.cpp b/src/core/hle/service/audio/audout_u.cpp index 7db6eb08d1..fb84a8f139 100644 --- a/src/core/hle/service/audio/audout_u.cpp +++ b/src/core/hle/service/audio/audout_u.cpp @@ -40,8 +40,8 @@ enum class AudioState : u32 { class IAudioOut final : public ServiceFramework<IAudioOut> { public: - IAudioOut(AudoutParams audio_params, AudioCore::AudioOut& audio_core, std::string&& device_name, - std::string&& unique_name) + IAudioOut(Core::System& system, AudoutParams audio_params, AudioCore::AudioOut& audio_core, + std::string&& device_name, std::string&& unique_name) : ServiceFramework("IAudioOut"), audio_core(audio_core), device_name(std::move(device_name)), audio_params(audio_params) { // clang-format off @@ -65,7 +65,6 @@ public: RegisterHandlers(functions); // This is the event handle used to check if the audio buffer was released - auto& system = Core::System::GetInstance(); buffer_event = Kernel::WritableEvent::CreateEventPair( system.Kernel(), Kernel::ResetType::Manual, "IAudioOutBufferReleased"); @@ -212,6 +211,22 @@ private: Kernel::EventPair buffer_event; }; +AudOutU::AudOutU(Core::System& system_) : ServiceFramework("audout:u"), system{system_} { + // clang-format off + static const FunctionInfo functions[] = { + {0, &AudOutU::ListAudioOutsImpl, "ListAudioOuts"}, + {1, &AudOutU::OpenAudioOutImpl, "OpenAudioOut"}, + {2, &AudOutU::ListAudioOutsImpl, "ListAudioOutsAuto"}, + {3, &AudOutU::OpenAudioOutImpl, "OpenAudioOutAuto"}, + }; + // clang-format on + + RegisterHandlers(functions); + audio_core = std::make_unique<AudioCore::AudioOut>(); +} + +AudOutU::~AudOutU() = default; + void AudOutU::ListAudioOutsImpl(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_Audio, "called"); @@ -248,7 +263,7 @@ void AudOutU::OpenAudioOutImpl(Kernel::HLERequestContext& ctx) { std::string unique_name{fmt::format("{}-{}", device_name, audio_out_interfaces.size())}; auto audio_out_interface = std::make_shared<IAudioOut>( - params, *audio_core, std::move(device_name), std::move(unique_name)); + system, params, *audio_core, std::move(device_name), std::move(unique_name)); IPC::ResponseBuilder rb{ctx, 6, 0, 1}; rb.Push(RESULT_SUCCESS); @@ -256,20 +271,9 @@ void AudOutU::OpenAudioOutImpl(Kernel::HLERequestContext& ctx) { rb.Push<u32>(params.channel_count); rb.Push<u32>(static_cast<u32>(AudioCore::Codec::PcmFormat::Int16)); rb.Push<u32>(static_cast<u32>(AudioState::Stopped)); - rb.PushIpcInterface<Audio::IAudioOut>(audio_out_interface); + rb.PushIpcInterface<IAudioOut>(audio_out_interface); audio_out_interfaces.push_back(std::move(audio_out_interface)); } -AudOutU::AudOutU() : ServiceFramework("audout:u") { - static const FunctionInfo functions[] = {{0, &AudOutU::ListAudioOutsImpl, "ListAudioOuts"}, - {1, &AudOutU::OpenAudioOutImpl, "OpenAudioOut"}, - {2, &AudOutU::ListAudioOutsImpl, "ListAudioOutsAuto"}, - {3, &AudOutU::OpenAudioOutImpl, "OpenAudioOutAuto"}}; - RegisterHandlers(functions); - audio_core = std::make_unique<AudioCore::AudioOut>(); -} - -AudOutU::~AudOutU() = default; - } // namespace Service::Audio diff --git a/src/core/hle/service/audio/audout_u.h b/src/core/hle/service/audio/audout_u.h index aed4c43b27..c9f532ccd6 100644 --- a/src/core/hle/service/audio/audout_u.h +++ b/src/core/hle/service/audio/audout_u.h @@ -11,6 +11,10 @@ namespace AudioCore { class AudioOut; } 
+namespace Core { +class System; +} + namespace Kernel { class HLERequestContext; } @@ -21,15 +25,17 @@ class IAudioOut; class AudOutU final : public ServiceFramework<AudOutU> { public: - AudOutU(); + explicit AudOutU(Core::System& system_); ~AudOutU() override; private: + void ListAudioOutsImpl(Kernel::HLERequestContext& ctx); + void OpenAudioOutImpl(Kernel::HLERequestContext& ctx); + std::vector<std::shared_ptr<IAudioOut>> audio_out_interfaces; std::unique_ptr<AudioCore::AudioOut> audio_core; - void ListAudioOutsImpl(Kernel::HLERequestContext& ctx); - void OpenAudioOutImpl(Kernel::HLERequestContext& ctx); + Core::System& system; }; } // namespace Service::Audio diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp index 3711e1ea18..f162249ede 100644 --- a/src/core/hle/service/audio/audren_u.cpp +++ b/src/core/hle/service/audio/audren_u.cpp @@ -5,6 +5,7 @@ #include <algorithm> #include <array> #include <memory> +#include <string_view> #include "audio_core/audio_renderer.h" #include "common/alignment.h" @@ -25,7 +26,8 @@ namespace Service::Audio { class IAudioRenderer final : public ServiceFramework<IAudioRenderer> { public: - explicit IAudioRenderer(AudioCore::AudioRendererParameter audren_params) + explicit IAudioRenderer(Core::System& system, AudioCore::AudioRendererParameter audren_params, + const std::size_t instance_number) : ServiceFramework("IAudioRenderer") { // clang-format off static const FunctionInfo functions[] = { @@ -45,11 +47,10 @@ public: // clang-format on RegisterHandlers(functions); - auto& system = Core::System::GetInstance(); system_event = Kernel::WritableEvent::CreateEventPair( system.Kernel(), Kernel::ResetType::Manual, "IAudioRenderer:SystemEvent"); - renderer = std::make_unique<AudioCore::AudioRenderer>(system.CoreTiming(), audren_params, - system_event.writable); + renderer = std::make_unique<AudioCore::AudioRenderer>( + system.CoreTiming(), audren_params, system_event.writable, instance_number); } private: @@ -159,28 +160,33 @@ private: class IAudioDevice final : public ServiceFramework<IAudioDevice> { public: - IAudioDevice() : ServiceFramework("IAudioDevice") { + explicit IAudioDevice(Core::System& system, u32_le revision_num) + : ServiceFramework("IAudioDevice"), revision{revision_num} { static const FunctionInfo functions[] = { {0, &IAudioDevice::ListAudioDeviceName, "ListAudioDeviceName"}, {1, &IAudioDevice::SetAudioDeviceOutputVolume, "SetAudioDeviceOutputVolume"}, - {2, nullptr, "GetAudioDeviceOutputVolume"}, + {2, &IAudioDevice::GetAudioDeviceOutputVolume, "GetAudioDeviceOutputVolume"}, {3, &IAudioDevice::GetActiveAudioDeviceName, "GetActiveAudioDeviceName"}, {4, &IAudioDevice::QueryAudioDeviceSystemEvent, "QueryAudioDeviceSystemEvent"}, {5, &IAudioDevice::GetActiveChannelCount, "GetActiveChannelCount"}, {6, &IAudioDevice::ListAudioDeviceName, "ListAudioDeviceNameAuto"}, {7, &IAudioDevice::SetAudioDeviceOutputVolume, "SetAudioDeviceOutputVolumeAuto"}, - {8, nullptr, "GetAudioDeviceOutputVolumeAuto"}, + {8, &IAudioDevice::GetAudioDeviceOutputVolume, "GetAudioDeviceOutputVolumeAuto"}, {10, &IAudioDevice::GetActiveAudioDeviceName, "GetActiveAudioDeviceNameAuto"}, - {11, nullptr, "QueryAudioDeviceInputEvent"}, + {11, &IAudioDevice::QueryAudioDeviceInputEvent, "QueryAudioDeviceInputEvent"}, {12, &IAudioDevice::QueryAudioDeviceOutputEvent, "QueryAudioDeviceOutputEvent"}, {13, nullptr, "GetAudioSystemMasterVolumeSetting"}, }; RegisterHandlers(functions); - auto& kernel = Core::System::GetInstance().Kernel(); + auto& 
kernel = system.Kernel(); buffer_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Automatic, "IAudioOutBufferReleasedEvent"); + // Should be similar to audio_output_device_switch_event + audio_input_device_switch_event = Kernel::WritableEvent::CreateEventPair( + kernel, Kernel::ResetType::Automatic, "IAudioDevice:AudioInputDeviceSwitchedEvent"); + // Should only be signalled when an audio output device has been changed, example: speaker // to headset audio_output_device_switch_event = Kernel::WritableEvent::CreateEventPair( @@ -188,15 +194,47 @@ public: } private: + using AudioDeviceName = std::array<char, 256>; + static constexpr std::array<std::string_view, 4> audio_device_names{{ + "AudioStereoJackOutput", + "AudioBuiltInSpeakerOutput", + "AudioTvOutput", + "AudioUsbDeviceOutput", + }}; + enum class DeviceType { + AHUBHeadphones, + AHUBSpeakers, + HDA, + USBOutput, + }; + void ListAudioDeviceName(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_Audio, "(STUBBED) called"); + LOG_DEBUG(Service_Audio, "called"); + + const bool usb_output_supported = + IsFeatureSupported(AudioFeatures::AudioUSBDeviceOutput, revision); + const std::size_t count = ctx.GetWriteBufferSize() / sizeof(AudioDeviceName); + + std::vector<AudioDeviceName> name_buffer; + name_buffer.reserve(audio_device_names.size()); + + for (std::size_t i = 0; i < count && i < audio_device_names.size(); i++) { + const auto type = static_cast<DeviceType>(i); + + if (!usb_output_supported && type == DeviceType::USBOutput) { + continue; + } + + const auto& device_name = audio_device_names[i]; + auto& entry = name_buffer.emplace_back(); + device_name.copy(entry.data(), device_name.size()); + } - constexpr std::array<char, 15> audio_interface{{"AudioInterface"}}; - ctx.WriteBuffer(audio_interface); + ctx.WriteBuffer(name_buffer); IPC::ResponseBuilder rb{ctx, 3}; rb.Push(RESULT_SUCCESS); - rb.Push<u32>(1); + rb.Push(static_cast<u32>(name_buffer.size())); } void SetAudioDeviceOutputVolume(Kernel::HLERequestContext& ctx) { @@ -212,15 +250,32 @@ private: rb.Push(RESULT_SUCCESS); } + void GetAudioDeviceOutputVolume(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + + const auto device_name_buffer = ctx.ReadBuffer(); + const std::string name = Common::StringFromBuffer(device_name_buffer); + + LOG_WARNING(Service_Audio, "(STUBBED) called. name={}", name); + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(RESULT_SUCCESS); + rb.Push(1.0f); + } + void GetActiveAudioDeviceName(Kernel::HLERequestContext& ctx) { LOG_WARNING(Service_Audio, "(STUBBED) called"); - constexpr std::array<char, 12> audio_interface{{"AudioDevice"}}; - ctx.WriteBuffer(audio_interface); + // Currently set to always be TV audio output. 
+ const auto& device_name = audio_device_names[2]; - IPC::ResponseBuilder rb{ctx, 3}; + AudioDeviceName out_device_name{}; + device_name.copy(out_device_name.data(), device_name.size()); + + ctx.WriteBuffer(out_device_name); + + IPC::ResponseBuilder rb{ctx, 2}; rb.Push(RESULT_SUCCESS); - rb.Push<u32>(1); } void QueryAudioDeviceSystemEvent(Kernel::HLERequestContext& ctx) { @@ -241,6 +296,15 @@ private: rb.Push<u32>(1); } + // Should be similar to QueryAudioDeviceOutputEvent + void QueryAudioDeviceInputEvent(Kernel::HLERequestContext& ctx) { + LOG_WARNING(Service_Audio, "(STUBBED) called"); + + IPC::ResponseBuilder rb{ctx, 2, 1}; + rb.Push(RESULT_SUCCESS); + rb.PushCopyObjects(audio_input_device_switch_event.readable); + } + void QueryAudioDeviceOutputEvent(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_Audio, "called"); @@ -249,12 +313,14 @@ private: rb.PushCopyObjects(audio_output_device_switch_event.readable); } + u32_le revision = 0; Kernel::EventPair buffer_event; + Kernel::EventPair audio_input_device_switch_event; Kernel::EventPair audio_output_device_switch_event; }; // namespace Audio -AudRenU::AudRenU() : ServiceFramework("audren:u") { +AudRenU::AudRenU(Core::System& system_) : ServiceFramework("audren:u"), system{system_} { // clang-format off static const FunctionInfo functions[] = { {0, &AudRenU::OpenAudioRenderer, "OpenAudioRenderer"}, @@ -327,7 +393,7 @@ void AudRenU::GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx) { }; // Calculates the portion of the size related to the mix data (and the sorting thereof). - const auto calculate_mix_info_size = [this](const AudioCore::AudioRendererParameter& params) { + const auto calculate_mix_info_size = [](const AudioCore::AudioRendererParameter& params) { // The size of the mixing info data structure. constexpr u64 mix_info_size = 0x940; @@ -399,7 +465,7 @@ void AudRenU::GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx) { // Calculates the part of the size related to the splitter context. const auto calculate_splitter_context_size = - [this](const AudioCore::AudioRendererParameter& params) -> u64 { + [](const AudioCore::AudioRendererParameter& params) -> u64 { if (!IsFeatureSupported(AudioFeatures::Splitter, params.revision)) { return 0; } @@ -446,7 +512,7 @@ void AudRenU::GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx) { }; // Calculates the part of the size related to performance statistics. - const auto calculate_perf_size = [this](const AudioCore::AudioRendererParameter& params) { + const auto calculate_perf_size = [](const AudioCore::AudioRendererParameter& params) { // Extra size value appended to the end of the calculation. constexpr u64 appended = 128; @@ -473,78 +539,76 @@ void AudRenU::GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx) { }; // Calculates the part of the size that relates to the audio command buffer. 
- const auto calculate_command_buffer_size = - [this](const AudioCore::AudioRendererParameter& params) { - constexpr u64 alignment = (buffer_alignment_size - 1) * 2; + const auto calculate_command_buffer_size = [](const AudioCore::AudioRendererParameter& params) { + constexpr u64 alignment = (buffer_alignment_size - 1) * 2; - if (!IsFeatureSupported(AudioFeatures::VariadicCommandBuffer, params.revision)) { - constexpr u64 command_buffer_size = 0x18000; + if (!IsFeatureSupported(AudioFeatures::VariadicCommandBuffer, params.revision)) { + constexpr u64 command_buffer_size = 0x18000; - return command_buffer_size + alignment; - } + return command_buffer_size + alignment; + } - // When the variadic command buffer is supported, this means - // the command generator for the audio renderer can issue commands - // that are (as one would expect), variable in size. So what we need to do - // is determine the maximum possible size for a few command data structures - // then multiply them by the amount of present commands indicated by the given - // respective audio parameters. + // When the variadic command buffer is supported, this means + // the command generator for the audio renderer can issue commands + // that are (as one would expect), variable in size. So what we need to do + // is determine the maximum possible size for a few command data structures + // then multiply them by the amount of present commands indicated by the given + // respective audio parameters. - constexpr u64 max_biquad_filters = 2; - constexpr u64 max_mix_buffers = 24; + constexpr u64 max_biquad_filters = 2; + constexpr u64 max_mix_buffers = 24; - constexpr u64 biquad_filter_command_size = 0x2C; + constexpr u64 biquad_filter_command_size = 0x2C; - constexpr u64 depop_mix_command_size = 0x24; - constexpr u64 depop_setup_command_size = 0x50; + constexpr u64 depop_mix_command_size = 0x24; + constexpr u64 depop_setup_command_size = 0x50; - constexpr u64 effect_command_max_size = 0x540; + constexpr u64 effect_command_max_size = 0x540; - constexpr u64 mix_command_size = 0x1C; - constexpr u64 mix_ramp_command_size = 0x24; - constexpr u64 mix_ramp_grouped_command_size = 0x13C; + constexpr u64 mix_command_size = 0x1C; + constexpr u64 mix_ramp_command_size = 0x24; + constexpr u64 mix_ramp_grouped_command_size = 0x13C; - constexpr u64 perf_command_size = 0x28; + constexpr u64 perf_command_size = 0x28; - constexpr u64 sink_command_size = 0x130; + constexpr u64 sink_command_size = 0x130; - constexpr u64 submix_command_max_size = - depop_mix_command_size + (mix_command_size * max_mix_buffers) * max_mix_buffers; + constexpr u64 submix_command_max_size = + depop_mix_command_size + (mix_command_size * max_mix_buffers) * max_mix_buffers; - constexpr u64 volume_command_size = 0x1C; - constexpr u64 volume_ramp_command_size = 0x20; + constexpr u64 volume_command_size = 0x1C; + constexpr u64 volume_ramp_command_size = 0x20; - constexpr u64 voice_biquad_filter_command_size = - biquad_filter_command_size * max_biquad_filters; - constexpr u64 voice_data_command_size = 0x9C; - const u64 voice_command_max_size = - (params.splitter_count * depop_setup_command_size) + - (voice_data_command_size + voice_biquad_filter_command_size + - volume_ramp_command_size + mix_ramp_grouped_command_size); + constexpr u64 voice_biquad_filter_command_size = + biquad_filter_command_size * max_biquad_filters; + constexpr u64 voice_data_command_size = 0x9C; + const u64 voice_command_max_size = + (params.splitter_count * depop_setup_command_size) + + 
(voice_data_command_size + voice_biquad_filter_command_size + volume_ramp_command_size + + mix_ramp_grouped_command_size); - // Now calculate the individual elements that comprise the size and add them together. - const u64 effect_commands_size = params.effect_count * effect_command_max_size; + // Now calculate the individual elements that comprise the size and add them together. + const u64 effect_commands_size = params.effect_count * effect_command_max_size; - const u64 final_mix_commands_size = - depop_mix_command_size + volume_command_size * max_mix_buffers; + const u64 final_mix_commands_size = + depop_mix_command_size + volume_command_size * max_mix_buffers; - const u64 perf_commands_size = - perf_command_size * - (CalculateNumPerformanceEntries(params) + max_perf_detail_entries); + const u64 perf_commands_size = + perf_command_size * (CalculateNumPerformanceEntries(params) + max_perf_detail_entries); - const u64 sink_commands_size = params.sink_count * sink_command_size; + const u64 sink_commands_size = params.sink_count * sink_command_size; - const u64 splitter_commands_size = - params.num_splitter_send_channels * max_mix_buffers * mix_ramp_command_size; + const u64 splitter_commands_size = + params.num_splitter_send_channels * max_mix_buffers * mix_ramp_command_size; - const u64 submix_commands_size = params.submix_count * submix_command_max_size; + const u64 submix_commands_size = params.submix_count * submix_command_max_size; - const u64 voice_commands_size = params.voice_count * voice_command_max_size; + const u64 voice_commands_size = params.voice_count * voice_command_max_size; - return effect_commands_size + final_mix_commands_size + perf_commands_size + - sink_commands_size + splitter_commands_size + submix_commands_size + - voice_commands_size + alignment; - }; + return effect_commands_size + final_mix_commands_size + perf_commands_size + + sink_commands_size + splitter_commands_size + submix_commands_size + + voice_commands_size + alignment; + }; IPC::RequestParser rp{ctx}; const auto params = rp.PopRaw<AudioCore::AudioRendererParameter>(); @@ -577,12 +641,16 @@ void AudRenU::GetAudioRendererWorkBufferSize(Kernel::HLERequestContext& ctx) { } void AudRenU::GetAudioDeviceService(Kernel::HLERequestContext& ctx) { - LOG_DEBUG(Service_Audio, "called"); + IPC::RequestParser rp{ctx}; + const u64 aruid = rp.Pop<u64>(); - IPC::ResponseBuilder rb{ctx, 2, 0, 1}; + LOG_DEBUG(Service_Audio, "called. aruid={:016X}", aruid); + // Revisionless variant of GetAudioDeviceServiceWithRevisionInfo that + // always assumes the initial release revision (REV1). + IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(RESULT_SUCCESS); - rb.PushIpcInterface<Audio::IAudioDevice>(); + rb.PushIpcInterface<IAudioDevice>(system, Common::MakeMagic('R', 'E', 'V', '1')); } void AudRenU::OpenAudioRendererAuto(Kernel::HLERequestContext& ctx) { @@ -592,13 +660,19 @@ void AudRenU::OpenAudioRendererAuto(Kernel::HLERequestContext& ctx) { } void AudRenU::GetAudioDeviceServiceWithRevisionInfo(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_Audio, "(STUBBED) called"); + struct Parameters { + u32 revision; + u64 aruid; + }; - IPC::ResponseBuilder rb{ctx, 2, 0, 1}; + IPC::RequestParser rp{ctx}; + const auto [revision, aruid] = rp.PopRaw<Parameters>(); + LOG_DEBUG(Service_Audio, "called. 
revision={:08X}, aruid={:016X}", revision, aruid); + + IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(RESULT_SUCCESS); - rb.PushIpcInterface<Audio::IAudioDevice>(); // TODO(ogniK): Figure out what is different - // based on the current revision + rb.PushIpcInterface<IAudioDevice>(system, revision); } void AudRenU::OpenAudioRendererImpl(Kernel::HLERequestContext& ctx) { @@ -607,14 +681,16 @@ void AudRenU::OpenAudioRendererImpl(Kernel::HLERequestContext& ctx) { IPC::ResponseBuilder rb{ctx, 2, 0, 1}; rb.Push(RESULT_SUCCESS); - rb.PushIpcInterface<IAudioRenderer>(params); + rb.PushIpcInterface<IAudioRenderer>(system, params, audren_instance_count++); } -bool AudRenU::IsFeatureSupported(AudioFeatures feature, u32_le revision) const { +bool IsFeatureSupported(AudioFeatures feature, u32_le revision) { // Byte swap const u32_be version_num = revision - Common::MakeMagic('R', 'E', 'V', '0'); switch (feature) { + case AudioFeatures::AudioUSBDeviceOutput: + return version_num >= 4U; case AudioFeatures::Splitter: return version_num >= 2U; case AudioFeatures::PerformanceMetricsVersion2: diff --git a/src/core/hle/service/audio/audren_u.h b/src/core/hle/service/audio/audren_u.h index 1d3c8df619..4e0ccc792c 100644 --- a/src/core/hle/service/audio/audren_u.h +++ b/src/core/hle/service/audio/audren_u.h @@ -6,6 +6,10 @@ #include "core/hle/service/service.h" +namespace Core { +class System; +} + namespace Kernel { class HLERequestContext; } @@ -14,7 +18,7 @@ namespace Service::Audio { class AudRenU final : public ServiceFramework<AudRenU> { public: - explicit AudRenU(); + explicit AudRenU(Core::System& system_); ~AudRenU() override; private: @@ -26,13 +30,19 @@ private: void OpenAudioRendererImpl(Kernel::HLERequestContext& ctx); - enum class AudioFeatures : u32 { - Splitter, - PerformanceMetricsVersion2, - VariadicCommandBuffer, - }; + std::size_t audren_instance_count = 0; + Core::System& system; +}; - bool IsFeatureSupported(AudioFeatures feature, u32_le revision) const; +// Describes a particular audio feature that may be supported in a particular revision. +enum class AudioFeatures : u32 { + AudioUSBDeviceOutput, + Splitter, + PerformanceMetricsVersion2, + VariadicCommandBuffer, }; +// Tests if a particular audio feature is supported with a given audio revision. 
+bool IsFeatureSupported(AudioFeatures feature, u32_le revision); + } // namespace Service::Audio diff --git a/src/core/hle/service/fatal/fatal.cpp b/src/core/hle/service/fatal/fatal.cpp index fe49c2161d..01fa06ad3c 100644 --- a/src/core/hle/service/fatal/fatal.cpp +++ b/src/core/hle/service/fatal/fatal.cpp @@ -5,7 +5,7 @@ #include <array> #include <cstring> #include <ctime> -#include <fmt/time.h> +#include <fmt/chrono.h> #include "common/file_util.h" #include "common/logging/log.h" #include "common/scm_rev.h" diff --git a/src/core/hle/service/filesystem/filesystem.cpp b/src/core/hle/service/filesystem/filesystem.cpp index 1ebfeb4bf4..8ce110dd11 100644 --- a/src/core/hle/service/filesystem/filesystem.cpp +++ b/src/core/hle/service/filesystem/filesystem.cpp @@ -472,12 +472,12 @@ void CreateFactories(FileSys::VfsFilesystem& vfs, bool overwrite) { } } -void InstallInterfaces(SM::ServiceManager& service_manager, FileSys::VfsFilesystem& vfs) { +void InstallInterfaces(Core::System& system) { romfs_factory = nullptr; - CreateFactories(vfs, false); - std::make_shared<FSP_LDR>()->InstallAsService(service_manager); - std::make_shared<FSP_PR>()->InstallAsService(service_manager); - std::make_shared<FSP_SRV>()->InstallAsService(service_manager); + CreateFactories(*system.GetFilesystem(), false); + std::make_shared<FSP_LDR>()->InstallAsService(system.ServiceManager()); + std::make_shared<FSP_PR>()->InstallAsService(system.ServiceManager()); + std::make_shared<FSP_SRV>(system.GetReporter())->InstallAsService(system.ServiceManager()); } } // namespace Service::FileSystem diff --git a/src/core/hle/service/filesystem/filesystem.h b/src/core/hle/service/filesystem/filesystem.h index 6481f237c4..3849dd89eb 100644 --- a/src/core/hle/service/filesystem/filesystem.h +++ b/src/core/hle/service/filesystem/filesystem.h @@ -65,7 +65,7 @@ FileSys::VirtualDir GetModificationDumpRoot(u64 title_id); // above is called. void CreateFactories(FileSys::VfsFilesystem& vfs, bool overwrite = true); -void InstallInterfaces(SM::ServiceManager& service_manager, FileSys::VfsFilesystem& vfs); +void InstallInterfaces(Core::System& system); // A class that wraps a VfsDirectory with methods that return ResultVal and ResultCode instead of // pointers and booleans. 
This makes using a VfsDirectory with switch services much easier and diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp index e7df8fd98b..d3cd46a9b8 100644 --- a/src/core/hle/service/filesystem/fsp_srv.cpp +++ b/src/core/hle/service/filesystem/fsp_srv.cpp @@ -26,6 +26,7 @@ #include "core/hle/kernel/process.h" #include "core/hle/service/filesystem/filesystem.h" #include "core/hle/service/filesystem/fsp_srv.h" +#include "core/reporter.h" namespace Service::FileSystem { @@ -613,7 +614,7 @@ private: u64 next_entry_index = 0; }; -FSP_SRV::FSP_SRV() : ServiceFramework("fsp-srv") { +FSP_SRV::FSP_SRV(const Core::Reporter& reporter) : ServiceFramework("fsp-srv"), reporter(reporter) { // clang-format off static const FunctionInfo functions[] = { {0, nullptr, "OpenFileSystem"}, @@ -710,14 +711,14 @@ FSP_SRV::FSP_SRV() : ServiceFramework("fsp-srv") { {1001, nullptr, "SetSaveDataSize"}, {1002, nullptr, "SetSaveDataRootPath"}, {1003, nullptr, "DisableAutoSaveDataCreation"}, - {1004, nullptr, "SetGlobalAccessLogMode"}, + {1004, &FSP_SRV::SetGlobalAccessLogMode, "SetGlobalAccessLogMode"}, {1005, &FSP_SRV::GetGlobalAccessLogMode, "GetGlobalAccessLogMode"}, - {1006, nullptr, "OutputAccessLogToSdCard"}, + {1006, &FSP_SRV::OutputAccessLogToSdCard, "OutputAccessLogToSdCard"}, {1007, nullptr, "RegisterUpdatePartition"}, {1008, nullptr, "OpenRegisteredUpdatePartition"}, {1009, nullptr, "GetAndClearMemoryReportInfo"}, {1010, nullptr, "SetDataStorageRedirectTarget"}, - {1011, nullptr, "OutputAccessLogToSdCard2"}, + {1011, &FSP_SRV::GetAccessLogVersionInfo, "GetAccessLogVersionInfo"}, {1100, nullptr, "OverrideSaveDataTransferTokenSignVerificationKey"}, {1110, nullptr, "CorruptSaveDataFileSystemBySaveDataSpaceId2"}, {1200, nullptr, "OpenMultiCommitManager"}, @@ -814,21 +815,22 @@ void FSP_SRV::OpenSaveDataInfoReaderBySaveDataSpaceId(Kernel::HLERequestContext& rb.PushIpcInterface<ISaveDataInfoReader>(std::make_shared<ISaveDataInfoReader>(space)); } -void FSP_SRV::GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) { - LOG_WARNING(Service_FS, "(STUBBED) called"); +void FSP_SRV::SetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + log_mode = rp.PopEnum<LogMode>(); - enum class LogMode : u32 { - Off, - Log, - RedirectToSdCard, - LogToSdCard = Log | RedirectToSdCard, - }; + LOG_DEBUG(Service_FS, "called, log_mode={:08X}", static_cast<u32>(log_mode)); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(RESULT_SUCCESS); +} + +void FSP_SRV::GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_FS, "called"); - // Given we always want to receive logging information, - // we always specify logging as enabled. 
IPC::ResponseBuilder rb{ctx, 3}; rb.Push(RESULT_SUCCESS); - rb.PushEnum(LogMode::Log); + rb.PushEnum(log_mode); } void FSP_SRV::OpenDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx) { @@ -902,4 +904,26 @@ void FSP_SRV::OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ct rb.Push(FileSys::ERROR_ENTITY_NOT_FOUND); } +void FSP_SRV::OutputAccessLogToSdCard(Kernel::HLERequestContext& ctx) { + const auto raw = ctx.ReadBuffer(); + auto log = Common::StringFromFixedZeroTerminatedBuffer( + reinterpret_cast<const char*>(raw.data()), raw.size()); + + LOG_DEBUG(Service_FS, "called, log='{}'", log); + + reporter.SaveFilesystemAccessReport(log_mode, std::move(log)); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(RESULT_SUCCESS); +} + +void FSP_SRV::GetAccessLogVersionInfo(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_FS, "called"); + + IPC::ResponseBuilder rb{ctx, 4}; + rb.Push(RESULT_SUCCESS); + rb.PushEnum(AccessLogVersion::Latest); + rb.Push(access_log_program_index); +} + } // namespace Service::FileSystem diff --git a/src/core/hle/service/filesystem/fsp_srv.h b/src/core/hle/service/filesystem/fsp_srv.h index d7572ba7a5..b5486a1932 100644 --- a/src/core/hle/service/filesystem/fsp_srv.h +++ b/src/core/hle/service/filesystem/fsp_srv.h @@ -7,15 +7,32 @@ #include <memory> #include "core/hle/service/service.h" +namespace Core { +class Reporter; +} + namespace FileSys { class FileSystemBackend; } namespace Service::FileSystem { +enum class AccessLogVersion : u32 { + V7_0_0 = 2, + + Latest = V7_0_0, +}; + +enum class LogMode : u32 { + Off, + Log, + RedirectToSdCard, + LogToSdCard = Log | RedirectToSdCard, +}; + class FSP_SRV final : public ServiceFramework<FSP_SRV> { public: - explicit FSP_SRV(); + explicit FSP_SRV(const Core::Reporter& reporter); ~FSP_SRV() override; private: @@ -26,13 +43,20 @@ private: void OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx); void OpenReadOnlySaveDataFileSystem(Kernel::HLERequestContext& ctx); void OpenSaveDataInfoReaderBySaveDataSpaceId(Kernel::HLERequestContext& ctx); + void SetGlobalAccessLogMode(Kernel::HLERequestContext& ctx); void GetGlobalAccessLogMode(Kernel::HLERequestContext& ctx); void OpenDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx); void OpenDataStorageByDataId(Kernel::HLERequestContext& ctx); void OpenPatchDataStorageByCurrentProcess(Kernel::HLERequestContext& ctx); + void OutputAccessLogToSdCard(Kernel::HLERequestContext& ctx); + void GetAccessLogVersionInfo(Kernel::HLERequestContext& ctx); FileSys::VirtualFile romfs; u64 current_process_id = 0; + u32 access_log_program_index = 0; + LogMode log_mode = LogMode::LogToSdCard; + + const Core::Reporter& reporter; }; } // namespace Service::FileSystem diff --git a/src/core/hle/service/friend/friend.cpp b/src/core/hle/service/friend/friend.cpp index dec541f2ee..d1ec12ef92 100644 --- a/src/core/hle/service/friend/friend.cpp +++ b/src/core/hle/service/friend/friend.cpp @@ -22,7 +22,7 @@ public: {0, nullptr, "GetCompletionEvent"}, {1, nullptr, "Cancel"}, {10100, nullptr, "GetFriendListIds"}, - {10101, nullptr, "GetFriendList"}, + {10101, &IFriendService::GetFriendList, "GetFriendList"}, {10102, nullptr, "UpdateFriendInfo"}, {10110, nullptr, "GetFriendProfileImage"}, {10200, nullptr, "SendFriendRequestForApplication"}, @@ -99,6 +99,23 @@ public: } private: + enum class PresenceFilter : u32 { + None = 0, + Online = 1, + OnlinePlay = 2, + OnlineOrOnlinePlay = 3, + }; + + struct SizedFriendFilter { + PresenceFilter presence; + u8 is_favorite; + u8 same_app; + u8 
same_app_played; + u8 arbitary_app_played; + u64 group_id; + }; + static_assert(sizeof(SizedFriendFilter) == 0x10, "SizedFriendFilter is an invalid size"); + void DeclareCloseOnlinePlaySession(Kernel::HLERequestContext& ctx) { // Stub used by Splatoon 2 LOG_WARNING(Service_ACC, "(STUBBED) called"); @@ -112,6 +129,22 @@ private: IPC::ResponseBuilder rb{ctx, 2}; rb.Push(RESULT_SUCCESS); } + + void GetFriendList(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto friend_offset = rp.Pop<u32>(); + const auto uuid = rp.PopRaw<Common::UUID>(); + [[maybe_unused]] const auto filter = rp.PopRaw<SizedFriendFilter>(); + const auto pid = rp.Pop<u64>(); + LOG_WARNING(Service_ACC, "(STUBBED) called, offset={}, uuid={}, pid={}", friend_offset, + uuid.Format(), pid); + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(RESULT_SUCCESS); + + rb.Push<u32>(0); // Friend count + // TODO(ogniK): Return a buffer of u64s which are the "NetworkServiceAccountId" + } }; class INotificationService final : public ServiceFramework<INotificationService> { diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp index fdd6d79a26..e47fe81885 100644 --- a/src/core/hle/service/hid/controllers/npad.cpp +++ b/src/core/hle/service/hid/controllers/npad.cpp @@ -548,6 +548,37 @@ void Controller_NPad::DisconnectNPad(u32 npad_id) { connected_controllers[NPadIdToIndex(npad_id)].is_connected = false; } +void Controller_NPad::StartLRAssignmentMode() { + // Nothing internally is used for lr assignment mode. Since we have the ability to set the + // controller types from boot, it doesn't really matter about showing a selection screen + is_in_lr_assignment_mode = true; +} + +void Controller_NPad::StopLRAssignmentMode() { + is_in_lr_assignment_mode = false; +} + +bool Controller_NPad::SwapNpadAssignment(u32 npad_id_1, u32 npad_id_2) { + if (npad_id_1 == NPAD_HANDHELD || npad_id_2 == NPAD_HANDHELD || npad_id_1 == NPAD_UNKNOWN || + npad_id_2 == NPAD_UNKNOWN) { + return true; + } + const auto npad_index_1 = NPadIdToIndex(npad_id_1); + const auto npad_index_2 = NPadIdToIndex(npad_id_2); + + if (!IsControllerSupported(connected_controllers[npad_index_1].type) || + !IsControllerSupported(connected_controllers[npad_index_2].type)) { + return false; + } + + std::swap(connected_controllers[npad_index_1].type, connected_controllers[npad_index_2].type); + + InitNewlyAddedControler(npad_index_1); + InitNewlyAddedControler(npad_index_2); + + return true; +} + bool Controller_NPad::IsControllerSupported(NPadControllerType controller) { if (controller == NPadControllerType::Handheld) { // Handheld is not even a supported type, lets stop here @@ -605,10 +636,15 @@ Controller_NPad::LedPattern Controller_NPad::GetLedPattern(u32 npad_id) { return LedPattern{0, 0, 0, 0}; }; } + void Controller_NPad::SetVibrationEnabled(bool can_vibrate) { can_controllers_vibrate = can_vibrate; } +bool Controller_NPad::IsVibrationEnabled() const { + return can_controllers_vibrate; +} + void Controller_NPad::ClearAllConnectedControllers() { for (auto& controller : connected_controllers) { if (controller.is_connected && controller.type != NPadControllerType::None) { @@ -617,6 +653,7 @@ void Controller_NPad::ClearAllConnectedControllers() { } } } + void Controller_NPad::DisconnectAllConnectedControllers() { std::for_each(connected_controllers.begin(), connected_controllers.end(), [](ControllerHolder& controller) { controller.is_connected = false; }); diff --git a/src/core/hle/service/hid/controllers/npad.h 
b/src/core/hle/service/hid/controllers/npad.h index 4ff50b3cd6..f28b368066 100644 --- a/src/core/hle/service/hid/controllers/npad.h +++ b/src/core/hle/service/hid/controllers/npad.h @@ -119,11 +119,16 @@ public: void DisconnectNPad(u32 npad_id); LedPattern GetLedPattern(u32 npad_id); void SetVibrationEnabled(bool can_vibrate); + bool IsVibrationEnabled() const; void ClearAllConnectedControllers(); void DisconnectAllConnectedControllers(); void ConnectAllDisconnectedControllers(); void ClearAllControllers(); + void StartLRAssignmentMode(); + void StopLRAssignmentMode(); + bool SwapNpadAssignment(u32 npad_id_1, u32 npad_id_2); + // Logical OR for all buttons presses on all controllers // Specifically for cheat engine and other features. u32 GetAndResetPressState(); @@ -321,5 +326,6 @@ private: void RequestPadStateUpdate(u32 npad_id); std::array<ControllerPad, 10> npad_pad_states{}; bool IsControllerSupported(NPadControllerType controller); + bool is_in_lr_assignment_mode{false}; }; } // namespace Service::HID diff --git a/src/core/hle/service/hid/errors.h b/src/core/hle/service/hid/errors.h new file mode 100644 index 0000000000..3583642e71 --- /dev/null +++ b/src/core/hle/service/hid/errors.h @@ -0,0 +1,13 @@ +// Copyright 2019 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "core/hle/result.h" + +namespace Service::HID { + +constexpr ResultCode ERR_NPAD_NOT_CONNECTED{ErrorModule::HID, 710}; + +} // namespace Service::HID diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp index a4ad95d96c..f8b1ca8166 100644 --- a/src/core/hle/service/hid/hid.cpp +++ b/src/core/hle/service/hid/hid.cpp @@ -16,6 +16,7 @@ #include "core/hle/kernel/readable_event.h" #include "core/hle/kernel/shared_memory.h" #include "core/hle/kernel/writable_event.h" +#include "core/hle/service/hid/errors.h" #include "core/hle/service/hid/hid.h" #include "core/hle/service/hid/irs.h" #include "core/hle/service/hid/xcd.h" @@ -202,11 +203,11 @@ Hid::Hid() : ServiceFramework("hid") { {123, nullptr, "SetNpadJoyAssignmentModeSingleByDefault"}, {124, &Hid::SetNpadJoyAssignmentModeDual, "SetNpadJoyAssignmentModeDual"}, {125, &Hid::MergeSingleJoyAsDualJoy, "MergeSingleJoyAsDualJoy"}, - {126, nullptr, "StartLrAssignmentMode"}, - {127, nullptr, "StopLrAssignmentMode"}, + {126, &Hid::StartLrAssignmentMode, "StartLrAssignmentMode"}, + {127, &Hid::StopLrAssignmentMode, "StopLrAssignmentMode"}, {128, &Hid::SetNpadHandheldActivationMode, "SetNpadHandheldActivationMode"}, {129, nullptr, "GetNpadHandheldActivationMode"}, - {130, nullptr, "SwapNpadAssignment"}, + {130, &Hid::SwapNpadAssignment, "SwapNpadAssignment"}, {131, nullptr, "IsUnintendedHomeButtonInputProtectionEnabled"}, {132, nullptr, "EnableUnintendedHomeButtonInputProtection"}, {133, nullptr, "SetNpadJoyAssignmentModeSingleWithDestination"}, @@ -215,8 +216,8 @@ Hid::Hid() : ServiceFramework("hid") { {201, &Hid::SendVibrationValue, "SendVibrationValue"}, {202, &Hid::GetActualVibrationValue, "GetActualVibrationValue"}, {203, &Hid::CreateActiveVibrationDeviceList, "CreateActiveVibrationDeviceList"}, - {204, nullptr, "PermitVibration"}, - {205, nullptr, "IsVibrationPermitted"}, + {204, &Hid::PermitVibration, "PermitVibration"}, + {205, &Hid::IsVibrationPermitted, "IsVibrationPermitted"}, {206, &Hid::SendVibrationValues, "SendVibrationValues"}, {207, nullptr, "SendVibrationGcErmCommand"}, {208, nullptr, "GetActualVibrationGcErmCommand"}, @@ -678,6 +679,27 @@ void 
Hid::CreateActiveVibrationDeviceList(Kernel::HLERequestContext& ctx) { rb.PushIpcInterface<IActiveVibrationDeviceList>(); } +void Hid::PermitVibration(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto can_vibrate{rp.Pop<bool>()}; + applet_resource->GetController<Controller_NPad>(HidController::NPad) + .SetVibrationEnabled(can_vibrate); + + LOG_DEBUG(Service_HID, "called, can_vibrate={}", can_vibrate); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(RESULT_SUCCESS); +} + +void Hid::IsVibrationPermitted(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_HID, "called"); + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(RESULT_SUCCESS); + rb.Push( + applet_resource->GetController<Controller_NPad>(HidController::NPad).IsVibrationEnabled()); +} + void Hid::ActivateConsoleSixAxisSensor(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; const auto applet_resource_user_id{rp.Pop<u64>()}; @@ -733,6 +755,49 @@ void Hid::SetPalmaBoostMode(Kernel::HLERequestContext& ctx) { rb.Push(RESULT_SUCCESS); } +void Hid::StartLrAssignmentMode(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto applet_resource_user_id{rp.Pop<u64>()}; + + LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id); + auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad); + controller.StartLRAssignmentMode(); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(RESULT_SUCCESS); +} + +void Hid::StopLrAssignmentMode(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto applet_resource_user_id{rp.Pop<u64>()}; + + LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id); + auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad); + controller.StopLRAssignmentMode(); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(RESULT_SUCCESS); +} + +void Hid::SwapNpadAssignment(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto npad_1{rp.Pop<u32>()}; + const auto npad_2{rp.Pop<u32>()}; + const auto applet_resource_user_id{rp.Pop<u64>()}; + + LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}, npad_1={}, npad_2={}", + applet_resource_user_id, npad_1, npad_2); + + auto& controller = applet_resource->GetController<Controller_NPad>(HidController::NPad); + IPC::ResponseBuilder rb{ctx, 2}; + if (controller.SwapNpadAssignment(npad_1, npad_2)) { + rb.Push(RESULT_SUCCESS); + } else { + LOG_ERROR(Service_HID, "Npads are not connected!"); + rb.Push(ERR_NPAD_NOT_CONNECTED); + } +} + class HidDbg final : public ServiceFramework<HidDbg> { public: explicit HidDbg() : ServiceFramework{"hid:dbg"} { diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h index d3660cad2f..2fd6d9fc70 100644 --- a/src/core/hle/service/hid/hid.h +++ b/src/core/hle/service/hid/hid.h @@ -114,11 +114,16 @@ private: void SetNpadHandheldActivationMode(Kernel::HLERequestContext& ctx); void GetVibrationDeviceInfo(Kernel::HLERequestContext& ctx); void CreateActiveVibrationDeviceList(Kernel::HLERequestContext& ctx); + void PermitVibration(Kernel::HLERequestContext& ctx); + void IsVibrationPermitted(Kernel::HLERequestContext& ctx); void ActivateConsoleSixAxisSensor(Kernel::HLERequestContext& ctx); void StartConsoleSixAxisSensor(Kernel::HLERequestContext& ctx); void StopSixAxisSensor(Kernel::HLERequestContext& ctx); void SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx); void SetPalmaBoostMode(Kernel::HLERequestContext& ctx); + void 
StartLrAssignmentMode(Kernel::HLERequestContext& ctx); + void StopLrAssignmentMode(Kernel::HLERequestContext& ctx); + void SwapNpadAssignment(Kernel::HLERequestContext& ctx); std::shared_ptr<IAppletResource> applet_resource; }; diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp index b839303acb..8ddad86826 100644 --- a/src/core/hle/service/ldr/ldr.cpp +++ b/src/core/hle/service/ldr/ldr.cpp @@ -345,14 +345,16 @@ public: vm_manager .MirrorMemory(*map_address, nro_address, nro_size, Kernel::MemoryState::ModuleCode) .IsSuccess()); - ASSERT(vm_manager.UnmapRange(nro_address, nro_size).IsSuccess()); + ASSERT(vm_manager.ReprotectRange(nro_address, nro_size, Kernel::VMAPermission::None) + .IsSuccess()); if (bss_size > 0) { ASSERT(vm_manager .MirrorMemory(*map_address + nro_size, bss_address, bss_size, Kernel::MemoryState::ModuleCode) .IsSuccess()); - ASSERT(vm_manager.UnmapRange(bss_address, bss_size).IsSuccess()); + ASSERT(vm_manager.ReprotectRange(bss_address, bss_size, Kernel::VMAPermission::None) + .IsSuccess()); } vm_manager.ReprotectRange(*map_address, header.text_size, @@ -364,7 +366,8 @@ public: Core::System::GetInstance().InvalidateCpuInstructionCaches(); - nro.insert_or_assign(*map_address, NROInfo{hash, nro_size + bss_size}); + nro.insert_or_assign(*map_address, + NROInfo{hash, nro_address, nro_size, bss_address, bss_size}); IPC::ResponseBuilder rb{ctx, 4}; rb.Push(RESULT_SUCCESS); @@ -409,9 +412,23 @@ public: } auto& vm_manager = Core::CurrentProcess()->VMManager(); - const auto& nro_size = iter->second.size; + const auto& nro_info = iter->second; - ASSERT(vm_manager.UnmapRange(nro_address, nro_size).IsSuccess()); + // Unmap the mirrored memory + ASSERT( + vm_manager.UnmapRange(nro_address, nro_info.nro_size + nro_info.bss_size).IsSuccess()); + + // Reprotect the source memory + ASSERT(vm_manager + .ReprotectRange(nro_info.nro_address, nro_info.nro_size, + Kernel::VMAPermission::ReadWrite) + .IsSuccess()); + if (nro_info.bss_size > 0) { + ASSERT(vm_manager + .ReprotectRange(nro_info.bss_address, nro_info.bss_size, + Kernel::VMAPermission::ReadWrite) + .IsSuccess()); + } Core::System::GetInstance().InvalidateCpuInstructionCaches(); @@ -473,7 +490,10 @@ private: struct NROInfo { SHA256Hash hash; - u64 size; + VAddr nro_address; + u64 nro_size; + VAddr bss_address; + u64 bss_size; }; bool initialized = false; diff --git a/src/core/hle/service/mii/mii.cpp b/src/core/hle/service/mii/mii.cpp index ce84e25ed2..0b3923ad93 100644 --- a/src/core/hle/service/mii/mii.cpp +++ b/src/core/hle/service/mii/mii.cpp @@ -48,7 +48,7 @@ public: {19, nullptr, "Export"}, {20, nullptr, "IsBrokenDatabaseWithClearFlag"}, {21, &IDatabaseService::GetIndex, "GetIndex"}, - {22, nullptr, "SetInterfaceVersion"}, + {22, &IDatabaseService::SetInterfaceVersion, "SetInterfaceVersion"}, {23, nullptr, "Convert"}, }; // clang-format on @@ -350,8 +350,22 @@ private: rb.Push(index); } + void SetInterfaceVersion(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + current_interface_version = rp.PopRaw<u32>(); + + LOG_DEBUG(Service_Mii, "called, interface_version={:08X}", current_interface_version); + + UNIMPLEMENTED_IF(current_interface_version != 1); + + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(RESULT_SUCCESS); + } + MiiManager db; + u32 current_interface_version = 0; + // Last read offsets of Get functions std::array<u32, 4> offsets{}; }; diff --git a/src/core/hle/service/mii/mii_manager.cpp b/src/core/hle/service/mii/mii_manager.cpp index 131b01d627..8d0353075c 100644 --- 
a/src/core/hle/service/mii/mii_manager.cpp +++ b/src/core/hle/service/mii/mii_manager.cpp @@ -175,6 +175,10 @@ MiiStoreData ConvertInfoToStoreData(const MiiInfo& info) { } // namespace std::ostream& operator<<(std::ostream& os, Source source) { + if (static_cast<std::size_t>(source) >= SOURCE_NAMES.size()) { + return os << "[UNKNOWN SOURCE]"; + } + os << SOURCE_NAMES.at(static_cast<std::size_t>(source)); return os; } diff --git a/src/core/hle/service/ns/pl_u.cpp b/src/core/hle/service/ns/pl_u.cpp index ad176f89df..2a522136d0 100644 --- a/src/core/hle/service/ns/pl_u.cpp +++ b/src/core/hle/service/ns/pl_u.cpp @@ -77,7 +77,7 @@ enum class LoadState : u32 { Done = 1, }; -static void DecryptSharedFont(const std::vector<u32>& input, std::vector<u8>& output, +static void DecryptSharedFont(const std::vector<u32>& input, Kernel::PhysicalMemory& output, std::size_t& offset) { ASSERT_MSG(offset + (input.size() * sizeof(u32)) < SHARED_FONT_MEM_SIZE, "Shared fonts exceeds 17mb!"); @@ -94,7 +94,7 @@ static void DecryptSharedFont(const std::vector<u32>& input, std::vector<u8>& ou offset += transformed_font.size() * sizeof(u32); } -static void EncryptSharedFont(const std::vector<u8>& input, std::vector<u8>& output, +static void EncryptSharedFont(const std::vector<u8>& input, Kernel::PhysicalMemory& output, std::size_t& offset) { ASSERT_MSG(offset + input.size() + 8 < SHARED_FONT_MEM_SIZE, "Shared fonts exceeds 17mb!"); const u32 KEY = EXPECTED_MAGIC ^ EXPECTED_RESULT; @@ -121,7 +121,7 @@ struct PL_U::Impl { return shared_font_regions.at(index); } - void BuildSharedFontsRawRegions(const std::vector<u8>& input) { + void BuildSharedFontsRawRegions(const Kernel::PhysicalMemory& input) { // As we can derive the xor key we can just populate the offsets // based on the shared memory dump unsigned cur_offset = 0; @@ -144,7 +144,7 @@ struct PL_U::Impl { Kernel::SharedPtr<Kernel::SharedMemory> shared_font_mem; /// Backing memory for the shared font data - std::shared_ptr<std::vector<u8>> shared_font; + std::shared_ptr<Kernel::PhysicalMemory> shared_font; // Automatically populated based on shared_fonts dump or system archives. 
std::vector<FontRegion> shared_font_regions; @@ -166,7 +166,7 @@ PL_U::PL_U() : ServiceFramework("pl:u"), impl{std::make_unique<Impl>()} { // Rebuild shared fonts from data ncas if (nand->HasEntry(static_cast<u64>(FontArchives::Standard), FileSys::ContentRecordType::Data)) { - impl->shared_font = std::make_shared<std::vector<u8>>(SHARED_FONT_MEM_SIZE); + impl->shared_font = std::make_shared<Kernel::PhysicalMemory>(SHARED_FONT_MEM_SIZE); for (auto font : SHARED_FONTS) { const auto nca = nand->GetEntry(static_cast<u64>(font.first), FileSys::ContentRecordType::Data); @@ -207,7 +207,7 @@ PL_U::PL_U() : ServiceFramework("pl:u"), impl{std::make_unique<Impl>()} { } } else { - impl->shared_font = std::make_shared<std::vector<u8>>( + impl->shared_font = std::make_shared<Kernel::PhysicalMemory>( SHARED_FONT_MEM_SIZE); // Shared memory needs to always be allocated and a fixed size const std::string user_path = FileUtil::GetUserPath(FileUtil::UserPath::SysDataDir); diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h index 4f6042b00a..5b8248433e 100644 --- a/src/core/hle/service/nvdrv/devices/nvdevice.h +++ b/src/core/hle/service/nvdrv/devices/nvdevice.h @@ -8,6 +8,11 @@ #include "common/bit_field.h" #include "common/common_types.h" #include "common/swap.h" +#include "core/hle/service/nvdrv/nvdata.h" + +namespace Core { +class System; +} namespace Service::Nvidia::Devices { @@ -15,7 +20,7 @@ namespace Service::Nvidia::Devices { /// implement the ioctl interface. class nvdevice { public: - nvdevice() = default; + explicit nvdevice(Core::System& system) : system{system} {}; virtual ~nvdevice() = default; union Ioctl { u32_le raw; @@ -33,7 +38,11 @@ public: * @param output A buffer where the output data will be written to. * @returns The result code of the ioctl. 
*/ - virtual u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) = 0; + virtual u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) = 0; + +protected: + Core::System& system; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp index 20c7c39aaf..926a1285d7 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp @@ -13,10 +13,12 @@ namespace Service::Nvidia::Devices { -nvdisp_disp0::nvdisp_disp0(std::shared_ptr<nvmap> nvmap_dev) : nvmap_dev(std::move(nvmap_dev)) {} +nvdisp_disp0::nvdisp_disp0(Core::System& system, std::shared_ptr<nvmap> nvmap_dev) + : nvdevice(system), nvmap_dev(std::move(nvmap_dev)) {} nvdisp_disp0 ::~nvdisp_disp0() = default; -u32 nvdisp_disp0::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) { +u32 nvdisp_disp0::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) { UNIMPLEMENTED_MSG("Unimplemented ioctl"); return 0; } @@ -34,9 +36,8 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u3 addr, offset, width, height, stride, static_cast<PixelFormat>(format), transform, crop_rect}; - auto& instance = Core::System::GetInstance(); - instance.GetPerfStats().EndGameFrame(); - instance.GPU().SwapBuffers(framebuffer); + system.GetPerfStats().EndGameFrame(); + system.GPU().SwapBuffers(&framebuffer); } } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h index 12f3ef8254..e79e490ff0 100644 --- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h +++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h @@ -17,10 +17,11 @@ class nvmap; class nvdisp_disp0 final : public nvdevice { public: - explicit nvdisp_disp0(std::shared_ptr<nvmap> nvmap_dev); + explicit nvdisp_disp0(Core::System& system, std::shared_ptr<nvmap> nvmap_dev); ~nvdisp_disp0() override; - u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; + u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) override; /// Performs a screen flip, drawing the buffer pointed to by the handle. 
void flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u32 height, u32 stride, diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index af62d33d21..24ab3f2e9f 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -22,10 +22,12 @@ enum { }; } -nvhost_as_gpu::nvhost_as_gpu(std::shared_ptr<nvmap> nvmap_dev) : nvmap_dev(std::move(nvmap_dev)) {} +nvhost_as_gpu::nvhost_as_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev) + : nvdevice(system), nvmap_dev(std::move(nvmap_dev)) {} nvhost_as_gpu::~nvhost_as_gpu() = default; -u32 nvhost_as_gpu::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) { +u32 nvhost_as_gpu::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) { LOG_DEBUG(Service_NVDRV, "called, command=0x{:08X}, input_size=0x{:X}, output_size=0x{:X}", command.raw, input.size(), output.size()); @@ -65,7 +67,7 @@ u32 nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>& LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages, params.page_size, params.flags); - auto& gpu = Core::System::GetInstance().GPU(); + auto& gpu = system.GPU(); const u64 size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)}; if (params.flags & 1) { params.offset = gpu.MemoryManager().AllocateSpace(params.offset, size, 1); @@ -85,7 +87,7 @@ u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output) std::vector<IoctlRemapEntry> entries(num_entries); std::memcpy(entries.data(), input.data(), input.size()); - auto& gpu = Core::System::GetInstance().GPU(); + auto& gpu = system.GPU(); for (const auto& entry : entries) { LOG_WARNING(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}", entry.offset, entry.nvmap_handle, entry.pages); @@ -136,7 +138,7 @@ u32 nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& ou // case to prevent unexpected behavior. 
ASSERT(object->id == params.nvmap_handle); - auto& gpu = Core::System::GetInstance().GPU(); + auto& gpu = system.GPU(); if (params.flags & 1) { params.offset = gpu.MemoryManager().MapBufferEx(object->addr, params.offset, object->size); @@ -173,8 +175,7 @@ u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& ou return 0; } - params.offset = Core::System::GetInstance().GPU().MemoryManager().UnmapBuffer(params.offset, - itr->second.size); + params.offset = system.GPU().MemoryManager().UnmapBuffer(params.offset, itr->second.size); buffer_mappings.erase(itr->second.offset); std::memcpy(output.data(), ¶ms, output.size()); diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index eb14b1da8e..30ca5f4c3b 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h @@ -17,10 +17,11 @@ class nvmap; class nvhost_as_gpu final : public nvdevice { public: - explicit nvhost_as_gpu(std::shared_ptr<nvmap> nvmap_dev); + explicit nvhost_as_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev); ~nvhost_as_gpu() override; - u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; + u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) override; private: enum class IoctlCommand : u32_le { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp index b39fb9ef90..9a66a5f880 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp @@ -7,14 +7,20 @@ #include "common/assert.h" #include "common/logging/log.h" +#include "core/core.h" +#include "core/hle/kernel/readable_event.h" +#include "core/hle/kernel/writable_event.h" #include "core/hle/service/nvdrv/devices/nvhost_ctrl.h" +#include "video_core/gpu.h" namespace Service::Nvidia::Devices { -nvhost_ctrl::nvhost_ctrl() = default; +nvhost_ctrl::nvhost_ctrl(Core::System& system, EventInterface& events_interface) + : nvdevice(system), events_interface{events_interface} {} nvhost_ctrl::~nvhost_ctrl() = default; -u32 nvhost_ctrl::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) { +u32 nvhost_ctrl::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) { LOG_DEBUG(Service_NVDRV, "called, command=0x{:08X}, input_size=0x{:X}, output_size=0x{:X}", command.raw, input.size(), output.size()); @@ -22,11 +28,15 @@ u32 nvhost_ctrl::ioctl(Ioctl command, const std::vector<u8>& input, std::vector< case IoctlCommand::IocGetConfigCommand: return NvOsGetConfigU32(input, output); case IoctlCommand::IocCtrlEventWaitCommand: - return IocCtrlEventWait(input, output, false); + return IocCtrlEventWait(input, output, false, ctrl); case IoctlCommand::IocCtrlEventWaitAsyncCommand: - return IocCtrlEventWait(input, output, true); + return IocCtrlEventWait(input, output, true, ctrl); case IoctlCommand::IocCtrlEventRegisterCommand: return IocCtrlEventRegister(input, output); + case IoctlCommand::IocCtrlEventUnregisterCommand: + return IocCtrlEventUnregister(input, output); + case IoctlCommand::IocCtrlEventSignalCommand: + return IocCtrlEventSignal(input, output); } UNIMPLEMENTED_MSG("Unimplemented ioctl"); return 0; @@ -41,23 +51,137 @@ u32 nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& } u32 nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, 
std::vector<u8>& output, - bool is_async) { + bool is_async, IoctlCtrl& ctrl) { IocCtrlEventWaitParams params{}; std::memcpy(¶ms, input.data(), sizeof(params)); - LOG_WARNING(Service_NVDRV, - "(STUBBED) called, syncpt_id={}, threshold={}, timeout={}, is_async={}", - params.syncpt_id, params.threshold, params.timeout, is_async); + LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_async={}", + params.syncpt_id, params.threshold, params.timeout, is_async); - // TODO(Subv): Implement actual syncpt waiting. - params.value = 0; + if (params.syncpt_id >= MaxSyncPoints) { + return NvResult::BadParameter; + } + + auto& gpu = system.GPU(); + // This is mostly to take into account unimplemented features. As synced + // gpu is always synced. + if (!gpu.IsAsync()) { + return NvResult::Success; + } + auto lock = gpu.LockSync(); + const u32 current_syncpoint_value = gpu.GetSyncpointValue(params.syncpt_id); + const s32 diff = current_syncpoint_value - params.threshold; + if (diff >= 0) { + params.value = current_syncpoint_value; + std::memcpy(output.data(), ¶ms, sizeof(params)); + return NvResult::Success; + } + const u32 target_value = current_syncpoint_value - diff; + + if (!is_async) { + params.value = 0; + } + + if (params.timeout == 0) { + std::memcpy(output.data(), ¶ms, sizeof(params)); + return NvResult::Timeout; + } + + u32 event_id; + if (is_async) { + event_id = params.value & 0x00FF; + if (event_id >= MaxNvEvents) { + std::memcpy(output.data(), ¶ms, sizeof(params)); + return NvResult::BadParameter; + } + } else { + if (ctrl.fresh_call) { + const auto result = events_interface.GetFreeEvent(); + if (result) { + event_id = *result; + } else { + LOG_CRITICAL(Service_NVDRV, "No Free Events available!"); + event_id = params.value & 0x00FF; + } + } else { + event_id = ctrl.event_id; + } + } + + EventState status = events_interface.status[event_id]; + if (event_id < MaxNvEvents || status == EventState::Free || status == EventState::Registered) { + events_interface.SetEventStatus(event_id, EventState::Waiting); + events_interface.assigned_syncpt[event_id] = params.syncpt_id; + events_interface.assigned_value[event_id] = target_value; + if (is_async) { + params.value = params.syncpt_id << 4; + } else { + params.value = ((params.syncpt_id & 0xfff) << 16) | 0x10000000; + } + params.value |= event_id; + events_interface.events[event_id].writable->Clear(); + gpu.RegisterSyncptInterrupt(params.syncpt_id, target_value); + if (!is_async && ctrl.fresh_call) { + ctrl.must_delay = true; + ctrl.timeout = params.timeout; + ctrl.event_id = event_id; + return NvResult::Timeout; + } + std::memcpy(output.data(), ¶ms, sizeof(params)); + return NvResult::Timeout; + } std::memcpy(output.data(), ¶ms, sizeof(params)); - return 0; + return NvResult::BadParameter; } u32 nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output) { - LOG_WARNING(Service_NVDRV, "(STUBBED) called"); - // TODO(bunnei): Implement this. 
- return 0; + IocCtrlEventRegisterParams params{}; + std::memcpy(¶ms, input.data(), sizeof(params)); + const u32 event_id = params.user_event_id & 0x00FF; + LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id); + if (event_id >= MaxNvEvents) { + return NvResult::BadParameter; + } + if (events_interface.registered[event_id]) { + return NvResult::BadParameter; + } + events_interface.RegisterEvent(event_id); + return NvResult::Success; +} + +u32 nvhost_ctrl::IocCtrlEventUnregister(const std::vector<u8>& input, std::vector<u8>& output) { + IocCtrlEventUnregisterParams params{}; + std::memcpy(¶ms, input.data(), sizeof(params)); + const u32 event_id = params.user_event_id & 0x00FF; + LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id); + if (event_id >= MaxNvEvents) { + return NvResult::BadParameter; + } + if (!events_interface.registered[event_id]) { + return NvResult::BadParameter; + } + events_interface.UnregisterEvent(event_id); + return NvResult::Success; +} + +u32 nvhost_ctrl::IocCtrlEventSignal(const std::vector<u8>& input, std::vector<u8>& output) { + IocCtrlEventSignalParams params{}; + std::memcpy(¶ms, input.data(), sizeof(params)); + // TODO(Blinkhawk): This is normally called when an NvEvents timeout on WaitSynchronization + // It is believed from RE to cancel the GPU Event. However, better research is required + u32 event_id = params.user_event_id & 0x00FF; + LOG_WARNING(Service_NVDRV, "(STUBBED) called, user_event_id: {:X}", event_id); + if (event_id >= MaxNvEvents) { + return NvResult::BadParameter; + } + if (events_interface.status[event_id] == EventState::Waiting) { + auto& gpu = system.GPU(); + if (gpu.CancelSyncptInterrupt(events_interface.assigned_syncpt[event_id], + events_interface.assigned_value[event_id])) { + events_interface.LiberateEvent(event_id); + events_interface.events[event_id].writable->Signal(); + } + } + return NvResult::Success; } } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h index 6d0de2212a..14e6e7e57a 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h @@ -8,15 +8,17 @@ #include <vector> #include "common/common_types.h" #include "core/hle/service/nvdrv/devices/nvdevice.h" +#include "core/hle/service/nvdrv/nvdrv.h" namespace Service::Nvidia::Devices { class nvhost_ctrl final : public nvdevice { public: - nvhost_ctrl(); + explicit nvhost_ctrl(Core::System& system, EventInterface& events_interface); ~nvhost_ctrl() override; - u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; + u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) override; private: enum class IoctlCommand : u32_le { @@ -132,9 +134,16 @@ private: u32 NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output); - u32 IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, bool is_async); + u32 IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, bool is_async, + IoctlCtrl& ctrl); u32 IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output); + + u32 IocCtrlEventUnregister(const std::vector<u8>& input, std::vector<u8>& output); + + u32 IocCtrlEventSignal(const std::vector<u8>& input, std::vector<u8>& output); + + EventInterface& events_interface; }; } // namespace Service::Nvidia::Devices diff --git 
a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp index 0e28755bdd..988effd90b 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp @@ -12,10 +12,11 @@ namespace Service::Nvidia::Devices { -nvhost_ctrl_gpu::nvhost_ctrl_gpu() = default; +nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system) : nvdevice(system) {} nvhost_ctrl_gpu::~nvhost_ctrl_gpu() = default; -u32 nvhost_ctrl_gpu::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) { +u32 nvhost_ctrl_gpu::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) { LOG_DEBUG(Service_NVDRV, "called, command=0x{:08X}, input_size=0x{:X}, output_size=0x{:X}", command.raw, input.size(), output.size()); @@ -185,7 +186,7 @@ u32 nvhost_ctrl_gpu::GetGpuTime(const std::vector<u8>& input, std::vector<u8>& o IoctlGetGpuTime params{}; std::memcpy(¶ms, input.data(), input.size()); - const auto ns = Core::Timing::CyclesToNs(Core::System::GetInstance().CoreTiming().GetTicks()); + const auto ns = Core::Timing::CyclesToNs(system.CoreTiming().GetTicks()); params.gpu_time = static_cast<u64_le>(ns.count()); std::memcpy(output.data(), ¶ms, output.size()); return 0; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h index 240435eea5..2b035ae3f9 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h @@ -13,10 +13,11 @@ namespace Service::Nvidia::Devices { class nvhost_ctrl_gpu final : public nvdevice { public: - nvhost_ctrl_gpu(); + explicit nvhost_ctrl_gpu(Core::System& system); ~nvhost_ctrl_gpu() override; - u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; + u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) override; private: enum class IoctlCommand : u32_le { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp index 8ce7bc7a5e..241dac8817 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp @@ -13,10 +13,12 @@ namespace Service::Nvidia::Devices { -nvhost_gpu::nvhost_gpu(std::shared_ptr<nvmap> nvmap_dev) : nvmap_dev(std::move(nvmap_dev)) {} +nvhost_gpu::nvhost_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev) + : nvdevice(system), nvmap_dev(std::move(nvmap_dev)) {} nvhost_gpu::~nvhost_gpu() = default; -u32 nvhost_gpu::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) { +u32 nvhost_gpu::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) { LOG_DEBUG(Service_NVDRV, "called, command=0x{:08X}, input_size=0x{:X}, output_size=0x{:X}", command.raw, input.size(), output.size()); @@ -119,8 +121,10 @@ u32 nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8>& ou params.num_entries, params.flags, params.unk0, params.unk1, params.unk2, params.unk3); - params.fence_out.id = 0; - params.fence_out.value = 0; + auto& gpu = system.GPU(); + params.fence_out.id = assigned_syncpoints; + params.fence_out.value = gpu.GetSyncpointValue(assigned_syncpoints); + assigned_syncpoints++; std::memcpy(output.data(), ¶ms, output.size()); return 0; } @@ -143,7 +147,7 @@ u32 nvhost_gpu::SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& outp 
IoctlSubmitGpfifo params{}; std::memcpy(¶ms, input.data(), sizeof(IoctlSubmitGpfifo)); LOG_WARNING(Service_NVDRV, "(STUBBED) called, gpfifo={:X}, num_entries={:X}, flags={:X}", - params.address, params.num_entries, params.flags); + params.address, params.num_entries, params.flags.raw); ASSERT_MSG(input.size() == sizeof(IoctlSubmitGpfifo) + params.num_entries * sizeof(Tegra::CommandListHeader), @@ -153,10 +157,18 @@ u32 nvhost_gpu::SubmitGPFIFO(const std::vector<u8>& input, std::vector<u8>& outp std::memcpy(entries.data(), &input[sizeof(IoctlSubmitGpfifo)], params.num_entries * sizeof(Tegra::CommandListHeader)); - Core::System::GetInstance().GPU().PushGPUEntries(std::move(entries)); + UNIMPLEMENTED_IF(params.flags.add_wait.Value() != 0); + UNIMPLEMENTED_IF(params.flags.add_increment.Value() != 0); + + auto& gpu = system.GPU(); + u32 current_syncpoint_value = gpu.GetSyncpointValue(params.fence_out.id); + if (params.flags.increment.Value()) { + params.fence_out.value += current_syncpoint_value; + } else { + params.fence_out.value = current_syncpoint_value; + } + gpu.PushGPUEntries(std::move(entries)); - params.fence_out.id = 0; - params.fence_out.value = 0; std::memcpy(output.data(), ¶ms, sizeof(IoctlSubmitGpfifo)); return 0; } @@ -168,16 +180,24 @@ u32 nvhost_gpu::KickoffPB(const std::vector<u8>& input, std::vector<u8>& output) IoctlSubmitGpfifo params{}; std::memcpy(¶ms, input.data(), sizeof(IoctlSubmitGpfifo)); LOG_WARNING(Service_NVDRV, "(STUBBED) called, gpfifo={:X}, num_entries={:X}, flags={:X}", - params.address, params.num_entries, params.flags); + params.address, params.num_entries, params.flags.raw); Tegra::CommandList entries(params.num_entries); Memory::ReadBlock(params.address, entries.data(), params.num_entries * sizeof(Tegra::CommandListHeader)); - Core::System::GetInstance().GPU().PushGPUEntries(std::move(entries)); + UNIMPLEMENTED_IF(params.flags.add_wait.Value() != 0); + UNIMPLEMENTED_IF(params.flags.add_increment.Value() != 0); + + auto& gpu = system.GPU(); + u32 current_syncpoint_value = gpu.GetSyncpointValue(params.fence_out.id); + if (params.flags.increment.Value()) { + params.fence_out.value += current_syncpoint_value; + } else { + params.fence_out.value = current_syncpoint_value; + } + gpu.PushGPUEntries(std::move(entries)); - params.fence_out.id = 0; - params.fence_out.value = 0; std::memcpy(output.data(), ¶ms, output.size()); return 0; } diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h index 62beb5c0c0..d2e8fbae9a 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h @@ -10,6 +10,7 @@ #include "common/common_types.h" #include "common/swap.h" #include "core/hle/service/nvdrv/devices/nvdevice.h" +#include "core/hle/service/nvdrv/nvdata.h" namespace Service::Nvidia::Devices { @@ -20,10 +21,11 @@ constexpr u32 NVGPU_IOCTL_CHANNEL_KICKOFF_PB(0x1b); class nvhost_gpu final : public nvdevice { public: - explicit nvhost_gpu(std::shared_ptr<nvmap> nvmap_dev); + explicit nvhost_gpu(Core::System& system, std::shared_ptr<nvmap> nvmap_dev); ~nvhost_gpu() override; - u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; + u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) override; private: enum class IoctlCommand : u32_le { @@ -113,11 +115,7 @@ private: static_assert(sizeof(IoctlGetErrorNotification) == 16, "IoctlGetErrorNotification is incorrect size"); - struct IoctlFence { - 
u32_le id; - u32_le value; - }; - static_assert(sizeof(IoctlFence) == 8, "IoctlFence is incorrect size"); + static_assert(sizeof(Fence) == 8, "Fence is incorrect size"); struct IoctlAllocGpfifoEx { u32_le num_entries; @@ -132,13 +130,13 @@ private: static_assert(sizeof(IoctlAllocGpfifoEx) == 32, "IoctlAllocGpfifoEx is incorrect size"); struct IoctlAllocGpfifoEx2 { - u32_le num_entries; // in - u32_le flags; // in - u32_le unk0; // in (1 works) - IoctlFence fence_out; // out - u32_le unk1; // in - u32_le unk2; // in - u32_le unk3; // in + u32_le num_entries; // in + u32_le flags; // in + u32_le unk0; // in (1 works) + Fence fence_out; // out + u32_le unk1; // in + u32_le unk2; // in + u32_le unk3; // in }; static_assert(sizeof(IoctlAllocGpfifoEx2) == 32, "IoctlAllocGpfifoEx2 is incorrect size"); @@ -153,10 +151,16 @@ private: struct IoctlSubmitGpfifo { u64_le address; // pointer to gpfifo entry structs u32_le num_entries; // number of fence objects being submitted - u32_le flags; - IoctlFence fence_out; // returned new fence object for others to wait on - }; - static_assert(sizeof(IoctlSubmitGpfifo) == 16 + sizeof(IoctlFence), + union { + u32_le raw; + BitField<0, 1, u32_le> add_wait; // append a wait sync_point to the list + BitField<1, 1, u32_le> add_increment; // append an increment to the list + BitField<2, 1, u32_le> new_hw_format; // Mostly ignored + BitField<8, 1, u32_le> increment; // increment the returned fence + } flags; + Fence fence_out; // returned new fence object for others to wait on + }; + static_assert(sizeof(IoctlSubmitGpfifo) == 16 + sizeof(Fence), "IoctlSubmitGpfifo is incorrect size"); struct IoctlGetWaitbase { @@ -184,6 +188,7 @@ private: u32 ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output); std::shared_ptr<nvmap> nvmap_dev; + u32 assigned_syncpoints{}; }; } // namespace Service::Nvidia::Devices diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp index f5e8ea7c3a..f572ad30f3 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp @@ -10,10 +10,11 @@ namespace Service::Nvidia::Devices { -nvhost_nvdec::nvhost_nvdec() = default; +nvhost_nvdec::nvhost_nvdec(Core::System& system) : nvdevice(system) {} nvhost_nvdec::~nvhost_nvdec() = default; -u32 nvhost_nvdec::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) { +u32 nvhost_nvdec::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) { LOG_DEBUG(Service_NVDRV, "called, command=0x{:08X}, input_size=0x{:X}, output_size=0x{:X}", command.raw, input.size(), output.size()); diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h index 0e7b284f87..2710f05113 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h @@ -13,10 +13,11 @@ namespace Service::Nvidia::Devices { class nvhost_nvdec final : public nvdevice { public: - nvhost_nvdec(); + explicit nvhost_nvdec(Core::System& system); ~nvhost_nvdec() override; - u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; + u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) override; private: enum class IoctlCommand : u32_le { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp index 
3e0951ab04..38282956fb 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.cpp @@ -10,10 +10,11 @@ namespace Service::Nvidia::Devices { -nvhost_nvjpg::nvhost_nvjpg() = default; +nvhost_nvjpg::nvhost_nvjpg(Core::System& system) : nvdevice(system) {} nvhost_nvjpg::~nvhost_nvjpg() = default; -u32 nvhost_nvjpg::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) { +u32 nvhost_nvjpg::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) { LOG_DEBUG(Service_NVDRV, "called, command=0x{:08X}, input_size=0x{:X}, output_size=0x{:X}", command.raw, input.size(), output.size()); diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h index 89fd5e95ed..3797666936 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_nvjpg.h @@ -13,10 +13,11 @@ namespace Service::Nvidia::Devices { class nvhost_nvjpg final : public nvdevice { public: - nvhost_nvjpg(); + explicit nvhost_nvjpg(Core::System& system); ~nvhost_nvjpg() override; - u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; + u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) override; private: enum class IoctlCommand : u32_le { diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp index d544f0f314..70e8091dbc 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp @@ -10,10 +10,11 @@ namespace Service::Nvidia::Devices { -nvhost_vic::nvhost_vic() = default; +nvhost_vic::nvhost_vic(Core::System& system) : nvdevice(system) {} nvhost_vic::~nvhost_vic() = default; -u32 nvhost_vic::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) { +u32 nvhost_vic::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) { LOG_DEBUG(Service_NVDRV, "called, command=0x{:08X}, input_size=0x{:X}, output_size=0x{:X}", command.raw, input.size(), output.size()); diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h index fc24c3f9c0..7d111977e3 100644 --- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h @@ -13,10 +13,11 @@ namespace Service::Nvidia::Devices { class nvhost_vic final : public nvdevice { public: - nvhost_vic(); + explicit nvhost_vic(Core::System& system); ~nvhost_vic() override; - u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; + u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) override; private: enum class IoctlCommand : u32_le { diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 1ec796fc6d..223b496b74 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp @@ -18,7 +18,7 @@ enum { }; } -nvmap::nvmap() = default; +nvmap::nvmap(Core::System& system) : nvdevice(system) {} nvmap::~nvmap() = default; VAddr nvmap::GetObjectAddress(u32 handle) const { @@ -28,7 +28,8 @@ VAddr nvmap::GetObjectAddress(u32 handle) const { return object->addr; } -u32 nvmap::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) { +u32 
nvmap::ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) { switch (static_cast<IoctlCommand>(command.raw)) { case IoctlCommand::Create: return IocCreate(input, output); diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h index 396230c191..bf4a101c22 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.h +++ b/src/core/hle/service/nvdrv/devices/nvmap.h @@ -16,13 +16,14 @@ namespace Service::Nvidia::Devices { class nvmap final : public nvdevice { public: - nvmap(); + explicit nvmap(Core::System& system); ~nvmap() override; /// Returns the allocated address of an nvmap object given its handle. VAddr GetObjectAddress(u32 handle) const; - u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output) override; + u32 ioctl(Ioctl command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) override; /// Represents an nvmap object. struct Object { diff --git a/src/core/hle/service/nvdrv/interface.cpp b/src/core/hle/service/nvdrv/interface.cpp index b60fc748b8..d5be64ed28 100644 --- a/src/core/hle/service/nvdrv/interface.cpp +++ b/src/core/hle/service/nvdrv/interface.cpp @@ -8,12 +8,18 @@ #include "core/hle/ipc_helpers.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/readable_event.h" +#include "core/hle/kernel/thread.h" #include "core/hle/kernel/writable_event.h" #include "core/hle/service/nvdrv/interface.h" +#include "core/hle/service/nvdrv/nvdata.h" #include "core/hle/service/nvdrv/nvdrv.h" namespace Service::Nvidia { +void NVDRV::SignalGPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) { + nvdrv->SignalSyncpt(syncpoint_id, value); +} + void NVDRV::Open(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_NVDRV, "called"); @@ -36,11 +42,31 @@ void NVDRV::Ioctl(Kernel::HLERequestContext& ctx) { std::vector<u8> output(ctx.GetWriteBufferSize()); + IoctlCtrl ctrl{}; + + u32 result = nvdrv->Ioctl(fd, command, ctx.ReadBuffer(), output, ctrl); + + if (ctrl.must_delay) { + ctrl.fresh_call = false; + ctx.SleepClientThread( + "NVServices::DelayedResponse", ctrl.timeout, + [=](Kernel::SharedPtr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx, + Kernel::ThreadWakeupReason reason) { + IoctlCtrl ctrl2{ctrl}; + std::vector<u8> output2 = output; + u32 result = nvdrv->Ioctl(fd, command, ctx.ReadBuffer(), output2, ctrl2); + ctx.WriteBuffer(output2); + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(RESULT_SUCCESS); + rb.Push(result); + }, + nvdrv->GetEventWriteable(ctrl.event_id)); + } else { + ctx.WriteBuffer(output); + } IPC::ResponseBuilder rb{ctx, 3}; rb.Push(RESULT_SUCCESS); - rb.Push(nvdrv->Ioctl(fd, command, ctx.ReadBuffer(), output)); - - ctx.WriteBuffer(output); + rb.Push(result); } void NVDRV::Close(Kernel::HLERequestContext& ctx) { @@ -66,13 +92,19 @@ void NVDRV::Initialize(Kernel::HLERequestContext& ctx) { void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) { IPC::RequestParser rp{ctx}; u32 fd = rp.Pop<u32>(); - u32 event_id = rp.Pop<u32>(); + // TODO(Blinkhawk): Figure the meaning of the flag at bit 16 + u32 event_id = rp.Pop<u32>() & 0x000000FF; LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}, event_id={:X}", fd, event_id); IPC::ResponseBuilder rb{ctx, 3, 1}; rb.Push(RESULT_SUCCESS); - rb.PushCopyObjects(query_event.readable); - rb.Push<u32>(0); + if (event_id < MaxNvEvents) { + rb.PushCopyObjects(nvdrv->GetEvent(event_id)); + rb.Push<u32>(NvResult::Success); + } else { + rb.Push<u32>(0); + rb.Push<u32>(NvResult::BadParameter); + } } 
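A note on the IoctlCtrl plumbing shown in the NVDRV::Ioctl hunk above: when a device cannot satisfy a wait-style ioctl on the first attempt, it sets must_delay, and the dispatcher parks the guest thread with SleepClientThread and re-issues the same ioctl from the wake-up callback with fresh_call cleared. The standalone C++ sketch below models only that two-phase control flow under simplified assumptions; IoctlCtrlSketch, DeviceEventWait, DispatchIoctl and the constants in them are invented for illustration and are not yuzu API.

#include <cstdint>

// Hypothetical stand-ins for this sketch only; the real types
// (HLERequestContext, nvdevice, NvResult, ...) are omitted.
struct IoctlCtrlSketch {
    bool fresh_call = true;   // true on the first attempt of this ioctl
    bool must_delay = false;  // device asked the dispatcher to defer the reply
    int64_t timeout = 0;      // how long the guest thread may sleep
    int32_t event_id = -1;    // event the retry is tied to
};

constexpr uint32_t kSuccess = 0;
constexpr uint32_t kTimeout = 5;

// Device side: if the wait cannot be satisfied yet on the first call,
// request a delayed reply instead of blocking the service thread.
uint32_t DeviceEventWait(IoctlCtrlSketch& ctrl, bool signaled) {
    if (!signaled && ctrl.fresh_call) {
        ctrl.must_delay = true;
        ctrl.timeout = 10'000;
        ctrl.event_id = 3;
        return kTimeout;
    }
    return kSuccess;
}

// Dispatcher side: mirrors the shape of the Ioctl handler above. The real
// code re-runs the ioctl from a SleepClientThread callback; here the "sleep"
// is collapsed into a second direct call with fresh_call cleared.
uint32_t DispatchIoctl(bool signaled_later) {
    IoctlCtrlSketch ctrl;
    uint32_t result = DeviceEventWait(ctrl, false);
    if (ctrl.must_delay) {
        ctrl.fresh_call = false;
        result = DeviceEventWait(ctrl, signaled_later);
    }
    return result;
}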
void NVDRV::SetClientPID(Kernel::HLERequestContext& ctx) { @@ -127,10 +159,6 @@ NVDRV::NVDRV(std::shared_ptr<Module> nvdrv, const char* name) {13, &NVDRV::FinishInitialize, "FinishInitialize"}, }; RegisterHandlers(functions); - - auto& kernel = Core::System::GetInstance().Kernel(); - query_event = Kernel::WritableEvent::CreateEventPair(kernel, Kernel::ResetType::Automatic, - "NVDRV::query_event"); } NVDRV::~NVDRV() = default; diff --git a/src/core/hle/service/nvdrv/interface.h b/src/core/hle/service/nvdrv/interface.h index 5b48899108..10a0ecd52d 100644 --- a/src/core/hle/service/nvdrv/interface.h +++ b/src/core/hle/service/nvdrv/interface.h @@ -19,6 +19,8 @@ public: NVDRV(std::shared_ptr<Module> nvdrv, const char* name); ~NVDRV() override; + void SignalGPUInterruptSyncpt(const u32 syncpoint_id, const u32 value); + private: void Open(Kernel::HLERequestContext& ctx); void Ioctl(Kernel::HLERequestContext& ctx); @@ -33,8 +35,6 @@ private: std::shared_ptr<Module> nvdrv; u64 pid{}; - - Kernel::EventPair query_event; }; } // namespace Service::Nvidia diff --git a/src/core/hle/service/nvdrv/nvdata.h b/src/core/hle/service/nvdrv/nvdata.h new file mode 100644 index 0000000000..ac03cbc232 --- /dev/null +++ b/src/core/hle/service/nvdrv/nvdata.h @@ -0,0 +1,48 @@ +#pragma once + +#include <array> +#include "common/common_types.h" + +namespace Service::Nvidia { + +constexpr u32 MaxSyncPoints = 192; +constexpr u32 MaxNvEvents = 64; + +struct Fence { + s32 id; + u32 value; +}; + +static_assert(sizeof(Fence) == 8, "Fence has wrong size"); + +struct MultiFence { + u32 num_fences; + std::array<Fence, 4> fences; +}; + +enum NvResult : u32 { + Success = 0, + BadParameter = 4, + Timeout = 5, + ResourceError = 15, +}; + +enum class EventState { + Free = 0, + Registered = 1, + Waiting = 2, + Busy = 3, +}; + +struct IoctlCtrl { + // First call done to the servioce for services that call itself again after a call. 
+ bool fresh_call{true}; + // Tells the Ioctl Wrapper that it must delay the IPC response and send the thread to sleep + bool must_delay{}; + // Timeout for the delay + s64 timeout{}; + // NV Event Id + s32 event_id{-1}; +}; + +} // namespace Service::Nvidia diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp index 6e4b8f2c66..2011a226af 100644 --- a/src/core/hle/service/nvdrv/nvdrv.cpp +++ b/src/core/hle/service/nvdrv/nvdrv.cpp @@ -4,7 +4,10 @@ #include <utility> +#include <fmt/format.h> #include "core/hle/ipc_helpers.h" +#include "core/hle/kernel/readable_event.h" +#include "core/hle/kernel/writable_event.h" #include "core/hle/service/nvdrv/devices/nvdevice.h" #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" @@ -22,8 +25,9 @@ namespace Service::Nvidia { -void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger) { - auto module_ = std::make_shared<Module>(); +void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger, + Core::System& system) { + auto module_ = std::make_shared<Module>(system); std::make_shared<NVDRV>(module_, "nvdrv")->InstallAsService(service_manager); std::make_shared<NVDRV>(module_, "nvdrv:a")->InstallAsService(service_manager); std::make_shared<NVDRV>(module_, "nvdrv:s")->InstallAsService(service_manager); @@ -32,17 +36,25 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger nvflinger.SetNVDrvInstance(module_); } -Module::Module() { - auto nvmap_dev = std::make_shared<Devices::nvmap>(); - devices["/dev/nvhost-as-gpu"] = std::make_shared<Devices::nvhost_as_gpu>(nvmap_dev); - devices["/dev/nvhost-gpu"] = std::make_shared<Devices::nvhost_gpu>(nvmap_dev); - devices["/dev/nvhost-ctrl-gpu"] = std::make_shared<Devices::nvhost_ctrl_gpu>(); +Module::Module(Core::System& system) { + auto& kernel = system.Kernel(); + for (u32 i = 0; i < MaxNvEvents; i++) { + std::string event_label = fmt::format("NVDRV::NvEvent_{}", i); + events_interface.events[i] = Kernel::WritableEvent::CreateEventPair( + kernel, Kernel::ResetType::Automatic, event_label); + events_interface.status[i] = EventState::Free; + events_interface.registered[i] = false; + } + auto nvmap_dev = std::make_shared<Devices::nvmap>(system); + devices["/dev/nvhost-as-gpu"] = std::make_shared<Devices::nvhost_as_gpu>(system, nvmap_dev); + devices["/dev/nvhost-gpu"] = std::make_shared<Devices::nvhost_gpu>(system, nvmap_dev); + devices["/dev/nvhost-ctrl-gpu"] = std::make_shared<Devices::nvhost_ctrl_gpu>(system); devices["/dev/nvmap"] = nvmap_dev; - devices["/dev/nvdisp_disp0"] = std::make_shared<Devices::nvdisp_disp0>(nvmap_dev); - devices["/dev/nvhost-ctrl"] = std::make_shared<Devices::nvhost_ctrl>(); - devices["/dev/nvhost-nvdec"] = std::make_shared<Devices::nvhost_nvdec>(); - devices["/dev/nvhost-nvjpg"] = std::make_shared<Devices::nvhost_nvjpg>(); - devices["/dev/nvhost-vic"] = std::make_shared<Devices::nvhost_vic>(); + devices["/dev/nvdisp_disp0"] = std::make_shared<Devices::nvdisp_disp0>(system, nvmap_dev); + devices["/dev/nvhost-ctrl"] = std::make_shared<Devices::nvhost_ctrl>(system, events_interface); + devices["/dev/nvhost-nvdec"] = std::make_shared<Devices::nvhost_nvdec>(system); + devices["/dev/nvhost-nvjpg"] = std::make_shared<Devices::nvhost_nvjpg>(system); + devices["/dev/nvhost-vic"] = std::make_shared<Devices::nvhost_vic>(system); } Module::~Module() = default; @@ -59,12 +71,13 @@ u32 Module::Open(const 
std::string& device_name) { return fd; } -u32 Module::Ioctl(u32 fd, u32 command, const std::vector<u8>& input, std::vector<u8>& output) { +u32 Module::Ioctl(u32 fd, u32 command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl) { auto itr = open_files.find(fd); ASSERT_MSG(itr != open_files.end(), "Tried to talk to an invalid device"); auto& device = itr->second; - return device->ioctl({command}, input, output); + return device->ioctl({command}, input, output, ctrl); } ResultCode Module::Close(u32 fd) { @@ -77,4 +90,22 @@ ResultCode Module::Close(u32 fd) { return RESULT_SUCCESS; } +void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) { + for (u32 i = 0; i < MaxNvEvents; i++) { + if (events_interface.assigned_syncpt[i] == syncpoint_id && + events_interface.assigned_value[i] == value) { + events_interface.LiberateEvent(i); + events_interface.events[i].writable->Signal(); + } + } +} + +Kernel::SharedPtr<Kernel::ReadableEvent> Module::GetEvent(const u32 event_id) const { + return events_interface.events[event_id].readable; +} + +Kernel::SharedPtr<Kernel::WritableEvent> Module::GetEventWriteable(const u32 event_id) const { + return events_interface.events[event_id].writable; +} + } // namespace Service::Nvidia diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h index 53564f6964..a339ab672b 100644 --- a/src/core/hle/service/nvdrv/nvdrv.h +++ b/src/core/hle/service/nvdrv/nvdrv.h @@ -8,8 +8,14 @@ #include <unordered_map> #include <vector> #include "common/common_types.h" +#include "core/hle/kernel/writable_event.h" +#include "core/hle/service/nvdrv/nvdata.h" #include "core/hle/service/service.h" +namespace Core { +class System; +} + namespace Service::NVFlinger { class NVFlinger; } @@ -20,16 +26,72 @@ namespace Devices { class nvdevice; } -struct IoctlFence { - u32 id; - u32 value; +struct EventInterface { + // Mask representing currently busy events + u64 events_mask{}; + // Each kernel event associated with an NV event + std::array<Kernel::EventPair, MaxNvEvents> events; + // The status of the current NVEvent + std::array<EventState, MaxNvEvents> status{}; + // Tells if an NVEvent is registered or not + std::array<bool, MaxNvEvents> registered{}; + // When an NVEvent is waiting on GPU interrupt, this is the sync_point + // associated with it. + std::array<u32, MaxNvEvents> assigned_syncpt{}; + // This is the value of the GPU interrupt that the NVEvent is waiting + // for. + std::array<u32, MaxNvEvents> assigned_value{}; + // Constant to denote an unassigned syncpoint.
+ static constexpr u32 unassigned_syncpt = 0xFFFFFFFF; + std::optional<u32> GetFreeEvent() const { + u64 mask = events_mask; + for (u32 i = 0; i < MaxNvEvents; i++) { + const bool is_free = (mask & 0x1) == 0; + if (is_free) { + if (status[i] == EventState::Registered || status[i] == EventState::Free) { + return {i}; + } + } + mask = mask >> 1; + } + return {}; + } + void SetEventStatus(const u32 event_id, EventState new_status) { + EventState old_status = status[event_id]; + if (old_status == new_status) { + return; + } + status[event_id] = new_status; + if (new_status == EventState::Registered) { + registered[event_id] = true; + } + if (new_status == EventState::Waiting || new_status == EventState::Busy) { + events_mask |= (1ULL << event_id); + } + } + void RegisterEvent(const u32 event_id) { + registered[event_id] = true; + if (status[event_id] == EventState::Free) { + status[event_id] = EventState::Registered; + } + } + void UnregisterEvent(const u32 event_id) { + registered[event_id] = false; + if (status[event_id] == EventState::Registered) { + status[event_id] = EventState::Free; + } + } + void LiberateEvent(const u32 event_id) { + status[event_id] = registered[event_id] ? EventState::Registered : EventState::Free; + events_mask &= ~(1ULL << event_id); + assigned_syncpt[event_id] = unassigned_syncpt; + assigned_value[event_id] = 0; + } }; -static_assert(sizeof(IoctlFence) == 8, "IoctlFence has wrong size"); - class Module final { public: - Module(); + Module(Core::System& system); ~Module(); /// Returns a pointer to one of the available devices, identified by its name. @@ -44,10 +106,17 @@ public: /// Opens a device node and returns a file descriptor to it. u32 Open(const std::string& device_name); /// Sends an ioctl command to the specified file descriptor. - u32 Ioctl(u32 fd, u32 command, const std::vector<u8>& input, std::vector<u8>& output); + u32 Ioctl(u32 fd, u32 command, const std::vector<u8>& input, std::vector<u8>& output, + IoctlCtrl& ctrl); /// Closes a device file descriptor and returns operation success. ResultCode Close(u32 fd); + void SignalSyncpt(const u32 syncpoint_id, const u32 value); + + Kernel::SharedPtr<Kernel::ReadableEvent> GetEvent(u32 event_id) const; + + Kernel::SharedPtr<Kernel::WritableEvent> GetEventWriteable(u32 event_id) const; + private: /// Id to use for the next open file descriptor. u32 next_fd = 1; @@ -57,9 +126,12 @@ private: /// Mapping of device node names to their implementation. std::unordered_map<std::string, std::shared_ptr<Devices::nvdevice>> devices; + + EventInterface events_interface; }; /// Registers all NVDRV services with the specified service manager. 
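Aside on the EventInterface introduced above: each of the MaxNvEvents slots pairs a kernel event with an optional (syncpoint, value) target, and events_mask tracks which slots are busy. A rough usage sketch, not taken from the patch; the enclosing Module member-function context and the names ei, syncpoint_id and threshold are assumed for illustration:

    // Hypothetical ioctl path: reserve a free slot and park it on a syncpoint.
    EventInterface& ei = events_interface;
    if (const auto slot = ei.GetFreeEvent()) {
        ei.RegisterEvent(*slot);
        ei.SetEventStatus(*slot, EventState::Waiting); // sets the slot's bit in events_mask
        ei.assigned_syncpt[*slot] = syncpoint_id;      // GPU syncpoint being waited on
        ei.assigned_value[*slot] = threshold;          // value that completes the wait
    }
    // When the GPU interrupt for (syncpoint_id, threshold) arrives, Module::SignalSyncpt()
    // finds the matching slot, calls LiberateEvent() on it and signals events[slot].writable.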
-void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger); +void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger, + Core::System& system); } // namespace Service::Nvidia diff --git a/src/core/hle/service/nvflinger/buffer_queue.cpp b/src/core/hle/service/nvflinger/buffer_queue.cpp index 5731e815f1..e1a07d3eef 100644 --- a/src/core/hle/service/nvflinger/buffer_queue.cpp +++ b/src/core/hle/service/nvflinger/buffer_queue.cpp @@ -34,7 +34,8 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) buffer_wait_event.writable->Signal(); } -std::optional<u32> BufferQueue::DequeueBuffer(u32 width, u32 height) { +std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> BufferQueue::DequeueBuffer(u32 width, + u32 height) { auto itr = std::find_if(queue.begin(), queue.end(), [&](const Buffer& buffer) { // Only consider free buffers. Buffers become free once again after they've been Acquired // and Released by the compositor, see the NVFlinger::Compose method. @@ -51,7 +52,7 @@ std::optional<u32> BufferQueue::DequeueBuffer(u32 width, u32 height) { } itr->status = Buffer::Status::Dequeued; - return itr->slot; + return {{itr->slot, &itr->multi_fence}}; } const IGBPBuffer& BufferQueue::RequestBuffer(u32 slot) const { @@ -63,7 +64,8 @@ const IGBPBuffer& BufferQueue::RequestBuffer(u32 slot) const { } void BufferQueue::QueueBuffer(u32 slot, BufferTransformFlags transform, - const Common::Rectangle<int>& crop_rect) { + const Common::Rectangle<int>& crop_rect, u32 swap_interval, + Service::Nvidia::MultiFence& multi_fence) { auto itr = std::find_if(queue.begin(), queue.end(), [&](const Buffer& buffer) { return buffer.slot == slot; }); ASSERT(itr != queue.end()); @@ -71,12 +73,21 @@ void BufferQueue::QueueBuffer(u32 slot, BufferTransformFlags transform, itr->status = Buffer::Status::Queued; itr->transform = transform; itr->crop_rect = crop_rect; + itr->swap_interval = swap_interval; + itr->multi_fence = multi_fence; + queue_sequence.push_back(slot); } std::optional<std::reference_wrapper<const BufferQueue::Buffer>> BufferQueue::AcquireBuffer() { - auto itr = std::find_if(queue.begin(), queue.end(), [](const Buffer& buffer) { - return buffer.status == Buffer::Status::Queued; - }); + auto itr = queue.end(); + // Iterate to find a queued buffer matching the requested slot. 
+ while (itr == queue.end() && !queue_sequence.empty()) { + u32 slot = queue_sequence.front(); + itr = std::find_if(queue.begin(), queue.end(), [&slot](const Buffer& buffer) { + return buffer.status == Buffer::Status::Queued && buffer.slot == slot; + }); + queue_sequence.pop_front(); + } if (itr == queue.end()) return {}; itr->status = Buffer::Status::Acquired; diff --git a/src/core/hle/service/nvflinger/buffer_queue.h b/src/core/hle/service/nvflinger/buffer_queue.h index e1ccb61714..356bedb814 100644 --- a/src/core/hle/service/nvflinger/buffer_queue.h +++ b/src/core/hle/service/nvflinger/buffer_queue.h @@ -4,6 +4,7 @@ #pragma once +#include <list> #include <optional> #include <vector> @@ -12,6 +13,7 @@ #include "common/swap.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/writable_event.h" +#include "core/hle/service/nvdrv/nvdata.h" namespace Service::NVFlinger { @@ -68,13 +70,17 @@ public: IGBPBuffer igbp_buffer; BufferTransformFlags transform; Common::Rectangle<int> crop_rect; + u32 swap_interval; + Service::Nvidia::MultiFence multi_fence; }; void SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer); - std::optional<u32> DequeueBuffer(u32 width, u32 height); + std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> DequeueBuffer(u32 width, + u32 height); const IGBPBuffer& RequestBuffer(u32 slot) const; void QueueBuffer(u32 slot, BufferTransformFlags transform, - const Common::Rectangle<int>& crop_rect); + const Common::Rectangle<int>& crop_rect, u32 swap_interval, + Service::Nvidia::MultiFence& multi_fence); std::optional<std::reference_wrapper<const Buffer>> AcquireBuffer(); void ReleaseBuffer(u32 slot); u32 Query(QueryType type); @@ -92,6 +98,7 @@ private: u64 layer_id; std::vector<Buffer> queue; + std::list<u32> queue_sequence; Kernel::EventPair buffer_wait_event; }; diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index 3c5c53e246..f9db79370c 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp @@ -37,15 +37,14 @@ NVFlinger::NVFlinger(Core::Timing::CoreTiming& core_timing) : core_timing{core_t displays.emplace_back(4, "Null"); // Schedule the screen composition events - const auto ticks = Settings::values.force_30fps_mode ? frame_ticks_30fps : frame_ticks; - - composition_event = core_timing.RegisterEvent( - "ScreenComposition", [this, ticks](u64 userdata, s64 cycles_late) { - Compose(); - this->core_timing.ScheduleEvent(ticks - cycles_late, composition_event); - }); - - core_timing.ScheduleEvent(ticks, composition_event); + composition_event = core_timing.RegisterEvent("ScreenComposition", [this](u64 userdata, + s64 cycles_late) { + Compose(); + const auto ticks = Settings::values.force_30fps_mode ? 
frame_ticks_30fps : GetNextTicks(); + this->core_timing.ScheduleEvent(std::max<s64>(0LL, ticks - cycles_late), composition_event); + }); + + core_timing.ScheduleEvent(frame_ticks, composition_event); } NVFlinger::~NVFlinger() { @@ -206,8 +205,14 @@ void NVFlinger::Compose() { igbp_buffer.width, igbp_buffer.height, igbp_buffer.stride, buffer->get().transform, buffer->get().crop_rect); + swap_interval = buffer->get().swap_interval; buffer_queue.ReleaseBuffer(buffer->get().slot); } } +s64 NVFlinger::GetNextTicks() const { + constexpr s64 max_hertz = 120LL; + return (Core::Timing::BASE_CLOCK_RATE * (1LL << swap_interval)) / max_hertz; +} + } // namespace Service::NVFlinger diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h index c0a83fffb8..988be87264 100644 --- a/src/core/hle/service/nvflinger/nvflinger.h +++ b/src/core/hle/service/nvflinger/nvflinger.h @@ -74,6 +74,8 @@ public: /// finished. void Compose(); + s64 GetNextTicks() const; + private: /// Finds the display identified by the specified ID. VI::Display* FindDisplay(u64 display_id); @@ -98,6 +100,8 @@ private: /// layers. u32 next_buffer_queue_id = 1; + u32 swap_interval = 1; + /// Event that handles screen composition. Core::Timing::EventType* composition_event; diff --git a/src/core/hle/service/pm/pm.cpp b/src/core/hle/service/pm/pm.cpp index ebcc41a43b..fe6b5f798f 100644 --- a/src/core/hle/service/pm/pm.cpp +++ b/src/core/hle/service/pm/pm.cpp @@ -3,11 +3,44 @@ // Refer to the license.txt file included. #include "core/hle/ipc_helpers.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/process.h" #include "core/hle/service/pm/pm.h" #include "core/hle/service/service.h" namespace Service::PM { +namespace { + +constexpr ResultCode ERROR_PROCESS_NOT_FOUND{ErrorModule::PM, 1}; + +constexpr u64 NO_PROCESS_FOUND_PID{0}; + +std::optional<Kernel::SharedPtr<Kernel::Process>> SearchProcessList( + const std::vector<Kernel::SharedPtr<Kernel::Process>>& process_list, + std::function<bool(const Kernel::SharedPtr<Kernel::Process>&)> predicate) { + const auto iter = std::find_if(process_list.begin(), process_list.end(), predicate); + + if (iter == process_list.end()) { + return std::nullopt; + } + + return *iter; +} + +void GetApplicationPidGeneric(Kernel::HLERequestContext& ctx, + const std::vector<Kernel::SharedPtr<Kernel::Process>>& process_list) { + const auto process = SearchProcessList(process_list, [](const auto& process) { + return process->GetProcessID() == Kernel::Process::ProcessIDMin; + }); + + IPC::ResponseBuilder rb{ctx, 4}; + rb.Push(RESULT_SUCCESS); + rb.Push(process.has_value() ? 
(*process)->GetProcessID() : NO_PROCESS_FOUND_PID); +} + +} // Anonymous namespace + class BootMode final : public ServiceFramework<BootMode> { public: explicit BootMode() : ServiceFramework{"pm:bm"} { @@ -41,14 +74,15 @@ private: class DebugMonitor final : public ServiceFramework<DebugMonitor> { public: - explicit DebugMonitor() : ServiceFramework{"pm:dmnt"} { + explicit DebugMonitor(const Kernel::KernelCore& kernel) + : ServiceFramework{"pm:dmnt"}, kernel(kernel) { // clang-format off static const FunctionInfo functions[] = { {0, nullptr, "GetDebugProcesses"}, {1, nullptr, "StartDebugProcess"}, - {2, nullptr, "GetTitlePid"}, + {2, &DebugMonitor::GetTitlePid, "GetTitlePid"}, {3, nullptr, "EnableDebugForTitleId"}, - {4, nullptr, "GetApplicationPid"}, + {4, &DebugMonitor::GetApplicationPid, "GetApplicationPid"}, {5, nullptr, "EnableDebugForApplication"}, {6, nullptr, "DisableDebug"}, }; @@ -56,21 +90,77 @@ public: RegisterHandlers(functions); } + +private: + void GetTitlePid(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto title_id = rp.PopRaw<u64>(); + + LOG_DEBUG(Service_PM, "called, title_id={:016X}", title_id); + + const auto process = + SearchProcessList(kernel.GetProcessList(), [title_id](const auto& process) { + return process->GetTitleID() == title_id; + }); + + if (!process.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ERROR_PROCESS_NOT_FOUND); + return; + } + + IPC::ResponseBuilder rb{ctx, 4}; + rb.Push(RESULT_SUCCESS); + rb.Push((*process)->GetProcessID()); + } + + void GetApplicationPid(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_PM, "called"); + GetApplicationPidGeneric(ctx, kernel.GetProcessList()); + } + + const Kernel::KernelCore& kernel; }; class Info final : public ServiceFramework<Info> { public: - explicit Info() : ServiceFramework{"pm:info"} { + explicit Info(const std::vector<Kernel::SharedPtr<Kernel::Process>>& process_list) + : ServiceFramework{"pm:info"}, process_list(process_list) { static const FunctionInfo functions[] = { - {0, nullptr, "GetTitleId"}, + {0, &Info::GetTitleId, "GetTitleId"}, }; RegisterHandlers(functions); } + +private: + void GetTitleId(Kernel::HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto process_id = rp.PopRaw<u64>(); + + LOG_DEBUG(Service_PM, "called, process_id={:016X}", process_id); + + const auto process = SearchProcessList(process_list, [process_id](const auto& process) { + return process->GetProcessID() == process_id; + }); + + if (!process.has_value()) { + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ERROR_PROCESS_NOT_FOUND); + return; + } + + IPC::ResponseBuilder rb{ctx, 4}; + rb.Push(RESULT_SUCCESS); + rb.Push((*process)->GetTitleID()); + } + + const std::vector<Kernel::SharedPtr<Kernel::Process>>& process_list; }; class Shell final : public ServiceFramework<Shell> { public: - explicit Shell() : ServiceFramework{"pm:shell"} { + explicit Shell(const Kernel::KernelCore& kernel) + : ServiceFramework{"pm:shell"}, kernel(kernel) { // clang-format off static const FunctionInfo functions[] = { {0, nullptr, "LaunchProcess"}, @@ -79,21 +169,31 @@ public: {3, nullptr, "GetProcessEventWaiter"}, {4, nullptr, "GetProcessEventType"}, {5, nullptr, "NotifyBootFinished"}, - {6, nullptr, "GetApplicationPid"}, + {6, &Shell::GetApplicationPid, "GetApplicationPid"}, {7, nullptr, "BoostSystemMemoryResourceLimit"}, {8, nullptr, "EnableAdditionalSystemThreads"}, + {9, nullptr, "GetUnimplementedEventHandle"}, }; // clang-format on RegisterHandlers(functions); } + +private: + void 
GetApplicationPid(Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_PM, "called"); + GetApplicationPidGeneric(ctx, kernel.GetProcessList()); + } + + const Kernel::KernelCore& kernel; }; -void InstallInterfaces(SM::ServiceManager& sm) { - std::make_shared<BootMode>()->InstallAsService(sm); - std::make_shared<DebugMonitor>()->InstallAsService(sm); - std::make_shared<Info>()->InstallAsService(sm); - std::make_shared<Shell>()->InstallAsService(sm); +void InstallInterfaces(Core::System& system) { + std::make_shared<BootMode>()->InstallAsService(system.ServiceManager()); + std::make_shared<DebugMonitor>(system.Kernel())->InstallAsService(system.ServiceManager()); + std::make_shared<Info>(system.Kernel().GetProcessList()) + ->InstallAsService(system.ServiceManager()); + std::make_shared<Shell>(system.Kernel())->InstallAsService(system.ServiceManager()); } } // namespace Service::PM diff --git a/src/core/hle/service/pm/pm.h b/src/core/hle/service/pm/pm.h index cc8d3f2152..852e7050c4 100644 --- a/src/core/hle/service/pm/pm.h +++ b/src/core/hle/service/pm/pm.h @@ -4,8 +4,8 @@ #pragma once -namespace Service::SM { -class ServiceManager; +namespace Core { +class System; } namespace Service::PM { @@ -16,6 +16,6 @@ enum class SystemBootMode { }; /// Registers all PM services with the specified service manager. -void InstallInterfaces(SM::ServiceManager& service_manager); +void InstallInterfaces(Core::System& system); } // namespace Service::PM diff --git a/src/core/hle/service/service.cpp b/src/core/hle/service/service.cpp index 5fc7d3cab7..3a0f8c3f68 100644 --- a/src/core/hle/service/service.cpp +++ b/src/core/hle/service/service.cpp @@ -195,8 +195,7 @@ ResultCode ServiceFrameworkBase::HandleSyncRequest(Kernel::HLERequestContext& co // Module interface /// Initialize ServiceManager -void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system, - FileSys::VfsFilesystem& vfs) { +void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system) { // NVFlinger needs to be accessed by several services like Vi and AppletOE so we instantiate it // here and pass it into the respective InstallInterfaces functions. 
auto nv_flinger = std::make_shared<NVFlinger::NVFlinger>(system.CoreTiming()); @@ -206,8 +205,8 @@ void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system, Account::InstallInterfaces(system); AM::InstallInterfaces(*sm, nv_flinger, system); AOC::InstallInterfaces(*sm); - APM::InstallInterfaces(*sm); - Audio::InstallInterfaces(*sm); + APM::InstallInterfaces(system); + Audio::InstallInterfaces(*sm, system); BCAT::InstallInterfaces(*sm); BPC::InstallInterfaces(*sm); BtDrv::InstallInterfaces(*sm); @@ -218,7 +217,7 @@ void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system, EUPLD::InstallInterfaces(*sm); Fatal::InstallInterfaces(*sm); FGM::InstallInterfaces(*sm); - FileSystem::InstallInterfaces(*sm, vfs); + FileSystem::InstallInterfaces(system); Friend::InstallInterfaces(*sm); Glue::InstallInterfaces(system); GRC::InstallInterfaces(*sm); @@ -237,12 +236,12 @@ void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system, NIM::InstallInterfaces(*sm); NPNS::InstallInterfaces(*sm); NS::InstallInterfaces(*sm); - Nvidia::InstallInterfaces(*sm, *nv_flinger); + Nvidia::InstallInterfaces(*sm, *nv_flinger, system); PCIe::InstallInterfaces(*sm); PCTL::InstallInterfaces(*sm); PCV::InstallInterfaces(*sm); PlayReport::InstallInterfaces(*sm); - PM::InstallInterfaces(*sm); + PM::InstallInterfaces(system); PSC::InstallInterfaces(*sm); PSM::InstallInterfaces(*sm); Set::InstallInterfaces(*sm); diff --git a/src/core/hle/service/service.h b/src/core/hle/service/service.h index abbfe5524e..c6c4bdae5a 100644 --- a/src/core/hle/service/service.h +++ b/src/core/hle/service/service.h @@ -182,8 +182,7 @@ private: }; /// Initialize ServiceManager -void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system, - FileSys::VfsFilesystem& vfs); +void Init(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system); /// Shutdown ServiceManager void Shutdown(); diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp index f1fa6ccd10..199b306357 100644 --- a/src/core/hle/service/vi/vi.cpp +++ b/src/core/hle/service/vi/vi.cpp @@ -21,6 +21,7 @@ #include "core/hle/kernel/readable_event.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/writable_event.h" +#include "core/hle/service/nvdrv/nvdata.h" #include "core/hle/service/nvdrv/nvdrv.h" #include "core/hle/service/nvflinger/buffer_queue.h" #include "core/hle/service/nvflinger/nvflinger.h" @@ -328,32 +329,22 @@ public: Data data; }; -struct BufferProducerFence { - u32 is_valid; - std::array<Nvidia::IoctlFence, 4> fences; -}; -static_assert(sizeof(BufferProducerFence) == 36, "BufferProducerFence has wrong size"); - class IGBPDequeueBufferResponseParcel : public Parcel { public: - explicit IGBPDequeueBufferResponseParcel(u32 slot) : slot(slot) {} + explicit IGBPDequeueBufferResponseParcel(u32 slot, Service::Nvidia::MultiFence& multi_fence) + : slot(slot), multi_fence(multi_fence) {} ~IGBPDequeueBufferResponseParcel() override = default; protected: void SerializeData() override { - // TODO(Subv): Find out how this Fence is used. 
- BufferProducerFence fence = {}; - fence.is_valid = 1; - for (auto& fence_ : fence.fences) - fence_.id = -1; - Write(slot); Write<u32_le>(1); - WriteObject(fence); + WriteObject(multi_fence); Write<u32_le>(0); } u32_le slot; + Service::Nvidia::MultiFence multi_fence; }; class IGBPRequestBufferRequestParcel : public Parcel { @@ -400,12 +391,6 @@ public: data = Read<Data>(); } - struct Fence { - u32_le id; - u32_le value; - }; - static_assert(sizeof(Fence) == 8, "Fence has wrong size"); - struct Data { u32_le slot; INSERT_PADDING_WORDS(3); @@ -418,15 +403,15 @@ public: s32_le scaling_mode; NVFlinger::BufferQueue::BufferTransformFlags transform; u32_le sticky_transform; - INSERT_PADDING_WORDS(2); - u32_le fence_is_valid; - std::array<Fence, 2> fences; + INSERT_PADDING_WORDS(1); + u32_le swap_interval; + Service::Nvidia::MultiFence multi_fence; Common::Rectangle<int> GetCropRect() const { return {crop_left, crop_top, crop_right, crop_bottom}; } }; - static_assert(sizeof(Data) == 80, "ParcelData has wrong size"); + static_assert(sizeof(Data) == 96, "ParcelData has wrong size"); Data data; }; @@ -547,11 +532,11 @@ private: IGBPDequeueBufferRequestParcel request{ctx.ReadBuffer()}; const u32 width{request.data.width}; const u32 height{request.data.height}; - std::optional<u32> slot = buffer_queue.DequeueBuffer(width, height); + auto result = buffer_queue.DequeueBuffer(width, height); - if (slot) { + if (result) { // Buffer is available - IGBPDequeueBufferResponseParcel response{*slot}; + IGBPDequeueBufferResponseParcel response{result->first, *result->second}; ctx.WriteBuffer(response.Serialize()); } else { // Wait the current thread until a buffer becomes available @@ -561,10 +546,10 @@ private: Kernel::ThreadWakeupReason reason) { // Repeat TransactParcel DequeueBuffer when a buffer is available auto& buffer_queue = nv_flinger->FindBufferQueue(id); - std::optional<u32> slot = buffer_queue.DequeueBuffer(width, height); - ASSERT_MSG(slot != std::nullopt, "Could not dequeue buffer."); + auto result = buffer_queue.DequeueBuffer(width, height); + ASSERT_MSG(result != std::nullopt, "Could not dequeue buffer."); - IGBPDequeueBufferResponseParcel response{*slot}; + IGBPDequeueBufferResponseParcel response{result->first, *result->second}; ctx.WriteBuffer(response.Serialize()); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(RESULT_SUCCESS); @@ -582,7 +567,8 @@ private: IGBPQueueBufferRequestParcel request{ctx.ReadBuffer()}; buffer_queue.QueueBuffer(request.data.slot, request.data.transform, - request.data.GetCropRect()); + request.data.GetCropRect(), request.data.swap_interval, + request.data.multi_fence); IGBPQueueBufferResponseParcel response{1280, 720}; ctx.WriteBuffer(response.Serialize()); diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp index 6d4b023758..f1795fdd62 100644 --- a/src/core/loader/elf.cpp +++ b/src/core/loader/elf.cpp @@ -295,7 +295,7 @@ Kernel::CodeSet ElfReader::LoadInto(VAddr vaddr) { } } - std::vector<u8> program_image(total_image_size); + Kernel::PhysicalMemory program_image(total_image_size); std::size_t current_image_position = 0; Kernel::CodeSet codeset; diff --git a/src/core/loader/kip.cpp b/src/core/loader/kip.cpp index 70051c13ae..474b55cb13 100644 --- a/src/core/loader/kip.cpp +++ b/src/core/loader/kip.cpp @@ -69,7 +69,7 @@ AppLoader::LoadResult AppLoader_KIP::Load(Kernel::Process& process) { const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); Kernel::CodeSet codeset; - std::vector<u8> program_image; + Kernel::PhysicalMemory program_image; 
const auto load_segment = [&program_image](Kernel::CodeSet::Segment& segment, const std::vector<u8>& data, u32 offset) { diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp index 6a0ca389b0..e92e2e06ea 100644 --- a/src/core/loader/nro.cpp +++ b/src/core/loader/nro.cpp @@ -143,7 +143,7 @@ static bool LoadNroImpl(Kernel::Process& process, const std::vector<u8>& data, } // Build program image - std::vector<u8> program_image(PageAlignSize(nro_header.file_size)); + Kernel::PhysicalMemory program_image(PageAlignSize(nro_header.file_size)); std::memcpy(program_image.data(), data.data(), program_image.size()); if (program_image.size() != PageAlignSize(nro_header.file_size)) { return {}; diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp index 29311404a7..70c90109f3 100644 --- a/src/core/loader/nso.cpp +++ b/src/core/loader/nso.cpp @@ -89,7 +89,7 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, // Build program image Kernel::CodeSet codeset; - std::vector<u8> program_image; + Kernel::PhysicalMemory program_image; for (std::size_t i = 0; i < nso_header.segments.size(); ++i) { std::vector<u8> data = file.ReadBytes(nso_header.segments_compressed_size[i], nso_header.segments[i].offset); diff --git a/src/core/reporter.cpp b/src/core/reporter.cpp index 6ea26fda72..cfe0771e23 100644 --- a/src/core/reporter.cpp +++ b/src/core/reporter.cpp @@ -5,8 +5,8 @@ #include <ctime> #include <fstream> +#include <fmt/chrono.h> #include <fmt/format.h> -#include <fmt/time.h> #include <json.hpp> #include "common/file_util.h" @@ -350,6 +350,24 @@ void Reporter::SaveErrorReport(u64 title_id, ResultCode result, SaveToFile(std::move(out), GetPath("error_report", title_id, timestamp)); } +void Reporter::SaveFilesystemAccessReport(Service::FileSystem::LogMode log_mode, + std::string log_message) const { + if (!IsReportingEnabled()) + return; + + const auto timestamp = GetTimestamp(); + const auto title_id = system.CurrentProcess()->GetTitleID(); + json out; + + out["yuzu_version"] = GetYuzuVersionData(); + out["report_common"] = GetReportCommonData(title_id, RESULT_SUCCESS, timestamp); + + out["log_mode"] = fmt::format("{:08X}", static_cast<u32>(log_mode)); + out["log_message"] = std::move(log_message); + + SaveToFile(std::move(out), GetPath("filesystem_access_report", title_id, timestamp)); +} + void Reporter::SaveUserReport() const { if (!IsReportingEnabled()) { return; diff --git a/src/core/reporter.h b/src/core/reporter.h index 4266ca5507..44256de507 100644 --- a/src/core/reporter.h +++ b/src/core/reporter.h @@ -16,6 +16,10 @@ namespace Kernel { class HLERequestContext; } // namespace Kernel +namespace Service::FileSystem { +enum class LogMode : u32; +} + namespace Core { class System; @@ -49,6 +53,9 @@ public: std::optional<std::string> custom_text_main = {}, std::optional<std::string> custom_text_detail = {}) const; + void SaveFilesystemAccessReport(Service::FileSystem::LogMode log_mode, + std::string log_message) const; + void SaveUserReport() const; private: diff --git a/src/core/settings.cpp b/src/core/settings.cpp index 63aa59690e..0dd1632ac2 100644 --- a/src/core/settings.cpp +++ b/src/core/settings.cpp @@ -85,7 +85,6 @@ void LogSettings() { LogSetting("System_RngSeed", Settings::values.rng_seed.value_or(0)); LogSetting("System_CurrentUser", Settings::values.current_user); LogSetting("System_LanguageIndex", Settings::values.language_index); - LogSetting("Core_CpuJitEnabled", Settings::values.cpu_jit_enabled); LogSetting("Core_UseMultiCore", 
Settings::values.use_multi_core); LogSetting("Renderer_UseResolutionFactor", Settings::values.resolution_factor); LogSetting("Renderer_UseFrameLimit", Settings::values.use_frame_limit); diff --git a/src/core/settings.h b/src/core/settings.h index acf18d6532..6638ce8f9b 100644 --- a/src/core/settings.h +++ b/src/core/settings.h @@ -378,7 +378,6 @@ struct Values { std::atomic_bool is_device_reload_pending{true}; // Core - bool cpu_jit_enabled; bool use_multi_core; // Data Storage diff --git a/src/core/telemetry_session.cpp b/src/core/telemetry_session.cpp index 98f49042a0..793d102d3e 100644 --- a/src/core/telemetry_session.cpp +++ b/src/core/telemetry_session.cpp @@ -168,7 +168,6 @@ void TelemetrySession::AddInitialInfo(Loader::AppLoader& app_loader) { AddField(Telemetry::FieldType::UserConfig, "Audio_SinkId", Settings::values.sink_id); AddField(Telemetry::FieldType::UserConfig, "Audio_EnableAudioStretching", Settings::values.enable_audio_stretching); - AddField(Telemetry::FieldType::UserConfig, "Core_UseCpuJit", Settings::values.cpu_jit_enabled); AddField(Telemetry::FieldType::UserConfig, "Core_UseMultiCore", Settings::values.use_multi_core); AddField(Telemetry::FieldType::UserConfig, "Renderer_ResolutionFactor", diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 6839abe716..e2f85c5f1c 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -1,4 +1,7 @@ add_library(video_core STATIC + buffer_cache/buffer_block.h + buffer_cache/buffer_cache.h + buffer_cache/map_interval.h dma_pusher.cpp dma_pusher.h debug_utils/debug_utils.cpp @@ -43,8 +46,6 @@ add_library(video_core STATIC renderer_opengl/gl_device.h renderer_opengl/gl_framebuffer_cache.cpp renderer_opengl/gl_framebuffer_cache.h - renderer_opengl/gl_global_cache.cpp - renderer_opengl/gl_global_cache.h renderer_opengl/gl_rasterizer.cpp renderer_opengl/gl_rasterizer.h renderer_opengl/gl_resource_manager.cpp @@ -101,8 +102,11 @@ add_library(video_core STATIC shader/decode/integer_set.cpp shader/decode/half_set.cpp shader/decode/video.cpp + shader/decode/warp.cpp shader/decode/xmad.cpp shader/decode/other.cpp + shader/control_flow.cpp + shader/control_flow.h shader/decode.cpp shader/node_helper.cpp shader/node_helper.h diff --git a/src/video_core/buffer_cache/buffer_block.h b/src/video_core/buffer_cache/buffer_block.h new file mode 100644 index 0000000000..4b91931821 --- /dev/null +++ b/src/video_core/buffer_cache/buffer_block.h @@ -0,0 +1,76 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include <unordered_set> +#include <utility> + +#include "common/alignment.h" +#include "common/common_types.h" +#include "video_core/gpu.h" + +namespace VideoCommon { + +class BufferBlock { +public: + bool Overlaps(const CacheAddr start, const CacheAddr end) const { + return (cache_addr < end) && (cache_addr_end > start); + } + + bool IsInside(const CacheAddr other_start, const CacheAddr other_end) const { + return cache_addr <= other_start && other_end <= cache_addr_end; + } + + u8* GetWritableHostPtr() const { + return FromCacheAddr(cache_addr); + } + + u8* GetWritableHostPtr(std::size_t offset) const { + return FromCacheAddr(cache_addr + offset); + } + + std::size_t GetOffset(const CacheAddr in_addr) { + return static_cast<std::size_t>(in_addr - cache_addr); + } + + CacheAddr GetCacheAddr() const { + return cache_addr; + } + + CacheAddr GetCacheAddrEnd() const { + return cache_addr_end; + } + + void SetCacheAddr(const CacheAddr new_addr) { + cache_addr = new_addr; + cache_addr_end = new_addr + size; + } + + std::size_t GetSize() const { + return size; + } + + void SetEpoch(u64 new_epoch) { + epoch = new_epoch; + } + + u64 GetEpoch() { + return epoch; + } + +protected: + explicit BufferBlock(CacheAddr cache_addr, const std::size_t size) : size{size} { + SetCacheAddr(cache_addr); + } + ~BufferBlock() = default; + +private: + CacheAddr cache_addr{}; + CacheAddr cache_addr_end{}; + std::size_t size{}; + u64 epoch{}; +}; + +} // namespace VideoCommon diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h new file mode 100644 index 0000000000..2442ddfd66 --- /dev/null +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -0,0 +1,447 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <array> +#include <memory> +#include <mutex> +#include <unordered_map> +#include <unordered_set> +#include <utility> +#include <vector> + +#include "common/alignment.h" +#include "common/common_types.h" +#include "core/core.h" +#include "video_core/buffer_cache/buffer_block.h" +#include "video_core/buffer_cache/map_interval.h" +#include "video_core/memory_manager.h" +#include "video_core/rasterizer_interface.h" + +namespace VideoCommon { + +using MapInterval = std::shared_ptr<MapIntervalBase>; + +template <typename TBuffer, typename TBufferType, typename StreamBuffer> +class BufferCache { +public: + using BufferInfo = std::pair<const TBufferType*, u64>; + + BufferInfo UploadMemory(GPUVAddr gpu_addr, std::size_t size, std::size_t alignment = 4, + bool is_written = false) { + std::lock_guard lock{mutex}; + + auto& memory_manager = system.GPU().MemoryManager(); + const auto host_ptr = memory_manager.GetPointer(gpu_addr); + if (!host_ptr) { + return {GetEmptyBuffer(size), 0}; + } + const auto cache_addr = ToCacheAddr(host_ptr); + + // Cache management is a big overhead, so only cache entries with a given size. + // TODO: Figure out which size is the best for given games. 
+ constexpr std::size_t max_stream_size = 0x800; + if (size < max_stream_size) { + if (!is_written && !IsRegionWritten(cache_addr, cache_addr + size - 1)) { + return StreamBufferUpload(host_ptr, size, alignment); + } + } + + auto block = GetBlock(cache_addr, size); + auto map = MapAddress(block, gpu_addr, cache_addr, size); + if (is_written) { + map->MarkAsModified(true, GetModifiedTicks()); + if (!map->IsWritten()) { + map->MarkAsWritten(true); + MarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1); + } + } else { + if (map->IsWritten()) { + WriteBarrier(); + } + } + + const u64 offset = static_cast<u64>(block->GetOffset(cache_addr)); + + return {ToHandle(block), offset}; + } + + /// Uploads from a host memory. Returns the OpenGL buffer where it's located and its offset. + BufferInfo UploadHostMemory(const void* raw_pointer, std::size_t size, + std::size_t alignment = 4) { + std::lock_guard lock{mutex}; + return StreamBufferUpload(raw_pointer, size, alignment); + } + + void Map(std::size_t max_size) { + std::lock_guard lock{mutex}; + + std::tie(buffer_ptr, buffer_offset_base, invalidated) = stream_buffer->Map(max_size, 4); + buffer_offset = buffer_offset_base; + } + + /// Finishes the upload stream, returns true on bindings invalidation. + bool Unmap() { + std::lock_guard lock{mutex}; + + stream_buffer->Unmap(buffer_offset - buffer_offset_base); + return std::exchange(invalidated, false); + } + + void TickFrame() { + ++epoch; + while (!pending_destruction.empty()) { + if (pending_destruction.front()->GetEpoch() + 1 > epoch) { + break; + } + pending_destruction.pop_front(); + } + } + + /// Write any cached resources overlapping the specified region back to memory + void FlushRegion(CacheAddr addr, std::size_t size) { + std::lock_guard lock{mutex}; + + std::vector<MapInterval> objects = GetMapsInRange(addr, size); + std::sort(objects.begin(), objects.end(), [](const MapInterval& a, const MapInterval& b) { + return a->GetModificationTick() < b->GetModificationTick(); + }); + for (auto& object : objects) { + if (object->IsModified() && object->IsRegistered()) { + FlushMap(object); + } + } + } + + /// Mark the specified region as being invalidated + void InvalidateRegion(CacheAddr addr, u64 size) { + std::lock_guard lock{mutex}; + + std::vector<MapInterval> objects = GetMapsInRange(addr, size); + for (auto& object : objects) { + if (object->IsRegistered()) { + Unregister(object); + } + } + } + + virtual const TBufferType* GetEmptyBuffer(std::size_t size) = 0; + +protected: + explicit BufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system, + std::unique_ptr<StreamBuffer> stream_buffer) + : rasterizer{rasterizer}, system{system}, stream_buffer{std::move(stream_buffer)}, + stream_buffer_handle{this->stream_buffer->GetHandle()} {} + + ~BufferCache() = default; + + virtual const TBufferType* ToHandle(const TBuffer& storage) = 0; + + virtual void WriteBarrier() = 0; + + virtual TBuffer CreateBlock(CacheAddr cache_addr, std::size_t size) = 0; + + virtual void UploadBlockData(const TBuffer& buffer, std::size_t offset, std::size_t size, + const u8* data) = 0; + + virtual void DownloadBlockData(const TBuffer& buffer, std::size_t offset, std::size_t size, + u8* data) = 0; + + virtual void CopyBlock(const TBuffer& src, const TBuffer& dst, std::size_t src_offset, + std::size_t dst_offset, std::size_t size) = 0; + + /// Register an object into the cache + void Register(const MapInterval& new_map, bool inherit_written = false) { + const CacheAddr cache_ptr = new_map->GetStart(); + 
const std::optional<VAddr> cpu_addr = + system.GPU().MemoryManager().GpuToCpuAddress(new_map->GetGpuAddress()); + if (!cache_ptr || !cpu_addr) { + LOG_CRITICAL(HW_GPU, "Failed to register buffer with unmapped gpu_address 0x{:016x}", + new_map->GetGpuAddress()); + return; + } + const std::size_t size = new_map->GetEnd() - new_map->GetStart(); + new_map->SetCpuAddress(*cpu_addr); + new_map->MarkAsRegistered(true); + const IntervalType interval{new_map->GetStart(), new_map->GetEnd()}; + mapped_addresses.insert({interval, new_map}); + rasterizer.UpdatePagesCachedCount(*cpu_addr, size, 1); + if (inherit_written) { + MarkRegionAsWritten(new_map->GetStart(), new_map->GetEnd() - 1); + new_map->MarkAsWritten(true); + } + } + + /// Unregisters an object from the cache + void Unregister(MapInterval& map) { + const std::size_t size = map->GetEnd() - map->GetStart(); + rasterizer.UpdatePagesCachedCount(map->GetCpuAddress(), size, -1); + map->MarkAsRegistered(false); + if (map->IsWritten()) { + UnmarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1); + } + const IntervalType delete_interval{map->GetStart(), map->GetEnd()}; + mapped_addresses.erase(delete_interval); + } + +private: + MapInterval CreateMap(const CacheAddr start, const CacheAddr end, const GPUVAddr gpu_addr) { + return std::make_shared<MapIntervalBase>(start, end, gpu_addr); + } + + MapInterval MapAddress(const TBuffer& block, const GPUVAddr gpu_addr, + const CacheAddr cache_addr, const std::size_t size) { + + std::vector<MapInterval> overlaps = GetMapsInRange(cache_addr, size); + if (overlaps.empty()) { + const CacheAddr cache_addr_end = cache_addr + size; + MapInterval new_map = CreateMap(cache_addr, cache_addr_end, gpu_addr); + u8* host_ptr = FromCacheAddr(cache_addr); + UploadBlockData(block, block->GetOffset(cache_addr), size, host_ptr); + Register(new_map); + return new_map; + } + + const CacheAddr cache_addr_end = cache_addr + size; + if (overlaps.size() == 1) { + MapInterval& current_map = overlaps[0]; + if (current_map->IsInside(cache_addr, cache_addr_end)) { + return current_map; + } + } + CacheAddr new_start = cache_addr; + CacheAddr new_end = cache_addr_end; + bool write_inheritance = false; + bool modified_inheritance = false; + // Calculate new buffer parameters + for (auto& overlap : overlaps) { + new_start = std::min(overlap->GetStart(), new_start); + new_end = std::max(overlap->GetEnd(), new_end); + write_inheritance |= overlap->IsWritten(); + modified_inheritance |= overlap->IsModified(); + } + GPUVAddr new_gpu_addr = gpu_addr + new_start - cache_addr; + for (auto& overlap : overlaps) { + Unregister(overlap); + } + UpdateBlock(block, new_start, new_end, overlaps); + MapInterval new_map = CreateMap(new_start, new_end, new_gpu_addr); + if (modified_inheritance) { + new_map->MarkAsModified(true, GetModifiedTicks()); + } + Register(new_map, write_inheritance); + return new_map; + } + + void UpdateBlock(const TBuffer& block, CacheAddr start, CacheAddr end, + std::vector<MapInterval>& overlaps) { + const IntervalType base_interval{start, end}; + IntervalSet interval_set{}; + interval_set.add(base_interval); + for (auto& overlap : overlaps) { + const IntervalType subtract{overlap->GetStart(), overlap->GetEnd()}; + interval_set.subtract(subtract); + } + for (auto& interval : interval_set) { + std::size_t size = interval.upper() - interval.lower(); + if (size > 0) { + u8* host_ptr = FromCacheAddr(interval.lower()); + UploadBlockData(block, block->GetOffset(interval.lower()), size, host_ptr); + } + } + } + + 
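A small worked example of the MapAddress/UpdateBlock path above (editorial illustration; the addresses are invented): suppose a registered, GPU-written map already covers cache addresses [0x1000, 0x1800) and a new 0xC00-byte upload arrives at 0x1400.

    // overlaps            = { [0x1000, 0x1800), marked written }
    // new_start / new_end = min(0x1000, 0x1400) = 0x1000, max(0x1800, 0x2000) = 0x2000
    // write_inheritance   = true (the old map was written by the GPU)
    //
    // The old map is Unregister()ed, UpdateBlock() uploads only the uncovered
    // sub-range [0x1800, 0x2000) left after the interval_set subtraction, and one
    // merged map [0x1000, 0x2000) is Register()ed with inherit_written = true, so
    // the whole range is tracked as GPU-written again.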
std::vector<MapInterval> GetMapsInRange(CacheAddr addr, std::size_t size) { + if (size == 0) { + return {}; + } + + std::vector<MapInterval> objects{}; + const IntervalType interval{addr, addr + size}; + for (auto& pair : boost::make_iterator_range(mapped_addresses.equal_range(interval))) { + objects.push_back(pair.second); + } + + return objects; + } + + /// Returns a ticks counter used for tracking when cached objects were last modified + u64 GetModifiedTicks() { + return ++modified_ticks; + } + + void FlushMap(MapInterval map) { + std::size_t size = map->GetEnd() - map->GetStart(); + TBuffer block = blocks[map->GetStart() >> block_page_bits]; + u8* host_ptr = FromCacheAddr(map->GetStart()); + DownloadBlockData(block, block->GetOffset(map->GetStart()), size, host_ptr); + map->MarkAsModified(false, 0); + } + + BufferInfo StreamBufferUpload(const void* raw_pointer, std::size_t size, + std::size_t alignment) { + AlignBuffer(alignment); + const std::size_t uploaded_offset = buffer_offset; + std::memcpy(buffer_ptr, raw_pointer, size); + + buffer_ptr += size; + buffer_offset += size; + return {&stream_buffer_handle, uploaded_offset}; + } + + void AlignBuffer(std::size_t alignment) { + // Align the offset, not the mapped pointer + const std::size_t offset_aligned = Common::AlignUp(buffer_offset, alignment); + buffer_ptr += offset_aligned - buffer_offset; + buffer_offset = offset_aligned; + } + + TBuffer EnlargeBlock(TBuffer buffer) { + const std::size_t old_size = buffer->GetSize(); + const std::size_t new_size = old_size + block_page_size; + const CacheAddr cache_addr = buffer->GetCacheAddr(); + TBuffer new_buffer = CreateBlock(cache_addr, new_size); + CopyBlock(buffer, new_buffer, 0, 0, old_size); + buffer->SetEpoch(epoch); + pending_destruction.push_back(buffer); + const CacheAddr cache_addr_end = cache_addr + new_size - 1; + u64 page_start = cache_addr >> block_page_bits; + const u64 page_end = cache_addr_end >> block_page_bits; + while (page_start <= page_end) { + blocks[page_start] = new_buffer; + ++page_start; + } + return new_buffer; + } + + TBuffer MergeBlocks(TBuffer first, TBuffer second) { + const std::size_t size_1 = first->GetSize(); + const std::size_t size_2 = second->GetSize(); + const CacheAddr first_addr = first->GetCacheAddr(); + const CacheAddr second_addr = second->GetCacheAddr(); + const CacheAddr new_addr = std::min(first_addr, second_addr); + const std::size_t new_size = size_1 + size_2; + TBuffer new_buffer = CreateBlock(new_addr, new_size); + CopyBlock(first, new_buffer, 0, new_buffer->GetOffset(first_addr), size_1); + CopyBlock(second, new_buffer, 0, new_buffer->GetOffset(second_addr), size_2); + first->SetEpoch(epoch); + second->SetEpoch(epoch); + pending_destruction.push_back(first); + pending_destruction.push_back(second); + const CacheAddr cache_addr_end = new_addr + new_size - 1; + u64 page_start = new_addr >> block_page_bits; + const u64 page_end = cache_addr_end >> block_page_bits; + while (page_start <= page_end) { + blocks[page_start] = new_buffer; + ++page_start; + } + return new_buffer; + } + + TBuffer GetBlock(const CacheAddr cache_addr, const std::size_t size) { + TBuffer found{}; + const CacheAddr cache_addr_end = cache_addr + size - 1; + u64 page_start = cache_addr >> block_page_bits; + const u64 page_end = cache_addr_end >> block_page_bits; + while (page_start <= page_end) { + auto it = blocks.find(page_start); + if (it == blocks.end()) { + if (found) { + found = EnlargeBlock(found); + } else { + const CacheAddr start_addr = (page_start << 
block_page_bits); + found = CreateBlock(start_addr, block_page_size); + blocks[page_start] = found; + } + } else { + if (found) { + if (found == it->second) { + ++page_start; + continue; + } + found = MergeBlocks(found, it->second); + } else { + found = it->second; + } + } + ++page_start; + } + return found; + } + + void MarkRegionAsWritten(const CacheAddr start, const CacheAddr end) { + u64 page_start = start >> write_page_bit; + const u64 page_end = end >> write_page_bit; + while (page_start <= page_end) { + auto it = written_pages.find(page_start); + if (it != written_pages.end()) { + it->second = it->second + 1; + } else { + written_pages[page_start] = 1; + } + page_start++; + } + } + + void UnmarkRegionAsWritten(const CacheAddr start, const CacheAddr end) { + u64 page_start = start >> write_page_bit; + const u64 page_end = end >> write_page_bit; + while (page_start <= page_end) { + auto it = written_pages.find(page_start); + if (it != written_pages.end()) { + if (it->second > 1) { + it->second = it->second - 1; + } else { + written_pages.erase(it); + } + } + page_start++; + } + } + + bool IsRegionWritten(const CacheAddr start, const CacheAddr end) const { + u64 page_start = start >> write_page_bit; + const u64 page_end = end >> write_page_bit; + while (page_start <= page_end) { + if (written_pages.count(page_start) > 0) { + return true; + } + page_start++; + } + return false; + } + + VideoCore::RasterizerInterface& rasterizer; + Core::System& system; + std::unique_ptr<StreamBuffer> stream_buffer; + + TBufferType stream_buffer_handle{}; + + bool invalidated = false; + + u8* buffer_ptr = nullptr; + u64 buffer_offset = 0; + u64 buffer_offset_base = 0; + + using IntervalSet = boost::icl::interval_set<CacheAddr>; + using IntervalCache = boost::icl::interval_map<CacheAddr, MapInterval>; + using IntervalType = typename IntervalCache::interval_type; + IntervalCache mapped_addresses{}; + + static constexpr u64 write_page_bit{11}; + std::unordered_map<u64, u32> written_pages{}; + + static constexpr u64 block_page_bits{21}; + static constexpr u64 block_page_size{1 << block_page_bits}; + std::unordered_map<u64, TBuffer> blocks{}; + + std::list<TBuffer> pending_destruction{}; + u64 epoch{}; + u64 modified_ticks{}; + + std::recursive_mutex mutex; +}; + +} // namespace VideoCommon diff --git a/src/video_core/buffer_cache/map_interval.h b/src/video_core/buffer_cache/map_interval.h new file mode 100644 index 0000000000..3a104d5cd3 --- /dev/null +++ b/src/video_core/buffer_cache/map_interval.h @@ -0,0 +1,89 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include "common/common_types.h" +#include "video_core/gpu.h" + +namespace VideoCommon { + +class MapIntervalBase { +public: + MapIntervalBase(const CacheAddr start, const CacheAddr end, const GPUVAddr gpu_addr) + : start{start}, end{end}, gpu_addr{gpu_addr} {} + + void SetCpuAddress(VAddr new_cpu_addr) { + cpu_addr = new_cpu_addr; + } + + VAddr GetCpuAddress() const { + return cpu_addr; + } + + GPUVAddr GetGpuAddress() const { + return gpu_addr; + } + + bool IsInside(const CacheAddr other_start, const CacheAddr other_end) const { + return (start <= other_start && other_end <= end); + } + + bool operator==(const MapIntervalBase& rhs) const { + return std::tie(start, end) == std::tie(rhs.start, rhs.end); + } + + bool operator!=(const MapIntervalBase& rhs) const { + return !operator==(rhs); + } + + void MarkAsRegistered(const bool registered) { + is_registered = registered; + } + + bool IsRegistered() const { + return is_registered; + } + + CacheAddr GetStart() const { + return start; + } + + CacheAddr GetEnd() const { + return end; + } + + void MarkAsModified(const bool is_modified_, const u64 tick) { + is_modified = is_modified_; + ticks = tick; + } + + bool IsModified() const { + return is_modified; + } + + u64 GetModificationTick() const { + return ticks; + } + + void MarkAsWritten(const bool is_written_) { + is_written = is_written_; + } + + bool IsWritten() const { + return is_written; + } + +private: + CacheAddr start; + CacheAddr end; + GPUVAddr gpu_addr; + VAddr cpu_addr{}; + bool is_written{}; + bool is_modified{}; + bool is_registered{}; + u64 ticks{}; +}; + +} // namespace VideoCommon diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp index 3175579cc5..0094fd715b 100644 --- a/src/video_core/dma_pusher.cpp +++ b/src/video_core/dma_pusher.cpp @@ -22,7 +22,7 @@ void DmaPusher::DispatchCalls() { MICROPROFILE_SCOPE(DispatchCalls); // On entering GPU code, assume all memory may be touched by the ARM core. - gpu.Maxwell3D().dirty_flags.OnMemoryWrite(); + gpu.Maxwell3D().dirty.OnMemoryWrite(); dma_pushbuffer_subindex = 0; @@ -31,6 +31,7 @@ void DmaPusher::DispatchCalls() { break; } } + gpu.FlushCommands(); } bool DmaPusher::Step() { diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp index 0ee228e288..98a8b53374 100644 --- a/src/video_core/engines/fermi_2d.cpp +++ b/src/video_core/engines/fermi_2d.cpp @@ -10,8 +10,7 @@ namespace Tegra::Engines { -Fermi2D::Fermi2D(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager) - : rasterizer{rasterizer}, memory_manager{memory_manager} {} +Fermi2D::Fermi2D(VideoCore::RasterizerInterface& rasterizer) : rasterizer{rasterizer} {} void Fermi2D::CallMethod(const GPU::MethodCall& method_call) { ASSERT_MSG(method_call.method < Regs::NUM_REGS, diff --git a/src/video_core/engines/fermi_2d.h b/src/video_core/engines/fermi_2d.h index 05421d1853..0901cf2fad 100644 --- a/src/video_core/engines/fermi_2d.h +++ b/src/video_core/engines/fermi_2d.h @@ -33,7 +33,7 @@ namespace Tegra::Engines { class Fermi2D final { public: - explicit Fermi2D(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager); + explicit Fermi2D(VideoCore::RasterizerInterface& rasterizer); ~Fermi2D() = default; /// Write the value to the register identified by method. 
@@ -145,7 +145,6 @@ public: private: VideoCore::RasterizerInterface& rasterizer; - MemoryManager& memory_manager; /// Performs the copy from the source surface to the destination surface as configured in the /// registers. diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp index 7404a8163c..08586d33ca 100644 --- a/src/video_core/engines/kepler_compute.cpp +++ b/src/video_core/engines/kepler_compute.cpp @@ -37,7 +37,7 @@ void KeplerCompute::CallMethod(const GPU::MethodCall& method_call) { const bool is_last_call = method_call.IsLastCall(); upload_state.ProcessData(method_call.argument, is_last_call); if (is_last_call) { - system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite(); + system.GPU().Maxwell3D().dirty.OnMemoryWrite(); } break; } @@ -50,13 +50,14 @@ void KeplerCompute::CallMethod(const GPU::MethodCall& method_call) { } void KeplerCompute::ProcessLaunch() { - const GPUVAddr launch_desc_loc = regs.launch_desc_loc.Address(); memory_manager.ReadBlockUnsafe(launch_desc_loc, &launch_description, LaunchParams::NUM_LAUNCH_PARAMETERS * sizeof(u32)); - const GPUVAddr code_loc = regs.code_loc.Address() + launch_description.program_start; - LOG_WARNING(HW_GPU, "Compute Kernel Execute at Address 0x{:016x}, STUBBED", code_loc); + const GPUVAddr code_addr = regs.code_loc.Address() + launch_description.program_start; + LOG_TRACE(HW_GPU, "Compute invocation launched at address 0x{:016x}", code_addr); + + rasterizer.DispatchCompute(code_addr); } } // namespace Tegra::Engines diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp index 0561f676cb..fa4a7c5c13 100644 --- a/src/video_core/engines/kepler_memory.cpp +++ b/src/video_core/engines/kepler_memory.cpp @@ -15,7 +15,7 @@ namespace Tegra::Engines { KeplerMemory::KeplerMemory(Core::System& system, MemoryManager& memory_manager) - : system{system}, memory_manager{memory_manager}, upload_state{memory_manager, regs.upload} {} + : system{system}, upload_state{memory_manager, regs.upload} {} KeplerMemory::~KeplerMemory() = default; @@ -34,7 +34,7 @@ void KeplerMemory::CallMethod(const GPU::MethodCall& method_call) { const bool is_last_call = method_call.IsLastCall(); upload_state.ProcessData(method_call.argument, is_last_call); if (is_last_call) { - system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite(); + system.GPU().Maxwell3D().dirty.OnMemoryWrite(); } break; } diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h index f3bc675a9d..e0e25c321a 100644 --- a/src/video_core/engines/kepler_memory.h +++ b/src/video_core/engines/kepler_memory.h @@ -65,7 +65,6 @@ public: private: Core::System& system; - MemoryManager& memory_manager; Upload::State upload_state; }; diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index 8755b8af40..f5158d2198 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -22,6 +22,7 @@ Maxwell3D::Maxwell3D(Core::System& system, VideoCore::RasterizerInterface& raste MemoryManager& memory_manager) : system{system}, rasterizer{rasterizer}, memory_manager{memory_manager}, macro_interpreter{*this}, upload_state{memory_manager, regs.upload} { + InitDirtySettings(); InitializeRegisterDefaults(); } @@ -69,6 +70,10 @@ void Maxwell3D::InitializeRegisterDefaults() { regs.stencil_back_func_mask = 0xFFFFFFFF; regs.stencil_back_mask = 0xFFFFFFFF; + regs.depth_test_func = Regs::ComparisonOp::Always; + regs.cull.front_face = 
Regs::Cull::FrontFace::CounterClockWise; + regs.cull.cull_face = Regs::Cull::CullFace::Back; + // TODO(Rodrigo): Most games do not set a point size. I think this is a case of a // register carrying a default value. Assume it's OpenGL's default (1). regs.point_size = 1.0f; @@ -86,21 +91,168 @@ void Maxwell3D::InitializeRegisterDefaults() { regs.rt_separate_frag_data = 1; } +#define DIRTY_REGS_POS(field_name) (offsetof(Maxwell3D::DirtyRegs, field_name)) + +void Maxwell3D::InitDirtySettings() { + const auto set_block = [this](const u32 start, const u32 range, const u8 position) { + const auto start_itr = dirty_pointers.begin() + start; + const auto end_itr = start_itr + range; + std::fill(start_itr, end_itr, position); + }; + dirty.regs.fill(true); + + // Init Render Targets + constexpr u32 registers_per_rt = sizeof(regs.rt[0]) / sizeof(u32); + constexpr u32 rt_start_reg = MAXWELL3D_REG_INDEX(rt); + constexpr u32 rt_end_reg = rt_start_reg + registers_per_rt * 8; + u32 rt_dirty_reg = DIRTY_REGS_POS(render_target); + for (u32 rt_reg = rt_start_reg; rt_reg < rt_end_reg; rt_reg += registers_per_rt) { + set_block(rt_reg, registers_per_rt, rt_dirty_reg); + rt_dirty_reg++; + } + constexpr u32 depth_buffer_flag = DIRTY_REGS_POS(depth_buffer); + dirty_pointers[MAXWELL3D_REG_INDEX(zeta_enable)] = depth_buffer_flag; + dirty_pointers[MAXWELL3D_REG_INDEX(zeta_width)] = depth_buffer_flag; + dirty_pointers[MAXWELL3D_REG_INDEX(zeta_height)] = depth_buffer_flag; + constexpr u32 registers_in_zeta = sizeof(regs.zeta) / sizeof(u32); + constexpr u32 zeta_reg = MAXWELL3D_REG_INDEX(zeta); + set_block(zeta_reg, registers_in_zeta, depth_buffer_flag); + + // Init Vertex Arrays + constexpr u32 vertex_array_start = MAXWELL3D_REG_INDEX(vertex_array); + constexpr u32 vertex_array_size = sizeof(regs.vertex_array[0]) / sizeof(u32); + constexpr u32 vertex_array_end = vertex_array_start + vertex_array_size * Regs::NumVertexArrays; + u32 va_reg = DIRTY_REGS_POS(vertex_array); + u32 vi_reg = DIRTY_REGS_POS(vertex_instance); + for (u32 vertex_reg = vertex_array_start; vertex_reg < vertex_array_end; + vertex_reg += vertex_array_size) { + set_block(vertex_reg, 3, va_reg); + // The divisor concerns vertex array instances + dirty_pointers[vertex_reg + 3] = vi_reg; + va_reg++; + vi_reg++; + } + constexpr u32 vertex_limit_start = MAXWELL3D_REG_INDEX(vertex_array_limit); + constexpr u32 vertex_limit_size = sizeof(regs.vertex_array_limit[0]) / sizeof(u32); + constexpr u32 vertex_limit_end = vertex_limit_start + vertex_limit_size * Regs::NumVertexArrays; + va_reg = DIRTY_REGS_POS(vertex_array); + for (u32 vertex_reg = vertex_limit_start; vertex_reg < vertex_limit_end; + vertex_reg += vertex_limit_size) { + set_block(vertex_reg, vertex_limit_size, va_reg); + va_reg++; + } + constexpr u32 vertex_instance_start = MAXWELL3D_REG_INDEX(instanced_arrays); + constexpr u32 vertex_instance_size = + sizeof(regs.instanced_arrays.is_instanced[0]) / sizeof(u32); + constexpr u32 vertex_instance_end = + vertex_instance_start + vertex_instance_size * Regs::NumVertexArrays; + vi_reg = DIRTY_REGS_POS(vertex_instance); + for (u32 vertex_reg = vertex_instance_start; vertex_reg < vertex_instance_end; + vertex_reg += vertex_instance_size) { + set_block(vertex_reg, vertex_instance_size, vi_reg); + vi_reg++; + } + set_block(MAXWELL3D_REG_INDEX(vertex_attrib_format), regs.vertex_attrib_format.size(), + DIRTY_REGS_POS(vertex_attrib_format)); + + // Init Shaders + constexpr u32 shader_registers_count = + sizeof(regs.shader_config[0]) * Regs::MaxShaderProgram / 
sizeof(u32); + set_block(MAXWELL3D_REG_INDEX(shader_config[0]), shader_registers_count, + DIRTY_REGS_POS(shaders)); + + // State + + // Viewport + constexpr u32 viewport_dirty_reg = DIRTY_REGS_POS(viewport); + constexpr u32 viewport_start = MAXWELL3D_REG_INDEX(viewports); + constexpr u32 viewport_size = sizeof(regs.viewports) / sizeof(u32); + set_block(viewport_start, viewport_size, viewport_dirty_reg); + constexpr u32 view_volume_start = MAXWELL3D_REG_INDEX(view_volume_clip_control); + constexpr u32 view_volume_size = sizeof(regs.view_volume_clip_control) / sizeof(u32); + set_block(view_volume_start, view_volume_size, viewport_dirty_reg); + + // Viewport transformation + constexpr u32 viewport_trans_start = MAXWELL3D_REG_INDEX(viewport_transform); + constexpr u32 viewport_trans_size = sizeof(regs.viewport_transform) / sizeof(u32); + set_block(viewport_trans_start, viewport_trans_size, DIRTY_REGS_POS(viewport_transform)); + + // Cullmode + constexpr u32 cull_mode_start = MAXWELL3D_REG_INDEX(cull); + constexpr u32 cull_mode_size = sizeof(regs.cull) / sizeof(u32); + set_block(cull_mode_start, cull_mode_size, DIRTY_REGS_POS(cull_mode)); + + // Screen y control + dirty_pointers[MAXWELL3D_REG_INDEX(screen_y_control)] = DIRTY_REGS_POS(screen_y_control); + + // Primitive Restart + constexpr u32 primitive_restart_start = MAXWELL3D_REG_INDEX(primitive_restart); + constexpr u32 primitive_restart_size = sizeof(regs.primitive_restart) / sizeof(u32); + set_block(primitive_restart_start, primitive_restart_size, DIRTY_REGS_POS(primitive_restart)); + + // Depth Test + constexpr u32 depth_test_dirty_reg = DIRTY_REGS_POS(depth_test); + dirty_pointers[MAXWELL3D_REG_INDEX(depth_test_enable)] = depth_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(depth_write_enabled)] = depth_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(depth_test_func)] = depth_test_dirty_reg; + + // Stencil Test + constexpr u32 stencil_test_dirty_reg = DIRTY_REGS_POS(stencil_test); + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_enable)] = stencil_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_front_func_func)] = stencil_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_front_func_ref)] = stencil_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_front_func_mask)] = stencil_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_front_op_fail)] = stencil_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_front_op_zfail)] = stencil_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_front_op_zpass)] = stencil_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_front_mask)] = stencil_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_two_side_enable)] = stencil_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_back_func_func)] = stencil_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_back_func_ref)] = stencil_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_back_func_mask)] = stencil_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_back_op_fail)] = stencil_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_back_op_zfail)] = stencil_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_back_op_zpass)] = stencil_test_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(stencil_back_mask)] = stencil_test_dirty_reg; + + // Color Mask + constexpr u32 color_mask_dirty_reg = DIRTY_REGS_POS(color_mask); + dirty_pointers[MAXWELL3D_REG_INDEX(color_mask_common)] = color_mask_dirty_reg; + 
set_block(MAXWELL3D_REG_INDEX(color_mask), sizeof(regs.color_mask) / sizeof(u32), + color_mask_dirty_reg); + // Blend State + constexpr u32 blend_state_dirty_reg = DIRTY_REGS_POS(blend_state); + set_block(MAXWELL3D_REG_INDEX(blend_color), sizeof(regs.blend_color) / sizeof(u32), + blend_state_dirty_reg); + dirty_pointers[MAXWELL3D_REG_INDEX(independent_blend_enable)] = blend_state_dirty_reg; + set_block(MAXWELL3D_REG_INDEX(blend), sizeof(regs.blend) / sizeof(u32), blend_state_dirty_reg); + set_block(MAXWELL3D_REG_INDEX(independent_blend), sizeof(regs.independent_blend) / sizeof(u32), + blend_state_dirty_reg); + + // Scissor State + constexpr u32 scissor_test_dirty_reg = DIRTY_REGS_POS(scissor_test); + set_block(MAXWELL3D_REG_INDEX(scissor_test), sizeof(regs.scissor_test) / sizeof(u32), + scissor_test_dirty_reg); + + // Polygon Offset + constexpr u32 polygon_offset_dirty_reg = DIRTY_REGS_POS(polygon_offset); + dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_fill_enable)] = polygon_offset_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_line_enable)] = polygon_offset_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_point_enable)] = polygon_offset_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_units)] = polygon_offset_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_factor)] = polygon_offset_dirty_reg; + dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_clamp)] = polygon_offset_dirty_reg; +} + void Maxwell3D::CallMacroMethod(u32 method, std::vector<u32> parameters) { // Reset the current macro. executing_macro = 0; // Lookup the macro offset - const u32 entry{(method - MacroRegistersStart) >> 1}; - const auto& search{macro_offsets.find(entry)}; - if (search == macro_offsets.end()) { - LOG_CRITICAL(HW_GPU, "macro not found for method 0x{:X}!", method); - UNREACHABLE(); - return; - } + const u32 entry = ((method - MacroRegistersStart) >> 1) % macro_positions.size(); // Execute the current macro. - macro_interpreter.Execute(search->second, std::move(parameters)); + macro_interpreter.Execute(macro_positions[entry], std::move(parameters)); } void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) { @@ -108,6 +260,14 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) { const u32 method = method_call.method; + if (method == cb_data_state.current) { + regs.reg_array[method] = method_call.argument; + ProcessCBData(method_call.argument); + return; + } else if (cb_data_state.current != null_cb_data) { + FinishCBData(); + } + // It is an error to write to a register other than the current macro's ARG register before it // has finished execution. 
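// Editor's note: illustrative sketch, not part of the diff. It shows how the new fixed-size
// macro table above is indexed in CallMacroMethod: every bound macro occupies two methods
// (one to invoke it, one to feed arguments), so the table slot is half the distance from
// MacroRegistersStart, wrapped to the table size. The 0xE00 start value is an assumption
// used only for this example.
#include <array>
#include <cstddef>
#include <cstdint>

namespace sketch {
constexpr std::uint32_t MacroRegistersStart = 0xE00; // assumed value for illustration
std::array<std::uint32_t, 0x80> macro_positions{};   // start offsets, filled by ProcessMacroBind

inline std::size_t MacroSlotForMethod(std::uint32_t method) {
    // Two methods per macro slot; wrap the index like the replacement code does.
    return ((method - MacroRegistersStart) >> 1) % macro_positions.size();
}
} // namespace sketch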
if (executing_macro != 0) { @@ -143,49 +303,19 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) { if (regs.reg_array[method] != method_call.argument) { regs.reg_array[method] = method_call.argument; - // Color buffers - constexpr u32 first_rt_reg = MAXWELL3D_REG_INDEX(rt); - constexpr u32 registers_per_rt = sizeof(regs.rt[0]) / sizeof(u32); - if (method >= first_rt_reg && - method < first_rt_reg + registers_per_rt * Regs::NumRenderTargets) { - const std::size_t rt_index = (method - first_rt_reg) / registers_per_rt; - dirty_flags.color_buffer.set(rt_index); - } - - // Zeta buffer - constexpr u32 registers_in_zeta = sizeof(regs.zeta) / sizeof(u32); - if (method == MAXWELL3D_REG_INDEX(zeta_enable) || - method == MAXWELL3D_REG_INDEX(zeta_width) || - method == MAXWELL3D_REG_INDEX(zeta_height) || - (method >= MAXWELL3D_REG_INDEX(zeta) && - method < MAXWELL3D_REG_INDEX(zeta) + registers_in_zeta)) { - dirty_flags.zeta_buffer = true; - } - - // Shader - constexpr u32 shader_registers_count = - sizeof(regs.shader_config[0]) * Regs::MaxShaderProgram / sizeof(u32); - if (method >= MAXWELL3D_REG_INDEX(shader_config[0]) && - method < MAXWELL3D_REG_INDEX(shader_config[0]) + shader_registers_count) { - dirty_flags.shaders = true; - } - - // Vertex format - if (method >= MAXWELL3D_REG_INDEX(vertex_attrib_format) && - method < MAXWELL3D_REG_INDEX(vertex_attrib_format) + regs.vertex_attrib_format.size()) { - dirty_flags.vertex_attrib_format = true; - } - - // Vertex buffer - if (method >= MAXWELL3D_REG_INDEX(vertex_array) && - method < MAXWELL3D_REG_INDEX(vertex_array) + 4 * Regs::NumVertexArrays) { - dirty_flags.vertex_array.set((method - MAXWELL3D_REG_INDEX(vertex_array)) >> 2); - } else if (method >= MAXWELL3D_REG_INDEX(vertex_array_limit) && - method < MAXWELL3D_REG_INDEX(vertex_array_limit) + 2 * Regs::NumVertexArrays) { - dirty_flags.vertex_array.set((method - MAXWELL3D_REG_INDEX(vertex_array_limit)) >> 1); - } else if (method >= MAXWELL3D_REG_INDEX(instanced_arrays) && - method < MAXWELL3D_REG_INDEX(instanced_arrays) + Regs::NumVertexArrays) { - dirty_flags.vertex_array.set(method - MAXWELL3D_REG_INDEX(instanced_arrays)); + const std::size_t dirty_reg = dirty_pointers[method]; + if (dirty_reg) { + dirty.regs[dirty_reg] = true; + if (dirty_reg >= DIRTY_REGS_POS(vertex_array) && + dirty_reg < DIRTY_REGS_POS(vertex_array_buffers)) { + dirty.vertex_array_buffers = true; + } else if (dirty_reg >= DIRTY_REGS_POS(vertex_instance) && + dirty_reg < DIRTY_REGS_POS(vertex_instances)) { + dirty.vertex_instances = true; + } else if (dirty_reg >= DIRTY_REGS_POS(render_target) && + dirty_reg < DIRTY_REGS_POS(render_settings)) { + dirty.render_settings = true; + } } } @@ -214,7 +344,7 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) { case MAXWELL3D_REG_INDEX(const_buffer.cb_data[13]): case MAXWELL3D_REG_INDEX(const_buffer.cb_data[14]): case MAXWELL3D_REG_INDEX(const_buffer.cb_data[15]): { - ProcessCBData(method_call.argument); + StartCBData(method); break; } case MAXWELL3D_REG_INDEX(cb_bind[0].raw_config): { @@ -249,6 +379,10 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) { ProcessQueryGet(); break; } + case MAXWELL3D_REG_INDEX(condition.mode): { + ProcessQueryCondition(); + break; + } case MAXWELL3D_REG_INDEX(sync_info): { ProcessSyncPoint(); break; @@ -261,7 +395,7 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) { const bool is_last_call = method_call.IsLastCall(); upload_state.ProcessData(method_call.argument, is_last_call); if (is_last_call) 
{ - dirty_flags.OnMemoryWrite(); + dirty.OnMemoryWrite(); } break; } @@ -281,7 +415,7 @@ void Maxwell3D::ProcessMacroUpload(u32 data) { } void Maxwell3D::ProcessMacroBind(u32 data) { - macro_offsets[regs.macros.entry] = data; + macro_positions[regs.macros.entry++] = data; } void Maxwell3D::ProcessQueryGet() { @@ -302,6 +436,7 @@ void Maxwell3D::ProcessQueryGet() { result = regs.query.query_sequence; break; default: + result = 1; UNIMPLEMENTED_MSG("Unimplemented query select type {}", static_cast<u32>(regs.query.query_get.select.Value())); } @@ -333,7 +468,6 @@ void Maxwell3D::ProcessQueryGet() { query_result.timestamp = system.CoreTiming().GetTicks(); memory_manager.WriteBlock(sequence_address, &query_result, sizeof(query_result)); } - dirty_flags.OnMemoryWrite(); break; } default: @@ -342,12 +476,52 @@ void Maxwell3D::ProcessQueryGet() { } } +void Maxwell3D::ProcessQueryCondition() { + const GPUVAddr condition_address{regs.condition.Address()}; + switch (regs.condition.mode) { + case Regs::ConditionMode::Always: { + execute_on = true; + break; + } + case Regs::ConditionMode::Never: { + execute_on = false; + break; + } + case Regs::ConditionMode::ResNonZero: { + Regs::QueryCompare cmp; + memory_manager.ReadBlockUnsafe(condition_address, &cmp, sizeof(cmp)); + execute_on = cmp.initial_sequence != 0U && cmp.initial_mode != 0U; + break; + } + case Regs::ConditionMode::Equal: { + Regs::QueryCompare cmp; + memory_manager.ReadBlockUnsafe(condition_address, &cmp, sizeof(cmp)); + execute_on = + cmp.initial_sequence == cmp.current_sequence && cmp.initial_mode == cmp.current_mode; + break; + } + case Regs::ConditionMode::NotEqual: { + Regs::QueryCompare cmp; + memory_manager.ReadBlockUnsafe(condition_address, &cmp, sizeof(cmp)); + execute_on = + cmp.initial_sequence != cmp.current_sequence || cmp.initial_mode != cmp.current_mode; + break; + } + default: { + UNIMPLEMENTED_MSG("Uninplemented Condition Mode!"); + execute_on = true; + break; + } + } +} + void Maxwell3D::ProcessSyncPoint() { const u32 sync_point = regs.sync_info.sync_point.Value(); const u32 increment = regs.sync_info.increment.Value(); - const u32 cache_flush = regs.sync_info.unknown.Value(); - LOG_DEBUG(HW_GPU, "Syncpoint set {}, increment: {}, unk: {}", sync_point, increment, - cache_flush); + [[maybe_unused]] const u32 cache_flush = regs.sync_info.unknown.Value(); + if (increment) { + system.GPU().IncrementSyncPoint(sync_point); + } } void Maxwell3D::DrawArrays() { @@ -405,23 +579,39 @@ void Maxwell3D::ProcessCBBind(Regs::ShaderStage stage) { } void Maxwell3D::ProcessCBData(u32 value) { + const u32 id = cb_data_state.id; + cb_data_state.buffer[id][cb_data_state.counter] = value; + // Increment the current buffer position. + regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4; + cb_data_state.counter++; +} + +void Maxwell3D::StartCBData(u32 method) { + constexpr u32 first_cb_data = MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]); + cb_data_state.start_pos = regs.const_buffer.cb_pos; + cb_data_state.id = method - first_cb_data; + cb_data_state.current = method; + cb_data_state.counter = 0; + ProcessCBData(regs.const_buffer.cb_data[cb_data_state.id]); +} + +void Maxwell3D::FinishCBData() { // Write the input value to the current const buffer at the current position. const GPUVAddr buffer_address = regs.const_buffer.BufferAddress(); ASSERT(buffer_address != 0); // Don't allow writing past the end of the buffer. 
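// Editor's note: illustrative sketch, not part of the diff. It mirrors the batching pattern
// introduced by StartCBData/ProcessCBData/FinishCBData above: consecutive CB_DATA writes are
// staged in a local buffer and flushed to GPU memory with one block write when the burst
// ends, instead of invalidating and writing a single word at a time. All names here are
// simplified stand-ins, not the engine's real types.
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

namespace sketch {
struct CbBurst {
    std::array<std::uint32_t, 0x4000> staging{};
    std::size_t count = 0;     // words staged so far
    std::size_t start_pos = 0; // constant-buffer offset when the burst began
};

inline void Stage(CbBurst& burst, std::uint32_t value) {
    burst.staging[burst.count++] = value; // mirrors ProcessCBData
}

inline void Flush(const CbBurst& burst, std::vector<std::uint8_t>& const_buffer) {
    // Mirrors FinishCBData: one contiguous write instead of one write per word.
    // The caller is assumed to have sized const_buffer to cover the burst.
    std::memcpy(const_buffer.data() + burst.start_pos, burst.staging.data(),
                burst.count * sizeof(std::uint32_t));
}
} // namespace sketch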
- ASSERT(regs.const_buffer.cb_pos + sizeof(u32) <= regs.const_buffer.cb_size); + ASSERT(regs.const_buffer.cb_pos <= regs.const_buffer.cb_size); - const GPUVAddr address{buffer_address + regs.const_buffer.cb_pos}; + const GPUVAddr address{buffer_address + cb_data_state.start_pos}; + const std::size_t size = regs.const_buffer.cb_pos - cb_data_state.start_pos; - u8* ptr{memory_manager.GetPointer(address)}; - rasterizer.InvalidateRegion(ToCacheAddr(ptr), sizeof(u32)); - memory_manager.Write<u32>(address, value); + const u32 id = cb_data_state.id; + memory_manager.WriteBlock(address, cb_data_state.buffer[id].data(), size); + dirty.OnMemoryWrite(); - dirty_flags.OnMemoryWrite(); - - // Increment the current buffer position. - regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4; + cb_data_state.id = null_cb_data; + cb_data_state.current = null_cb_data; } Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const { @@ -430,10 +620,10 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const { Texture::TICEntry tic_entry; memory_manager.ReadBlockUnsafe(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry)); - const auto r_type{tic_entry.r_type.Value()}; - const auto g_type{tic_entry.g_type.Value()}; - const auto b_type{tic_entry.b_type.Value()}; - const auto a_type{tic_entry.a_type.Value()}; + [[maybe_unused]] const auto r_type{tic_entry.r_type.Value()}; + [[maybe_unused]] const auto g_type{tic_entry.g_type.Value()}; + [[maybe_unused]] const auto b_type{tic_entry.b_type.Value()}; + [[maybe_unused]] const auto a_type{tic_entry.a_type.Value()}; // TODO(Subv): Different data types for separate components are not supported DEBUG_ASSERT(r_type == g_type && r_type == b_type && r_type == a_type); diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h index 13e3149444..0184342a07 100644 --- a/src/video_core/engines/maxwell_3d.h +++ b/src/video_core/engines/maxwell_3d.h @@ -67,6 +67,7 @@ public: static constexpr std::size_t MaxShaderStage = 5; // Maximum number of const buffers per shader stage. 
static constexpr std::size_t MaxConstBuffers = 18; + static constexpr std::size_t MaxConstBufferSize = 0x10000; enum class QueryMode : u32 { Write = 0, @@ -89,6 +90,20 @@ public: enum class QuerySelect : u32 { Zero = 0, + TimeElapsed = 2, + TransformFeedbackPrimitivesGenerated = 11, + PrimitivesGenerated = 18, + SamplesPassed = 21, + TransformFeedbackUnknown = 26, + }; + + struct QueryCompare { + u32 initial_sequence; + u32 initial_mode; + u32 unknown1; + u32 unknown2; + u32 current_sequence; + u32 current_mode; }; enum class QuerySyncCondition : u32 { @@ -96,6 +111,14 @@ public: GreaterThan = 1, }; + enum class ConditionMode : u32 { + Never = 0, + Always = 1, + ResNonZero = 2, + Equal = 3, + NotEqual = 4, + }; + enum class ShaderProgram : u32 { VertexA = 0, VertexB = 1, @@ -814,7 +837,18 @@ public: BitField<4, 1, u32> alpha_to_one; } multisample_control; - INSERT_PADDING_WORDS(0x7); + INSERT_PADDING_WORDS(0x4); + + struct { + u32 address_high; + u32 address_low; + ConditionMode mode; + + GPUVAddr Address() const { + return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | + address_low); + } + } condition; struct { u32 tsc_address_high; @@ -1123,23 +1157,77 @@ public: State state{}; - struct DirtyFlags { - std::bitset<8> color_buffer{0xFF}; - std::bitset<32> vertex_array{0xFFFFFFFF}; + struct DirtyRegs { + static constexpr std::size_t NUM_REGS = 256; + union { + struct { + bool null_dirty; + + // Vertex Attributes + bool vertex_attrib_format; + + // Vertex Arrays + std::array<bool, 32> vertex_array; + + bool vertex_array_buffers; + + // Vertex Instances + std::array<bool, 32> vertex_instance; - bool vertex_attrib_format = true; - bool zeta_buffer = true; - bool shaders = true; + bool vertex_instances; + + // Render Targets + std::array<bool, 8> render_target; + bool depth_buffer; + + bool render_settings; + + // Shaders + bool shaders; + + // Rasterizer State + bool viewport; + bool clip_coefficient; + bool cull_mode; + bool primitive_restart; + bool depth_test; + bool stencil_test; + bool blend_state; + bool scissor_test; + bool transform_feedback; + bool color_mask; + bool polygon_offset; + + // Complementary + bool viewport_transform; + bool screen_y_control; + + bool memory_general; + }; + std::array<bool, NUM_REGS> regs; + }; + + void ResetVertexArrays() { + vertex_array.fill(true); + vertex_array_buffers = true; + } + + void ResetRenderTargets() { + depth_buffer = true; + render_target.fill(true); + render_settings = true; + } void OnMemoryWrite() { - zeta_buffer = true; shaders = true; - color_buffer.set(); - vertex_array.set(); + memory_general = true; + ResetRenderTargets(); + ResetVertexArrays(); } - }; - DirtyFlags dirty_flags; + } dirty{}; + + std::array<u8, Regs::NUM_REGS> dirty_pointers{}; /// Reads a register value located at the input method address u32 GetRegisterValue(u32 method) const; @@ -1168,6 +1256,10 @@ public: return macro_memory; } + bool ShouldExecute() const { + return execute_on; + } + private: void InitializeRegisterDefaults(); @@ -1178,7 +1270,7 @@ private: MemoryManager& memory_manager; /// Start offsets of each macro in macro_memory - std::unordered_map<u32, u32> macro_offsets; + std::array<u32, 0x80> macro_positions = {}; /// Memory for macro code MacroMemory macro_memory; @@ -1191,14 +1283,27 @@ private: /// Interpreter for the macro codes uploaded to the GPU. 
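// Editor's note: illustrative helper, not part of the diff. The new condition register block
// above stores a GPU virtual address split across two 32-bit words; this is the same
// high/low assembly its Address() accessor performs.
#include <cstdint>

namespace sketch {
using GPUVAddr = std::uint64_t;

constexpr GPUVAddr AssembleGpuAddress(std::uint32_t address_high, std::uint32_t address_low) {
    return (static_cast<GPUVAddr>(address_high) << 32) | address_low;
}

static_assert(AssembleGpuAddress(0x1, 0x2000) == 0x0000'0001'0000'2000ULL);
} // namespace sketch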
MacroInterpreter macro_interpreter; + static constexpr u32 null_cb_data = 0xFFFFFFFF; + struct { + std::array<std::array<u32, 0x4000>, 16> buffer; + u32 current{null_cb_data}; + u32 id{null_cb_data}; + u32 start_pos{}; + u32 counter{}; + } cb_data_state; + Upload::State upload_state; + bool execute_on{true}; + /// Retrieves information about a specific TIC entry from the TIC buffer. Texture::TICEntry GetTICEntry(u32 tic_index) const; /// Retrieves information about a specific TSC entry from the TSC buffer. Texture::TSCEntry GetTSCEntry(u32 tsc_index) const; + void InitDirtySettings(); + /** * Call a macro on this engine. * @param method Method to call @@ -1218,11 +1323,16 @@ private: /// Handles a write to the QUERY_GET register. void ProcessQueryGet(); + // Handles Conditional Rendering + void ProcessQueryCondition(); + /// Handles writes to syncing register. void ProcessSyncPoint(); /// Handles a write to the CB_DATA[i] register. + void StartCBData(u32 method); void ProcessCBData(u32 value); + void FinishCBData(); /// Handles a write to the CB_BIND register. void ProcessCBBind(Regs::ShaderStage stage); @@ -1289,6 +1399,7 @@ ASSERT_REG_POSITION(clip_distance_enabled, 0x544); ASSERT_REG_POSITION(point_size, 0x546); ASSERT_REG_POSITION(zeta_enable, 0x54E); ASSERT_REG_POSITION(multisample_control, 0x54F); +ASSERT_REG_POSITION(condition, 0x554); ASSERT_REG_POSITION(tsc, 0x557); ASSERT_REG_POSITION(polygon_offset_factor, 0x55b); ASSERT_REG_POSITION(tic, 0x55D); diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp index afb9578d0a..ad8453c5fe 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp @@ -5,18 +5,17 @@ #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" +#include "core/settings.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/engines/maxwell_dma.h" #include "video_core/memory_manager.h" -#include "video_core/rasterizer_interface.h" #include "video_core/renderer_base.h" #include "video_core/textures/decoders.h" namespace Tegra::Engines { -MaxwellDMA::MaxwellDMA(Core::System& system, VideoCore::RasterizerInterface& rasterizer, - MemoryManager& memory_manager) - : system{system}, rasterizer{rasterizer}, memory_manager{memory_manager} {} +MaxwellDMA::MaxwellDMA(Core::System& system, MemoryManager& memory_manager) + : system{system}, memory_manager{memory_manager} {} void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) { ASSERT_MSG(method_call.method < Regs::NUM_REGS, @@ -38,7 +37,7 @@ void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) { } void MaxwellDMA::HandleCopy() { - LOG_WARNING(HW_GPU, "Requested a DMA copy"); + LOG_TRACE(HW_GPU, "Requested a DMA copy"); const GPUVAddr source = regs.src_address.Address(); const GPUVAddr dest = regs.dst_address.Address(); @@ -58,7 +57,7 @@ void MaxwellDMA::HandleCopy() { } // All copies here update the main memory, so mark all rasterizer states as invalid. 
- system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite(); + system.GPU().Maxwell3D().dirty.OnMemoryWrite(); if (regs.exec.is_dst_linear && regs.exec.is_src_linear) { // When the enable_2d bit is disabled, the copy is performed as if we were copying a 1D @@ -84,13 +83,17 @@ void MaxwellDMA::HandleCopy() { ASSERT(regs.exec.enable_2d == 1); if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) { - ASSERT(regs.src_params.size_z == 1); + ASSERT(regs.src_params.BlockDepth() == 0); // If the input is tiled and the output is linear, deswizzle the input and copy it over. - const u32 src_bytes_per_pixel = regs.src_pitch / regs.src_params.size_x; + const u32 bytes_per_pixel = regs.dst_pitch / regs.x_count; const std::size_t src_size = Texture::CalculateSize( - true, src_bytes_per_pixel, regs.src_params.size_x, regs.src_params.size_y, + true, bytes_per_pixel, regs.src_params.size_x, regs.src_params.size_y, regs.src_params.size_z, regs.src_params.BlockHeight(), regs.src_params.BlockDepth()); + const std::size_t src_layer_size = Texture::CalculateSize( + true, bytes_per_pixel, regs.src_params.size_x, regs.src_params.size_y, 1, + regs.src_params.BlockHeight(), regs.src_params.BlockDepth()); + const std::size_t dst_size = regs.dst_pitch * regs.y_count; if (read_buffer.size() < src_size) { @@ -104,23 +107,23 @@ void MaxwellDMA::HandleCopy() { memory_manager.ReadBlock(source, read_buffer.data(), src_size); memory_manager.ReadBlock(dest, write_buffer.data(), dst_size); - Texture::UnswizzleSubrect(regs.x_count, regs.y_count, regs.dst_pitch, - regs.src_params.size_x, src_bytes_per_pixel, read_buffer.data(), - write_buffer.data(), regs.src_params.BlockHeight(), - regs.src_params.pos_x, regs.src_params.pos_y); + Texture::UnswizzleSubrect( + regs.x_count, regs.y_count, regs.dst_pitch, regs.src_params.size_x, bytes_per_pixel, + read_buffer.data() + src_layer_size * regs.src_params.pos_z, write_buffer.data(), + regs.src_params.BlockHeight(), regs.src_params.pos_x, regs.src_params.pos_y); memory_manager.WriteBlock(dest, write_buffer.data(), dst_size); } else { ASSERT(regs.dst_params.BlockDepth() == 0); - const u32 src_bytes_per_pixel = regs.src_pitch / regs.x_count; + const u32 bytes_per_pixel = regs.src_pitch / regs.x_count; const std::size_t dst_size = Texture::CalculateSize( - true, src_bytes_per_pixel, regs.dst_params.size_x, regs.dst_params.size_y, + true, bytes_per_pixel, regs.dst_params.size_x, regs.dst_params.size_y, regs.dst_params.size_z, regs.dst_params.BlockHeight(), regs.dst_params.BlockDepth()); const std::size_t dst_layer_size = Texture::CalculateSize( - true, src_bytes_per_pixel, regs.dst_params.size_x, regs.dst_params.size_y, 1, + true, bytes_per_pixel, regs.dst_params.size_x, regs.dst_params.size_y, 1, regs.dst_params.BlockHeight(), regs.dst_params.BlockDepth()); const std::size_t src_size = regs.src_pitch * regs.y_count; @@ -133,14 +136,19 @@ void MaxwellDMA::HandleCopy() { write_buffer.resize(dst_size); } - memory_manager.ReadBlock(source, read_buffer.data(), src_size); - memory_manager.ReadBlock(dest, write_buffer.data(), dst_size); + if (Settings::values.use_accurate_gpu_emulation) { + memory_manager.ReadBlock(source, read_buffer.data(), src_size); + memory_manager.ReadBlock(dest, write_buffer.data(), dst_size); + } else { + memory_manager.ReadBlockUnsafe(source, read_buffer.data(), src_size); + memory_manager.ReadBlockUnsafe(dest, write_buffer.data(), dst_size); + } // If the input is linear and the output is tiled, swizzle the input and copy it over. 
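// Editor's note: illustrative sketch, not part of the diff. It restates the read-path choice
// added above: with accurate GPU emulation enabled the copy sources go through the cache-aware
// ReadBlock, otherwise the cheaper ReadBlockUnsafe raw copy is used. The MemoryLike interface
// below is a simplified stand-in for the real memory manager.
#include <cstddef>
#include <cstdint>

namespace sketch {
struct MemoryLike {
    virtual void ReadBlock(std::uint64_t gpu_addr, void* dst, std::size_t size) = 0;       // flush-aware
    virtual void ReadBlockUnsafe(std::uint64_t gpu_addr, void* dst, std::size_t size) = 0; // raw copy
    virtual ~MemoryLike() = default;
};

inline void ReadForDmaCopy(MemoryLike& memory, bool use_accurate_gpu_emulation,
                           std::uint64_t src, void* buffer, std::size_t size) {
    if (use_accurate_gpu_emulation) {
        memory.ReadBlock(src, buffer, size);
    } else {
        memory.ReadBlockUnsafe(src, buffer, size);
    }
}
} // namespace sketch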
- Texture::SwizzleSubrect(regs.x_count, regs.y_count, regs.src_pitch, regs.dst_params.size_x, - src_bytes_per_pixel, - write_buffer.data() + dst_layer_size * regs.dst_params.pos_z, - read_buffer.data(), regs.dst_params.BlockHeight()); + Texture::SwizzleSubrect( + regs.x_count, regs.y_count, regs.src_pitch, regs.dst_params.size_x, bytes_per_pixel, + write_buffer.data() + dst_layer_size * regs.dst_params.pos_z, read_buffer.data(), + regs.dst_params.BlockHeight(), regs.dst_params.pos_x, regs.dst_params.pos_y); memory_manager.WriteBlock(dest, write_buffer.data(), dst_size); } diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h index 17b015ca7d..93808a9bbd 100644 --- a/src/video_core/engines/maxwell_dma.h +++ b/src/video_core/engines/maxwell_dma.h @@ -20,10 +20,6 @@ namespace Tegra { class MemoryManager; } -namespace VideoCore { -class RasterizerInterface; -} - namespace Tegra::Engines { /** @@ -33,8 +29,7 @@ namespace Tegra::Engines { class MaxwellDMA final { public: - explicit MaxwellDMA(Core::System& system, VideoCore::RasterizerInterface& rasterizer, - MemoryManager& memory_manager); + explicit MaxwellDMA(Core::System& system, MemoryManager& memory_manager); ~MaxwellDMA() = default; /// Write the value to the register identified by method. @@ -180,8 +175,6 @@ public: private: Core::System& system; - VideoCore::RasterizerInterface& rasterizer; - MemoryManager& memory_manager; std::vector<u8> read_buffer; diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h index 404d4f5aae..c3678b9eab 100644 --- a/src/video_core/engines/shader_bytecode.h +++ b/src/video_core/engines/shader_bytecode.h @@ -78,7 +78,7 @@ union Attribute { constexpr explicit Attribute(u64 value) : value(value) {} enum class Index : u64 { - PointSize = 6, + LayerViewportPointSize = 6, Position = 7, Attribute_0 = 8, Attribute_31 = 39, @@ -538,6 +538,12 @@ enum class PhysicalAttributeDirection : u64 { Output = 1, }; +enum class VoteOperation : u64 { + All = 0, // allThreadsNV + Any = 1, // anyThreadNV + Eq = 2, // allThreadsEqualNV +}; + union Instruction { Instruction& operator=(const Instruction& instr) { value = instr.value; @@ -560,6 +566,18 @@ union Instruction { BitField<48, 16, u64> opcode; union { + BitField<8, 5, ConditionCode> cc; + BitField<13, 1, u64> trigger; + } nop; + + union { + BitField<48, 2, VoteOperation> operation; + BitField<45, 3, u64> dest_pred; + BitField<39, 3, u64> value; + BitField<42, 1, u64> negate_value; + } vote; + + union { BitField<8, 8, Register> gpr; BitField<20, 24, s64> offset; } gmem; @@ -868,6 +886,7 @@ union Instruction { union { BitField<0, 3, u64> pred0; BitField<3, 3, u64> pred3; + BitField<6, 1, u64> neg_b; BitField<7, 1, u64> abs_a; BitField<39, 3, u64> pred39; BitField<42, 1, u64> neg_pred; @@ -931,8 +950,6 @@ union Instruction { } csetp; union { - BitField<35, 4, PredCondition> cond; - BitField<49, 1, u64> h_and; BitField<6, 1, u64> ftz; BitField<45, 2, PredOperation> op; BitField<3, 3, u64> pred3; @@ -940,9 +957,21 @@ union Instruction { BitField<43, 1, u64> negate_a; BitField<44, 1, u64> abs_a; BitField<47, 2, HalfType> type_a; - BitField<31, 1, u64> negate_b; - BitField<30, 1, u64> abs_b; - BitField<28, 2, HalfType> type_b; + union { + BitField<35, 4, PredCondition> cond; + BitField<49, 1, u64> h_and; + BitField<31, 1, u64> negate_b; + BitField<30, 1, u64> abs_b; + BitField<28, 2, HalfType> type_b; + } reg; + union { + BitField<56, 1, u64> negate_b; + BitField<54, 1, u64> abs_b; + } cbuf; + union { + 
BitField<49, 4, PredCondition> cond; + BitField<53, 1, u64> h_and; + } cbuf_and_imm; BitField<42, 1, u64> neg_pred; BitField<39, 3, u64> pred39; } hsetp2; @@ -991,7 +1020,6 @@ union Instruction { } iset; union { - BitField<41, 2, u64> selector; // i2i and i2f only BitField<45, 1, u64> negate_a; BitField<49, 1, u64> abs_a; BitField<10, 2, Register::Size> src_size; @@ -1008,8 +1036,6 @@ union Instruction { } f2i; union { - BitField<8, 2, Register::Size> src_size; - BitField<10, 2, Register::Size> dst_size; BitField<39, 4, u64> rounding; // H0, H1 extract for F16 missing BitField<41, 1, u64> selector; // Guessed as some games set it, TODO: reverse this value @@ -1019,6 +1045,13 @@ union Instruction { } } f2f; + union { + BitField<41, 2, u64> selector; + } int_src; + + union { + BitField<41, 1, u64> selector; + } float_src; } conversion; union { @@ -1278,6 +1311,7 @@ union Instruction { union { BitField<49, 1, u64> nodep_flag; BitField<53, 4, u64> texture_info; + BitField<59, 1, u64> fp32_flag; TextureType GetTextureType() const { // The TLDS instruction has a weird encoding for the texture type. @@ -1368,6 +1402,20 @@ union Instruction { } bra; union { + BitField<20, 24, u64> target; + BitField<5, 1, u64> constant_buffer; + + s32 GetBranchExtend() const { + // Sign extend the branch target offset + u32 mask = 1U << (24 - 1); + u32 value = static_cast<u32>(target); + // The branch offset is relative to the next instruction and is stored in bytes, so + // divide it by the size of an instruction and add 1 to it. + return static_cast<s32>((value ^ mask) - mask) / sizeof(Instruction) + 1; + } + } brx; + + union { BitField<39, 1, u64> emit; // EmitVertex BitField<40, 1, u64> cut; // EndPrimitive } out; @@ -1459,11 +1507,13 @@ public: SYNC, BRK, DEPBAR, + VOTE, BFE_C, BFE_R, BFE_IMM, BFI_IMM_R, BRA, + BRX, PBK, LD_A, LD_L, @@ -1490,6 +1540,7 @@ public: TMML, // Texture Mip Map Level SUST, // Surface Store EXIT, + NOP, IPA, OUT_R, // Emit vertex/primitive ISBERD, @@ -1532,7 +1583,9 @@ public: HFMA2_RC, HFMA2_RR, HFMA2_IMM_R, + HSETP2_C, HSETP2_R, + HSETP2_IMM, HSET2_R, POPC_C, POPC_R, @@ -1617,6 +1670,7 @@ public: Hfma2, Flow, Synch, + Warp, Memory, Texture, Image, @@ -1738,10 +1792,12 @@ private: INST("111000101001----", Id::SSY, Type::Flow, "SSY"), INST("111000101010----", Id::PBK, Type::Flow, "PBK"), INST("111000100100----", Id::BRA, Type::Flow, "BRA"), + INST("111000100101----", Id::BRX, Type::Flow, "BRX"), INST("1111000011111---", Id::SYNC, Type::Flow, "SYNC"), INST("111000110100---", Id::BRK, Type::Flow, "BRK"), INST("111000110000----", Id::EXIT, Type::Flow, "EXIT"), INST("1111000011110---", Id::DEPBAR, Type::Synch, "DEPBAR"), + INST("0101000011011---", Id::VOTE, Type::Warp, "VOTE"), INST("1110111111011---", Id::LD_A, Type::Memory, "LD_A"), INST("1110111101001---", Id::LD_S, Type::Memory, "LD_S"), INST("1110111101000---", Id::LD_L, Type::Memory, "LD_L"), @@ -1760,12 +1816,13 @@ private: INST("1101111101010---", Id::TXQ_B, Type::Texture, "TXQ_B"), INST("1101-00---------", Id::TEXS, Type::Texture, "TEXS"), INST("11011100--11----", Id::TLD, Type::Texture, "TLD"), - INST("1101101---------", Id::TLDS, Type::Texture, "TLDS"), + INST("1101-01---------", Id::TLDS, Type::Texture, "TLDS"), INST("110010----111---", Id::TLD4, Type::Texture, "TLD4"), INST("1101111100------", Id::TLD4S, Type::Texture, "TLD4S"), INST("110111110110----", Id::TMML_B, Type::Texture, "TMML_B"), INST("1101111101011---", Id::TMML, Type::Texture, "TMML"), INST("11101011001-----", Id::SUST, Type::Image, "SUST"), + 
INST("0101000010110---", Id::NOP, Type::Trivial, "NOP"), INST("11100000--------", Id::IPA, Type::Trivial, "IPA"), INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"), INST("1110111111010---", Id::ISBERD, Type::Trivial, "ISBERD"), @@ -1814,7 +1871,9 @@ private: INST("01100---1-------", Id::HFMA2_RC, Type::Hfma2, "HFMA2_RC"), INST("0101110100000---", Id::HFMA2_RR, Type::Hfma2, "HFMA2_RR"), INST("01110---0-------", Id::HFMA2_IMM_R, Type::Hfma2, "HFMA2_R_IMM"), - INST("0101110100100---", Id::HSETP2_R, Type::HalfSetPredicate, "HSETP_R"), + INST("0111111-1-------", Id::HSETP2_C, Type::HalfSetPredicate, "HSETP2_C"), + INST("0101110100100---", Id::HSETP2_R, Type::HalfSetPredicate, "HSETP2_R"), + INST("0111111-0-------", Id::HSETP2_IMM, Type::HalfSetPredicate, "HSETP2_IMM"), INST("0101110100011---", Id::HSET2_R, Type::HalfSet, "HSET2_R"), INST("0101000010000---", Id::MUFU, Type::Arithmetic, "MUFU"), INST("0100110010010---", Id::RRO_C, Type::Arithmetic, "RRO_C"), diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 52706505b0..2c47541cb3 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -17,26 +17,15 @@ namespace Tegra { -u32 FramebufferConfig::BytesPerPixel(PixelFormat format) { - switch (format) { - case PixelFormat::ABGR8: - case PixelFormat::BGRA8: - return 4; - default: - return 4; - } - - UNREACHABLE(); -} - -GPU::GPU(Core::System& system, VideoCore::RendererBase& renderer) : renderer{renderer} { +GPU::GPU(Core::System& system, VideoCore::RendererBase& renderer, bool is_async) + : system{system}, renderer{renderer}, is_async{is_async} { auto& rasterizer{renderer.Rasterizer()}; - memory_manager = std::make_unique<Tegra::MemoryManager>(rasterizer); + memory_manager = std::make_unique<Tegra::MemoryManager>(system, rasterizer); dma_pusher = std::make_unique<Tegra::DmaPusher>(*this); maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, rasterizer, *memory_manager); - fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer, *memory_manager); + fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer); kepler_compute = std::make_unique<Engines::KeplerCompute>(system, rasterizer, *memory_manager); - maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, rasterizer, *memory_manager); + maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, *memory_manager); kepler_memory = std::make_unique<Engines::KeplerMemory>(system, *memory_manager); } @@ -50,6 +39,14 @@ const Engines::Maxwell3D& GPU::Maxwell3D() const { return *maxwell_3d; } +Engines::KeplerCompute& GPU::KeplerCompute() { + return *kepler_compute; +} + +const Engines::KeplerCompute& GPU::KeplerCompute() const { + return *kepler_compute; +} + MemoryManager& GPU::MemoryManager() { return *memory_manager; } @@ -66,6 +63,55 @@ const DmaPusher& GPU::DmaPusher() const { return *dma_pusher; } +void GPU::IncrementSyncPoint(const u32 syncpoint_id) { + syncpoints[syncpoint_id]++; + std::lock_guard lock{sync_mutex}; + if (!syncpt_interrupts[syncpoint_id].empty()) { + u32 value = syncpoints[syncpoint_id].load(); + auto it = syncpt_interrupts[syncpoint_id].begin(); + while (it != syncpt_interrupts[syncpoint_id].end()) { + if (value >= *it) { + TriggerCpuInterrupt(syncpoint_id, *it); + it = syncpt_interrupts[syncpoint_id].erase(it); + continue; + } + it++; + } + } +} + +u32 GPU::GetSyncpointValue(const u32 syncpoint_id) const { + return syncpoints[syncpoint_id].load(); +} + +void GPU::RegisterSyncptInterrupt(const u32 syncpoint_id, const u32 value) { + auto& interrupt = syncpt_interrupts[syncpoint_id]; + 
bool contains = std::any_of(interrupt.begin(), interrupt.end(), + [value](u32 in_value) { return in_value == value; }); + if (contains) { + return; + } + syncpt_interrupts[syncpoint_id].emplace_back(value); +} + +bool GPU::CancelSyncptInterrupt(const u32 syncpoint_id, const u32 value) { + std::lock_guard lock{sync_mutex}; + auto& interrupt = syncpt_interrupts[syncpoint_id]; + const auto iter = + std::find_if(interrupt.begin(), interrupt.end(), + [value](u32 interrupt_value) { return value == interrupt_value; }); + + if (iter == interrupt.end()) { + return false; + } + interrupt.erase(iter); + return true; +} + +void GPU::FlushCommands() { + renderer.Rasterizer().FlushCommands(); +} + u32 RenderTargetBytesPerPixel(RenderTargetFormat format) { ASSERT(format != RenderTargetFormat::NONE); @@ -143,12 +189,12 @@ enum class BufferMethods { NotifyIntr = 0x8, WrcacheFlush = 0x9, Unk28 = 0xA, - Unk2c = 0xB, + UnkCacheFlush = 0xB, RefCnt = 0x14, SemaphoreAcquire = 0x1A, SemaphoreRelease = 0x1B, - Unk70 = 0x1C, - Unk74 = 0x1D, + FenceValue = 0x1C, + FenceAction = 0x1D, Unk78 = 0x1E, Unk7c = 0x1F, Yield = 0x20, @@ -194,6 +240,10 @@ void GPU::CallPullerMethod(const MethodCall& method_call) { case BufferMethods::SemaphoreAddressLow: case BufferMethods::SemaphoreSequence: case BufferMethods::RefCnt: + case BufferMethods::UnkCacheFlush: + case BufferMethods::WrcacheFlush: + case BufferMethods::FenceValue: + case BufferMethods::FenceAction: break; case BufferMethods::SemaphoreTrigger: { ProcessSemaphoreTriggerMethod(); @@ -204,21 +254,11 @@ void GPU::CallPullerMethod(const MethodCall& method_call) { LOG_ERROR(HW_GPU, "Special puller engine method NotifyIntr not implemented"); break; } - case BufferMethods::WrcacheFlush: { - // TODO(Kmather73): Research and implement this method. - LOG_ERROR(HW_GPU, "Special puller engine method WrcacheFlush not implemented"); - break; - } case BufferMethods::Unk28: { // TODO(Kmather73): Research and implement this method. LOG_ERROR(HW_GPU, "Special puller engine method Unk28 not implemented"); break; } - case BufferMethods::Unk2c: { - // TODO(Kmather73): Research and implement this method. - LOG_ERROR(HW_GPU, "Special puller engine method Unk2c not implemented"); - break; - } case BufferMethods::SemaphoreAcquire: { ProcessSemaphoreAcquire(); break; diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index fe66289230..78bc0601a0 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h @@ -5,8 +5,12 @@ #pragma once #include <array> +#include <atomic> +#include <list> #include <memory> +#include <mutex> #include "common/common_types.h" +#include "core/hle/service/nvdrv/nvdata.h" #include "core/hle/service/nvflinger/buffer_queue.h" #include "video_core/dma_pusher.h" @@ -15,6 +19,10 @@ inline CacheAddr ToCacheAddr(const void* host_ptr) { return reinterpret_cast<CacheAddr>(host_ptr); } +inline u8* FromCacheAddr(CacheAddr cache_addr) { + return reinterpret_cast<u8*>(cache_addr); +} + namespace Core { class System; } @@ -87,14 +95,10 @@ class DebugContext; struct FramebufferConfig { enum class PixelFormat : u32 { ABGR8 = 1, + RGB565 = 4, BGRA8 = 5, }; - /** - * Returns the number of bytes per pixel. 
- */ - static u32 BytesPerPixel(PixelFormat format); - VAddr address; u32 offset; u32 width; @@ -127,7 +131,7 @@ class MemoryManager; class GPU { public: - explicit GPU(Core::System& system, VideoCore::RendererBase& renderer); + explicit GPU(Core::System& system, VideoCore::RendererBase& renderer, bool is_async); virtual ~GPU(); @@ -149,12 +153,20 @@ public: /// Calls a GPU method. void CallMethod(const MethodCall& method_call); + void FlushCommands(); + /// Returns a reference to the Maxwell3D GPU engine. Engines::Maxwell3D& Maxwell3D(); /// Returns a const reference to the Maxwell3D GPU engine. const Engines::Maxwell3D& Maxwell3D() const; + /// Returns a reference to the KeplerCompute GPU engine. + Engines::KeplerCompute& KeplerCompute(); + + /// Returns a reference to the KeplerCompute GPU engine. + const Engines::KeplerCompute& KeplerCompute() const; + /// Returns a reference to the GPU memory manager. Tegra::MemoryManager& MemoryManager(); @@ -164,6 +176,22 @@ public: /// Returns a reference to the GPU DMA pusher. Tegra::DmaPusher& DmaPusher(); + void IncrementSyncPoint(u32 syncpoint_id); + + u32 GetSyncpointValue(u32 syncpoint_id) const; + + void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value); + + bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value); + + std::unique_lock<std::mutex> LockSync() { + return std::unique_lock{sync_mutex}; + } + + bool IsAsync() const { + return is_async; + } + /// Returns a const reference to the GPU DMA pusher. const Tegra::DmaPusher& DmaPusher() const; @@ -194,7 +222,12 @@ public: u32 semaphore_acquire; u32 semaphore_release; - INSERT_PADDING_WORDS(0xE4); + u32 fence_value; + union { + BitField<4, 4, u32> operation; + BitField<8, 8, u32> id; + } fence_action; + INSERT_PADDING_WORDS(0xE2); // Puller state u32 acquire_mode; @@ -216,8 +249,7 @@ public: virtual void PushGPUEntries(Tegra::CommandList&& entries) = 0; /// Swap buffers (render frame) - virtual void SwapBuffers( - std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) = 0; + virtual void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) = 0; /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory virtual void FlushRegion(CacheAddr addr, u64 size) = 0; @@ -228,6 +260,9 @@ public: /// Notify rasterizer that any caches of the specified region should be flushed and invalidated virtual void FlushAndInvalidateRegion(CacheAddr addr, u64 size) = 0; +protected: + virtual void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const = 0; + private: void ProcessBindMethod(const MethodCall& method_call); void ProcessSemaphoreTriggerMethod(); @@ -245,6 +280,7 @@ private: protected: std::unique_ptr<Tegra::DmaPusher> dma_pusher; + Core::System& system; VideoCore::RendererBase& renderer; private: @@ -262,6 +298,14 @@ private: std::unique_ptr<Engines::MaxwellDMA> maxwell_dma; /// Inline memory engine std::unique_ptr<Engines::KeplerMemory> kepler_memory; + + std::array<std::atomic<u32>, Service::Nvidia::MaxSyncPoints> syncpoints{}; + + std::array<std::list<u32>, Service::Nvidia::MaxSyncPoints> syncpt_interrupts; + + std::mutex sync_mutex; + + const bool is_async; }; #define ASSERT_REG_POSITION(field_name, position) \ @@ -274,6 +318,8 @@ ASSERT_REG_POSITION(semaphore_trigger, 0x7); ASSERT_REG_POSITION(reference_count, 0x14); ASSERT_REG_POSITION(semaphore_acquire, 0x1A); ASSERT_REG_POSITION(semaphore_release, 0x1B); +ASSERT_REG_POSITION(fence_value, 0x1C); +ASSERT_REG_POSITION(fence_action, 0x1D); ASSERT_REG_POSITION(acquire_mode, 
0x100); ASSERT_REG_POSITION(acquire_source, 0x101); diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp index d4e2553a95..f2a3a390e1 100644 --- a/src/video_core/gpu_asynch.cpp +++ b/src/video_core/gpu_asynch.cpp @@ -2,6 +2,8 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. +#include "core/core.h" +#include "core/hardware_interrupt_manager.h" #include "video_core/gpu_asynch.h" #include "video_core/gpu_thread.h" #include "video_core/renderer_base.h" @@ -9,7 +11,7 @@ namespace VideoCommon { GPUAsynch::GPUAsynch(Core::System& system, VideoCore::RendererBase& renderer) - : GPU(system, renderer), gpu_thread{system} {} + : GPU(system, renderer, true), gpu_thread{system} {} GPUAsynch::~GPUAsynch() = default; @@ -21,9 +23,8 @@ void GPUAsynch::PushGPUEntries(Tegra::CommandList&& entries) { gpu_thread.SubmitList(std::move(entries)); } -void GPUAsynch::SwapBuffers( - std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) { - gpu_thread.SwapBuffers(std::move(framebuffer)); +void GPUAsynch::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { + gpu_thread.SwapBuffers(framebuffer); } void GPUAsynch::FlushRegion(CacheAddr addr, u64 size) { @@ -38,4 +39,9 @@ void GPUAsynch::FlushAndInvalidateRegion(CacheAddr addr, u64 size) { gpu_thread.FlushAndInvalidateRegion(addr, size); } +void GPUAsynch::TriggerCpuInterrupt(const u32 syncpoint_id, const u32 value) const { + auto& interrupt_manager = system.InterruptManager(); + interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value); +} + } // namespace VideoCommon diff --git a/src/video_core/gpu_asynch.h b/src/video_core/gpu_asynch.h index 30be74cba8..a12f9bac41 100644 --- a/src/video_core/gpu_asynch.h +++ b/src/video_core/gpu_asynch.h @@ -14,19 +14,21 @@ class RendererBase; namespace VideoCommon { /// Implementation of GPU interface that runs the GPU asynchronously -class GPUAsynch : public Tegra::GPU { +class GPUAsynch final : public Tegra::GPU { public: explicit GPUAsynch(Core::System& system, VideoCore::RendererBase& renderer); ~GPUAsynch() override; void Start() override; void PushGPUEntries(Tegra::CommandList&& entries) override; - void SwapBuffers( - std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override; + void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override; void FlushRegion(CacheAddr addr, u64 size) override; void InvalidateRegion(CacheAddr addr, u64 size) override; void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override; +protected: + void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const override; + private: GPUThread::ThreadManager gpu_thread; }; diff --git a/src/video_core/gpu_synch.cpp b/src/video_core/gpu_synch.cpp index 45e43b1dc5..d482210772 100644 --- a/src/video_core/gpu_synch.cpp +++ b/src/video_core/gpu_synch.cpp @@ -8,7 +8,7 @@ namespace VideoCommon { GPUSynch::GPUSynch(Core::System& system, VideoCore::RendererBase& renderer) - : GPU(system, renderer) {} + : GPU(system, renderer, false) {} GPUSynch::~GPUSynch() = default; @@ -19,9 +19,8 @@ void GPUSynch::PushGPUEntries(Tegra::CommandList&& entries) { dma_pusher->DispatchCalls(); } -void GPUSynch::SwapBuffers( - std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) { - renderer.SwapBuffers(std::move(framebuffer)); +void GPUSynch::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { + renderer.SwapBuffers(framebuffer); } void GPUSynch::FlushRegion(CacheAddr addr, u64 size) { diff --git 
a/src/video_core/gpu_synch.h b/src/video_core/gpu_synch.h index 3031fcf725..5eb1c461c2 100644 --- a/src/video_core/gpu_synch.h +++ b/src/video_core/gpu_synch.h @@ -13,18 +13,21 @@ class RendererBase; namespace VideoCommon { /// Implementation of GPU interface that runs the GPU synchronously -class GPUSynch : public Tegra::GPU { +class GPUSynch final : public Tegra::GPU { public: explicit GPUSynch(Core::System& system, VideoCore::RendererBase& renderer); ~GPUSynch() override; void Start() override; void PushGPUEntries(Tegra::CommandList&& entries) override; - void SwapBuffers( - std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override; + void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override; void FlushRegion(CacheAddr addr, u64 size) override; void InvalidateRegion(CacheAddr addr, u64 size) override; void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override; + +protected: + void TriggerCpuInterrupt([[maybe_unused]] u32 syncpoint_id, + [[maybe_unused]] u32 value) const override {} }; } // namespace VideoCommon diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp index 3f0939ec9e..5f039e4fdc 100644 --- a/src/video_core/gpu_thread.cpp +++ b/src/video_core/gpu_thread.cpp @@ -21,7 +21,8 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p MicroProfileOnThreadCreate("GpuThread"); // Wait for first GPU command before acquiring the window context - state.WaitForCommands(); + while (state.queue.Empty()) + ; // If emulation was stopped during disk shader loading, abort before trying to acquire context if (!state.is_running) { @@ -32,14 +33,13 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p CommandDataContainer next; while (state.is_running) { - state.WaitForCommands(); while (!state.queue.Empty()) { state.queue.Pop(next); if (const auto submit_list = std::get_if<SubmitListCommand>(&next.data)) { dma_pusher.Push(std::move(submit_list->entries)); dma_pusher.DispatchCalls(); } else if (const auto data = std::get_if<SwapBuffersCommand>(&next.data)) { - renderer.SwapBuffers(std::move(data->framebuffer)); + renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr); } else if (const auto data = std::get_if<FlushRegionCommand>(&next.data)) { renderer.Rasterizer().FlushRegion(data->addr, data->size); } else if (const auto data = std::get_if<InvalidateRegionCommand>(&next.data)) { @@ -49,8 +49,7 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p } else { UNREACHABLE(); } - state.signaled_fence = next.fence; - state.TrySynchronize(); + state.signaled_fence.store(next.fence); } } } @@ -79,9 +78,9 @@ void ThreadManager::SubmitList(Tegra::CommandList&& entries) { system.CoreTiming().ScheduleEvent(synchronization_ticks, synchronization_event, fence); } -void ThreadManager::SwapBuffers( - std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) { - PushCommand(SwapBuffersCommand(std::move(framebuffer))); +void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { + PushCommand(SwapBuffersCommand(framebuffer ? 
*framebuffer + : std::optional<const Tegra::FramebufferConfig>{})); } void ThreadManager::FlushRegion(CacheAddr addr, u64 size) { @@ -89,12 +88,7 @@ void ThreadManager::FlushRegion(CacheAddr addr, u64 size) { } void ThreadManager::InvalidateRegion(CacheAddr addr, u64 size) { - if (state.queue.Empty()) { - // It's quicker to invalidate a single region on the CPU if the queue is already empty - system.Renderer().Rasterizer().InvalidateRegion(addr, size); - } else { - PushCommand(InvalidateRegionCommand(addr, size)); - } + system.Renderer().Rasterizer().InvalidateRegion(addr, size); } void ThreadManager::FlushAndInvalidateRegion(CacheAddr addr, u64 size) { @@ -105,22 +99,13 @@ void ThreadManager::FlushAndInvalidateRegion(CacheAddr addr, u64 size) { u64 ThreadManager::PushCommand(CommandData&& command_data) { const u64 fence{++state.last_fence}; state.queue.Push(CommandDataContainer(std::move(command_data), fence)); - state.SignalCommands(); return fence; } MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192)); void SynchState::WaitForSynchronization(u64 fence) { - if (signaled_fence >= fence) { - return; - } - - // Wait for the GPU to be idle (all commands to be executed) - { - MICROPROFILE_SCOPE(GPU_wait); - std::unique_lock lock{synchronization_mutex}; - synchronization_condition.wait(lock, [this, fence] { return signaled_fence >= fence; }); - } + while (signaled_fence.load() < fence) + ; } } // namespace VideoCommon::GPUThread diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h index 05a168a726..3ae0ec9f33 100644 --- a/src/video_core/gpu_thread.h +++ b/src/video_core/gpu_thread.h @@ -88,41 +88,9 @@ struct CommandDataContainer { /// Struct used to synchronize the GPU thread struct SynchState final { std::atomic_bool is_running{true}; - std::atomic_int queued_frame_count{}; - std::mutex synchronization_mutex; - std::mutex commands_mutex; - std::condition_variable commands_condition; - std::condition_variable synchronization_condition; - - /// Returns true if the gap in GPU commands is small enough that we can consider the CPU and GPU - /// synchronized. This is entirely empirical. 
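// Editor's note: minimal sketch, not part of the diff. The gpu_thread changes above drop the
// condition-variable handshake in favour of spinning on an atomic fence counter: the GPU
// thread stores the fence of the last processed command and waiters loop until it catches up.
#include <atomic>
#include <cstdint>

namespace sketch {
inline std::atomic<std::uint64_t> signaled_fence{0};

inline void SignalFence(std::uint64_t fence) {
    signaled_fence.store(fence); // GPU thread, after executing a command batch
}

inline void WaitForFence(std::uint64_t fence) {
    while (signaled_fence.load() < fence) {
        // Busy-wait; the new scheme intentionally has no sleep or condition variable here.
    }
}
} // namespace sketch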
- bool IsSynchronized() const { - constexpr std::size_t max_queue_gap{5}; - return queue.Size() <= max_queue_gap; - } - - void TrySynchronize() { - if (IsSynchronized()) { - std::lock_guard lock{synchronization_mutex}; - synchronization_condition.notify_one(); - } - } void WaitForSynchronization(u64 fence); - void SignalCommands() { - if (queue.Empty()) { - return; - } - - commands_condition.notify_one(); - } - - void WaitForCommands() { - std::unique_lock lock{commands_mutex}; - commands_condition.wait(lock, [this] { return !queue.Empty(); }); - } - using CommandQueue = Common::SPSCQueue<CommandDataContainer>; CommandQueue queue; u64 last_fence{}; @@ -142,8 +110,7 @@ public: void SubmitList(Tegra::CommandList&& entries); /// Swap buffers (render frame) - void SwapBuffers( - std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer); + void SwapBuffers(const Tegra::FramebufferConfig* framebuffer); /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory void FlushRegion(CacheAddr addr, u64 size); diff --git a/src/video_core/macro_interpreter.cpp b/src/video_core/macro_interpreter.cpp index c766ed692b..9f59a2dc1f 100644 --- a/src/video_core/macro_interpreter.cpp +++ b/src/video_core/macro_interpreter.cpp @@ -4,14 +4,18 @@ #include "common/assert.h" #include "common/logging/log.h" +#include "common/microprofile.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/macro_interpreter.h" +MICROPROFILE_DEFINE(MacroInterp, "GPU", "Execute macro interpreter", MP_RGB(128, 128, 192)); + namespace Tegra { MacroInterpreter::MacroInterpreter(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {} void MacroInterpreter::Execute(u32 offset, std::vector<u32> parameters) { + MICROPROFILE_SCOPE(MacroInterp); Reset(); registers[1] = parameters[0]; this->parameters = std::move(parameters); diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 3224531162..bffae940c0 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -5,13 +5,17 @@ #include "common/alignment.h" #include "common/assert.h" #include "common/logging/log.h" +#include "core/core.h" +#include "core/hle/kernel/process.h" +#include "core/hle/kernel/vm_manager.h" #include "core/memory.h" #include "video_core/memory_manager.h" #include "video_core/rasterizer_interface.h" namespace Tegra { -MemoryManager::MemoryManager(VideoCore::RasterizerInterface& rasterizer) : rasterizer{rasterizer} { +MemoryManager::MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer) + : rasterizer{rasterizer}, system{system} { std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr); std::fill(page_table.attributes.begin(), page_table.attributes.end(), Common::PageType::Unmapped); @@ -49,6 +53,11 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) { const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)}; MapBackingMemory(gpu_addr, Memory::GetPointer(cpu_addr), aligned_size, cpu_addr); + ASSERT(system.CurrentProcess() + ->VMManager() + .SetMemoryAttribute(cpu_addr, size, Kernel::MemoryAttribute::DeviceMapped, + Kernel::MemoryAttribute::DeviceMapped) + .IsSuccess()); return gpu_addr; } @@ -59,7 +68,11 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size) const u64 aligned_size{Common::AlignUp(size, page_size)}; MapBackingMemory(gpu_addr, Memory::GetPointer(cpu_addr), aligned_size, cpu_addr); - + ASSERT(system.CurrentProcess() + 
->VMManager() + .SetMemoryAttribute(cpu_addr, size, Kernel::MemoryAttribute::DeviceMapped, + Kernel::MemoryAttribute::DeviceMapped) + .IsSuccess()); return gpu_addr; } @@ -68,9 +81,16 @@ GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) { const u64 aligned_size{Common::AlignUp(size, page_size)}; const CacheAddr cache_addr{ToCacheAddr(GetPointer(gpu_addr))}; + const auto cpu_addr = GpuToCpuAddress(gpu_addr); + ASSERT(cpu_addr); rasterizer.FlushAndInvalidateRegion(cache_addr, aligned_size); UnmapRange(gpu_addr, aligned_size); + ASSERT(system.CurrentProcess() + ->VMManager() + .SetMemoryAttribute(cpu_addr.value(), size, Kernel::MemoryAttribute::DeviceMapped, + Kernel::MemoryAttribute::None) + .IsSuccess()); return gpu_addr; } diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 43a84bd528..aea0100870 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -14,6 +14,10 @@ namespace VideoCore { class RasterizerInterface; } +namespace Core { +class System; +} + namespace Tegra { /** @@ -47,7 +51,7 @@ struct VirtualMemoryArea { class MemoryManager final { public: - explicit MemoryManager(VideoCore::RasterizerInterface& rasterizer); + explicit MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer); ~MemoryManager(); GPUVAddr AllocateSpace(u64 size, u64 align); @@ -173,6 +177,8 @@ private: Common::PageTable page_table{page_bits}; VMAMap vma_map; VideoCore::RasterizerInterface& rasterizer; + + Core::System& system; }; } // namespace Tegra diff --git a/src/video_core/morton.cpp b/src/video_core/morton.cpp index 3e91cbc83d..084f85e670 100644 --- a/src/video_core/morton.cpp +++ b/src/video_core/morton.cpp @@ -25,8 +25,8 @@ static void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth // With the BCn formats (DXT and DXN), each 4x4 tile is swizzled instead of just individual // pixel values. 
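// Editor's note: illustrative sketch, not part of the diff. It shows why the tile sizes in
// the lines that follow can become constexpr: the pixel format is a template parameter of
// MortonCopy, so the block dimensions are compile-time constants. The enum and helpers here
// are simplified stand-ins, not the real surface utilities.
#include <cstdint>

namespace sketch {
enum class PixelFormat { ABGR8, DXT1 };

constexpr std::uint32_t GetDefaultBlockWidth(PixelFormat format) {
    return format == PixelFormat::DXT1 ? 4 : 1;
}
constexpr std::uint32_t GetDefaultBlockHeight(PixelFormat format) {
    return format == PixelFormat::DXT1 ? 4 : 1;
}

template <PixelFormat format>
constexpr std::uint32_t TileArea() {
    constexpr std::uint32_t tile_size_x{GetDefaultBlockWidth(format)};  // evaluated at compile time
    constexpr std::uint32_t tile_size_y{GetDefaultBlockHeight(format)};
    return tile_size_x * tile_size_y;
}

static_assert(TileArea<PixelFormat::DXT1>() == 16);
} // namespace sketch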
- const u32 tile_size_x{GetDefaultBlockWidth(format)}; - const u32 tile_size_y{GetDefaultBlockHeight(format)}; + constexpr u32 tile_size_x{GetDefaultBlockWidth(format)}; + constexpr u32 tile_size_y{GetDefaultBlockHeight(format)}; if constexpr (morton_to_linear) { Tegra::Texture::UnswizzleTexture(buffer, addr, tile_size_x, tile_size_y, bytes_per_pixel, @@ -186,99 +186,6 @@ static MortonCopyFn GetSwizzleFunction(MortonSwizzleMode mode, Surface::PixelFor return morton_to_linear_fns[static_cast<std::size_t>(format)]; } -static u32 MortonInterleave128(u32 x, u32 y) { - // 128x128 Z-Order coordinate from 2D coordinates - static constexpr u32 xlut[] = { - 0x0000, 0x0001, 0x0002, 0x0003, 0x0008, 0x0009, 0x000a, 0x000b, 0x0040, 0x0041, 0x0042, - 0x0043, 0x0048, 0x0049, 0x004a, 0x004b, 0x0800, 0x0801, 0x0802, 0x0803, 0x0808, 0x0809, - 0x080a, 0x080b, 0x0840, 0x0841, 0x0842, 0x0843, 0x0848, 0x0849, 0x084a, 0x084b, 0x1000, - 0x1001, 0x1002, 0x1003, 0x1008, 0x1009, 0x100a, 0x100b, 0x1040, 0x1041, 0x1042, 0x1043, - 0x1048, 0x1049, 0x104a, 0x104b, 0x1800, 0x1801, 0x1802, 0x1803, 0x1808, 0x1809, 0x180a, - 0x180b, 0x1840, 0x1841, 0x1842, 0x1843, 0x1848, 0x1849, 0x184a, 0x184b, 0x2000, 0x2001, - 0x2002, 0x2003, 0x2008, 0x2009, 0x200a, 0x200b, 0x2040, 0x2041, 0x2042, 0x2043, 0x2048, - 0x2049, 0x204a, 0x204b, 0x2800, 0x2801, 0x2802, 0x2803, 0x2808, 0x2809, 0x280a, 0x280b, - 0x2840, 0x2841, 0x2842, 0x2843, 0x2848, 0x2849, 0x284a, 0x284b, 0x3000, 0x3001, 0x3002, - 0x3003, 0x3008, 0x3009, 0x300a, 0x300b, 0x3040, 0x3041, 0x3042, 0x3043, 0x3048, 0x3049, - 0x304a, 0x304b, 0x3800, 0x3801, 0x3802, 0x3803, 0x3808, 0x3809, 0x380a, 0x380b, 0x3840, - 0x3841, 0x3842, 0x3843, 0x3848, 0x3849, 0x384a, 0x384b, 0x0000, 0x0001, 0x0002, 0x0003, - 0x0008, 0x0009, 0x000a, 0x000b, 0x0040, 0x0041, 0x0042, 0x0043, 0x0048, 0x0049, 0x004a, - 0x004b, 0x0800, 0x0801, 0x0802, 0x0803, 0x0808, 0x0809, 0x080a, 0x080b, 0x0840, 0x0841, - 0x0842, 0x0843, 0x0848, 0x0849, 0x084a, 0x084b, 0x1000, 0x1001, 0x1002, 0x1003, 0x1008, - 0x1009, 0x100a, 0x100b, 0x1040, 0x1041, 0x1042, 0x1043, 0x1048, 0x1049, 0x104a, 0x104b, - 0x1800, 0x1801, 0x1802, 0x1803, 0x1808, 0x1809, 0x180a, 0x180b, 0x1840, 0x1841, 0x1842, - 0x1843, 0x1848, 0x1849, 0x184a, 0x184b, 0x2000, 0x2001, 0x2002, 0x2003, 0x2008, 0x2009, - 0x200a, 0x200b, 0x2040, 0x2041, 0x2042, 0x2043, 0x2048, 0x2049, 0x204a, 0x204b, 0x2800, - 0x2801, 0x2802, 0x2803, 0x2808, 0x2809, 0x280a, 0x280b, 0x2840, 0x2841, 0x2842, 0x2843, - 0x2848, 0x2849, 0x284a, 0x284b, 0x3000, 0x3001, 0x3002, 0x3003, 0x3008, 0x3009, 0x300a, - 0x300b, 0x3040, 0x3041, 0x3042, 0x3043, 0x3048, 0x3049, 0x304a, 0x304b, 0x3800, 0x3801, - 0x3802, 0x3803, 0x3808, 0x3809, 0x380a, 0x380b, 0x3840, 0x3841, 0x3842, 0x3843, 0x3848, - 0x3849, 0x384a, 0x384b, 0x0000, 0x0001, 0x0002, 0x0003, 0x0008, 0x0009, 0x000a, 0x000b, - 0x0040, 0x0041, 0x0042, 0x0043, 0x0048, 0x0049, 0x004a, 0x004b, 0x0800, 0x0801, 0x0802, - 0x0803, 0x0808, 0x0809, 0x080a, 0x080b, 0x0840, 0x0841, 0x0842, 0x0843, 0x0848, 0x0849, - 0x084a, 0x084b, 0x1000, 0x1001, 0x1002, 0x1003, 0x1008, 0x1009, 0x100a, 0x100b, 0x1040, - 0x1041, 0x1042, 0x1043, 0x1048, 0x1049, 0x104a, 0x104b, 0x1800, 0x1801, 0x1802, 0x1803, - 0x1808, 0x1809, 0x180a, 0x180b, 0x1840, 0x1841, 0x1842, 0x1843, 0x1848, 0x1849, 0x184a, - 0x184b, 0x2000, 0x2001, 0x2002, 0x2003, 0x2008, 0x2009, 0x200a, 0x200b, 0x2040, 0x2041, - 0x2042, 0x2043, 0x2048, 0x2049, 0x204a, 0x204b, 0x2800, 0x2801, 0x2802, 0x2803, 0x2808, - 0x2809, 0x280a, 0x280b, 0x2840, 0x2841, 0x2842, 0x2843, 0x2848, 0x2849, 0x284a, 0x284b, - 0x3000, 0x3001, 
0x3002, 0x3003, 0x3008, 0x3009, 0x300a, 0x300b, 0x3040, 0x3041, 0x3042, - 0x3043, 0x3048, 0x3049, 0x304a, 0x304b, 0x3800, 0x3801, 0x3802, 0x3803, 0x3808, 0x3809, - 0x380a, 0x380b, 0x3840, 0x3841, 0x3842, 0x3843, 0x3848, 0x3849, 0x384a, 0x384b, - }; - static constexpr u32 ylut[] = { - 0x0000, 0x0004, 0x0010, 0x0014, 0x0020, 0x0024, 0x0030, 0x0034, 0x0080, 0x0084, 0x0090, - 0x0094, 0x00a0, 0x00a4, 0x00b0, 0x00b4, 0x0100, 0x0104, 0x0110, 0x0114, 0x0120, 0x0124, - 0x0130, 0x0134, 0x0180, 0x0184, 0x0190, 0x0194, 0x01a0, 0x01a4, 0x01b0, 0x01b4, 0x0200, - 0x0204, 0x0210, 0x0214, 0x0220, 0x0224, 0x0230, 0x0234, 0x0280, 0x0284, 0x0290, 0x0294, - 0x02a0, 0x02a4, 0x02b0, 0x02b4, 0x0300, 0x0304, 0x0310, 0x0314, 0x0320, 0x0324, 0x0330, - 0x0334, 0x0380, 0x0384, 0x0390, 0x0394, 0x03a0, 0x03a4, 0x03b0, 0x03b4, 0x0400, 0x0404, - 0x0410, 0x0414, 0x0420, 0x0424, 0x0430, 0x0434, 0x0480, 0x0484, 0x0490, 0x0494, 0x04a0, - 0x04a4, 0x04b0, 0x04b4, 0x0500, 0x0504, 0x0510, 0x0514, 0x0520, 0x0524, 0x0530, 0x0534, - 0x0580, 0x0584, 0x0590, 0x0594, 0x05a0, 0x05a4, 0x05b0, 0x05b4, 0x0600, 0x0604, 0x0610, - 0x0614, 0x0620, 0x0624, 0x0630, 0x0634, 0x0680, 0x0684, 0x0690, 0x0694, 0x06a0, 0x06a4, - 0x06b0, 0x06b4, 0x0700, 0x0704, 0x0710, 0x0714, 0x0720, 0x0724, 0x0730, 0x0734, 0x0780, - 0x0784, 0x0790, 0x0794, 0x07a0, 0x07a4, 0x07b0, 0x07b4, 0x0000, 0x0004, 0x0010, 0x0014, - 0x0020, 0x0024, 0x0030, 0x0034, 0x0080, 0x0084, 0x0090, 0x0094, 0x00a0, 0x00a4, 0x00b0, - 0x00b4, 0x0100, 0x0104, 0x0110, 0x0114, 0x0120, 0x0124, 0x0130, 0x0134, 0x0180, 0x0184, - 0x0190, 0x0194, 0x01a0, 0x01a4, 0x01b0, 0x01b4, 0x0200, 0x0204, 0x0210, 0x0214, 0x0220, - 0x0224, 0x0230, 0x0234, 0x0280, 0x0284, 0x0290, 0x0294, 0x02a0, 0x02a4, 0x02b0, 0x02b4, - 0x0300, 0x0304, 0x0310, 0x0314, 0x0320, 0x0324, 0x0330, 0x0334, 0x0380, 0x0384, 0x0390, - 0x0394, 0x03a0, 0x03a4, 0x03b0, 0x03b4, 0x0400, 0x0404, 0x0410, 0x0414, 0x0420, 0x0424, - 0x0430, 0x0434, 0x0480, 0x0484, 0x0490, 0x0494, 0x04a0, 0x04a4, 0x04b0, 0x04b4, 0x0500, - 0x0504, 0x0510, 0x0514, 0x0520, 0x0524, 0x0530, 0x0534, 0x0580, 0x0584, 0x0590, 0x0594, - 0x05a0, 0x05a4, 0x05b0, 0x05b4, 0x0600, 0x0604, 0x0610, 0x0614, 0x0620, 0x0624, 0x0630, - 0x0634, 0x0680, 0x0684, 0x0690, 0x0694, 0x06a0, 0x06a4, 0x06b0, 0x06b4, 0x0700, 0x0704, - 0x0710, 0x0714, 0x0720, 0x0724, 0x0730, 0x0734, 0x0780, 0x0784, 0x0790, 0x0794, 0x07a0, - 0x07a4, 0x07b0, 0x07b4, 0x0000, 0x0004, 0x0010, 0x0014, 0x0020, 0x0024, 0x0030, 0x0034, - 0x0080, 0x0084, 0x0090, 0x0094, 0x00a0, 0x00a4, 0x00b0, 0x00b4, 0x0100, 0x0104, 0x0110, - 0x0114, 0x0120, 0x0124, 0x0130, 0x0134, 0x0180, 0x0184, 0x0190, 0x0194, 0x01a0, 0x01a4, - 0x01b0, 0x01b4, 0x0200, 0x0204, 0x0210, 0x0214, 0x0220, 0x0224, 0x0230, 0x0234, 0x0280, - 0x0284, 0x0290, 0x0294, 0x02a0, 0x02a4, 0x02b0, 0x02b4, 0x0300, 0x0304, 0x0310, 0x0314, - 0x0320, 0x0324, 0x0330, 0x0334, 0x0380, 0x0384, 0x0390, 0x0394, 0x03a0, 0x03a4, 0x03b0, - 0x03b4, 0x0400, 0x0404, 0x0410, 0x0414, 0x0420, 0x0424, 0x0430, 0x0434, 0x0480, 0x0484, - 0x0490, 0x0494, 0x04a0, 0x04a4, 0x04b0, 0x04b4, 0x0500, 0x0504, 0x0510, 0x0514, 0x0520, - 0x0524, 0x0530, 0x0534, 0x0580, 0x0584, 0x0590, 0x0594, 0x05a0, 0x05a4, 0x05b0, 0x05b4, - 0x0600, 0x0604, 0x0610, 0x0614, 0x0620, 0x0624, 0x0630, 0x0634, 0x0680, 0x0684, 0x0690, - 0x0694, 0x06a0, 0x06a4, 0x06b0, 0x06b4, 0x0700, 0x0704, 0x0710, 0x0714, 0x0720, 0x0724, - 0x0730, 0x0734, 0x0780, 0x0784, 0x0790, 0x0794, 0x07a0, 0x07a4, 0x07b0, 0x07b4, - }; - return xlut[x % 128] + ylut[y % 128]; -} - -static u32 GetMortonOffset128(u32 x, u32 y, u32 bytes_per_pixel) { - // 
Calculates the offset of the position of the pixel in Morton order - // Framebuffer images are split into 128x128 tiles. - - constexpr u32 block_height = 128; - const u32 coarse_x = x & ~127; - - const u32 i = MortonInterleave128(x, y); - - const u32 offset = coarse_x * block_height; - - return (i + offset) * bytes_per_pixel; -} - void MortonSwizzle(MortonSwizzleMode mode, Surface::PixelFormat format, u32 stride, u32 block_height, u32 height, u32 block_depth, u32 depth, u32 tile_width_spacing, u8* buffer, u8* addr) { @@ -286,23 +193,4 @@ void MortonSwizzle(MortonSwizzleMode mode, Surface::PixelFormat format, u32 stri tile_width_spacing, buffer, addr); } -void MortonCopyPixels128(MortonSwizzleMode mode, u32 width, u32 height, u32 bytes_per_pixel, - u32 linear_bytes_per_pixel, u8* morton_data, u8* linear_data) { - const bool morton_to_linear = mode == MortonSwizzleMode::MortonToLinear; - u8* data_ptrs[2]; - for (u32 y = 0; y < height; ++y) { - for (u32 x = 0; x < width; ++x) { - const u32 coarse_y = y & ~127; - const u32 morton_offset = - GetMortonOffset128(x, y, bytes_per_pixel) + coarse_y * width * bytes_per_pixel; - const u32 linear_pixel_index = (x + y * width) * linear_bytes_per_pixel; - - data_ptrs[morton_to_linear ? 1 : 0] = morton_data + morton_offset; - data_ptrs[morton_to_linear ? 0 : 1] = &linear_data[linear_pixel_index]; - - std::memcpy(data_ptrs[0], data_ptrs[1], bytes_per_pixel); - } - } -} - } // namespace VideoCore diff --git a/src/video_core/morton.h b/src/video_core/morton.h index ee5b45555e..b714a7e3fa 100644 --- a/src/video_core/morton.h +++ b/src/video_core/morton.h @@ -15,7 +15,4 @@ void MortonSwizzle(MortonSwizzleMode mode, VideoCore::Surface::PixelFormat forma u32 block_height, u32 height, u32 block_depth, u32 depth, u32 tile_width_spacing, u8* buffer, u8* addr); -void MortonCopyPixels128(MortonSwizzleMode mode, u32 width, u32 height, u32 bytes_per_pixel, - u32 linear_bytes_per_pixel, u8* morton_data, u8* linear_data); - } // namespace VideoCore diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index 5ee4f8e8ec..6b3f2d50a1 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h @@ -34,6 +34,9 @@ public: /// Clear the current framebuffer virtual void Clear() = 0; + /// Dispatches a compute shader invocation + virtual void DispatchCompute(GPUVAddr code_addr) = 0; + /// Notify rasterizer that all caches should be flushed to Switch memory virtual void FlushAll() = 0; @@ -47,6 +50,12 @@ public: /// and invalidated virtual void FlushAndInvalidateRegion(CacheAddr addr, u64 size) = 0; + /// Notify the rasterizer to send all written commands to the host GPU. 
+ virtual void FlushCommands() = 0; + + /// Notify rasterizer that a frame is about to finish + virtual void TickFrame() = 0; + /// Attempt to use a faster method to perform a surface copy virtual bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src, const Tegra::Engines::Fermi2D::Regs::Surface& dst, diff --git a/src/video_core/renderer_base.h b/src/video_core/renderer_base.h index 1d54c3723f..af1bebc4f8 100644 --- a/src/video_core/renderer_base.h +++ b/src/video_core/renderer_base.h @@ -36,8 +36,7 @@ public: virtual ~RendererBase(); /// Swap buffers (render frame) - virtual void SwapBuffers( - std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) = 0; + virtual void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) = 0; /// Initialize the renderer virtual bool Init() = 0; diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp index 2b9bd142e3..f8a807c848 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp @@ -2,103 +2,71 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. -#include <cstring> #include <memory> -#include "common/alignment.h" -#include "core/core.h" -#include "video_core/memory_manager.h" +#include <glad/glad.h> + +#include "common/assert.h" +#include "common/microprofile.h" +#include "video_core/rasterizer_interface.h" #include "video_core/renderer_opengl/gl_buffer_cache.h" #include "video_core/renderer_opengl/gl_rasterizer.h" +#include "video_core/renderer_opengl/gl_resource_manager.h" namespace OpenGL { -CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset, - std::size_t alignment, u8* host_ptr) - : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size}, offset{offset}, - alignment{alignment} {} - -OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size) - : RasterizerCache{rasterizer}, stream_buffer(size, true) {} - -GLintptr OGLBufferCache::UploadMemory(GPUVAddr gpu_addr, std::size_t size, std::size_t alignment, - bool cache) { - std::lock_guard lock{mutex}; - auto& memory_manager = Core::System::GetInstance().GPU().MemoryManager(); - - // Cache management is a big overhead, so only cache entries with a given size. - // TODO: Figure out which size is the best for given games. 
- cache &= size >= 2048; - - const auto& host_ptr{memory_manager.GetPointer(gpu_addr)}; - if (cache) { - auto entry = TryGet(host_ptr); - if (entry) { - if (entry->GetSize() >= size && entry->GetAlignment() == alignment) { - return entry->GetOffset(); - } - Unregister(entry); - } - } - - AlignBuffer(alignment); - const GLintptr uploaded_offset = buffer_offset; - - if (!host_ptr) { - return uploaded_offset; - } - - std::memcpy(buffer_ptr, host_ptr, size); - buffer_ptr += size; - buffer_offset += size; - - if (cache) { - auto entry = std::make_shared<CachedBufferEntry>( - *memory_manager.GpuToCpuAddress(gpu_addr), size, uploaded_offset, alignment, host_ptr); - Register(entry); - } - - return uploaded_offset; +MICROPROFILE_DEFINE(OpenGL_Buffer_Download, "OpenGL", "Buffer Download", MP_RGB(192, 192, 128)); + +CachedBufferBlock::CachedBufferBlock(CacheAddr cache_addr, const std::size_t size) + : VideoCommon::BufferBlock{cache_addr, size} { + gl_buffer.Create(); + glNamedBufferData(gl_buffer.handle, static_cast<GLsizeiptr>(size), nullptr, GL_DYNAMIC_DRAW); } -GLintptr OGLBufferCache::UploadHostMemory(const void* raw_pointer, std::size_t size, - std::size_t alignment) { - std::lock_guard lock{mutex}; - AlignBuffer(alignment); - std::memcpy(buffer_ptr, raw_pointer, size); - const GLintptr uploaded_offset = buffer_offset; +CachedBufferBlock::~CachedBufferBlock() = default; + +OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, Core::System& system, + std::size_t stream_size) + : VideoCommon::BufferCache<Buffer, GLuint, OGLStreamBuffer>{ + rasterizer, system, std::make_unique<OGLStreamBuffer>(stream_size, true)} {} + +OGLBufferCache::~OGLBufferCache() = default; - buffer_ptr += size; - buffer_offset += size; - return uploaded_offset; +Buffer OGLBufferCache::CreateBlock(CacheAddr cache_addr, std::size_t size) { + return std::make_shared<CachedBufferBlock>(cache_addr, size); } -bool OGLBufferCache::Map(std::size_t max_size) { - bool invalidate; - std::tie(buffer_ptr, buffer_offset_base, invalidate) = - stream_buffer.Map(static_cast<GLsizeiptr>(max_size), 4); - buffer_offset = buffer_offset_base; +void OGLBufferCache::WriteBarrier() { + glMemoryBarrier(GL_ALL_BARRIER_BITS); +} + +const GLuint* OGLBufferCache::ToHandle(const Buffer& buffer) { + return buffer->GetHandle(); +} - if (invalidate) { - InvalidateAll(); - } - return invalidate; +const GLuint* OGLBufferCache::GetEmptyBuffer(std::size_t) { + static const GLuint null_buffer = 0; + return &null_buffer; } -void OGLBufferCache::Unmap() { - stream_buffer.Unmap(buffer_offset - buffer_offset_base); +void OGLBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, + const u8* data) { + glNamedBufferSubData(*buffer->GetHandle(), static_cast<GLintptr>(offset), + static_cast<GLsizeiptr>(size), data); } -GLuint OGLBufferCache::GetHandle() const { - return stream_buffer.GetHandle(); +void OGLBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, + u8* data) { + MICROPROFILE_SCOPE(OpenGL_Buffer_Download); + glGetNamedBufferSubData(*buffer->GetHandle(), static_cast<GLintptr>(offset), + static_cast<GLsizeiptr>(size), data); } -void OGLBufferCache::AlignBuffer(std::size_t alignment) { - // Align the offset, not the mapped pointer - const GLintptr offset_aligned = - static_cast<GLintptr>(Common::AlignUp(static_cast<std::size_t>(buffer_offset), alignment)); - buffer_ptr += offset_aligned - buffer_offset; - buffer_offset = offset_aligned; +void OGLBufferCache::CopyBlock(const Buffer& src, 
const Buffer& dst, std::size_t src_offset, + std::size_t dst_offset, std::size_t size) { + glCopyNamedBufferSubData(*src->GetHandle(), *dst->GetHandle(), + static_cast<GLintptr>(src_offset), static_cast<GLintptr>(dst_offset), + static_cast<GLsizeiptr>(size)); } } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h index f2347581b5..022e7bfa9a 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.h +++ b/src/video_core/renderer_opengl/gl_buffer_cache.h @@ -4,80 +4,63 @@ #pragma once -#include <cstddef> #include <memory> -#include <tuple> #include "common/common_types.h" +#include "video_core/buffer_cache/buffer_cache.h" #include "video_core/rasterizer_cache.h" #include "video_core/renderer_opengl/gl_resource_manager.h" #include "video_core/renderer_opengl/gl_stream_buffer.h" +namespace Core { +class System; +} + namespace OpenGL { +class OGLStreamBuffer; class RasterizerOpenGL; -class CachedBufferEntry final : public RasterizerCacheObject { -public: - explicit CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset, - std::size_t alignment, u8* host_ptr); - - VAddr GetCpuAddr() const override { - return cpu_addr; - } +class CachedBufferBlock; - std::size_t GetSizeInBytes() const override { - return size; - } - - std::size_t GetSize() const { - return size; - } +using Buffer = std::shared_ptr<CachedBufferBlock>; - GLintptr GetOffset() const { - return offset; - } +class CachedBufferBlock : public VideoCommon::BufferBlock { +public: + explicit CachedBufferBlock(CacheAddr cache_addr, const std::size_t size); + ~CachedBufferBlock(); - std::size_t GetAlignment() const { - return alignment; + const GLuint* GetHandle() const { + return &gl_buffer.handle; } private: - VAddr cpu_addr{}; - std::size_t size{}; - GLintptr offset{}; - std::size_t alignment{}; + OGLBuffer gl_buffer{}; }; -class OGLBufferCache final : public RasterizerCache<std::shared_ptr<CachedBufferEntry>> { +class OGLBufferCache final : public VideoCommon::BufferCache<Buffer, GLuint, OGLStreamBuffer> { public: - explicit OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size); - - /// Uploads data from a guest GPU address. Returns host's buffer offset where it's been - /// allocated. - GLintptr UploadMemory(GPUVAddr gpu_addr, std::size_t size, std::size_t alignment = 4, - bool cache = true); + explicit OGLBufferCache(RasterizerOpenGL& rasterizer, Core::System& system, + std::size_t stream_size); + ~OGLBufferCache(); - /// Uploads from a host memory. Returns host's buffer offset where it's been allocated. - GLintptr UploadHostMemory(const void* raw_pointer, std::size_t size, std::size_t alignment = 4); + const GLuint* GetEmptyBuffer(std::size_t) override; - bool Map(std::size_t max_size); - void Unmap(); +protected: + Buffer CreateBlock(CacheAddr cache_addr, std::size_t size) override; - GLuint GetHandle() const; + void WriteBarrier() override; -protected: - void AlignBuffer(std::size_t alignment); + const GLuint* ToHandle(const Buffer& buffer) override; - // We do not have to flush this cache as things in it are never modified by us. 
- void FlushObjectInner(const std::shared_ptr<CachedBufferEntry>& object) override {} + void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, + const u8* data) override; -private: - OGLStreamBuffer stream_buffer; + void DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, + u8* data) override; - u8* buffer_ptr = nullptr; - GLintptr buffer_offset = 0; - GLintptr buffer_offset_base = 0; + void CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset, + std::size_t dst_offset, std::size_t size) override; }; } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp index a48e14d2ee..4f59a87b40 100644 --- a/src/video_core/renderer_opengl/gl_device.cpp +++ b/src/video_core/renderer_opengl/gl_device.cpp @@ -14,52 +14,64 @@ namespace OpenGL { namespace { + template <typename T> T GetInteger(GLenum pname) { GLint temporary; glGetIntegerv(pname, &temporary); return static_cast<T>(temporary); } + +bool TestProgram(const GLchar* glsl) { + const GLuint shader{glCreateShaderProgramv(GL_VERTEX_SHADER, 1, &glsl)}; + GLint link_status; + glGetProgramiv(shader, GL_LINK_STATUS, &link_status); + glDeleteProgram(shader); + return link_status == GL_TRUE; +} + } // Anonymous namespace Device::Device() { uniform_buffer_alignment = GetInteger<std::size_t>(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT); + shader_storage_alignment = GetInteger<std::size_t>(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT); max_vertex_attributes = GetInteger<u32>(GL_MAX_VERTEX_ATTRIBS); max_varyings = GetInteger<u32>(GL_MAX_VARYING_VECTORS); + has_warp_intrinsics = GLAD_GL_NV_gpu_shader5 && GLAD_GL_NV_shader_thread_group && + GLAD_GL_NV_shader_thread_shuffle; + has_vertex_viewport_layer = GLAD_GL_ARB_shader_viewport_layer_array; has_variable_aoffi = TestVariableAoffi(); has_component_indexing_bug = TestComponentIndexingBug(); + has_precise_bug = TestPreciseBug(); + + LOG_INFO(Render_OpenGL, "Renderer_VariableAOFFI: {}", has_variable_aoffi); + LOG_INFO(Render_OpenGL, "Renderer_ComponentIndexingBug: {}", has_component_indexing_bug); + LOG_INFO(Render_OpenGL, "Renderer_PreciseBug: {}", has_precise_bug); } Device::Device(std::nullptr_t) { uniform_buffer_alignment = 0; max_vertex_attributes = 16; max_varyings = 15; + has_warp_intrinsics = true; + has_vertex_viewport_layer = true; has_variable_aoffi = true; has_component_indexing_bug = false; + has_precise_bug = false; } bool Device::TestVariableAoffi() { - const GLchar* AOFFI_TEST = R"(#version 430 core + return TestProgram(R"(#version 430 core // This is a unit test, please ignore me on apitrace bug reports. 
uniform sampler2D tex; uniform ivec2 variable_offset; out vec4 output_attribute; void main() { output_attribute = textureOffset(tex, vec2(0), variable_offset); -} -)"; - const GLuint shader{glCreateShaderProgramv(GL_VERTEX_SHADER, 1, &AOFFI_TEST)}; - GLint link_status{}; - glGetProgramiv(shader, GL_LINK_STATUS, &link_status); - glDeleteProgram(shader); - - const bool supported{link_status == GL_TRUE}; - LOG_INFO(Render_OpenGL, "Renderer_VariableAOFFI: {}", supported); - return supported; +})"); } bool Device::TestComponentIndexingBug() { - constexpr char log_message[] = "Renderer_ComponentIndexingBug: {}"; const GLchar* COMPONENT_TEST = R"(#version 430 core layout (std430, binding = 0) buffer OutputBuffer { uint output_value; @@ -99,12 +111,21 @@ void main() { GLuint result; glGetNamedBufferSubData(ssbo.handle, 0, sizeof(result), &result); if (result != values.at(index)) { - LOG_INFO(Render_OpenGL, log_message, true); return true; } } - LOG_INFO(Render_OpenGL, log_message, false); return false; } +bool Device::TestPreciseBug() { + return !TestProgram(R"(#version 430 core +in vec3 coords; +out float out_value; +uniform sampler2DShadow tex; +void main() { + precise float tmp_value = vec4(texture(tex, coords)).x; + out_value = tmp_value; +})"); +} + } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_device.h b/src/video_core/renderer_opengl/gl_device.h index 8c8c937600..ba6dcd3bef 100644 --- a/src/video_core/renderer_opengl/gl_device.h +++ b/src/video_core/renderer_opengl/gl_device.h @@ -18,6 +18,10 @@ public: return uniform_buffer_alignment; } + std::size_t GetShaderStorageBufferAlignment() const { + return shader_storage_alignment; + } + u32 GetMaxVertexAttributes() const { return max_vertex_attributes; } @@ -26,6 +30,14 @@ public: return max_varyings; } + bool HasWarpIntrinsics() const { + return has_warp_intrinsics; + } + + bool HasVertexViewportLayer() const { + return has_vertex_viewport_layer; + } + bool HasVariableAoffi() const { return has_variable_aoffi; } @@ -34,15 +46,24 @@ public: return has_component_indexing_bug; } + bool HasPreciseBug() const { + return has_precise_bug; + } + private: static bool TestVariableAoffi(); static bool TestComponentIndexingBug(); + static bool TestPreciseBug(); std::size_t uniform_buffer_alignment{}; + std::size_t shader_storage_alignment{}; u32 max_vertex_attributes{}; u32 max_varyings{}; + bool has_warp_intrinsics{}; + bool has_vertex_viewport_layer{}; bool has_variable_aoffi{}; bool has_component_indexing_bug{}; + bool has_precise_bug{}; }; } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_global_cache.cpp b/src/video_core/renderer_opengl/gl_global_cache.cpp deleted file mode 100644 index d5e385151f..0000000000 --- a/src/video_core/renderer_opengl/gl_global_cache.cpp +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2018 yuzu Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. 
- -#include <glad/glad.h> - -#include "common/logging/log.h" -#include "core/core.h" -#include "video_core/memory_manager.h" -#include "video_core/renderer_opengl/gl_global_cache.h" -#include "video_core/renderer_opengl/gl_rasterizer.h" -#include "video_core/renderer_opengl/gl_shader_decompiler.h" -#include "video_core/renderer_opengl/utils.h" - -namespace OpenGL { - -CachedGlobalRegion::CachedGlobalRegion(VAddr cpu_addr, u8* host_ptr, u32 size, u32 max_size) - : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, host_ptr{host_ptr}, size{size}, - max_size{max_size} { - buffer.Create(); - LabelGLObject(GL_BUFFER, buffer.handle, cpu_addr, "GlobalMemory"); -} - -CachedGlobalRegion::~CachedGlobalRegion() = default; - -void CachedGlobalRegion::Reload(u32 size_) { - size = size_; - if (size > max_size) { - size = max_size; - LOG_CRITICAL(HW_GPU, "Global region size {} exceeded the supported size {}!", size_, - max_size); - } - glNamedBufferData(buffer.handle, size, host_ptr, GL_STREAM_DRAW); -} - -void CachedGlobalRegion::Flush() { - LOG_DEBUG(Render_OpenGL, "Flushing {} bytes to CPU memory address 0x{:16}", size, cpu_addr); - glGetNamedBufferSubData(buffer.handle, 0, static_cast<GLsizeiptr>(size), host_ptr); -} - -GlobalRegion GlobalRegionCacheOpenGL::TryGetReservedGlobalRegion(CacheAddr addr, u32 size) const { - const auto search{reserve.find(addr)}; - if (search == reserve.end()) { - return {}; - } - return search->second; -} - -GlobalRegion GlobalRegionCacheOpenGL::GetUncachedGlobalRegion(GPUVAddr addr, u8* host_ptr, - u32 size) { - GlobalRegion region{TryGetReservedGlobalRegion(ToCacheAddr(host_ptr), size)}; - if (!region) { - // No reserved surface available, create a new one and reserve it - auto& memory_manager{Core::System::GetInstance().GPU().MemoryManager()}; - const auto cpu_addr{memory_manager.GpuToCpuAddress(addr)}; - ASSERT(cpu_addr); - - region = std::make_shared<CachedGlobalRegion>(*cpu_addr, host_ptr, size, max_ssbo_size); - ReserveGlobalRegion(region); - } - region->Reload(size); - return region; -} - -void GlobalRegionCacheOpenGL::ReserveGlobalRegion(GlobalRegion region) { - reserve.insert_or_assign(region->GetCacheAddr(), std::move(region)); -} - -GlobalRegionCacheOpenGL::GlobalRegionCacheOpenGL(RasterizerOpenGL& rasterizer) - : RasterizerCache{rasterizer} { - GLint max_ssbo_size_; - glGetIntegerv(GL_MAX_SHADER_STORAGE_BLOCK_SIZE, &max_ssbo_size_); - max_ssbo_size = static_cast<u32>(max_ssbo_size_); -} - -GlobalRegion GlobalRegionCacheOpenGL::GetGlobalRegion( - const GLShader::GlobalMemoryEntry& global_region, - Tegra::Engines::Maxwell3D::Regs::ShaderStage stage) { - std::lock_guard lock{mutex}; - - auto& gpu{Core::System::GetInstance().GPU()}; - auto& memory_manager{gpu.MemoryManager()}; - const auto cbufs{gpu.Maxwell3D().state.shader_stages[static_cast<std::size_t>(stage)]}; - const auto addr{cbufs.const_buffers[global_region.GetCbufIndex()].address + - global_region.GetCbufOffset()}; - const auto actual_addr{memory_manager.Read<u64>(addr)}; - const auto size{memory_manager.Read<u32>(addr + 8)}; - - // Look up global region in the cache based on address - const auto& host_ptr{memory_manager.GetPointer(actual_addr)}; - GlobalRegion region{TryGet(host_ptr)}; - - if (!region) { - // No global region found - create a new one - region = GetUncachedGlobalRegion(actual_addr, host_ptr, size); - Register(region); - } - - return region; -} - -} // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_global_cache.h b/src/video_core/renderer_opengl/gl_global_cache.h 
deleted file mode 100644 index 2d467a2401..0000000000 --- a/src/video_core/renderer_opengl/gl_global_cache.h +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2018 yuzu Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include <memory> -#include <unordered_map> - -#include <glad/glad.h> - -#include "common/assert.h" -#include "common/common_types.h" -#include "video_core/engines/maxwell_3d.h" -#include "video_core/rasterizer_cache.h" -#include "video_core/renderer_opengl/gl_resource_manager.h" - -namespace OpenGL { - -namespace GLShader { -class GlobalMemoryEntry; -} - -class RasterizerOpenGL; -class CachedGlobalRegion; -using GlobalRegion = std::shared_ptr<CachedGlobalRegion>; - -class CachedGlobalRegion final : public RasterizerCacheObject { -public: - explicit CachedGlobalRegion(VAddr cpu_addr, u8* host_ptr, u32 size, u32 max_size); - ~CachedGlobalRegion(); - - VAddr GetCpuAddr() const override { - return cpu_addr; - } - - std::size_t GetSizeInBytes() const override { - return size; - } - - /// Gets the GL program handle for the buffer - GLuint GetBufferHandle() const { - return buffer.handle; - } - - /// Reloads the global region from guest memory - void Reload(u32 size_); - - void Flush(); - -private: - VAddr cpu_addr{}; - u8* host_ptr{}; - u32 size{}; - u32 max_size{}; - - OGLBuffer buffer; -}; - -class GlobalRegionCacheOpenGL final : public RasterizerCache<GlobalRegion> { -public: - explicit GlobalRegionCacheOpenGL(RasterizerOpenGL& rasterizer); - - /// Gets the current specified shader stage program - GlobalRegion GetGlobalRegion(const GLShader::GlobalMemoryEntry& descriptor, - Tegra::Engines::Maxwell3D::Regs::ShaderStage stage); - -protected: - void FlushObjectInner(const GlobalRegion& object) override { - object->Flush(); - } - -private: - GlobalRegion TryGetReservedGlobalRegion(CacheAddr addr, u32 size) const; - GlobalRegion GetUncachedGlobalRegion(GPUVAddr addr, u8* host_ptr, u32 size); - void ReserveGlobalRegion(GlobalRegion region); - - std::unordered_map<CacheAddr, GlobalRegion> reserve; - u32 max_ssbo_size{}; -}; - -} // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index f45a3c5efc..bb09ecd52d 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -4,6 +4,7 @@ #include <algorithm> #include <array> +#include <bitset> #include <memory> #include <string> #include <string_view> @@ -19,7 +20,9 @@ #include "core/core.h" #include "core/hle/kernel/process.h" #include "core/settings.h" +#include "video_core/engines/kepler_compute.h" #include "video_core/engines/maxwell_3d.h" +#include "video_core/memory_manager.h" #include "video_core/renderer_opengl/gl_rasterizer.h" #include "video_core/renderer_opengl/gl_shader_cache.h" #include "video_core/renderer_opengl/gl_shader_gen.h" @@ -80,16 +83,31 @@ struct DrawParameters { } }; +static std::size_t GetConstBufferSize(const Tegra::Engines::ConstBufferInfo& buffer, + const GLShader::ConstBufferEntry& entry) { + if (!entry.IsIndirect()) { + return entry.GetSize(); + } + + if (buffer.size > Maxwell::MaxConstBufferSize) { + LOG_WARNING(Render_OpenGL, "Indirect constbuffer size {} exceeds maximum {}", buffer.size, + Maxwell::MaxConstBufferSize); + return Maxwell::MaxConstBufferSize; + } + + return buffer.size; +} + RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWindow& emu_window, ScreenInfo& info) 
: texture_cache{system, *this, device}, shader_cache{*this, system, emu_window, device}, - global_cache{*this}, system{system}, screen_info{info}, - buffer_cache(*this, STREAM_BUFFER_SIZE) { + system{system}, screen_info{info}, buffer_cache{*this, system, STREAM_BUFFER_SIZE} { OpenGLState::ApplyDefaultState(); shader_program_manager = std::make_unique<GLShader::ProgramManager>(); state.draw.shader_program = 0; state.Apply(); + clear_framebuffer.Create(); LOG_DEBUG(Render_OpenGL, "Sync fixed function OpenGL state here"); CheckExtensions(); @@ -109,10 +127,10 @@ GLuint RasterizerOpenGL::SetupVertexFormat() { auto& gpu = system.GPU().Maxwell3D(); const auto& regs = gpu.regs; - if (!gpu.dirty_flags.vertex_attrib_format) { + if (!gpu.dirty.vertex_attrib_format) { return state.draw.vertex_array; } - gpu.dirty_flags.vertex_attrib_format = false; + gpu.dirty.vertex_attrib_format = false; MICROPROFILE_SCOPE(OpenGL_VAO); @@ -129,8 +147,6 @@ GLuint RasterizerOpenGL::SetupVertexFormat() { state.draw.vertex_array = vao; state.ApplyVertexArrayState(); - glVertexArrayElementBuffer(vao, buffer_cache.GetHandle()); - // Use the vertex array as-is, assumes that the data is formatted correctly for OpenGL. // Enables the first 16 vertex attributes always, as we don't know which ones are actually // used until shader time. Note, Tegra technically supports 32, but we're capping this to 16 @@ -168,7 +184,7 @@ GLuint RasterizerOpenGL::SetupVertexFormat() { } // Rebinding the VAO invalidates the vertex buffer bindings. - gpu.dirty_flags.vertex_array.set(); + gpu.dirty.ResetVertexArrays(); state.draw.vertex_array = vao_entry.handle; return vao_entry.handle; @@ -176,17 +192,20 @@ GLuint RasterizerOpenGL::SetupVertexFormat() { void RasterizerOpenGL::SetupVertexBuffer(GLuint vao) { auto& gpu = system.GPU().Maxwell3D(); - const auto& regs = gpu.regs; - - if (gpu.dirty_flags.vertex_array.none()) + if (!gpu.dirty.vertex_array_buffers) return; + gpu.dirty.vertex_array_buffers = false; + + const auto& regs = gpu.regs; MICROPROFILE_SCOPE(OpenGL_VB); // Upload all guest vertex arrays sequentially to our buffer for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) { - if (!gpu.dirty_flags.vertex_array[index]) + if (!gpu.dirty.vertex_array[index]) continue; + gpu.dirty.vertex_array[index] = false; + gpu.dirty.vertex_instance[index] = false; const auto& vertex_array = regs.vertex_array[index]; if (!vertex_array.IsEnabled()) @@ -197,11 +216,11 @@ void RasterizerOpenGL::SetupVertexBuffer(GLuint vao) { ASSERT(end > start); const u64 size = end - start + 1; - const GLintptr vertex_buffer_offset = buffer_cache.UploadMemory(start, size); + const auto [vertex_buffer, vertex_buffer_offset] = buffer_cache.UploadMemory(start, size); // Bind the vertex array to the buffer at the current offset. - glVertexArrayVertexBuffer(vao, index, buffer_cache.GetHandle(), vertex_buffer_offset, - vertex_array.stride); + vertex_array_pushbuffer.SetVertexBuffer(index, vertex_buffer, vertex_buffer_offset, + vertex_array.stride); if (regs.instanced_arrays.IsInstancingEnabled(index) && vertex_array.divisor != 0) { // Enable vertex buffer instancing with the specified divisor. 
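Aside, a minimal sketch of the dirty-flag idiom used by SetupVertexBuffer above: a coarse flag short-circuits the whole pass, and per-slot flags limit work to the bindings that actually changed, each flag being cleared as it is consumed. The names and array size below are illustrative only, not the engine's actual types.

#include <array>
#include <cstddef>

struct DirtyFlags {
    bool vertex_array_buffers = true;     // coarse flag guarding the whole group
    std::array<bool, 32> vertex_array{};  // fine-grained flag per binding slot
};

void SyncVertexBuffers(DirtyFlags& dirty) {
    if (!dirty.vertex_array_buffers) {
        return; // nothing changed since the last draw
    }
    dirty.vertex_array_buffers = false;

    for (std::size_t index = 0; index < dirty.vertex_array.size(); ++index) {
        if (!dirty.vertex_array[index]) {
            continue;
        }
        dirty.vertex_array[index] = false;
        // ... re-upload and re-bind only this vertex buffer slot ...
    }
}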
@@ -211,11 +230,47 @@ void RasterizerOpenGL::SetupVertexBuffer(GLuint vao) { glVertexArrayBindingDivisor(vao, index, 0); } } +} - gpu.dirty_flags.vertex_array.reset(); +void RasterizerOpenGL::SetupVertexInstances(GLuint vao) { + auto& gpu = system.GPU().Maxwell3D(); + + if (!gpu.dirty.vertex_instances) + return; + gpu.dirty.vertex_instances = false; + + const auto& regs = gpu.regs; + // Upload all guest vertex arrays sequentially to our buffer + for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) { + if (!gpu.dirty.vertex_instance[index]) + continue; + + gpu.dirty.vertex_instance[index] = false; + + if (regs.instanced_arrays.IsInstancingEnabled(index) && + regs.vertex_array[index].divisor != 0) { + // Enable vertex buffer instancing with the specified divisor. + glVertexArrayBindingDivisor(vao, index, regs.vertex_array[index].divisor); + } else { + // Disable the vertex buffer instancing. + glVertexArrayBindingDivisor(vao, index, 0); + } + } } -DrawParameters RasterizerOpenGL::SetupDraw() { +GLintptr RasterizerOpenGL::SetupIndexBuffer() { + if (accelerate_draw != AccelDraw::Indexed) { + return 0; + } + MICROPROFILE_SCOPE(OpenGL_Index); + const auto& regs = system.GPU().Maxwell3D().regs; + const std::size_t size = CalculateIndexBufferSize(); + const auto [buffer, offset] = buffer_cache.UploadMemory(regs.index_array.IndexStart(), size); + vertex_array_pushbuffer.SetIndexBuffer(buffer); + return offset; +} + +DrawParameters RasterizerOpenGL::SetupDraw(GLintptr index_buffer_offset) { const auto& gpu = system.GPU().Maxwell3D(); const auto& regs = gpu.regs; const bool is_indexed = accelerate_draw == AccelDraw::Indexed; @@ -227,11 +282,9 @@ DrawParameters RasterizerOpenGL::SetupDraw() { params.primitive_mode = MaxwellToGL::PrimitiveTopology(regs.draw.topology); if (is_indexed) { - MICROPROFILE_SCOPE(OpenGL_Index); params.index_format = MaxwellToGL::IndexFormat(regs.index_array.format); params.count = regs.index_array.count; - params.index_buffer_offset = - buffer_cache.UploadMemory(regs.index_array.IndexStart(), CalculateIndexBufferSize()); + params.index_buffer_offset = index_buffer_offset; params.base_vertex = static_cast<GLint>(regs.vb_element_base); } else { params.count = regs.vertex_buffer.count; @@ -247,10 +300,6 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { BaseBindings base_bindings; std::array<bool, Maxwell::NumClipDistances> clip_distances{}; - // Prepare packed bindings - bind_ubo_pushbuffer.Setup(base_bindings.cbuf); - bind_ssbo_pushbuffer.Setup(base_bindings.gmem); - for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) { const auto& shader_config = gpu.regs.shader_config[index]; const Maxwell::ShaderProgram program{static_cast<Maxwell::ShaderProgram>(index)}; @@ -271,18 +320,17 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { GLShader::MaxwellUniformData ubo{}; ubo.SetFromRegs(gpu, stage); - const GLintptr offset = + const auto [buffer, offset] = buffer_cache.UploadHostMemory(&ubo, sizeof(ubo), device.GetUniformBufferAlignment()); // Bind the emulation info buffer - bind_ubo_pushbuffer.Push(buffer_cache.GetHandle(), offset, - static_cast<GLsizeiptr>(sizeof(ubo))); + bind_ubo_pushbuffer.Push(buffer, offset, static_cast<GLsizeiptr>(sizeof(ubo))); Shader shader{shader_cache.GetStageProgram(program)}; - const auto stage_enum{static_cast<Maxwell::ShaderStage>(stage)}; + const auto stage_enum = static_cast<Maxwell::ShaderStage>(stage); SetupDrawConstBuffers(stage_enum, shader); - SetupGlobalRegions(stage_enum, shader); + 
SetupDrawGlobalMemory(stage_enum, shader); const auto texture_buffer_usage{SetupTextures(stage_enum, shader, base_bindings)}; const ProgramVariant variant{base_bindings, primitive_mode, texture_buffer_usage}; @@ -321,12 +369,9 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) { base_bindings = next_bindings; } - bind_ubo_pushbuffer.Bind(); - bind_ssbo_pushbuffer.Bind(); - SyncClipEnabled(clip_distances); - gpu.dirty_flags.shaders = false; + gpu.dirty.shaders = false; } std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const { @@ -409,13 +454,13 @@ std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers( const FramebufferConfigState fb_config_state{using_color_fb, using_depth_fb, preserve_contents, single_color_target}; - if (fb_config_state == current_framebuffer_config_state && - gpu.dirty_flags.color_buffer.none() && !gpu.dirty_flags.zeta_buffer) { + if (fb_config_state == current_framebuffer_config_state && !gpu.dirty.render_settings) { // Only skip if the previous ConfigureFramebuffers call was from the same kind (multiple or // single color targets). This is done because the guest registers may not change but the // host framebuffer may contain different attachments return current_depth_stencil_usage; } + gpu.dirty.render_settings = false; current_framebuffer_config_state = fb_config_state; texture_cache.GuardRenderTargets(true); @@ -504,13 +549,71 @@ std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers( return current_depth_stencil_usage = {static_cast<bool>(depth_surface), fbkey.stencil_enable}; } +void RasterizerOpenGL::ConfigureClearFramebuffer(OpenGLState& current_state, bool using_color_fb, + bool using_depth_fb, bool using_stencil_fb) { + auto& gpu = system.GPU().Maxwell3D(); + const auto& regs = gpu.regs; + + texture_cache.GuardRenderTargets(true); + View color_surface{}; + if (using_color_fb) { + color_surface = texture_cache.GetColorBufferSurface(regs.clear_buffers.RT, false); + } + View depth_surface{}; + if (using_depth_fb || using_stencil_fb) { + depth_surface = texture_cache.GetDepthBufferSurface(false); + } + texture_cache.GuardRenderTargets(false); + + current_state.draw.draw_framebuffer = clear_framebuffer.handle; + current_state.ApplyFramebufferState(); + + if (color_surface) { + color_surface->Attach(GL_COLOR_ATTACHMENT0, GL_DRAW_FRAMEBUFFER); + } else { + glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0); + } + + if (depth_surface) { + const auto& params = depth_surface->GetSurfaceParams(); + switch (params.type) { + case VideoCore::Surface::SurfaceType::Depth: { + depth_surface->Attach(GL_DEPTH_ATTACHMENT, GL_DRAW_FRAMEBUFFER); + glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0); + break; + } + case VideoCore::Surface::SurfaceType::DepthStencil: { + depth_surface->Attach(GL_DEPTH_ATTACHMENT, GL_DRAW_FRAMEBUFFER); + break; + } + default: { UNIMPLEMENTED(); } + } + } else { + glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, + 0); + } +} + void RasterizerOpenGL::Clear() { - const auto& regs = system.GPU().Maxwell3D().regs; + const auto& maxwell3d = system.GPU().Maxwell3D(); + + if (!maxwell3d.ShouldExecute()) { + return; + } + + const auto& regs = maxwell3d.regs; bool use_color{}; bool use_depth{}; bool use_stencil{}; - OpenGLState clear_state; + OpenGLState prev_state{OpenGLState::GetCurState()}; + SCOPE_EXIT({ + prev_state.AllDirty(); + prev_state.Apply(); + }); + + OpenGLState 
clear_state{OpenGLState::GetCurState()}; + clear_state.SetDefaultViewports(); if (regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B || regs.clear_buffers.A) { use_color = true; @@ -530,6 +633,7 @@ void RasterizerOpenGL::Clear() { // true. clear_state.depth.test_enabled = true; clear_state.depth.test_func = GL_ALWAYS; + clear_state.depth.write_mask = GL_TRUE; } if (regs.clear_buffers.S) { ASSERT_MSG(regs.zeta_enable != 0, "Tried to clear stencil but buffer is not enabled!"); @@ -566,8 +670,9 @@ void RasterizerOpenGL::Clear() { return; } - const auto [clear_depth, clear_stencil] = ConfigureFramebuffers( - clear_state, use_color, use_depth || use_stencil, false, regs.clear_buffers.RT.Value()); + ConfigureClearFramebuffer(clear_state, use_color, use_depth, use_stencil); + + SyncViewport(clear_state); if (regs.clear_flags.scissor) { SyncScissorTest(clear_state); } @@ -576,21 +681,18 @@ void RasterizerOpenGL::Clear() { clear_state.EmulateViewportWithScissor(); } - clear_state.ApplyColorMask(); - clear_state.ApplyDepth(); - clear_state.ApplyStencilTest(); - clear_state.ApplyViewport(); - clear_state.ApplyFramebufferState(); + clear_state.AllDirty(); + clear_state.Apply(); if (use_color) { - glClearBufferfv(GL_COLOR, regs.clear_buffers.RT, regs.clear_color); + glClearBufferfv(GL_COLOR, 0, regs.clear_color); } - if (clear_depth && clear_stencil) { + if (use_depth && use_stencil) { glClearBufferfi(GL_DEPTH_STENCIL, 0, regs.clear_depth, regs.clear_stencil); - } else if (clear_depth) { + } else if (use_depth) { glClearBufferfv(GL_DEPTH, 0, &regs.clear_depth); - } else if (clear_stencil) { + } else if (use_stencil) { glClearBufferiv(GL_STENCIL, 0, &regs.clear_stencil); } } @@ -601,7 +703,10 @@ void RasterizerOpenGL::DrawArrays() { MICROPROFILE_SCOPE(OpenGL_Drawing); auto& gpu = system.GPU().Maxwell3D(); - const auto& regs = gpu.regs; + + if (!gpu.ShouldExecute()) { + return; + } SyncColorMask(); SyncFragmentColorClampState(); @@ -634,26 +739,47 @@ void RasterizerOpenGL::DrawArrays() { Maxwell::MaxShaderStage; // Add space for at least 18 constant buffers - buffer_size += - Maxwell::MaxConstBuffers * (MaxConstbufferSize + device.GetUniformBufferAlignment()); + buffer_size += Maxwell::MaxConstBuffers * + (Maxwell::MaxConstBufferSize + device.GetUniformBufferAlignment()); - const bool invalidate = buffer_cache.Map(buffer_size); - if (invalidate) { - // As all cached buffers are invalidated, we need to recheck their state. - gpu.dirty_flags.vertex_array.set(); - } + // Prepare the vertex array. + buffer_cache.Map(buffer_size); + // Prepare vertex array format. const GLuint vao = SetupVertexFormat(); + vertex_array_pushbuffer.Setup(vao); + + // Upload vertex and index data. SetupVertexBuffer(vao); + SetupVertexInstances(vao); + const GLintptr index_buffer_offset = SetupIndexBuffer(); + + // Setup draw parameters. It will automatically choose what glDraw* method to use. + const DrawParameters params = SetupDraw(index_buffer_offset); - DrawParameters params = SetupDraw(); + // Prepare packed bindings. + bind_ubo_pushbuffer.Setup(0); + bind_ssbo_pushbuffer.Setup(0); + + // Setup shaders and their used resources. texture_cache.GuardSamplers(true); SetupShaders(params.primitive_mode); texture_cache.GuardSamplers(false); ConfigureFramebuffers(state); - buffer_cache.Unmap(); + // Signal the buffer cache that we are not going to upload more things. + const bool invalidate = buffer_cache.Unmap(); + + // Now that we are no longer uploading data, we can safely bind the buffers to OpenGL. 
+ vertex_array_pushbuffer.Bind(); + bind_ubo_pushbuffer.Bind(); + bind_ssbo_pushbuffer.Bind(); + + if (invalidate) { + // As all cached buffers are invalidated, we need to recheck their state. + gpu.dirty.ResetVertexArrays(); + } shader_program_manager->ApplyTo(state); state.Apply(); @@ -665,6 +791,46 @@ void RasterizerOpenGL::DrawArrays() { params.DispatchDraw(); accelerate_draw = AccelDraw::Disabled; + gpu.dirty.memory_general = false; +} + +void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) { + if (!GLAD_GL_ARB_compute_variable_group_size) { + LOG_ERROR(Render_OpenGL, "Compute is currently not supported on this device due to the " + "lack of GL_ARB_compute_variable_group_size"); + return; + } + + auto kernel = shader_cache.GetComputeKernel(code_addr); + const auto [program, next_bindings] = kernel->GetProgramHandle({}); + state.draw.shader_program = program; + state.draw.program_pipeline = 0; + + const std::size_t buffer_size = + Tegra::Engines::KeplerCompute::NumConstBuffers * + (Maxwell::MaxConstBufferSize + device.GetUniformBufferAlignment()); + buffer_cache.Map(buffer_size); + + bind_ubo_pushbuffer.Setup(0); + bind_ssbo_pushbuffer.Setup(0); + + SetupComputeConstBuffers(kernel); + SetupComputeGlobalMemory(kernel); + + // TODO(Rodrigo): Bind images and samplers + + buffer_cache.Unmap(); + + bind_ubo_pushbuffer.Bind(); + bind_ssbo_pushbuffer.Bind(); + + state.ApplyShaderProgram(); + state.ApplyProgramPipeline(); + + const auto& launch_desc = system.GPU().KeplerCompute().launch_description; + glDispatchComputeGroupSizeARB(launch_desc.grid_dim_x, launch_desc.grid_dim_y, + launch_desc.grid_dim_z, launch_desc.block_dim_x, + launch_desc.block_dim_y, launch_desc.block_dim_z); } void RasterizerOpenGL::FlushAll() {} @@ -675,7 +841,7 @@ void RasterizerOpenGL::FlushRegion(CacheAddr addr, u64 size) { return; } texture_cache.FlushRegion(addr, size); - global_cache.FlushRegion(addr, size); + buffer_cache.FlushRegion(addr, size); } void RasterizerOpenGL::InvalidateRegion(CacheAddr addr, u64 size) { @@ -685,7 +851,6 @@ void RasterizerOpenGL::InvalidateRegion(CacheAddr addr, u64 size) { } texture_cache.InvalidateRegion(addr, size); shader_cache.InvalidateRegion(addr, size); - global_cache.InvalidateRegion(addr, size); buffer_cache.InvalidateRegion(addr, size); } @@ -696,6 +861,14 @@ void RasterizerOpenGL::FlushAndInvalidateRegion(CacheAddr addr, u64 size) { InvalidateRegion(addr, size); } +void RasterizerOpenGL::FlushCommands() { + glFlush(); +} + +void RasterizerOpenGL::TickFrame() { + buffer_cache.TickFrame(); +} + bool RasterizerOpenGL::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src, const Tegra::Engines::Fermi2D::Regs::Surface& dst, const Tegra::Engines::Fermi2D::Config& copy_config) { @@ -737,14 +910,25 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config, void RasterizerOpenGL::SetupDrawConstBuffers(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, const Shader& shader) { MICROPROFILE_SCOPE(OpenGL_UBO); - const auto stage_index = static_cast<std::size_t>(stage); - const auto& shader_stage = system.GPU().Maxwell3D().state.shader_stages[stage_index]; - const auto& entries = shader->GetShaderEntries().const_buffers; + const auto& stages = system.GPU().Maxwell3D().state.shader_stages; + const auto& shader_stage = stages[static_cast<std::size_t>(stage)]; + for (const auto& entry : shader->GetShaderEntries().const_buffers) { + const auto& buffer = shader_stage.const_buffers[entry.GetIndex()]; + SetupConstBuffer(buffer, entry); + } +} - 
// Upload only the enabled buffers from the 16 constbuffers of each shader stage - for (u32 bindpoint = 0; bindpoint < entries.size(); ++bindpoint) { - const auto& entry = entries[bindpoint]; - SetupConstBuffer(shader_stage.const_buffers[entry.GetIndex()], entry); +void RasterizerOpenGL::SetupComputeConstBuffers(const Shader& kernel) { + MICROPROFILE_SCOPE(OpenGL_UBO); + const auto& launch_desc = system.GPU().KeplerCompute().launch_description; + for (const auto& entry : kernel->GetShaderEntries().const_buffers) { + const auto& config = launch_desc.const_buffer_config[entry.GetIndex()]; + const std::bitset<8> mask = launch_desc.memory_config.const_buffer_enable_mask.Value(); + Tegra::Engines::ConstBufferInfo buffer; + buffer.address = config.Address(); + buffer.size = config.size; + buffer.enabled = mask[entry.GetIndex()]; + SetupConstBuffer(buffer, entry); } } @@ -752,49 +936,52 @@ void RasterizerOpenGL::SetupConstBuffer(const Tegra::Engines::ConstBufferInfo& b const GLShader::ConstBufferEntry& entry) { if (!buffer.enabled) { // Set values to zero to unbind buffers - bind_ubo_pushbuffer.Push(0, 0, 0); + bind_ubo_pushbuffer.Push(buffer_cache.GetEmptyBuffer(sizeof(float)), 0, sizeof(float)); return; } - std::size_t size; - if (entry.IsIndirect()) { - // Buffer is accessed indirectly, so upload the entire thing - size = buffer.size; - - if (size > MaxConstbufferSize) { - LOG_WARNING(Render_OpenGL, "Indirect constbuffer size {} exceeds maximum {}", size, - MaxConstbufferSize); - size = MaxConstbufferSize; - } - } else { - // Buffer is accessed directly, upload just what we use - size = entry.GetSize(); - } - // Align the actual size so it ends up being a multiple of vec4 to meet the OpenGL std140 // UBO alignment requirements. - size = Common::AlignUp(size, sizeof(GLvec4)); - ASSERT_MSG(size <= MaxConstbufferSize, "Constant buffer is too big"); + const std::size_t size = Common::AlignUp(GetConstBufferSize(buffer, entry), sizeof(GLvec4)); - const std::size_t alignment = device.GetUniformBufferAlignment(); - const GLintptr offset = buffer_cache.UploadMemory(buffer.address, size, alignment); - bind_ubo_pushbuffer.Push(buffer_cache.GetHandle(), offset, size); + const auto alignment = device.GetUniformBufferAlignment(); + const auto [cbuf, offset] = buffer_cache.UploadMemory(buffer.address, size, alignment); + bind_ubo_pushbuffer.Push(cbuf, offset, size); } -void RasterizerOpenGL::SetupGlobalRegions(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, - const Shader& shader) { - const auto& entries = shader->GetShaderEntries().global_memory_entries; - for (std::size_t bindpoint = 0; bindpoint < entries.size(); ++bindpoint) { - const auto& entry{entries[bindpoint]}; - const auto& region{global_cache.GetGlobalRegion(entry, stage)}; - if (entry.IsWritten()) { - region->MarkAsModified(true, global_cache); - } - bind_ssbo_pushbuffer.Push(region->GetBufferHandle(), 0, - static_cast<GLsizeiptr>(region->GetSizeInBytes())); +void RasterizerOpenGL::SetupDrawGlobalMemory(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, + const Shader& shader) { + auto& gpu{system.GPU()}; + auto& memory_manager{gpu.MemoryManager()}; + const auto cbufs{gpu.Maxwell3D().state.shader_stages[static_cast<std::size_t>(stage)]}; + for (const auto& entry : shader->GetShaderEntries().global_memory_entries) { + const auto addr{cbufs.const_buffers[entry.GetCbufIndex()].address + entry.GetCbufOffset()}; + const auto gpu_addr{memory_manager.Read<u64>(addr)}; + const auto size{memory_manager.Read<u32>(addr + 8)}; + 
SetupGlobalMemory(entry, gpu_addr, size); + } +} + +void RasterizerOpenGL::SetupComputeGlobalMemory(const Shader& kernel) { + auto& gpu{system.GPU()}; + auto& memory_manager{gpu.MemoryManager()}; + const auto cbufs{gpu.KeplerCompute().launch_description.const_buffer_config}; + for (const auto& entry : kernel->GetShaderEntries().global_memory_entries) { + const auto addr{cbufs[entry.GetCbufIndex()].Address() + entry.GetCbufOffset()}; + const auto gpu_addr{memory_manager.Read<u64>(addr)}; + const auto size{memory_manager.Read<u32>(addr + 8)}; + SetupGlobalMemory(entry, gpu_addr, size); } } +void RasterizerOpenGL::SetupGlobalMemory(const GLShader::GlobalMemoryEntry& entry, + GPUVAddr gpu_addr, std::size_t size) { + const auto alignment{device.GetShaderStorageBufferAlignment()}; + const auto [ssbo, buffer_offset] = + buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.IsWritten()); + bind_ssbo_pushbuffer.Push(ssbo, buffer_offset, static_cast<GLsizeiptr>(size)); +} + TextureBufferUsage RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& shader, BaseBindings base_bindings) { MICROPROFILE_SCOPE(OpenGL_Texture); @@ -883,10 +1070,11 @@ void RasterizerOpenGL::SyncClipCoef() { } void RasterizerOpenGL::SyncCullMode() { - const auto& regs = system.GPU().Maxwell3D().regs; + auto& maxwell3d = system.GPU().Maxwell3D(); - state.cull.enabled = regs.cull.enabled != 0; + const auto& regs = maxwell3d.regs; + state.cull.enabled = regs.cull.enabled != 0; if (state.cull.enabled) { state.cull.front_face = MaxwellToGL::FrontFace(regs.cull.front_face); state.cull.mode = MaxwellToGL::CullFace(regs.cull.cull_face); @@ -919,16 +1107,21 @@ void RasterizerOpenGL::SyncDepthTestState() { state.depth.test_enabled = regs.depth_test_enable != 0; state.depth.write_mask = regs.depth_write_enabled ? GL_TRUE : GL_FALSE; - if (!state.depth.test_enabled) + if (!state.depth.test_enabled) { return; + } state.depth.test_func = MaxwellToGL::ComparisonOp(regs.depth_test_func); } void RasterizerOpenGL::SyncStencilTestState() { - const auto& regs = system.GPU().Maxwell3D().regs; - state.stencil.test_enabled = regs.stencil_enable != 0; + auto& maxwell3d = system.GPU().Maxwell3D(); + if (!maxwell3d.dirty.stencil_test) { + return; + } + const auto& regs = maxwell3d.regs; + state.stencil.test_enabled = regs.stencil_enable != 0; if (!regs.stencil_enable) { return; } @@ -957,10 +1150,17 @@ void RasterizerOpenGL::SyncStencilTestState() { state.stencil.back.action_depth_fail = GL_KEEP; state.stencil.back.action_depth_pass = GL_KEEP; } + state.MarkDirtyStencilState(); + maxwell3d.dirty.stencil_test = false; } void RasterizerOpenGL::SyncColorMask() { - const auto& regs = system.GPU().Maxwell3D().regs; + auto& maxwell3d = system.GPU().Maxwell3D(); + if (!maxwell3d.dirty.color_mask) { + return; + } + const auto& regs = maxwell3d.regs; + const std::size_t count = regs.independent_blend_enable ? Tegra::Engines::Maxwell3D::Regs::NumRenderTargets : 1; for (std::size_t i = 0; i < count; i++) { @@ -971,6 +1171,9 @@ void RasterizerOpenGL::SyncColorMask() { dest.blue_enabled = (source.B == 0) ? GL_FALSE : GL_TRUE; dest.alpha_enabled = (source.A == 0) ? 
GL_FALSE : GL_TRUE; } + + state.MarkDirtyColorMask(); + maxwell3d.dirty.color_mask = false; } void RasterizerOpenGL::SyncMultiSampleState() { @@ -985,7 +1188,11 @@ void RasterizerOpenGL::SyncFragmentColorClampState() { } void RasterizerOpenGL::SyncBlendState() { - const auto& regs = system.GPU().Maxwell3D().regs; + auto& maxwell3d = system.GPU().Maxwell3D(); + if (!maxwell3d.dirty.blend_state) { + return; + } + const auto& regs = maxwell3d.regs; state.blend_color.red = regs.blend_color.r; state.blend_color.green = regs.blend_color.g; @@ -1008,6 +1215,8 @@ void RasterizerOpenGL::SyncBlendState() { for (std::size_t i = 1; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) { state.blend[i].enabled = false; } + maxwell3d.dirty.blend_state = false; + state.MarkDirtyBlendState(); return; } @@ -1024,6 +1233,9 @@ void RasterizerOpenGL::SyncBlendState() { blend.src_a_func = MaxwellToGL::BlendFunc(src.factor_source_a); blend.dst_a_func = MaxwellToGL::BlendFunc(src.factor_dest_a); } + + state.MarkDirtyBlendState(); + maxwell3d.dirty.blend_state = false; } void RasterizerOpenGL::SyncLogicOpState() { @@ -1075,13 +1287,21 @@ void RasterizerOpenGL::SyncPointState() { } void RasterizerOpenGL::SyncPolygonOffset() { - const auto& regs = system.GPU().Maxwell3D().regs; + auto& maxwell3d = system.GPU().Maxwell3D(); + if (!maxwell3d.dirty.polygon_offset) { + return; + } + const auto& regs = maxwell3d.regs; + state.polygon_offset.fill_enable = regs.polygon_offset_fill_enable != 0; state.polygon_offset.line_enable = regs.polygon_offset_line_enable != 0; state.polygon_offset.point_enable = regs.polygon_offset_point_enable != 0; state.polygon_offset.units = regs.polygon_offset_units; state.polygon_offset.factor = regs.polygon_offset_factor; state.polygon_offset.clamp = regs.polygon_offset_clamp; + + state.MarkDirtyPolygonOffset(); + maxwell3d.dirty.polygon_offset = false; } void RasterizerOpenGL::SyncAlphaTest() { diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index bf67e3a70d..9d20a4fbf5 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -24,7 +24,6 @@ #include "video_core/renderer_opengl/gl_buffer_cache.h" #include "video_core/renderer_opengl/gl_device.h" #include "video_core/renderer_opengl/gl_framebuffer_cache.h" -#include "video_core/renderer_opengl/gl_global_cache.h" #include "video_core/renderer_opengl/gl_resource_manager.h" #include "video_core/renderer_opengl/gl_sampler_cache.h" #include "video_core/renderer_opengl/gl_shader_cache.h" @@ -59,10 +58,13 @@ public: void DrawArrays() override; void Clear() override; + void DispatchCompute(GPUVAddr code_addr) override; void FlushAll() override; void FlushRegion(CacheAddr addr, u64 size) override; void InvalidateRegion(CacheAddr addr, u64 size) override; void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override; + void FlushCommands() override; + void TickFrame() override; bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src, const Tegra::Engines::Fermi2D::Regs::Surface& dst, const Tegra::Engines::Fermi2D::Config& copy_config) override; @@ -73,11 +75,6 @@ public: void LoadDiskResources(const std::atomic_bool& stop_loading, const VideoCore::DiskResourceLoadCallback& callback) override; - /// Maximum supported size that a constbuffer can have in bytes. 
- static constexpr std::size_t MaxConstbufferSize = 0x10000; - static_assert(MaxConstbufferSize % sizeof(GLvec4) == 0, - "The maximum size of a constbuffer must be a multiple of the size of GLvec4"); - private: struct FramebufferConfigState { bool using_color_fb{}; @@ -98,30 +95,45 @@ private: /** * Configures the color and depth framebuffer states. - * @param must_reconfigure If true, tells the framebuffer to skip the cache and reconfigure - * again. Used by the texture cache to solve texception conflicts - * @param use_color_fb If true, configure color framebuffers. - * @param using_depth_fb If true, configure the depth/stencil framebuffer. - * @param preserve_contents If true, tries to preserve data from a previously used framebuffer. + * + * @param current_state The current OpenGL state. + * @param using_color_fb If true, configure color framebuffers. + * @param using_depth_fb If true, configure the depth/stencil framebuffer. + * @param preserve_contents If true, tries to preserve data from a previously used + * framebuffer. * @param single_color_target Specifies if a single color buffer target should be used. + * * @returns If depth (first) or stencil (second) are being stored in the bound zeta texture - * (requires using_depth_fb to be true) + * (requires using_depth_fb to be true) */ std::pair<bool, bool> ConfigureFramebuffers( - OpenGLState& current_state, bool use_color_fb = true, bool using_depth_fb = true, + OpenGLState& current_state, bool using_color_fb = true, bool using_depth_fb = true, bool preserve_contents = true, std::optional<std::size_t> single_color_target = {}); + void ConfigureClearFramebuffer(OpenGLState& current_state, bool using_color_fb, + bool using_depth_fb, bool using_stencil_fb); + /// Configures the current constbuffers to use for the draw command. void SetupDrawConstBuffers(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, const Shader& shader); + /// Configures the current constbuffers to use for the kernel invocation. + void SetupComputeConstBuffers(const Shader& kernel); + /// Configures a constant buffer. void SetupConstBuffer(const Tegra::Engines::ConstBufferInfo& buffer, const GLShader::ConstBufferEntry& entry); /// Configures the current global memory entries to use for the draw command. - void SetupGlobalRegions(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, - const Shader& shader); + void SetupDrawGlobalMemory(Tegra::Engines::Maxwell3D::Regs::ShaderStage stage, + const Shader& shader); + + /// Configures the current global memory entries to use for the kernel invocation. + void SetupComputeGlobalMemory(const Shader& kernel); + + /// Configures a constant buffer. + void SetupGlobalMemory(const GLShader::GlobalMemoryEntry& entry, GPUVAddr gpu_addr, + std::size_t size); /// Configures the current textures to use for the draw command. Returns shaders texture buffer /// usage. 
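Aside, on the constant buffer setup declared above: the rasterizer rounds each uniform buffer size up to a multiple of sizeof(GLvec4), i.e. 16 bytes, before upload so the binding meets std140 expectations, using the project's Common::AlignUp. A minimal sketch of the usual power-of-two align-up formula (illustrative only, not necessarily the project's exact implementation):

#include <cstddef>

// Rounds value up to the next multiple of alignment; alignment must be a power of two.
constexpr std::size_t AlignUp(std::size_t value, std::size_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
}

static_assert(AlignUp(20, sizeof(float) * 4) == 32, "20 bytes round up to two vec4s");
static_assert(AlignUp(32, sizeof(float) * 4) == 32, "already aligned sizes are unchanged");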
@@ -189,7 +201,6 @@ private: TextureCacheOpenGL texture_cache; ShaderCacheOpenGL shader_cache; - GlobalRegionCacheOpenGL global_cache; SamplerCacheOpenGL sampler_cache; FramebufferCacheOpenGL framebuffer_cache; @@ -208,6 +219,7 @@ private: static constexpr std::size_t STREAM_BUFFER_SIZE = 128 * 1024 * 1024; OGLBufferCache buffer_cache; + VertexArrayPushBuffer vertex_array_pushbuffer; BindBuffersRangePushBuffer bind_ubo_pushbuffer{GL_UNIFORM_BUFFER}; BindBuffersRangePushBuffer bind_ssbo_pushbuffer{GL_SHADER_STORAGE_BUFFER}; @@ -219,14 +231,19 @@ private: GLuint SetupVertexFormat(); void SetupVertexBuffer(GLuint vao); + void SetupVertexInstances(GLuint vao); - DrawParameters SetupDraw(); + GLintptr SetupIndexBuffer(); + + DrawParameters SetupDraw(GLintptr index_buffer_offset); void SetupShaders(GLenum primitive_mode); enum class AccelDraw { Disabled, Arrays, Indexed }; AccelDraw accelerate_draw = AccelDraw::Disabled; + OGLFramebuffer clear_framebuffer; + using CachedPageMap = boost::icl::interval_map<u64, int>; CachedPageMap cached_pages; }; diff --git a/src/video_core/renderer_opengl/gl_sampler_cache.h b/src/video_core/renderer_opengl/gl_sampler_cache.h index defbc2d819..34ee37f006 100644 --- a/src/video_core/renderer_opengl/gl_sampler_cache.h +++ b/src/video_core/renderer_opengl/gl_sampler_cache.h @@ -17,9 +17,9 @@ public: ~SamplerCacheOpenGL(); protected: - OGLSampler CreateSampler(const Tegra::Texture::TSCEntry& tsc) const; + OGLSampler CreateSampler(const Tegra::Texture::TSCEntry& tsc) const override; - GLuint ToSamplerType(const OGLSampler& sampler) const; + GLuint ToSamplerType(const OGLSampler& sampler) const override; }; } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index f9b2b03a0a..cf6a5cddf1 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -23,13 +23,13 @@ namespace OpenGL { using VideoCommon::Shader::ProgramCode; -// One UBO is always reserved for emulation values -constexpr u32 RESERVED_UBOS = 1; +// One UBO is always reserved for emulation values on staged shaders +constexpr u32 STAGE_RESERVED_UBOS = 1; struct UnspecializedShader { std::string code; GLShader::ShaderEntries entries; - Maxwell::ShaderProgram program_type; + ProgramType program_type; }; namespace { @@ -55,15 +55,17 @@ ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr g } /// Gets the shader type from a Maxwell program type -constexpr GLenum GetShaderType(Maxwell::ShaderProgram program_type) { +constexpr GLenum GetShaderType(ProgramType program_type) { switch (program_type) { - case Maxwell::ShaderProgram::VertexA: - case Maxwell::ShaderProgram::VertexB: + case ProgramType::VertexA: + case ProgramType::VertexB: return GL_VERTEX_SHADER; - case Maxwell::ShaderProgram::Geometry: + case ProgramType::Geometry: return GL_GEOMETRY_SHADER; - case Maxwell::ShaderProgram::Fragment: + case ProgramType::Fragment: return GL_FRAGMENT_SHADER; + case ProgramType::Compute: + return GL_COMPUTE_SHADER; default: return GL_NONE; } @@ -100,6 +102,25 @@ constexpr std::tuple<const char*, const char*, u32> GetPrimitiveDescription(GLen } } +ProgramType GetProgramType(Maxwell::ShaderProgram program) { + switch (program) { + case Maxwell::ShaderProgram::VertexA: + return ProgramType::VertexA; + case Maxwell::ShaderProgram::VertexB: + return ProgramType::VertexB; + case Maxwell::ShaderProgram::TesselationControl: + return 
ProgramType::TessellationControl; + case Maxwell::ShaderProgram::TesselationEval: + return ProgramType::TessellationEval; + case Maxwell::ShaderProgram::Geometry: + return ProgramType::Geometry; + case Maxwell::ShaderProgram::Fragment: + return ProgramType::Fragment; + } + UNREACHABLE(); + return {}; +} + /// Calculates the size of a program stream std::size_t CalculateProgramSize(const GLShader::ProgramCode& program) { constexpr std::size_t start_offset = 10; @@ -128,11 +149,13 @@ std::size_t CalculateProgramSize(const GLShader::ProgramCode& program) { } /// Hashes one (or two) program streams -u64 GetUniqueIdentifier(Maxwell::ShaderProgram program_type, const ProgramCode& code, - const ProgramCode& code_b) { - u64 unique_identifier = - Common::CityHash64(reinterpret_cast<const char*>(code.data()), CalculateProgramSize(code)); - if (program_type != Maxwell::ShaderProgram::VertexA) { +u64 GetUniqueIdentifier(ProgramType program_type, const ProgramCode& code, + const ProgramCode& code_b, std::size_t size_a = 0, std::size_t size_b = 0) { + if (size_a == 0) { + size_a = CalculateProgramSize(code); + } + u64 unique_identifier = Common::CityHash64(reinterpret_cast<const char*>(code.data()), size_a); + if (program_type != ProgramType::VertexA) { return unique_identifier; } // VertexA programs include two programs @@ -140,50 +163,69 @@ u64 GetUniqueIdentifier(Maxwell::ShaderProgram program_type, const ProgramCode& std::size_t seed = 0; boost::hash_combine(seed, unique_identifier); - const u64 identifier_b = Common::CityHash64(reinterpret_cast<const char*>(code_b.data()), - CalculateProgramSize(code_b)); + if (size_b == 0) { + size_b = CalculateProgramSize(code_b); + } + const u64 identifier_b = + Common::CityHash64(reinterpret_cast<const char*>(code_b.data()), size_b); boost::hash_combine(seed, identifier_b); return static_cast<u64>(seed); } /// Creates an unspecialized program from code streams -GLShader::ProgramResult CreateProgram(const Device& device, Maxwell::ShaderProgram program_type, +GLShader::ProgramResult CreateProgram(const Device& device, ProgramType program_type, ProgramCode program_code, ProgramCode program_code_b) { GLShader::ShaderSetup setup(program_code); - if (program_type == Maxwell::ShaderProgram::VertexA) { + setup.program.size_a = CalculateProgramSize(program_code); + setup.program.size_b = 0; + if (program_type == ProgramType::VertexA) { // VertexB is always enabled, so when VertexA is enabled, we have two vertex shaders. // Conventional HW does not support this, so we combine VertexA and VertexB into one // stage here. 
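The GetUniqueIdentifier change a little above now accepts precomputed sizes and keys on ProgramType, but the VertexA case still folds both program streams into a single identifier. A rough sketch of that combination, using a stand-in hash where the real code uses Common::CityHash64 and boost::hash_combine (HashStream and the FNV constants below are illustrative only):

#include <cstddef>
#include <cstdint>
#include <vector>

using u64 = std::uint64_t;
using ProgramCode = std::vector<u64>;

// Stand-in for Common::CityHash64: any 64-bit hash over the code stream works
// for the purposes of this sketch.
u64 HashStream(const ProgramCode& code, std::size_t words) {
    u64 seed = 14695981039346656037ull; // FNV-1a, purely illustrative
    for (std::size_t i = 0; i < words; ++i) {
        seed = (seed ^ code[i]) * 1099511628211ull;
    }
    return seed;
}

// Same shape as boost::hash_combine, which the patch uses to fold VertexB in.
void HashCombine(std::size_t& seed, u64 value) {
    seed ^= static_cast<std::size_t>(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

u64 GetUniqueIdentifierSketch(bool is_vertex_a, const ProgramCode& code,
                              const ProgramCode& code_b) {
    const u64 id = HashStream(code, code.size());
    if (!is_vertex_a) {
        return id; // single program stream: one hash is enough
    }
    // VertexA carries two streams, so both hashes are combined into one identifier.
    std::size_t seed = 0;
    HashCombine(seed, id);
    HashCombine(seed, HashStream(code_b, code_b.size()));
    return static_cast<u64>(seed);
}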
setup.SetProgramB(program_code_b); + setup.program.size_b = CalculateProgramSize(program_code_b); } - setup.program.unique_identifier = - GetUniqueIdentifier(program_type, program_code, program_code_b); + setup.program.unique_identifier = GetUniqueIdentifier( + program_type, program_code, program_code_b, setup.program.size_a, setup.program.size_b); switch (program_type) { - case Maxwell::ShaderProgram::VertexA: - case Maxwell::ShaderProgram::VertexB: + case ProgramType::VertexA: + case ProgramType::VertexB: return GLShader::GenerateVertexShader(device, setup); - case Maxwell::ShaderProgram::Geometry: + case ProgramType::Geometry: return GLShader::GenerateGeometryShader(device, setup); - case Maxwell::ShaderProgram::Fragment: + case ProgramType::Fragment: return GLShader::GenerateFragmentShader(device, setup); + case ProgramType::Compute: + return GLShader::GenerateComputeShader(device, setup); default: - LOG_CRITICAL(HW_GPU, "Unimplemented program_type={}", static_cast<u32>(program_type)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unimplemented program_type={}", static_cast<u32>(program_type)); return {}; } } CachedProgram SpecializeShader(const std::string& code, const GLShader::ShaderEntries& entries, - Maxwell::ShaderProgram program_type, const ProgramVariant& variant, + ProgramType program_type, const ProgramVariant& variant, bool hint_retrievable = false) { auto base_bindings{variant.base_bindings}; const auto primitive_mode{variant.primitive_mode}; const auto texture_buffer_usage{variant.texture_buffer_usage}; std::string source = "#version 430 core\n" - "#extension GL_ARB_separate_shader_objects : enable\n\n"; - source += fmt::format("#define EMULATION_UBO_BINDING {}\n", base_bindings.cbuf++); + "#extension GL_ARB_separate_shader_objects : enable\n" + "#extension GL_NV_gpu_shader5 : enable\n" + "#extension GL_NV_shader_thread_group : enable\n"; + if (entries.shader_viewport_layer_array) { + source += "#extension GL_ARB_shader_viewport_layer_array : enable\n"; + } + if (program_type == ProgramType::Compute) { + source += "#extension GL_ARB_compute_variable_group_size : require\n"; + } + source += '\n'; + + if (program_type != ProgramType::Compute) { + source += fmt::format("#define EMULATION_UBO_BINDING {}\n", base_bindings.cbuf++); + } for (const auto& cbuf : entries.const_buffers) { source += @@ -207,17 +249,24 @@ CachedProgram SpecializeShader(const std::string& code, const GLShader::ShaderEn if (!texture_buffer_usage.test(i)) { continue; } - source += fmt::format("#define SAMPLER_{}_IS_BUFFER", i); + source += fmt::format("#define SAMPLER_{}_IS_BUFFER\n", i); + } + if (texture_buffer_usage.any()) { + source += '\n'; } - if (program_type == Maxwell::ShaderProgram::Geometry) { + if (program_type == ProgramType::Geometry) { const auto [glsl_topology, debug_name, max_vertices] = GetPrimitiveDescription(primitive_mode); - source += "layout (" + std::string(glsl_topology) + ") in;\n"; + source += "layout (" + std::string(glsl_topology) + ") in;\n\n"; source += "#define MAX_VERTEX_INPUT " + std::to_string(max_vertices) + '\n'; } + if (program_type == ProgramType::Compute) { + source += "layout (local_size_variable) in;\n"; + } + source += '\n'; source += code; OGLShader shader; @@ -244,9 +293,9 @@ std::set<GLenum> GetSupportedFormats() { } // Anonymous namespace -CachedShader::CachedShader(const ShaderParameters& params, Maxwell::ShaderProgram program_type, +CachedShader::CachedShader(const ShaderParameters& params, ProgramType program_type, GLShader::ProgramResult result) - : 
RasterizerCacheObject{params.host_ptr}, host_ptr{params.host_ptr}, cpu_addr{params.cpu_addr}, + : RasterizerCacheObject{params.host_ptr}, cpu_addr{params.cpu_addr}, unique_identifier{params.unique_identifier}, program_type{program_type}, disk_cache{params.disk_cache}, precompiled_programs{params.precompiled_programs}, entries{result.second}, code{std::move(result.first)}, shader_length{entries.shader_length} {} @@ -257,29 +306,50 @@ Shader CachedShader::CreateStageFromMemory(const ShaderParameters& params, ProgramCode&& program_code_b) { const auto code_size{CalculateProgramSize(program_code)}; const auto code_size_b{CalculateProgramSize(program_code_b)}; - auto result{CreateProgram(params.device, program_type, program_code, program_code_b)}; + auto result{ + CreateProgram(params.device, GetProgramType(program_type), program_code, program_code_b)}; if (result.first.empty()) { // TODO(Rodrigo): Unimplemented shader stages hit here, avoid using these for now return {}; } params.disk_cache.SaveRaw(ShaderDiskCacheRaw( - params.unique_identifier, program_type, static_cast<u32>(code_size / sizeof(u64)), - static_cast<u32>(code_size_b / sizeof(u64)), std::move(program_code), - std::move(program_code_b))); + params.unique_identifier, GetProgramType(program_type), + static_cast<u32>(code_size / sizeof(u64)), static_cast<u32>(code_size_b / sizeof(u64)), + std::move(program_code), std::move(program_code_b))); - return std::shared_ptr<CachedShader>(new CachedShader(params, program_type, std::move(result))); + return std::shared_ptr<CachedShader>( + new CachedShader(params, GetProgramType(program_type), std::move(result))); } Shader CachedShader::CreateStageFromCache(const ShaderParameters& params, Maxwell::ShaderProgram program_type, GLShader::ProgramResult result) { - return std::shared_ptr<CachedShader>(new CachedShader(params, program_type, std::move(result))); + return std::shared_ptr<CachedShader>( + new CachedShader(params, GetProgramType(program_type), std::move(result))); +} + +Shader CachedShader::CreateKernelFromMemory(const ShaderParameters& params, ProgramCode&& code) { + auto result{CreateProgram(params.device, ProgramType::Compute, code, {})}; + + const auto code_size{CalculateProgramSize(code)}; + params.disk_cache.SaveRaw(ShaderDiskCacheRaw(params.unique_identifier, ProgramType::Compute, + static_cast<u32>(code_size / sizeof(u64)), 0, + std::move(code), {})); + + return std::shared_ptr<CachedShader>( + new CachedShader(params, ProgramType::Compute, std::move(result))); +} + +Shader CachedShader::CreateKernelFromCache(const ShaderParameters& params, + GLShader::ProgramResult result) { + return std::shared_ptr<CachedShader>( + new CachedShader(params, ProgramType::Compute, std::move(result))); } std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(const ProgramVariant& variant) { GLuint handle{}; - if (program_type == Maxwell::ShaderProgram::Geometry) { + if (program_type == ProgramType::Geometry) { handle = GetGeometryShader(variant); } else { const auto [entry, is_cache_miss] = programs.try_emplace(variant); @@ -297,8 +367,11 @@ std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(const ProgramVar handle = program->handle; } - auto base_bindings{variant.base_bindings}; - base_bindings.cbuf += static_cast<u32>(entries.const_buffers.size()) + RESERVED_UBOS; + auto base_bindings = variant.base_bindings; + base_bindings.cbuf += static_cast<u32>(entries.const_buffers.size()); + if (program_type != ProgramType::Compute) { + base_bindings.cbuf += STAGE_RESERVED_UBOS; + } 
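GetProgramHandle above now skips the reserved emulation UBO when the program is a compute kernel, so kernels start their user constant buffers one binding earlier than graphics stages. A minimal sketch of that bookkeeping (BaseBindingsSketch and AdvanceBindings are illustrative names, not from the patch):

#include <cstdint>

using u32 = std::uint32_t;

// Each stage consumes its own slots and hands the advanced offsets to the next
// stage. Only graphics stages reserve the extra emulation-values UBO.
struct BaseBindingsSketch {
    u32 cbuf = 0;
    u32 gmem = 0;
    u32 sampler = 0;
};

constexpr u32 STAGE_RESERVED_UBOS = 1; // emulation UBO, graphics stages only

BaseBindingsSketch AdvanceBindings(BaseBindingsSketch base, bool is_compute,
                                   u32 num_const_buffers, u32 num_global_memory,
                                   u32 num_samplers) {
    base.cbuf += num_const_buffers + (is_compute ? 0 : STAGE_RESERVED_UBOS);
    base.gmem += num_global_memory;
    base.sampler += num_samplers;
    return base;
}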
base_bindings.gmem += static_cast<u32>(entries.global_memory_entries.size()); base_bindings.sampler += static_cast<u32>(entries.samplers.size()); @@ -561,7 +634,7 @@ std::unordered_map<u64, UnspecializedShader> ShaderCacheOpenGL::GenerateUnspecia } Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) { - if (!system.GPU().Maxwell3D().dirty_flags.shaders) { + if (!system.GPU().Maxwell3D().dirty.shaders) { return last_shaders[static_cast<std::size_t>(program)]; } @@ -578,13 +651,15 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) { // No shader found - create a new one ProgramCode program_code{GetShaderCode(memory_manager, program_addr, host_ptr)}; ProgramCode program_code_b; - if (program == Maxwell::ShaderProgram::VertexA) { + const bool is_program_a{program == Maxwell::ShaderProgram::VertexA}; + if (is_program_a) { const GPUVAddr program_addr_b{GetShaderAddress(system, Maxwell::ShaderProgram::VertexB)}; program_code_b = GetShaderCode(memory_manager, program_addr_b, memory_manager.GetPointer(program_addr_b)); } - const auto unique_identifier = GetUniqueIdentifier(program, program_code, program_code_b); + const auto unique_identifier = + GetUniqueIdentifier(GetProgramType(program), program_code, program_code_b); const auto cpu_addr{*memory_manager.GpuToCpuAddress(program_addr)}; const ShaderParameters params{disk_cache, precompiled_programs, device, cpu_addr, host_ptr, unique_identifier}; @@ -601,4 +676,30 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) { return last_shaders[static_cast<std::size_t>(program)] = shader; } +Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) { + auto& memory_manager{system.GPU().MemoryManager()}; + const auto host_ptr{memory_manager.GetPointer(code_addr)}; + auto kernel = TryGet(host_ptr); + if (kernel) { + return kernel; + } + + // No kernel found - create a new one + auto code{GetShaderCode(memory_manager, code_addr, host_ptr)}; + const auto unique_identifier{GetUniqueIdentifier(ProgramType::Compute, code, {})}; + const auto cpu_addr{*memory_manager.GpuToCpuAddress(code_addr)}; + const ShaderParameters params{disk_cache, precompiled_programs, device, cpu_addr, + host_ptr, unique_identifier}; + + const auto found = precompiled_shaders.find(unique_identifier); + if (found == precompiled_shaders.end()) { + kernel = CachedShader::CreateKernelFromMemory(params, std::move(code)); + } else { + kernel = CachedShader::CreateKernelFromCache(params, found->second); + } + + Register(kernel); + return kernel; +} + } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h index bbb53cdf4e..2c8faf8552 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_cache.h @@ -61,6 +61,11 @@ public: Maxwell::ShaderProgram program_type, GLShader::ProgramResult result); + static Shader CreateKernelFromMemory(const ShaderParameters& params, ProgramCode&& code); + + static Shader CreateKernelFromCache(const ShaderParameters& params, + GLShader::ProgramResult result); + VAddr GetCpuAddr() const override { return cpu_addr; } @@ -78,7 +83,7 @@ public: std::tuple<GLuint, BaseBindings> GetProgramHandle(const ProgramVariant& variant); private: - explicit CachedShader(const ShaderParameters& params, Maxwell::ShaderProgram program_type, + explicit CachedShader(const ShaderParameters& params, ProgramType program_type, GLShader::ProgramResult result); // Geometry programs. 
These are needed because GLSL needs an input topology but it's not @@ -101,10 +106,9 @@ private: ShaderDiskCacheUsage GetUsage(const ProgramVariant& variant) const; - u8* host_ptr{}; VAddr cpu_addr{}; u64 unique_identifier{}; - Maxwell::ShaderProgram program_type{}; + ProgramType program_type{}; ShaderDiskCacheOpenGL& disk_cache; const PrecompiledPrograms& precompiled_programs; @@ -132,6 +136,9 @@ public: /// Gets the current specified shader stage program Shader GetStageProgram(Maxwell::ShaderProgram program); + /// Gets a compute kernel in the passed address + Shader GetComputeKernel(GPUVAddr code_addr); + protected: // We do not have to flush this cache as things in it are never modified by us. void FlushObjectInner(const Shader& object) override {} diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index 5f2f1510cd..a5cc1a86f2 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp @@ -14,6 +14,7 @@ #include "common/alignment.h" #include "common/assert.h" #include "common/common_types.h" +#include "common/logging/log.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_opengl/gl_device.h" #include "video_core/renderer_opengl/gl_rasterizer.h" @@ -36,19 +37,18 @@ using namespace std::string_literals; using namespace VideoCommon::Shader; using Maxwell = Tegra::Engines::Maxwell3D::Regs; -using ShaderStage = Tegra::Engines::Maxwell3D::Regs::ShaderStage; using Operation = const OperationNode&; -enum class Type { Bool, Bool2, Float, Int, Uint, HalfFloat }; +enum class Type { Void, Bool, Bool2, Float, Int, Uint, HalfFloat }; struct TextureAoffi {}; using TextureArgument = std::pair<Type, Node>; using TextureIR = std::variant<TextureAoffi, TextureArgument>; constexpr u32 MAX_CONSTBUFFER_ELEMENTS = - static_cast<u32>(RasterizerOpenGL::MaxConstbufferSize) / (4 * sizeof(float)); + static_cast<u32>(Maxwell::MaxConstBufferSize) / (4 * sizeof(float)); -class ShaderWriter { +class ShaderWriter final { public: void AddExpression(std::string_view text) { DEBUG_ASSERT(scope >= 0); @@ -93,9 +93,157 @@ private: u32 temporary_index = 1; }; +class Expression final { +public: + Expression(std::string code, Type type) : code{std::move(code)}, type{type} { + ASSERT(type != Type::Void); + } + Expression() : type{Type::Void} {} + + Type GetType() const { + return type; + } + + std::string GetCode() const { + return code; + } + + void CheckVoid() const { + ASSERT(type == Type::Void); + } + + std::string As(Type type) const { + switch (type) { + case Type::Bool: + return AsBool(); + case Type::Bool2: + return AsBool2(); + case Type::Float: + return AsFloat(); + case Type::Int: + return AsInt(); + case Type::Uint: + return AsUint(); + case Type::HalfFloat: + return AsHalfFloat(); + default: + UNREACHABLE_MSG("Invalid type"); + return code; + } + } + + std::string AsBool() const { + switch (type) { + case Type::Bool: + return code; + default: + UNREACHABLE_MSG("Incompatible types"); + return code; + } + } + + std::string AsBool2() const { + switch (type) { + case Type::Bool2: + return code; + default: + UNREACHABLE_MSG("Incompatible types"); + return code; + } + } + + std::string AsFloat() const { + switch (type) { + case Type::Float: + return code; + case Type::Uint: + return fmt::format("utof({})", code); + case Type::Int: + return fmt::format("itof({})", code); + case Type::HalfFloat: + return fmt::format("utof(packHalf2x16({}))", code); + 
default: + UNREACHABLE_MSG("Incompatible types"); + return code; + } + } + + std::string AsInt() const { + switch (type) { + case Type::Float: + return fmt::format("ftoi({})", code); + case Type::Uint: + return fmt::format("int({})", code); + case Type::Int: + return code; + case Type::HalfFloat: + return fmt::format("int(packHalf2x16({}))", code); + default: + UNREACHABLE_MSG("Incompatible types"); + return code; + } + } + + std::string AsUint() const { + switch (type) { + case Type::Float: + return fmt::format("ftou({})", code); + case Type::Uint: + return code; + case Type::Int: + return fmt::format("uint({})", code); + case Type::HalfFloat: + return fmt::format("packHalf2x16({})", code); + default: + UNREACHABLE_MSG("Incompatible types"); + return code; + } + } + + std::string AsHalfFloat() const { + switch (type) { + case Type::Float: + return fmt::format("unpackHalf2x16(ftou({}))", code); + case Type::Uint: + return fmt::format("unpackHalf2x16({})", code); + case Type::Int: + return fmt::format("unpackHalf2x16(int({}))", code); + case Type::HalfFloat: + return code; + default: + UNREACHABLE_MSG("Incompatible types"); + return code; + } + } + +private: + std::string code; + Type type{}; +}; + +constexpr const char* GetTypeString(Type type) { + switch (type) { + case Type::Bool: + return "bool"; + case Type::Bool2: + return "bvec2"; + case Type::Float: + return "float"; + case Type::Int: + return "int"; + case Type::Uint: + return "uint"; + case Type::HalfFloat: + return "vec2"; + default: + UNREACHABLE_MSG("Invalid type"); + return "<invalid type>"; + } +} + /// Generates code to use for a swizzle operation. constexpr const char* GetSwizzle(u32 element) { - constexpr std::array<const char*, 4> swizzle = {".x", ".y", ".z", ".w"}; + constexpr std::array swizzle = {".x", ".y", ".z", ".w"}; return swizzle.at(element); } @@ -134,8 +282,8 @@ constexpr bool IsGenericAttribute(Attribute::Index index) { return index >= Attribute::Index::Attribute_0 && index <= Attribute::Index::Attribute_31; } -constexpr Attribute::Index ToGenericAttribute(u32 value) { - return static_cast<Attribute::Index>(value + static_cast<u32>(Attribute::Index::Attribute_0)); +constexpr Attribute::Index ToGenericAttribute(u64 value) { + return static_cast<Attribute::Index>(value + static_cast<u64>(Attribute::Index::Attribute_0)); } u32 GetGenericAttributeIndex(Attribute::Index index) { @@ -161,9 +309,13 @@ std::string FlowStackTopName(MetaStackClass stack) { return fmt::format("{}_flow_stack_top", GetFlowStackPrefix(stack)); } +constexpr bool IsVertexShader(ProgramType stage) { + return stage == ProgramType::VertexA || stage == ProgramType::VertexB; +} + class GLSLDecompiler final { public: - explicit GLSLDecompiler(const Device& device, const ShaderIR& ir, ShaderStage stage, + explicit GLSLDecompiler(const Device& device, const ShaderIR& ir, ProgramType stage, std::string suffix) : device{device}, ir{ir}, stage{stage}, suffix{suffix}, header{ir.GetHeader()} {} @@ -187,14 +339,16 @@ public: // VM's program counter const auto first_address = ir.GetBasicBlocks().begin()->first; - code.AddLine("uint jmp_to = {}u;", first_address); + code.AddLine("uint jmp_to = {}U;", first_address); // TODO(Subv): Figure out the actual depth of the flow stack, for now it seems // unlikely that shaders will use 20 nested SSYs and PBKs. 
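The Expression helper introduced above carries the GLSL type of every generated snippet, so bitcast helpers such as utof() and ftou() are inserted only when the consumer asks for a different type than the one stored. A condensed model of that behaviour, with illustrative snippet names (cbuf0, gpr3) in place of the decompiler's real getters:

#include <cassert>
#include <string>

// Condensed model of the decompiler's Expression: a GLSL snippet plus the type
// it evaluates to, so casts are only emitted on demand.
enum class Type { Float, Uint };

struct ExprSketch {
    std::string code;
    Type type;

    std::string AsFloat() const {
        return type == Type::Float ? code : "utof(" + code + ")";
    }
    std::string AsUint() const {
        return type == Type::Uint ? code : "ftou(" + code + ")";
    }
};

int main() {
    const ExprSketch cbuf{"cbuf0[2][1]", Type::Uint}; // const buffers are now uvec4
    const ExprSketch reg{"gpr3", Type::Float};        // registers stay float

    assert(cbuf.AsFloat() == "utof(cbuf0[2][1])");    // cast inserted when needed
    assert(cbuf.AsUint() == "cbuf0[2][1]");           // no redundant cast
    assert(reg.AsUint() == "ftou(gpr3)");
}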
- constexpr u32 FLOW_STACK_SIZE = 20; - for (const auto stack : std::array{MetaStackClass::Ssy, MetaStackClass::Pbk}) { - code.AddLine("uint {}[{}];", FlowStackName(stack), FLOW_STACK_SIZE); - code.AddLine("uint {} = 0u;", FlowStackTopName(stack)); + if (!ir.IsFlowStackDisabled()) { + constexpr u32 FLOW_STACK_SIZE = 20; + for (const auto stack : std::array{MetaStackClass::Ssy, MetaStackClass::Pbk}) { + code.AddLine("uint {}[{}];", FlowStackName(stack), FLOW_STACK_SIZE); + code.AddLine("uint {} = 0U;", FlowStackTopName(stack)); + } } code.AddLine("while (true) {{"); @@ -204,7 +358,7 @@ public: for (const auto& pair : ir.GetBasicBlocks()) { const auto [address, bb] = pair; - code.AddLine("case 0x{:x}u: {{", address); + code.AddLine("case 0x{:X}U: {{", address); ++code.scope; VisitBlock(bb); @@ -244,24 +398,22 @@ public: usage.is_read, usage.is_written); } entries.clip_distances = ir.GetClipDistances(); + entries.shader_viewport_layer_array = + IsVertexShader(stage) && (ir.UsesLayer() || ir.UsesViewportIndex()); entries.shader_length = ir.GetLength(); return entries; } private: - using OperationDecompilerFn = std::string (GLSLDecompiler::*)(Operation); - using OperationDecompilersArray = - std::array<OperationDecompilerFn, static_cast<std::size_t>(OperationCode::Amount)>; - void DeclareVertex() { - if (stage != ShaderStage::Vertex) + if (!IsVertexShader(stage)) return; DeclareVertexRedeclarations(); } void DeclareGeometry() { - if (stage != ShaderStage::Geometry) { + if (stage != ProgramType::Geometry) { return; } @@ -280,22 +432,35 @@ private: } void DeclareVertexRedeclarations() { - bool clip_distances_declared = false; - code.AddLine("out gl_PerVertex {{"); ++code.scope; code.AddLine("vec4 gl_Position;"); - for (const auto o : ir.GetOutputAttributes()) { - if (o == Attribute::Index::PointSize) - code.AddLine("float gl_PointSize;"); - if (!clip_distances_declared && (o == Attribute::Index::ClipDistances0123 || - o == Attribute::Index::ClipDistances4567)) { + for (const auto attribute : ir.GetOutputAttributes()) { + if (attribute == Attribute::Index::ClipDistances0123 || + attribute == Attribute::Index::ClipDistances4567) { code.AddLine("float gl_ClipDistance[];"); - clip_distances_declared = true; + break; } } + if (!IsVertexShader(stage) || device.HasVertexViewportLayer()) { + if (ir.UsesLayer()) { + code.AddLine("int gl_Layer;"); + } + if (ir.UsesViewportIndex()) { + code.AddLine("int gl_ViewportIndex;"); + } + } else if ((ir.UsesLayer() || ir.UsesViewportIndex()) && IsVertexShader(stage) && + !device.HasVertexViewportLayer()) { + LOG_ERROR( + Render_OpenGL, + "GL_ARB_shader_viewport_layer_array is not available and its required by a shader"); + } + + if (ir.UsesPointSize()) { + code.AddLine("float gl_PointSize;"); + } --code.scope; code.AddLine("}};"); @@ -305,7 +470,7 @@ private: void DeclareRegisters() { const auto& registers = ir.GetRegisters(); for (const u32 gpr : registers) { - code.AddLine("float {} = 0;", GetRegister(gpr)); + code.AddLine("float {} = 0.0f;", GetRegister(gpr)); } if (!registers.empty()) { code.AddNewLine(); @@ -323,11 +488,16 @@ private: } void DeclareLocalMemory() { - if (const u64 local_memory_size = header.GetLocalMemorySize(); local_memory_size > 0) { - const auto element_count = Common::AlignUp(local_memory_size, 4) / 4; - code.AddLine("float {}[{}];", GetLocalMemory(), element_count); - code.AddNewLine(); + // TODO(Rodrigo): Unstub kernel local memory size and pass it from a register at + // specialization time. 
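DeclareLocalMemory below stubs compute kernels to 0x400 bytes and exposes local memory to GLSL as an array of 32-bit uints, sized with Common::AlignUp(size, 4) / 4. A quick check of that arithmetic (LocalMemoryElements is an illustrative helper, not part of the patch):

#include <cstdint>

using u64 = std::uint64_t;

// Same rounding as Common::AlignUp(size_in_bytes, 4) / 4: local memory bytes
// become whole 32-bit uint elements in the generated shader.
constexpr u64 LocalMemoryElements(u64 size_in_bytes) {
    return (size_in_bytes + 3) / 4;
}

static_assert(LocalMemoryElements(0x400) == 256, "stubbed compute size above");
static_assert(LocalMemoryElements(10) == 3, "odd sizes round up to whole uints");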
+ const u64 local_memory_size = + stage == ProgramType::Compute ? 0x400 : header.GetLocalMemorySize(); + if (local_memory_size == 0) { + return; } + const auto element_count = Common::AlignUp(local_memory_size, 4) / 4; + code.AddLine("uint {}[{}];", GetLocalMemory(), element_count); + code.AddNewLine(); } void DeclareInternalFlags() { @@ -349,8 +519,6 @@ private: return "noperspective "; default: case AttributeUse::Unused: - UNREACHABLE_MSG("Unused attribute being fetched"); - return {}; UNIMPLEMENTED_MSG("Unknown attribute usage index={}", static_cast<u32>(attribute)); return {}; } @@ -381,12 +549,12 @@ private: const u32 location{GetGenericAttributeIndex(index)}; std::string name{GetInputAttribute(index)}; - if (stage == ShaderStage::Geometry) { + if (stage == ProgramType::Geometry) { name = "gs_" + name + "[]"; } std::string suffix; - if (stage == ShaderStage::Fragment) { + if (stage == ProgramType::Fragment) { const auto input_mode{header.ps.GetAttributeUse(location)}; if (skip_unused && input_mode == AttributeUse::Unused) { return; @@ -398,7 +566,7 @@ private: } void DeclareOutputAttributes() { - if (ir.HasPhysicalAttributes() && stage != ShaderStage::Fragment) { + if (ir.HasPhysicalAttributes() && stage != ProgramType::Fragment) { for (u32 i = 0; i < GetNumPhysicalVaryings(); ++i) { DeclareOutputAttribute(ToGenericAttribute(i)); } @@ -427,7 +595,7 @@ private: const auto [index, size] = entry; code.AddLine("layout (std140, binding = CBUF_BINDING_{}) uniform {} {{", index, GetConstBufferBlock(index)); - code.AddLine(" vec4 {}[MAX_CONSTBUFFER_ELEMENTS];", GetConstBuffer(index)); + code.AddLine(" uvec4 {}[{}];", GetConstBuffer(index), MAX_CONSTBUFFER_ELEMENTS); code.AddLine("}};"); code.AddNewLine(); } @@ -448,7 +616,7 @@ private: code.AddLine("layout (std430, binding = GMEM_BINDING_{}_{}) {} buffer {} {{", base.cbuf_index, base.cbuf_offset, qualifier, GetGlobalMemoryBlock(base)); - code.AddLine(" float {}[];", GetGlobalMemory(base)); + code.AddLine(" uint {}[];", GetGlobalMemory(base)); code.AddLine("}};"); code.AddNewLine(); } @@ -506,7 +674,7 @@ private: if (!ir.HasPhysicalAttributes()) { return; } - code.AddLine("float readPhysicalAttribute(uint physical_address) {{"); + code.AddLine("float ReadPhysicalAttribute(uint physical_address) {{"); ++code.scope; code.AddLine("switch (physical_address) {{"); @@ -515,15 +683,16 @@ private: for (u32 index = 0; index < num_attributes; ++index) { const auto attribute{ToGenericAttribute(index)}; for (u32 element = 0; element < 4; ++element) { - constexpr u32 generic_base{0x80}; - constexpr u32 generic_stride{16}; - constexpr u32 element_stride{4}; + constexpr u32 generic_base = 0x80; + constexpr u32 generic_stride = 16; + constexpr u32 element_stride = 4; const u32 address{generic_base + index * generic_stride + element * element_stride}; - const bool declared{stage != ShaderStage::Fragment || - header.ps.GetAttributeUse(index) != AttributeUse::Unused}; - const std::string value{declared ? ReadAttribute(attribute, element) : "0"}; - code.AddLine("case 0x{:x}: return {};", address, value); + const bool declared = stage != ProgramType::Fragment || + header.ps.GetAttributeUse(index) != AttributeUse::Unused; + const std::string value = + declared ? 
ReadAttribute(attribute, element).AsFloat() : "0.0f"; + code.AddLine("case 0x{:X}U: return {};", address, value); } } @@ -543,7 +712,7 @@ private: case Tegra::Shader::ImageType::Texture1D: return "image1D"; case Tegra::Shader::ImageType::TextureBuffer: - return "bufferImage"; + return "imageBuffer"; case Tegra::Shader::ImageType::Texture1DArray: return "image1DArray"; case Tegra::Shader::ImageType::Texture2D: @@ -568,13 +737,11 @@ private: void VisitBlock(const NodeBlock& bb) { for (const auto& node : bb) { - if (const std::string expr = Visit(node); !expr.empty()) { - code.AddLine(expr); - } + Visit(node).CheckVoid(); } } - std::string Visit(const Node& node) { + Expression Visit(const Node& node) { if (const auto operation = std::get_if<OperationNode>(&*node)) { const auto operation_index = static_cast<std::size_t>(operation->GetCode()); if (operation_index >= operation_decompilers.size()) { @@ -592,18 +759,18 @@ private: if (const auto gpr = std::get_if<GprNode>(&*node)) { const u32 index = gpr->GetIndex(); if (index == Register::ZeroIndex) { - return "0"; + return {"0U", Type::Uint}; } - return GetRegister(index); + return {GetRegister(index), Type::Float}; } if (const auto immediate = std::get_if<ImmediateNode>(&*node)) { const u32 value = immediate->GetValue(); if (value < 10) { // For eyecandy avoid using hex numbers on single digits - return fmt::format("utof({}u)", immediate->GetValue()); + return {fmt::format("{}U", immediate->GetValue()), Type::Uint}; } - return fmt::format("utof(0x{:x}u)", immediate->GetValue()); + return {fmt::format("0x{:X}U", immediate->GetValue()), Type::Uint}; } if (const auto predicate = std::get_if<PredicateNode>(&*node)) { @@ -618,17 +785,18 @@ private: } }(); if (predicate->IsNegated()) { - return fmt::format("!({})", value); + return {fmt::format("!({})", value), Type::Bool}; } - return value; + return {value, Type::Bool}; } if (const auto abuf = std::get_if<AbufNode>(&*node)) { - UNIMPLEMENTED_IF_MSG(abuf->IsPhysicalBuffer() && stage == ShaderStage::Geometry, + UNIMPLEMENTED_IF_MSG(abuf->IsPhysicalBuffer() && stage == ProgramType::Geometry, "Physical attributes in geometry shaders are not implemented"); if (abuf->IsPhysicalBuffer()) { - return fmt::format("readPhysicalAttribute(ftou({}))", - Visit(abuf->GetPhysicalAddress())); + return {fmt::format("ReadPhysicalAttribute({})", + Visit(abuf->GetPhysicalAddress()).AsUint()), + Type::Float}; } return ReadAttribute(abuf->GetIndex(), abuf->GetElement(), abuf->GetBuffer()); } @@ -639,56 +807,64 @@ private: // Direct access const u32 offset_imm = immediate->GetValue(); ASSERT_MSG(offset_imm % 4 == 0, "Unaligned cbuf direct access"); - return fmt::format("{}[{}][{}]", GetConstBuffer(cbuf->GetIndex()), - offset_imm / (4 * 4), (offset_imm / 4) % 4); + return {fmt::format("{}[{}][{}]", GetConstBuffer(cbuf->GetIndex()), + offset_imm / (4 * 4), (offset_imm / 4) % 4), + Type::Uint}; } if (std::holds_alternative<OperationNode>(*offset)) { // Indirect access const std::string final_offset = code.GenerateTemporary(); - code.AddLine("uint {} = ftou({}) >> 2;", final_offset, Visit(offset)); + code.AddLine("uint {} = {} >> 2;", final_offset, Visit(offset).AsUint()); if (!device.HasComponentIndexingBug()) { - return fmt::format("{}[{} >> 2][{} & 3]", GetConstBuffer(cbuf->GetIndex()), - final_offset, final_offset); + return {fmt::format("{}[{} >> 2][{} & 3]", GetConstBuffer(cbuf->GetIndex()), + final_offset, final_offset), + Type::Uint}; } // AMD's proprietary GLSL compiler emits ill code for variable component access. 
// To bypass this driver bug generate 4 ifs, one per each component. const std::string pack = code.GenerateTemporary(); - code.AddLine("vec4 {} = {}[{} >> 2];", pack, GetConstBuffer(cbuf->GetIndex()), + code.AddLine("uvec4 {} = {}[{} >> 2];", pack, GetConstBuffer(cbuf->GetIndex()), final_offset); const std::string result = code.GenerateTemporary(); - code.AddLine("float {};", result); + code.AddLine("uint {};", result); for (u32 swizzle = 0; swizzle < 4; ++swizzle) { code.AddLine("if (({} & 3) == {}) {} = {}{};", final_offset, swizzle, result, pack, GetSwizzle(swizzle)); } - return result; + return {result, Type::Uint}; } UNREACHABLE_MSG("Unmanaged offset node type"); } if (const auto gmem = std::get_if<GmemNode>(&*node)) { - const std::string real = Visit(gmem->GetRealAddress()); - const std::string base = Visit(gmem->GetBaseAddress()); - const std::string final_offset = fmt::format("(ftou({}) - ftou({})) / 4", real, base); - return fmt::format("{}[{}]", GetGlobalMemory(gmem->GetDescriptor()), final_offset); + const std::string real = Visit(gmem->GetRealAddress()).AsUint(); + const std::string base = Visit(gmem->GetBaseAddress()).AsUint(); + const std::string final_offset = fmt::format("({} - {}) >> 2", real, base); + return {fmt::format("{}[{}]", GetGlobalMemory(gmem->GetDescriptor()), final_offset), + Type::Uint}; } if (const auto lmem = std::get_if<LmemNode>(&*node)) { - return fmt::format("{}[ftou({}) / 4]", GetLocalMemory(), Visit(lmem->GetAddress())); + if (stage == ProgramType::Compute) { + LOG_WARNING(Render_OpenGL, "Local memory is stubbed on compute shaders"); + } + return { + fmt::format("{}[{} >> 2]", GetLocalMemory(), Visit(lmem->GetAddress()).AsUint()), + Type::Uint}; } if (const auto internal_flag = std::get_if<InternalFlagNode>(&*node)) { - return GetInternalFlag(internal_flag->GetFlag()); + return {GetInternalFlag(internal_flag->GetFlag()), Type::Bool}; } if (const auto conditional = std::get_if<ConditionalNode>(&*node)) { // It's invalid to call conditional on nested nodes, use an operation instead - code.AddLine("if ({}) {{", Visit(conditional->GetCondition())); + code.AddLine("if ({}) {{", Visit(conditional->GetCondition()).AsBool()); ++code.scope; VisitBlock(conditional->GetCode()); @@ -699,20 +875,21 @@ private: } if (const auto comment = std::get_if<CommentNode>(&*node)) { - return "// " + comment->GetText(); + code.AddLine("// " + comment->GetText()); + return {}; } UNREACHABLE(); return {}; } - std::string ReadAttribute(Attribute::Index attribute, u32 element, const Node& buffer = {}) { + Expression ReadAttribute(Attribute::Index attribute, u32 element, const Node& buffer = {}) { const auto GeometryPass = [&](std::string_view name) { - if (stage == ShaderStage::Geometry && buffer) { + if (stage == ProgramType::Geometry && buffer) { // TODO(Rodrigo): Guard geometry inputs against out of bound reads. Some games // set an 0x80000000 index for those and the shader fails to build. Find out why // this happens and what's its intent. - return fmt::format("gs_{}[ftou({}) % MAX_VERTEX_INPUT]", name, Visit(buffer)); + return fmt::format("gs_{}[{} % MAX_VERTEX_INPUT]", name, Visit(buffer).AsUint()); } return std::string(name); }; @@ -720,72 +897,79 @@ private: switch (attribute) { case Attribute::Index::Position: switch (stage) { - case ShaderStage::Geometry: - return fmt::format("gl_in[ftou({})].gl_Position{}", Visit(buffer), - GetSwizzle(element)); - case ShaderStage::Fragment: - return element == 3 ? 
"1.0f" : ("gl_FragCoord"s + GetSwizzle(element)); + case ProgramType::Geometry: + return {fmt::format("gl_in[{}].gl_Position{}", Visit(buffer).AsUint(), + GetSwizzle(element)), + Type::Float}; + case ProgramType::Fragment: + return {element == 3 ? "1.0f" : ("gl_FragCoord"s + GetSwizzle(element)), + Type::Float}; default: UNREACHABLE(); } case Attribute::Index::PointCoord: switch (element) { case 0: - return "gl_PointCoord.x"; + return {"gl_PointCoord.x", Type::Float}; case 1: - return "gl_PointCoord.y"; + return {"gl_PointCoord.y", Type::Float}; case 2: case 3: - return "0"; + return {"0.0f", Type::Float}; } UNREACHABLE(); - return "0"; + return {"0", Type::Int}; case Attribute::Index::TessCoordInstanceIDVertexID: // TODO(Subv): Find out what the values are for the first two elements when inside a // vertex shader, and what's the value of the fourth element when inside a Tess Eval // shader. - ASSERT(stage == ShaderStage::Vertex); + ASSERT(IsVertexShader(stage)); switch (element) { case 2: // Config pack's first value is instance_id. - return "uintBitsToFloat(config_pack[0])"; + return {"config_pack[0]", Type::Uint}; case 3: - return "uintBitsToFloat(gl_VertexID)"; + return {"gl_VertexID", Type::Int}; } UNIMPLEMENTED_MSG("Unmanaged TessCoordInstanceIDVertexID element={}", element); - return "0"; + return {"0", Type::Int}; case Attribute::Index::FrontFacing: // TODO(Subv): Find out what the values are for the other elements. - ASSERT(stage == ShaderStage::Fragment); + ASSERT(stage == ProgramType::Fragment); switch (element) { case 3: - return "itof(gl_FrontFacing ? -1 : 0)"; + return {"(gl_FrontFacing ? -1 : 0)", Type::Int}; } UNIMPLEMENTED_MSG("Unmanaged FrontFacing element={}", element); - return "0"; + return {"0", Type::Int}; default: if (IsGenericAttribute(attribute)) { - return GeometryPass(GetInputAttribute(attribute)) + GetSwizzle(element); + return {GeometryPass(GetInputAttribute(attribute)) + GetSwizzle(element), + Type::Float}; } break; } UNIMPLEMENTED_MSG("Unhandled input attribute: {}", static_cast<u32>(attribute)); - return "0"; + return {"0", Type::Int}; } - std::string ApplyPrecise(Operation operation, const std::string& value) { + Expression ApplyPrecise(Operation operation, std::string value, Type type) { if (!IsPrecise(operation)) { - return value; + return {std::move(value), type}; } - // There's a bug in NVidia's proprietary drivers that makes precise fail on fragment shaders - const std::string precise = stage != ShaderStage::Fragment ? "precise " : ""; + // Old Nvidia drivers have a bug with precise and texture sampling. These are more likely to + // be found in fragment shaders, so we disable precise there. There are vertex shaders that + // also fail to build but nobody seems to care about those. + // Note: Only bugged drivers will skip precise. + const bool disable_precise = device.HasPreciseBug() && stage == ProgramType::Fragment; - const std::string temporary = code.GenerateTemporary(); - code.AddLine("{}float {} = {};", precise, temporary, value); - return temporary; + std::string temporary = code.GenerateTemporary(); + code.AddLine("{}{} {} = {};", disable_precise ? 
"" : "precise ", GetTypeString(type), + temporary, value); + return {std::move(temporary), type}; } - std::string VisitOperand(Operation operation, std::size_t operand_index) { + Expression VisitOperand(Operation operation, std::size_t operand_index) { const auto& operand = operation[operand_index]; const bool parent_precise = IsPrecise(operation); const bool child_precise = IsPrecise(operand); @@ -794,102 +978,98 @@ private: return Visit(operand); } - const std::string temporary = code.GenerateTemporary(); - code.AddLine("float {} = {};", temporary, Visit(operand)); - return temporary; - } - - std::string VisitOperand(Operation operation, std::size_t operand_index, Type type) { - return CastOperand(VisitOperand(operation, operand_index), type); - } - - std::string CastOperand(const std::string& value, Type type) const { - switch (type) { - case Type::Bool: - case Type::Bool2: - case Type::Float: - return value; - case Type::Int: - return fmt::format("ftoi({})", value); - case Type::Uint: - return fmt::format("ftou({})", value); - case Type::HalfFloat: - return fmt::format("toHalf2({})", value); - } - UNREACHABLE(); - return value; + Expression value = Visit(operand); + std::string temporary = code.GenerateTemporary(); + code.AddLine("{} {} = {};", GetTypeString(value.GetType()), temporary, value.GetCode()); + return {std::move(temporary), value.GetType()}; } - std::string BitwiseCastResult(const std::string& value, Type type, - bool needs_parenthesis = false) { - switch (type) { - case Type::Bool: - case Type::Bool2: - case Type::Float: - if (needs_parenthesis) { - return fmt::format("({})", value); + Expression GetOutputAttribute(const AbufNode* abuf) { + switch (const auto attribute = abuf->GetIndex()) { + case Attribute::Index::Position: + return {"gl_Position"s + GetSwizzle(abuf->GetElement()), Type::Float}; + case Attribute::Index::LayerViewportPointSize: + switch (abuf->GetElement()) { + case 0: + UNIMPLEMENTED(); + return {}; + case 1: + if (IsVertexShader(stage) && !device.HasVertexViewportLayer()) { + return {}; + } + return {"gl_Layer", Type::Int}; + case 2: + if (IsVertexShader(stage) && !device.HasVertexViewportLayer()) { + return {}; + } + return {"gl_ViewportIndex", Type::Int}; + case 3: + UNIMPLEMENTED_MSG("Requires some state changes for gl_PointSize to work in shader"); + return {"gl_PointSize", Type::Float}; } - return value; - case Type::Int: - return fmt::format("itof({})", value); - case Type::Uint: - return fmt::format("utof({})", value); - case Type::HalfFloat: - return fmt::format("fromHalf2({})", value); + return {}; + case Attribute::Index::ClipDistances0123: + return {fmt::format("gl_ClipDistance[{}]", abuf->GetElement()), Type::Float}; + case Attribute::Index::ClipDistances4567: + return {fmt::format("gl_ClipDistance[{}]", abuf->GetElement() + 4), Type::Float}; + default: + if (IsGenericAttribute(attribute)) { + return {GetOutputAttribute(attribute) + GetSwizzle(abuf->GetElement()), + Type::Float}; + } + UNIMPLEMENTED_MSG("Unhandled output attribute: {}", static_cast<u32>(attribute)); + return {}; } - UNREACHABLE(); - return value; } - std::string GenerateUnary(Operation operation, const std::string& func, Type result_type, - Type type_a, bool needs_parenthesis = true) { - const std::string op_str = fmt::format("{}({})", func, VisitOperand(operation, 0, type_a)); - - return ApplyPrecise(operation, BitwiseCastResult(op_str, result_type, needs_parenthesis)); + Expression GenerateUnary(Operation operation, std::string_view func, Type result_type, + Type type_a) { 
+ std::string op_str = fmt::format("{}({})", func, VisitOperand(operation, 0).As(type_a)); + return ApplyPrecise(operation, std::move(op_str), result_type); } - std::string GenerateBinaryInfix(Operation operation, const std::string& func, Type result_type, - Type type_a, Type type_b) { - const std::string op_a = VisitOperand(operation, 0, type_a); - const std::string op_b = VisitOperand(operation, 1, type_b); - const std::string op_str = fmt::format("({} {} {})", op_a, func, op_b); + Expression GenerateBinaryInfix(Operation operation, std::string_view func, Type result_type, + Type type_a, Type type_b) { + const std::string op_a = VisitOperand(operation, 0).As(type_a); + const std::string op_b = VisitOperand(operation, 1).As(type_b); + std::string op_str = fmt::format("({} {} {})", op_a, func, op_b); - return ApplyPrecise(operation, BitwiseCastResult(op_str, result_type)); + return ApplyPrecise(operation, std::move(op_str), result_type); } - std::string GenerateBinaryCall(Operation operation, const std::string& func, Type result_type, - Type type_a, Type type_b) { - const std::string op_a = VisitOperand(operation, 0, type_a); - const std::string op_b = VisitOperand(operation, 1, type_b); - const std::string op_str = fmt::format("{}({}, {})", func, op_a, op_b); + Expression GenerateBinaryCall(Operation operation, std::string_view func, Type result_type, + Type type_a, Type type_b) { + const std::string op_a = VisitOperand(operation, 0).As(type_a); + const std::string op_b = VisitOperand(operation, 1).As(type_b); + std::string op_str = fmt::format("{}({}, {})", func, op_a, op_b); - return ApplyPrecise(operation, BitwiseCastResult(op_str, result_type)); + return ApplyPrecise(operation, std::move(op_str), result_type); } - std::string GenerateTernary(Operation operation, const std::string& func, Type result_type, - Type type_a, Type type_b, Type type_c) { - const std::string op_a = VisitOperand(operation, 0, type_a); - const std::string op_b = VisitOperand(operation, 1, type_b); - const std::string op_c = VisitOperand(operation, 2, type_c); - const std::string op_str = fmt::format("{}({}, {}, {})", func, op_a, op_b, op_c); + Expression GenerateTernary(Operation operation, std::string_view func, Type result_type, + Type type_a, Type type_b, Type type_c) { + const std::string op_a = VisitOperand(operation, 0).As(type_a); + const std::string op_b = VisitOperand(operation, 1).As(type_b); + const std::string op_c = VisitOperand(operation, 2).As(type_c); + std::string op_str = fmt::format("{}({}, {}, {})", func, op_a, op_b, op_c); - return ApplyPrecise(operation, BitwiseCastResult(op_str, result_type)); + return ApplyPrecise(operation, std::move(op_str), result_type); } - std::string GenerateQuaternary(Operation operation, const std::string& func, Type result_type, - Type type_a, Type type_b, Type type_c, Type type_d) { - const std::string op_a = VisitOperand(operation, 0, type_a); - const std::string op_b = VisitOperand(operation, 1, type_b); - const std::string op_c = VisitOperand(operation, 2, type_c); - const std::string op_d = VisitOperand(operation, 3, type_d); - const std::string op_str = fmt::format("{}({}, {}, {}, {})", func, op_a, op_b, op_c, op_d); + Expression GenerateQuaternary(Operation operation, const std::string& func, Type result_type, + Type type_a, Type type_b, Type type_c, Type type_d) { + const std::string op_a = VisitOperand(operation, 0).As(type_a); + const std::string op_b = VisitOperand(operation, 1).As(type_b); + const std::string op_c = VisitOperand(operation, 
2).As(type_c); + const std::string op_d = VisitOperand(operation, 3).As(type_d); + std::string op_str = fmt::format("{}({}, {}, {}, {})", func, op_a, op_b, op_c, op_d); - return ApplyPrecise(operation, BitwiseCastResult(op_str, result_type)); + return ApplyPrecise(operation, std::move(op_str), result_type); } std::string GenerateTexture(Operation operation, const std::string& function_suffix, const std::vector<TextureIR>& extras) { - constexpr std::array<const char*, 4> coord_constructors = {"float", "vec2", "vec3", "vec4"}; + constexpr std::array coord_constructors = {"float", "vec2", "vec3", "vec4"}; const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); ASSERT(meta); @@ -906,17 +1086,17 @@ private: expr += coord_constructors.at(count + (has_array ? 1 : 0) + (has_shadow ? 1 : 0) - 1); expr += '('; for (std::size_t i = 0; i < count; ++i) { - expr += Visit(operation[i]); + expr += Visit(operation[i]).AsFloat(); const std::size_t next = i + 1; if (next < count) expr += ", "; } if (has_array) { - expr += ", float(ftoi(" + Visit(meta->array) + "))"; + expr += ", float(" + Visit(meta->array).AsInt() + ')'; } if (has_shadow) { - expr += ", " + Visit(meta->depth_compare); + expr += ", " + Visit(meta->depth_compare).AsFloat(); } expr += ')'; @@ -947,11 +1127,11 @@ private: // required to be constant) expr += std::to_string(static_cast<s32>(immediate->GetValue())); } else { - expr += fmt::format("ftoi({})", Visit(operand)); + expr += Visit(operand).AsInt(); } break; case Type::Float: - expr += Visit(operand); + expr += Visit(operand).AsFloat(); break; default: { const auto type_int = static_cast<u32>(type); @@ -967,7 +1147,7 @@ private: if (aoffi.empty()) { return {}; } - constexpr std::array<const char*, 3> coord_constructors = {"int", "ivec2", "ivec3"}; + constexpr std::array coord_constructors = {"int", "ivec2", "ivec3"}; std::string expr = ", "; expr += coord_constructors.at(aoffi.size() - 1); expr += '('; @@ -980,7 +1160,7 @@ private: expr += std::to_string(static_cast<s32>(immediate->GetValue())); } else if (device.HasVariableAoffi()) { // Avoid using variable AOFFI on unsupported devices. - expr += fmt::format("ftoi({})", Visit(operand)); + expr += Visit(operand).AsInt(); } else { // Insert 0 on devices not supporting variable AOFFI. 
expr += '0'; @@ -994,318 +1174,314 @@ private: return expr; } - std::string Assign(Operation operation) { + Expression Assign(Operation operation) { const Node& dest = operation[0]; const Node& src = operation[1]; - std::string target; + Expression target; if (const auto gpr = std::get_if<GprNode>(&*dest)) { if (gpr->GetIndex() == Register::ZeroIndex) { // Writing to Register::ZeroIndex is a no op return {}; } - target = GetRegister(gpr->GetIndex()); + target = {GetRegister(gpr->GetIndex()), Type::Float}; } else if (const auto abuf = std::get_if<AbufNode>(&*dest)) { UNIMPLEMENTED_IF(abuf->IsPhysicalBuffer()); - - target = [&]() -> std::string { - switch (const auto attribute = abuf->GetIndex(); abuf->GetIndex()) { - case Attribute::Index::Position: - return "gl_Position"s + GetSwizzle(abuf->GetElement()); - case Attribute::Index::PointSize: - return "gl_PointSize"; - case Attribute::Index::ClipDistances0123: - return fmt::format("gl_ClipDistance[{}]", abuf->GetElement()); - case Attribute::Index::ClipDistances4567: - return fmt::format("gl_ClipDistance[{}]", abuf->GetElement() + 4); - default: - if (IsGenericAttribute(attribute)) { - return GetOutputAttribute(attribute) + GetSwizzle(abuf->GetElement()); - } - UNIMPLEMENTED_MSG("Unhandled output attribute: {}", - static_cast<u32>(attribute)); - return "0"; - } - }(); + target = GetOutputAttribute(abuf); } else if (const auto lmem = std::get_if<LmemNode>(&*dest)) { - target = fmt::format("{}[ftou({}) / 4]", GetLocalMemory(), Visit(lmem->GetAddress())); + if (stage == ProgramType::Compute) { + LOG_WARNING(Render_OpenGL, "Local memory is stubbed on compute shaders"); + } + target = { + fmt::format("{}[{} >> 2]", GetLocalMemory(), Visit(lmem->GetAddress()).AsUint()), + Type::Uint}; } else if (const auto gmem = std::get_if<GmemNode>(&*dest)) { - const std::string real = Visit(gmem->GetRealAddress()); - const std::string base = Visit(gmem->GetBaseAddress()); - const std::string final_offset = fmt::format("(ftou({}) - ftou({})) / 4", real, base); - target = fmt::format("{}[{}]", GetGlobalMemory(gmem->GetDescriptor()), final_offset); + const std::string real = Visit(gmem->GetRealAddress()).AsUint(); + const std::string base = Visit(gmem->GetBaseAddress()).AsUint(); + const std::string final_offset = fmt::format("({} - {}) >> 2", real, base); + target = {fmt::format("{}[{}]", GetGlobalMemory(gmem->GetDescriptor()), final_offset), + Type::Uint}; } else { UNREACHABLE_MSG("Assign called without a proper target"); } - code.AddLine("{} = {};", target, Visit(src)); + code.AddLine("{} = {};", target.GetCode(), Visit(src).As(target.GetType())); return {}; } template <Type type> - std::string Add(Operation operation) { + Expression Add(Operation operation) { return GenerateBinaryInfix(operation, "+", type, type, type); } template <Type type> - std::string Mul(Operation operation) { + Expression Mul(Operation operation) { return GenerateBinaryInfix(operation, "*", type, type, type); } template <Type type> - std::string Div(Operation operation) { + Expression Div(Operation operation) { return GenerateBinaryInfix(operation, "/", type, type, type); } template <Type type> - std::string Fma(Operation operation) { + Expression Fma(Operation operation) { return GenerateTernary(operation, "fma", type, type, type, type); } template <Type type> - std::string Negate(Operation operation) { - return GenerateUnary(operation, "-", type, type, true); + Expression Negate(Operation operation) { + return GenerateUnary(operation, "-", type, type); } template <Type type> - 
std::string Absolute(Operation operation) { - return GenerateUnary(operation, "abs", type, type, false); + Expression Absolute(Operation operation) { + return GenerateUnary(operation, "abs", type, type); } - std::string FClamp(Operation operation) { + Expression FClamp(Operation operation) { return GenerateTernary(operation, "clamp", Type::Float, Type::Float, Type::Float, Type::Float); } + Expression FCastHalf0(Operation operation) { + return {fmt::format("({})[0]", VisitOperand(operation, 0).AsHalfFloat()), Type::Float}; + } + + Expression FCastHalf1(Operation operation) { + return {fmt::format("({})[1]", VisitOperand(operation, 0).AsHalfFloat()), Type::Float}; + } + template <Type type> - std::string Min(Operation operation) { + Expression Min(Operation operation) { return GenerateBinaryCall(operation, "min", type, type, type); } template <Type type> - std::string Max(Operation operation) { + Expression Max(Operation operation) { return GenerateBinaryCall(operation, "max", type, type, type); } - std::string Select(Operation operation) { - const std::string condition = Visit(operation[0]); - const std::string true_case = Visit(operation[1]); - const std::string false_case = Visit(operation[2]); - const std::string op_str = fmt::format("({} ? {} : {})", condition, true_case, false_case); + Expression Select(Operation operation) { + const std::string condition = Visit(operation[0]).AsBool(); + const std::string true_case = Visit(operation[1]).AsUint(); + const std::string false_case = Visit(operation[2]).AsUint(); + std::string op_str = fmt::format("({} ? {} : {})", condition, true_case, false_case); - return ApplyPrecise(operation, op_str); + return ApplyPrecise(operation, std::move(op_str), Type::Uint); } - std::string FCos(Operation operation) { - return GenerateUnary(operation, "cos", Type::Float, Type::Float, false); + Expression FCos(Operation operation) { + return GenerateUnary(operation, "cos", Type::Float, Type::Float); } - std::string FSin(Operation operation) { - return GenerateUnary(operation, "sin", Type::Float, Type::Float, false); + Expression FSin(Operation operation) { + return GenerateUnary(operation, "sin", Type::Float, Type::Float); } - std::string FExp2(Operation operation) { - return GenerateUnary(operation, "exp2", Type::Float, Type::Float, false); + Expression FExp2(Operation operation) { + return GenerateUnary(operation, "exp2", Type::Float, Type::Float); } - std::string FLog2(Operation operation) { - return GenerateUnary(operation, "log2", Type::Float, Type::Float, false); + Expression FLog2(Operation operation) { + return GenerateUnary(operation, "log2", Type::Float, Type::Float); } - std::string FInverseSqrt(Operation operation) { - return GenerateUnary(operation, "inversesqrt", Type::Float, Type::Float, false); + Expression FInverseSqrt(Operation operation) { + return GenerateUnary(operation, "inversesqrt", Type::Float, Type::Float); } - std::string FSqrt(Operation operation) { - return GenerateUnary(operation, "sqrt", Type::Float, Type::Float, false); + Expression FSqrt(Operation operation) { + return GenerateUnary(operation, "sqrt", Type::Float, Type::Float); } - std::string FRoundEven(Operation operation) { - return GenerateUnary(operation, "roundEven", Type::Float, Type::Float, false); + Expression FRoundEven(Operation operation) { + return GenerateUnary(operation, "roundEven", Type::Float, Type::Float); } - std::string FFloor(Operation operation) { - return GenerateUnary(operation, "floor", Type::Float, Type::Float, false); + Expression FFloor(Operation 
operation) { + return GenerateUnary(operation, "floor", Type::Float, Type::Float); } - std::string FCeil(Operation operation) { - return GenerateUnary(operation, "ceil", Type::Float, Type::Float, false); + Expression FCeil(Operation operation) { + return GenerateUnary(operation, "ceil", Type::Float, Type::Float); } - std::string FTrunc(Operation operation) { - return GenerateUnary(operation, "trunc", Type::Float, Type::Float, false); + Expression FTrunc(Operation operation) { + return GenerateUnary(operation, "trunc", Type::Float, Type::Float); } template <Type type> - std::string FCastInteger(Operation operation) { - return GenerateUnary(operation, "float", Type::Float, type, false); + Expression FCastInteger(Operation operation) { + return GenerateUnary(operation, "float", Type::Float, type); } - std::string ICastFloat(Operation operation) { - return GenerateUnary(operation, "int", Type::Int, Type::Float, false); + Expression ICastFloat(Operation operation) { + return GenerateUnary(operation, "int", Type::Int, Type::Float); } - std::string ICastUnsigned(Operation operation) { - return GenerateUnary(operation, "int", Type::Int, Type::Uint, false); + Expression ICastUnsigned(Operation operation) { + return GenerateUnary(operation, "int", Type::Int, Type::Uint); } template <Type type> - std::string LogicalShiftLeft(Operation operation) { + Expression LogicalShiftLeft(Operation operation) { return GenerateBinaryInfix(operation, "<<", type, type, Type::Uint); } - std::string ILogicalShiftRight(Operation operation) { - const std::string op_a = VisitOperand(operation, 0, Type::Uint); - const std::string op_b = VisitOperand(operation, 1, Type::Uint); - const std::string op_str = fmt::format("int({} >> {})", op_a, op_b); + Expression ILogicalShiftRight(Operation operation) { + const std::string op_a = VisitOperand(operation, 0).AsUint(); + const std::string op_b = VisitOperand(operation, 1).AsUint(); + std::string op_str = fmt::format("int({} >> {})", op_a, op_b); - return ApplyPrecise(operation, BitwiseCastResult(op_str, Type::Int)); + return ApplyPrecise(operation, std::move(op_str), Type::Int); } - std::string IArithmeticShiftRight(Operation operation) { + Expression IArithmeticShiftRight(Operation operation) { return GenerateBinaryInfix(operation, ">>", Type::Int, Type::Int, Type::Uint); } template <Type type> - std::string BitwiseAnd(Operation operation) { + Expression BitwiseAnd(Operation operation) { return GenerateBinaryInfix(operation, "&", type, type, type); } template <Type type> - std::string BitwiseOr(Operation operation) { + Expression BitwiseOr(Operation operation) { return GenerateBinaryInfix(operation, "|", type, type, type); } template <Type type> - std::string BitwiseXor(Operation operation) { + Expression BitwiseXor(Operation operation) { return GenerateBinaryInfix(operation, "^", type, type, type); } template <Type type> - std::string BitwiseNot(Operation operation) { - return GenerateUnary(operation, "~", type, type, false); + Expression BitwiseNot(Operation operation) { + return GenerateUnary(operation, "~", type, type); } - std::string UCastFloat(Operation operation) { - return GenerateUnary(operation, "uint", Type::Uint, Type::Float, false); + Expression UCastFloat(Operation operation) { + return GenerateUnary(operation, "uint", Type::Uint, Type::Float); } - std::string UCastSigned(Operation operation) { - return GenerateUnary(operation, "uint", Type::Uint, Type::Int, false); + Expression UCastSigned(Operation operation) { + return GenerateUnary(operation, "uint", 
Type::Uint, Type::Int); } - std::string UShiftRight(Operation operation) { + Expression UShiftRight(Operation operation) { return GenerateBinaryInfix(operation, ">>", Type::Uint, Type::Uint, Type::Uint); } template <Type type> - std::string BitfieldInsert(Operation operation) { + Expression BitfieldInsert(Operation operation) { return GenerateQuaternary(operation, "bitfieldInsert", type, type, type, Type::Int, Type::Int); } template <Type type> - std::string BitfieldExtract(Operation operation) { + Expression BitfieldExtract(Operation operation) { return GenerateTernary(operation, "bitfieldExtract", type, type, Type::Int, Type::Int); } template <Type type> - std::string BitCount(Operation operation) { - return GenerateUnary(operation, "bitCount", type, type, false); + Expression BitCount(Operation operation) { + return GenerateUnary(operation, "bitCount", type, type); } - std::string HNegate(Operation operation) { + Expression HNegate(Operation operation) { const auto GetNegate = [&](std::size_t index) { - return VisitOperand(operation, index, Type::Bool) + " ? -1 : 1"; + return VisitOperand(operation, index).AsBool() + " ? -1 : 1"; }; - const std::string value = - fmt::format("({} * vec2({}, {}))", VisitOperand(operation, 0, Type::HalfFloat), - GetNegate(1), GetNegate(2)); - return BitwiseCastResult(value, Type::HalfFloat); - } - - std::string HClamp(Operation operation) { - const std::string value = VisitOperand(operation, 0, Type::HalfFloat); - const std::string min = VisitOperand(operation, 1, Type::Float); - const std::string max = VisitOperand(operation, 2, Type::Float); - const std::string clamped = fmt::format("clamp({}, vec2({}), vec2({}))", value, min, max); - - return ApplyPrecise(operation, BitwiseCastResult(clamped, Type::HalfFloat)); - } - - std::string HUnpack(Operation operation) { - const std::string operand{VisitOperand(operation, 0, Type::HalfFloat)}; - const auto value = [&]() -> std::string { - switch (std::get<Tegra::Shader::HalfType>(operation.GetMeta())) { - case Tegra::Shader::HalfType::H0_H1: - return operand; - case Tegra::Shader::HalfType::F32: - return fmt::format("vec2(fromHalf2({}))", operand); - case Tegra::Shader::HalfType::H0_H0: - return fmt::format("vec2({}[0])", operand); - case Tegra::Shader::HalfType::H1_H1: - return fmt::format("vec2({}[1])", operand); - } - UNREACHABLE(); - return "0"; - }(); - return fmt::format("fromHalf2({})", value); + return {fmt::format("({} * vec2({}, {}))", VisitOperand(operation, 0).AsHalfFloat(), + GetNegate(1), GetNegate(2)), + Type::HalfFloat}; + } + + Expression HClamp(Operation operation) { + const std::string value = VisitOperand(operation, 0).AsHalfFloat(); + const std::string min = VisitOperand(operation, 1).AsFloat(); + const std::string max = VisitOperand(operation, 2).AsFloat(); + std::string clamped = fmt::format("clamp({}, vec2({}), vec2({}))", value, min, max); + + return ApplyPrecise(operation, std::move(clamped), Type::HalfFloat); + } + + Expression HCastFloat(Operation operation) { + return {fmt::format("vec2({})", VisitOperand(operation, 0).AsFloat()), Type::HalfFloat}; + } + + Expression HUnpack(Operation operation) { + Expression operand = VisitOperand(operation, 0); + switch (std::get<Tegra::Shader::HalfType>(operation.GetMeta())) { + case Tegra::Shader::HalfType::H0_H1: + return operand; + case Tegra::Shader::HalfType::F32: + return {fmt::format("vec2({})", operand.AsFloat()), Type::HalfFloat}; + case Tegra::Shader::HalfType::H0_H0: + return {fmt::format("vec2({}[0])", operand.AsHalfFloat()), 
Type::HalfFloat}; + case Tegra::Shader::HalfType::H1_H1: + return {fmt::format("vec2({}[1])", operand.AsHalfFloat()), Type::HalfFloat}; + } } - std::string HMergeF32(Operation operation) { - return fmt::format("float(toHalf2({})[0])", Visit(operation[0])); + Expression HMergeF32(Operation operation) { + return {fmt::format("float({}[0])", VisitOperand(operation, 0).AsHalfFloat()), Type::Float}; } - std::string HMergeH0(Operation operation) { - return fmt::format("fromHalf2(vec2(toHalf2({})[0], toHalf2({})[1]))", Visit(operation[1]), - Visit(operation[0])); + Expression HMergeH0(Operation operation) { + std::string dest = VisitOperand(operation, 0).AsUint(); + std::string src = VisitOperand(operation, 1).AsUint(); + return {fmt::format("(({} & 0x0000FFFFU) | ({} & 0xFFFF0000U))", src, dest), Type::Uint}; } - std::string HMergeH1(Operation operation) { - return fmt::format("fromHalf2(vec2(toHalf2({})[0], toHalf2({})[1]))", Visit(operation[0]), - Visit(operation[1])); + Expression HMergeH1(Operation operation) { + std::string dest = VisitOperand(operation, 0).AsUint(); + std::string src = VisitOperand(operation, 1).AsUint(); + return {fmt::format("(({} & 0x0000FFFFU) | ({} & 0xFFFF0000U))", dest, src), Type::Uint}; } - std::string HPack2(Operation operation) { - return fmt::format("utof(packHalf2x16(vec2({}, {})))", Visit(operation[0]), - Visit(operation[1])); + Expression HPack2(Operation operation) { + return {fmt::format("vec2({}, {})", VisitOperand(operation, 0).AsFloat(), + VisitOperand(operation, 1).AsFloat()), + Type::HalfFloat}; } template <Type type> - std::string LogicalLessThan(Operation operation) { + Expression LogicalLessThan(Operation operation) { return GenerateBinaryInfix(operation, "<", Type::Bool, type, type); } template <Type type> - std::string LogicalEqual(Operation operation) { + Expression LogicalEqual(Operation operation) { return GenerateBinaryInfix(operation, "==", Type::Bool, type, type); } template <Type type> - std::string LogicalLessEqual(Operation operation) { + Expression LogicalLessEqual(Operation operation) { return GenerateBinaryInfix(operation, "<=", Type::Bool, type, type); } template <Type type> - std::string LogicalGreaterThan(Operation operation) { + Expression LogicalGreaterThan(Operation operation) { return GenerateBinaryInfix(operation, ">", Type::Bool, type, type); } template <Type type> - std::string LogicalNotEqual(Operation operation) { + Expression LogicalNotEqual(Operation operation) { return GenerateBinaryInfix(operation, "!=", Type::Bool, type, type); } template <Type type> - std::string LogicalGreaterEqual(Operation operation) { + Expression LogicalGreaterEqual(Operation operation) { return GenerateBinaryInfix(operation, ">=", Type::Bool, type, type); } - std::string LogicalFIsNan(Operation operation) { - return GenerateUnary(operation, "isnan", Type::Bool, Type::Float, false); + Expression LogicalFIsNan(Operation operation) { + return GenerateUnary(operation, "isnan", Type::Bool, Type::Float); } - std::string LogicalAssign(Operation operation) { + Expression LogicalAssign(Operation operation) { const Node& dest = operation[0]; const Node& src = operation[1]; @@ -1326,82 +1502,80 @@ private: target = GetInternalFlag(flag->GetFlag()); } - code.AddLine("{} = {};", target, Visit(src)); + code.AddLine("{} = {};", target, Visit(src).AsBool()); return {}; } - std::string LogicalAnd(Operation operation) { + Expression LogicalAnd(Operation operation) { return GenerateBinaryInfix(operation, "&&", Type::Bool, Type::Bool, Type::Bool); } - std::string 
LogicalOr(Operation operation) { + Expression LogicalOr(Operation operation) { return GenerateBinaryInfix(operation, "||", Type::Bool, Type::Bool, Type::Bool); } - std::string LogicalXor(Operation operation) { + Expression LogicalXor(Operation operation) { return GenerateBinaryInfix(operation, "^^", Type::Bool, Type::Bool, Type::Bool); } - std::string LogicalNegate(Operation operation) { - return GenerateUnary(operation, "!", Type::Bool, Type::Bool, false); + Expression LogicalNegate(Operation operation) { + return GenerateUnary(operation, "!", Type::Bool, Type::Bool); } - std::string LogicalPick2(Operation operation) { - const std::string pair = VisitOperand(operation, 0, Type::Bool2); - return fmt::format("{}[{}]", pair, VisitOperand(operation, 1, Type::Uint)); + Expression LogicalPick2(Operation operation) { + return {fmt::format("{}[{}]", VisitOperand(operation, 0).AsBool2(), + VisitOperand(operation, 1).AsUint()), + Type::Bool}; } - std::string LogicalAll2(Operation operation) { + Expression LogicalAnd2(Operation operation) { return GenerateUnary(operation, "all", Type::Bool, Type::Bool2); } - std::string LogicalAny2(Operation operation) { - return GenerateUnary(operation, "any", Type::Bool, Type::Bool2); - } - template <bool with_nan> - std::string GenerateHalfComparison(Operation operation, const std::string& compare_op) { - const std::string comparison{GenerateBinaryCall(operation, compare_op, Type::Bool2, - Type::HalfFloat, Type::HalfFloat)}; + Expression GenerateHalfComparison(Operation operation, std::string_view compare_op) { + Expression comparison = GenerateBinaryCall(operation, compare_op, Type::Bool2, + Type::HalfFloat, Type::HalfFloat); if constexpr (!with_nan) { return comparison; } - return fmt::format("halfFloatNanComparison({}, {}, {})", comparison, - VisitOperand(operation, 0, Type::HalfFloat), - VisitOperand(operation, 1, Type::HalfFloat)); + return {fmt::format("HalfFloatNanComparison({}, {}, {})", comparison.AsBool2(), + VisitOperand(operation, 0).AsHalfFloat(), + VisitOperand(operation, 1).AsHalfFloat()), + Type::Bool2}; } template <bool with_nan> - std::string Logical2HLessThan(Operation operation) { + Expression Logical2HLessThan(Operation operation) { return GenerateHalfComparison<with_nan>(operation, "lessThan"); } template <bool with_nan> - std::string Logical2HEqual(Operation operation) { + Expression Logical2HEqual(Operation operation) { return GenerateHalfComparison<with_nan>(operation, "equal"); } template <bool with_nan> - std::string Logical2HLessEqual(Operation operation) { + Expression Logical2HLessEqual(Operation operation) { return GenerateHalfComparison<with_nan>(operation, "lessThanEqual"); } template <bool with_nan> - std::string Logical2HGreaterThan(Operation operation) { + Expression Logical2HGreaterThan(Operation operation) { return GenerateHalfComparison<with_nan>(operation, "greaterThan"); } template <bool with_nan> - std::string Logical2HNotEqual(Operation operation) { + Expression Logical2HNotEqual(Operation operation) { return GenerateHalfComparison<with_nan>(operation, "notEqual"); } template <bool with_nan> - std::string Logical2HGreaterEqual(Operation operation) { + Expression Logical2HGreaterEqual(Operation operation) { return GenerateHalfComparison<with_nan>(operation, "greaterThanEqual"); } - std::string Texture(Operation operation) { + Expression Texture(Operation operation) { const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); ASSERT(meta); @@ -1410,10 +1584,10 @@ private: if (meta->sampler.IsShadow()) { expr = 
"vec4(" + expr + ')'; } - return expr + GetSwizzle(meta->element); + return {expr + GetSwizzle(meta->element), Type::Float}; } - std::string TextureLod(Operation operation) { + Expression TextureLod(Operation operation) { const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); ASSERT(meta); @@ -1422,54 +1596,54 @@ private: if (meta->sampler.IsShadow()) { expr = "vec4(" + expr + ')'; } - return expr + GetSwizzle(meta->element); + return {expr + GetSwizzle(meta->element), Type::Float}; } - std::string TextureGather(Operation operation) { + Expression TextureGather(Operation operation) { const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); ASSERT(meta); const auto type = meta->sampler.IsShadow() ? Type::Float : Type::Int; - return GenerateTexture(operation, "Gather", - {TextureArgument{type, meta->component}, TextureAoffi{}}) + - GetSwizzle(meta->element); + return {GenerateTexture(operation, "Gather", + {TextureArgument{type, meta->component}, TextureAoffi{}}) + + GetSwizzle(meta->element), + Type::Float}; } - std::string TextureQueryDimensions(Operation operation) { + Expression TextureQueryDimensions(Operation operation) { const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); ASSERT(meta); const std::string sampler = GetSampler(meta->sampler); - const std::string lod = VisitOperand(operation, 0, Type::Int); + const std::string lod = VisitOperand(operation, 0).AsInt(); switch (meta->element) { case 0: case 1: - return fmt::format("itof(int(textureSize({}, {}){}))", sampler, lod, - GetSwizzle(meta->element)); - case 2: - return "0"; + return {fmt::format("textureSize({}, {}){}", sampler, lod, GetSwizzle(meta->element)), + Type::Int}; case 3: - return fmt::format("itof(textureQueryLevels({}))", sampler); + return {fmt::format("textureQueryLevels({})", sampler), Type::Int}; } UNREACHABLE(); - return "0"; + return {"0", Type::Int}; } - std::string TextureQueryLod(Operation operation) { + Expression TextureQueryLod(Operation operation) { const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); ASSERT(meta); if (meta->element < 2) { - return fmt::format("itof(int(({} * vec2(256)){}))", - GenerateTexture(operation, "QueryLod", {}), - GetSwizzle(meta->element)); + return {fmt::format("int(({} * vec2(256)){})", + GenerateTexture(operation, "QueryLod", {}), + GetSwizzle(meta->element)), + Type::Int}; } - return "0"; + return {"0", Type::Int}; } - std::string TexelFetch(Operation operation) { - constexpr std::array<const char*, 4> constructors = {"int", "ivec2", "ivec3", "ivec4"}; + Expression TexelFetch(Operation operation) { + constexpr std::array constructors = {"int", "ivec2", "ivec3", "ivec4"}; const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); ASSERT(meta); UNIMPLEMENTED_IF(meta->sampler.IsArray()); @@ -1482,7 +1656,7 @@ private: expr += constructors.at(operation.GetOperandsCount() - 1); expr += '('; for (std::size_t i = 0; i < count; ++i) { - expr += VisitOperand(operation, i, Type::Int); + expr += VisitOperand(operation, i).AsInt(); const std::size_t next = i + 1; if (next == count) expr += ')'; @@ -1495,7 +1669,7 @@ private: if (meta->lod) { expr += ", "; - expr += CastOperand(Visit(meta->lod), Type::Int); + expr += Visit(meta->lod).AsInt(); } expr += ')'; expr += GetSwizzle(meta->element); @@ -1510,11 +1684,11 @@ private: code.AddLine("float {} = {};", tmp, expr); code.AddLine("#endif"); - return tmp; + return {tmp, Type::Float}; } - std::string ImageStore(Operation operation) { - constexpr std::array<const char*, 4> 
constructors{"int(", "ivec2(", "ivec3(", "ivec4("}; + Expression ImageStore(Operation operation) { + constexpr std::array constructors{"int(", "ivec2(", "ivec3(", "ivec4("}; const auto meta{std::get<MetaImage>(operation.GetMeta())}; std::string expr = "imageStore("; @@ -1524,7 +1698,7 @@ private: const std::size_t coords_count{operation.GetOperandsCount()}; expr += constructors.at(coords_count - 1); for (std::size_t i = 0; i < coords_count; ++i) { - expr += VisitOperand(operation, i, Type::Int); + expr += VisitOperand(operation, i).AsInt(); if (i + 1 < coords_count) { expr += ", "; } @@ -1535,7 +1709,7 @@ private: UNIMPLEMENTED_IF(values_count != 4); expr += "vec4("; for (std::size_t i = 0; i < values_count; ++i) { - expr += Visit(meta.values.at(i)); + expr += Visit(meta.values.at(i)).AsFloat(); if (i + 1 < values_count) { expr += ", "; } @@ -1546,44 +1720,52 @@ private: return {}; } - std::string Branch(Operation operation) { + Expression Branch(Operation operation) { const auto target = std::get_if<ImmediateNode>(&*operation[0]); UNIMPLEMENTED_IF(!target); - code.AddLine("jmp_to = 0x{:x}u;", target->GetValue()); + code.AddLine("jmp_to = 0x{:X}U;", target->GetValue()); code.AddLine("break;"); return {}; } - std::string PushFlowStack(Operation operation) { + Expression BranchIndirect(Operation operation) { + const std::string op_a = VisitOperand(operation, 0).AsUint(); + + code.AddLine("jmp_to = {};", op_a); + code.AddLine("break;"); + return {}; + } + + Expression PushFlowStack(Operation operation) { const auto stack = std::get<MetaStackClass>(operation.GetMeta()); const auto target = std::get_if<ImmediateNode>(&*operation[0]); UNIMPLEMENTED_IF(!target); - code.AddLine("{}[{}++] = 0x{:x}u;", FlowStackName(stack), FlowStackTopName(stack), + code.AddLine("{}[{}++] = 0x{:X}U;", FlowStackName(stack), FlowStackTopName(stack), target->GetValue()); return {}; } - std::string PopFlowStack(Operation operation) { + Expression PopFlowStack(Operation operation) { const auto stack = std::get<MetaStackClass>(operation.GetMeta()); code.AddLine("jmp_to = {}[--{}];", FlowStackName(stack), FlowStackTopName(stack)); code.AddLine("break;"); return {}; } - std::string Exit(Operation operation) { - if (stage != ShaderStage::Fragment) { + Expression Exit(Operation operation) { + if (stage != ProgramType::Fragment) { code.AddLine("return;"); return {}; } const auto& used_registers = ir.GetRegisters(); - const auto SafeGetRegister = [&](u32 reg) -> std::string { + const auto SafeGetRegister = [&](u32 reg) -> Expression { // TODO(Rodrigo): Replace with contains once C++20 releases if (used_registers.find(reg) != used_registers.end()) { - return GetRegister(reg); + return {GetRegister(reg), Type::Float}; } - return "0.0f"; + return {"0.0f", Type::Float}; }; UNIMPLEMENTED_IF_MSG(header.ps.omap.sample_mask != 0, "Sample mask write is unimplemented"); @@ -1596,7 +1778,7 @@ private: for (u32 component = 0; component < 4; ++component) { if (header.ps.IsColorComponentOutputEnabled(render_target, component)) { code.AddLine("FragColor{}[{}] = {};", render_target, component, - SafeGetRegister(current_reg)); + SafeGetRegister(current_reg).AsFloat()); ++current_reg; } } @@ -1605,14 +1787,14 @@ private: if (header.ps.omap.depth) { // The depth output is always 2 registers after the last color output, and current_reg // already contains one past the last color register. 
- code.AddLine("gl_FragDepth = {};", SafeGetRegister(current_reg + 1)); + code.AddLine("gl_FragDepth = {};", SafeGetRegister(current_reg + 1).AsFloat()); } code.AddLine("return;"); return {}; } - std::string Discard(Operation operation) { + Expression Discard(Operation operation) { // Enclose "discard" in a conditional, so that GLSL compilation does not complain // about unexecuted instructions that may follow this. code.AddLine("if (true) {{"); @@ -1623,8 +1805,8 @@ private: return {}; } - std::string EmitVertex(Operation operation) { - ASSERT_MSG(stage == ShaderStage::Geometry, + Expression EmitVertex(Operation operation) { + ASSERT_MSG(stage == ProgramType::Geometry, "EmitVertex is expected to be used in a geometry shader."); // If a geometry shader is attached, it will always flip (it's the last stage before @@ -1634,30 +1816,72 @@ private: return {}; } - std::string EndPrimitive(Operation operation) { - ASSERT_MSG(stage == ShaderStage::Geometry, + Expression EndPrimitive(Operation operation) { + ASSERT_MSG(stage == ProgramType::Geometry, "EndPrimitive is expected to be used in a geometry shader."); code.AddLine("EndPrimitive();"); return {}; } - std::string YNegate(Operation operation) { + Expression YNegate(Operation operation) { // Config pack's third value is Y_NEGATE's state. - return "uintBitsToFloat(config_pack[2])"; + return {"config_pack[2]", Type::Uint}; } template <u32 element> - std::string LocalInvocationId(Operation) { - return "utof(gl_LocalInvocationID"s + GetSwizzle(element) + ')'; + Expression LocalInvocationId(Operation) { + return {"gl_LocalInvocationID"s + GetSwizzle(element), Type::Uint}; } template <u32 element> - std::string WorkGroupId(Operation) { - return "utof(gl_WorkGroupID"s + GetSwizzle(element) + ')'; + Expression WorkGroupId(Operation) { + return {"gl_WorkGroupID"s + GetSwizzle(element), Type::Uint}; } - static constexpr OperationDecompilersArray operation_decompilers = { + Expression BallotThread(Operation operation) { + const std::string value = VisitOperand(operation, 0).AsBool(); + if (!device.HasWarpIntrinsics()) { + LOG_ERROR(Render_OpenGL, + "Nvidia warp intrinsics are not available and its required by a shader"); + // Stub on non-Nvidia devices by simulating all threads voting the same as the active + // one. + return {fmt::format("({} ? 0xFFFFFFFFU : 0U)", value), Type::Uint}; + } + return {fmt::format("ballotThreadNV({})", value), Type::Uint}; + } + + Expression Vote(Operation operation, const char* func) { + const std::string value = VisitOperand(operation, 0).AsBool(); + if (!device.HasWarpIntrinsics()) { + LOG_ERROR(Render_OpenGL, + "Nvidia vote intrinsics are not available and its required by a shader"); + // Stub with a warp size of one. + return {value, Type::Bool}; + } + return {fmt::format("{}({})", func, value), Type::Bool}; + } + + Expression VoteAll(Operation operation) { + return Vote(operation, "allThreadsNV"); + } + + Expression VoteAny(Operation operation) { + return Vote(operation, "anyThreadNV"); + } + + Expression VoteEqual(Operation operation) { + if (!device.HasWarpIntrinsics()) { + LOG_ERROR(Render_OpenGL, + "Nvidia vote intrinsics are not available and its required by a shader"); + // We must return true here since a stub for a theoretical warp size of 1 will always + // return an equal result for all its votes. 
+ return {"true", Type::Bool}; + } + return Vote(operation, "allThreadsEqualNV"); + } + + static constexpr std::array operation_decompilers = { &GLSLDecompiler::Assign, &GLSLDecompiler::Select, @@ -1669,6 +1893,8 @@ private: &GLSLDecompiler::Negate<Type::Float>, &GLSLDecompiler::Absolute<Type::Float>, &GLSLDecompiler::FClamp, + &GLSLDecompiler::FCastHalf0, + &GLSLDecompiler::FCastHalf1, &GLSLDecompiler::Min<Type::Float>, &GLSLDecompiler::Max<Type::Float>, &GLSLDecompiler::FCos, @@ -1729,6 +1955,7 @@ private: &GLSLDecompiler::Absolute<Type::HalfFloat>, &GLSLDecompiler::HNegate, &GLSLDecompiler::HClamp, + &GLSLDecompiler::HCastFloat, &GLSLDecompiler::HUnpack, &GLSLDecompiler::HMergeF32, &GLSLDecompiler::HMergeH0, @@ -1741,8 +1968,7 @@ private: &GLSLDecompiler::LogicalXor, &GLSLDecompiler::LogicalNegate, &GLSLDecompiler::LogicalPick2, - &GLSLDecompiler::LogicalAll2, - &GLSLDecompiler::LogicalAny2, + &GLSLDecompiler::LogicalAnd2, &GLSLDecompiler::LogicalLessThan<Type::Float>, &GLSLDecompiler::LogicalEqual<Type::Float>, @@ -1789,6 +2015,7 @@ private: &GLSLDecompiler::ImageStore, &GLSLDecompiler::Branch, + &GLSLDecompiler::BranchIndirect, &GLSLDecompiler::PushFlowStack, &GLSLDecompiler::PopFlowStack, &GLSLDecompiler::Exit, @@ -1804,7 +2031,13 @@ private: &GLSLDecompiler::WorkGroupId<0>, &GLSLDecompiler::WorkGroupId<1>, &GLSLDecompiler::WorkGroupId<2>, + + &GLSLDecompiler::BallotThread, + &GLSLDecompiler::VoteAll, + &GLSLDecompiler::VoteAny, + &GLSLDecompiler::VoteEqual, }; + static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount)); std::string GetRegister(u32 index) const { return GetDeclarationWithSuffix(index, "gpr"); @@ -1844,8 +2077,8 @@ private: } std::string GetInternalFlag(InternalFlag flag) const { - constexpr std::array<const char*, 4> InternalFlagNames = {"zero_flag", "sign_flag", - "carry_flag", "overflow_flag"}; + constexpr std::array InternalFlagNames = {"zero_flag", "sign_flag", "carry_flag", + "overflow_flag"}; const auto index = static_cast<u32>(flag); ASSERT(index < static_cast<u32>(InternalFlag::Amount)); @@ -1869,7 +2102,7 @@ private: } u32 GetNumPhysicalInputAttributes() const { - return stage == ShaderStage::Vertex ? GetNumPhysicalAttributes() : GetNumPhysicalVaryings(); + return IsVertexShader(stage) ? 
GetNumPhysicalAttributes() : GetNumPhysicalVaryings(); } u32 GetNumPhysicalAttributes() const { @@ -1882,7 +2115,7 @@ private: const Device& device; const ShaderIR& ir; - const ShaderStage stage; + const ProgramType stage; const std::string suffix; const Header header; @@ -1893,27 +2126,19 @@ private: std::string GetCommonDeclarations() { return fmt::format( - "#define MAX_CONSTBUFFER_ELEMENTS {}\n" "#define ftoi floatBitsToInt\n" "#define ftou floatBitsToUint\n" "#define itof intBitsToFloat\n" "#define utof uintBitsToFloat\n\n" - "float fromHalf2(vec2 pair) {{\n" - " return utof(packHalf2x16(pair));\n" - "}}\n\n" - "vec2 toHalf2(float value) {{\n" - " return unpackHalf2x16(ftou(value));\n" - "}}\n\n" - "bvec2 halfFloatNanComparison(bvec2 comparison, vec2 pair1, vec2 pair2) {{\n" + "bvec2 HalfFloatNanComparison(bvec2 comparison, vec2 pair1, vec2 pair2) {{\n" " bvec2 is_nan1 = isnan(pair1);\n" " bvec2 is_nan2 = isnan(pair2);\n" " return bvec2(comparison.x || is_nan1.x || is_nan2.x, comparison.y || is_nan1.y || " "is_nan2.y);\n" - "}}\n", - MAX_CONSTBUFFER_ELEMENTS); + "}}\n\n"); } -ProgramResult Decompile(const Device& device, const ShaderIR& ir, Maxwell::ShaderStage stage, +ProgramResult Decompile(const Device& device, const ShaderIR& ir, ProgramType stage, const std::string& suffix) { GLSLDecompiler decompiler(device, ir, stage, suffix); decompiler.Decompile(); diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.h b/src/video_core/renderer_opengl/gl_shader_decompiler.h index 14d11c7fc8..2ea02f5bf3 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.h +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.h @@ -12,14 +12,26 @@ #include "video_core/engines/maxwell_3d.h" #include "video_core/shader/shader_ir.h" -namespace OpenGL { -class Device; -} - namespace VideoCommon::Shader { class ShaderIR; } +namespace OpenGL { + +class Device; + +enum class ProgramType : u32 { + VertexA = 0, + VertexB = 1, + TessellationControl = 2, + TessellationEval = 3, + Geometry = 4, + Fragment = 5, + Compute = 6 +}; + +} // namespace OpenGL + namespace OpenGL::GLShader { struct ShaderEntries; @@ -78,12 +90,13 @@ struct ShaderEntries { std::vector<ImageEntry> images; std::vector<GlobalMemoryEntry> global_memory_entries; std::array<bool, Maxwell::NumClipDistances> clip_distances{}; + bool shader_viewport_layer_array{}; std::size_t shader_length{}; }; std::string GetCommonDeclarations(); ProgramResult Decompile(const Device& device, const VideoCommon::Shader::ShaderIR& ir, - Maxwell::ShaderStage stage, const std::string& suffix); + ProgramType stage, const std::string& suffix); } // namespace OpenGL::GLShader diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp index 10688397bc..969fe9ced2 100644 --- a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp @@ -51,7 +51,7 @@ ShaderCacheVersionHash GetShaderCacheVersionHash() { } // namespace -ShaderDiskCacheRaw::ShaderDiskCacheRaw(u64 unique_identifier, Maxwell::ShaderProgram program_type, +ShaderDiskCacheRaw::ShaderDiskCacheRaw(u64 unique_identifier, ProgramType program_type, u32 program_code_size, u32 program_code_size_b, ProgramCode program_code, ProgramCode program_code_b) : unique_identifier{unique_identifier}, program_type{program_type}, @@ -373,6 +373,12 @@ std::optional<ShaderDiskCacheDecompiled> ShaderDiskCacheOpenGL::LoadDecompiledEn } } + bool shader_viewport_layer_array{}; + if 
(!LoadObjectFromPrecompiled(shader_viewport_layer_array)) { + return {}; + } + entry.entries.shader_viewport_layer_array = shader_viewport_layer_array; + u64 shader_length{}; if (!LoadObjectFromPrecompiled(shader_length)) { return {}; @@ -445,6 +451,10 @@ bool ShaderDiskCacheOpenGL::SaveDecompiledFile(u64 unique_identifier, const std: } } + if (!SaveObjectToPrecompiled(entries.shader_viewport_layer_array)) { + return false; + } + if (!SaveObjectToPrecompiled(static_cast<u64>(entries.shader_length))) { return false; } diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.h b/src/video_core/renderer_opengl/gl_shader_disk_cache.h index 4f296dda6e..cc8bbd61e1 100644 --- a/src/video_core/renderer_opengl/gl_shader_disk_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.h @@ -18,7 +18,6 @@ #include "common/assert.h" #include "common/common_types.h" #include "core/file_sys/vfs_vector.h" -#include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_opengl/gl_shader_gen.h" namespace Core { @@ -34,14 +33,11 @@ namespace OpenGL { struct ShaderDiskCacheUsage; struct ShaderDiskCacheDump; -using ShaderDumpsMap = std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump>; - using ProgramCode = std::vector<u64>; -using Maxwell = Tegra::Engines::Maxwell3D::Regs; - +using ShaderDumpsMap = std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump>; using TextureBufferUsage = std::bitset<64>; -/// Allocated bindings used by an OpenGL shader program. +/// Allocated bindings used by an OpenGL shader program struct BaseBindings { u32 cbuf{}; u32 gmem{}; @@ -126,7 +122,7 @@ namespace OpenGL { /// Describes a shader how it's used by the guest GPU class ShaderDiskCacheRaw { public: - explicit ShaderDiskCacheRaw(u64 unique_identifier, Maxwell::ShaderProgram program_type, + explicit ShaderDiskCacheRaw(u64 unique_identifier, ProgramType program_type, u32 program_code_size, u32 program_code_size_b, ProgramCode program_code, ProgramCode program_code_b); ShaderDiskCacheRaw(); @@ -141,30 +137,13 @@ public: } bool HasProgramA() const { - return program_type == Maxwell::ShaderProgram::VertexA; + return program_type == ProgramType::VertexA; } - Maxwell::ShaderProgram GetProgramType() const { + ProgramType GetProgramType() const { return program_type; } - Maxwell::ShaderStage GetProgramStage() const { - switch (program_type) { - case Maxwell::ShaderProgram::VertexA: - case Maxwell::ShaderProgram::VertexB: - return Maxwell::ShaderStage::Vertex; - case Maxwell::ShaderProgram::TesselationControl: - return Maxwell::ShaderStage::TesselationControl; - case Maxwell::ShaderProgram::TesselationEval: - return Maxwell::ShaderStage::TesselationEval; - case Maxwell::ShaderProgram::Geometry: - return Maxwell::ShaderStage::Geometry; - case Maxwell::ShaderProgram::Fragment: - return Maxwell::ShaderStage::Fragment; - } - UNREACHABLE(); - } - const ProgramCode& GetProgramCode() const { return program_code; } @@ -175,7 +154,7 @@ public: private: u64 unique_identifier{}; - Maxwell::ShaderProgram program_type{}; + ProgramType program_type{}; u32 program_code_size{}; u32 program_code_size_b{}; diff --git a/src/video_core/renderer_opengl/gl_shader_gen.cpp b/src/video_core/renderer_opengl/gl_shader_gen.cpp index 9148629ec0..3a8d9e1da4 100644 --- a/src/video_core/renderer_opengl/gl_shader_gen.cpp +++ b/src/video_core/renderer_opengl/gl_shader_gen.cpp @@ -14,7 +14,8 @@ using Tegra::Engines::Maxwell3D; using VideoCommon::Shader::ProgramCode; using VideoCommon::Shader::ShaderIR; -static constexpr u32 
PROGRAM_OFFSET{10}; +static constexpr u32 PROGRAM_OFFSET = 10; +static constexpr u32 COMPUTE_OFFSET = 0; ProgramResult GenerateVertexShader(const Device& device, const ShaderSetup& setup) { const std::string id = fmt::format("{:016x}", setup.program.unique_identifier); @@ -29,17 +30,15 @@ layout (std140, binding = EMULATION_UBO_BINDING) uniform vs_config { }; )"; - const ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET); - ProgramResult program = - Decompile(device, program_ir, Maxwell3D::Regs::ShaderStage::Vertex, "vertex"); + const ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET, setup.program.size_a); + const auto stage = setup.IsDualProgram() ? ProgramType::VertexA : ProgramType::VertexB; + ProgramResult program = Decompile(device, program_ir, stage, "vertex"); out += program.first; if (setup.IsDualProgram()) { - const ShaderIR program_ir_b(setup.program.code_b, PROGRAM_OFFSET); - ProgramResult program_b = - Decompile(device, program_ir_b, Maxwell3D::Regs::ShaderStage::Vertex, "vertex_b"); - + const ShaderIR program_ir_b(setup.program.code_b, PROGRAM_OFFSET, setup.program.size_b); + ProgramResult program_b = Decompile(device, program_ir_b, ProgramType::VertexB, "vertex_b"); out += program_b.first; } @@ -80,9 +79,9 @@ layout (std140, binding = EMULATION_UBO_BINDING) uniform gs_config { }; )"; - const ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET); - ProgramResult program = - Decompile(device, program_ir, Maxwell3D::Regs::ShaderStage::Geometry, "geometry"); + + const ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET, setup.program.size_a); + ProgramResult program = Decompile(device, program_ir, ProgramType::Geometry, "geometry"); out += program.first; out += R"( @@ -115,10 +114,8 @@ layout (std140, binding = EMULATION_UBO_BINDING) uniform fs_config { }; )"; - const ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET); - ProgramResult program = - Decompile(device, program_ir, Maxwell3D::Regs::ShaderStage::Fragment, "fragment"); - + const ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET, setup.program.size_a); + ProgramResult program = Decompile(device, program_ir, ProgramType::Fragment, "fragment"); out += program.first; out += R"( @@ -130,4 +127,22 @@ void main() { return {std::move(out), std::move(program.second)}; } +ProgramResult GenerateComputeShader(const Device& device, const ShaderSetup& setup) { + const std::string id = fmt::format("{:016x}", setup.program.unique_identifier); + + std::string out = "// Shader Unique Id: CS" + id + "\n\n"; + out += GetCommonDeclarations(); + + const ShaderIR program_ir(setup.program.code, COMPUTE_OFFSET, setup.program.size_a); + ProgramResult program = Decompile(device, program_ir, ProgramType::Compute, "compute"); + out += program.first; + + out += R"( +void main() { + execute_compute(); +} +)"; + return {std::move(out), std::move(program.second)}; +} + } // namespace OpenGL::GLShader diff --git a/src/video_core/renderer_opengl/gl_shader_gen.h b/src/video_core/renderer_opengl/gl_shader_gen.h index 0536c8a034..3833e88ab8 100644 --- a/src/video_core/renderer_opengl/gl_shader_gen.h +++ b/src/video_core/renderer_opengl/gl_shader_gen.h @@ -27,6 +27,8 @@ struct ShaderSetup { ProgramCode code; ProgramCode code_b; // Used for dual vertex shaders u64 unique_identifier; + std::size_t size_a; + std::size_t size_b; } program; /// Used in scenarios where we have a dual vertex shaders @@ -52,4 +54,7 @@ ProgramResult GenerateGeometryShader(const Device& device, const ShaderSetup& se /// Generates the GLSL fragment shader 
program source code for the given FS program ProgramResult GenerateFragmentShader(const Device& device, const ShaderSetup& setup); +/// Generates the GLSL compute shader program source code for the given CS program +ProgramResult GenerateComputeShader(const Device& device, const ShaderSetup& setup); + } // namespace OpenGL::GLShader diff --git a/src/video_core/renderer_opengl/gl_shader_util.cpp b/src/video_core/renderer_opengl/gl_shader_util.cpp index 5f3fe067e2..9e74eda0d4 100644 --- a/src/video_core/renderer_opengl/gl_shader_util.cpp +++ b/src/video_core/renderer_opengl/gl_shader_util.cpp @@ -10,21 +10,25 @@ namespace OpenGL::GLShader { -GLuint LoadShader(const char* source, GLenum type) { - const char* debug_type; +namespace { +const char* GetStageDebugName(GLenum type) { switch (type) { case GL_VERTEX_SHADER: - debug_type = "vertex"; - break; + return "vertex"; case GL_GEOMETRY_SHADER: - debug_type = "geometry"; - break; + return "geometry"; case GL_FRAGMENT_SHADER: - debug_type = "fragment"; - break; - default: - UNREACHABLE(); + return "fragment"; + case GL_COMPUTE_SHADER: + return "compute"; } + UNIMPLEMENTED(); + return "unknown"; +} +} // Anonymous namespace + +GLuint LoadShader(const char* source, GLenum type) { + const char* debug_type = GetStageDebugName(type); const GLuint shader_id = glCreateShader(type); glShaderSource(shader_id, 1, &source, nullptr); LOG_DEBUG(Render_OpenGL, "Compiling {} shader...", debug_type); diff --git a/src/video_core/renderer_opengl/gl_state.cpp b/src/video_core/renderer_opengl/gl_state.cpp index d86e137ac1..f4777d0b07 100644 --- a/src/video_core/renderer_opengl/gl_state.cpp +++ b/src/video_core/renderer_opengl/gl_state.cpp @@ -6,8 +6,11 @@ #include <glad/glad.h> #include "common/assert.h" #include "common/logging/log.h" +#include "common/microprofile.h" #include "video_core/renderer_opengl/gl_state.h" +MICROPROFILE_DEFINE(OpenGL_State, "OpenGL", "State Change", MP_RGB(192, 128, 128)); + namespace OpenGL { using Maxwell = Tegra::Engines::Maxwell3D::Regs; @@ -162,6 +165,25 @@ OpenGLState::OpenGLState() { alpha_test.ref = 0.0f; } +void OpenGLState::SetDefaultViewports() { + for (auto& item : viewports) { + item.x = 0; + item.y = 0; + item.width = 0; + item.height = 0; + item.depth_range_near = 0.0f; + item.depth_range_far = 1.0f; + item.scissor.enabled = false; + item.scissor.x = 0; + item.scissor.y = 0; + item.scissor.width = 0; + item.scissor.height = 0; + } + + depth_clamp.far_plane = false; + depth_clamp.near_plane = false; +} + void OpenGLState::ApplyDefaultState() { glEnable(GL_BLEND); glDisable(GL_FRAMEBUFFER_SRGB); @@ -523,7 +545,8 @@ void OpenGLState::ApplySamplers() const { } } -void OpenGLState::Apply() const { +void OpenGLState::Apply() { + MICROPROFILE_SCOPE(OpenGL_State); ApplyFramebufferState(); ApplyVertexArrayState(); ApplyShaderProgram(); @@ -532,19 +555,31 @@ void OpenGLState::Apply() const { ApplyPointSize(); ApplyFragmentColorClamp(); ApplyMultisample(); + if (dirty.color_mask) { + ApplyColorMask(); + dirty.color_mask = false; + } ApplyDepthClamp(); - ApplyColorMask(); ApplyViewport(); - ApplyStencilTest(); + if (dirty.stencil_state) { + ApplyStencilTest(); + dirty.stencil_state = false; + } ApplySRgb(); ApplyCulling(); ApplyDepth(); ApplyPrimitiveRestart(); - ApplyBlending(); + if (dirty.blend_state) { + ApplyBlending(); + dirty.blend_state = false; + } ApplyLogicOp(); ApplyTextures(); ApplySamplers(); - ApplyPolygonOffset(); + if (dirty.polygon_offset) { + ApplyPolygonOffset(); + dirty.polygon_offset = false; + } 
ApplyAlphaTest(); } diff --git a/src/video_core/renderer_opengl/gl_state.h b/src/video_core/renderer_opengl/gl_state.h index b0140495df..fdf9a8a12b 100644 --- a/src/video_core/renderer_opengl/gl_state.h +++ b/src/video_core/renderer_opengl/gl_state.h @@ -195,8 +195,9 @@ public: s_rgb_used = false; } + void SetDefaultViewports(); /// Apply this state as the current OpenGL state - void Apply() const; + void Apply(); void ApplyFramebufferState() const; void ApplyVertexArrayState() const; @@ -237,11 +238,41 @@ public: /// Viewport does not affects glClearBuffer so emulate viewport using scissor test void EmulateViewportWithScissor(); + void MarkDirtyBlendState() { + dirty.blend_state = true; + } + + void MarkDirtyStencilState() { + dirty.stencil_state = true; + } + + void MarkDirtyPolygonOffset() { + dirty.polygon_offset = true; + } + + void MarkDirtyColorMask() { + dirty.color_mask = true; + } + + void AllDirty() { + dirty.blend_state = true; + dirty.stencil_state = true; + dirty.polygon_offset = true; + dirty.color_mask = true; + } + private: static OpenGLState cur_state; // Workaround for sRGB problems caused by QT not supporting srgb output static bool s_rgb_used; + struct { + bool blend_state; + bool stencil_state; + bool viewport_state; + bool polygon_offset; + bool color_mask; + } dirty{}; }; } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp index 08ae1a429b..4f135fe030 100644 --- a/src/video_core/renderer_opengl/gl_texture_cache.cpp +++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp @@ -31,6 +31,8 @@ using VideoCore::Surface::SurfaceType; MICROPROFILE_DEFINE(OpenGL_Texture_Upload, "OpenGL", "Texture Upload", MP_RGB(128, 192, 128)); MICROPROFILE_DEFINE(OpenGL_Texture_Download, "OpenGL", "Texture Download", MP_RGB(128, 192, 128)); +MICROPROFILE_DEFINE(OpenGL_Texture_Buffer_Copy, "OpenGL", "Texture Buffer Copy", + MP_RGB(128, 192, 128)); namespace { @@ -135,7 +137,6 @@ constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType component_type) { ASSERT(static_cast<std::size_t>(pixel_format) < tex_format_tuples.size()); const auto& format{tex_format_tuples[static_cast<std::size_t>(pixel_format)]}; - ASSERT(component_type == format.component_type); return format; } @@ -183,6 +184,9 @@ GLint GetSwizzleSource(SwizzleSource source) { } void ApplyTextureDefaults(const SurfaceParams& params, GLuint texture) { + if (params.IsBuffer()) { + return; + } glTextureParameteri(texture, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTextureParameteri(texture, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTextureParameteri(texture, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); @@ -207,6 +211,7 @@ OGLTexture CreateTexture(const SurfaceParams& params, GLenum target, GLenum inte glNamedBufferStorage(texture_buffer.handle, params.width * params.GetBytesPerPixel(), nullptr, GL_DYNAMIC_STORAGE_BIT); glTextureBuffer(texture.handle, internal_format, texture_buffer.handle); + break; case SurfaceTarget::Texture2D: case SurfaceTarget::TextureCubemap: glTextureStorage2D(texture.handle, params.emulated_levels, internal_format, params.width, @@ -483,11 +488,15 @@ void TextureCacheOpenGL::ImageBlit(View& src_view, View& dst_view, const auto& dst_params{dst_view->GetSurfaceParams()}; OpenGLState prev_state{OpenGLState::GetCurState()}; - SCOPE_EXIT({ prev_state.Apply(); }); + SCOPE_EXIT({ + prev_state.AllDirty(); + prev_state.Apply(); + }); OpenGLState state; 
state.draw.read_framebuffer = src_framebuffer.handle; state.draw.draw_framebuffer = dst_framebuffer.handle; + state.AllDirty(); state.Apply(); u32 buffers{}; @@ -535,6 +544,7 @@ void TextureCacheOpenGL::ImageBlit(View& src_view, View& dst_view, } void TextureCacheOpenGL::BufferCopy(Surface& src_surface, Surface& dst_surface) { + MICROPROFILE_SCOPE(OpenGL_Texture_Buffer_Copy); const auto& src_params = src_surface->GetSurfaceParams(); const auto& dst_params = dst_surface->GetSurfaceParams(); UNIMPLEMENTED_IF(src_params.num_levels > 1 || dst_params.num_levels > 1); diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h index ff6ab69881..21324488aa 100644 --- a/src/video_core/renderer_opengl/gl_texture_cache.h +++ b/src/video_core/renderer_opengl/gl_texture_cache.h @@ -51,7 +51,7 @@ public: } protected: - void DecorateSurfaceName(); + void DecorateSurfaceName() override; View CreateView(const ViewParams& view_key) override; View CreateViewInner(const ViewParams& view_key, bool is_proxy); diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index b142521ecc..af9684839b 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp @@ -101,21 +101,19 @@ RendererOpenGL::RendererOpenGL(Core::Frontend::EmuWindow& emu_window, Core::Syst RendererOpenGL::~RendererOpenGL() = default; -/// Swap buffers (render frame) -void RendererOpenGL::SwapBuffers( - std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) { - +void RendererOpenGL::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) { system.GetPerfStats().EndSystemFrame(); // Maintain the rasterizer's state as a priority OpenGLState prev_state = OpenGLState::GetCurState(); + state.AllDirty(); state.Apply(); if (framebuffer) { // If framebuffer is provided, reload it from memory to a texture - if (screen_info.texture.width != (GLsizei)framebuffer->get().width || - screen_info.texture.height != (GLsizei)framebuffer->get().height || - screen_info.texture.pixel_format != framebuffer->get().pixel_format) { + if (screen_info.texture.width != static_cast<GLsizei>(framebuffer->width) || + screen_info.texture.height != static_cast<GLsizei>(framebuffer->height) || + screen_info.texture.pixel_format != framebuffer->pixel_format) { // Reallocate texture if the framebuffer size has changed. // This is expected to not happen very often and hence should not be a // performance problem. @@ -130,6 +128,8 @@ void RendererOpenGL::SwapBuffers( DrawScreen(render_window.GetFramebufferLayout()); + rasterizer->TickFrame(); + render_window.SwapBuffers(); } @@ -139,6 +139,7 @@ void RendererOpenGL::SwapBuffers( system.GetPerfStats().BeginSystemFrame(); // Restore the rasterizer state + prev_state.AllDirty(); prev_state.Apply(); } @@ -146,43 +147,43 @@ void RendererOpenGL::SwapBuffers( * Loads framebuffer from emulated memory into the active OpenGL texture. 
*/ void RendererOpenGL::LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuffer) { - const u32 bytes_per_pixel{Tegra::FramebufferConfig::BytesPerPixel(framebuffer.pixel_format)}; - const u64 size_in_bytes{framebuffer.stride * framebuffer.height * bytes_per_pixel}; - const VAddr framebuffer_addr{framebuffer.address + framebuffer.offset}; - // Framebuffer orientation handling framebuffer_transform_flags = framebuffer.transform_flags; framebuffer_crop_rect = framebuffer.crop_rect; - // Ensure no bad interactions with GL_UNPACK_ALIGNMENT, which by default - // only allows rows to have a memory alignement of 4. - ASSERT(framebuffer.stride % 4 == 0); - - if (!rasterizer->AccelerateDisplay(framebuffer, framebuffer_addr, framebuffer.stride)) { - // Reset the screen info's display texture to its own permanent texture - screen_info.display_texture = screen_info.texture.resource.handle; - - rasterizer->FlushRegion(ToCacheAddr(Memory::GetPointer(framebuffer_addr)), size_in_bytes); - - constexpr u32 linear_bpp = 4; - VideoCore::MortonCopyPixels128(VideoCore::MortonSwizzleMode::MortonToLinear, - framebuffer.width, framebuffer.height, bytes_per_pixel, - linear_bpp, Memory::GetPointer(framebuffer_addr), - gl_framebuffer_data.data()); - - glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(framebuffer.stride)); + const VAddr framebuffer_addr{framebuffer.address + framebuffer.offset}; + if (rasterizer->AccelerateDisplay(framebuffer, framebuffer_addr, framebuffer.stride)) { + return; + } - // Update existing texture - // TODO: Test what happens on hardware when you change the framebuffer dimensions so that - // they differ from the LCD resolution. - // TODO: Applications could theoretically crash yuzu here by specifying too large - // framebuffer sizes. We should make sure that this cannot happen. - glTextureSubImage2D(screen_info.texture.resource.handle, 0, 0, 0, framebuffer.width, - framebuffer.height, screen_info.texture.gl_format, - screen_info.texture.gl_type, gl_framebuffer_data.data()); + // Reset the screen info's display texture to its own permanent texture + screen_info.display_texture = screen_info.texture.resource.handle; - glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); - } + const auto pixel_format{ + VideoCore::Surface::PixelFormatFromGPUPixelFormat(framebuffer.pixel_format)}; + const u32 bytes_per_pixel{VideoCore::Surface::GetBytesPerPixel(pixel_format)}; + const u64 size_in_bytes{framebuffer.stride * framebuffer.height * bytes_per_pixel}; + const auto host_ptr{Memory::GetPointer(framebuffer_addr)}; + rasterizer->FlushRegion(ToCacheAddr(host_ptr), size_in_bytes); + + // TODO(Rodrigo): Read this from HLE + constexpr u32 block_height_log2 = 4; + VideoCore::MortonSwizzle(VideoCore::MortonSwizzleMode::MortonToLinear, pixel_format, + framebuffer.stride, block_height_log2, framebuffer.height, 0, 1, 1, + gl_framebuffer_data.data(), host_ptr); + + glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(framebuffer.stride)); + + // Update existing texture + // TODO: Test what happens on hardware when you change the framebuffer dimensions so that + // they differ from the LCD resolution. + // TODO: Applications could theoretically crash yuzu here by specifying too large + // framebuffer sizes. We should make sure that this cannot happen. 
+ glTextureSubImage2D(screen_info.texture.resource.handle, 0, 0, 0, framebuffer.width, + framebuffer.height, screen_info.texture.gl_format, + screen_info.texture.gl_type, gl_framebuffer_data.data()); + + glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); } /** @@ -205,6 +206,7 @@ void RendererOpenGL::InitOpenGLObjects() { // Link shaders and get variable locations shader.CreateFromSource(vertex_shader, nullptr, fragment_shader); state.draw.shader_program = shader.handle; + state.AllDirty(); state.Apply(); uniform_modelview_matrix = glGetUniformLocation(shader.handle, "modelview_matrix"); uniform_color_texture = glGetUniformLocation(shader.handle, "color_texture"); @@ -262,7 +264,6 @@ void RendererOpenGL::CreateRasterizer() { if (rasterizer) { return; } - // Initialize sRGB Usage OpenGLState::ClearsRGBUsed(); rasterizer = std::make_unique<RasterizerOpenGL>(system, emu_window, screen_info); } @@ -273,22 +274,29 @@ void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture, texture.height = framebuffer.height; texture.pixel_format = framebuffer.pixel_format; + const auto pixel_format{ + VideoCore::Surface::PixelFormatFromGPUPixelFormat(framebuffer.pixel_format)}; + const u32 bytes_per_pixel{VideoCore::Surface::GetBytesPerPixel(pixel_format)}; + gl_framebuffer_data.resize(texture.width * texture.height * bytes_per_pixel); + GLint internal_format; switch (framebuffer.pixel_format) { case Tegra::FramebufferConfig::PixelFormat::ABGR8: internal_format = GL_RGBA8; texture.gl_format = GL_RGBA; texture.gl_type = GL_UNSIGNED_INT_8_8_8_8_REV; - gl_framebuffer_data.resize(texture.width * texture.height * 4); + break; + case Tegra::FramebufferConfig::PixelFormat::RGB565: + internal_format = GL_RGB565; + texture.gl_format = GL_RGB; + texture.gl_type = GL_UNSIGNED_SHORT_5_6_5; break; default: internal_format = GL_RGBA8; texture.gl_format = GL_RGBA; texture.gl_type = GL_UNSIGNED_INT_8_8_8_8_REV; - gl_framebuffer_data.resize(texture.width * texture.height * 4); - LOG_CRITICAL(Render_OpenGL, "Unknown framebuffer pixel format: {}", - static_cast<u32>(framebuffer.pixel_format)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unknown framebuffer pixel format: {}", + static_cast<u32>(framebuffer.pixel_format)); } texture.resource.Release(); @@ -338,12 +346,14 @@ void RendererOpenGL::DrawScreenTriangles(const ScreenInfo& screen_info, float x, // Workaround brigthness problems in SMO by enabling sRGB in the final output // if it has been used in the frame. 
Needed because of this bug in QT: QTBUG-50987 state.framebuffer_srgb.enabled = OpenGLState::GetsRGBUsed(); + state.AllDirty(); state.Apply(); glNamedBufferSubData(vertex_buffer.handle, 0, sizeof(vertices), vertices.data()); glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); // Restore default state state.framebuffer_srgb.enabled = false; state.texture_units[0].texture = 0; + state.AllDirty(); state.Apply(); // Clear sRGB state for the next frame OpenGLState::ClearsRGBUsed(); @@ -388,6 +398,7 @@ void RendererOpenGL::CaptureScreenshot() { GLuint old_read_fb = state.draw.read_framebuffer; GLuint old_draw_fb = state.draw.draw_framebuffer; state.draw.read_framebuffer = state.draw.draw_framebuffer = screenshot_framebuffer.handle; + state.AllDirty(); state.Apply(); Layout::FramebufferLayout layout{renderer_settings.screenshot_framebuffer_layout}; @@ -407,6 +418,7 @@ void RendererOpenGL::CaptureScreenshot() { screenshot_framebuffer.Release(); state.draw.read_framebuffer = old_read_fb; state.draw.draw_framebuffer = old_draw_fb; + state.AllDirty(); state.Apply(); glDeleteRenderbuffers(1, &renderbuffer); diff --git a/src/video_core/renderer_opengl/renderer_opengl.h b/src/video_core/renderer_opengl/renderer_opengl.h index 4aebf2321b..9bd0863689 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.h +++ b/src/video_core/renderer_opengl/renderer_opengl.h @@ -43,14 +43,13 @@ struct ScreenInfo { TextureInfo texture; }; -class RendererOpenGL : public VideoCore::RendererBase { +class RendererOpenGL final : public VideoCore::RendererBase { public: explicit RendererOpenGL(Core::Frontend::EmuWindow& emu_window, Core::System& system); ~RendererOpenGL() override; /// Swap buffers (render frame) - void SwapBuffers( - std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override; + void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) override; /// Initialize the renderer bool Init() override; diff --git a/src/video_core/renderer_opengl/utils.cpp b/src/video_core/renderer_opengl/utils.cpp index 68c36988dd..c504a2c1aa 100644 --- a/src/video_core/renderer_opengl/utils.cpp +++ b/src/video_core/renderer_opengl/utils.cpp @@ -13,29 +13,67 @@ namespace OpenGL { +VertexArrayPushBuffer::VertexArrayPushBuffer() = default; + +VertexArrayPushBuffer::~VertexArrayPushBuffer() = default; + +void VertexArrayPushBuffer::Setup(GLuint vao_) { + vao = vao_; + index_buffer = nullptr; + vertex_buffers.clear(); +} + +void VertexArrayPushBuffer::SetIndexBuffer(const GLuint* buffer) { + index_buffer = buffer; +} + +void VertexArrayPushBuffer::SetVertexBuffer(GLuint binding_index, const GLuint* buffer, + GLintptr offset, GLsizei stride) { + vertex_buffers.push_back(Entry{binding_index, buffer, offset, stride}); +} + +void VertexArrayPushBuffer::Bind() { + if (index_buffer) { + glVertexArrayElementBuffer(vao, *index_buffer); + } + + // TODO(Rodrigo): Find a way to ARB_multi_bind this + for (const auto& entry : vertex_buffers) { + glVertexArrayVertexBuffer(vao, entry.binding_index, *entry.buffer, entry.offset, + entry.stride); + } +} + BindBuffersRangePushBuffer::BindBuffersRangePushBuffer(GLenum target) : target{target} {} BindBuffersRangePushBuffer::~BindBuffersRangePushBuffer() = default; void BindBuffersRangePushBuffer::Setup(GLuint first_) { first = first_; - buffers.clear(); + buffer_pointers.clear(); offsets.clear(); sizes.clear(); } -void BindBuffersRangePushBuffer::Push(GLuint buffer, GLintptr offset, GLsizeiptr size) { - buffers.push_back(buffer); +void BindBuffersRangePushBuffer::Push(const GLuint* 
buffer, GLintptr offset, GLsizeiptr size) { + buffer_pointers.push_back(buffer); offsets.push_back(offset); sizes.push_back(size); } -void BindBuffersRangePushBuffer::Bind() const { - const std::size_t count{buffers.size()}; +void BindBuffersRangePushBuffer::Bind() { + // Ensure sizes are valid. + const std::size_t count{buffer_pointers.size()}; DEBUG_ASSERT(count == offsets.size() && count == sizes.size()); if (count == 0) { return; } + + // Dereference buffers. + buffers.resize(count); + std::transform(buffer_pointers.begin(), buffer_pointers.end(), buffers.begin(), + [](const GLuint* pointer) { return *pointer; }); + glBindBuffersRange(target, first, static_cast<GLsizei>(count), buffers.data(), offsets.data(), sizes.data()); } diff --git a/src/video_core/renderer_opengl/utils.h b/src/video_core/renderer_opengl/utils.h index 4a752f3b49..6c2b455466 100644 --- a/src/video_core/renderer_opengl/utils.h +++ b/src/video_core/renderer_opengl/utils.h @@ -11,20 +11,49 @@ namespace OpenGL { -class BindBuffersRangePushBuffer { +class VertexArrayPushBuffer final { public: - BindBuffersRangePushBuffer(GLenum target); + explicit VertexArrayPushBuffer(); + ~VertexArrayPushBuffer(); + + void Setup(GLuint vao_); + + void SetIndexBuffer(const GLuint* buffer); + + void SetVertexBuffer(GLuint binding_index, const GLuint* buffer, GLintptr offset, + GLsizei stride); + + void Bind(); + +private: + struct Entry { + GLuint binding_index{}; + const GLuint* buffer{}; + GLintptr offset{}; + GLsizei stride{}; + }; + + GLuint vao{}; + const GLuint* index_buffer{}; + std::vector<Entry> vertex_buffers; +}; + +class BindBuffersRangePushBuffer final { +public: + explicit BindBuffersRangePushBuffer(GLenum target); ~BindBuffersRangePushBuffer(); void Setup(GLuint first_); - void Push(GLuint buffer, GLintptr offset, GLsizeiptr size); + void Push(const GLuint* buffer, GLintptr offset, GLsizeiptr size); - void Bind() const; + void Bind(); private: - GLenum target; - GLuint first; + GLenum target{}; + GLuint first{}; + std::vector<const GLuint*> buffer_pointers; + std::vector<GLuint> buffers; std::vector<GLintptr> offsets; std::vector<GLsizeiptr> sizes; diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp index 02a9f5ecb3..d2e9f40315 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp @@ -109,8 +109,8 @@ void VKBufferCache::Reserve(std::size_t max_size) { } } -VKExecutionContext VKBufferCache::Send(VKExecutionContext exctx) { - return stream_buffer->Send(exctx, buffer_offset - buffer_offset_base); +void VKBufferCache::Send() { + stream_buffer->Send(buffer_offset - buffer_offset_base); } void VKBufferCache::AlignBuffer(std::size_t alignment) { diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h index 3edf460df7..49f13bcdcd 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.h +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h @@ -77,7 +77,7 @@ public: void Reserve(std::size_t max_size); /// Ensures that the set data is sent to the device. - [[nodiscard]] VKExecutionContext Send(VKExecutionContext exctx); + void Send(); /// Returns the buffer cache handle. 
vk::Buffer GetBuffer() const { diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.h b/src/video_core/renderer_vulkan/vk_sampler_cache.h index 771b05c739..1f73b716b5 100644 --- a/src/video_core/renderer_vulkan/vk_sampler_cache.h +++ b/src/video_core/renderer_vulkan/vk_sampler_cache.h @@ -4,9 +4,6 @@ #pragma once -#include <unordered_map> - -#include "common/common_types.h" #include "video_core/renderer_vulkan/declarations.h" #include "video_core/sampler_cache.h" #include "video_core/textures/texture.h" @@ -21,9 +18,9 @@ public: ~VKSamplerCache(); protected: - UniqueSampler CreateSampler(const Tegra::Texture::TSCEntry& tsc) const; + UniqueSampler CreateSampler(const Tegra::Texture::TSCEntry& tsc) const override; - vk::Sampler ToSamplerType(const UniqueSampler& sampler) const; + vk::Sampler ToSamplerType(const UniqueSampler& sampler) const override; private: const VKDevice& device; diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp index f1fea1871d..0f81164585 100644 --- a/src/video_core/renderer_vulkan/vk_scheduler.cpp +++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp @@ -19,23 +19,19 @@ VKScheduler::VKScheduler(const VKDevice& device, VKResourceManager& resource_man VKScheduler::~VKScheduler() = default; -VKExecutionContext VKScheduler::GetExecutionContext() const { - return VKExecutionContext(current_fence, current_cmdbuf); -} - -VKExecutionContext VKScheduler::Flush(vk::Semaphore semaphore) { +void VKScheduler::Flush(bool release_fence, vk::Semaphore semaphore) { SubmitExecution(semaphore); - current_fence->Release(); + if (release_fence) + current_fence->Release(); AllocateNewContext(); - return GetExecutionContext(); } -VKExecutionContext VKScheduler::Finish(vk::Semaphore semaphore) { +void VKScheduler::Finish(bool release_fence, vk::Semaphore semaphore) { SubmitExecution(semaphore); current_fence->Wait(); - current_fence->Release(); + if (release_fence) + current_fence->Release(); AllocateNewContext(); - return GetExecutionContext(); } void VKScheduler::SubmitExecution(vk::Semaphore semaphore) { diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h index cfaf5376fc..0e5b49c7f8 100644 --- a/src/video_core/renderer_vulkan/vk_scheduler.h +++ b/src/video_core/renderer_vulkan/vk_scheduler.h @@ -10,10 +10,43 @@ namespace Vulkan { class VKDevice; -class VKExecutionContext; class VKFence; class VKResourceManager; +class VKFenceView { +public: + VKFenceView() = default; + VKFenceView(VKFence* const& fence) : fence{fence} {} + + VKFence* operator->() const noexcept { + return fence; + } + + operator VKFence&() const noexcept { + return *fence; + } + +private: + VKFence* const& fence; +}; + +class VKCommandBufferView { +public: + VKCommandBufferView() = default; + VKCommandBufferView(const vk::CommandBuffer& cmdbuf) : cmdbuf{cmdbuf} {} + + const vk::CommandBuffer* operator->() const noexcept { + return &cmdbuf; + } + + operator vk::CommandBuffer() const noexcept { + return cmdbuf; + } + +private: + const vk::CommandBuffer& cmdbuf; +}; + /// The scheduler abstracts command buffer and fence management with an interface that's able to do /// OpenGL-like operations on Vulkan command buffers. class VKScheduler { @@ -21,16 +54,21 @@ public: explicit VKScheduler(const VKDevice& device, VKResourceManager& resource_manager); ~VKScheduler(); - /// Gets the current execution context. 
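VKFenceView and VKCommandBufferView above keep a reference to the scheduler's current fence pointer and command buffer rather than copies, so code holding a view keeps seeing whatever the scheduler swaps in when it allocates a new context. A small self-contained sketch of that view-over-a-live-handle pattern, with made-up Fence/FenceView names:

    #include <cstdio>

    struct Fence {
        int id;
    };

    class FenceView {
    public:
        explicit FenceView(Fence* const& fence) : fence{fence} {}

        const Fence* operator->() const noexcept {
            return fence;
        }

    private:
        Fence* const& fence; // reference to the owner's pointer, not a copy of it
    };

    int main() {
        Fence first{1};
        Fence second{2};
        Fence* current = &first;

        const FenceView view{current}; // taken before the owner switches fences
        current = &second;             // owner allocates a new context
        std::printf("%d\n", view->id); // prints 2: the view follows the owner's pointer
    }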
- [[nodiscard]] VKExecutionContext GetExecutionContext() const; + /// Gets a reference to the current fence. + VKFenceView GetFence() const { + return current_fence; + } + + /// Gets a reference to the current command buffer. + VKCommandBufferView GetCommandBuffer() const { + return current_cmdbuf; + } - /// Sends the current execution context to the GPU. It invalidates the current execution context - /// and returns a new one. - VKExecutionContext Flush(vk::Semaphore semaphore = nullptr); + /// Sends the current execution context to the GPU. + void Flush(bool release_fence = true, vk::Semaphore semaphore = nullptr); - /// Sends the current execution context to the GPU and waits for it to complete. It invalidates - /// the current execution context and returns a new one. - VKExecutionContext Finish(vk::Semaphore semaphore = nullptr); + /// Sends the current execution context to the GPU and waits for it to complete. + void Finish(bool release_fence = true, vk::Semaphore semaphore = nullptr); private: void SubmitExecution(vk::Semaphore semaphore); @@ -44,26 +82,4 @@ private: VKFence* next_fence = nullptr; }; -class VKExecutionContext { - friend class VKScheduler; - -public: - VKExecutionContext() = default; - - VKFence& GetFence() const { - return *fence; - } - - vk::CommandBuffer GetCommandBuffer() const { - return cmdbuf; - } - -private: - explicit VKExecutionContext(VKFence* fence, vk::CommandBuffer cmdbuf) - : fence{fence}, cmdbuf{cmdbuf} {} - - VKFence* fence{}; - vk::CommandBuffer cmdbuf; -}; - } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp index 97ce214b18..a35b45c9c8 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp +++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp @@ -205,10 +205,6 @@ public: } private: - using OperationDecompilerFn = Id (SPIRVDecompiler::*)(Operation); - using OperationDecompilersArray = - std::array<OperationDecompilerFn, static_cast<std::size_t>(OperationCode::Amount)>; - static constexpr auto INTERNAL_FLAGS_COUNT = static_cast<std::size_t>(InternalFlag::Amount); void AllocateBindings() { @@ -430,20 +426,17 @@ private: instance_index = DeclareBuiltIn(spv::BuiltIn::InstanceIndex, spv::StorageClass::Input, t_in_uint, "instance_index"); - bool is_point_size_declared = false; bool is_clip_distances_declared = false; for (const auto index : ir.GetOutputAttributes()) { - if (index == Attribute::Index::PointSize) { - is_point_size_declared = true; - } else if (index == Attribute::Index::ClipDistances0123 || - index == Attribute::Index::ClipDistances4567) { + if (index == Attribute::Index::ClipDistances0123 || + index == Attribute::Index::ClipDistances4567) { is_clip_distances_declared = true; } } std::vector<Id> members; members.push_back(t_float4); - if (is_point_size_declared) { + if (ir.UsesPointSize()) { members.push_back(t_float); } if (is_clip_distances_declared) { @@ -466,7 +459,7 @@ private: position_index = MemberDecorateBuiltIn(spv::BuiltIn::Position, "position", true); point_size_index = - MemberDecorateBuiltIn(spv::BuiltIn::PointSize, "point_size", is_point_size_declared); + MemberDecorateBuiltIn(spv::BuiltIn::PointSize, "point_size", ir.UsesPointSize()); clip_distances_index = MemberDecorateBuiltIn(spv::BuiltIn::ClipDistance, "clip_distances", is_clip_distances_declared); @@ -712,7 +705,8 @@ private: case Attribute::Index::Position: return AccessElement(t_out_float, per_vertex, position_index, abuf->GetElement()); - case 
Attribute::Index::PointSize: + case Attribute::Index::LayerViewportPointSize: + UNIMPLEMENTED_IF(abuf->GetElement() != 3); return AccessElement(t_out_float, per_vertex, point_size_index); case Attribute::Index::ClipDistances0123: return AccessElement(t_out_float, per_vertex, clip_distances_index, @@ -741,6 +735,16 @@ private: return {}; } + Id FCastHalf0(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + + Id FCastHalf1(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + Id HNegate(Operation operation) { UNIMPLEMENTED(); return {}; @@ -751,6 +755,11 @@ private: return {}; } + Id HCastFloat(Operation operation) { + UNIMPLEMENTED(); + return {}; + } + Id HUnpack(Operation operation) { UNIMPLEMENTED(); return {}; @@ -806,12 +815,7 @@ private: return {}; } - Id LogicalAll2(Operation operation) { - UNIMPLEMENTED(); - return {}; - } - - Id LogicalAny2(Operation operation) { + Id LogicalAnd2(Operation operation) { UNIMPLEMENTED(); return {}; } @@ -949,6 +953,14 @@ private: return {}; } + Id BranchIndirect(Operation operation) { + const Id op_a = VisitOperand<Type::Uint>(operation, 0); + + Emit(OpStore(jmp_to, op_a)); + BranchingOp([&]() { Emit(OpBranch(continue_label)); }); + return {}; + } + Id PushFlowStack(Operation operation) { const auto target = std::get_if<ImmediateNode>(&*operation[0]); ASSERT(target); @@ -1060,6 +1072,26 @@ private: return {}; } + Id BallotThread(Operation) { + UNIMPLEMENTED(); + return {}; + } + + Id VoteAll(Operation) { + UNIMPLEMENTED(); + return {}; + } + + Id VoteAny(Operation) { + UNIMPLEMENTED(); + return {}; + } + + Id VoteEqual(Operation) { + UNIMPLEMENTED(); + return {}; + } + Id DeclareBuiltIn(spv::BuiltIn builtin, spv::StorageClass storage, Id type, const std::string& name) { const Id id = OpVariable(type, storage); @@ -1200,7 +1232,7 @@ private: return {}; } - static constexpr OperationDecompilersArray operation_decompilers = { + static constexpr std::array operation_decompilers = { &SPIRVDecompiler::Assign, &SPIRVDecompiler::Ternary<&Module::OpSelect, Type::Float, Type::Bool, Type::Float, @@ -1213,6 +1245,8 @@ private: &SPIRVDecompiler::Unary<&Module::OpFNegate, Type::Float>, &SPIRVDecompiler::Unary<&Module::OpFAbs, Type::Float>, &SPIRVDecompiler::Ternary<&Module::OpFClamp, Type::Float>, + &SPIRVDecompiler::FCastHalf0, + &SPIRVDecompiler::FCastHalf1, &SPIRVDecompiler::Binary<&Module::OpFMin, Type::Float>, &SPIRVDecompiler::Binary<&Module::OpFMax, Type::Float>, &SPIRVDecompiler::Unary<&Module::OpCos, Type::Float>, @@ -1273,6 +1307,7 @@ private: &SPIRVDecompiler::Unary<&Module::OpFAbs, Type::HalfFloat>, &SPIRVDecompiler::HNegate, &SPIRVDecompiler::HClamp, + &SPIRVDecompiler::HCastFloat, &SPIRVDecompiler::HUnpack, &SPIRVDecompiler::HMergeF32, &SPIRVDecompiler::HMergeH0, @@ -1285,8 +1320,7 @@ private: &SPIRVDecompiler::Binary<&Module::OpLogicalNotEqual, Type::Bool>, &SPIRVDecompiler::Unary<&Module::OpLogicalNot, Type::Bool>, &SPIRVDecompiler::LogicalPick2, - &SPIRVDecompiler::LogicalAll2, - &SPIRVDecompiler::LogicalAny2, + &SPIRVDecompiler::LogicalAnd2, &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool, Type::Float>, &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool, Type::Float>, @@ -1334,6 +1368,7 @@ private: &SPIRVDecompiler::ImageStore, &SPIRVDecompiler::Branch, + &SPIRVDecompiler::BranchIndirect, &SPIRVDecompiler::PushFlowStack, &SPIRVDecompiler::PopFlowStack, &SPIRVDecompiler::Exit, @@ -1349,7 +1384,13 @@ private: &SPIRVDecompiler::WorkGroupId<0>, &SPIRVDecompiler::WorkGroupId<1>, &SPIRVDecompiler::WorkGroupId<2>, + + 
&SPIRVDecompiler::BallotThread, + &SPIRVDecompiler::VoteAll, + &SPIRVDecompiler::VoteAny, + &SPIRVDecompiler::VoteEqual, }; + static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount)); const VKDevice& device; const ShaderIR& ir; diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp index 58ffa42f28..62f1427f5a 100644 --- a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp +++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp @@ -46,12 +46,12 @@ std::tuple<u8*, u64, bool> VKStreamBuffer::Reserve(u64 size) { return {mapped_pointer + offset, offset, invalidation_mark.has_value()}; } -VKExecutionContext VKStreamBuffer::Send(VKExecutionContext exctx, u64 size) { +void VKStreamBuffer::Send(u64 size) { ASSERT_MSG(size <= mapped_size, "Reserved size is too small"); if (invalidation_mark) { // TODO(Rodrigo): Find a better way to invalidate than waiting for all watches to finish. - exctx = scheduler.Flush(); + scheduler.Flush(); std::for_each(watches.begin(), watches.begin() + *invalidation_mark, [&](auto& resource) { resource->Wait(); }); invalidation_mark = std::nullopt; @@ -62,11 +62,9 @@ VKExecutionContext VKStreamBuffer::Send(VKExecutionContext exctx, u64 size) { ReserveWatches(WATCHES_RESERVE_CHUNK); } // Add a watch for this allocation. - watches[used_watches++]->Watch(exctx.GetFence()); + watches[used_watches++]->Watch(scheduler.GetFence()); offset += size; - - return exctx; } void VKStreamBuffer::CreateBuffers(VKMemoryManager& memory_manager, vk::BufferUsageFlags usage) { diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.h b/src/video_core/renderer_vulkan/vk_stream_buffer.h index 69d036ccd3..842e541625 100644 --- a/src/video_core/renderer_vulkan/vk_stream_buffer.h +++ b/src/video_core/renderer_vulkan/vk_stream_buffer.h @@ -37,7 +37,7 @@ public: std::tuple<u8*, u64, bool> Reserve(u64 size); /// Ensures that "size" bytes of memory are available to the GPU, potentially recording a copy. - [[nodiscard]] VKExecutionContext Send(VKExecutionContext exctx, u64 size); + void Send(u64 size); vk::Buffer GetBuffer() const { return *buffer; diff --git a/src/video_core/shader/control_flow.cpp b/src/video_core/shader/control_flow.cpp new file mode 100644 index 0000000000..ec3a766900 --- /dev/null +++ b/src/video_core/shader/control_flow.cpp @@ -0,0 +1,481 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
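control_flow.cpp, whose body starts here, rebuilds the shader's basic blocks by following branch targets; when a target lands inside a block that was already recorded, that block is split in two (the BlockCollision::Inside case handled further down). A rough, self-contained illustration of just that splitting step, using simplified hypothetical types:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct BlockInfo {
        unsigned start = 0;
        unsigned end = 0;

        bool IsInside(unsigned address) const {
            return start <= address && address <= end;
        }
    };

    // Split the block containing 'address' so that a new block begins exactly there.
    void SplitAt(std::vector<BlockInfo>& blocks, unsigned address) {
        for (std::size_t i = 0; i < blocks.size(); ++i) {
            if (!blocks[i].IsInside(address) || blocks[i].start == address) {
                continue;
            }
            BlockInfo second_half;
            second_half.start = address;
            second_half.end = blocks[i].end; // keeps the old end
            blocks[i].end = address - 1;     // first half now falls through into it
            blocks.push_back(second_half);
            return;
        }
    }

    int main() {
        std::vector<BlockInfo> blocks(1);
        blocks[0].end = 15;                  // one block covering [0, 15]
        SplitAt(blocks, 8);                  // a branch targets address 8
        for (const BlockInfo& block : blocks) {
            std::printf("[%u, %u]\n", block.start, block.end);
        }
    }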
+ +#include <list> +#include <map> +#include <stack> +#include <unordered_map> +#include <unordered_set> +#include <vector> + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/shader/control_flow.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { +namespace { +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; + +constexpr s32 unassigned_branch = -2; + +struct Query { + u32 address{}; + std::stack<u32> ssy_stack{}; + std::stack<u32> pbk_stack{}; +}; + +struct BlockStack { + BlockStack() = default; + explicit BlockStack(const Query& q) : ssy_stack{q.ssy_stack}, pbk_stack{q.pbk_stack} {} + std::stack<u32> ssy_stack{}; + std::stack<u32> pbk_stack{}; +}; + +struct BlockBranchInfo { + Condition condition{}; + s32 address{exit_branch}; + bool kill{}; + bool is_sync{}; + bool is_brk{}; + bool ignore{}; +}; + +struct BlockInfo { + u32 start{}; + u32 end{}; + bool visited{}; + BlockBranchInfo branch{}; + + bool IsInside(const u32 address) const { + return start <= address && address <= end; + } +}; + +struct CFGRebuildState { + explicit CFGRebuildState(const ProgramCode& program_code, const std::size_t program_size, + const u32 start) + : start{start}, program_code{program_code}, program_size{program_size} {} + + u32 start{}; + std::vector<BlockInfo> block_info{}; + std::list<u32> inspect_queries{}; + std::list<Query> queries{}; + std::unordered_map<u32, u32> registered{}; + std::unordered_set<u32> labels{}; + std::map<u32, u32> ssy_labels{}; + std::map<u32, u32> pbk_labels{}; + std::unordered_map<u32, BlockStack> stacks{}; + const ProgramCode& program_code; + const std::size_t program_size; +}; + +enum class BlockCollision : u32 { None, Found, Inside }; + +std::pair<BlockCollision, u32> TryGetBlock(CFGRebuildState& state, u32 address) { + const auto& blocks = state.block_info; + for (u32 index = 0; index < blocks.size(); index++) { + if (blocks[index].start == address) { + return {BlockCollision::Found, index}; + } + if (blocks[index].IsInside(address)) { + return {BlockCollision::Inside, index}; + } + } + return {BlockCollision::None, 0xFFFFFFFF}; +} + +struct ParseInfo { + BlockBranchInfo branch_info{}; + u32 end_address{}; +}; + +BlockInfo& CreateBlockInfo(CFGRebuildState& state, u32 start, u32 end) { + auto& it = state.block_info.emplace_back(); + it.start = start; + it.end = end; + const u32 index = static_cast<u32>(state.block_info.size() - 1); + state.registered.insert({start, index}); + return it; +} + +Pred GetPredicate(u32 index, bool negated) { + return static_cast<Pred>(index + (negated ? 8 : 0)); +} + +/** + * Returns whether the instruction at the specified offset is a 'sched' instruction. + * Sched instructions always appear before a sequence of 3 instructions. 
+ */ +constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) { + constexpr u32 SchedPeriod = 4; + u32 absolute_offset = offset - main_offset; + + return (absolute_offset % SchedPeriod) == 0; +} + +enum class ParseResult : u32 { + ControlCaught, + BlockEnd, + AbnormalFlow, +}; + +std::pair<ParseResult, ParseInfo> ParseCode(CFGRebuildState& state, u32 address) { + u32 offset = static_cast<u32>(address); + const u32 end_address = static_cast<u32>(state.program_size / sizeof(Instruction)); + ParseInfo parse_info{}; + + const auto insert_label = [](CFGRebuildState& state, u32 address) { + const auto pair = state.labels.emplace(address); + if (pair.second) { + state.inspect_queries.push_back(address); + } + }; + + while (true) { + if (offset >= end_address) { + // ASSERT_OR_EXECUTE can't be used, as it ignores the break + ASSERT_MSG(false, "Shader passed the current limit!"); + parse_info.branch_info.address = exit_branch; + parse_info.branch_info.ignore = false; + break; + } + if (state.registered.count(offset) != 0) { + parse_info.branch_info.address = offset; + parse_info.branch_info.ignore = true; + break; + } + if (IsSchedInstruction(offset, state.start)) { + offset++; + continue; + } + const Instruction instr = {state.program_code[offset]}; + const auto opcode = OpCode::Decode(instr); + if (!opcode || opcode->get().GetType() != OpCode::Type::Flow) { + offset++; + continue; + } + + switch (opcode->get().GetId()) { + case OpCode::Id::EXIT: { + const auto pred_index = static_cast<u32>(instr.pred.pred_index); + parse_info.branch_info.condition.predicate = + GetPredicate(pred_index, instr.negate_pred != 0); + if (parse_info.branch_info.condition.predicate == Pred::NeverExecute) { + offset++; + continue; + } + const ConditionCode cc = instr.flow_condition_code; + parse_info.branch_info.condition.cc = cc; + if (cc == ConditionCode::F) { + offset++; + continue; + } + parse_info.branch_info.address = exit_branch; + parse_info.branch_info.kill = false; + parse_info.branch_info.is_sync = false; + parse_info.branch_info.is_brk = false; + parse_info.branch_info.ignore = false; + parse_info.end_address = offset; + + return {ParseResult::ControlCaught, parse_info}; + } + case OpCode::Id::BRA: { + if (instr.bra.constant_buffer != 0) { + return {ParseResult::AbnormalFlow, parse_info}; + } + const auto pred_index = static_cast<u32>(instr.pred.pred_index); + parse_info.branch_info.condition.predicate = + GetPredicate(pred_index, instr.negate_pred != 0); + if (parse_info.branch_info.condition.predicate == Pred::NeverExecute) { + offset++; + continue; + } + const ConditionCode cc = instr.flow_condition_code; + parse_info.branch_info.condition.cc = cc; + if (cc == ConditionCode::F) { + offset++; + continue; + } + const u32 branch_offset = offset + instr.bra.GetBranchTarget(); + if (branch_offset == 0) { + parse_info.branch_info.address = exit_branch; + } else { + parse_info.branch_info.address = branch_offset; + } + insert_label(state, branch_offset); + parse_info.branch_info.kill = false; + parse_info.branch_info.is_sync = false; + parse_info.branch_info.is_brk = false; + parse_info.branch_info.ignore = false; + parse_info.end_address = offset; + + return {ParseResult::ControlCaught, parse_info}; + } + case OpCode::Id::SYNC: { + const auto pred_index = static_cast<u32>(instr.pred.pred_index); + parse_info.branch_info.condition.predicate = + GetPredicate(pred_index, instr.negate_pred != 0); + if (parse_info.branch_info.condition.predicate == Pred::NeverExecute) { + offset++; + continue; + } + const 
ConditionCode cc = instr.flow_condition_code; + parse_info.branch_info.condition.cc = cc; + if (cc == ConditionCode::F) { + offset++; + continue; + } + parse_info.branch_info.address = unassigned_branch; + parse_info.branch_info.kill = false; + parse_info.branch_info.is_sync = true; + parse_info.branch_info.is_brk = false; + parse_info.branch_info.ignore = false; + parse_info.end_address = offset; + + return {ParseResult::ControlCaught, parse_info}; + } + case OpCode::Id::BRK: { + const auto pred_index = static_cast<u32>(instr.pred.pred_index); + parse_info.branch_info.condition.predicate = + GetPredicate(pred_index, instr.negate_pred != 0); + if (parse_info.branch_info.condition.predicate == Pred::NeverExecute) { + offset++; + continue; + } + const ConditionCode cc = instr.flow_condition_code; + parse_info.branch_info.condition.cc = cc; + if (cc == ConditionCode::F) { + offset++; + continue; + } + parse_info.branch_info.address = unassigned_branch; + parse_info.branch_info.kill = false; + parse_info.branch_info.is_sync = false; + parse_info.branch_info.is_brk = true; + parse_info.branch_info.ignore = false; + parse_info.end_address = offset; + + return {ParseResult::ControlCaught, parse_info}; + } + case OpCode::Id::KIL: { + const auto pred_index = static_cast<u32>(instr.pred.pred_index); + parse_info.branch_info.condition.predicate = + GetPredicate(pred_index, instr.negate_pred != 0); + if (parse_info.branch_info.condition.predicate == Pred::NeverExecute) { + offset++; + continue; + } + const ConditionCode cc = instr.flow_condition_code; + parse_info.branch_info.condition.cc = cc; + if (cc == ConditionCode::F) { + offset++; + continue; + } + parse_info.branch_info.address = exit_branch; + parse_info.branch_info.kill = true; + parse_info.branch_info.is_sync = false; + parse_info.branch_info.is_brk = false; + parse_info.branch_info.ignore = false; + parse_info.end_address = offset; + + return {ParseResult::ControlCaught, parse_info}; + } + case OpCode::Id::SSY: { + const u32 target = offset + instr.bra.GetBranchTarget(); + insert_label(state, target); + state.ssy_labels.emplace(offset, target); + break; + } + case OpCode::Id::PBK: { + const u32 target = offset + instr.bra.GetBranchTarget(); + insert_label(state, target); + state.pbk_labels.emplace(offset, target); + break; + } + case OpCode::Id::BRX: { + return {ParseResult::AbnormalFlow, parse_info}; + } + default: + break; + } + + offset++; + } + parse_info.branch_info.kill = false; + parse_info.branch_info.is_sync = false; + parse_info.branch_info.is_brk = false; + parse_info.end_address = offset - 1; + return {ParseResult::BlockEnd, parse_info}; +} + +bool TryInspectAddress(CFGRebuildState& state) { + if (state.inspect_queries.empty()) { + return false; + } + + const u32 address = state.inspect_queries.front(); + state.inspect_queries.pop_front(); + const auto [result, block_index] = TryGetBlock(state, address); + switch (result) { + case BlockCollision::Found: { + return true; + } + case BlockCollision::Inside: { + // This case is the tricky one: + // We need to Split the block in 2 sepparate blocks + const u32 end = state.block_info[block_index].end; + BlockInfo& new_block = CreateBlockInfo(state, address, end); + BlockInfo& current_block = state.block_info[block_index]; + current_block.end = address - 1; + new_block.branch = current_block.branch; + BlockBranchInfo forward_branch{}; + forward_branch.address = address; + forward_branch.ignore = true; + current_block.branch = forward_branch; + return true; + } + default: + break; + } + 
const auto [parse_result, parse_info] = ParseCode(state, address); + if (parse_result == ParseResult::AbnormalFlow) { + // if it's AbnormalFlow, we end it as false, ending the CFG reconstruction + return false; + } + + BlockInfo& block_info = CreateBlockInfo(state, address, parse_info.end_address); + block_info.branch = parse_info.branch_info; + if (parse_info.branch_info.condition.IsUnconditional()) { + return true; + } + + const u32 fallthrough_address = parse_info.end_address + 1; + state.inspect_queries.push_front(fallthrough_address); + return true; +} + +bool TryQuery(CFGRebuildState& state) { + const auto gather_labels = [](std::stack<u32>& cc, std::map<u32, u32>& labels, + BlockInfo& block) { + auto gather_start = labels.lower_bound(block.start); + const auto gather_end = labels.upper_bound(block.end); + while (gather_start != gather_end) { + cc.push(gather_start->second); + ++gather_start; + } + }; + if (state.queries.empty()) { + return false; + } + + Query& q = state.queries.front(); + const u32 block_index = state.registered[q.address]; + BlockInfo& block = state.block_info[block_index]; + // If the block is visited, check if the stacks match, else gather the ssy/pbk + // labels into the current stack and look if the branch at the end of the block + // consumes a label. Schedule new queries accordingly + if (block.visited) { + BlockStack& stack = state.stacks[q.address]; + const bool all_okay = (stack.ssy_stack.empty() || q.ssy_stack == stack.ssy_stack) && + (stack.pbk_stack.empty() || q.pbk_stack == stack.pbk_stack); + state.queries.pop_front(); + return all_okay; + } + block.visited = true; + state.stacks.insert_or_assign(q.address, BlockStack{q}); + + Query q2(q); + state.queries.pop_front(); + gather_labels(q2.ssy_stack, state.ssy_labels, block); + gather_labels(q2.pbk_stack, state.pbk_labels, block); + if (!block.branch.condition.IsUnconditional()) { + q2.address = block.end + 1; + state.queries.push_back(q2); + } + + Query conditional_query{q2}; + if (block.branch.is_sync) { + if (block.branch.address == unassigned_branch) { + block.branch.address = conditional_query.ssy_stack.top(); + } + conditional_query.ssy_stack.pop(); + } + if (block.branch.is_brk) { + if (block.branch.address == unassigned_branch) { + block.branch.address = conditional_query.pbk_stack.top(); + } + conditional_query.pbk_stack.pop(); + } + conditional_query.address = block.branch.address; + state.queries.push_back(std::move(conditional_query)); + return true; +} +} // Anonymous namespace + +std::optional<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, + std::size_t program_size, u32 start_address) { + CFGRebuildState state{program_code, program_size, start_address}; + + // Inspect Code and generate blocks + state.labels.clear(); + state.labels.emplace(start_address); + state.inspect_queries.push_back(state.start); + while (!state.inspect_queries.empty()) { + if (!TryInspectAddress(state)) { + return {}; + } + } + + // Decompile Stacks + state.queries.push_back(Query{state.start, {}, {}}); + bool decompiled = true; + while (!state.queries.empty()) { + if (!TryQuery(state)) { + decompiled = false; + break; + } + } + + // Sort and organize results + std::sort(state.block_info.begin(), state.block_info.end(), + [](const BlockInfo& a, const BlockInfo& b) { return a.start < b.start; }); + ShaderCharacteristics result_out{}; + result_out.decompilable = decompiled; + result_out.start = start_address; + result_out.end = start_address; + for (const auto& block : state.block_info) { + 
ShaderBlock new_block{}; + new_block.start = block.start; + new_block.end = block.end; + new_block.ignore_branch = block.branch.ignore; + if (!new_block.ignore_branch) { + new_block.branch.cond = block.branch.condition; + new_block.branch.kills = block.branch.kill; + new_block.branch.address = block.branch.address; + } + result_out.end = std::max(result_out.end, block.end); + result_out.blocks.push_back(new_block); + } + if (result_out.decompilable) { + result_out.labels = std::move(state.labels); + return {std::move(result_out)}; + } + + // If it's not decompilable, merge the unlabelled blocks together + auto back = result_out.blocks.begin(); + auto next = std::next(back); + while (next != result_out.blocks.end()) { + if (state.labels.count(next->start) == 0 && next->start == back->end + 1) { + back->end = next->end; + next = result_out.blocks.erase(next); + continue; + } + back = next; + ++next; + } + return {std::move(result_out)}; +} +} // namespace VideoCommon::Shader diff --git a/src/video_core/shader/control_flow.h b/src/video_core/shader/control_flow.h new file mode 100644 index 0000000000..b0a5e4f8c9 --- /dev/null +++ b/src/video_core/shader/control_flow.h @@ -0,0 +1,79 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <list> +#include <optional> +#include <unordered_set> + +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::ConditionCode; +using Tegra::Shader::Pred; + +constexpr s32 exit_branch = -1; + +struct Condition { + Pred predicate{Pred::UnusedIndex}; + ConditionCode cc{ConditionCode::T}; + + bool IsUnconditional() const { + return predicate == Pred::UnusedIndex && cc == ConditionCode::T; + } + + bool operator==(const Condition& other) const { + return std::tie(predicate, cc) == std::tie(other.predicate, other.cc); + } + + bool operator!=(const Condition& other) const { + return !operator==(other); + } +}; + +struct ShaderBlock { + struct Branch { + Condition cond{}; + bool kills{}; + s32 address{}; + + bool operator==(const Branch& b) const { + return std::tie(cond, kills, address) == std::tie(b.cond, b.kills, b.address); + } + + bool operator!=(const Branch& b) const { + return !operator==(b); + } + }; + + u32 start{}; + u32 end{}; + bool ignore_branch{}; + Branch branch{}; + + bool operator==(const ShaderBlock& sb) const { + return std::tie(start, end, ignore_branch, branch) == + std::tie(sb.start, sb.end, sb.ignore_branch, sb.branch); + } + + bool operator!=(const ShaderBlock& sb) const { + return !operator==(sb); + } +}; + +struct ShaderCharacteristics { + std::list<ShaderBlock> blocks{}; + bool decompilable{}; + u32 start{}; + u32 end{}; + std::unordered_set<u32> labels{}; +}; + +std::optional<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, + std::size_t program_size, u32 start_address); + +} // namespace VideoCommon::Shader diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp index 2c9ff28f2b..47a9fd9615 100644 --- a/src/video_core/shader/decode.cpp +++ b/src/video_core/shader/decode.cpp @@ -11,6 +11,7 @@ #include "common/common_types.h" #include "video_core/engines/shader_bytecode.h" #include "video_core/engines/shader_header.h" +#include "video_core/shader/control_flow.h" #include "video_core/shader/node_helper.h" #include "video_core/shader/shader_ir.h" @@ -21,20 +22,6 @@ using Tegra::Shader::OpCode; namespace { 
-/// Merges exit method of two parallel branches. -constexpr ExitMethod ParallelExit(ExitMethod a, ExitMethod b) { - if (a == ExitMethod::Undetermined) { - return b; - } - if (b == ExitMethod::Undetermined) { - return a; - } - if (a == b) { - return a; - } - return ExitMethod::Conditional; -} - /** * Returns whether the instruction at the specified offset is a 'sched' instruction. * Sched instructions always appear before a sequence of 3 instructions. @@ -51,85 +38,104 @@ constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) { void ShaderIR::Decode() { std::memcpy(&header, program_code.data(), sizeof(Tegra::Shader::Header)); - std::set<u32> labels; - const ExitMethod exit_method = Scan(main_offset, MAX_PROGRAM_LENGTH, labels); - if (exit_method != ExitMethod::AlwaysEnd) { - UNREACHABLE_MSG("Program does not always end"); - } - - if (labels.empty()) { - basic_blocks.insert({main_offset, DecodeRange(main_offset, MAX_PROGRAM_LENGTH)}); + disable_flow_stack = false; + const auto info = ScanFlow(program_code, program_size, main_offset); + if (info) { + const auto& shader_info = *info; + coverage_begin = shader_info.start; + coverage_end = shader_info.end; + if (shader_info.decompilable) { + disable_flow_stack = true; + const auto insert_block = [this](NodeBlock& nodes, u32 label) { + if (label == static_cast<u32>(exit_branch)) { + return; + } + basic_blocks.insert({label, nodes}); + }; + const auto& blocks = shader_info.blocks; + NodeBlock current_block; + u32 current_label = static_cast<u32>(exit_branch); + for (auto& block : blocks) { + if (shader_info.labels.count(block.start) != 0) { + insert_block(current_block, current_label); + current_block.clear(); + current_label = block.start; + } + if (!block.ignore_branch) { + DecodeRangeInner(current_block, block.start, block.end); + InsertControlFlow(current_block, block); + } else { + DecodeRangeInner(current_block, block.start, block.end + 1); + } + } + insert_block(current_block, current_label); + return; + } + LOG_WARNING(HW_GPU, "Flow Stack Removing Failed! Falling back to old method"); + // we can't decompile it, fallback to standard method + for (const auto& block : shader_info.blocks) { + basic_blocks.insert({block.start, DecodeRange(block.start, block.end + 1)}); + } return; } + LOG_WARNING(HW_GPU, "Flow Analysis Failed! Falling back to brute force compiling"); + + // Now we need to deal with an undecompilable shader. We need to brute force + // a shader that captures every position. + coverage_begin = main_offset; + const u32 shader_end = static_cast<u32>(program_size / sizeof(u64)); + coverage_end = shader_end; + for (u32 label = main_offset; label < shader_end; label++) { + basic_blocks.insert({label, DecodeRange(label, label + 1)}); + } +} - labels.insert(main_offset); - - for (const u32 label : labels) { - const auto next_it = labels.lower_bound(label + 1); - const u32 next_label = next_it == labels.end() ? MAX_PROGRAM_LENGTH : *next_it; +NodeBlock ShaderIR::DecodeRange(u32 begin, u32 end) { + NodeBlock basic_block; + DecodeRangeInner(basic_block, begin, end); + return basic_block; +} - basic_blocks.insert({label, DecodeRange(label, next_label)}); +void ShaderIR::DecodeRangeInner(NodeBlock& bb, u32 begin, u32 end) { + for (u32 pc = begin; pc < (begin > end ? 
MAX_PROGRAM_LENGTH : end);) { + pc = DecodeInstr(bb, pc); } } -ExitMethod ShaderIR::Scan(u32 begin, u32 end, std::set<u32>& labels) { - const auto [iter, inserted] = - exit_method_map.emplace(std::make_pair(begin, end), ExitMethod::Undetermined); - ExitMethod& exit_method = iter->second; - if (!inserted) - return exit_method; - - for (u32 offset = begin; offset != end && offset != MAX_PROGRAM_LENGTH; ++offset) { - coverage_begin = std::min(coverage_begin, offset); - coverage_end = std::max(coverage_end, offset + 1); - - const Instruction instr = {program_code[offset]}; - const auto opcode = OpCode::Decode(instr); - if (!opcode) - continue; - switch (opcode->get().GetId()) { - case OpCode::Id::EXIT: { - // The EXIT instruction can be predicated, which means that the shader can conditionally - // end on this instruction. We have to consider the case where the condition is not met - // and check the exit method of that other basic block. - using Tegra::Shader::Pred; - if (instr.pred.pred_index == static_cast<u64>(Pred::UnusedIndex)) { - return exit_method = ExitMethod::AlwaysEnd; - } else { - const ExitMethod not_met = Scan(offset + 1, end, labels); - return exit_method = ParallelExit(ExitMethod::AlwaysEnd, not_met); - } +void ShaderIR::InsertControlFlow(NodeBlock& bb, const ShaderBlock& block) { + const auto apply_conditions = [&](const Condition& cond, Node n) -> Node { + Node result = n; + if (cond.cc != ConditionCode::T) { + result = Conditional(GetConditionCode(cond.cc), {result}); } - case OpCode::Id::BRA: { - const u32 target = offset + instr.bra.GetBranchTarget(); - labels.insert(target); - const ExitMethod no_jmp = Scan(offset + 1, end, labels); - const ExitMethod jmp = Scan(target, end, labels); - return exit_method = ParallelExit(no_jmp, jmp); - } - case OpCode::Id::SSY: - case OpCode::Id::PBK: { - // The SSY and PBK use a similar encoding as the BRA instruction. - UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, - "Constant buffer branching is not supported"); - const u32 target = offset + instr.bra.GetBranchTarget(); - labels.insert(target); - // Continue scanning for an exit method. - break; + if (cond.predicate != Pred::UnusedIndex) { + u32 pred = static_cast<u32>(cond.predicate); + const bool is_neg = pred > 7; + if (is_neg) { + pred -= 8; + } + result = Conditional(GetPredicate(pred, is_neg), {result}); } - default: - break; + return result; + }; + if (block.branch.address < 0) { + if (block.branch.kills) { + Node n = Operation(OperationCode::Discard); + n = apply_conditions(block.branch.cond, n); + bb.push_back(n); + global_code.push_back(n); + return; } + Node n = Operation(OperationCode::Exit); + n = apply_conditions(block.branch.cond, n); + bb.push_back(n); + global_code.push_back(n); + return; } - return exit_method = ExitMethod::AlwaysReturn; -} - -NodeBlock ShaderIR::DecodeRange(u32 begin, u32 end) { - NodeBlock basic_block; - for (u32 pc = begin; pc < (begin > end ? 
MAX_PROGRAM_LENGTH : end);) { - pc = DecodeInstr(basic_block, pc); - } - return basic_block; + Node n = Operation(OperationCode::Branch, Immediate(block.branch.address)); + n = apply_conditions(block.branch.cond, n); + bb.push_back(n); + global_code.push_back(n); } u32 ShaderIR::DecodeInstr(NodeBlock& bb, u32 pc) { @@ -140,15 +146,18 @@ u32 ShaderIR::DecodeInstr(NodeBlock& bb, u32 pc) { const Instruction instr = {program_code[pc]}; const auto opcode = OpCode::Decode(instr); + const u32 nv_address = ConvertAddressToNvidiaSpace(pc); // Decoding failure if (!opcode) { UNIMPLEMENTED_MSG("Unhandled instruction: {0:x}", instr.value); + bb.push_back(Comment(fmt::format("{:05x} Unimplemented Shader instruction (0x{:016x})", + nv_address, instr.value))); return pc + 1; } - bb.push_back( - Comment(fmt::format("{}: {} (0x{:016x})", pc, opcode->get().GetName(), instr.value))); + bb.push_back(Comment( + fmt::format("{:05x} {} (0x{:016x})", nv_address, opcode->get().GetName(), instr.value))); using Tegra::Shader::Pred; UNIMPLEMENTED_IF_MSG(instr.pred.full_pred == Pred::NeverExecute, @@ -167,6 +176,7 @@ u32 ShaderIR::DecodeInstr(NodeBlock& bb, u32 pc) { {OpCode::Type::Ffma, &ShaderIR::DecodeFfma}, {OpCode::Type::Hfma2, &ShaderIR::DecodeHfma2}, {OpCode::Type::Conversion, &ShaderIR::DecodeConversion}, + {OpCode::Type::Warp, &ShaderIR::DecodeWarp}, {OpCode::Type::Memory, &ShaderIR::DecodeMemory}, {OpCode::Type::Texture, &ShaderIR::DecodeTexture}, {OpCode::Type::Image, &ShaderIR::DecodeImage}, diff --git a/src/video_core/shader/decode/arithmetic.cpp b/src/video_core/shader/decode/arithmetic.cpp index 87d8fecaa3..1473c282a0 100644 --- a/src/video_core/shader/decode/arithmetic.cpp +++ b/src/video_core/shader/decode/arithmetic.cpp @@ -42,11 +42,14 @@ u32 ShaderIR::DecodeArithmetic(NodeBlock& bb, u32 pc) { case OpCode::Id::FMUL_R: case OpCode::Id::FMUL_IMM: { // FMUL does not have 'abs' bits and only the second operand has a 'neg' bit. 
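DecodeInstr above routes each opcode type through a table of pointer-to-member decoder functions, which is how the new OpCode::Type::Warp entry is hooked in. A compact standalone sketch of that dispatch idiom, with made-up types and only two entries:

    #include <cstdio>

    enum class Type { Arithmetic, Warp };

    class Decoder {
    public:
        unsigned DecodeInstr(Type type, unsigned pc) {
            for (const Entry& entry : table) {
                if (entry.type == type) {
                    return (this->*entry.decode)(pc); // call through the member pointer
                }
            }
            return pc + 1; // no decoder registered: skip the instruction
        }

    private:
        unsigned DecodeArithmetic(unsigned pc) {
            std::puts("arithmetic");
            return pc + 1;
        }

        unsigned DecodeWarp(unsigned pc) {
            std::puts("warp");
            return pc + 1;
        }

        using DecodeFn = unsigned (Decoder::*)(unsigned);

        struct Entry {
            Type type;
            DecodeFn decode;
        };

        static constexpr Entry table[2] = {
            {Type::Arithmetic, &Decoder::DecodeArithmetic},
            {Type::Warp, &Decoder::DecodeWarp},
        };
    };

    int main() {
        Decoder decoder;
        decoder.DecodeInstr(Type::Warp, 0);
        return 0;
    }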
- UNIMPLEMENTED_IF_MSG(instr.fmul.tab5cb8_2 != 0, "FMUL tab5cb8_2({}) is not implemented", - instr.fmul.tab5cb8_2.Value()); - UNIMPLEMENTED_IF_MSG( - instr.fmul.tab5c68_0 != 1, "FMUL tab5cb8_0({}) is not implemented", - instr.fmul.tab5c68_0.Value()); // SMO typical sends 1 here which seems to be the default + if (instr.fmul.tab5cb8_2 != 0) { + LOG_WARNING(HW_GPU, "FMUL tab5cb8_2({}) is not implemented", + instr.fmul.tab5cb8_2.Value()); + } + if (instr.fmul.tab5c68_0 != 1) { + LOG_WARNING(HW_GPU, "FMUL tab5cb8_0({}) is not implemented", + instr.fmul.tab5c68_0.Value()); + } op_b = GetOperandAbsNegFloat(op_b, false, instr.fmul.negate_b); diff --git a/src/video_core/shader/decode/arithmetic_half_immediate.cpp b/src/video_core/shader/decode/arithmetic_half_immediate.cpp index 7bcf38f233..6466fc0117 100644 --- a/src/video_core/shader/decode/arithmetic_half_immediate.cpp +++ b/src/video_core/shader/decode/arithmetic_half_immediate.cpp @@ -23,7 +23,9 @@ u32 ShaderIR::DecodeArithmeticHalfImmediate(NodeBlock& bb, u32 pc) { LOG_WARNING(HW_GPU, "{} FTZ not implemented", opcode->get().GetName()); } } else { - UNIMPLEMENTED_IF(instr.alu_half_imm.precision != Tegra::Shader::HalfPrecision::None); + if (instr.alu_half_imm.precision != Tegra::Shader::HalfPrecision::None) { + LOG_WARNING(HW_GPU, "{} FTZ not implemented", opcode->get().GetName()); + } } Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.alu_half_imm.type_a); diff --git a/src/video_core/shader/decode/conversion.cpp b/src/video_core/shader/decode/conversion.cpp index 4221f0c58a..32facd6bae 100644 --- a/src/video_core/shader/decode/conversion.cpp +++ b/src/video_core/shader/decode/conversion.cpp @@ -14,6 +14,12 @@ using Tegra::Shader::Instruction; using Tegra::Shader::OpCode; using Tegra::Shader::Register; +namespace { +constexpr OperationCode GetFloatSelector(u64 selector) { + return selector == 0 ? 
OperationCode::FCastHalf0 : OperationCode::FCastHalf1; +} +} // Anonymous namespace + u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) { const Instruction instr = {program_code[pc]}; const auto opcode = OpCode::Decode(instr); @@ -22,7 +28,7 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) { case OpCode::Id::I2I_R: case OpCode::Id::I2I_C: case OpCode::Id::I2I_IMM: { - UNIMPLEMENTED_IF(instr.conversion.selector); + UNIMPLEMENTED_IF(instr.conversion.int_src.selector != 0); UNIMPLEMENTED_IF(instr.conversion.dst_size != Register::Size::Word); UNIMPLEMENTED_IF(instr.alu.saturate_d); @@ -57,8 +63,8 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) { case OpCode::Id::I2F_R: case OpCode::Id::I2F_C: case OpCode::Id::I2F_IMM: { - UNIMPLEMENTED_IF(instr.conversion.dst_size != Register::Size::Word); - UNIMPLEMENTED_IF(instr.conversion.selector); + UNIMPLEMENTED_IF(instr.conversion.int_src.selector != 0); + UNIMPLEMENTED_IF(instr.conversion.dst_size == Register::Size::Long); UNIMPLEMENTED_IF_MSG(instr.generates_cc, "Condition codes generation in I2F is not implemented"); @@ -82,14 +88,19 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) { value = GetOperandAbsNegFloat(value, false, instr.conversion.negate_a); SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + + if (instr.conversion.dst_size == Register::Size::Short) { + value = Operation(OperationCode::HCastFloat, PRECISE, value); + } + SetRegister(bb, instr.gpr0, value); break; } case OpCode::Id::F2F_R: case OpCode::Id::F2F_C: case OpCode::Id::F2F_IMM: { - UNIMPLEMENTED_IF(instr.conversion.f2f.dst_size != Register::Size::Word); - UNIMPLEMENTED_IF(instr.conversion.f2f.src_size != Register::Size::Word); + UNIMPLEMENTED_IF(instr.conversion.dst_size == Register::Size::Long); + UNIMPLEMENTED_IF(instr.conversion.src_size == Register::Size::Long); UNIMPLEMENTED_IF_MSG(instr.generates_cc, "Condition codes generation in F2F is not implemented"); @@ -107,6 +118,13 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) { } }(); + if (instr.conversion.src_size == Register::Size::Short) { + value = Operation(GetFloatSelector(instr.conversion.float_src.selector), NO_PRECISE, + std::move(value)); + } else { + ASSERT(instr.conversion.float_src.selector == 0); + } + value = GetOperandAbsNegFloat(value, instr.conversion.abs_a, instr.conversion.negate_a); value = [&]() { @@ -124,19 +142,24 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) { default: UNIMPLEMENTED_MSG("Unimplemented F2F rounding mode {}", static_cast<u32>(instr.conversion.f2f.rounding.Value())); - return Immediate(0); + return value; } }(); value = GetSaturatedFloat(value, instr.alu.saturate_d); SetInternalFlagsFromFloat(bb, value, instr.generates_cc); + + if (instr.conversion.dst_size == Register::Size::Short) { + value = Operation(OperationCode::HCastFloat, PRECISE, value); + } + SetRegister(bb, instr.gpr0, value); break; } case OpCode::Id::F2I_R: case OpCode::Id::F2I_C: case OpCode::Id::F2I_IMM: { - UNIMPLEMENTED_IF(instr.conversion.src_size != Register::Size::Word); + UNIMPLEMENTED_IF(instr.conversion.src_size == Register::Size::Long); UNIMPLEMENTED_IF_MSG(instr.generates_cc, "Condition codes generation in F2I is not implemented"); Node value = [&]() { @@ -153,6 +176,13 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) { } }(); + if (instr.conversion.src_size == Register::Size::Short) { + value = Operation(GetFloatSelector(instr.conversion.float_src.selector), NO_PRECISE, + std::move(value)); + } else { + ASSERT(instr.conversion.float_src.selector == 
0); + } + value = GetOperandAbsNegFloat(value, instr.conversion.abs_a, instr.conversion.negate_a); value = [&]() { diff --git a/src/video_core/shader/decode/ffma.cpp b/src/video_core/shader/decode/ffma.cpp index 29be25ca32..ca2f39e8db 100644 --- a/src/video_core/shader/decode/ffma.cpp +++ b/src/video_core/shader/decode/ffma.cpp @@ -18,10 +18,12 @@ u32 ShaderIR::DecodeFfma(NodeBlock& bb, u32 pc) { const auto opcode = OpCode::Decode(instr); UNIMPLEMENTED_IF_MSG(instr.ffma.cc != 0, "FFMA cc not implemented"); - UNIMPLEMENTED_IF_MSG(instr.ffma.tab5980_0 != 1, "FFMA tab5980_0({}) not implemented", - instr.ffma.tab5980_0.Value()); // Seems to be 1 by default based on SMO - UNIMPLEMENTED_IF_MSG(instr.ffma.tab5980_1 != 0, "FFMA tab5980_1({}) not implemented", - instr.ffma.tab5980_1.Value()); + if (instr.ffma.tab5980_0 != 1) { + LOG_WARNING(HW_GPU, "FFMA tab5980_0({}) not implemented", instr.ffma.tab5980_0.Value()); + } + if (instr.ffma.tab5980_1 != 0) { + LOG_WARNING(HW_GPU, "FFMA tab5980_1({}) not implemented", instr.ffma.tab5980_1.Value()); + } const Node op_a = GetRegister(instr.gpr8); diff --git a/src/video_core/shader/decode/float_set.cpp b/src/video_core/shader/decode/float_set.cpp index f5013e44a7..5614e8a0da 100644 --- a/src/video_core/shader/decode/float_set.cpp +++ b/src/video_core/shader/decode/float_set.cpp @@ -15,7 +15,6 @@ using Tegra::Shader::OpCode; u32 ShaderIR::DecodeFloatSet(NodeBlock& bb, u32 pc) { const Instruction instr = {program_code[pc]}; - const auto opcode = OpCode::Decode(instr); const Node op_a = GetOperandAbsNegFloat(GetRegister(instr.gpr8), instr.fset.abs_a != 0, instr.fset.neg_a != 0); diff --git a/src/video_core/shader/decode/float_set_predicate.cpp b/src/video_core/shader/decode/float_set_predicate.cpp index 2323052b0c..200c2c983f 100644 --- a/src/video_core/shader/decode/float_set_predicate.cpp +++ b/src/video_core/shader/decode/float_set_predicate.cpp @@ -16,10 +16,9 @@ using Tegra::Shader::Pred; u32 ShaderIR::DecodeFloatSetPredicate(NodeBlock& bb, u32 pc) { const Instruction instr = {program_code[pc]}; - const auto opcode = OpCode::Decode(instr); - const Node op_a = GetOperandAbsNegFloat(GetRegister(instr.gpr8), instr.fsetp.abs_a != 0, - instr.fsetp.neg_a != 0); + Node op_a = GetOperandAbsNegFloat(GetRegister(instr.gpr8), instr.fsetp.abs_a != 0, + instr.fsetp.neg_a != 0); Node op_b = [&]() { if (instr.is_b_imm) { return GetImmediate19(instr); @@ -29,12 +28,13 @@ u32 ShaderIR::DecodeFloatSetPredicate(NodeBlock& bb, u32 pc) { return GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()); } }(); - op_b = GetOperandAbsNegFloat(op_b, instr.fsetp.abs_b, false); + op_b = GetOperandAbsNegFloat(std::move(op_b), instr.fsetp.abs_b, instr.fsetp.neg_b); // We can't use the constant predicate as destination. 
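The conversion changes above feed a selector into FCastHalf0/FCastHalf1 to pick which 16-bit half of a packed register gets widened; from the handling above, selector 0 appears to mean the low lane and 1 the high lane. A tiny illustrative sketch of that lane selection only; the actual half-to-float conversion is left out and the names are hypothetical:

    #include <cstdint>
    #include <cstdio>

    // selector 0 -> low 16 bits (FCastHalf0), selector 1 -> high 16 bits (FCastHalf1)
    std::uint16_t SelectHalfLane(std::uint32_t packed, unsigned selector) {
        return static_cast<std::uint16_t>(selector == 0 ? packed & 0xFFFFu : packed >> 16);
    }

    int main() {
        const std::uint32_t reg = 0x3C004000u; // high lane 0x3C00 (1.0), low lane 0x4000 (2.0)
        std::printf("lane0=0x%04x lane1=0x%04x\n",
                    static_cast<unsigned>(SelectHalfLane(reg, 0)),
                    static_cast<unsigned>(SelectHalfLane(reg, 1)));
    }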
ASSERT(instr.fsetp.pred3 != static_cast<u64>(Pred::UnusedIndex)); - const Node predicate = GetPredicateComparisonFloat(instr.fsetp.cond, op_a, op_b); + const Node predicate = + GetPredicateComparisonFloat(instr.fsetp.cond, std::move(op_a), std::move(op_b)); const Node second_pred = GetPredicate(instr.fsetp.pred39, instr.fsetp.neg_pred != 0); const OperationCode combiner = GetPredicateCombiner(instr.fsetp.op); diff --git a/src/video_core/shader/decode/half_set_predicate.cpp b/src/video_core/shader/decode/half_set_predicate.cpp index d59d15bd8a..afea33e5fd 100644 --- a/src/video_core/shader/decode/half_set_predicate.cpp +++ b/src/video_core/shader/decode/half_set_predicate.cpp @@ -18,43 +18,56 @@ u32 ShaderIR::DecodeHalfSetPredicate(NodeBlock& bb, u32 pc) { const Instruction instr = {program_code[pc]}; const auto opcode = OpCode::Decode(instr); - UNIMPLEMENTED_IF(instr.hsetp2.ftz != 0); + DEBUG_ASSERT(instr.hsetp2.ftz == 0); Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.hsetp2.type_a); op_a = GetOperandAbsNegHalf(op_a, instr.hsetp2.abs_a, instr.hsetp2.negate_a); - Node op_b = [&]() { - switch (opcode->get().GetId()) { - case OpCode::Id::HSETP2_R: - return GetOperandAbsNegHalf(GetRegister(instr.gpr20), instr.hsetp2.abs_a, - instr.hsetp2.negate_b); - default: - UNREACHABLE(); - return Immediate(0); - } - }(); - op_b = UnpackHalfFloat(op_b, instr.hsetp2.type_b); - - // We can't use the constant predicate as destination. - ASSERT(instr.hsetp2.pred3 != static_cast<u64>(Pred::UnusedIndex)); - - const Node second_pred = GetPredicate(instr.hsetp2.pred39, instr.hsetp2.neg_pred != 0); + Tegra::Shader::PredCondition cond{}; + bool h_and{}; + Node op_b{}; + switch (opcode->get().GetId()) { + case OpCode::Id::HSETP2_C: + cond = instr.hsetp2.cbuf_and_imm.cond; + h_and = instr.hsetp2.cbuf_and_imm.h_and; + op_b = GetOperandAbsNegHalf(GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()), + instr.hsetp2.cbuf.abs_b, instr.hsetp2.cbuf.negate_b); + break; + case OpCode::Id::HSETP2_IMM: + cond = instr.hsetp2.cbuf_and_imm.cond; + h_and = instr.hsetp2.cbuf_and_imm.h_and; + op_b = UnpackHalfImmediate(instr, true); + break; + case OpCode::Id::HSETP2_R: + cond = instr.hsetp2.reg.cond; + h_and = instr.hsetp2.reg.h_and; + op_b = + UnpackHalfFloat(GetOperandAbsNegHalf(GetRegister(instr.gpr20), instr.hsetp2.reg.abs_b, + instr.hsetp2.reg.negate_b), + instr.hsetp2.reg.type_b); + break; + default: + UNREACHABLE(); + op_b = Immediate(0); + } const OperationCode combiner = GetPredicateCombiner(instr.hsetp2.op); - const OperationCode pair_combiner = - instr.hsetp2.h_and ? 
OperationCode::LogicalAll2 : OperationCode::LogicalAny2; - - const Node comparison = GetPredicateComparisonHalf(instr.hsetp2.cond, op_a, op_b); - const Node first_pred = Operation(pair_combiner, comparison); + const Node combined_pred = GetPredicate(instr.hsetp2.pred3, instr.hsetp2.neg_pred); - // Set the primary predicate to the result of Predicate OP SecondPredicate - const Node value = Operation(combiner, first_pred, second_pred); - SetPredicate(bb, instr.hsetp2.pred3, value); + const auto Write = [&](u64 dest, Node src) { + SetPredicate(bb, dest, Operation(combiner, std::move(src), combined_pred)); + }; - if (instr.hsetp2.pred0 != static_cast<u64>(Pred::UnusedIndex)) { - // Set the secondary predicate to the result of !Predicate OP SecondPredicate, if enabled - const Node negated_pred = Operation(OperationCode::LogicalNegate, first_pred); - SetPredicate(bb, instr.hsetp2.pred0, Operation(combiner, negated_pred, second_pred)); + const Node comparison = GetPredicateComparisonHalf(cond, op_a, op_b); + const u64 first = instr.hsetp2.pred0; + const u64 second = instr.hsetp2.pred39; + if (h_and) { + const Node joined = Operation(OperationCode::LogicalAnd2, comparison); + Write(first, joined); + Write(second, Operation(OperationCode::LogicalNegate, joined)); + } else { + Write(first, Operation(OperationCode::LogicalPick2, comparison, Immediate(0u))); + Write(second, Operation(OperationCode::LogicalPick2, comparison, Immediate(1u))); } return pc; diff --git a/src/video_core/shader/decode/hfma2.cpp b/src/video_core/shader/decode/hfma2.cpp index c3bcf1ae9d..5b44cb79cb 100644 --- a/src/video_core/shader/decode/hfma2.cpp +++ b/src/video_core/shader/decode/hfma2.cpp @@ -22,9 +22,9 @@ u32 ShaderIR::DecodeHfma2(NodeBlock& bb, u32 pc) { const auto opcode = OpCode::Decode(instr); if (opcode->get().GetId() == OpCode::Id::HFMA2_RR) { - UNIMPLEMENTED_IF(instr.hfma2.rr.precision != HalfPrecision::None); + DEBUG_ASSERT(instr.hfma2.rr.precision == HalfPrecision::None); } else { - UNIMPLEMENTED_IF(instr.hfma2.precision != HalfPrecision::None); + DEBUG_ASSERT(instr.hfma2.precision == HalfPrecision::None); } constexpr auto identity = HalfType::H0_H1; diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp index 24f022cc04..77151a24be 100644 --- a/src/video_core/shader/decode/image.cpp +++ b/src/video_core/shader/decode/image.cpp @@ -95,12 +95,8 @@ const Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::Image const Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type) { const Node image_register{GetRegister(reg)}; - const Node base_image{ + const auto [base_image, cbuf_index, cbuf_offset]{ TrackCbuf(image_register, global_code, static_cast<s64>(global_code.size()))}; - const auto cbuf{std::get_if<CbufNode>(&*base_image)}; - const auto cbuf_offset_imm{std::get_if<ImmediateNode>(&*cbuf->GetOffset())}; - const auto cbuf_offset{cbuf_offset_imm->GetValue()}; - const auto cbuf_index{cbuf->GetIndex()}; const auto cbuf_key{(static_cast<u64>(cbuf_index) << 32) | static_cast<u64>(cbuf_offset)}; // If this image has already been used, return the existing mapping. 
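GetBindlessImage above memoizes bindless resources by packing the constant buffer index and offset into a single 64-bit key, (index << 32) | offset. A short self-contained sketch of that key scheme with an illustrative map:

    #include <cstdint>
    #include <cstdio>
    #include <unordered_map>

    constexpr std::uint64_t MakeCbufKey(std::uint32_t cbuf_index, std::uint32_t cbuf_offset) {
        return (static_cast<std::uint64_t>(cbuf_index) << 32) | cbuf_offset;
    }

    int main() {
        std::unordered_map<std::uint64_t, int> used_images;
        used_images.emplace(MakeCbufKey(2, 0x58), 0);                    // first use registers the image
        const bool found = used_images.count(MakeCbufKey(2, 0x58)) != 0; // later uses hit the cache
        std::printf("found=%d\n", found ? 1 : 0);
    }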
diff --git a/src/video_core/shader/decode/integer_set.cpp b/src/video_core/shader/decode/integer_set.cpp index 46e3d59057..59809bcd82 100644 --- a/src/video_core/shader/decode/integer_set.cpp +++ b/src/video_core/shader/decode/integer_set.cpp @@ -14,7 +14,6 @@ using Tegra::Shader::OpCode; u32 ShaderIR::DecodeIntegerSet(NodeBlock& bb, u32 pc) { const Instruction instr = {program_code[pc]}; - const auto opcode = OpCode::Decode(instr); const Node op_a = GetRegister(instr.gpr8); const Node op_b = [&]() { diff --git a/src/video_core/shader/decode/integer_set_predicate.cpp b/src/video_core/shader/decode/integer_set_predicate.cpp index dd20775d79..25e48fef84 100644 --- a/src/video_core/shader/decode/integer_set_predicate.cpp +++ b/src/video_core/shader/decode/integer_set_predicate.cpp @@ -16,7 +16,6 @@ using Tegra::Shader::Pred; u32 ShaderIR::DecodeIntegerSetPredicate(NodeBlock& bb, u32 pc) { const Instruction instr = {program_code[pc]}; - const auto opcode = OpCode::Decode(instr); const Node op_a = GetRegister(instr.gpr8); diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp index 80fc0ccfc1..ed108bea8b 100644 --- a/src/video_core/shader/decode/memory.cpp +++ b/src/video_core/shader/decode/memory.cpp @@ -95,10 +95,10 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { const Node op_b = GetConstBufferIndirect(instr.cbuf36.index, instr.cbuf36.GetOffset() + 4, index); - SetTemporal(bb, 0, op_a); - SetTemporal(bb, 1, op_b); - SetRegister(bb, instr.gpr0, GetTemporal(0)); - SetRegister(bb, instr.gpr0.Value() + 1, GetTemporal(1)); + SetTemporary(bb, 0, op_a); + SetTemporary(bb, 1, op_b); + SetRegister(bb, instr.gpr0, GetTemporary(0)); + SetRegister(bb, instr.gpr0.Value() + 1, GetTemporary(1)); break; } default: @@ -136,9 +136,9 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { } }(); for (u32 i = 0; i < count; ++i) - SetTemporal(bb, i, GetLmem(i * 4)); + SetTemporary(bb, i, GetLmem(i * 4)); for (u32 i = 0; i < count; ++i) - SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); + SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i)); break; } default: @@ -172,10 +172,10 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { Operation(OperationCode::UAdd, NO_PRECISE, real_address_base, it_offset); const Node gmem = MakeNode<GmemNode>(real_address, base_address, descriptor); - SetTemporal(bb, i, gmem); + SetTemporary(bb, i, gmem); } for (u32 i = 0; i < count; ++i) { - SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); + SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i)); } break; } @@ -253,11 +253,11 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { TrackAndGetGlobalMemory(bb, instr, true); // Encode in temporary registers like this: real_base_address, {registers_to_be_written...} - SetTemporal(bb, 0, real_address_base); + SetTemporary(bb, 0, real_address_base); const u32 count = GetUniformTypeElementsCount(type); for (u32 i = 0; i < count; ++i) { - SetTemporal(bb, i + 1, GetRegister(instr.gpr0.Value() + i)); + SetTemporary(bb, i + 1, GetRegister(instr.gpr0.Value() + i)); } for (u32 i = 0; i < count; ++i) { const Node it_offset = Immediate(i * 4); @@ -265,7 +265,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { Operation(OperationCode::UAdd, NO_PRECISE, real_address_base, it_offset); const Node gmem = MakeNode<GmemNode>(real_address, base_address, descriptor); - bb.push_back(Operation(OperationCode::Assign, gmem, GetTemporal(i + 1))); + bb.push_back(Operation(OperationCode::Assign, gmem, GetTemporary(i + 1))); } break; } @@ 
-297,18 +297,13 @@ std::tuple<Node, Node, GlobalMemoryBase> ShaderIR::TrackAndGetGlobalMemory(NodeB const auto addr_register{GetRegister(instr.gmem.gpr)}; const auto immediate_offset{static_cast<u32>(instr.gmem.offset)}; - const Node base_address{ - TrackCbuf(addr_register, global_code, static_cast<s64>(global_code.size()))}; - const auto cbuf = std::get_if<CbufNode>(&*base_address); - ASSERT(cbuf != nullptr); - const auto cbuf_offset_imm = std::get_if<ImmediateNode>(&*cbuf->GetOffset()); - ASSERT(cbuf_offset_imm != nullptr); - const auto cbuf_offset = cbuf_offset_imm->GetValue(); + const auto [base_address, index, offset] = + TrackCbuf(addr_register, global_code, static_cast<s64>(global_code.size())); + ASSERT(base_address != nullptr); - bb.push_back( - Comment(fmt::format("Base address is c[0x{:x}][0x{:x}]", cbuf->GetIndex(), cbuf_offset))); + bb.push_back(Comment(fmt::format("Base address is c[0x{:x}][0x{:x}]", index, offset))); - const GlobalMemoryBase descriptor{cbuf->GetIndex(), cbuf_offset}; + const GlobalMemoryBase descriptor{index, offset}; const auto& [entry, is_new] = used_global_memory.try_emplace(descriptor); auto& usage = entry->second; if (is_write) { diff --git a/src/video_core/shader/decode/other.cpp b/src/video_core/shader/decode/other.cpp index d46a8ab82d..d46e0f8232 100644 --- a/src/video_core/shader/decode/other.cpp +++ b/src/video_core/shader/decode/other.cpp @@ -22,6 +22,12 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) { const auto opcode = OpCode::Decode(instr); switch (opcode->get().GetId()) { + case OpCode::Id::NOP: { + UNIMPLEMENTED_IF(instr.nop.cc != Tegra::Shader::ConditionCode::T); + UNIMPLEMENTED_IF(instr.nop.trigger != 0); + // With the previous preconditions, this instruction is a no-operation. + break; + } case OpCode::Id::EXIT: { const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "EXIT condition code used: {}", @@ -68,6 +74,13 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) { case SystemVariable::InvocationInfo: LOG_WARNING(HW_GPU, "MOV_SYS instruction with InvocationInfo is incomplete"); return Immediate(0u); + case SystemVariable::Tid: { + Node value = Immediate(0); + value = BitfieldInsert(value, Operation(OperationCode::LocalInvocationIdX), 0, 9); + value = BitfieldInsert(value, Operation(OperationCode::LocalInvocationIdY), 16, 9); + value = BitfieldInsert(value, Operation(OperationCode::LocalInvocationIdZ), 26, 5); + return value; + } case SystemVariable::TidX: return Operation(OperationCode::LocalInvocationIdX); case SystemVariable::TidY: @@ -91,11 +104,46 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) { break; } case OpCode::Id::BRA: { - UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, - "BRA with constant buffers are not implemented"); + Node branch; + if (instr.bra.constant_buffer == 0) { + const u32 target = pc + instr.bra.GetBranchTarget(); + branch = Operation(OperationCode::Branch, Immediate(target)); + } else { + const u32 target = pc + 1; + const Node op_a = GetConstBuffer(instr.cbuf36.index, instr.cbuf36.GetOffset()); + const Node convert = SignedOperation(OperationCode::IArithmeticShiftRight, true, + PRECISE, op_a, Immediate(3)); + const Node operand = + Operation(OperationCode::IAdd, PRECISE, convert, Immediate(target)); + branch = Operation(OperationCode::BranchIndirect, operand); + } - const u32 target = pc + instr.bra.GetBranchTarget(); - const Node branch = Operation(OperationCode::Branch, Immediate(target)); + const 
Tegra::Shader::ConditionCode cc = instr.flow_condition_code; + if (cc != Tegra::Shader::ConditionCode::T) { + bb.push_back(Conditional(GetConditionCode(cc), {branch})); + } else { + bb.push_back(branch); + } + break; + } + case OpCode::Id::BRX: { + Node operand; + if (instr.brx.constant_buffer != 0) { + const s32 target = pc + 1; + const Node index = GetRegister(instr.gpr8); + const Node op_a = + GetConstBufferIndirect(instr.cbuf36.index, instr.cbuf36.GetOffset() + 0, index); + const Node convert = SignedOperation(OperationCode::IArithmeticShiftRight, true, + PRECISE, op_a, Immediate(3)); + operand = Operation(OperationCode::IAdd, PRECISE, convert, Immediate(target)); + } else { + const s32 target = pc + instr.brx.GetBranchExtend(); + const Node op_a = GetRegister(instr.gpr8); + const Node convert = SignedOperation(OperationCode::IArithmeticShiftRight, true, + PRECISE, op_a, Immediate(3)); + operand = Operation(OperationCode::IAdd, PRECISE, convert, Immediate(target)); + } + const Node branch = Operation(OperationCode::BranchIndirect, operand); const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; if (cc != Tegra::Shader::ConditionCode::T) { @@ -109,6 +157,10 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) { UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, "Constant buffer flow is not supported"); + if (disable_flow_stack) { + break; + } + // The SSY opcode tells the GPU where to re-converge divergent execution paths with SYNC. const u32 target = pc + instr.bra.GetBranchTarget(); bb.push_back( @@ -119,6 +171,10 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) { UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0, "Constant buffer PBK is not supported"); + if (disable_flow_stack) { + break; + } + // PBK pushes to a stack the address where BRK will jump to. 
const u32 target = pc + instr.bra.GetBranchTarget(); bb.push_back( @@ -130,6 +186,10 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) { UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "SYNC condition code used: {}", static_cast<u32>(cc)); + if (disable_flow_stack) { + break; + } + // The SYNC opcode jumps to the address previously set by the SSY opcode bb.push_back(Operation(OperationCode::PopFlowStack, MetaStackClass::Ssy)); break; @@ -138,6 +198,9 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) { const Tegra::Shader::ConditionCode cc = instr.flow_condition_code; UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "BRK condition code used: {}", static_cast<u32>(cc)); + if (disable_flow_stack) { + break; + } // The BRK opcode jumps to the address previously set by the PBK opcode bb.push_back(Operation(OperationCode::PopFlowStack, MetaStackClass::Pbk)); diff --git a/src/video_core/shader/decode/predicate_set_register.cpp b/src/video_core/shader/decode/predicate_set_register.cpp index febbfeb50b..84dbc50fec 100644 --- a/src/video_core/shader/decode/predicate_set_register.cpp +++ b/src/video_core/shader/decode/predicate_set_register.cpp @@ -15,7 +15,6 @@ using Tegra::Shader::OpCode; u32 ShaderIR::DecodePredicateSetRegister(NodeBlock& bb, u32 pc) { const Instruction instr = {program_code[pc]}; - const auto opcode = OpCode::Decode(instr); UNIMPLEMENTED_IF_MSG(instr.generates_cc, "Condition codes generation in PSET is not implemented"); diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp index cb480be9bd..0b934a0696 100644 --- a/src/video_core/shader/decode/texture.cpp +++ b/src/video_core/shader/decode/texture.cpp @@ -181,10 +181,10 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { const Node value = Operation(OperationCode::TextureQueryDimensions, meta, GetRegister(instr.gpr8.Value() + (is_bindless ? 
1 : 0))); - SetTemporal(bb, indexer++, value); + SetTemporary(bb, indexer++, value); } for (u32 i = 0; i < indexer; ++i) { - SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); + SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i)); } break; } @@ -238,10 +238,10 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { auto params = coords; MetaTexture meta{sampler, {}, {}, {}, {}, {}, {}, element}; const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params)); - SetTemporal(bb, indexer++, value); + SetTemporary(bb, indexer++, value); } for (u32 i = 0; i < indexer; ++i) { - SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); + SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i)); } break; } @@ -269,7 +269,13 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { LOG_WARNING(HW_GPU, "TLDS.NODEP implementation is incomplete"); } - WriteTexsInstructionFloat(bb, instr, GetTldsCode(instr, texture_type, is_array)); + const Node4 components = GetTldsCode(instr, texture_type, is_array); + + if (instr.tlds.fp32_flag) { + WriteTexsInstructionFloat(bb, instr, components); + } else { + WriteTexsInstructionHalfFloat(bb, instr, components); + } break; } default: @@ -302,13 +308,9 @@ const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, Textu const Sampler& ShaderIR::GetBindlessSampler(const Tegra::Shader::Register& reg, TextureType type, bool is_array, bool is_shadow) { const Node sampler_register = GetRegister(reg); - const Node base_sampler = + const auto [base_sampler, cbuf_index, cbuf_offset] = TrackCbuf(sampler_register, global_code, static_cast<s64>(global_code.size())); - const auto cbuf = std::get_if<CbufNode>(&*base_sampler); - const auto cbuf_offset_imm = std::get_if<ImmediateNode>(&*cbuf->GetOffset()); - ASSERT(cbuf_offset_imm != nullptr); - const auto cbuf_offset = cbuf_offset_imm->GetValue(); - const auto cbuf_index = cbuf->GetIndex(); + ASSERT(base_sampler != nullptr); const auto cbuf_key = (static_cast<u64>(cbuf_index) << 32) | static_cast<u64>(cbuf_offset); // If this sampler has already been used, return the existing mapping. 
@@ -334,11 +336,11 @@ void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const // Skip disabled components continue; } - SetTemporal(bb, dest_elem++, components[elem]); + SetTemporary(bb, dest_elem++, components[elem]); } // After writing values in temporals, move them to the real registers for (u32 i = 0; i < dest_elem; ++i) { - SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i)); + SetRegister(bb, instr.gpr0.Value() + i, GetTemporary(i)); } } @@ -351,17 +353,17 @@ void ShaderIR::WriteTexsInstructionFloat(NodeBlock& bb, Instruction instr, for (u32 component = 0; component < 4; ++component) { if (!instr.texs.IsComponentEnabled(component)) continue; - SetTemporal(bb, dest_elem++, components[component]); + SetTemporary(bb, dest_elem++, components[component]); } for (u32 i = 0; i < dest_elem; ++i) { if (i < 2) { // Write the first two swizzle components to gpr0 and gpr0+1 - SetRegister(bb, instr.gpr0.Value() + i % 2, GetTemporal(i)); + SetRegister(bb, instr.gpr0.Value() + i % 2, GetTemporary(i)); } else { ASSERT(instr.texs.HasTwoDestinations()); // Write the rest of the swizzle components to gpr28 and gpr28+1 - SetRegister(bb, instr.gpr28.Value() + i % 2, GetTemporal(i)); + SetRegister(bb, instr.gpr28.Value() + i % 2, GetTemporary(i)); } } } @@ -389,11 +391,11 @@ void ShaderIR::WriteTexsInstructionHalfFloat(NodeBlock& bb, Instruction instr, return; } - SetTemporal(bb, 0, first_value); - SetTemporal(bb, 1, Operation(OperationCode::HPack2, values[2], values[3])); + SetTemporary(bb, 0, first_value); + SetTemporary(bb, 1, Operation(OperationCode::HPack2, values[2], values[3])); - SetRegister(bb, instr.gpr0, GetTemporal(0)); - SetRegister(bb, instr.gpr28, GetTemporal(1)); + SetRegister(bb, instr.gpr0, GetTemporary(0)); + SetRegister(bb, instr.gpr28, GetTemporary(1)); } Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type, diff --git a/src/video_core/shader/decode/warp.cpp b/src/video_core/shader/decode/warp.cpp new file mode 100644 index 0000000000..04ca74f464 --- /dev/null +++ b/src/video_core/shader/decode/warp.cpp @@ -0,0 +1,55 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/shader/node_helper.h" +#include "video_core/shader/shader_ir.h" + +namespace VideoCommon::Shader { + +using Tegra::Shader::Instruction; +using Tegra::Shader::OpCode; +using Tegra::Shader::Pred; +using Tegra::Shader::VoteOperation; + +namespace { +OperationCode GetOperationCode(VoteOperation vote_op) { + switch (vote_op) { + case VoteOperation::All: + return OperationCode::VoteAll; + case VoteOperation::Any: + return OperationCode::VoteAny; + case VoteOperation::Eq: + return OperationCode::VoteEqual; + default: + UNREACHABLE_MSG("Invalid vote operation={}", static_cast<u64>(vote_op)); + return OperationCode::VoteAll; + } +} +} // Anonymous namespace + +u32 ShaderIR::DecodeWarp(NodeBlock& bb, u32 pc) { + const Instruction instr = {program_code[pc]}; + const auto opcode = OpCode::Decode(instr); + + switch (opcode->get().GetId()) { + case OpCode::Id::VOTE: { + const Node value = GetPredicate(instr.vote.value, instr.vote.negate_value != 0); + const Node active = Operation(OperationCode::BallotThread, value); + const Node vote = Operation(GetOperationCode(instr.vote.operation), value); + SetRegister(bb, instr.gpr0, active); + SetPredicate(bb, instr.vote.dest_pred, vote); + break; + } + default: + UNIMPLEMENTED_MSG("Unhandled warp instruction: {}", opcode->get().GetName()); + break; + } + + return pc; +} + +} // namespace VideoCommon::Shader diff --git a/src/video_core/shader/decode/xmad.cpp b/src/video_core/shader/decode/xmad.cpp index 93dee77d1c..206961909f 100644 --- a/src/video_core/shader/decode/xmad.cpp +++ b/src/video_core/shader/decode/xmad.cpp @@ -73,8 +73,8 @@ u32 ShaderIR::DecodeXmad(NodeBlock& bb, u32 pc) { if (is_psl) { product = Operation(OperationCode::ILogicalShiftLeft, NO_PRECISE, product, Immediate(16)); } - SetTemporal(bb, 0, product); - product = GetTemporal(0); + SetTemporary(bb, 0, product); + product = GetTemporary(0); const Node original_c = op_c; const Tegra::Shader::XmadMode set_mode = mode; // Workaround to clang compile error @@ -98,13 +98,13 @@ u32 ShaderIR::DecodeXmad(NodeBlock& bb, u32 pc) { } }(); - SetTemporal(bb, 1, op_c); - op_c = GetTemporal(1); + SetTemporary(bb, 1, op_c); + op_c = GetTemporary(1); // TODO(Rodrigo): Use an appropiate sign for this operation Node sum = Operation(OperationCode::IAdd, product, op_c); - SetTemporal(bb, 2, sum); - sum = GetTemporal(2); + SetTemporary(bb, 2, sum); + sum = GetTemporary(2); if (is_merge) { const Node a = BitfieldExtract(sum, 0, 16); const Node b = diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h index 0ac83fcf08..5db9313c48 100644 --- a/src/video_core/shader/node.h +++ b/src/video_core/shader/node.h @@ -30,6 +30,8 @@ enum class OperationCode { FNegate, /// (MetaArithmetic, float a) -> float FAbsolute, /// (MetaArithmetic, float a) -> float FClamp, /// (MetaArithmetic, float value, float min, float max) -> float + FCastHalf0, /// (MetaArithmetic, f16vec2 a) -> float + FCastHalf1, /// (MetaArithmetic, f16vec2 a) -> float FMin, /// (MetaArithmetic, float a, float b) -> float FMax, /// (MetaArithmetic, float a, float b) -> float FCos, /// (MetaArithmetic, float a) -> float @@ -83,17 +85,18 @@ enum class OperationCode { UBitfieldExtract, /// (MetaArithmetic, uint value, int offset, int offset) -> uint UBitCount, /// (MetaArithmetic, uint) -> uint - HAdd, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> f16vec2 - HMul, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> 
f16vec2 - HFma, /// (MetaArithmetic, f16vec2 a, f16vec2 b, f16vec2 c) -> f16vec2 - HAbsolute, /// (f16vec2 a) -> f16vec2 - HNegate, /// (f16vec2 a, bool first, bool second) -> f16vec2 - HClamp, /// (f16vec2 src, float min, float max) -> f16vec2 - HUnpack, /// (Tegra::Shader::HalfType, T value) -> f16vec2 - HMergeF32, /// (f16vec2 src) -> float - HMergeH0, /// (f16vec2 dest, f16vec2 src) -> f16vec2 - HMergeH1, /// (f16vec2 dest, f16vec2 src) -> f16vec2 - HPack2, /// (float a, float b) -> f16vec2 + HAdd, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> f16vec2 + HMul, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> f16vec2 + HFma, /// (MetaArithmetic, f16vec2 a, f16vec2 b, f16vec2 c) -> f16vec2 + HAbsolute, /// (f16vec2 a) -> f16vec2 + HNegate, /// (f16vec2 a, bool first, bool second) -> f16vec2 + HClamp, /// (f16vec2 src, float min, float max) -> f16vec2 + HCastFloat, /// (MetaArithmetic, float a) -> f16vec2 + HUnpack, /// (Tegra::Shader::HalfType, T value) -> f16vec2 + HMergeF32, /// (f16vec2 src) -> float + HMergeH0, /// (f16vec2 dest, f16vec2 src) -> f16vec2 + HMergeH1, /// (f16vec2 dest, f16vec2 src) -> f16vec2 + HPack2, /// (float a, float b) -> f16vec2 LogicalAssign, /// (bool& dst, bool src) -> void LogicalAnd, /// (bool a, bool b) -> bool @@ -101,8 +104,7 @@ enum class OperationCode { LogicalXor, /// (bool a, bool b) -> bool LogicalNegate, /// (bool a) -> bool LogicalPick2, /// (bool2 pair, uint index) -> bool - LogicalAll2, /// (bool2 a) -> bool - LogicalAny2, /// (bool2 a) -> bool + LogicalAnd2, /// (bool2 a) -> bool LogicalFLessThan, /// (float a, float b) -> bool LogicalFEqual, /// (float a, float b) -> bool @@ -148,11 +150,12 @@ enum class OperationCode { ImageStore, /// (MetaImage, float[N] coords) -> void - Branch, /// (uint branch_target) -> void - PushFlowStack, /// (uint branch_target) -> void - PopFlowStack, /// () -> void - Exit, /// () -> void - Discard, /// () -> void + Branch, /// (uint branch_target) -> void + BranchIndirect, /// (uint branch_target) -> void + PushFlowStack, /// (uint branch_target) -> void + PopFlowStack, /// () -> void + Exit, /// () -> void + Discard, /// () -> void EmitVertex, /// () -> void EndPrimitive, /// () -> void @@ -165,6 +168,11 @@ enum class OperationCode { WorkGroupIdY, /// () -> uint WorkGroupIdZ, /// () -> uint + BallotThread, /// (bool) -> uint + VoteAll, /// (bool) -> bool + VoteAny, /// (bool) -> bool + VoteEqual, /// (bool) -> bool + Amount, }; diff --git a/src/video_core/shader/node_helper.cpp b/src/video_core/shader/node_helper.cpp index 6fccbbba3a..b3dcd291c1 100644 --- a/src/video_core/shader/node_helper.cpp +++ b/src/video_core/shader/node_helper.cpp @@ -12,7 +12,7 @@ namespace VideoCommon::Shader { Node Conditional(Node condition, std::vector<Node> code) { - return MakeNode<ConditionalNode>(condition, std::move(code)); + return MakeNode<ConditionalNode>(std::move(condition), std::move(code)); } Node Comment(std::string text) { diff --git a/src/video_core/shader/shader_ir.cpp b/src/video_core/shader/shader_ir.cpp index 11b545ccac..1e5c7f6605 100644 --- a/src/video_core/shader/shader_ir.cpp +++ b/src/video_core/shader/shader_ir.cpp @@ -22,8 +22,8 @@ using Tegra::Shader::PredCondition; using Tegra::Shader::PredOperation; using Tegra::Shader::Register; -ShaderIR::ShaderIR(const ProgramCode& program_code, u32 main_offset) - : program_code{program_code}, main_offset{main_offset} { +ShaderIR::ShaderIR(const ProgramCode& program_code, u32 main_offset, const std::size_t size) + : program_code{program_code}, main_offset{main_offset}, 
program_size{size} { Decode(); } @@ -61,8 +61,17 @@ Node ShaderIR::GetConstBufferIndirect(u64 index_, u64 offset_, Node node) { const auto [entry, is_new] = used_cbufs.try_emplace(index); entry->second.MarkAsUsedIndirect(); - const Node final_offset = Operation(OperationCode::UAdd, NO_PRECISE, node, Immediate(offset)); - return MakeNode<CbufNode>(index, final_offset); + Node final_offset = [&] { + // Attempt to inline constant buffer without a variable offset. This is done to allow + // tracking LDC calls. + if (const auto gpr = std::get_if<GprNode>(&*node)) { + if (gpr->GetIndex() == Register::ZeroIndex) { + return Immediate(offset); + } + } + return Operation(OperationCode::UAdd, NO_PRECISE, std::move(node), Immediate(offset)); + }(); + return MakeNode<CbufNode>(index, std::move(final_offset)); } Node ShaderIR::GetPredicate(u64 pred_, bool negated) { @@ -80,7 +89,7 @@ Node ShaderIR::GetPredicate(bool immediate) { Node ShaderIR::GetInputAttribute(Attribute::Index index, u64 element, Node buffer) { used_input_attributes.emplace(index); - return MakeNode<AbufNode>(index, static_cast<u32>(element), buffer); + return MakeNode<AbufNode>(index, static_cast<u32>(element), std::move(buffer)); } Node ShaderIR::GetPhysicalInputAttribute(Tegra::Shader::Register physical_address, Node buffer) { @@ -89,6 +98,22 @@ Node ShaderIR::GetPhysicalInputAttribute(Tegra::Shader::Register physical_addres } Node ShaderIR::GetOutputAttribute(Attribute::Index index, u64 element, Node buffer) { + if (index == Attribute::Index::LayerViewportPointSize) { + switch (element) { + case 0: + UNIMPLEMENTED(); + break; + case 1: + uses_layer = true; + break; + case 2: + uses_viewport_index = true; + break; + case 3: + uses_point_size = true; + break; + } + } if (index == Attribute::Index::ClipDistances0123 || index == Attribute::Index::ClipDistances4567) { const auto clip_index = @@ -97,7 +122,7 @@ Node ShaderIR::GetOutputAttribute(Attribute::Index index, u64 element, Node buff } used_output_attributes.insert(index); - return MakeNode<AbufNode>(index, static_cast<u32>(element), buffer); + return MakeNode<AbufNode>(index, static_cast<u32>(element), std::move(buffer)); } Node ShaderIR::GetInternalFlag(InternalFlag flag, bool negated) { @@ -109,19 +134,19 @@ Node ShaderIR::GetInternalFlag(InternalFlag flag, bool negated) { } Node ShaderIR::GetLocalMemory(Node address) { - return MakeNode<LmemNode>(address); + return MakeNode<LmemNode>(std::move(address)); } -Node ShaderIR::GetTemporal(u32 id) { +Node ShaderIR::GetTemporary(u32 id) { return GetRegister(Register::ZeroIndex + 1 + id); } Node ShaderIR::GetOperandAbsNegFloat(Node value, bool absolute, bool negate) { if (absolute) { - value = Operation(OperationCode::FAbsolute, NO_PRECISE, value); + value = Operation(OperationCode::FAbsolute, NO_PRECISE, std::move(value)); } if (negate) { - value = Operation(OperationCode::FNegate, NO_PRECISE, value); + value = Operation(OperationCode::FNegate, NO_PRECISE, std::move(value)); } return value; } @@ -130,24 +155,26 @@ Node ShaderIR::GetSaturatedFloat(Node value, bool saturate) { if (!saturate) { return value; } - const Node positive_zero = Immediate(std::copysignf(0, 1)); - const Node positive_one = Immediate(1.0f); - return Operation(OperationCode::FClamp, NO_PRECISE, value, positive_zero, positive_one); + + Node positive_zero = Immediate(std::copysignf(0, 1)); + Node positive_one = Immediate(1.0f); + return Operation(OperationCode::FClamp, NO_PRECISE, std::move(value), std::move(positive_zero), + std::move(positive_one)); } -Node 
ShaderIR::ConvertIntegerSize(Node value, Tegra::Shader::Register::Size size, bool is_signed) { +Node ShaderIR::ConvertIntegerSize(Node value, Register::Size size, bool is_signed) { switch (size) { case Register::Size::Byte: - value = SignedOperation(OperationCode::ILogicalShiftLeft, is_signed, NO_PRECISE, value, - Immediate(24)); - value = SignedOperation(OperationCode::IArithmeticShiftRight, is_signed, NO_PRECISE, value, - Immediate(24)); + value = SignedOperation(OperationCode::ILogicalShiftLeft, is_signed, NO_PRECISE, + std::move(value), Immediate(24)); + value = SignedOperation(OperationCode::IArithmeticShiftRight, is_signed, NO_PRECISE, + std::move(value), Immediate(24)); return value; case Register::Size::Short: - value = SignedOperation(OperationCode::ILogicalShiftLeft, is_signed, NO_PRECISE, value, - Immediate(16)); - value = SignedOperation(OperationCode::IArithmeticShiftRight, is_signed, NO_PRECISE, value, - Immediate(16)); + value = SignedOperation(OperationCode::ILogicalShiftLeft, is_signed, NO_PRECISE, + std::move(value), Immediate(16)); + value = SignedOperation(OperationCode::IArithmeticShiftRight, is_signed, NO_PRECISE, + std::move(value), Immediate(16)); case Register::Size::Word: // Default - do nothing return value; @@ -163,27 +190,29 @@ Node ShaderIR::GetOperandAbsNegInteger(Node value, bool absolute, bool negate, b return value; } if (absolute) { - value = Operation(OperationCode::IAbsolute, NO_PRECISE, value); + value = Operation(OperationCode::IAbsolute, NO_PRECISE, std::move(value)); } if (negate) { - value = Operation(OperationCode::INegate, NO_PRECISE, value); + value = Operation(OperationCode::INegate, NO_PRECISE, std::move(value)); } return value; } Node ShaderIR::UnpackHalfImmediate(Instruction instr, bool has_negation) { - const Node value = Immediate(instr.half_imm.PackImmediates()); + Node value = Immediate(instr.half_imm.PackImmediates()); if (!has_negation) { return value; } - const Node first_negate = GetPredicate(instr.half_imm.first_negate != 0); - const Node second_negate = GetPredicate(instr.half_imm.second_negate != 0); - return Operation(OperationCode::HNegate, NO_PRECISE, value, first_negate, second_negate); + Node first_negate = GetPredicate(instr.half_imm.first_negate != 0); + Node second_negate = GetPredicate(instr.half_imm.second_negate != 0); + + return Operation(OperationCode::HNegate, NO_PRECISE, std::move(value), std::move(first_negate), + std::move(second_negate)); } Node ShaderIR::UnpackHalfFloat(Node value, Tegra::Shader::HalfType type) { - return Operation(OperationCode::HUnpack, type, value); + return Operation(OperationCode::HUnpack, type, std::move(value)); } Node ShaderIR::HalfMerge(Node dest, Node src, Tegra::Shader::HalfMerge merge) { @@ -191,11 +220,11 @@ Node ShaderIR::HalfMerge(Node dest, Node src, Tegra::Shader::HalfMerge merge) { case Tegra::Shader::HalfMerge::H0_H1: return src; case Tegra::Shader::HalfMerge::F32: - return Operation(OperationCode::HMergeF32, src); + return Operation(OperationCode::HMergeF32, std::move(src)); case Tegra::Shader::HalfMerge::Mrg_H0: - return Operation(OperationCode::HMergeH0, dest, src); + return Operation(OperationCode::HMergeH0, std::move(dest), std::move(src)); case Tegra::Shader::HalfMerge::Mrg_H1: - return Operation(OperationCode::HMergeH1, dest, src); + return Operation(OperationCode::HMergeH1, std::move(dest), std::move(src)); } UNREACHABLE(); return src; @@ -203,10 +232,10 @@ Node ShaderIR::HalfMerge(Node dest, Node src, Tegra::Shader::HalfMerge merge) { Node 
ShaderIR::GetOperandAbsNegHalf(Node value, bool absolute, bool negate) { if (absolute) { - value = Operation(OperationCode::HAbsolute, NO_PRECISE, value); + value = Operation(OperationCode::HAbsolute, NO_PRECISE, std::move(value)); } if (negate) { - value = Operation(OperationCode::HNegate, NO_PRECISE, value, GetPredicate(true), + value = Operation(OperationCode::HNegate, NO_PRECISE, std::move(value), GetPredicate(true), GetPredicate(true)); } return value; @@ -216,9 +245,11 @@ Node ShaderIR::GetSaturatedHalfFloat(Node value, bool saturate) { if (!saturate) { return value; } - const Node positive_zero = Immediate(std::copysignf(0, 1)); - const Node positive_one = Immediate(1.0f); - return Operation(OperationCode::HClamp, NO_PRECISE, value, positive_zero, positive_one); + + Node positive_zero = Immediate(std::copysignf(0, 1)); + Node positive_one = Immediate(1.0f); + return Operation(OperationCode::HClamp, NO_PRECISE, std::move(value), std::move(positive_zero), + std::move(positive_one)); } Node ShaderIR::GetPredicateComparisonFloat(PredCondition condition, Node op_a, Node op_b) { @@ -246,7 +277,6 @@ Node ShaderIR::GetPredicateComparisonFloat(PredCondition condition, Node op_a, N condition == PredCondition::LessEqualWithNan || condition == PredCondition::GreaterThanWithNan || condition == PredCondition::GreaterEqualWithNan) { - predicate = Operation(OperationCode::LogicalOr, predicate, Operation(OperationCode::LogicalFIsNan, op_a)); predicate = Operation(OperationCode::LogicalOr, predicate, @@ -275,7 +305,8 @@ Node ShaderIR::GetPredicateComparisonInteger(PredCondition condition, bool is_si UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonTable.end(), "Unknown predicate comparison operation"); - Node predicate = SignedOperation(comparison->second, is_signed, NO_PRECISE, op_a, op_b); + Node predicate = SignedOperation(comparison->second, is_signed, NO_PRECISE, std::move(op_a), + std::move(op_b)); UNIMPLEMENTED_IF_MSG(condition == PredCondition::LessThanWithNan || condition == PredCondition::NotEqualWithNan || @@ -305,9 +336,7 @@ Node ShaderIR::GetPredicateComparisonHalf(Tegra::Shader::PredCondition condition UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonTable.end(), "Unknown predicate comparison operation"); - const Node predicate = Operation(comparison->second, NO_PRECISE, op_a, op_b); - - return predicate; + return Operation(comparison->second, NO_PRECISE, std::move(op_a), std::move(op_b)); } OperationCode ShaderIR::GetPredicateCombiner(PredOperation operation) { @@ -333,31 +362,32 @@ Node ShaderIR::GetConditionCode(Tegra::Shader::ConditionCode cc) { } void ShaderIR::SetRegister(NodeBlock& bb, Register dest, Node src) { - bb.push_back(Operation(OperationCode::Assign, GetRegister(dest), src)); + bb.push_back(Operation(OperationCode::Assign, GetRegister(dest), std::move(src))); } void ShaderIR::SetPredicate(NodeBlock& bb, u64 dest, Node src) { - bb.push_back(Operation(OperationCode::LogicalAssign, GetPredicate(dest), src)); + bb.push_back(Operation(OperationCode::LogicalAssign, GetPredicate(dest), std::move(src))); } void ShaderIR::SetInternalFlag(NodeBlock& bb, InternalFlag flag, Node value) { - bb.push_back(Operation(OperationCode::LogicalAssign, GetInternalFlag(flag), value)); + bb.push_back(Operation(OperationCode::LogicalAssign, GetInternalFlag(flag), std::move(value))); } void ShaderIR::SetLocalMemory(NodeBlock& bb, Node address, Node value) { - bb.push_back(Operation(OperationCode::Assign, GetLocalMemory(address), value)); + bb.push_back( + Operation(OperationCode::Assign, 
GetLocalMemory(std::move(address)), std::move(value))); } -void ShaderIR::SetTemporal(NodeBlock& bb, u32 id, Node value) { - SetRegister(bb, Register::ZeroIndex + 1 + id, value); +void ShaderIR::SetTemporary(NodeBlock& bb, u32 id, Node value) { + SetRegister(bb, Register::ZeroIndex + 1 + id, std::move(value)); } void ShaderIR::SetInternalFlagsFromFloat(NodeBlock& bb, Node value, bool sets_cc) { if (!sets_cc) { return; } - const Node zerop = Operation(OperationCode::LogicalFEqual, value, Immediate(0.0f)); - SetInternalFlag(bb, InternalFlag::Zero, zerop); + Node zerop = Operation(OperationCode::LogicalFEqual, std::move(value), Immediate(0.0f)); + SetInternalFlag(bb, InternalFlag::Zero, std::move(zerop)); LOG_WARNING(HW_GPU, "Condition codes implementation is incomplete"); } @@ -365,13 +395,18 @@ void ShaderIR::SetInternalFlagsFromInteger(NodeBlock& bb, Node value, bool sets_ if (!sets_cc) { return; } - const Node zerop = Operation(OperationCode::LogicalIEqual, value, Immediate(0)); - SetInternalFlag(bb, InternalFlag::Zero, zerop); + Node zerop = Operation(OperationCode::LogicalIEqual, std::move(value), Immediate(0)); + SetInternalFlag(bb, InternalFlag::Zero, std::move(zerop)); LOG_WARNING(HW_GPU, "Condition codes implementation is incomplete"); } Node ShaderIR::BitfieldExtract(Node value, u32 offset, u32 bits) { - return Operation(OperationCode::UBitfieldExtract, NO_PRECISE, value, Immediate(offset), + return Operation(OperationCode::UBitfieldExtract, NO_PRECISE, std::move(value), + Immediate(offset), Immediate(bits)); +} + +Node ShaderIR::BitfieldInsert(Node base, Node insert, u32 offset, u32 bits) { + return Operation(OperationCode::UBitfieldInsert, NO_PRECISE, base, insert, Immediate(offset), Immediate(bits)); } diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h index e225482081..bcc9b79b67 100644 --- a/src/video_core/shader/shader_ir.h +++ b/src/video_core/shader/shader_ir.h @@ -5,13 +5,10 @@ #pragma once #include <array> -#include <cstring> #include <map> #include <optional> #include <set> -#include <string> #include <tuple> -#include <variant> #include <vector> #include "common/common_types.h" @@ -22,18 +19,12 @@ namespace VideoCommon::Shader { +struct ShaderBlock; + using ProgramCode = std::vector<u64>; constexpr u32 MAX_PROGRAM_LENGTH = 0x1000; -/// Describes the behaviour of code path of a given entry point and a return point. -enum class ExitMethod { - Undetermined, ///< Internal value. Only occur when analyzing JMP loop. - AlwaysReturn, ///< All code paths reach the return point. - Conditional, ///< Code path reaches the return point or an END instruction conditionally. - AlwaysEnd, ///< All code paths reach a END instruction. 
-}; - class ConstBuffer { public: explicit ConstBuffer(u32 max_offset, bool is_indirect) @@ -73,7 +64,7 @@ struct GlobalMemoryUsage { class ShaderIR final { public: - explicit ShaderIR(const ProgramCode& program_code, u32 main_offset); + explicit ShaderIR(const ProgramCode& program_code, u32 main_offset, std::size_t size); ~ShaderIR(); const std::map<u32, NodeBlock>& GetBasicBlocks() const { @@ -121,6 +112,18 @@ public: return static_cast<std::size_t>(coverage_end * sizeof(u64)); } + bool UsesLayer() const { + return uses_layer; + } + + bool UsesViewportIndex() const { + return uses_viewport_index; + } + + bool UsesPointSize() const { + return uses_point_size; + } + bool HasPhysicalAttributes() const { return uses_physical_attributes; } @@ -129,12 +132,20 @@ public: return header; } + bool IsFlowStackDisabled() const { + return disable_flow_stack; + } + + u32 ConvertAddressToNvidiaSpace(const u32 address) const { + return (address - main_offset) * sizeof(Tegra::Shader::Instruction); + } + private: void Decode(); - ExitMethod Scan(u32 begin, u32 end, std::set<u32>& labels); - NodeBlock DecodeRange(u32 begin, u32 end); + void DecodeRangeInner(NodeBlock& bb, u32 begin, u32 end); + void InsertControlFlow(NodeBlock& bb, const ShaderBlock& block); /** * Decodes a single instruction from Tegra to IR. @@ -156,6 +167,7 @@ private: u32 DecodeFfma(NodeBlock& bb, u32 pc); u32 DecodeHfma2(NodeBlock& bb, u32 pc); u32 DecodeConversion(NodeBlock& bb, u32 pc); + u32 DecodeWarp(NodeBlock& bb, u32 pc); u32 DecodeMemory(NodeBlock& bb, u32 pc); u32 DecodeTexture(NodeBlock& bb, u32 pc); u32 DecodeImage(NodeBlock& bb, u32 pc); @@ -196,8 +208,8 @@ private: Node GetInternalFlag(InternalFlag flag, bool negated = false); /// Generates a node representing a local memory address Node GetLocalMemory(Node address); - /// Generates a temporal, internally it uses a post-RZ register - Node GetTemporal(u32 id); + /// Generates a temporary, internally it uses a post-RZ register + Node GetTemporary(u32 id); /// Sets a register. src value must be a number-evaluated node. void SetRegister(NodeBlock& bb, Tegra::Shader::Register dest, Node src); @@ -207,8 +219,8 @@ private: void SetInternalFlag(NodeBlock& bb, InternalFlag flag, Node value); /// Sets a local memory address. address and value must be a number-evaluated node void SetLocalMemory(NodeBlock& bb, Node address, Node value); - /// Sets a temporal. Internally it uses a post-RZ register - void SetTemporal(NodeBlock& bb, u32 id, Node value); + /// Sets a temporary. 
Internally it uses a post-RZ register + void SetTemporary(NodeBlock& bb, u32 id, Node value); /// Sets internal flags from a float void SetInternalFlagsFromFloat(NodeBlock& bb, Node value, bool sets_cc = true); @@ -268,6 +280,9 @@ private: /// Extracts a sequence of bits from a node Node BitfieldExtract(Node value, u32 offset, u32 bits); + /// Inserts a sequence of bits from a node + Node BitfieldInsert(Node base, Node insert, u32 offset, u32 bits); + void WriteTexInstructionFloat(NodeBlock& bb, Tegra::Shader::Instruction instr, const Node4& components); @@ -314,7 +329,7 @@ private: void WriteLop3Instruction(NodeBlock& bb, Tegra::Shader::Register dest, Node op_a, Node op_b, Node op_c, Node imm_lut, bool sets_cc); - Node TrackCbuf(Node tracked, const NodeBlock& code, s64 cursor) const; + std::tuple<Node, u32, u32> TrackCbuf(Node tracked, const NodeBlock& code, s64 cursor) const; std::optional<u32> TrackImmediate(Node tracked, const NodeBlock& code, s64 cursor) const; @@ -326,10 +341,11 @@ private: const ProgramCode& program_code; const u32 main_offset; + const std::size_t program_size; + bool disable_flow_stack{}; u32 coverage_begin{}; u32 coverage_end{}; - std::map<std::pair<u32, u32>, ExitMethod> exit_method_map; std::map<u32, NodeBlock> basic_blocks; NodeBlock global_code; @@ -343,6 +359,9 @@ private: std::set<Image> used_images; std::array<bool, Tegra::Engines::Maxwell3D::Regs::NumClipDistances> used_clip_distances{}; std::map<GlobalMemoryBase, GlobalMemoryUsage> used_global_memory; + bool uses_layer{}; + bool uses_viewport_index{}; + bool uses_point_size{}; bool uses_physical_attributes{}; // Shader uses AL2P or physical attribute read/writes Tegra::Shader::Header header; diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp index fc957d980e..55f5949e4b 100644 --- a/src/video_core/shader/track.cpp +++ b/src/video_core/shader/track.cpp @@ -15,56 +15,63 @@ namespace { std::pair<Node, s64> FindOperation(const NodeBlock& code, s64 cursor, OperationCode operation_code) { for (; cursor >= 0; --cursor) { - const Node node = code.at(cursor); + Node node = code.at(cursor); + if (const auto operation = std::get_if<OperationNode>(&*node)) { if (operation->GetCode() == operation_code) { - return {node, cursor}; + return {std::move(node), cursor}; } } + if (const auto conditional = std::get_if<ConditionalNode>(&*node)) { const auto& conditional_code = conditional->GetCode(); - const auto [found, internal_cursor] = FindOperation( + auto [found, internal_cursor] = FindOperation( conditional_code, static_cast<s64>(conditional_code.size() - 1), operation_code); if (found) { - return {found, cursor}; + return {std::move(found), cursor}; } } } return {}; } -} // namespace +} // Anonymous namespace -Node ShaderIR::TrackCbuf(Node tracked, const NodeBlock& code, s64 cursor) const { +std::tuple<Node, u32, u32> ShaderIR::TrackCbuf(Node tracked, const NodeBlock& code, + s64 cursor) const { if (const auto cbuf = std::get_if<CbufNode>(&*tracked)) { - // Cbuf found, but it has to be immediate - return std::holds_alternative<ImmediateNode>(*cbuf->GetOffset()) ? 
tracked : nullptr; + // Constant buffer found, test if it's an immediate + const auto offset = cbuf->GetOffset(); + if (const auto immediate = std::get_if<ImmediateNode>(&*offset)) { + return {tracked, cbuf->GetIndex(), immediate->GetValue()}; + } + return {}; } if (const auto gpr = std::get_if<GprNode>(&*tracked)) { if (gpr->GetIndex() == Tegra::Shader::Register::ZeroIndex) { - return nullptr; + return {}; } // Reduce the cursor in one to avoid infinite loops when the instruction sets the same // register that it uses as operand const auto [source, new_cursor] = TrackRegister(gpr, code, cursor - 1); if (!source) { - return nullptr; + return {}; } return TrackCbuf(source, code, new_cursor); } if (const auto operation = std::get_if<OperationNode>(&*tracked)) { - for (std::size_t i = 0; i < operation->GetOperandsCount(); ++i) { - if (const auto found = TrackCbuf((*operation)[i], code, cursor)) { - // Cbuf found in operand + for (std::size_t i = operation->GetOperandsCount(); i > 0; --i) { + if (auto found = TrackCbuf((*operation)[i - 1], code, cursor); std::get<0>(found)) { + // Cbuf found in operand. return found; } } - return nullptr; + return {}; } if (const auto conditional = std::get_if<ConditionalNode>(&*tracked)) { const auto& conditional_code = conditional->GetCode(); return TrackCbuf(tracked, conditional_code, static_cast<s64>(conditional_code.size())); } - return nullptr; + return {}; } std::optional<u32> ShaderIR::TrackImmediate(Node tracked, const NodeBlock& code, s64 cursor) const { diff --git a/src/video_core/surface.cpp b/src/video_core/surface.cpp index c50f6354d0..4ceb219be4 100644 --- a/src/video_core/surface.cpp +++ b/src/video_core/surface.cpp @@ -445,11 +445,12 @@ PixelFormat PixelFormatFromGPUPixelFormat(Tegra::FramebufferConfig::PixelFormat switch (format) { case Tegra::FramebufferConfig::PixelFormat::ABGR8: return PixelFormat::ABGR8U; + case Tegra::FramebufferConfig::PixelFormat::RGB565: + return PixelFormat::B5G6R5U; case Tegra::FramebufferConfig::PixelFormat::BGRA8: return PixelFormat::BGRA8; default: - LOG_CRITICAL(HW_GPU, "Unimplemented format={}", static_cast<u32>(format)); - UNREACHABLE(); + UNIMPLEMENTED_MSG("Unimplemented format={}", static_cast<u32>(format)); return PixelFormat::ABGR8U; } } diff --git a/src/video_core/texture_cache/surface_base.cpp b/src/video_core/texture_cache/surface_base.cpp index 7a0fdb19bc..683c492072 100644 --- a/src/video_core/texture_cache/surface_base.cpp +++ b/src/video_core/texture_cache/surface_base.cpp @@ -24,9 +24,8 @@ StagingCache::StagingCache() = default; StagingCache::~StagingCache() = default; SurfaceBaseImpl::SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params) - : params{params}, mipmap_sizes(params.num_levels), - mipmap_offsets(params.num_levels), gpu_addr{gpu_addr}, host_memory_size{ - params.GetHostSizeInBytes()} { + : params{params}, host_memory_size{params.GetHostSizeInBytes()}, gpu_addr{gpu_addr}, + mipmap_sizes(params.num_levels), mipmap_offsets(params.num_levels) { std::size_t offset = 0; for (u32 level = 0; level < params.num_levels; ++level) { const std::size_t mipmap_size{params.GetGuestMipmapSize(level)}; @@ -75,9 +74,12 @@ MatchStructureResult SurfaceBaseImpl::MatchesStructure(const SurfaceParams& rhs) // Linear Surface check if (!params.is_tiled) { - if (std::tie(params.width, params.height, params.pitch) == - std::tie(rhs.width, rhs.height, rhs.pitch)) { - return MatchStructureResult::FullMatch; + if (std::tie(params.height, params.pitch) == std::tie(rhs.height, rhs.pitch)) { + if (params.width 
== rhs.width) { + return MatchStructureResult::FullMatch; + } else { + return MatchStructureResult::SemiMatch; + } } return MatchStructureResult::None; } diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h index 8ba386a8ac..bcce8d8634 100644 --- a/src/video_core/texture_cache/surface_base.h +++ b/src/video_core/texture_cache/surface_base.h @@ -200,8 +200,9 @@ public: modification_tick = tick; } - void MarkAsRenderTarget(const bool is_target) { + void MarkAsRenderTarget(const bool is_target, const u32 index) { this->is_target = is_target; + this->index = index; } void MarkAsPicked(const bool is_picked) { @@ -221,6 +222,10 @@ public: return is_target; } + u32 GetRenderTarget() const { + return index; + } + bool IsRegistered() const { return is_registered; } @@ -307,10 +312,13 @@ private: return view; } + static constexpr u32 NO_RT = 0xFFFFFFFF; + bool is_modified{}; bool is_target{}; bool is_registered{}; bool is_picked{}; + u32 index{NO_RT}; u64 modification_tick{}; }; diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp index 9c56e2b4f1..fd54724513 100644 --- a/src/video_core/texture_cache/surface_params.cpp +++ b/src/video_core/texture_cache/surface_params.cpp @@ -290,12 +290,19 @@ std::size_t SurfaceParams::GetLayerSize(bool as_host_size, bool uncompressed) co std::size_t SurfaceParams::GetInnerMipmapMemorySize(u32 level, bool as_host_size, bool uncompressed) const { - const bool tiled{as_host_size ? false : is_tiled}; const u32 width{GetMipmapSize(uncompressed, GetMipWidth(level), GetDefaultBlockWidth())}; const u32 height{GetMipmapSize(uncompressed, GetMipHeight(level), GetDefaultBlockHeight())}; const u32 depth{is_layered ? 1U : GetMipDepth(level)}; - return Tegra::Texture::CalculateSize(tiled, GetBytesPerPixel(), width, height, depth, - GetMipBlockHeight(level), GetMipBlockDepth(level)); + if (is_tiled) { + return Tegra::Texture::CalculateSize(!as_host_size, GetBytesPerPixel(), width, height, + depth, GetMipBlockHeight(level), + GetMipBlockDepth(level)); + } else if (as_host_size || IsBuffer()) { + return GetBytesPerPixel() * width * height * depth; + } else { + // Linear Texture Case + return pitch * height * depth; + } } bool SurfaceParams::operator==(const SurfaceParams& rhs) const { diff --git a/src/video_core/texture_cache/surface_params.h b/src/video_core/texture_cache/surface_params.h index 358d6757c4..e7ef66ee23 100644 --- a/src/video_core/texture_cache/surface_params.h +++ b/src/video_core/texture_cache/surface_params.h @@ -58,7 +58,6 @@ public: std::size_t GetHostSizeInBytes() const { std::size_t host_size_in_bytes; if (GetCompressionType() == SurfaceCompression::Converted) { - constexpr std::size_t rgb8_bpp = 4ULL; // ASTC is uncompressed in software, in emulated as RGBA8 host_size_in_bytes = 0; for (u32 level = 0; level < num_levels; ++level) { diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index c9e72531a5..2ec0203d13 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -116,10 +116,10 @@ public: std::lock_guard lock{mutex}; auto& maxwell3d = system.GPU().Maxwell3D(); - if (!maxwell3d.dirty_flags.zeta_buffer) { + if (!maxwell3d.dirty.depth_buffer) { return depth_buffer.view; } - maxwell3d.dirty_flags.zeta_buffer = false; + maxwell3d.dirty.depth_buffer = false; const auto& regs{maxwell3d.regs}; const auto gpu_addr{regs.zeta.Address()}; @@ -133,11 
+133,11 @@ public: regs.zeta.memory_layout.block_depth, regs.zeta.memory_layout.type)}; auto surface_view = GetSurface(gpu_addr, depth_params, preserve_contents, true); if (depth_buffer.target) - depth_buffer.target->MarkAsRenderTarget(false); + depth_buffer.target->MarkAsRenderTarget(false, NO_RT); depth_buffer.target = surface_view.first; depth_buffer.view = surface_view.second; if (depth_buffer.target) - depth_buffer.target->MarkAsRenderTarget(true); + depth_buffer.target->MarkAsRenderTarget(true, DEPTH_RT); return surface_view.second; } @@ -145,10 +145,10 @@ public: std::lock_guard lock{mutex}; ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets); auto& maxwell3d = system.GPU().Maxwell3D(); - if (!maxwell3d.dirty_flags.color_buffer[index]) { + if (!maxwell3d.dirty.render_target[index]) { return render_targets[index].view; } - maxwell3d.dirty_flags.color_buffer.reset(index); + maxwell3d.dirty.render_target[index] = false; const auto& regs{maxwell3d.regs}; if (index >= regs.rt_control.count || regs.rt[index].Address() == 0 || @@ -167,11 +167,11 @@ public: auto surface_view = GetSurface(gpu_addr, SurfaceParams::CreateForFramebuffer(system, index), preserve_contents, true); if (render_targets[index].target) - render_targets[index].target->MarkAsRenderTarget(false); + render_targets[index].target->MarkAsRenderTarget(false, NO_RT); render_targets[index].target = surface_view.first; render_targets[index].view = surface_view.second; if (render_targets[index].target) - render_targets[index].target->MarkAsRenderTarget(true); + render_targets[index].target->MarkAsRenderTarget(true, static_cast<u32>(index)); return surface_view.second; } @@ -191,7 +191,7 @@ public: if (depth_buffer.target == nullptr) { return; } - depth_buffer.target->MarkAsRenderTarget(false); + depth_buffer.target->MarkAsRenderTarget(false, NO_RT); depth_buffer.target = nullptr; depth_buffer.view = nullptr; } @@ -200,7 +200,7 @@ public: if (render_targets[index].target == nullptr) { return; } - render_targets[index].target->MarkAsRenderTarget(false); + render_targets[index].target->MarkAsRenderTarget(false, NO_RT); render_targets[index].target = nullptr; render_targets[index].view = nullptr; } @@ -270,6 +270,17 @@ protected: // and reading it from a sepparate buffer. 
virtual void BufferCopy(TSurface& src_surface, TSurface& dst_surface) = 0; + void ManageRenderTargetUnregister(TSurface& surface) { + auto& maxwell3d = system.GPU().Maxwell3D(); + const u32 index = surface->GetRenderTarget(); + if (index == DEPTH_RT) { + maxwell3d.dirty.depth_buffer = true; + } else { + maxwell3d.dirty.render_target[index] = true; + } + maxwell3d.dirty.render_settings = true; + } + void Register(TSurface surface) { const GPUVAddr gpu_addr = surface->GetGpuAddr(); const CacheAddr cache_ptr = ToCacheAddr(system.GPU().MemoryManager().GetPointer(gpu_addr)); @@ -294,8 +305,9 @@ protected: if (guard_render_targets && surface->IsProtected()) { return; } - const GPUVAddr gpu_addr = surface->GetGpuAddr(); - const CacheAddr cache_ptr = surface->GetCacheAddr(); + if (!guard_render_targets && surface->IsRenderTarget()) { + ManageRenderTargetUnregister(surface); + } const std::size_t size = surface->GetSizeInBytes(); const VAddr cpu_addr = surface->GetCpuAddr(); rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1); @@ -649,15 +661,6 @@ private: } return {current_surface, *view}; } - // The next case is unsafe, so if we r in accurate GPU, just skip it - if (Settings::values.use_accurate_gpu_emulation) { - return RecycleSurface(overlaps, params, gpu_addr, preserve_contents, - MatchTopologyResult::FullMatch); - } - // This is the case the texture is a part of the parent. - if (current_surface->MatchesSubTexture(params, gpu_addr)) { - return RebuildSurface(current_surface, params, is_render); - } } else { // If there are many overlaps, odds are they are subtextures of the candidate // surface. We try to construct a new surface based on the candidate parameters, @@ -793,6 +796,9 @@ private: static constexpr u64 registry_page_size{1 << registry_page_bits}; std::unordered_map<CacheAddr, std::vector<TSurface>> registry; + static constexpr u32 DEPTH_RT = 8; + static constexpr u32 NO_RT = 0xFFFFFFFF; + // The L1 Cache is used for fast texture lookup before checking the overlaps // This avoids calculating size and other stuffs. 
std::unordered_map<CacheAddr, TSurface> l1_cache; diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp index 7e82959440..7df5f14521 100644 --- a/src/video_core/textures/decoders.cpp +++ b/src/video_core/textures/decoders.cpp @@ -257,19 +257,21 @@ std::vector<u8> UnswizzleTexture(u8* address, u32 tile_size_x, u32 tile_size_y, void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, - u32 block_height_bit) { + u32 block_height_bit, u32 offset_x, u32 offset_y) { const u32 block_height = 1U << block_height_bit; const u32 image_width_in_gobs{(swizzled_width * bytes_per_pixel + (gob_size_x - 1)) / gob_size_x}; for (u32 line = 0; line < subrect_height; ++line) { + const u32 dst_y = line + offset_y; const u32 gob_address_y = - (line / (gob_size_y * block_height)) * gob_size * block_height * image_width_in_gobs + - ((line % (gob_size_y * block_height)) / gob_size_y) * gob_size; - const auto& table = legacy_swizzle_table[line % gob_size_y]; + (dst_y / (gob_size_y * block_height)) * gob_size * block_height * image_width_in_gobs + + ((dst_y % (gob_size_y * block_height)) / gob_size_y) * gob_size; + const auto& table = legacy_swizzle_table[dst_y % gob_size_y]; for (u32 x = 0; x < subrect_width; ++x) { + const u32 dst_x = x + offset_x; const u32 gob_address = - gob_address_y + (x * bytes_per_pixel / gob_size_x) * gob_size * block_height; - const u32 swizzled_offset = gob_address + table[(x * bytes_per_pixel) % gob_size_x]; + gob_address_y + (dst_x * bytes_per_pixel / gob_size_x) * gob_size * block_height; + const u32 swizzled_offset = gob_address + table[(dst_x * bytes_per_pixel) % gob_size_x]; u8* source_line = unswizzled_data + line * source_pitch + x * bytes_per_pixel; u8* dest_addr = swizzled_data + swizzled_offset; diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h index eaec9b5a5b..f1e3952bcf 100644 --- a/src/video_core/textures/decoders.h +++ b/src/video_core/textures/decoders.h @@ -44,7 +44,8 @@ std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height /// Copies an untiled subrectangle into a tiled surface. void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, - u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height); + u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height, + u32 offset_x, u32 offset_y); /// Copies a tiled subrectangle into a linear surface. 
void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width, diff --git a/src/video_core/textures/texture.h b/src/video_core/textures/texture.h index e3be018b97..e36bc2c04e 100644 --- a/src/video_core/textures/texture.h +++ b/src/video_core/textures/texture.h @@ -213,7 +213,7 @@ struct TICEntry { if (header_version != TICHeaderVersion::OneDBuffer) { return width_minus_1 + 1; } - return (buffer_high_width_minus_one << 16) | buffer_low_width_minus_one; + return ((buffer_high_width_minus_one << 16) | buffer_low_width_minus_one) + 1; } u32 Height() const { diff --git a/src/yuzu/CMakeLists.txt b/src/yuzu/CMakeLists.txt index 3dc0e47d06..f051e17b46 100644 --- a/src/yuzu/CMakeLists.txt +++ b/src/yuzu/CMakeLists.txt @@ -1,5 +1,6 @@ set(CMAKE_AUTOMOC ON) set(CMAKE_AUTORCC ON) +set(CMAKE_AUTOUIC ON) set(CMAKE_INCLUDE_CURRENT_DIR ON) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${PROJECT_SOURCE_DIR}/CMakeModules) @@ -7,6 +8,7 @@ add_executable(yuzu Info.plist about_dialog.cpp about_dialog.h + aboutdialog.ui applets/error.cpp applets/error.h applets/profile_select.cpp @@ -17,42 +19,59 @@ add_executable(yuzu applets/web_browser.h bootmanager.cpp bootmanager.h + compatdb.ui compatibility_list.cpp compatibility_list.h configuration/config.cpp configuration/config.h + configuration/configure.ui configuration/configure_audio.cpp configuration/configure_audio.h + configuration/configure_audio.ui configuration/configure_debug.cpp configuration/configure_debug.h + configuration/configure_debug.ui configuration/configure_dialog.cpp configuration/configure_dialog.h configuration/configure_gamelist.cpp configuration/configure_gamelist.h + configuration/configure_gamelist.ui configuration/configure_general.cpp configuration/configure_general.h + configuration/configure_general.ui configuration/configure_graphics.cpp configuration/configure_graphics.h + configuration/configure_graphics.ui configuration/configure_hotkeys.cpp configuration/configure_hotkeys.h + configuration/configure_hotkeys.ui configuration/configure_input.cpp configuration/configure_input.h + configuration/configure_input.ui configuration/configure_input_player.cpp configuration/configure_input_player.h + configuration/configure_input_player.ui configuration/configure_input_simple.cpp configuration/configure_input_simple.h + configuration/configure_input_simple.ui configuration/configure_mouse_advanced.cpp configuration/configure_mouse_advanced.h + configuration/configure_mouse_advanced.ui + configuration/configure_per_general.cpp + configuration/configure_per_general.h + configuration/configure_per_general.ui configuration/configure_profile_manager.cpp configuration/configure_profile_manager.h + configuration/configure_profile_manager.ui configuration/configure_system.cpp configuration/configure_system.h - configuration/configure_per_general.cpp - configuration/configure_per_general.h + configuration/configure_system.ui configuration/configure_touchscreen_advanced.cpp configuration/configure_touchscreen_advanced.h + configuration/configure_touchscreen_advanced.ui configuration/configure_web.cpp configuration/configure_web.h + configuration/configure_web.ui debugger/graphics/graphics_breakpoint_observer.cpp debugger/graphics/graphics_breakpoint_observer.h debugger/graphics/graphics_breakpoints.cpp @@ -72,12 +91,14 @@ add_executable(yuzu game_list_worker.h loading_screen.cpp loading_screen.h + loading_screen.ui hotkeys.cpp hotkeys.h main.cpp main.h - ui_settings.cpp - ui_settings.h + main.ui + uisettings.cpp + 
uisettings.h util/limitable_input_dialog.cpp util/limitable_input_dialog.h util/sequence_dialog/sequence_dialog.cpp @@ -89,44 +110,18 @@ add_executable(yuzu yuzu.rc ) -set(UIS - aboutdialog.ui - configuration/configure.ui - configuration/configure_audio.ui - configuration/configure_debug.ui - configuration/configure_gamelist.ui - configuration/configure_general.ui - configuration/configure_graphics.ui - configuration/configure_hotkeys.ui - configuration/configure_input.ui - configuration/configure_input_player.ui - configuration/configure_input_simple.ui - configuration/configure_mouse_advanced.ui - configuration/configure_per_general.ui - configuration/configure_profile_manager.ui - configuration/configure_system.ui - configuration/configure_touchscreen_advanced.ui - configuration/configure_web.ui - compatdb.ui - loading_screen.ui - main.ui -) - file(GLOB COMPAT_LIST ${PROJECT_BINARY_DIR}/dist/compatibility_list/compatibility_list.qrc ${PROJECT_BINARY_DIR}/dist/compatibility_list/compatibility_list.json) file(GLOB_RECURSE ICONS ${PROJECT_SOURCE_DIR}/dist/icons/*) file(GLOB_RECURSE THEMES ${PROJECT_SOURCE_DIR}/dist/qt_themes/*) -qt5_wrap_ui(UI_HDRS ${UIS}) target_sources(yuzu PRIVATE ${COMPAT_LIST} ${ICONS} ${THEMES} - ${UI_HDRS} - ${UIS} ) if (APPLE) diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp index 73978ff5b0..f594106bf9 100644 --- a/src/yuzu/configuration/config.cpp +++ b/src/yuzu/configuration/config.cpp @@ -11,7 +11,7 @@ #include "core/hle/service/hid/controllers/npad.h" #include "input_common/main.h" #include "yuzu/configuration/config.h" -#include "yuzu/ui_settings.h" +#include "yuzu/uisettings.h" Config::Config() { // TODO: Don't hardcode the path; let the frontend decide where to put the config files. 
@@ -436,8 +436,6 @@ void Config::ReadControlValues() { void Config::ReadCoreValues() { qt_config->beginGroup(QStringLiteral("Core")); - Settings::values.cpu_jit_enabled = - ReadSetting(QStringLiteral("cpu_jit_enabled"), true).toBool(); Settings::values.use_multi_core = ReadSetting(QStringLiteral("use_multi_core"), false).toBool(); qt_config->endGroup(); @@ -518,10 +516,38 @@ void Config::ReadPathValues() { UISettings::values.roms_path = ReadSetting(QStringLiteral("romsPath")).toString(); UISettings::values.symbols_path = ReadSetting(QStringLiteral("symbolsPath")).toString(); - UISettings::values.game_directory_path = + UISettings::values.screenshot_path = ReadSetting(QStringLiteral("screenshotPath")).toString(); + UISettings::values.game_dir_deprecated = ReadSetting(QStringLiteral("gameListRootDir"), QStringLiteral(".")).toString(); - UISettings::values.game_directory_deepscan = + UISettings::values.game_dir_deprecated_deepscan = ReadSetting(QStringLiteral("gameListDeepScan"), false).toBool(); + const int gamedirs_size = qt_config->beginReadArray(QStringLiteral("gamedirs")); + for (int i = 0; i < gamedirs_size; ++i) { + qt_config->setArrayIndex(i); + UISettings::GameDir game_dir; + game_dir.path = ReadSetting(QStringLiteral("path")).toString(); + game_dir.deep_scan = ReadSetting(QStringLiteral("deep_scan"), false).toBool(); + game_dir.expanded = ReadSetting(QStringLiteral("expanded"), true).toBool(); + UISettings::values.game_dirs.append(game_dir); + } + qt_config->endArray(); + // create NAND and SD card directories if empty, these are not removable through the UI, + // also carries over old game list settings if present + if (UISettings::values.game_dirs.isEmpty()) { + UISettings::GameDir game_dir; + game_dir.path = QStringLiteral("SDMC"); + game_dir.expanded = true; + UISettings::values.game_dirs.append(game_dir); + game_dir.path = QStringLiteral("UserNAND"); + UISettings::values.game_dirs.append(game_dir); + game_dir.path = QStringLiteral("SysNAND"); + UISettings::values.game_dirs.append(game_dir); + if (UISettings::values.game_dir_deprecated != QStringLiteral(".")) { + game_dir.path = UISettings::values.game_dir_deprecated; + game_dir.deep_scan = UISettings::values.game_dir_deprecated_deepscan; + UISettings::values.game_dirs.append(game_dir); + } + } UISettings::values.recent_files = ReadSetting(QStringLiteral("recentFiles")).toStringList(); qt_config->endGroup(); @@ -831,7 +857,6 @@ void Config::SaveControlValues() { void Config::SaveCoreValues() { qt_config->beginGroup(QStringLiteral("Core")); - WriteSetting(QStringLiteral("cpu_jit_enabled"), Settings::values.cpu_jit_enabled, true); WriteSetting(QStringLiteral("use_multi_core"), Settings::values.use_multi_core, false); qt_config->endGroup(); @@ -901,10 +926,15 @@ void Config::SavePathValues() { WriteSetting(QStringLiteral("romsPath"), UISettings::values.roms_path); WriteSetting(QStringLiteral("symbolsPath"), UISettings::values.symbols_path); WriteSetting(QStringLiteral("screenshotPath"), UISettings::values.screenshot_path); - WriteSetting(QStringLiteral("gameListRootDir"), UISettings::values.game_directory_path, - QStringLiteral(".")); - WriteSetting(QStringLiteral("gameListDeepScan"), UISettings::values.game_directory_deepscan, - false); + qt_config->beginWriteArray(QStringLiteral("gamedirs")); + for (int i = 0; i < UISettings::values.game_dirs.size(); ++i) { + qt_config->setArrayIndex(i); + const auto& game_dir = UISettings::values.game_dirs[i]; + WriteSetting(QStringLiteral("path"), game_dir.path); + 
WriteSetting(QStringLiteral("deep_scan"), game_dir.deep_scan, false); + WriteSetting(QStringLiteral("expanded"), game_dir.expanded, true); + } + qt_config->endArray(); WriteSetting(QStringLiteral("recentFiles"), UISettings::values.recent_files); qt_config->endGroup(); diff --git a/src/yuzu/configuration/configure_debug.cpp b/src/yuzu/configuration/configure_debug.cpp index 9a13bb7979..5b7e030569 100644 --- a/src/yuzu/configuration/configure_debug.cpp +++ b/src/yuzu/configuration/configure_debug.cpp @@ -12,13 +12,13 @@ #include "ui_configure_debug.h" #include "yuzu/configuration/configure_debug.h" #include "yuzu/debugger/console.h" -#include "yuzu/ui_settings.h" +#include "yuzu/uisettings.h" ConfigureDebug::ConfigureDebug(QWidget* parent) : QWidget(parent), ui(new Ui::ConfigureDebug) { ui->setupUi(this); SetConfiguration(); - connect(ui->open_log_button, &QPushButton::pressed, []() { + connect(ui->open_log_button, &QPushButton::clicked, []() { QString path = QString::fromStdString(FileUtil::GetUserPath(FileUtil::UserPath::LogDir)); QDesktopServices::openUrl(QUrl::fromLocalFile(path)); }); diff --git a/src/yuzu/configuration/configure_dialog.cpp b/src/yuzu/configuration/configure_dialog.cpp index e636964e34..775e3f2eac 100644 --- a/src/yuzu/configuration/configure_dialog.cpp +++ b/src/yuzu/configuration/configure_dialog.cpp @@ -68,12 +68,14 @@ void ConfigureDialog::RetranslateUI() { ui->tabWidget->setCurrentIndex(old_index); } +Q_DECLARE_METATYPE(QList<QWidget*>); + void ConfigureDialog::PopulateSelectionList() { - const std::array<std::pair<QString, QStringList>, 4> items{ - {{tr("General"), {tr("General"), tr("Web"), tr("Debug"), tr("Game List")}}, - {tr("System"), {tr("System"), tr("Profiles"), tr("Audio")}}, - {tr("Graphics"), {tr("Graphics")}}, - {tr("Controls"), {tr("Input"), tr("Hotkeys")}}}, + const std::array<std::pair<QString, QList<QWidget*>>, 4> items{ + {{tr("General"), {ui->generalTab, ui->webTab, ui->debugTab, ui->gameListTab}}, + {tr("System"), {ui->systemTab, ui->profileManagerTab, ui->audioTab}}, + {tr("Graphics"), {ui->graphicsTab}}, + {tr("Controls"), {ui->inputTab, ui->hotkeysTab}}}, }; [[maybe_unused]] const QSignalBlocker blocker(ui->selectorList); @@ -81,7 +83,7 @@ void ConfigureDialog::PopulateSelectionList() { ui->selectorList->clear(); for (const auto& entry : items) { auto* const item = new QListWidgetItem(entry.first); - item->setData(Qt::UserRole, entry.second); + item->setData(Qt::UserRole, QVariant::fromValue(entry.second)); ui->selectorList->addItem(item); } @@ -93,24 +95,26 @@ void ConfigureDialog::UpdateVisibleTabs() { return; } - const std::map<QString, QWidget*> widgets = { - {tr("General"), ui->generalTab}, - {tr("System"), ui->systemTab}, - {tr("Profiles"), ui->profileManagerTab}, - {tr("Input"), ui->inputTab}, - {tr("Hotkeys"), ui->hotkeysTab}, - {tr("Graphics"), ui->graphicsTab}, - {tr("Audio"), ui->audioTab}, - {tr("Debug"), ui->debugTab}, - {tr("Web"), ui->webTab}, - {tr("Game List"), ui->gameListTab}, + const std::map<QWidget*, QString> widgets = { + {ui->generalTab, tr("General")}, + {ui->systemTab, tr("System")}, + {ui->profileManagerTab, tr("Profiles")}, + {ui->inputTab, tr("Input")}, + {ui->hotkeysTab, tr("Hotkeys")}, + {ui->graphicsTab, tr("Graphics")}, + {ui->audioTab, tr("Audio")}, + {ui->debugTab, tr("Debug")}, + {ui->webTab, tr("Web")}, + {ui->gameListTab, tr("Game List")}, }; [[maybe_unused]] const QSignalBlocker blocker(ui->tabWidget); ui->tabWidget->clear(); - const QStringList tabs = items[0]->data(Qt::UserRole).toStringList(); - for 
(const auto& tab : tabs) { - ui->tabWidget->addTab(widgets.find(tab)->second, tab); + + const QList<QWidget*> tabs = qvariant_cast<QList<QWidget*>>(items[0]->data(Qt::UserRole)); + + for (const auto tab : tabs) { + ui->tabWidget->addTab(tab, widgets.at(tab)); } } diff --git a/src/yuzu/configuration/configure_gamelist.cpp b/src/yuzu/configuration/configure_gamelist.cpp index d1724ba89e..daedbc33e5 100644 --- a/src/yuzu/configuration/configure_gamelist.cpp +++ b/src/yuzu/configuration/configure_gamelist.cpp @@ -9,7 +9,7 @@ #include "core/settings.h" #include "ui_configure_gamelist.h" #include "yuzu/configuration/configure_gamelist.h" -#include "yuzu/ui_settings.h" +#include "yuzu/uisettings.h" namespace { constexpr std::array default_icon_sizes{ diff --git a/src/yuzu/configuration/configure_general.cpp b/src/yuzu/configuration/configure_general.cpp index 7a6e921cd2..10bcd650ea 100644 --- a/src/yuzu/configuration/configure_general.cpp +++ b/src/yuzu/configuration/configure_general.cpp @@ -6,7 +6,7 @@ #include "core/settings.h" #include "ui_configure_general.h" #include "yuzu/configuration/configure_general.h" -#include "yuzu/ui_settings.h" +#include "yuzu/uisettings.h" ConfigureGeneral::ConfigureGeneral(QWidget* parent) : QWidget(parent), ui(new Ui::ConfigureGeneral) { @@ -20,25 +20,29 @@ ConfigureGeneral::ConfigureGeneral(QWidget* parent) SetConfiguration(); - connect(ui->toggle_deepscan, &QCheckBox::stateChanged, this, - [] { UISettings::values.is_game_list_reload_pending.exchange(true); }); + connect(ui->toggle_frame_limit, &QCheckBox::toggled, ui->frame_limit, &QSpinBox::setEnabled); } ConfigureGeneral::~ConfigureGeneral() = default; void ConfigureGeneral::SetConfiguration() { - ui->toggle_deepscan->setChecked(UISettings::values.game_directory_deepscan); ui->toggle_check_exit->setChecked(UISettings::values.confirm_before_closing); ui->toggle_user_on_boot->setChecked(UISettings::values.select_user_on_boot); ui->theme_combobox->setCurrentIndex(ui->theme_combobox->findData(UISettings::values.theme)); + + ui->toggle_frame_limit->setChecked(Settings::values.use_frame_limit); + ui->frame_limit->setEnabled(ui->toggle_frame_limit->isChecked()); + ui->frame_limit->setValue(Settings::values.frame_limit); } void ConfigureGeneral::ApplyConfiguration() { - UISettings::values.game_directory_deepscan = ui->toggle_deepscan->isChecked(); UISettings::values.confirm_before_closing = ui->toggle_check_exit->isChecked(); UISettings::values.select_user_on_boot = ui->toggle_user_on_boot->isChecked(); UISettings::values.theme = ui->theme_combobox->itemData(ui->theme_combobox->currentIndex()).toString(); + + Settings::values.use_frame_limit = ui->toggle_frame_limit->isChecked(); + Settings::values.frame_limit = ui->frame_limit->value(); } void ConfigureGeneral::changeEvent(QEvent* event) { diff --git a/src/yuzu/configuration/configure_general.ui b/src/yuzu/configuration/configure_general.ui index 184fdd3298..0bb91d64b7 100644 --- a/src/yuzu/configuration/configure_general.ui +++ b/src/yuzu/configuration/configure_general.ui @@ -25,11 +25,31 @@ <item> <layout class="QVBoxLayout" name="GeneralVerticalLayout"> <item> - <widget class="QCheckBox" name="toggle_deepscan"> - <property name="text"> - <string>Search sub-directories for games</string> - </property> - </widget> + <layout class="QHBoxLayout" name="horizontalLayout_2"> + <item> + <widget class="QCheckBox" name="toggle_frame_limit"> + <property name="text"> + <string>Limit Speed Percent</string> + </property> + </widget> + </item> + <item> + <widget 
class="QSpinBox" name="frame_limit"> + <property name="suffix"> + <string>%</string> + </property> + <property name="minimum"> + <number>1</number> + </property> + <property name="maximum"> + <number>9999</number> + </property> + <property name="value"> + <number>100</number> + </property> + </widget> + </item> + </layout> </item> <item> <widget class="QCheckBox" name="toggle_check_exit"> diff --git a/src/yuzu/configuration/configure_graphics.cpp b/src/yuzu/configuration/configure_graphics.cpp index 2b17b250cf..2c9e322c94 100644 --- a/src/yuzu/configuration/configure_graphics.cpp +++ b/src/yuzu/configuration/configure_graphics.cpp @@ -55,7 +55,6 @@ ConfigureGraphics::ConfigureGraphics(QWidget* parent) SetConfiguration(); - connect(ui->toggle_frame_limit, &QCheckBox::toggled, ui->frame_limit, &QSpinBox::setEnabled); connect(ui->bg_button, &QPushButton::clicked, this, [this] { const QColor new_bg_color = QColorDialog::getColor(bg_color); if (!new_bg_color.isValid()) { @@ -72,9 +71,6 @@ void ConfigureGraphics::SetConfiguration() { ui->resolution_factor_combobox->setCurrentIndex( static_cast<int>(FromResolutionFactor(Settings::values.resolution_factor))); - ui->toggle_frame_limit->setChecked(Settings::values.use_frame_limit); - ui->frame_limit->setEnabled(ui->toggle_frame_limit->isChecked()); - ui->frame_limit->setValue(Settings::values.frame_limit); ui->use_disk_shader_cache->setEnabled(runtime_lock); ui->use_disk_shader_cache->setChecked(Settings::values.use_disk_shader_cache); ui->use_accurate_gpu_emulation->setChecked(Settings::values.use_accurate_gpu_emulation); @@ -89,8 +85,6 @@ void ConfigureGraphics::SetConfiguration() { void ConfigureGraphics::ApplyConfiguration() { Settings::values.resolution_factor = ToResolutionFactor(static_cast<Resolution>(ui->resolution_factor_combobox->currentIndex())); - Settings::values.use_frame_limit = ui->toggle_frame_limit->isChecked(); - Settings::values.frame_limit = ui->frame_limit->value(); Settings::values.use_disk_shader_cache = ui->use_disk_shader_cache->isChecked(); Settings::values.use_accurate_gpu_emulation = ui->use_accurate_gpu_emulation->isChecked(); Settings::values.use_asynchronous_gpu_emulation = diff --git a/src/yuzu/configuration/configure_graphics.ui b/src/yuzu/configuration/configure_graphics.ui index 15ab18ecd0..0309ee3002 100644 --- a/src/yuzu/configuration/configure_graphics.ui +++ b/src/yuzu/configuration/configure_graphics.ui @@ -23,33 +23,6 @@ </property> <layout class="QVBoxLayout" name="verticalLayout_2"> <item> - <layout class="QHBoxLayout" name="horizontalLayout_2"> - <item> - <widget class="QCheckBox" name="toggle_frame_limit"> - <property name="text"> - <string>Limit Speed Percent</string> - </property> - </widget> - </item> - <item> - <widget class="QSpinBox" name="frame_limit"> - <property name="suffix"> - <string>%</string> - </property> - <property name="minimum"> - <number>1</number> - </property> - <property name="maximum"> - <number>9999</number> - </property> - <property name="value"> - <number>100</number> - </property> - </widget> - </item> - </layout> - </item> - <item> <widget class="QCheckBox" name="use_disk_shader_cache"> <property name="text"> <string>Use disk shader cache</string> diff --git a/src/yuzu/configuration/configure_input.cpp b/src/yuzu/configuration/configure_input.cpp index 4dd775aab1..7613197f25 100644 --- a/src/yuzu/configuration/configure_input.cpp +++ b/src/yuzu/configuration/configure_input.cpp @@ -79,7 +79,7 @@ ConfigureInput::ConfigureInput(QWidget* parent) LoadConfiguration(); 
UpdateUIEnabled(); - connect(ui->restore_defaults_button, &QPushButton::pressed, this, + connect(ui->restore_defaults_button, &QPushButton::clicked, this, &ConfigureInput::RestoreDefaults); for (auto* enabled : players_controller) { @@ -96,20 +96,20 @@ ConfigureInput::ConfigureInput(QWidget* parent) &ConfigureInput::UpdateUIEnabled); for (std::size_t i = 0; i < players_configure.size(); ++i) { - connect(players_configure[i], &QPushButton::pressed, this, + connect(players_configure[i], &QPushButton::clicked, this, [this, i] { CallConfigureDialog<ConfigureInputPlayer>(*this, i, false); }); } - connect(ui->handheld_configure, &QPushButton::pressed, this, + connect(ui->handheld_configure, &QPushButton::clicked, this, [this] { CallConfigureDialog<ConfigureInputPlayer>(*this, 8, false); }); - connect(ui->debug_configure, &QPushButton::pressed, this, + connect(ui->debug_configure, &QPushButton::clicked, this, [this] { CallConfigureDialog<ConfigureInputPlayer>(*this, 9, true); }); - connect(ui->mouse_advanced, &QPushButton::pressed, this, + connect(ui->mouse_advanced, &QPushButton::clicked, this, [this] { CallConfigureDialog<ConfigureMouseAdvanced>(*this); }); - connect(ui->touchscreen_advanced, &QPushButton::pressed, this, + connect(ui->touchscreen_advanced, &QPushButton::clicked, this, [this] { CallConfigureDialog<ConfigureTouchscreenAdvanced>(*this); }); } diff --git a/src/yuzu/configuration/configure_input_player.cpp b/src/yuzu/configuration/configure_input_player.cpp index 916baccc1c..a968cfb5d6 100644 --- a/src/yuzu/configuration/configure_input_player.cpp +++ b/src/yuzu/configuration/configure_input_player.cpp @@ -244,7 +244,7 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i } button->setContextMenuPolicy(Qt::CustomContextMenu); - connect(button, &QPushButton::released, [=] { + connect(button, &QPushButton::clicked, [=] { HandleClick( button_map[button_id], [=](const Common::ParamPackage& params) { buttons_param[button_id] = params; }, @@ -273,7 +273,7 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i } analog_button->setContextMenuPolicy(Qt::CustomContextMenu); - connect(analog_button, &QPushButton::released, [=]() { + connect(analog_button, &QPushButton::clicked, [=]() { HandleClick(analog_map_buttons[analog_id][sub_button_id], [=](const Common::ParamPackage& params) { SetAnalogButton(params, analogs_param[analog_id], @@ -300,19 +300,22 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i menu_location)); }); } - connect(analog_map_stick[analog_id], &QPushButton::released, [=] { - QMessageBox::information(this, tr("Information"), - tr("After pressing OK, first move your joystick horizontally, " - "and then vertically.")); - HandleClick( - analog_map_stick[analog_id], - [=](const Common::ParamPackage& params) { analogs_param[analog_id] = params; }, - InputCommon::Polling::DeviceType::Analog); + connect(analog_map_stick[analog_id], &QPushButton::clicked, [=] { + if (QMessageBox::information( + this, tr("Information"), + tr("After pressing OK, first move your joystick horizontally, " + "and then vertically."), + QMessageBox::Ok | QMessageBox::Cancel) == QMessageBox::Ok) { + HandleClick( + analog_map_stick[analog_id], + [=](const Common::ParamPackage& params) { analogs_param[analog_id] = params; }, + InputCommon::Polling::DeviceType::Analog); + } }); } - connect(ui->buttonClearAll, &QPushButton::released, [this] { ClearAll(); }); - connect(ui->buttonRestoreDefaults, &QPushButton::released, 
[this] { RestoreDefaults(); }); + connect(ui->buttonClearAll, &QPushButton::clicked, [this] { ClearAll(); }); + connect(ui->buttonRestoreDefaults, &QPushButton::clicked, [this] { RestoreDefaults(); }); timeout_timer->setSingleShot(true); connect(timeout_timer.get(), &QTimer::timeout, [this] { SetPollingResult({}, true); }); diff --git a/src/yuzu/configuration/configure_input_simple.cpp b/src/yuzu/configuration/configure_input_simple.cpp index 864803ea3c..ab3a11d304 100644 --- a/src/yuzu/configuration/configure_input_simple.cpp +++ b/src/yuzu/configuration/configure_input_simple.cpp @@ -9,7 +9,7 @@ #include "yuzu/configuration/configure_input.h" #include "yuzu/configuration/configure_input_player.h" #include "yuzu/configuration/configure_input_simple.h" -#include "yuzu/ui_settings.h" +#include "yuzu/uisettings.h" namespace { @@ -101,7 +101,7 @@ ConfigureInputSimple::ConfigureInputSimple(QWidget* parent) connect(ui->profile_combobox, QOverload<int>::of(&QComboBox::currentIndexChanged), this, &ConfigureInputSimple::OnSelectProfile); - connect(ui->profile_configure, &QPushButton::pressed, this, &ConfigureInputSimple::OnConfigure); + connect(ui->profile_configure, &QPushButton::clicked, this, &ConfigureInputSimple::OnConfigure); LoadConfiguration(); } diff --git a/src/yuzu/configuration/configure_mouse_advanced.cpp b/src/yuzu/configuration/configure_mouse_advanced.cpp index b7305e653f..0a4abe34f3 100644 --- a/src/yuzu/configuration/configure_mouse_advanced.cpp +++ b/src/yuzu/configuration/configure_mouse_advanced.cpp @@ -83,7 +83,7 @@ ConfigureMouseAdvanced::ConfigureMouseAdvanced(QWidget* parent) } button->setContextMenuPolicy(Qt::CustomContextMenu); - connect(button, &QPushButton::released, [=] { + connect(button, &QPushButton::clicked, [=] { HandleClick( button_map[button_id], [=](const Common::ParamPackage& params) { buttons_param[button_id] = params; }, @@ -104,8 +104,8 @@ ConfigureMouseAdvanced::ConfigureMouseAdvanced(QWidget* parent) }); } - connect(ui->buttonClearAll, &QPushButton::released, [this] { ClearAll(); }); - connect(ui->buttonRestoreDefaults, &QPushButton::released, [this] { RestoreDefaults(); }); + connect(ui->buttonClearAll, &QPushButton::clicked, [this] { ClearAll(); }); + connect(ui->buttonRestoreDefaults, &QPushButton::clicked, [this] { RestoreDefaults(); }); timeout_timer->setSingleShot(true); connect(timeout_timer.get(), &QTimer::timeout, [this] { SetPollingResult({}, true); }); diff --git a/src/yuzu/configuration/configure_per_general.cpp b/src/yuzu/configuration/configure_per_general.cpp index 90336e235b..d7f259f12a 100644 --- a/src/yuzu/configuration/configure_per_general.cpp +++ b/src/yuzu/configuration/configure_per_general.cpp @@ -23,7 +23,7 @@ #include "yuzu/configuration/config.h" #include "yuzu/configuration/configure_input.h" #include "yuzu/configuration/configure_per_general.h" -#include "yuzu/ui_settings.h" +#include "yuzu/uisettings.h" #include "yuzu/util/util.h" ConfigurePerGameGeneral::ConfigurePerGameGeneral(QWidget* parent, u64 title_id) diff --git a/src/yuzu/configuration/configure_profile_manager.cpp b/src/yuzu/configuration/configure_profile_manager.cpp index c90f4cdd81..f53423440d 100644 --- a/src/yuzu/configuration/configure_profile_manager.cpp +++ b/src/yuzu/configuration/configure_profile_manager.cpp @@ -108,10 +108,10 @@ ConfigureProfileManager ::ConfigureProfileManager(QWidget* parent) connect(tree_view, &QTreeView::clicked, this, &ConfigureProfileManager::SelectUser); - connect(ui->pm_add, &QPushButton::pressed, this, 
&ConfigureProfileManager::AddUser); - connect(ui->pm_rename, &QPushButton::pressed, this, &ConfigureProfileManager::RenameUser); - connect(ui->pm_remove, &QPushButton::pressed, this, &ConfigureProfileManager::DeleteUser); - connect(ui->pm_set_image, &QPushButton::pressed, this, &ConfigureProfileManager::SetUserImage); + connect(ui->pm_add, &QPushButton::clicked, this, &ConfigureProfileManager::AddUser); + connect(ui->pm_rename, &QPushButton::clicked, this, &ConfigureProfileManager::RenameUser); + connect(ui->pm_remove, &QPushButton::clicked, this, &ConfigureProfileManager::DeleteUser); + connect(ui->pm_set_image, &QPushButton::clicked, this, &ConfigureProfileManager::SetUserImage); scene = new QGraphicsScene; ui->current_user_icon->setScene(scene); diff --git a/src/yuzu/configuration/configure_touchscreen_advanced.cpp b/src/yuzu/configuration/configure_touchscreen_advanced.cpp index 8ced28c756..7d7cc00b72 100644 --- a/src/yuzu/configuration/configure_touchscreen_advanced.cpp +++ b/src/yuzu/configuration/configure_touchscreen_advanced.cpp @@ -11,7 +11,7 @@ ConfigureTouchscreenAdvanced::ConfigureTouchscreenAdvanced(QWidget* parent) : QDialog(parent), ui(std::make_unique<Ui::ConfigureTouchscreenAdvanced>()) { ui->setupUi(this); - connect(ui->restore_defaults_button, &QPushButton::pressed, this, + connect(ui->restore_defaults_button, &QPushButton::clicked, this, &ConfigureTouchscreenAdvanced::RestoreDefaults); LoadConfiguration(); diff --git a/src/yuzu/configuration/configure_web.cpp b/src/yuzu/configuration/configure_web.cpp index 5a70ef168b..336b062b34 100644 --- a/src/yuzu/configuration/configure_web.cpp +++ b/src/yuzu/configuration/configure_web.cpp @@ -9,7 +9,7 @@ #include "core/telemetry_session.h" #include "ui_configure_web.h" #include "yuzu/configuration/configure_web.h" -#include "yuzu/ui_settings.h" +#include "yuzu/uisettings.h" ConfigureWeb::ConfigureWeb(QWidget* parent) : QWidget(parent), ui(std::make_unique<Ui::ConfigureWeb>()) { diff --git a/src/yuzu/debugger/console.cpp b/src/yuzu/debugger/console.cpp index 320898f6af..207ff4d585 100644 --- a/src/yuzu/debugger/console.cpp +++ b/src/yuzu/debugger/console.cpp @@ -10,7 +10,7 @@ #include "common/logging/backend.h" #include "yuzu/debugger/console.h" -#include "yuzu/ui_settings.h" +#include "yuzu/uisettings.h" namespace Debugger { void ToggleConsole() { diff --git a/src/yuzu/discord_impl.cpp b/src/yuzu/discord_impl.cpp index 9d87a41ebb..ea00793534 100644 --- a/src/yuzu/discord_impl.cpp +++ b/src/yuzu/discord_impl.cpp @@ -9,7 +9,7 @@ #include "core/core.h" #include "core/loader/loader.h" #include "yuzu/discord_impl.h" -#include "yuzu/ui_settings.h" +#include "yuzu/uisettings.h" namespace DiscordRPC { diff --git a/src/yuzu/game_list.cpp b/src/yuzu/game_list.cpp index 1885587afd..d5fab2f1fd 100644 --- a/src/yuzu/game_list.cpp +++ b/src/yuzu/game_list.cpp @@ -23,7 +23,7 @@ #include "yuzu/game_list_p.h" #include "yuzu/game_list_worker.h" #include "yuzu/main.h" -#include "yuzu/ui_settings.h" +#include "yuzu/uisettings.h" GameListSearchField::KeyReleaseEater::KeyReleaseEater(GameList* gamelist) : gamelist{gamelist} {} @@ -34,7 +34,6 @@ bool GameListSearchField::KeyReleaseEater::eventFilter(QObject* obj, QEvent* eve return QObject::eventFilter(obj, event); QKeyEvent* keyEvent = static_cast<QKeyEvent*>(event); - int rowCount = gamelist->tree_view->model()->rowCount(); QString edit_filter_text = gamelist->search_field->edit_filter->text().toLower(); // If the searchfield's text hasn't changed special function keys get checked @@ -56,19 +55,9 @@ 
bool GameListSearchField::KeyReleaseEater::eventFilter(QObject* obj, QEvent* eve // If there is only one result launch this game case Qt::Key_Return: case Qt::Key_Enter: { - QStandardItemModel* item_model = new QStandardItemModel(gamelist->tree_view); - QModelIndex root_index = item_model->invisibleRootItem()->index(); - QStandardItem* child_file; - QString file_path; - int resultCount = 0; - for (int i = 0; i < rowCount; ++i) { - if (!gamelist->tree_view->isRowHidden(i, root_index)) { - ++resultCount; - child_file = gamelist->item_model->item(i, 0); - file_path = child_file->data(GameListItemPath::FullPathRole).toString(); - } - } - if (resultCount == 1) { + if (gamelist->search_field->visible == 1) { + QString file_path = gamelist->getLastFilterResultItem(); + // To avoid loading error dialog loops while confirming them using enter // Also users usually want to run a different game after closing one gamelist->search_field->edit_filter->clear(); @@ -88,9 +77,31 @@ bool GameListSearchField::KeyReleaseEater::eventFilter(QObject* obj, QEvent* eve } void GameListSearchField::setFilterResult(int visible, int total) { + this->visible = visible; + this->total = total; + label_filter_result->setText(tr("%1 of %n result(s)", "", total).arg(visible)); } +QString GameList::getLastFilterResultItem() const { + QStandardItem* folder; + QStandardItem* child; + QString file_path; + const int folder_count = item_model->rowCount(); + for (int i = 0; i < folder_count; ++i) { + folder = item_model->item(i, 0); + const QModelIndex folder_index = folder->index(); + const int children_count = folder->rowCount(); + for (int j = 0; j < children_count; ++j) { + if (!tree_view->isRowHidden(j, folder_index)) { + child = folder->child(j, 0); + file_path = child->data(GameListItemPath::FullPathRole).toString(); + } + } + } + return file_path; +} + void GameListSearchField::clear() { edit_filter->clear(); } @@ -147,45 +158,120 @@ static bool ContainsAllWords(const QString& haystack, const QString& userinput) [&haystack](const QString& s) { return haystack.contains(s); }); } +// Syncs the expanded state of Game Directories with settings to persist across sessions +void GameList::onItemExpanded(const QModelIndex& item) { + const auto type = item.data(GameListItem::TypeRole).value<GameListItemType>(); + if (type == GameListItemType::CustomDir || type == GameListItemType::SdmcDir || + type == GameListItemType::UserNandDir || type == GameListItemType::SysNandDir) + item.data(GameListDir::GameDirRole).value<UISettings::GameDir*>()->expanded = + tree_view->isExpanded(item); +} + // Event in order to filter the gamelist after editing the searchfield void GameList::onTextChanged(const QString& new_text) { - const int row_count = tree_view->model()->rowCount(); - const QString edit_filter_text = new_text.toLower(); - const QModelIndex root_index = item_model->invisibleRootItem()->index(); + const int folder_count = tree_view->model()->rowCount(); + QString edit_filter_text = new_text.toLower(); + QStandardItem* folder; + QStandardItem* child; + int children_total = 0; + QModelIndex root_index = item_model->invisibleRootItem()->index(); // If the searchfield is empty every item is visible // Otherwise the filter gets applied if (edit_filter_text.isEmpty()) { - for (int i = 0; i < row_count; ++i) { - tree_view->setRowHidden(i, root_index, false); + for (int i = 0; i < folder_count; ++i) { + folder = item_model->item(i, 0); + const QModelIndex folder_index = folder->index(); + const int children_count = folder->rowCount(); + for 
(int j = 0; j < children_count; ++j) { + ++children_total; + tree_view->setRowHidden(j, folder_index, false); + } } - search_field->setFilterResult(row_count, row_count); + search_field->setFilterResult(children_total, children_total); } else { int result_count = 0; - for (int i = 0; i < row_count; ++i) { - const QStandardItem* child_file = item_model->item(i, 0); - const QString file_path = - child_file->data(GameListItemPath::FullPathRole).toString().toLower(); - const QString file_title = - child_file->data(GameListItemPath::TitleRole).toString().toLower(); - const QString file_program_id = - child_file->data(GameListItemPath::ProgramIdRole).toString().toLower(); - - // Only items which filename in combination with its title contains all words - // that are in the searchfield will be visible in the gamelist - // The search is case insensitive because of toLower() - // I decided not to use Qt::CaseInsensitive in containsAllWords to prevent - // multiple conversions of edit_filter_text for each game in the gamelist - const QString file_name = file_path.mid(file_path.lastIndexOf(QLatin1Char{'/'}) + 1) + - QLatin1Char{' '} + file_title; - if (ContainsAllWords(file_name, edit_filter_text) || - (file_program_id.count() == 16 && edit_filter_text.contains(file_program_id))) { - tree_view->setRowHidden(i, root_index, false); - ++result_count; - } else { - tree_view->setRowHidden(i, root_index, true); + for (int i = 0; i < folder_count; ++i) { + folder = item_model->item(i, 0); + const QModelIndex folder_index = folder->index(); + const int children_count = folder->rowCount(); + for (int j = 0; j < children_count; ++j) { + ++children_total; + const QStandardItem* child = folder->child(j, 0); + const QString file_path = + child->data(GameListItemPath::FullPathRole).toString().toLower(); + const QString file_title = + child->data(GameListItemPath::TitleRole).toString().toLower(); + const QString file_program_id = + child->data(GameListItemPath::ProgramIdRole).toString().toLower(); + + // Only items which filename in combination with its title contains all words + // that are in the searchfield will be visible in the gamelist + // The search is case insensitive because of toLower() + // I decided not to use Qt::CaseInsensitive in containsAllWords to prevent + // multiple conversions of edit_filter_text for each game in the gamelist + const QString file_name = + file_path.mid(file_path.lastIndexOf(QLatin1Char{'/'}) + 1) + QLatin1Char{' '} + + file_title; + if (ContainsAllWords(file_name, edit_filter_text) || + (file_program_id.count() == 16 && edit_filter_text.contains(file_program_id))) { + tree_view->setRowHidden(j, folder_index, false); + ++result_count; + } else { + tree_view->setRowHidden(j, folder_index, true); + } + search_field->setFilterResult(result_count, children_total); } - search_field->setFilterResult(result_count, row_count); + } + } +} + +void GameList::onUpdateThemedIcons() { + for (int i = 0; i < item_model->invisibleRootItem()->rowCount(); i++) { + QStandardItem* child = item_model->invisibleRootItem()->child(i); + + const int icon_size = std::min(static_cast<int>(UISettings::values.icon_size), 64); + switch (child->data(GameListItem::TypeRole).value<GameListItemType>()) { + case GameListItemType::SdmcDir: + child->setData( + QIcon::fromTheme(QStringLiteral("sd_card")) + .pixmap(icon_size) + .scaled(icon_size, icon_size, Qt::IgnoreAspectRatio, Qt::SmoothTransformation), + Qt::DecorationRole); + break; + case GameListItemType::UserNandDir: + child->setData( + 
QIcon::fromTheme(QStringLiteral("chip")) + .pixmap(icon_size) + .scaled(icon_size, icon_size, Qt::IgnoreAspectRatio, Qt::SmoothTransformation), + Qt::DecorationRole); + break; + case GameListItemType::SysNandDir: + child->setData( + QIcon::fromTheme(QStringLiteral("chip")) + .pixmap(icon_size) + .scaled(icon_size, icon_size, Qt::IgnoreAspectRatio, Qt::SmoothTransformation), + Qt::DecorationRole); + break; + case GameListItemType::CustomDir: { + const UISettings::GameDir* game_dir = + child->data(GameListDir::GameDirRole).value<UISettings::GameDir*>(); + const QString icon_name = QFileInfo::exists(game_dir->path) + ? QStringLiteral("folder") + : QStringLiteral("bad_folder"); + child->setData( + QIcon::fromTheme(icon_name).pixmap(icon_size).scaled( + icon_size, icon_size, Qt::IgnoreAspectRatio, Qt::SmoothTransformation), + Qt::DecorationRole); + break; + } + case GameListItemType::AddDir: + child->setData( + QIcon::fromTheme(QStringLiteral("plus")) + .pixmap(icon_size) + .scaled(icon_size, icon_size, Qt::IgnoreAspectRatio, Qt::SmoothTransformation), + Qt::DecorationRole); + break; } } } @@ -214,7 +300,6 @@ GameList::GameList(FileSys::VirtualFilesystem vfs, FileSys::ManualContentProvide tree_view->setHorizontalScrollMode(QHeaderView::ScrollPerPixel); tree_view->setSortingEnabled(true); tree_view->setEditTriggers(QHeaderView::NoEditTriggers); - tree_view->setUniformRowHeights(true); tree_view->setContextMenuPolicy(Qt::CustomContextMenu); tree_view->setStyleSheet(QStringLiteral("QTreeView{ border: none; }")); @@ -230,12 +315,16 @@ GameList::GameList(FileSys::VirtualFilesystem vfs, FileSys::ManualContentProvide item_model->setHeaderData(COLUMN_FILE_TYPE - 1, Qt::Horizontal, tr("File type")); item_model->setHeaderData(COLUMN_SIZE - 1, Qt::Horizontal, tr("Size")); } + item_model->setSortRole(GameListItemPath::TitleRole); + connect(main_window, &GMainWindow::UpdateThemedIcons, this, &GameList::onUpdateThemedIcons); connect(tree_view, &QTreeView::activated, this, &GameList::ValidateEntry); connect(tree_view, &QTreeView::customContextMenuRequested, this, &GameList::PopupContextMenu); + connect(tree_view, &QTreeView::expanded, this, &GameList::onItemExpanded); + connect(tree_view, &QTreeView::collapsed, this, &GameList::onItemExpanded); - // We must register all custom types with the Qt Automoc system so that we are able to use it - // with signals/slots. In this case, QList falls under the umbrells of custom types. + // We must register all custom types with the Qt Automoc system so that we are able to use + // it with signals/slots. In this case, QList falls under the umbrells of custom types. qRegisterMetaType<QList<QStandardItem*>>("QList<QStandardItem*>"); layout->setContentsMargins(0, 0, 0, 0); @@ -263,38 +352,68 @@ void GameList::clearFilter() { search_field->clear(); } -void GameList::AddEntry(const QList<QStandardItem*>& entry_items) { +void GameList::AddDirEntry(GameListDir* entry_items) { item_model->invisibleRootItem()->appendRow(entry_items); + tree_view->setExpanded( + entry_items->index(), + entry_items->data(GameListDir::GameDirRole).value<UISettings::GameDir*>()->expanded); } -void GameList::ValidateEntry(const QModelIndex& item) { - // We don't care about the individual QStandardItem that was selected, but its row. 
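The onItemExpanded slot above, together with the expanded/collapsed connections and the setExpanded() call in AddDirEntry, persists each directory node's expansion state: the slot writes tree_view->isExpanded() back into the UISettings::GameDir the node references, and the stored flag is re-applied when the row is rebuilt. A compact stand-alone sketch of that round trip, using a plain Qt::UserRole flag in place of the project's GameDir pointer:

#include <QApplication>
#include <QStandardItemModel>
#include <QTreeView>

int main(int argc, char* argv[]) {
    QApplication app(argc, argv);

    // A directory row that remembers its own expansion state in a data role,
    // standing in for the UISettings::GameDir::expanded flag used above.
    QStandardItemModel model;
    auto* dir = new QStandardItem(QStringLiteral("/games"));
    dir->appendRow(new QStandardItem(QStringLiteral("some_game.nsp")));
    dir->setData(true, Qt::UserRole);
    model.appendRow(dir);

    QTreeView view;
    view.setModel(&model);

    // Persist: mirror the view state back into the item whenever the user toggles it.
    const auto sync = [&](const QModelIndex& index) {
        model.itemFromIndex(index)->setData(view.isExpanded(index), Qt::UserRole);
    };
    QObject::connect(&view, &QTreeView::expanded, sync);
    QObject::connect(&view, &QTreeView::collapsed, sync);

    // Restore: re-apply the stored flag when the row is (re)inserted into the tree.
    view.setExpanded(dir->index(), dir->data(Qt::UserRole).toBool());

    view.show();
    return app.exec();
}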
- const int row = item_model->itemFromIndex(item)->row(); - const QStandardItem* child_file = item_model->invisibleRootItem()->child(row, COLUMN_NAME); - const QString file_path = child_file->data(GameListItemPath::FullPathRole).toString(); - - if (file_path.isEmpty()) - return; - - if (!QFileInfo::exists(file_path)) - return; +void GameList::AddEntry(const QList<QStandardItem*>& entry_items, GameListDir* parent) { + parent->appendRow(entry_items); +} - const QFileInfo file_info{file_path}; - if (file_info.isDir()) { - const QDir dir{file_path}; - const QStringList matching_main = dir.entryList({QStringLiteral("main")}, QDir::Files); - if (matching_main.size() == 1) { - emit GameChosen(dir.path() + QDir::separator() + matching_main[0]); +void GameList::ValidateEntry(const QModelIndex& item) { + const auto selected = item.sibling(item.row(), 0); + + switch (selected.data(GameListItem::TypeRole).value<GameListItemType>()) { + case GameListItemType::Game: { + const QString file_path = selected.data(GameListItemPath::FullPathRole).toString(); + if (file_path.isEmpty()) + return; + const QFileInfo file_info(file_path); + if (!file_info.exists()) + return; + + if (file_info.isDir()) { + const QDir dir{file_path}; + const QStringList matching_main = dir.entryList({QStringLiteral("main")}, QDir::Files); + if (matching_main.size() == 1) { + emit GameChosen(dir.path() + QDir::separator() + matching_main[0]); + } + return; } - return; + + // Users usually want to run a different game after closing one + search_field->clear(); + emit GameChosen(file_path); + break; } + case GameListItemType::AddDir: + emit AddDirectory(); + break; + } +} - // Users usually want to run a diffrent game after closing one - search_field->clear(); - emit GameChosen(file_path); +bool GameList::isEmpty() const { + for (int i = 0; i < item_model->rowCount(); i++) { + const QStandardItem* child = item_model->invisibleRootItem()->child(i); + const auto type = static_cast<GameListItemType>(child->type()); + if (!child->hasChildren() && + (type == GameListItemType::SdmcDir || type == GameListItemType::UserNandDir || + type == GameListItemType::SysNandDir)) { + item_model->invisibleRootItem()->removeRow(child->row()); + i--; + }; + } + return !item_model->invisibleRootItem()->hasChildren(); } void GameList::DonePopulating(QStringList watch_list) { + emit ShowList(!isEmpty()); + + item_model->invisibleRootItem()->appendRow(new GameListAddDir()); + // Clear out the old directories to watch for changes and add the new ones auto watch_dirs = watcher->directories(); if (!watch_dirs.isEmpty()) { @@ -311,9 +430,13 @@ void GameList::DonePopulating(QStringList watch_list) { QCoreApplication::processEvents(); } tree_view->setEnabled(true); - int rowCount = tree_view->model()->rowCount(); - search_field->setFilterResult(rowCount, rowCount); - if (rowCount > 0) { + const int folder_count = tree_view->model()->rowCount(); + int children_total = 0; + for (int i = 0; i < folder_count; ++i) { + children_total += item_model->item(i, 0)->rowCount(); + } + search_field->setFilterResult(children_total, children_total); + if (children_total > 0) { search_field->setFocus(); } } @@ -323,12 +446,27 @@ void GameList::PopupContextMenu(const QPoint& menu_location) { if (!item.isValid()) return; - int row = item_model->itemFromIndex(item)->row(); - QStandardItem* child_file = item_model->invisibleRootItem()->child(row, COLUMN_NAME); - u64 program_id = child_file->data(GameListItemPath::ProgramIdRole).toULongLong(); - std::string path = 
child_file->data(GameListItemPath::FullPathRole).toString().toStdString(); - + const auto selected = item.sibling(item.row(), 0); QMenu context_menu; + switch (selected.data(GameListItem::TypeRole).value<GameListItemType>()) { + case GameListItemType::Game: + AddGamePopup(context_menu, selected.data(GameListItemPath::ProgramIdRole).toULongLong(), + selected.data(GameListItemPath::FullPathRole).toString().toStdString()); + break; + case GameListItemType::CustomDir: + AddPermDirPopup(context_menu, selected); + AddCustomDirPopup(context_menu, selected); + break; + case GameListItemType::SdmcDir: + case GameListItemType::UserNandDir: + case GameListItemType::SysNandDir: + AddPermDirPopup(context_menu, selected); + break; + } + context_menu.exec(tree_view->viewport()->mapToGlobal(menu_location)); +} + +void GameList::AddGamePopup(QMenu& context_menu, u64 program_id, std::string path) { QAction* open_save_location = context_menu.addAction(tr("Open Save Data Location")); QAction* open_lfs_location = context_menu.addAction(tr("Open Mod Data Location")); QAction* open_transferable_shader_cache = @@ -344,19 +482,86 @@ void GameList::PopupContextMenu(const QPoint& menu_location) { auto it = FindMatchingCompatibilityEntry(compatibility_list, program_id); navigate_to_gamedb_entry->setVisible(it != compatibility_list.end() && program_id != 0); - connect(open_save_location, &QAction::triggered, - [&]() { emit OpenFolderRequested(program_id, GameListOpenTarget::SaveData); }); - connect(open_lfs_location, &QAction::triggered, - [&]() { emit OpenFolderRequested(program_id, GameListOpenTarget::ModData); }); + connect(open_save_location, &QAction::triggered, [this, program_id]() { + emit OpenFolderRequested(program_id, GameListOpenTarget::SaveData); + }); + connect(open_lfs_location, &QAction::triggered, [this, program_id]() { + emit OpenFolderRequested(program_id, GameListOpenTarget::ModData); + }); connect(open_transferable_shader_cache, &QAction::triggered, - [&]() { emit OpenTransferableShaderCacheRequested(program_id); }); - connect(dump_romfs, &QAction::triggered, [&]() { emit DumpRomFSRequested(program_id, path); }); - connect(copy_tid, &QAction::triggered, [&]() { emit CopyTIDRequested(program_id); }); - connect(navigate_to_gamedb_entry, &QAction::triggered, - [&]() { emit NavigateToGamedbEntryRequested(program_id, compatibility_list); }); - connect(properties, &QAction::triggered, [&]() { emit OpenPerGameGeneralRequested(path); }); + [this, program_id]() { emit OpenTransferableShaderCacheRequested(program_id); }); + connect(dump_romfs, &QAction::triggered, + [this, program_id, path]() { emit DumpRomFSRequested(program_id, path); }); + connect(copy_tid, &QAction::triggered, + [this, program_id]() { emit CopyTIDRequested(program_id); }); + connect(navigate_to_gamedb_entry, &QAction::triggered, [this, program_id]() { + emit NavigateToGamedbEntryRequested(program_id, compatibility_list); + }); + connect(properties, &QAction::triggered, + [this, path]() { emit OpenPerGameGeneralRequested(path); }); +}; + +void GameList::AddCustomDirPopup(QMenu& context_menu, QModelIndex selected) { + UISettings::GameDir& game_dir = + *selected.data(GameListDir::GameDirRole).value<UISettings::GameDir*>(); + + QAction* deep_scan = context_menu.addAction(tr("Scan Subfolders")); + QAction* delete_dir = context_menu.addAction(tr("Remove Game Directory")); + + deep_scan->setCheckable(true); + deep_scan->setChecked(game_dir.deep_scan); + + connect(deep_scan, &QAction::triggered, [this, &game_dir] { + game_dir.deep_scan = 
!game_dir.deep_scan; + PopulateAsync(UISettings::values.game_dirs); + }); + connect(delete_dir, &QAction::triggered, [this, &game_dir, selected] { + UISettings::values.game_dirs.removeOne(game_dir); + item_model->invisibleRootItem()->removeRow(selected.row()); + }); +} - context_menu.exec(tree_view->viewport()->mapToGlobal(menu_location)); +void GameList::AddPermDirPopup(QMenu& context_menu, QModelIndex selected) { + UISettings::GameDir& game_dir = + *selected.data(GameListDir::GameDirRole).value<UISettings::GameDir*>(); + + QAction* move_up = context_menu.addAction(tr(u8"\U000025b2 Move Up")); + QAction* move_down = context_menu.addAction(tr(u8"\U000025bc Move Down ")); + QAction* open_directory_location = context_menu.addAction(tr("Open Directory Location")); + + const int row = selected.row(); + + move_up->setEnabled(row > 0); + move_down->setEnabled(row < item_model->rowCount() - 2); + + connect(move_up, &QAction::triggered, [this, selected, row, &game_dir] { + // find the indices of the items in settings and swap them + std::swap(UISettings::values.game_dirs[UISettings::values.game_dirs.indexOf(game_dir)], + UISettings::values.game_dirs[UISettings::values.game_dirs.indexOf( + *selected.sibling(row - 1, 0) + .data(GameListDir::GameDirRole) + .value<UISettings::GameDir*>())]); + // move the treeview items + QList<QStandardItem*> item = item_model->takeRow(row); + item_model->invisibleRootItem()->insertRow(row - 1, item); + tree_view->setExpanded(selected, game_dir.expanded); + }); + + connect(move_down, &QAction::triggered, [this, selected, row, &game_dir] { + // find the indices of the items in settings and swap them + std::swap(UISettings::values.game_dirs[UISettings::values.game_dirs.indexOf(game_dir)], + UISettings::values.game_dirs[UISettings::values.game_dirs.indexOf( + *selected.sibling(row + 1, 0) + .data(GameListDir::GameDirRole) + .value<UISettings::GameDir*>())]); + // move the treeview items + const QList<QStandardItem*> item = item_model->takeRow(row); + item_model->invisibleRootItem()->insertRow(row + 1, item); + tree_view->setExpanded(selected, game_dir.expanded); + }); + + connect(open_directory_location, &QAction::triggered, + [this, game_dir] { emit OpenDirectory(game_dir.path); }); } void GameList::LoadCompatibilityList() { @@ -403,14 +608,7 @@ void GameList::LoadCompatibilityList() { } } -void GameList::PopulateAsync(const QString& dir_path, bool deep_scan) { - const QFileInfo dir_info{dir_path}; - if (!dir_info.exists() || !dir_info.isDir()) { - LOG_ERROR(Frontend, "Could not find game list folder at {}", dir_path.toStdString()); - search_field->setFilterResult(0, 0); - return; - } - +void GameList::PopulateAsync(QVector<UISettings::GameDir>& game_dirs) { tree_view->setEnabled(false); // Update the columns in case UISettings has changed @@ -433,17 +631,19 @@ void GameList::PopulateAsync(const QString& dir_path, bool deep_scan) { // Delete any rows that might already exist if we're repopulating item_model->removeRows(0, item_model->rowCount()); + search_field->clear(); emit ShouldCancelWorker(); - GameListWorker* worker = - new GameListWorker(vfs, provider, dir_path, deep_scan, compatibility_list); + GameListWorker* worker = new GameListWorker(vfs, provider, game_dirs, compatibility_list); connect(worker, &GameListWorker::EntryReady, this, &GameList::AddEntry, Qt::QueuedConnection); + connect(worker, &GameListWorker::DirEntryReady, this, &GameList::AddDirEntry, + Qt::QueuedConnection); connect(worker, &GameListWorker::Finished, this, &GameList::DonePopulating, 
Qt::QueuedConnection); - // Use DirectConnection here because worker->Cancel() is thread-safe and we want it to cancel - // without delay. + // Use DirectConnection here because worker->Cancel() is thread-safe and we want it to + // cancel without delay. connect(this, &GameList::ShouldCancelWorker, worker, &GameListWorker::Cancel, Qt::DirectConnection); @@ -471,10 +671,40 @@ const QStringList GameList::supported_file_extensions = { QStringLiteral("xci"), QStringLiteral("nsp"), QStringLiteral("kip")}; void GameList::RefreshGameDirectory() { - if (!UISettings::values.game_directory_path.isEmpty() && current_worker != nullptr) { + if (!UISettings::values.game_dirs.isEmpty() && current_worker != nullptr) { LOG_INFO(Frontend, "Change detected in the games directory. Reloading game list."); - search_field->clear(); - PopulateAsync(UISettings::values.game_directory_path, - UISettings::values.game_directory_deepscan); + PopulateAsync(UISettings::values.game_dirs); } } + +GameListPlaceholder::GameListPlaceholder(GMainWindow* parent) : QWidget{parent} { + connect(parent, &GMainWindow::UpdateThemedIcons, this, + &GameListPlaceholder::onUpdateThemedIcons); + + layout = new QVBoxLayout; + image = new QLabel; + text = new QLabel; + layout->setAlignment(Qt::AlignCenter); + image->setPixmap(QIcon::fromTheme(QStringLiteral("plus_folder")).pixmap(200)); + + text->setText(tr("Double-click to add a new folder to the game list")); + QFont font = text->font(); + font.setPointSize(20); + text->setFont(font); + text->setAlignment(Qt::AlignHCenter); + image->setAlignment(Qt::AlignHCenter); + + layout->addWidget(image); + layout->addWidget(text); + setLayout(layout); +} + +GameListPlaceholder::~GameListPlaceholder() = default; + +void GameListPlaceholder::onUpdateThemedIcons() { + image->setPixmap(QIcon::fromTheme(QStringLiteral("plus_folder")).pixmap(200)); +} + +void GameListPlaceholder::mouseDoubleClickEvent(QMouseEvent* event) { + emit GameListPlaceholder::AddDirectory(); +} diff --git a/src/yuzu/game_list.h b/src/yuzu/game_list.h index f8f8bd6c5f..878d944138 100644 --- a/src/yuzu/game_list.h +++ b/src/yuzu/game_list.h @@ -8,6 +8,7 @@ #include <QHBoxLayout> #include <QLabel> #include <QLineEdit> +#include <QList> #include <QModelIndex> #include <QSettings> #include <QStandardItem> @@ -16,13 +17,16 @@ #include <QToolButton> #include <QTreeView> #include <QVBoxLayout> +#include <QVector> #include <QWidget> #include "common/common_types.h" +#include "uisettings.h" #include "yuzu/compatibility_list.h" class GameListWorker; class GameListSearchField; +class GameListDir; class GMainWindow; namespace FileSys { @@ -52,12 +56,14 @@ public: FileSys::ManualContentProvider* provider, GMainWindow* parent = nullptr); ~GameList() override; + QString getLastFilterResultItem() const; void clearFilter(); void setFilterFocus(); void setFilterVisible(bool visibility); + bool isEmpty() const; void LoadCompatibilityList(); - void PopulateAsync(const QString& dir_path, bool deep_scan); + void PopulateAsync(QVector<UISettings::GameDir>& game_dirs); void SaveInterfaceLayout(); void LoadInterfaceLayout(); @@ -74,19 +80,29 @@ signals: void NavigateToGamedbEntryRequested(u64 program_id, const CompatibilityList& compatibility_list); void OpenPerGameGeneralRequested(const std::string& file); + void OpenDirectory(const QString& directory); + void AddDirectory(); + void ShowList(bool show); private slots: + void onItemExpanded(const QModelIndex& item); void onTextChanged(const QString& new_text); void onFilterCloseClicked(); + void 
onUpdateThemedIcons(); private: - void AddEntry(const QList<QStandardItem*>& entry_items); + void AddDirEntry(GameListDir* entry_items); + void AddEntry(const QList<QStandardItem*>& entry_items, GameListDir* parent); void ValidateEntry(const QModelIndex& item); void DonePopulating(QStringList watch_list); - void PopupContextMenu(const QPoint& menu_location); void RefreshGameDirectory(); + void PopupContextMenu(const QPoint& menu_location); + void AddGamePopup(QMenu& context_menu, u64 program_id, std::string path); + void AddCustomDirPopup(QMenu& context_menu, QModelIndex selected); + void AddPermDirPopup(QMenu& context_menu, QModelIndex selected); + std::shared_ptr<FileSys::VfsFilesystem> vfs; FileSys::ManualContentProvider* provider; GameListSearchField* search_field; @@ -102,3 +118,24 @@ private: }; Q_DECLARE_METATYPE(GameListOpenTarget); + +class GameListPlaceholder : public QWidget { + Q_OBJECT +public: + explicit GameListPlaceholder(GMainWindow* parent = nullptr); + ~GameListPlaceholder(); + +signals: + void AddDirectory(); + +private slots: + void onUpdateThemedIcons(); + +protected: + void mouseDoubleClickEvent(QMouseEvent* event) override; + +private: + QVBoxLayout* layout = nullptr; + QLabel* image = nullptr; + QLabel* text = nullptr; +}; diff --git a/src/yuzu/game_list_p.h b/src/yuzu/game_list_p.h index 0b458ef487..a8d888fee9 100644 --- a/src/yuzu/game_list_p.h +++ b/src/yuzu/game_list_p.h @@ -10,6 +10,7 @@ #include <utility> #include <QCoreApplication> +#include <QFileInfo> #include <QImage> #include <QObject> #include <QStandardItem> @@ -19,9 +20,20 @@ #include "common/common_types.h" #include "common/logging/log.h" #include "common/string_util.h" -#include "yuzu/ui_settings.h" +#include "yuzu/uisettings.h" #include "yuzu/util/util.h" +enum class GameListItemType { + Game = QStandardItem::UserType + 1, + CustomDir = QStandardItem::UserType + 2, + SdmcDir = QStandardItem::UserType + 3, + UserNandDir = QStandardItem::UserType + 4, + SysNandDir = QStandardItem::UserType + 5, + AddDir = QStandardItem::UserType + 6 +}; + +Q_DECLARE_METATYPE(GameListItemType); + /** * Gets the default icon (for games without valid title metadata) * @param size The desired width and height of the default icon. 
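The new GameListItemType enum is registered with Q_DECLARE_METATYPE so each row can carry its node kind in an item data role (GameListItem::TypeRole) and any QModelIndex can later be asked for it with value<GameListItemType>(). A minimal sketch of that store-and-read pattern, with an illustrative enum and role rather than the project's definitions:

#include <QCoreApplication>
#include <QMetaType>
#include <QStandardItemModel>
#include <QVariant>

// Illustrative stand-in for GameListItemType; Q_DECLARE_METATYPE is what lets the
// value be wrapped in a QVariant and recovered with value<T>().
enum class NodeType { Game, CustomDir };
Q_DECLARE_METATYPE(NodeType)

int main(int argc, char* argv[]) {
    QCoreApplication app(argc, argv);

    constexpr int TypeRole = Qt::UserRole + 1; // mirrors GameListItem::TypeRole

    QStandardItemModel model;
    auto* item = new QStandardItem(QStringLiteral("example entry"));
    item->setData(QVariant::fromValue(NodeType::CustomDir), TypeRole);
    model.appendRow(item);

    // Consumers read the role from any QModelIndex and branch on it, which is how the
    // reworked ValidateEntry/PopupContextMenu tell games apart from directory nodes.
    const auto type = model.index(0, 0).data(TypeRole).value<NodeType>();
    return type == NodeType::CustomDir ? 0 : 1;
}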
@@ -36,8 +48,13 @@ static QPixmap GetDefaultIcon(u32 size) { class GameListItem : public QStandardItem { public: + // used to access type from item index + static const int TypeRole = Qt::UserRole + 1; + static const int SortRole = Qt::UserRole + 2; GameListItem() = default; - explicit GameListItem(const QString& string) : QStandardItem(string) {} + GameListItem(const QString& string) : QStandardItem(string) { + setData(string, SortRole); + } }; /** @@ -48,14 +65,15 @@ public: */ class GameListItemPath : public GameListItem { public: - static const int FullPathRole = Qt::UserRole + 1; - static const int TitleRole = Qt::UserRole + 2; - static const int ProgramIdRole = Qt::UserRole + 3; - static const int FileTypeRole = Qt::UserRole + 4; + static const int TitleRole = SortRole; + static const int FullPathRole = SortRole + 1; + static const int ProgramIdRole = SortRole + 2; + static const int FileTypeRole = SortRole + 3; GameListItemPath() = default; GameListItemPath(const QString& game_path, const std::vector<u8>& picture_data, const QString& game_name, const QString& game_type, u64 program_id) { + setData(type(), TypeRole); setData(game_path, FullPathRole); setData(game_name, TitleRole); setData(qulonglong(program_id), ProgramIdRole); @@ -72,6 +90,10 @@ public: setData(picture, Qt::DecorationRole); } + int type() const override { + return static_cast<int>(GameListItemType::Game); + } + QVariant data(int role) const override { if (role == Qt::DisplayRole) { std::string filename; @@ -103,9 +125,11 @@ public: class GameListItemCompat : public GameListItem { Q_DECLARE_TR_FUNCTIONS(GameListItemCompat) public: - static const int CompatNumberRole = Qt::UserRole + 1; + static const int CompatNumberRole = SortRole; GameListItemCompat() = default; explicit GameListItemCompat(const QString& compatibility) { + setData(type(), TypeRole); + struct CompatStatus { QString color; const char* text; @@ -135,6 +159,10 @@ public: setData(CreateCirclePixmapFromColor(status.color), Qt::DecorationRole); } + int type() const override { + return static_cast<int>(GameListItemType::Game); + } + bool operator<(const QStandardItem& other) const override { return data(CompatNumberRole) < other.data(CompatNumberRole); } @@ -146,12 +174,12 @@ public: * human-readable string representation will be displayed to the user. */ class GameListItemSize : public GameListItem { - public: - static const int SizeRole = Qt::UserRole + 1; + static const int SizeRole = SortRole; GameListItemSize() = default; explicit GameListItemSize(const qulonglong size_bytes) { + setData(type(), TypeRole); setData(size_bytes, SizeRole); } @@ -167,6 +195,10 @@ public: } } + int type() const override { + return static_cast<int>(GameListItemType::Game); + } + /** * This operator is, in practice, only used by the TreeView sorting systems. 
* Override it so that it will correctly sort by numerical value instead of by string @@ -177,6 +209,82 @@ public: } }; +class GameListDir : public GameListItem { +public: + static const int GameDirRole = Qt::UserRole + 2; + + explicit GameListDir(UISettings::GameDir& directory, + GameListItemType dir_type = GameListItemType::CustomDir) + : dir_type{dir_type} { + setData(type(), TypeRole); + + UISettings::GameDir* game_dir = &directory; + setData(QVariant::fromValue(game_dir), GameDirRole); + + const int icon_size = std::min(static_cast<int>(UISettings::values.icon_size), 64); + switch (dir_type) { + case GameListItemType::SdmcDir: + setData( + QIcon::fromTheme(QStringLiteral("sd_card")) + .pixmap(icon_size) + .scaled(icon_size, icon_size, Qt::IgnoreAspectRatio, Qt::SmoothTransformation), + Qt::DecorationRole); + setData(QObject::tr("Installed SD Titles"), Qt::DisplayRole); + break; + case GameListItemType::UserNandDir: + setData( + QIcon::fromTheme(QStringLiteral("chip")) + .pixmap(icon_size) + .scaled(icon_size, icon_size, Qt::IgnoreAspectRatio, Qt::SmoothTransformation), + Qt::DecorationRole); + setData(QObject::tr("Installed NAND Titles"), Qt::DisplayRole); + break; + case GameListItemType::SysNandDir: + setData( + QIcon::fromTheme(QStringLiteral("chip")) + .pixmap(icon_size) + .scaled(icon_size, icon_size, Qt::IgnoreAspectRatio, Qt::SmoothTransformation), + Qt::DecorationRole); + setData(QObject::tr("System Titles"), Qt::DisplayRole); + break; + case GameListItemType::CustomDir: + const QString icon_name = QFileInfo::exists(game_dir->path) + ? QStringLiteral("folder") + : QStringLiteral("bad_folder"); + setData(QIcon::fromTheme(icon_name).pixmap(icon_size).scaled( + icon_size, icon_size, Qt::IgnoreAspectRatio, Qt::SmoothTransformation), + Qt::DecorationRole); + setData(game_dir->path, Qt::DisplayRole); + break; + }; + }; + + int type() const override { + return static_cast<int>(dir_type); + } + +private: + GameListItemType dir_type; +}; + +class GameListAddDir : public GameListItem { +public: + explicit GameListAddDir() { + setData(type(), TypeRole); + + const int icon_size = std::min(static_cast<int>(UISettings::values.icon_size), 64); + setData(QIcon::fromTheme(QStringLiteral("plus")) + .pixmap(icon_size) + .scaled(icon_size, icon_size, Qt::IgnoreAspectRatio, Qt::SmoothTransformation), + Qt::DecorationRole); + setData(QObject::tr("Add New Game Directory"), Qt::DisplayRole); + } + + int type() const override { + return static_cast<int>(GameListItemType::AddDir); + } +}; + class GameList; class QHBoxLayout; class QTreeView; @@ -208,6 +316,9 @@ private: // EventFilter in order to process systemkeys while editing the searchfield bool eventFilter(QObject* obj, QEvent* event) override; }; + int visible; + int total; + QHBoxLayout* layout_filter = nullptr; QTreeView* tree_view = nullptr; QLabel* label_filter = nullptr; diff --git a/src/yuzu/game_list_worker.cpp b/src/yuzu/game_list_worker.cpp index 4f30e9147a..fd21a97615 100644 --- a/src/yuzu/game_list_worker.cpp +++ b/src/yuzu/game_list_worker.cpp @@ -29,7 +29,7 @@ #include "yuzu/game_list.h" #include "yuzu/game_list_p.h" #include "yuzu/game_list_worker.h" -#include "yuzu/ui_settings.h" +#include "yuzu/uisettings.h" namespace { @@ -223,21 +223,37 @@ QList<QStandardItem*> MakeGameListEntry(const std::string& path, const std::stri } // Anonymous namespace GameListWorker::GameListWorker(FileSys::VirtualFilesystem vfs, - FileSys::ManualContentProvider* provider, QString dir_path, - bool deep_scan, const CompatibilityList& compatibility_list) - 
: vfs(std::move(vfs)), provider(provider), dir_path(std::move(dir_path)), deep_scan(deep_scan), + FileSys::ManualContentProvider* provider, + QVector<UISettings::GameDir>& game_dirs, + const CompatibilityList& compatibility_list) + : vfs(std::move(vfs)), provider(provider), game_dirs(game_dirs), compatibility_list(compatibility_list) {} GameListWorker::~GameListWorker() = default; -void GameListWorker::AddTitlesToGameList() { - const auto& cache = dynamic_cast<FileSys::ContentProviderUnion&>( - Core::System::GetInstance().GetContentProvider()); - const auto installed_games = cache.ListEntriesFilterOrigin( - std::nullopt, FileSys::TitleType::Application, FileSys::ContentRecordType::Program); +void GameListWorker::AddTitlesToGameList(GameListDir* parent_dir) { + using namespace FileSys; + + const auto& cache = + dynamic_cast<ContentProviderUnion&>(Core::System::GetInstance().GetContentProvider()); + + std::vector<std::pair<ContentProviderUnionSlot, ContentProviderEntry>> installed_games; + installed_games = cache.ListEntriesFilterOrigin(std::nullopt, TitleType::Application, + ContentRecordType::Program); + + if (parent_dir->type() == static_cast<int>(GameListItemType::SdmcDir)) { + installed_games = cache.ListEntriesFilterOrigin( + ContentProviderUnionSlot::SDMC, TitleType::Application, ContentRecordType::Program); + } else if (parent_dir->type() == static_cast<int>(GameListItemType::UserNandDir)) { + installed_games = cache.ListEntriesFilterOrigin( + ContentProviderUnionSlot::UserNAND, TitleType::Application, ContentRecordType::Program); + } else if (parent_dir->type() == static_cast<int>(GameListItemType::SysNandDir)) { + installed_games = cache.ListEntriesFilterOrigin( + ContentProviderUnionSlot::SysNAND, TitleType::Application, ContentRecordType::Program); + } for (const auto& [slot, game] : installed_games) { - if (slot == FileSys::ContentProviderUnionSlot::FrontendManual) + if (slot == ContentProviderUnionSlot::FrontendManual) continue; const auto file = cache.GetEntryUnparsed(game.title_id, game.type); @@ -250,21 +266,22 @@ void GameListWorker::AddTitlesToGameList() { u64 program_id = 0; loader->ReadProgramId(program_id); - const FileSys::PatchManager patch{program_id}; - const auto control = cache.GetEntry(game.title_id, FileSys::ContentRecordType::Control); + const PatchManager patch{program_id}; + const auto control = cache.GetEntry(game.title_id, ContentRecordType::Control); if (control != nullptr) GetMetadataFromControlNCA(patch, *control, icon, name); emit EntryReady(MakeGameListEntry(file->GetFullPath(), name, icon, *loader, program_id, - compatibility_list, patch)); + compatibility_list, patch), + parent_dir); } } void GameListWorker::ScanFileSystem(ScanTarget target, const std::string& dir_path, - unsigned int recursion) { - const auto callback = [this, target, recursion](u64* num_entries_out, - const std::string& directory, - const std::string& virtual_name) -> bool { + unsigned int recursion, GameListDir* parent_dir) { + const auto callback = [this, target, recursion, + parent_dir](u64* num_entries_out, const std::string& directory, + const std::string& virtual_name) -> bool { if (stop_processing) { // Breaks the callback loop. 
             return false;
@@ -317,11 +334,12 @@ void GameListWorker::ScanFileSystem(ScanTarget target, const std::string& dir_pa
                 const FileSys::PatchManager patch{program_id};
 
                 emit EntryReady(MakeGameListEntry(physical_name, name, icon, *loader, program_id,
-                                                  compatibility_list, patch));
+                                                  compatibility_list, patch),
+                                parent_dir);
             }
         } else if (is_dir && recursion > 0) {
             watch_list.append(QString::fromStdString(physical_name));
-            ScanFileSystem(target, physical_name, recursion - 1);
+            ScanFileSystem(target, physical_name, recursion - 1, parent_dir);
         }
 
         return true;
@@ -332,12 +350,32 @@ void GameListWorker::ScanFileSystem(ScanTarget target, const std::string& dir_pa
 
 void GameListWorker::run() {
     stop_processing = false;
-    watch_list.append(dir_path);
-    provider->ClearAllEntries();
-    ScanFileSystem(ScanTarget::FillManualContentProvider, dir_path.toStdString(),
-                   deep_scan ? 256 : 0);
-    AddTitlesToGameList();
-    ScanFileSystem(ScanTarget::PopulateGameList, dir_path.toStdString(), deep_scan ? 256 : 0);
+
+    for (UISettings::GameDir& game_dir : game_dirs) {
+        if (game_dir.path == QStringLiteral("SDMC")) {
+            auto* const game_list_dir = new GameListDir(game_dir, GameListItemType::SdmcDir);
+            emit DirEntryReady({game_list_dir});
+            AddTitlesToGameList(game_list_dir);
+        } else if (game_dir.path == QStringLiteral("UserNAND")) {
+            auto* const game_list_dir = new GameListDir(game_dir, GameListItemType::UserNandDir);
+            emit DirEntryReady({game_list_dir});
+            AddTitlesToGameList(game_list_dir);
+        } else if (game_dir.path == QStringLiteral("SysNAND")) {
+            auto* const game_list_dir = new GameListDir(game_dir, GameListItemType::SysNandDir);
+            emit DirEntryReady({game_list_dir});
+            AddTitlesToGameList(game_list_dir);
+        } else {
+            watch_list.append(game_dir.path);
+            auto* const game_list_dir = new GameListDir(game_dir);
+            emit DirEntryReady({game_list_dir});
+            provider->ClearAllEntries();
+            ScanFileSystem(ScanTarget::FillManualContentProvider, game_dir.path.toStdString(), 2,
+                           game_list_dir);
+            ScanFileSystem(ScanTarget::PopulateGameList, game_dir.path.toStdString(),
+                           game_dir.deep_scan ? 256 : 0, game_list_dir);
+        }
+    };
+
     emit Finished(watch_list);
 }
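For orientation, a hedged sketch of the kind of QVector<UISettings::GameDir> the reworked run() above iterates over; "SDMC", "UserNAND" and "SysNAND" are the reserved pseudo-paths handled by the special branches, anything else is scanned on disk. The final path is illustrative only:

#include <QString>
#include <QVector>
#include "yuzu/uisettings.h"

QVector<UISettings::GameDir> MakeExampleGameDirs() {
    // Fields are {path, deep_scan, expanded}, matching the struct added in uisettings.h below.
    return {
        {QStringLiteral("SDMC"), false, true},          // installed SD titles
        {QStringLiteral("UserNAND"), false, true},      // installed NAND titles
        {QStringLiteral("SysNAND"), false, true},       // system titles
        {QStringLiteral("/path/to/games"), true, true}, // ordinary directory, deep scan enabled
    };
}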
diff --git a/src/yuzu/game_list_worker.h b/src/yuzu/game_list_worker.h
index 7c3074af9e..6e52fca89e 100644
--- a/src/yuzu/game_list_worker.h
+++ b/src/yuzu/game_list_worker.h
@@ -14,6 +14,7 @@
 #include <QObject>
 #include <QRunnable>
 #include <QString>
+#include <QVector>
 
 #include "common/common_types.h"
 #include "yuzu/compatibility_list.h"
@@ -33,9 +34,10 @@ class GameListWorker : public QObject, public QRunnable {
     Q_OBJECT
 
 public:
-    GameListWorker(std::shared_ptr<FileSys::VfsFilesystem> vfs,
-                   FileSys::ManualContentProvider* provider, QString dir_path, bool deep_scan,
-                   const CompatibilityList& compatibility_list);
+    explicit GameListWorker(std::shared_ptr<FileSys::VfsFilesystem> vfs,
+                            FileSys::ManualContentProvider* provider,
+                            QVector<UISettings::GameDir>& game_dirs,
+                            const CompatibilityList& compatibility_list);
     ~GameListWorker() override;
 
     /// Starts the processing of directory tree information.
@@ -48,31 +50,33 @@ signals:
     /**
      * The `EntryReady` signal is emitted once an entry has been prepared and is ready
      * to be added to the game list.
-     * @param entry_items a list with `QStandardItem`s that make up the columns of the new entry.
+     * @param entry_items a list with `QStandardItem`s that make up the columns of the new
+     * entry.
      */
-    void EntryReady(QList<QStandardItem*> entry_items);
+    void DirEntryReady(GameListDir* entry_items);
+    void EntryReady(QList<QStandardItem*> entry_items, GameListDir* parent_dir);
 
     /**
-     * After the worker has traversed the game directory looking for entries, this signal is emitted
-     * with a list of folders that should be watched for changes as well.
+     * After the worker has traversed the game directory looking for entries, this signal is
+     * emitted with a list of folders that should be watched for changes as well.
      */
     void Finished(QStringList watch_list);
 
 private:
-    void AddTitlesToGameList();
+    void AddTitlesToGameList(GameListDir* parent_dir);
 
     enum class ScanTarget {
         FillManualContentProvider,
         PopulateGameList,
     };
 
-    void ScanFileSystem(ScanTarget target, const std::string& dir_path, unsigned int recursion = 0);
+    void ScanFileSystem(ScanTarget target, const std::string& dir_path, unsigned int recursion,
+                        GameListDir* parent_dir);
 
     std::shared_ptr<FileSys::VfsFilesystem> vfs;
     FileSys::ManualContentProvider* provider;
     QStringList watch_list;
-    QString dir_path;
-    bool deep_scan;
     const CompatibilityList& compatibility_list;
+    QVector<UISettings::GameDir>& game_dirs;
     std::atomic_bool stop_processing;
 };
diff --git a/src/yuzu/hotkeys.cpp b/src/yuzu/hotkeys.cpp
index 4582e7f215..d4e97fa166 100644
--- a/src/yuzu/hotkeys.cpp
+++ b/src/yuzu/hotkeys.cpp
@@ -7,7 +7,7 @@
 #include <QTreeWidgetItem>
 #include <QtGlobal>
 #include "yuzu/hotkeys.h"
-#include "yuzu/ui_settings.h"
+#include "yuzu/uisettings.h"
 
 HotkeyRegistry::HotkeyRegistry() = default;
 HotkeyRegistry::~HotkeyRegistry() = default;
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index ae21f47535..6d249cb3ea 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -100,7 +100,7 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual
 #include "yuzu/hotkeys.h"
 #include "yuzu/loading_screen.h"
 #include "yuzu/main.h"
-#include "yuzu/ui_settings.h"
+#include "yuzu/uisettings.h"
 
 #ifdef USE_DISCORD_PRESENCE
 #include "yuzu/discord_impl.h"
@@ -119,6 +119,7 @@ Q_IMPORT_PLUGIN(QWindowsIntegrationPlugin);
 #endif
 
 #ifdef _WIN32
+#include <windows.h>
 extern "C" {
 // tells Nvidia and AMD drivers to use the dedicated GPU by default on laptops with switchable
 // graphics
@@ -215,8 +216,7 @@ GMainWindow::GMainWindow()
     OnReinitializeKeys(ReinitializeKeyBehavior::NoWarning);
 
     game_list->LoadCompatibilityList();
-    game_list->PopulateAsync(UISettings::values.game_directory_path,
-                             UISettings::values.game_directory_deepscan);
+    game_list->PopulateAsync(UISettings::values.game_dirs);
 
     // Show one-time "callout" messages to the user
     ShowTelemetryCallout();
@@ -426,6 +426,10 @@ void GMainWindow::InitializeWidgets() {
     game_list = new GameList(vfs, provider.get(), this);
     ui.horizontalLayout->addWidget(game_list);
 
+    game_list_placeholder = new GameListPlaceholder(this);
+    ui.horizontalLayout->addWidget(game_list_placeholder);
+    game_list_placeholder->setVisible(false);
+
     loading_screen = new LoadingScreen(this);
     loading_screen->hide();
     ui.horizontalLayout->addWidget(loading_screen);
@@ -659,6 +663,7 @@ void GMainWindow::RestoreUIState() {
 
 void GMainWindow::ConnectWidgetEvents() {
     connect(game_list, &GameList::GameChosen, this, &GMainWindow::OnGameListLoadFile);
+    connect(game_list, &GameList::OpenDirectory, this, &GMainWindow::OnGameListOpenDirectory);
     connect(game_list, &GameList::OpenFolderRequested, this, &GMainWindow::OnGameListOpenFolder);
     connect(game_list, &GameList::OpenTransferableShaderCacheRequested, this,
             &GMainWindow::OnTransferableShaderCacheOpenFile);
@@ -666,6 +671,11 @@ void GMainWindow::ConnectWidgetEvents() {
     connect(game_list, &GameList::CopyTIDRequested, this, &GMainWindow::OnGameListCopyTID);
     connect(game_list, &GameList::NavigateToGamedbEntryRequested, this,
             &GMainWindow::OnGameListNavigateToGamedbEntry);
+    connect(game_list, &GameList::AddDirectory, this, &GMainWindow::OnGameListAddDirectory);
+    connect(game_list_placeholder, &GameListPlaceholder::AddDirectory, this,
+            &GMainWindow::OnGameListAddDirectory);
+    connect(game_list, &GameList::ShowList, this, &GMainWindow::OnGameListShowList);
+
     connect(game_list, &GameList::OpenPerGameGeneralRequested, this,
             &GMainWindow::OnGameListOpenPerGameProperties);
 
@@ -683,8 +693,6 @@ void GMainWindow::ConnectMenuEvents() {
     connect(ui.action_Load_Folder, &QAction::triggered, this, &GMainWindow::OnMenuLoadFolder);
     connect(ui.action_Install_File_NAND, &QAction::triggered, this,
             &GMainWindow::OnMenuInstallToNAND);
-    connect(ui.action_Select_Game_List_Root, &QAction::triggered, this,
-            &GMainWindow::OnMenuSelectGameListRoot);
     connect(ui.action_Select_NAND_Directory, &QAction::triggered, this,
             [this] { OnMenuSelectEmulatedDirectory(EmulatedDirectoryTarget::NAND); });
     connect(ui.action_Select_SDMC_Directory, &QAction::triggered, this,
@@ -747,6 +755,18 @@ void GMainWindow::OnDisplayTitleBars(bool show) {
     }
 }
 
+void GMainWindow::PreventOSSleep() {
+#ifdef _WIN32
+    SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED | ES_DISPLAY_REQUIRED);
+#endif
+}
+
+void GMainWindow::AllowOSSleep() {
+#ifdef _WIN32
+    SetThreadExecutionState(ES_CONTINUOUS);
+#endif
+}
+
 QStringList GMainWindow::GetUnsupportedGLExtensions() {
     QStringList unsupported_ext;
 
@@ -937,6 +957,7 @@ void GMainWindow::BootGame(const QString& filename) {
     // Update the GUI
     if (ui.action_Single_Window_Mode->isChecked()) {
         game_list->hide();
+        game_list_placeholder->hide();
     }
     status_bar_update_timer.start(2000);
 
@@ -966,6 +987,8 @@
 }
 
 void GMainWindow::ShutdownGame() {
+    AllowOSSleep();
+
     discord_rpc->Pause();
     emu_thread->RequestStop();
 
@@ -992,7 +1015,10 @@
     render_window->hide();
     loading_screen->hide();
     loading_screen->Clear();
-    game_list->show();
+    if (game_list->isEmpty())
+        game_list_placeholder->show();
+    else
+        game_list->show();
     game_list->setFilterFocus();
 
     UpdateWindowTitle();
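PreventOSSleep() and AllowOSSleep() above rely on their call sites staying paired (OnStartGame/ShutdownGame and OnPauseGame further below). A sketch of an RAII alternative built on the same Win32 call; this is not what the change implements, only an illustration of the API it uses:

#ifdef _WIN32
#include <windows.h>
#endif

// Scope guard: inhibits system/display sleep for its lifetime and restores
// ES_CONTINUOUS on exit. Compiles to a no-op on non-Windows builds, mirroring
// the #ifdef guards in main.cpp.
class ScopedInhibitSleep {
public:
    ScopedInhibitSleep() {
#ifdef _WIN32
        SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED | ES_DISPLAY_REQUIRED);
#endif
    }
    ~ScopedInhibitSleep() {
#ifdef _WIN32
        SetThreadExecutionState(ES_CONTINUOUS);
#endif
    }
    ScopedInhibitSleep(const ScopedInhibitSleep&) = delete;
    ScopedInhibitSleep& operator=(const ScopedInhibitSleep&) = delete;
};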
tr("Select Directory")); + if (dir_path.isEmpty()) + return; + UISettings::GameDir game_dir{dir_path, false, true}; + if (!UISettings::values.game_dirs.contains(game_dir)) { + UISettings::values.game_dirs.append(game_dir); + game_list->PopulateAsync(UISettings::values.game_dirs); + } else { + LOG_WARNING(Frontend, "Selected directory is already in the game list"); + } +} + +void GMainWindow::OnGameListShowList(bool show) { + if (emulation_running && ui.action_Single_Window_Mode->isChecked()) + return; + game_list->setVisible(show); + game_list_placeholder->setVisible(!show); +}; + void GMainWindow::OnGameListOpenPerGameProperties(const std::string& file) { u64 title_id{}; const auto v_file = Core::GetGameFileFromPath(vfs, file); @@ -1301,8 +1368,7 @@ void GMainWindow::OnGameListOpenPerGameProperties(const std::string& file) { const auto reload = UISettings::values.is_game_list_reload_pending.exchange(false); if (reload) { - game_list->PopulateAsync(UISettings::values.game_directory_path, - UISettings::values.game_directory_deepscan); + game_list->PopulateAsync(UISettings::values.game_dirs); } config->Save(); @@ -1392,8 +1458,7 @@ void GMainWindow::OnMenuInstallToNAND() { const auto success = [this]() { QMessageBox::information(this, tr("Successfully Installed"), tr("The file was successfully installed.")); - game_list->PopulateAsync(UISettings::values.game_directory_path, - UISettings::values.game_directory_deepscan); + game_list->PopulateAsync(UISettings::values.game_dirs); FileUtil::DeleteDirRecursively(FileUtil::GetUserPath(FileUtil::UserPath::CacheDir) + DIR_SEP + "game_list"); }; @@ -1518,14 +1583,6 @@ void GMainWindow::OnMenuInstallToNAND() { } } -void GMainWindow::OnMenuSelectGameListRoot() { - QString dir_path = QFileDialog::getExistingDirectory(this, tr("Select Directory")); - if (!dir_path.isEmpty()) { - UISettings::values.game_directory_path = dir_path; - game_list->PopulateAsync(dir_path, UISettings::values.game_directory_deepscan); - } -} - void GMainWindow::OnMenuSelectEmulatedDirectory(EmulatedDirectoryTarget target) { const auto res = QMessageBox::information( this, tr("Changing Emulated Directory"), @@ -1544,8 +1601,7 @@ void GMainWindow::OnMenuSelectEmulatedDirectory(EmulatedDirectoryTarget target) : FileUtil::UserPath::NANDDir, dir_path.toStdString()); Service::FileSystem::CreateFactories(*vfs); - game_list->PopulateAsync(UISettings::values.game_directory_path, - UISettings::values.game_directory_deepscan); + game_list->PopulateAsync(UISettings::values.game_dirs); } } @@ -1567,6 +1623,8 @@ void GMainWindow::OnMenuRecentFile() { } void GMainWindow::OnStartGame() { + PreventOSSleep(); + emu_thread->SetRunning(true); qRegisterMetaType<Core::Frontend::SoftwareKeyboardParameters>( @@ -1598,6 +1656,8 @@ void GMainWindow::OnPauseGame() { ui.action_Pause->setEnabled(false); ui.action_Stop->setEnabled(true); ui.action_Capture_Screenshot->setEnabled(false); + + AllowOSSleep(); } void GMainWindow::OnStopGame() { @@ -1705,11 +1765,11 @@ void GMainWindow::OnConfigure() { if (UISettings::values.enable_discord_presence != old_discord_presence) { SetDiscordEnabled(UISettings::values.enable_discord_presence); } + emit UpdateThemedIcons(); const auto reload = UISettings::values.is_game_list_reload_pending.exchange(false); if (reload) { - game_list->PopulateAsync(UISettings::values.game_directory_path, - UISettings::values.game_directory_deepscan); + game_list->PopulateAsync(UISettings::values.game_dirs); } config->Save(); @@ -1843,13 +1903,14 @@ void 
@@ -1843,13 +1903,14 @@ void GMainWindow::OnCoreError(Core::System::ResultStatus result, std::string det
                           "data, or other bugs.");
     switch (result) {
     case Core::System::ResultStatus::ErrorSystemFiles: {
-        QString message = tr("yuzu was unable to locate a Switch system archive");
-        if (!details.empty()) {
-            message.append(tr(": %1. ").arg(QString::fromStdString(details)));
+        QString message;
+        if (details.empty()) {
+            message =
+                tr("yuzu was unable to locate a Switch system archive. %1").arg(common_message);
         } else {
-            message.append(tr(". "));
+            message = tr("yuzu was unable to locate a Switch system archive: %1. %2")
+                          .arg(QString::fromStdString(details), common_message);
         }
-        message.append(common_message);
 
         answer = QMessageBox::question(this, tr("System Archive Not Found"), message,
                                        QMessageBox::Yes | QMessageBox::No, QMessageBox::No);
@@ -1858,8 +1919,8 @@
     }
 
     case Core::System::ResultStatus::ErrorSharedFont: {
-        QString message = tr("yuzu was unable to locate the Switch shared fonts. ");
-        message.append(common_message);
+        const QString message =
+            tr("yuzu was unable to locate the Switch shared fonts. %1").arg(common_message);
         answer = QMessageBox::question(this, tr("Shared Fonts Not Found"), message,
                                        QMessageBox::Yes | QMessageBox::No, QMessageBox::No);
         status_message = tr("Shared Font Missing");
@@ -1972,8 +2033,7 @@ void GMainWindow::OnReinitializeKeys(ReinitializeKeyBehavior behavior) {
         Service::FileSystem::CreateFactories(*vfs);
 
         if (behavior == ReinitializeKeyBehavior::Warning) {
-            game_list->PopulateAsync(UISettings::values.game_directory_path,
-                                     UISettings::values.game_directory_deepscan);
+            game_list->PopulateAsync(UISettings::values.game_dirs);
         }
     }
 
@@ -2138,7 +2198,6 @@ void GMainWindow::UpdateUITheme() {
     }
 
     QIcon::setThemeSearchPaths(theme_paths);
-    emit UpdateThemedIcons();
 }
 
 void GMainWindow::SetDiscordEnabled([[maybe_unused]] bool state) {
diff --git a/src/yuzu/main.h b/src/yuzu/main.h
index 1137bbc7a6..7d16188cbc 100644
--- a/src/yuzu/main.h
+++ b/src/yuzu/main.h
@@ -30,6 +30,7 @@ class ProfilerWidget;
 class QLabel;
 class WaitTreeWidget;
 enum class GameListOpenTarget;
+class GameListPlaceholder;
 
 namespace Core::Frontend {
 struct SoftwareKeyboardParameters;
@@ -130,6 +131,9 @@ private:
     void ConnectWidgetEvents();
     void ConnectMenuEvents();
 
+    void PreventOSSleep();
+    void AllowOSSleep();
+
     QStringList GetUnsupportedGLExtensions();
     bool LoadROM(const QString& filename);
     void BootGame(const QString& filename);
@@ -183,12 +187,13 @@ private slots:
     void OnGameListCopyTID(u64 program_id);
     void OnGameListNavigateToGamedbEntry(u64 program_id,
                                          const CompatibilityList& compatibility_list);
+    void OnGameListOpenDirectory(const QString& directory);
+    void OnGameListAddDirectory();
+    void OnGameListShowList(bool show);
     void OnGameListOpenPerGameProperties(const std::string& file);
     void OnMenuLoadFile();
     void OnMenuLoadFolder();
     void OnMenuInstallToNAND();
-    /// Called whenever a user selects the "File->Select Game List Root" menu item
-    void OnMenuSelectGameListRoot();
     /// Called whenever a user select the "File->Select -- Directory" where -- is NAND or SD Card
     void OnMenuSelectEmulatedDirectory(EmulatedDirectoryTarget target);
     void OnMenuRecentFile();
@@ -220,6 +225,8 @@ private:
     GameList* game_list;
     LoadingScreen* loading_screen;
 
+    GameListPlaceholder* game_list_placeholder;
+
     // Status bar elements
     QLabel* message_label = nullptr;
     QLabel* emu_speed_label = nullptr;
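The OnCoreError rework earlier in main.cpp builds each message with a single tr() call and %1/%2 placeholders instead of appending translated fragments, which keeps the whole sentence translatable as one unit. A small sketch of the same pattern outside yuzu (names and strings are illustrative, not taken from the codebase):

#include <QCoreApplication>
#include <QString>

QString MakeArchiveError(const QString& details, const QString& common_message) {
    if (details.isEmpty()) {
        return QCoreApplication::translate("Errors", "Unable to locate a system archive. %1")
            .arg(common_message);
    }
    return QCoreApplication::translate("Errors", "Unable to locate a system archive: %1. %2")
        .arg(details, common_message);
}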
diff --git a/src/yuzu/main.ui b/src/yuzu/main.ui
index ffcabb4954..a1ce3c0c35 100644
--- a/src/yuzu/main.ui
+++ b/src/yuzu/main.ui
@@ -62,7 +62,6 @@
     <addaction name="action_Load_File"/>
     <addaction name="action_Load_Folder"/>
     <addaction name="separator"/>
-    <addaction name="action_Select_Game_List_Root"/>
     <addaction name="menu_recent_files"/>
     <addaction name="separator"/>
     <addaction name="action_Select_NAND_Directory"/>
diff --git a/src/yuzu/ui_settings.cpp b/src/yuzu/uisettings.cpp
index 4bdc302e0f..7f7d247a34 100644
--- a/src/yuzu/ui_settings.cpp
+++ b/src/yuzu/uisettings.cpp
@@ -2,7 +2,7 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-#include "ui_settings.h"
+#include "yuzu/uisettings.h"
 
 namespace UISettings {
 
diff --git a/src/yuzu/ui_settings.h b/src/yuzu/uisettings.h
index a62cd69115..c572900062 100644
--- a/src/yuzu/ui_settings.h
+++ b/src/yuzu/uisettings.h
@@ -8,8 +8,10 @@
 #include <atomic>
 #include <vector>
 #include <QByteArray>
+#include <QMetaType>
 #include <QString>
 #include <QStringList>
+#include <QVector>
 #include "common/common_types.h"
 
 namespace UISettings {
@@ -25,6 +27,18 @@ struct Shortcut {
 using Themes = std::array<std::pair<const char*, const char*>, 2>;
 extern const Themes themes;
 
+struct GameDir {
+    QString path;
+    bool deep_scan;
+    bool expanded;
+    bool operator==(const GameDir& rhs) const {
+        return path == rhs.path;
+    };
+    bool operator!=(const GameDir& rhs) const {
+        return !operator==(rhs);
+    };
+};
+
 struct Values {
     QByteArray geometry;
     QByteArray state;
@@ -55,8 +69,9 @@ struct Values {
     QString roms_path;
     QString symbols_path;
     QString screenshot_path;
-    QString game_directory_path;
-    bool game_directory_deepscan;
+    QString game_dir_deprecated;
+    bool game_dir_deprecated_deepscan;
+    QVector<UISettings::GameDir> game_dirs;
     QStringList recent_files;
 
     QString theme;
@@ -84,3 +99,5 @@ struct Values {
 extern Values values;
 
 } // namespace UISettings
+
+Q_DECLARE_METATYPE(UISettings::GameDir*);
diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp
index 30b22341ba..067d58d809 100644
--- a/src/yuzu_cmd/config.cpp
+++ b/src/yuzu_cmd/config.cpp
@@ -340,7 +340,6 @@ void Config::ReadValues() {
     }
 
     // Core
-    Settings::values.cpu_jit_enabled = sdl2_config->GetBoolean("Core", "cpu_jit_enabled", true);
     Settings::values.use_multi_core = sdl2_config->GetBoolean("Core", "use_multi_core", false);
 
     // Renderer
diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h
index 4f1add4340..0cfc111a63 100644
--- a/src/yuzu_cmd/default_ini.h
+++ b/src/yuzu_cmd/default_ini.h
@@ -76,10 +76,6 @@ motion_device=
 touch_device=
 
 [Core]
-# Whether to use the Just-In-Time (JIT) compiler for CPU emulation
-# 0: Interpreter (slow), 1 (default): JIT (fast)
-cpu_jit_enabled =
-
 # Whether to use multi-core for CPU emulation
 # 0 (default): Disabled, 1: Enabled
 use_multi_core=
diff --git a/src/yuzu_tester/config.cpp b/src/yuzu_tester/config.cpp
index b96b7d2793..9a11dc6c33 100644
--- a/src/yuzu_tester/config.cpp
+++ b/src/yuzu_tester/config.cpp
@@ -114,7 +114,6 @@ void Config::ReadValues() {
     }
 
     // Core
-    Settings::values.cpu_jit_enabled = sdl2_config->GetBoolean("Core", "cpu_jit_enabled", true);
     Settings::values.use_multi_core = sdl2_config->GetBoolean("Core", "use_multi_core", false);
 
     // Renderer
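Because GameDir::operator== in uisettings.h above compares only path, QVector::contains() treats two entries for the same directory as equal even when deep_scan or expanded differ; OnGameListAddDirectory depends on exactly that to avoid duplicates. A small hedged sketch of that behaviour (AddDirOnce is a hypothetical helper, not part of the change):

#include <QString>
#include <QVector>
#include "yuzu/uisettings.h"

bool AddDirOnce(QVector<UISettings::GameDir>& dirs, const QString& path) {
    const UISettings::GameDir candidate{path, /*deep_scan=*/false, /*expanded=*/true};
    if (dirs.contains(candidate)) { // equality is path-only
        return false;               // already present, possibly with different flags
    }
    dirs.append(candidate);
    return true;
}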
diff --git a/src/yuzu_tester/default_ini.h b/src/yuzu_tester/default_ini.h
index 0f880d8c7e..9a3e86d68f 100644
--- a/src/yuzu_tester/default_ini.h
+++ b/src/yuzu_tester/default_ini.h
@@ -8,10 +8,6 @@ namespace DefaultINI {
 
 const char* sdl2_config_file = R"(
 [Core]
-# Whether to use the Just-In-Time (JIT) compiler for CPU emulation
-# 0: Interpreter (slow), 1 (default): JIT (fast)
-cpu_jit_enabled =
-
 # Whether to use multi-core for CPU emulation
 # 0 (default): Disabled, 1: Enabled
 use_multi_core=
diff --git a/src/yuzu_tester/yuzu.cpp b/src/yuzu_tester/yuzu.cpp
index b589c3de3f..0ee97aa54a 100644
--- a/src/yuzu_tester/yuzu.cpp
+++ b/src/yuzu_tester/yuzu.cpp
@@ -92,7 +92,6 @@ int main(int argc, char** argv) {
 
     int option_index = 0;
 
-    char* endarg;
 #ifdef _WIN32
     int argc_w;
     auto argv_w = CommandLineToArgvW(GetCommandLineW(), &argc_w);
@@ -226,7 +225,7 @@ int main(int argc, char** argv) {
 
     switch (load_result) {
     case Core::System::ResultStatus::ErrorGetLoader:
-        LOG_CRITICAL(Frontend, "Failed to obtain loader for %s!", filepath.c_str());
+        LOG_CRITICAL(Frontend, "Failed to obtain loader for {}!", filepath);
        return -1;
     case Core::System::ResultStatus::ErrorLoader:
         LOG_CRITICAL(Frontend, "Failed to load ROM!");
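The final hunk switches LOG_CRITICAL from a printf-style "%s" with .c_str() to the fmt-style "{}" placeholder used by yuzu's logging macros, passing the std::string directly. A standalone sketch of the equivalent call using the fmt library itself (the file name is illustrative):

#include <string>
#include <fmt/format.h>

int main() {
    const std::string filepath = "game.nsp"; // illustrative value
    fmt::print("Failed to obtain loader for {}!\n", filepath);
}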