From 16cb00c521cae6e93ec49d10e15b575b7bc4857e Mon Sep 17 00:00:00 2001
From: ReinUsesLisp <reinuseslisp@airmail.cc>
Date: Fri, 5 Feb 2021 23:11:23 -0300
Subject: shader: Add pools and rename files

---
 src/shader_recompiler/frontend/ir/opcodes.cpp | 67 +++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)
 create mode 100644 src/shader_recompiler/frontend/ir/opcodes.cpp


diff --git a/src/shader_recompiler/frontend/ir/opcodes.cpp b/src/shader_recompiler/frontend/ir/opcodes.cpp
new file mode 100644
index 0000000000..1f188411a9
--- /dev/null
+++ b/src/shader_recompiler/frontend/ir/opcodes.cpp
@@ -0,0 +1,67 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <array>
+#include <string_view>
+
+#include "shader_recompiler/exception.h"
+#include "shader_recompiler/frontend/ir/opcodes.h"
+
+namespace Shader::IR {
+namespace {
+struct OpcodeMeta {
+    std::string_view name;
+    Type type;
+    std::array<Type, 4> arg_types;
+};
+
+using enum Type;
+
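+// Metadata for every IR opcode, built by expanding the OPCODE X-macro in
+// opcodes.inc once per table entry.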
+constexpr std::array META_TABLE{
+#define OPCODE(name_token, type_token, ...)                                                        \
+    OpcodeMeta{                                                                                    \
+        .name{#name_token},                                                                        \
+        .type{type_token},                                                                         \
+        .arg_types{__VA_ARGS__},                                                                   \
+    },
+#include "opcodes.inc"
+#undef OPCODE
+};
+
+void ValidateOpcode(Opcode op) {
+    const size_t raw{static_cast<size_t>(op)};
+    if (raw >= META_TABLE.size()) {
+        throw InvalidArgument("Invalid opcode with raw value {}", raw);
+    }
+}
+} // Anonymous namespace
+
+Type TypeOf(Opcode op) {
+    ValidateOpcode(op);
+    return META_TABLE[static_cast<size_t>(op)].type;
+}
+
+size_t NumArgsOf(Opcode op) {
+    ValidateOpcode(op);
+    const auto& arg_types{META_TABLE[static_cast<size_t>(op)].arg_types};
+    const auto distance{std::distance(arg_types.begin(), std::ranges::find(arg_types, Type::Void))};
+    return static_cast<size_t>(distance);
+}
+
+Type ArgTypeOf(Opcode op, size_t arg_index) {
+    ValidateOpcode(op);
+    const auto& arg_types{META_TABLE[static_cast<size_t>(op)].arg_types};
+    if (arg_index >= arg_types.size() || arg_types[arg_index] == Type::Void) {
+        throw InvalidArgument("Out of bounds argument");
+    }
+    return arg_types[arg_index];
+}
+
+std::string_view NameOf(Opcode op) {
+    ValidateOpcode(op);
+    return META_TABLE[static_cast<size_t>(op)].name;
+}
+
+} // namespace Shader::IR
-- 
cgit v1.2.3-70-g09d2


From ab463712474de5f99eec137a9c6233e55fe184f0 Mon Sep 17 00:00:00 2001
From: ReinUsesLisp <reinuseslisp@airmail.cc>
Date: Mon, 8 Mar 2021 18:31:53 -0300
Subject: shader: Initial support for textures and TEX

---
 src/shader_recompiler/CMakeLists.txt               |   3 +
 .../backend/spirv/emit_context.cpp                 |  69 ++-
 src/shader_recompiler/backend/spirv/emit_context.h |   7 +
 src/shader_recompiler/backend/spirv/emit_spirv.cpp |  12 +
 src/shader_recompiler/backend/spirv/emit_spirv.h   |  32 +-
 .../backend/spirv/emit_spirv_convert.cpp           |  48 ++
 .../backend/spirv/emit_spirv_image.cpp             | 146 ++++++
 .../backend/spirv/emit_spirv_memory.cpp            |  18 +-
 src/shader_recompiler/environment.h                |   2 +
 src/shader_recompiler/file_environment.cpp         |   4 +
 src/shader_recompiler/file_environment.h           |   4 +-
 src/shader_recompiler/frontend/ir/ir_emitter.cpp   | 133 ++++-
 src/shader_recompiler/frontend/ir/ir_emitter.h     |  21 +-
 .../frontend/ir/microinstruction.cpp               |  73 ++-
 .../frontend/ir/microinstruction.h                 |  22 +-
 src/shader_recompiler/frontend/ir/modifiers.h      |  10 +
 src/shader_recompiler/frontend/ir/opcodes.cpp      |   2 +-
 src/shader_recompiler/frontend/ir/opcodes.inc      | 569 +++++++++++----------
 src/shader_recompiler/frontend/ir/reg.h            |  11 +
 src/shader_recompiler/frontend/ir/value.h          |   1 +
 src/shader_recompiler/frontend/maxwell/maxwell.inc |   4 +-
 src/shader_recompiler/frontend/maxwell/program.cpp |   1 +
 .../maxwell/translate/impl/not_implemented.cpp     |   8 -
 .../maxwell/translate/impl/texture_sample.cpp      | 232 +++++++++
 .../ir_opt/collect_shader_info_pass.cpp            |  19 +
 .../global_memory_to_storage_buffer_pass.cpp       |  15 +-
 src/shader_recompiler/ir_opt/passes.h              |   2 +
 src/shader_recompiler/ir_opt/texture_pass.cpp      | 199 +++++++
 src/shader_recompiler/shader_info.h                |  52 +-
 .../renderer_vulkan/vk_compute_pipeline.cpp        | 101 ++++
 .../renderer_vulkan/vk_compute_pipeline.h          |   4 +
 .../renderer_vulkan/vk_pipeline_cache.cpp          |   4 +
 src/video_core/renderer_vulkan/vk_rasterizer.cpp   |   3 +-
 33 files changed, 1489 insertions(+), 342 deletions(-)
 create mode 100644 src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
 create mode 100644 src/shader_recompiler/frontend/maxwell/translate/impl/texture_sample.cpp
 create mode 100644 src/shader_recompiler/ir_opt/texture_pass.cpp


diff --git a/src/shader_recompiler/CMakeLists.txt b/src/shader_recompiler/CMakeLists.txt
index cc38b28ed6..fa268d38fc 100644
--- a/src/shader_recompiler/CMakeLists.txt
+++ b/src/shader_recompiler/CMakeLists.txt
@@ -9,6 +9,7 @@ add_library(shader_recompiler STATIC
     backend/spirv/emit_spirv_control_flow.cpp
     backend/spirv/emit_spirv_convert.cpp
     backend/spirv/emit_spirv_floating_point.cpp
+    backend/spirv/emit_spirv_image.cpp
     backend/spirv/emit_spirv_integer.cpp
     backend/spirv/emit_spirv_logical.cpp
     backend/spirv/emit_spirv_memory.cpp
@@ -100,6 +101,7 @@ add_library(shader_recompiler STATIC
     frontend/maxwell/translate/impl/predicate_set_predicate.cpp
     frontend/maxwell/translate/impl/predicate_set_register.cpp
     frontend/maxwell/translate/impl/select_source_with_predicate.cpp
+    frontend/maxwell/translate/impl/texture_sample.cpp
     frontend/maxwell/translate/translate.cpp
     frontend/maxwell/translate/translate.h
     ir_opt/collect_shader_info_pass.cpp
@@ -110,6 +112,7 @@ add_library(shader_recompiler STATIC
     ir_opt/lower_fp16_to_fp32.cpp
     ir_opt/passes.h
     ir_opt/ssa_rewrite_pass.cpp
+    ir_opt/texture_pass.cpp
     ir_opt/verification_pass.cpp
     object_pool.h
     profile.h
diff --git a/src/shader_recompiler/backend/spirv/emit_context.cpp b/src/shader_recompiler/backend/spirv/emit_context.cpp
index d2dbd56d44..21900d3878 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_context.cpp
@@ -12,6 +12,43 @@
 #include "shader_recompiler/backend/spirv/emit_context.h"
 
 namespace Shader::Backend::SPIRV {
+namespace {
+Id ImageType(EmitContext& ctx, const TextureDescriptor& desc) {
+    const spv::ImageFormat format{spv::ImageFormat::Unknown};
+    const Id type{ctx.F32[1]};
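+    // TypeImage operands: sampled type, dimensionality, depth, arrayed,
+    // multisampled, sampled (1 = used with a sampler) and image format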
+    switch (desc.type) {
+    case TextureType::Color1D:
+        return ctx.TypeImage(type, spv::Dim::Dim1D, false, false, false, 1, format);
+    case TextureType::ColorArray1D:
+        return ctx.TypeImage(type, spv::Dim::Dim1D, false, true, false, 1, format);
+    case TextureType::Color2D:
+        return ctx.TypeImage(type, spv::Dim::Dim2D, false, false, false, 1, format);
+    case TextureType::ColorArray2D:
+        return ctx.TypeImage(type, spv::Dim::Dim2D, false, true, false, 1, format);
+    case TextureType::Color3D:
+        return ctx.TypeImage(type, spv::Dim::Dim3D, false, false, false, 1, format);
+    case TextureType::ColorCube:
+        return ctx.TypeImage(type, spv::Dim::Cube, false, false, false, 1, format);
+    case TextureType::ColorArrayCube:
+        return ctx.TypeImage(type, spv::Dim::Cube, false, true, false, 1, format);
+    case TextureType::Shadow1D:
+        return ctx.TypeImage(type, spv::Dim::Dim1D, true, false, false, 1, format);
+    case TextureType::ShadowArray1D:
+        return ctx.TypeImage(type, spv::Dim::Dim1D, true, true, false, 1, format);
+    case TextureType::Shadow2D:
+        return ctx.TypeImage(type, spv::Dim::Dim2D, true, false, false, 1, format);
+    case TextureType::ShadowArray2D:
+        return ctx.TypeImage(type, spv::Dim::Dim2D, true, true, false, 1, format);
+    case TextureType::Shadow3D:
+        return ctx.TypeImage(type, spv::Dim::Dim3D, true, false, false, 1, format);
+    case TextureType::ShadowCube:
+        return ctx.TypeImage(type, spv::Dim::Cube, true, false, false, 1, format);
+    case TextureType::ShadowArrayCube:
+        return ctx.TypeImage(type, spv::Dim::Cube, true, true, false, 1, format);
+    }
+    throw InvalidArgument("Invalid texture type {}", desc.type);
+}
+} // Anonymous namespace
 
 void VectorTypes::Define(Sirit::Module& sirit_ctx, Id base_type, std::string_view name) {
     defs[0] = sirit_ctx.Name(base_type, name);
@@ -35,6 +72,7 @@ EmitContext::EmitContext(const Profile& profile_, IR::Program& program)
     u32 binding{};
     DefineConstantBuffers(program.info, binding);
     DefineStorageBuffers(program.info, binding);
+    DefineTextures(program.info, binding);
 
     DefineLabels(program);
 }
@@ -46,6 +84,10 @@ Id EmitContext::Def(const IR::Value& value) {
         return value.Inst()->Definition<Id>();
     }
     switch (value.Type()) {
+    case IR::Type::Void:
+        // Void instructions are used for optional arguments (e.g. texture offsets)
+        // They are not meant to be used in the SPIR-V module
+        return Id{};
     case IR::Type::U1:
         return value.U1() ? true_value : false_value;
     case IR::Type::U32:
@@ -122,7 +164,7 @@ void EmitContext::DefineConstantBuffers(const Info& info, u32& binding) {
     uniform_u32 = TypePointer(spv::StorageClass::Uniform, U32[1]);
 
     u32 index{};
-    for (const Info::ConstantBufferDescriptor& desc : info.constant_buffer_descriptors) {
+    for (const ConstantBufferDescriptor& desc : info.constant_buffer_descriptors) {
         const Id id{AddGlobalVariable(uniform_type, spv::StorageClass::Uniform)};
         Decorate(id, spv::Decoration::Binding, binding);
         Decorate(id, spv::Decoration::DescriptorSet, 0U);
@@ -152,7 +194,7 @@ void EmitContext::DefineStorageBuffers(const Info& info, u32& binding) {
     storage_u32 = TypePointer(spv::StorageClass::StorageBuffer, U32[1]);
 
     u32 index{};
-    for (const Info::StorageBufferDescriptor& desc : info.storage_buffers_descriptors) {
+    for (const StorageBufferDescriptor& desc : info.storage_buffers_descriptors) {
         const Id id{AddGlobalVariable(storage_type, spv::StorageClass::StorageBuffer)};
         Decorate(id, spv::Decoration::Binding, binding);
         Decorate(id, spv::Decoration::DescriptorSet, 0U);
@@ -163,6 +205,29 @@ void EmitContext::DefineStorageBuffers(const Info& info, u32& binding) {
     }
 }
 
+void EmitContext::DefineTextures(const Info& info, u32& binding) {
+    textures.reserve(info.texture_descriptors.size());
+    for (const TextureDescriptor& desc : info.texture_descriptors) {
+        if (desc.count != 1) {
+            throw NotImplementedException("Array of textures");
+        }
+        const Id type{TypeSampledImage(ImageType(*this, desc))};
+        const Id pointer_type{TypePointer(spv::StorageClass::UniformConstant, type)};
+        const Id id{AddGlobalVariable(pointer_type, spv::StorageClass::UniformConstant)};
+        Decorate(id, spv::Decoration::Binding, binding);
+        Decorate(id, spv::Decoration::DescriptorSet, 0U);
+        Name(id, fmt::format("tex{}_{:02x}", desc.cbuf_index, desc.cbuf_offset));
+        for (u32 index = 0; index < desc.count; ++index) {
+            // TODO: Pass count info
+            textures.push_back(TextureDefinition{
+                .id{id},
+                .type{type},
+            });
+        }
+        binding += desc.count;
+    }
+}
+
 void EmitContext::DefineLabels(IR::Program& program) {
     for (const IR::Function& function : program.functions) {
         for (IR::Block* const block : function.blocks) {
diff --git a/src/shader_recompiler/backend/spirv/emit_context.h b/src/shader_recompiler/backend/spirv/emit_context.h
index d20cf387ef..8b3109eb8c 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.h
+++ b/src/shader_recompiler/backend/spirv/emit_context.h
@@ -29,6 +29,11 @@ private:
     std::array<Id, 4> defs{};
 };
 
+struct TextureDefinition {
+    Id id;
+    Id type;
+};
+
 class EmitContext final : public Sirit::Module {
 public:
     explicit EmitContext(const Profile& profile, IR::Program& program);
@@ -56,6 +61,7 @@ public:
 
     std::array<Id, Info::MAX_CBUFS> cbufs{};
     std::array<Id, Info::MAX_SSBOS> ssbos{};
+    std::vector<TextureDefinition> textures;
 
     Id workgroup_id{};
     Id local_invocation_id{};
@@ -66,6 +72,7 @@ private:
     void DefineSpecialVariables(const Info& info);
     void DefineConstantBuffers(const Info& info, u32& binding);
     void DefineStorageBuffers(const Info& info, u32& binding);
+    void DefineTextures(const Info& info, u32& binding);
     void DefineLabels(IR::Program& program);
 };
 
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.cpp b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
index 8097fe82dc..a94e9cb2d2 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
@@ -221,6 +221,14 @@ std::vector<u32> EmitSPIRV(const Profile& profile, Environment& env, IR::Program
                          workgroup_size[2]);
 
     SetupDenormControl(profile, program, ctx, func);
+    if (info.uses_sampled_1d) {
+        ctx.AddCapability(spv::Capability::Sampled1D);
+    }
+    if (info.uses_sparse_residency) {
+        ctx.AddCapability(spv::Capability::SparseResidency);
+    }
+    // TODO: Track this usage
+    ctx.AddCapability(spv::Capability::ImageGatherExtended);
 
     return ctx.Assemble();
 }
@@ -259,4 +267,8 @@ void EmitGetOverflowFromOp(EmitContext&) {
     throw LogicError("Unreachable instruction");
 }
 
+void EmitGetSparseFromOp(EmitContext&) {
+    throw LogicError("Unreachable instruction");
+}
+
 } // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.h b/src/shader_recompiler/backend/spirv/emit_spirv.h
index 92387ca280..69698c478e 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.h
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.h
@@ -83,7 +83,8 @@ void EmitWriteStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Va
                         Id value);
 void EmitWriteStorage64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
                         Id value);
-void EmitWriteStorage128(EmitContext& ctx);
+void EmitWriteStorage128(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                         Id value);
 Id EmitCompositeConstructU32x2(EmitContext& ctx, Id e1, Id e2);
 Id EmitCompositeConstructU32x3(EmitContext& ctx, Id e1, Id e2, Id e3);
 Id EmitCompositeConstructU32x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4);
@@ -145,6 +146,7 @@ void EmitGetZeroFromOp(EmitContext& ctx);
 void EmitGetSignFromOp(EmitContext& ctx);
 void EmitGetCarryFromOp(EmitContext& ctx);
 void EmitGetOverflowFromOp(EmitContext& ctx);
+void EmitGetSparseFromOp(EmitContext& ctx);
 Id EmitFPAbs16(EmitContext& ctx, Id value);
 Id EmitFPAbs32(EmitContext& ctx, Id value);
 Id EmitFPAbs64(EmitContext& ctx, Id value);
@@ -291,5 +293,33 @@ Id EmitConvertF16F32(EmitContext& ctx, Id value);
 Id EmitConvertF32F16(EmitContext& ctx, Id value);
 Id EmitConvertF32F64(EmitContext& ctx, Id value);
 Id EmitConvertF64F32(EmitContext& ctx, Id value);
+Id EmitConvertF16S32(EmitContext& ctx, Id value);
+Id EmitConvertF16S64(EmitContext& ctx, Id value);
+Id EmitConvertF16U32(EmitContext& ctx, Id value);
+Id EmitConvertF16U64(EmitContext& ctx, Id value);
+Id EmitConvertF32S32(EmitContext& ctx, Id value);
+Id EmitConvertF32S64(EmitContext& ctx, Id value);
+Id EmitConvertF32U32(EmitContext& ctx, Id value);
+Id EmitConvertF32U64(EmitContext& ctx, Id value);
+Id EmitConvertF64S32(EmitContext& ctx, Id value);
+Id EmitConvertF64S64(EmitContext& ctx, Id value);
+Id EmitConvertF64U32(EmitContext& ctx, Id value);
+Id EmitConvertF64U64(EmitContext& ctx, Id value);
+Id EmitBindlessImageSampleImplicitLod(EmitContext&);
+Id EmitBindlessImageSampleExplicitLod(EmitContext&);
+Id EmitBindlessImageSampleDrefImplicitLod(EmitContext&);
+Id EmitBindlessImageSampleDrefExplicitLod(EmitContext&);
+Id EmitBoundImageSampleImplicitLod(EmitContext&);
+Id EmitBoundImageSampleExplicitLod(EmitContext&);
+Id EmitBoundImageSampleDrefImplicitLod(EmitContext&);
+Id EmitBoundImageSampleDrefExplicitLod(EmitContext&);
+Id EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+                              Id bias_lc, Id offset);
+Id EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+                              Id lod_lc, Id offset);
+Id EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
+                                  Id coords, Id dref, Id bias_lc, Id offset);
+Id EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
+                                  Id coords, Id dref, Id lod_lc, Id offset);
 
 } // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_convert.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_convert.cpp
index edcc2a1cca..2aff673aa5 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_convert.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_convert.cpp
@@ -102,4 +102,52 @@ Id EmitConvertF64F32(EmitContext& ctx, Id value) {
     return ctx.OpFConvert(ctx.F64[1], value);
 }
 
+Id EmitConvertF16S32(EmitContext& ctx, Id value) {
+    return ctx.OpConvertSToF(ctx.F16[1], value);
+}
+
+Id EmitConvertF16S64(EmitContext& ctx, Id value) {
+    return ctx.OpConvertSToF(ctx.F16[1], value);
+}
+
+Id EmitConvertF16U32(EmitContext& ctx, Id value) {
+    return ctx.OpConvertUToF(ctx.F16[1], value);
+}
+
+Id EmitConvertF16U64(EmitContext& ctx, Id value) {
+    return ctx.OpConvertUToF(ctx.F16[1], value);
+}
+
+Id EmitConvertF32S32(EmitContext& ctx, Id value) {
+    return ctx.OpConvertSToF(ctx.F32[1], value);
+}
+
+Id EmitConvertF32S64(EmitContext& ctx, Id value) {
+    return ctx.OpConvertSToF(ctx.F32[1], value);
+}
+
+Id EmitConvertF32U32(EmitContext& ctx, Id value) {
+    return ctx.OpConvertUToF(ctx.F32[1], value);
+}
+
+Id EmitConvertF32U64(EmitContext& ctx, Id value) {
+    return ctx.OpConvertUToF(ctx.F32[1], value);
+}
+
+Id EmitConvertF64S32(EmitContext& ctx, Id value) {
+    return ctx.OpConvertSToF(ctx.F64[1], value);
+}
+
+Id EmitConvertF64S64(EmitContext& ctx, Id value) {
+    return ctx.OpConvertSToF(ctx.F64[1], value);
+}
+
+Id EmitConvertF64U32(EmitContext& ctx, Id value) {
+    return ctx.OpConvertUToF(ctx.F64[1], value);
+}
+
+Id EmitConvertF64U64(EmitContext& ctx, Id value) {
+    return ctx.OpConvertUToF(ctx.F64[1], value);
+}
+
 } // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
new file mode 100644
index 0000000000..5f4783c952
--- /dev/null
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -0,0 +1,146 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <boost/container/static_vector.hpp>
+
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
+#include "shader_recompiler/frontend/ir/modifiers.h"
+
+namespace Shader::Backend::SPIRV {
+namespace {
+class ImageOperands {
+public:
+    explicit ImageOperands(EmitContext& ctx, bool has_bias, bool has_lod, bool has_lod_clamp,
+                           Id lod, Id offset) {
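+        // When has_lod_clamp is set, lod packs {bias or lod, lod clamp} as built
+        // by the IR emitter; otherwise it is a scalar bias or LOD value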
+        if (has_bias) {
+            const Id bias{has_lod_clamp ? ctx.OpCompositeExtract(ctx.F32[1], lod, 0) : lod};
+            Add(spv::ImageOperandsMask::Bias, bias);
+        }
+        if (has_lod) {
+            const Id lod_value{has_lod_clamp ? ctx.OpCompositeExtract(ctx.F32[1], lod, 0) : lod};
+            Add(spv::ImageOperandsMask::Lod, lod_value);
+        }
+        if (Sirit::ValidId(offset)) {
+            Add(spv::ImageOperandsMask::Offset, offset);
+        }
+        if (has_lod_clamp) {
+            // The clamp is the second component when lod was packed with a bias or LOD
+            const bool is_packed{has_bias || has_lod};
+            const Id lod_clamp{is_packed ? ctx.OpCompositeExtract(ctx.F32[1], lod, 1) : lod};
+            Add(spv::ImageOperandsMask::MinLod, lod_clamp);
+        }
+    }
+
+    void Add(spv::ImageOperandsMask new_mask, Id value) {
+        mask = static_cast<spv::ImageOperandsMask>(static_cast<unsigned>(mask) |
+                                                   static_cast<unsigned>(new_mask));
+        operands.push_back(value);
+    }
+
+    std::span<const Id> Span() const noexcept {
+        return std::span{operands.data(), operands.size()};
+    }
+
+    spv::ImageOperandsMask Mask() const noexcept {
+        return mask;
+    }
+
+private:
+    boost::container::static_vector<Id, 3> operands;
+    spv::ImageOperandsMask mask{};
+};
+
+Id Texture(EmitContext& ctx, const IR::Value& index) {
+    if (index.IsImmediate()) {
+        const TextureDefinition def{ctx.textures.at(index.U32())};
+        return ctx.OpLoad(def.type, def.id);
+    }
+    throw NotImplementedException("Indirect texture sample");
+}
+
+template <typename MethodPtrType, typename... Args>
+Id Emit(MethodPtrType sparse_ptr, MethodPtrType non_sparse_ptr, EmitContext& ctx, IR::Inst* inst,
+        Id result_type, Args&&... args) {
+    IR::Inst* const sparse{inst->GetAssociatedPseudoOperation(IR::Opcode::GetSparseFromOp)};
+    if (!sparse) {
+        return (ctx.*non_sparse_ptr)(result_type, std::forward<Args>(args)...);
+    }
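+    // Sparse image instructions return a struct of {residency code, texel}.
+    // Unpack the texel and define the residency pseudo-instruction from the code.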
+    const Id struct_type{ctx.TypeStruct(ctx.U32[1], result_type)};
+    const Id sample{(ctx.*sparse_ptr)(struct_type, std::forward<Args>(args)...)};
+    const Id resident_code{ctx.OpCompositeExtract(ctx.U32[1], sample, 0U)};
+    sparse->SetDefinition(ctx.OpImageSparseTexelsResident(ctx.U1, resident_code));
+    sparse->Invalidate();
+    return ctx.OpCompositeExtract(result_type, sample, 1U);
+}
+} // Anonymous namespace
+
+Id EmitBindlessImageSampleImplicitLod(EmitContext&) {
+    throw LogicError("Unreachable instruction");
+}
+
+Id EmitBindlessImageSampleExplicitLod(EmitContext&) {
+    throw LogicError("Unreachable instruction");
+}
+
+Id EmitBindlessImageSampleDrefImplicitLod(EmitContext&) {
+    throw LogicError("Unreachable instruction");
+}
+
+Id EmitBindlessImageSampleDrefExplicitLod(EmitContext&) {
+    throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageSampleImplicitLod(EmitContext&) {
+    throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageSampleExplicitLod(EmitContext&) {
+    throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageSampleDrefImplicitLod(EmitContext&) {
+    throw LogicError("Unreachable instruction");
+}
+
+Id EmitBoundImageSampleDrefExplicitLod(EmitContext&) {
+    throw LogicError("Unreachable instruction");
+}
+
+Id EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+                              Id bias_lc, Id offset) {
+    const auto info{inst->Flags<IR::TextureInstInfo>()};
+    const ImageOperands operands(ctx, info.has_bias != 0, false, info.has_lod_clamp != 0, bias_lc,
+                                 offset);
+    return Emit(&EmitContext::OpImageSparseSampleImplicitLod,
+                &EmitContext::OpImageSampleImplicitLod, ctx, inst, ctx.F32[4], Texture(ctx, index),
+                coords, operands.Mask(), operands.Span());
+}
+
+Id EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
+                              Id lod_lc, Id offset) {
+    const auto info{inst->Flags<IR::TextureInstInfo>()};
+    const ImageOperands operands(ctx, false, true, info.has_lod_clamp != 0, lod_lc, offset);
+    return Emit(&EmitContext::OpImageSparseSampleExplicitLod,
+                &EmitContext::OpImageSampleExplicitLod, ctx, inst, ctx.F32[4], Texture(ctx, index),
+                coords, operands.Mask(), operands.Span());
+}
+
+Id EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
+                                  Id coords, Id dref, Id bias_lc, Id offset) {
+    const auto info{inst->Flags<IR::TextureInstInfo>()};
+    const ImageOperands operands(ctx, info.has_bias != 0, false, info.has_lod_clamp != 0, bias_lc,
+                                 offset);
+    return Emit(&EmitContext::OpImageSparseSampleDrefImplicitLod,
+                &EmitContext::OpImageSampleDrefImplicitLod, ctx, inst, ctx.F32[1],
+                Texture(ctx, index), coords, dref, operands.Mask(), operands.Span());
+}
+
+Id EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
+                                  Id coords, Id dref, Id lod_lc, Id offset) {
+    const auto info{inst->Flags<IR::TextureInstInfo>()};
+    const ImageOperands operands(ctx, false, true, info.has_lod_clamp != 0, lod_lc, offset);
+    return Emit(&EmitContext::OpImageSparseSampleDrefExplicitLod,
+                &EmitContext::OpImageSampleDrefExplicitLod, ctx, inst, ctx.F32[1],
+                Texture(ctx, index), coords, dref, operands.Mask(), operands.Span());
+}
+
+} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp
index 808c1b4016..7d3efc7418 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp
@@ -154,8 +154,22 @@ void EmitWriteStorage64(EmitContext& ctx, const IR::Value& binding, const IR::Va
     ctx.OpStore(high_pointer, ctx.OpCompositeExtract(ctx.U32[1], value, 1U));
 }
 
-void EmitWriteStorage128(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
+void EmitWriteStorage128(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
+                         Id value) {
+    if (!binding.IsImmediate()) {
+        throw NotImplementedException("Dynamic storage buffer indexing");
+    }
+    // TODO: Support reinterpreting bindings, guaranteed to be aligned
+    const Id ssbo{ctx.ssbos[binding.U32()]};
+    const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
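+    // Store the 128-bit value as four consecutive 32-bit words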
+    for (u32 element = 0; element < 4; ++element) {
+        Id index = base_index;
+        if (element > 0) {
+            index = ctx.OpIAdd(ctx.U32[1], base_index, ctx.Constant(ctx.U32[1], element));
+        }
+        const Id pointer{ctx.OpAccessChain(ctx.storage_u32, ssbo, ctx.u32_zero_value, index)};
+        ctx.OpStore(pointer, ctx.OpCompositeExtract(ctx.U32[1], value, element));
+    }
 }
 
 } // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/environment.h b/src/shader_recompiler/environment.h
index 0ba681fb96..0fcb68050d 100644
--- a/src/shader_recompiler/environment.h
+++ b/src/shader_recompiler/environment.h
@@ -12,6 +12,8 @@ public:
 
     [[nodiscard]] virtual u64 ReadInstruction(u32 address) = 0;
 
+    [[nodiscard]] virtual u32 TextureBoundBuffer() = 0;
+
     [[nodiscard]] virtual std::array<u32, 3> WorkgroupSize() = 0;
 };
 
diff --git a/src/shader_recompiler/file_environment.cpp b/src/shader_recompiler/file_environment.cpp
index 5127523f97..21700c72b0 100644
--- a/src/shader_recompiler/file_environment.cpp
+++ b/src/shader_recompiler/file_environment.cpp
@@ -39,6 +39,10 @@ u64 FileEnvironment::ReadInstruction(u32 offset) {
     return data[offset / 8];
 }
 
+u32 FileEnvironment::TextureBoundBuffer() {
+    throw NotImplementedException("Texture bound buffer serialization");
+}
+
 std::array<u32, 3> FileEnvironment::WorkgroupSize() {
     return {1, 1, 1};
 }
diff --git a/src/shader_recompiler/file_environment.h b/src/shader_recompiler/file_environment.h
index b8c4bbadd9..62302bc8ed 100644
--- a/src/shader_recompiler/file_environment.h
+++ b/src/shader_recompiler/file_environment.h
@@ -3,7 +3,7 @@
 #include <vector>
 
 #include "common/common_types.h"
-#include "environment.h"
+#include "shader_recompiler/environment.h"
 
 namespace Shader {
 
@@ -14,6 +14,8 @@ public:
 
     u64 ReadInstruction(u32 offset) override;
 
+    u32 TextureBoundBuffer() override;
+
     std::array<u32, 3> WorkgroupSize() override;
 
 private:
diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.cpp b/src/shader_recompiler/frontend/ir/ir_emitter.cpp
index f38b46bace..ae3354c669 100644
--- a/src/shader_recompiler/frontend/ir/ir_emitter.cpp
+++ b/src/shader_recompiler/frontend/ir/ir_emitter.cpp
@@ -7,11 +7,24 @@
 #include "shader_recompiler/frontend/ir/value.h"
 
 namespace Shader::IR {
-
-[[noreturn]] static void ThrowInvalidType(Type type) {
+namespace {
+[[noreturn]] void ThrowInvalidType(Type type) {
     throw InvalidArgument("Invalid type {}", type);
 }
 
+Value MakeLodClampPair(IREmitter& ir, const F32& bias_lod, const F32& lod_clamp) {
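+    // Pack the optional bias/LOD and LOD clamp into a single operand; the
+    // SPIR-V backend unpacks the pair based on TextureInstInfo flags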
+    if (!bias_lod.IsEmpty() && !lod_clamp.IsEmpty()) {
+        return ir.CompositeConstruct(bias_lod, lod_clamp);
+    } else if (!bias_lod.IsEmpty()) {
+        return bias_lod;
+    } else if (!lod_clamp.IsEmpty()) {
+        return lod_clamp;
+    } else {
+        return Value{};
+    }
+}
+} // Anonymous namespace
+
 U1 IREmitter::Imm1(bool value) const {
     return U1{Value{value}};
 }
@@ -261,6 +274,10 @@ U1 IREmitter::GetOverflowFromOp(const Value& op) {
     return Inst<U1>(Opcode::GetOverflowFromOp, op);
 }
 
+U1 IREmitter::GetSparseFromOp(const Value& op) {
+    return Inst<U1>(Opcode::GetSparseFromOp, op);
+}
+
 F16F32F64 IREmitter::FPAdd(const F16F32F64& a, const F16F32F64& b, FpControl control) {
     if (a.Type() != b.Type()) {
         throw InvalidArgument("Mismatching types {} and {}", a.Type(), b.Type());
@@ -1035,6 +1052,82 @@ U32U64 IREmitter::ConvertFToI(size_t bitsize, bool is_signed, const F16F32F64& v
     }
 }
 
+F16F32F64 IREmitter::ConvertSToF(size_t bitsize, const U32U64& value) {
+    switch (bitsize) {
+    case 16:
+        switch (value.Type()) {
+        case Type::U32:
+            return Inst<F16>(Opcode::ConvertF16S32, value);
+        case Type::U64:
+            return Inst<F16>(Opcode::ConvertF16S64, value);
+        default:
+            ThrowInvalidType(value.Type());
+        }
+    case 32:
+        switch (value.Type()) {
+        case Type::U32:
+            return Inst<F32>(Opcode::ConvertF32S32, value);
+        case Type::U64:
+            return Inst<F32>(Opcode::ConvertF32S64, value);
+        default:
+            ThrowInvalidType(value.Type());
+        }
+    case 64:
+        switch (value.Type()) {
+        case Type::U32:
+            return Inst<F64>(Opcode::ConvertF64S32, value);
+        case Type::U64:
+            return Inst<F64>(Opcode::ConvertF64S64, value);
+        default:
+            ThrowInvalidType(value.Type());
+        }
+    default:
+        throw InvalidArgument("Invalid destination bitsize {}", bitsize);
+    }
+}
+
+F16F32F64 IREmitter::ConvertUToF(size_t bitsize, const U32U64& value) {
+    switch (bitsize) {
+    case 16:
+        switch (value.Type()) {
+        case Type::U32:
+            return Inst<F16>(Opcode::ConvertF16U32, value);
+        case Type::U64:
+            return Inst<F16>(Opcode::ConvertF16U64, value);
+        default:
+            ThrowInvalidType(value.Type());
+        }
+    case 32:
+        switch (value.Type()) {
+        case Type::U32:
+            return Inst<F32>(Opcode::ConvertF32U32, value);
+        case Type::U64:
+            return Inst<F32>(Opcode::ConvertF32U64, value);
+        default:
+            ThrowInvalidType(value.Type());
+        }
+    case 64:
+        switch (value.Type()) {
+        case Type::U32:
+            return Inst<F64>(Opcode::ConvertF64U32, value);
+        case Type::U64:
+            return Inst<F64>(Opcode::ConvertF64U64, value);
+        default:
+            ThrowInvalidType(value.Type());
+        }
+    default:
+        throw InvalidArgument("Invalid destination bitsize {}", bitsize);
+    }
+}
+
+F16F32F64 IREmitter::ConvertIToF(size_t bitsize, bool is_signed, const U32U64& value) {
+    if (is_signed) {
+        return ConvertSToF(bitsize, value);
+    } else {
+        return ConvertUToF(bitsize, value);
+    }
+}
+
 U32U64 IREmitter::UConvert(size_t result_bitsize, const U32U64& value) {
     switch (result_bitsize) {
     case 32:
@@ -1107,4 +1200,40 @@ F16F32F64 IREmitter::FPConvert(size_t result_bitsize, const F16F32F64& value) {
     throw NotImplementedException("Conversion from {} to {} bits", value.Type(), result_bitsize);
 }
 
+Value IREmitter::ImageSampleImplicitLod(const Value& handle, const Value& coords, const F32& bias,
+                                        const Value& offset, const F32& lod_clamp,
+                                        TextureInstInfo info) {
+    const Value bias_lc{MakeLodClampPair(*this, bias, lod_clamp)};
+    const Opcode op{handle.IsImmediate() ? Opcode::BoundImageSampleImplicitLod
+                                         : Opcode::BindlessImageSampleImplicitLod};
+    return Inst(op, Flags{info}, handle, coords, bias_lc, offset);
+}
+
+Value IREmitter::ImageSampleExplicitLod(const Value& handle, const Value& coords, const F32& lod,
+                                        const Value& offset, const F32& lod_clamp,
+                                        TextureInstInfo info) {
+    const Value lod_lc{MakeLodClampPair(*this, lod, lod_clamp)};
+    const Opcode op{handle.IsImmediate() ? Opcode::BoundImageSampleExplicitLod
+                                         : Opcode::BindlessImageSampleExplicitLod};
+    return Inst(op, Flags{info}, handle, coords, lod_lc, offset);
+}
+
+F32 IREmitter::ImageSampleDrefImplicitLod(const Value& handle, const Value& coords, const F32& dref,
+                                          const F32& bias, const Value& offset,
+                                          const F32& lod_clamp, TextureInstInfo info) {
+    const Value bias_lc{MakeLodClampPair(*this, bias, lod_clamp)};
+    const Opcode op{handle.IsImmediate() ? Opcode::BoundImageSampleDrefImplicitLod
+                                         : Opcode::BindlessImageSampleDrefImplicitLod};
+    return Inst<F32>(op, Flags{info}, handle, coords, dref, bias_lc, offset);
+}
+
+F32 IREmitter::ImageSampleDrefExplicitLod(const Value& handle, const Value& coords, const F32& dref,
+                                          const F32& lod, const Value& offset, const F32& lod_clamp,
+                                          TextureInstInfo info) {
+    const Value lod_lc{MakeLodClampPair(*this, lod, lod_clamp)};
+    const Opcode op{handle.IsImmediate() ? Opcode::BoundImageSampleDrefExplicitLod
+                                         : Opcode::BindlessImageSampleDrefExplicitLod};
+    return Inst<F32>(op, Flags{info}, handle, coords, dref, lod_lc, offset);
+}
+
 } // namespace Shader::IR
diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.h b/src/shader_recompiler/frontend/ir/ir_emitter.h
index 6e29bf0e20..cb2a7710a1 100644
--- a/src/shader_recompiler/frontend/ir/ir_emitter.h
+++ b/src/shader_recompiler/frontend/ir/ir_emitter.h
@@ -91,6 +91,7 @@ public:
     [[nodiscard]] U1 GetSignFromOp(const Value& op);
     [[nodiscard]] U1 GetCarryFromOp(const Value& op);
     [[nodiscard]] U1 GetOverflowFromOp(const Value& op);
+    [[nodiscard]] U1 GetSparseFromOp(const Value& op);
 
     [[nodiscard]] Value CompositeConstruct(const Value& e1, const Value& e2);
     [[nodiscard]] Value CompositeConstruct(const Value& e1, const Value& e2, const Value& e3);
@@ -159,7 +160,7 @@ public:
     [[nodiscard]] U32 BitFieldInsert(const U32& base, const U32& insert, const U32& offset,
                                      const U32& count);
     [[nodiscard]] U32 BitFieldExtract(const U32& base, const U32& offset, const U32& count,
-                                      bool is_signed);
+                                      bool is_signed = false);
     [[nodiscard]] U32 BitReverse(const U32& value);
     [[nodiscard]] U32 BitCount(const U32& value);
     [[nodiscard]] U32 BitwiseNot(const U32& value);
@@ -186,10 +187,28 @@ public:
     [[nodiscard]] U32U64 ConvertFToS(size_t bitsize, const F16F32F64& value);
     [[nodiscard]] U32U64 ConvertFToU(size_t bitsize, const F16F32F64& value);
     [[nodiscard]] U32U64 ConvertFToI(size_t bitsize, bool is_signed, const F16F32F64& value);
+    [[nodiscard]] F16F32F64 ConvertSToF(size_t bitsize, const U32U64& value);
+    [[nodiscard]] F16F32F64 ConvertUToF(size_t bitsize, const U32U64& value);
+    [[nodiscard]] F16F32F64 ConvertIToF(size_t bitsize, bool is_signed, const U32U64& value);
 
     [[nodiscard]] U32U64 UConvert(size_t result_bitsize, const U32U64& value);
     [[nodiscard]] F16F32F64 FPConvert(size_t result_bitsize, const F16F32F64& value);
 
+    [[nodiscard]] Value ImageSampleImplicitLod(const Value& handle, const Value& coords,
+                                               const F32& bias, const Value& offset,
+                                               const F32& lod_clamp, TextureInstInfo info);
+    [[nodiscard]] Value ImageSampleExplicitLod(const Value& handle, const Value& coords,
+                                               const F32& lod, const Value& offset,
+                                               const F32& lod_clamp, TextureInstInfo info);
+    [[nodiscard]] F32 ImageSampleDrefImplicitLod(const Value& handle, const Value& coords,
+                                                 const F32& dref, const F32& bias,
+                                                 const Value& offset, const F32& lod_clamp,
+                                                 TextureInstInfo info);
+    [[nodiscard]] F32 ImageSampleDrefExplicitLod(const Value& handle, const Value& coords,
+                                                 const F32& dref, const F32& lod,
+                                                 const Value& offset, const F32& lod_clamp,
+                                                 TextureInstInfo info);
+
 private:
     IR::Block::iterator insertion_point;
 
diff --git a/src/shader_recompiler/frontend/ir/microinstruction.cpp b/src/shader_recompiler/frontend/ir/microinstruction.cpp
index d6a9be87d5..88e186f215 100644
--- a/src/shader_recompiler/frontend/ir/microinstruction.cpp
+++ b/src/shader_recompiler/frontend/ir/microinstruction.cpp
@@ -10,26 +10,27 @@
 #include "shader_recompiler/frontend/ir/type.h"
 
 namespace Shader::IR {
-
-static void CheckPseudoInstruction(IR::Inst* inst, IR::Opcode opcode) {
+namespace {
+void CheckPseudoInstruction(IR::Inst* inst, IR::Opcode opcode) {
     if (inst && inst->Opcode() != opcode) {
         throw LogicError("Invalid pseudo-instruction");
     }
 }
 
-static void SetPseudoInstruction(IR::Inst*& dest_inst, IR::Inst* pseudo_inst) {
+void SetPseudoInstruction(IR::Inst*& dest_inst, IR::Inst* pseudo_inst) {
     if (dest_inst) {
         throw LogicError("Only one of each type of pseudo-op allowed");
     }
     dest_inst = pseudo_inst;
 }
 
-static void RemovePseudoInstruction(IR::Inst*& inst, IR::Opcode expected_opcode) {
+void RemovePseudoInstruction(IR::Inst*& inst, IR::Opcode expected_opcode) {
     if (inst->Opcode() != expected_opcode) {
         throw LogicError("Undoing use of invalid pseudo-op");
     }
     inst = nullptr;
 }
+} // Anonymous namespace
 
 Inst::Inst(IR::Opcode op_, u32 flags_) noexcept : op{op_}, flags{flags_} {
     if (op == Opcode::Phi) {
@@ -82,6 +83,7 @@ bool Inst::IsPseudoInstruction() const noexcept {
     case Opcode::GetSignFromOp:
     case Opcode::GetCarryFromOp:
     case Opcode::GetOverflowFromOp:
+    case Opcode::GetSparseFromOp:
         return true;
     default:
         return false;
@@ -96,25 +98,26 @@ bool Inst::AreAllArgsImmediates() const {
                        [](const IR::Value& value) { return value.IsImmediate(); });
 }
 
-bool Inst::HasAssociatedPseudoOperation() const noexcept {
-    return zero_inst || sign_inst || carry_inst || overflow_inst;
-}
-
 Inst* Inst::GetAssociatedPseudoOperation(IR::Opcode opcode) {
-    // This is faster than doing a search through the block.
+    if (!associated_insts) {
+        return nullptr;
+    }
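+    // Pseudo-operations are cached on the instruction that defines them;
+    // faster than searching through the block.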
     switch (opcode) {
     case Opcode::GetZeroFromOp:
-        CheckPseudoInstruction(zero_inst, Opcode::GetZeroFromOp);
-        return zero_inst;
+        CheckPseudoInstruction(associated_insts->zero_inst, Opcode::GetZeroFromOp);
+        return associated_insts->zero_inst;
     case Opcode::GetSignFromOp:
-        CheckPseudoInstruction(sign_inst, Opcode::GetSignFromOp);
-        return sign_inst;
+        CheckPseudoInstruction(associated_insts->sign_inst, Opcode::GetSignFromOp);
+        return associated_insts->sign_inst;
     case Opcode::GetCarryFromOp:
-        CheckPseudoInstruction(carry_inst, Opcode::GetCarryFromOp);
-        return carry_inst;
+        CheckPseudoInstruction(associated_insts->carry_inst, Opcode::GetCarryFromOp);
+        return associated_insts->carry_inst;
     case Opcode::GetOverflowFromOp:
-        CheckPseudoInstruction(overflow_inst, Opcode::GetOverflowFromOp);
-        return overflow_inst;
+        CheckPseudoInstruction(associated_insts->overflow_inst, Opcode::GetOverflowFromOp);
+        return associated_insts->overflow_inst;
+    case Opcode::GetSparseFromOp:
+        CheckPseudoInstruction(associated_insts->sparse_inst, Opcode::GetSparseFromOp);
+        return associated_insts->sparse_inst;
     default:
         throw InvalidArgument("{} is not a pseudo-instruction", opcode);
     }
@@ -220,22 +223,37 @@ void Inst::ReplaceOpcode(IR::Opcode opcode) {
     op = opcode;
 }
 
+void AllocAssociatedInsts(std::unique_ptr<AssociatedInsts>& associated_insts) {
+    if (!associated_insts) {
+        associated_insts = std::make_unique<AssociatedInsts>();
+    }
+}
+
 void Inst::Use(const Value& value) {
     Inst* const inst{value.Inst()};
     ++inst->use_count;
 
+    std::unique_ptr<AssociatedInsts>& assoc_inst{inst->associated_insts};
     switch (op) {
     case Opcode::GetZeroFromOp:
-        SetPseudoInstruction(inst->zero_inst, this);
+        AllocAssociatedInsts(assoc_inst);
+        SetPseudoInstruction(assoc_inst->zero_inst, this);
         break;
     case Opcode::GetSignFromOp:
-        SetPseudoInstruction(inst->sign_inst, this);
+        AllocAssociatedInsts(assoc_inst);
+        SetPseudoInstruction(assoc_inst->sign_inst, this);
         break;
     case Opcode::GetCarryFromOp:
-        SetPseudoInstruction(inst->carry_inst, this);
+        AllocAssociatedInsts(assoc_inst);
+        SetPseudoInstruction(assoc_inst->carry_inst, this);
         break;
     case Opcode::GetOverflowFromOp:
-        SetPseudoInstruction(inst->overflow_inst, this);
+        AllocAssociatedInsts(assoc_inst);
+        SetPseudoInstruction(assoc_inst->overflow_inst, this);
+        break;
+    case Opcode::GetSparseFromOp:
+        AllocAssociatedInsts(assoc_inst);
+        SetPseudoInstruction(assoc_inst->sparse_inst, this);
         break;
     default:
         break;
@@ -246,18 +264,23 @@ void Inst::UndoUse(const Value& value) {
     Inst* const inst{value.Inst()};
     --inst->use_count;
 
+    std::unique_ptr<AssociatedInsts>& assoc_inst{inst->associated_insts};
     switch (op) {
     case Opcode::GetZeroFromOp:
-        RemovePseudoInstruction(inst->zero_inst, Opcode::GetZeroFromOp);
+        AllocAssociatedInsts(assoc_inst);
+        RemovePseudoInstruction(assoc_inst->zero_inst, Opcode::GetZeroFromOp);
         break;
     case Opcode::GetSignFromOp:
-        RemovePseudoInstruction(inst->sign_inst, Opcode::GetSignFromOp);
+        AllocAssociatedInsts(assoc_inst);
+        RemovePseudoInstruction(assoc_inst->sign_inst, Opcode::GetSignFromOp);
         break;
     case Opcode::GetCarryFromOp:
-        RemovePseudoInstruction(inst->carry_inst, Opcode::GetCarryFromOp);
+        AllocAssociatedInsts(assoc_inst);
+        RemovePseudoInstruction(assoc_inst->carry_inst, Opcode::GetCarryFromOp);
         break;
     case Opcode::GetOverflowFromOp:
-        RemovePseudoInstruction(inst->overflow_inst, Opcode::GetOverflowFromOp);
+        AllocAssociatedInsts(assoc_inst);
+        RemovePseudoInstruction(assoc_inst->overflow_inst, Opcode::GetOverflowFromOp);
         break;
+    case Opcode::GetSparseFromOp:
+        AllocAssociatedInsts(assoc_inst);
+        RemovePseudoInstruction(assoc_inst->sparse_inst, Opcode::GetSparseFromOp);
+        break;
     default:
         break;
diff --git a/src/shader_recompiler/frontend/ir/microinstruction.h b/src/shader_recompiler/frontend/ir/microinstruction.h
index 321393dd7c..d5336c4383 100644
--- a/src/shader_recompiler/frontend/ir/microinstruction.h
+++ b/src/shader_recompiler/frontend/ir/microinstruction.h
@@ -22,7 +22,7 @@ namespace Shader::IR {
 
 class Block;
 
-constexpr size_t MAX_ARG_COUNT = 4;
+struct AssociatedInsts;
 
 class Inst : public boost::intrusive::list_base_hook<> {
 public:
@@ -50,6 +50,11 @@ public:
         return op;
     }
 
+    /// Determines if there is a pseudo-operation associated with this instruction.
+    [[nodiscard]] bool HasAssociatedPseudoOperation() const noexcept {
+        return associated_insts != nullptr;
+    }
+
     /// Determines whether or not this instruction may have side effects.
     [[nodiscard]] bool MayHaveSideEffects() const noexcept;
 
@@ -60,8 +65,6 @@ public:
     /// Determines if all arguments of this instruction are immediates.
     [[nodiscard]] bool AreAllArgsImmediates() const;
 
-    /// Determines if there is a pseudo-operation associated with this instruction.
-    [[nodiscard]] bool HasAssociatedPseudoOperation() const noexcept;
     /// Gets a pseudo-operation associated with this instruction
     [[nodiscard]] Inst* GetAssociatedPseudoOperation(IR::Opcode opcode);
 
@@ -122,14 +125,21 @@ private:
     u32 definition{};
     union {
         NonTriviallyDummy dummy{};
-        std::array<Value, MAX_ARG_COUNT> args;
         std::vector<std::pair<Block*, Value>> phi_args;
+        std::array<Value, 5> args;
+    };
+    std::unique_ptr<AssociatedInsts> associated_insts;
+};
+static_assert(sizeof(Inst) <= 128, "Inst size unintentionally increased");
+
+struct AssociatedInsts {
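+    // An instruction never defines both the zero flag and sparse residency
+    // info, so the two pointers can share storage.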
+    union {
+        Inst* sparse_inst;
+        Inst* zero_inst{};
     };
-    Inst* zero_inst{};
     Inst* sign_inst{};
     Inst* carry_inst{};
     Inst* overflow_inst{};
 };
-static_assert(sizeof(Inst) <= 128, "Inst size unintentionally increased its size");
 
 } // namespace Shader::IR
diff --git a/src/shader_recompiler/frontend/ir/modifiers.h b/src/shader_recompiler/frontend/ir/modifiers.h
index 44652eae7c..ad07700aef 100644
--- a/src/shader_recompiler/frontend/ir/modifiers.h
+++ b/src/shader_recompiler/frontend/ir/modifiers.h
@@ -4,7 +4,9 @@
 
 #pragma once
 
+#include "common/bit_field.h"
 #include "common/common_types.h"
+#include "shader_recompiler/shader_info.h"
 
 namespace Shader::IR {
 
@@ -30,4 +32,12 @@ struct FpControl {
 };
 static_assert(sizeof(FpControl) <= sizeof(u32));
 
+union TextureInstInfo {
+    u32 raw;
+    BitField<0, 8, TextureType> type;
+    BitField<8, 1, u32> has_bias;
+    BitField<16, 1, u32> has_lod_clamp;
+};
+static_assert(sizeof(TextureInstInfo) <= sizeof(u32));
+
 } // namespace Shader::IR
diff --git a/src/shader_recompiler/frontend/ir/opcodes.cpp b/src/shader_recompiler/frontend/ir/opcodes.cpp
index 1f188411a9..8492a13d54 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.cpp
+++ b/src/shader_recompiler/frontend/ir/opcodes.cpp
@@ -14,7 +14,7 @@ namespace {
 struct OpcodeMeta {
     std::string_view name;
     Type type;
-    std::array<Type, 4> arg_types;
+    std::array<Type, 5> arg_types;
 };
 
 using enum Type;
diff --git a/src/shader_recompiler/frontend/ir/opcodes.inc b/src/shader_recompiler/frontend/ir/opcodes.inc
index c4e72c84d9..aa011fab1b 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.inc
+++ b/src/shader_recompiler/frontend/ir/opcodes.inc
@@ -2,301 +2,330 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-//     opcode name,                                         return type,    arg1 type,      arg2 type,      arg3 type,      arg4 type,      ...
-OPCODE(Phi,                                                 Opaque,                                                                         )
-OPCODE(Identity,                                            Opaque,         Opaque,                                                         )
-OPCODE(Void,                                                Void,                                                                           )
+//     opcode name,                                         return type,    arg1 type,      arg2 type,      arg3 type,      arg4 type,      arg5 type,      ...
+OPCODE(Phi,                                                 Opaque,                                                                                         )
+OPCODE(Identity,                                            Opaque,         Opaque,                                                                         )
+OPCODE(Void,                                                Void,                                                                                           )
 
 // Control flow
-OPCODE(Branch,                                              Void,           Label,                                                          )
-OPCODE(BranchConditional,                                   Void,           U1,             Label,          Label,                          )
-OPCODE(LoopMerge,                                           Void,           Label,          Label,                                          )
-OPCODE(SelectionMerge,                                      Void,           Label,                                                          )
-OPCODE(Return,                                              Void,                                                                           )
+OPCODE(Branch,                                              Void,           Label,                                                                          )
+OPCODE(BranchConditional,                                   Void,           U1,             Label,          Label,                                          )
+OPCODE(LoopMerge,                                           Void,           Label,          Label,                                                          )
+OPCODE(SelectionMerge,                                      Void,           Label,                                                                          )
+OPCODE(Return,                                              Void,                                                                                           )
 
 // Context getters/setters
-OPCODE(GetRegister,                                         U32,            Reg,                                                            )
-OPCODE(SetRegister,                                         Void,           Reg,            U32,                                            )
-OPCODE(GetPred,                                             U1,             Pred,                                                           )
-OPCODE(SetPred,                                             Void,           Pred,           U1,                                             )
-OPCODE(GetGotoVariable,                                     U1,             U32,                                                            )
-OPCODE(SetGotoVariable,                                     Void,           U32,            U1,                                             )
-OPCODE(GetCbuf,                                             U32,            U32,            U32,                                            )
-OPCODE(GetAttribute,                                        U32,            Attribute,                                                      )
-OPCODE(SetAttribute,                                        Void,           Attribute,      U32,                                            )
-OPCODE(GetAttributeIndexed,                                 U32,            U32,                                                            )
-OPCODE(SetAttributeIndexed,                                 Void,           U32,            U32,                                            )
-OPCODE(GetZFlag,                                            U1,             Void,                                                           )
-OPCODE(GetSFlag,                                            U1,             Void,                                                           )
-OPCODE(GetCFlag,                                            U1,             Void,                                                           )
-OPCODE(GetOFlag,                                            U1,             Void,                                                           )
-OPCODE(SetZFlag,                                            Void,           U1,                                                             )
-OPCODE(SetSFlag,                                            Void,           U1,                                                             )
-OPCODE(SetCFlag,                                            Void,           U1,                                                             )
-OPCODE(SetOFlag,                                            Void,           U1,                                                             )
-OPCODE(WorkgroupId,                                         U32x3,                                                                          )
-OPCODE(LocalInvocationId,                                   U32x3,                                                                          )
+OPCODE(GetRegister,                                         U32,            Reg,                                                                            )
+OPCODE(SetRegister,                                         Void,           Reg,            U32,                                                            )
+OPCODE(GetPred,                                             U1,             Pred,                                                                           )
+OPCODE(SetPred,                                             Void,           Pred,           U1,                                                             )
+OPCODE(GetGotoVariable,                                     U1,             U32,                                                                            )
+OPCODE(SetGotoVariable,                                     Void,           U32,            U1,                                                             )
+OPCODE(GetCbuf,                                             U32,            U32,            U32,                                                            )
+OPCODE(GetAttribute,                                        U32,            Attribute,                                                                      )
+OPCODE(SetAttribute,                                        Void,           Attribute,      U32,                                                            )
+OPCODE(GetAttributeIndexed,                                 U32,            U32,                                                                            )
+OPCODE(SetAttributeIndexed,                                 Void,           U32,            U32,                                                            )
+OPCODE(GetZFlag,                                            U1,             Void,                                                                           )
+OPCODE(GetSFlag,                                            U1,             Void,                                                                           )
+OPCODE(GetCFlag,                                            U1,             Void,                                                                           )
+OPCODE(GetOFlag,                                            U1,             Void,                                                                           )
+OPCODE(SetZFlag,                                            Void,           U1,                                                                             )
+OPCODE(SetSFlag,                                            Void,           U1,                                                                             )
+OPCODE(SetCFlag,                                            Void,           U1,                                                                             )
+OPCODE(SetOFlag,                                            Void,           U1,                                                                             )
+OPCODE(WorkgroupId,                                         U32x3,                                                                                          )
+OPCODE(LocalInvocationId,                                   U32x3,                                                                                          )
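
Each row declares one IR instruction as OPCODE(name, result type, argument
types...); the whitespace churn in this hunk comes from widening every row,
apparently to make room for a fifth argument column. For reference, an
X-macro table in this style is consumed by defining OPCODE and including the
file; a minimal sketch of that pattern, not a quote of the project's headers:

// Illustrative only: the enum name and parameter names are assumptions.
enum class Opcode {
#define OPCODE(name, result_type, ...) name,
#include "opcodes.inc"
#undef OPCODE
};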
 
 // Undefined
-OPCODE(UndefU1,                                             U1,                                                                             )
-OPCODE(UndefU8,                                             U8,                                                                             )
-OPCODE(UndefU16,                                            U16,                                                                            )
-OPCODE(UndefU32,                                            U32,                                                                            )
-OPCODE(UndefU64,                                            U64,                                                                            )
+OPCODE(UndefU1,                                             U1,                                                                                             )
+OPCODE(UndefU8,                                             U8,                                                                                             )
+OPCODE(UndefU16,                                            U16,                                                                                            )
+OPCODE(UndefU32,                                            U32,                                                                                            )
+OPCODE(UndefU64,                                            U64,                                                                                            )
 
 // Memory operations
-OPCODE(LoadGlobalU8,                                        U32,            U64,                                                            )
-OPCODE(LoadGlobalS8,                                        U32,            U64,                                                            )
-OPCODE(LoadGlobalU16,                                       U32,            U64,                                                            )
-OPCODE(LoadGlobalS16,                                       U32,            U64,                                                            )
-OPCODE(LoadGlobal32,                                        U32,            U64,                                                            )
-OPCODE(LoadGlobal64,                                        U32x2,          U64,                                                            )
-OPCODE(LoadGlobal128,                                       U32x4,          U64,                                                            )
-OPCODE(WriteGlobalU8,                                       Void,           U64,            U32,                                            )
-OPCODE(WriteGlobalS8,                                       Void,           U64,            U32,                                            )
-OPCODE(WriteGlobalU16,                                      Void,           U64,            U32,                                            )
-OPCODE(WriteGlobalS16,                                      Void,           U64,            U32,                                            )
-OPCODE(WriteGlobal32,                                       Void,           U64,            U32,                                            )
-OPCODE(WriteGlobal64,                                       Void,           U64,            U32x2,                                          )
-OPCODE(WriteGlobal128,                                      Void,           U64,            U32x4,                                          )
+OPCODE(LoadGlobalU8,                                        U32,            U64,                                                                            )
+OPCODE(LoadGlobalS8,                                        U32,            U64,                                                                            )
+OPCODE(LoadGlobalU16,                                       U32,            U64,                                                                            )
+OPCODE(LoadGlobalS16,                                       U32,            U64,                                                                            )
+OPCODE(LoadGlobal32,                                        U32,            U64,                                                                            )
+OPCODE(LoadGlobal64,                                        U32x2,          U64,                                                                            )
+OPCODE(LoadGlobal128,                                       U32x4,          U64,                                                                            )
+OPCODE(WriteGlobalU8,                                       Void,           U64,            U32,                                                            )
+OPCODE(WriteGlobalS8,                                       Void,           U64,            U32,                                                            )
+OPCODE(WriteGlobalU16,                                      Void,           U64,            U32,                                                            )
+OPCODE(WriteGlobalS16,                                      Void,           U64,            U32,                                                            )
+OPCODE(WriteGlobal32,                                       Void,           U64,            U32,                                                            )
+OPCODE(WriteGlobal64,                                       Void,           U64,            U32x2,                                                          )
+OPCODE(WriteGlobal128,                                      Void,           U64,            U32x4,                                                          )
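
Two things are visible straight from this group: addresses are a single U64
operand, and the sub-word loads (U8/S8/U16/S16) still return a full U32. If
the S/U variants follow the usual convention, the signed forms sign-extend
into the 32-bit result and the unsigned forms zero-extend; a plain C++
analogue of that assumption:

#include <cstdint>

// Hypothetical analogue of LoadGlobalS8/LoadGlobalU8; the extension
// behaviour is inferred from the S/U naming, not quoted from the backend.
uint32_t LoadGlobalS8Analogue(const uint8_t* memory, uint64_t address) {
    const int8_t value = static_cast<int8_t>(memory[address]);
    return static_cast<uint32_t>(static_cast<int32_t>(value)); // sign-extend
}

uint32_t LoadGlobalU8Analogue(const uint8_t* memory, uint64_t address) {
    return static_cast<uint32_t>(memory[address]); // zero-extend
}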
 
 // Storage buffer operations
-OPCODE(LoadStorageU8,                                       U32,            U32,            U32,                                            )
-OPCODE(LoadStorageS8,                                       U32,            U32,            U32,                                            )
-OPCODE(LoadStorageU16,                                      U32,            U32,            U32,                                            )
-OPCODE(LoadStorageS16,                                      U32,            U32,            U32,                                            )
-OPCODE(LoadStorage32,                                       U32,            U32,            U32,                                            )
-OPCODE(LoadStorage64,                                       U32x2,          U32,            U32,                                            )
-OPCODE(LoadStorage128,                                      U32x4,          U32,            U32,                                            )
-OPCODE(WriteStorageU8,                                      Void,           U32,            U32,            U32,                            )
-OPCODE(WriteStorageS8,                                      Void,           U32,            U32,            U32,                            )
-OPCODE(WriteStorageU16,                                     Void,           U32,            U32,            U32,                            )
-OPCODE(WriteStorageS16,                                     Void,           U32,            U32,            U32,                            )
-OPCODE(WriteStorage32,                                      Void,           U32,            U32,            U32,                            )
-OPCODE(WriteStorage64,                                      Void,           U32,            U32,            U32x2,                          )
-OPCODE(WriteStorage128,                                     Void,           U32,            U32,            U32x4,                          )
+OPCODE(LoadStorageU8,                                       U32,            U32,            U32,                                                            )
+OPCODE(LoadStorageS8,                                       U32,            U32,            U32,                                                            )
+OPCODE(LoadStorageU16,                                      U32,            U32,            U32,                                                            )
+OPCODE(LoadStorageS16,                                      U32,            U32,            U32,                                                            )
+OPCODE(LoadStorage32,                                       U32,            U32,            U32,                                                            )
+OPCODE(LoadStorage64,                                       U32x2,          U32,            U32,                                                            )
+OPCODE(LoadStorage128,                                      U32x4,          U32,            U32,                                                            )
+OPCODE(WriteStorageU8,                                      Void,           U32,            U32,            U32,                                            )
+OPCODE(WriteStorageS8,                                      Void,           U32,            U32,            U32,                                            )
+OPCODE(WriteStorageU16,                                     Void,           U32,            U32,            U32,                                            )
+OPCODE(WriteStorageS16,                                     Void,           U32,            U32,            U32,                                            )
+OPCODE(WriteStorage32,                                      Void,           U32,            U32,            U32,                                            )
+OPCODE(WriteStorage64,                                      Void,           U32,            U32,            U32x2,                                          )
+OPCODE(WriteStorage128,                                     Void,           U32,            U32,            U32x4,                                          )
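
The storage rows mirror the global ones, but every U64 pointer is replaced by
two U32 operands, which reads as a (buffer binding, byte offset) pair resolved
through some binding table rather than a raw address. A hypothetical analogue,
with the container types and lookup scheme invented for illustration:

#include <cstdint>
#include <cstring>
#include <span>
#include <vector>

// Sketch of what LoadStorage32's (binding, offset) operands could mean.
uint32_t LoadStorage32Analogue(std::span<const std::vector<uint8_t>> buffers,
                               uint32_t binding, uint32_t offset) {
    uint32_t value{};
    std::memcpy(&value, buffers[binding].data() + offset, sizeof(value));
    return value;
}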
 
 // Vector utility
-OPCODE(CompositeConstructU32x2,                             U32x2,          U32,            U32,                                            )
-OPCODE(CompositeConstructU32x3,                             U32x3,          U32,            U32,            U32,                            )
-OPCODE(CompositeConstructU32x4,                             U32x4,          U32,            U32,            U32,            U32,            )
-OPCODE(CompositeExtractU32x2,                               U32,            U32x2,          U32,                                            )
-OPCODE(CompositeExtractU32x3,                               U32,            U32x3,          U32,                                            )
-OPCODE(CompositeExtractU32x4,                               U32,            U32x4,          U32,                                            )
-OPCODE(CompositeInsertU32x2,                                U32x2,          U32x2,          U32,            U32,                            )
-OPCODE(CompositeInsertU32x3,                                U32x3,          U32x3,          U32,            U32,                            )
-OPCODE(CompositeInsertU32x4,                                U32x4,          U32x4,          U32,            U32,                            )
-OPCODE(CompositeConstructF16x2,                             F16x2,          F16,            F16,                                            )
-OPCODE(CompositeConstructF16x3,                             F16x3,          F16,            F16,            F16,                            )
-OPCODE(CompositeConstructF16x4,                             F16x4,          F16,            F16,            F16,            F16,            )
-OPCODE(CompositeExtractF16x2,                               F16,            F16x2,          U32,                                            )
-OPCODE(CompositeExtractF16x3,                               F16,            F16x3,          U32,                                            )
-OPCODE(CompositeExtractF16x4,                               F16,            F16x4,          U32,                                            )
-OPCODE(CompositeInsertF16x2,                                F16x2,          F16x2,          F16,            U32,                            )
-OPCODE(CompositeInsertF16x3,                                F16x3,          F16x3,          F16,            U32,                            )
-OPCODE(CompositeInsertF16x4,                                F16x4,          F16x4,          F16,            U32,                            )
-OPCODE(CompositeConstructF32x2,                             F32x2,          F32,            F32,                                            )
-OPCODE(CompositeConstructF32x3,                             F32x3,          F32,            F32,            F32,                            )
-OPCODE(CompositeConstructF32x4,                             F32x4,          F32,            F32,            F32,            F32,            )
-OPCODE(CompositeExtractF32x2,                               F32,            F32x2,          U32,                                            )
-OPCODE(CompositeExtractF32x3,                               F32,            F32x3,          U32,                                            )
-OPCODE(CompositeExtractF32x4,                               F32,            F32x4,          U32,                                            )
-OPCODE(CompositeInsertF32x2,                                F32x2,          F32x2,          F32,            U32,                            )
-OPCODE(CompositeInsertF32x3,                                F32x3,          F32x3,          F32,            U32,                            )
-OPCODE(CompositeInsertF32x4,                                F32x4,          F32x4,          F32,            U32,                            )
-OPCODE(CompositeConstructF64x2,                             F64x2,          F64,            F64,                                            )
-OPCODE(CompositeConstructF64x3,                             F64x3,          F64,            F64,            F64,                            )
-OPCODE(CompositeConstructF64x4,                             F64x4,          F64,            F64,            F64,            F64,            )
-OPCODE(CompositeExtractF64x2,                               F64,            F64x2,          U32,                                            )
-OPCODE(CompositeExtractF64x3,                               F64,            F64x3,          U32,                                            )
-OPCODE(CompositeExtractF64x4,                               F64,            F64x4,          U32,                                            )
-OPCODE(CompositeInsertF64x2,                                F64x2,          F64x2,          F64,            U32,                            )
-OPCODE(CompositeInsertF64x3,                                F64x3,          F64x3,          F64,            U32,                            )
-OPCODE(CompositeInsertF64x4,                                F64x4,          F64x4,          F64,            U32,                            )
+OPCODE(CompositeConstructU32x2,                             U32x2,          U32,            U32,                                                            )
+OPCODE(CompositeConstructU32x3,                             U32x3,          U32,            U32,            U32,                                            )
+OPCODE(CompositeConstructU32x4,                             U32x4,          U32,            U32,            U32,            U32,                            )
+OPCODE(CompositeExtractU32x2,                               U32,            U32x2,          U32,                                                            )
+OPCODE(CompositeExtractU32x3,                               U32,            U32x3,          U32,                                                            )
+OPCODE(CompositeExtractU32x4,                               U32,            U32x4,          U32,                                                            )
+OPCODE(CompositeInsertU32x2,                                U32x2,          U32x2,          U32,            U32,                                            )
+OPCODE(CompositeInsertU32x3,                                U32x3,          U32x3,          U32,            U32,                                            )
+OPCODE(CompositeInsertU32x4,                                U32x4,          U32x4,          U32,            U32,                                            )
+OPCODE(CompositeConstructF16x2,                             F16x2,          F16,            F16,                                                            )
+OPCODE(CompositeConstructF16x3,                             F16x3,          F16,            F16,            F16,                                            )
+OPCODE(CompositeConstructF16x4,                             F16x4,          F16,            F16,            F16,            F16,                            )
+OPCODE(CompositeExtractF16x2,                               F16,            F16x2,          U32,                                                            )
+OPCODE(CompositeExtractF16x3,                               F16,            F16x3,          U32,                                                            )
+OPCODE(CompositeExtractF16x4,                               F16,            F16x4,          U32,                                                            )
+OPCODE(CompositeInsertF16x2,                                F16x2,          F16x2,          F16,            U32,                                            )
+OPCODE(CompositeInsertF16x3,                                F16x3,          F16x3,          F16,            U32,                                            )
+OPCODE(CompositeInsertF16x4,                                F16x4,          F16x4,          F16,            U32,                                            )
+OPCODE(CompositeConstructF32x2,                             F32x2,          F32,            F32,                                                            )
+OPCODE(CompositeConstructF32x3,                             F32x3,          F32,            F32,            F32,                                            )
+OPCODE(CompositeConstructF32x4,                             F32x4,          F32,            F32,            F32,            F32,                            )
+OPCODE(CompositeExtractF32x2,                               F32,            F32x2,          U32,                                                            )
+OPCODE(CompositeExtractF32x3,                               F32,            F32x3,          U32,                                                            )
+OPCODE(CompositeExtractF32x4,                               F32,            F32x4,          U32,                                                            )
+OPCODE(CompositeInsertF32x2,                                F32x2,          F32x2,          F32,            U32,                                            )
+OPCODE(CompositeInsertF32x3,                                F32x3,          F32x3,          F32,            U32,                                            )
+OPCODE(CompositeInsertF32x4,                                F32x4,          F32x4,          F32,            U32,                                            )
+OPCODE(CompositeConstructF64x2,                             F64x2,          F64,            F64,                                                            )
+OPCODE(CompositeConstructF64x3,                             F64x3,          F64,            F64,            F64,                                            )
+OPCODE(CompositeConstructF64x4,                             F64x4,          F64,            F64,            F64,            F64,                            )
+OPCODE(CompositeExtractF64x2,                               F64,            F64x2,          U32,                                                            )
+OPCODE(CompositeExtractF64x3,                               F64,            F64x3,          U32,                                                            )
+OPCODE(CompositeExtractF64x4,                               F64,            F64x4,          U32,                                                            )
+OPCODE(CompositeInsertF64x2,                                F64x2,          F64x2,          F64,            U32,                                            )
+OPCODE(CompositeInsertF64x3,                                F64x3,          F64x3,          F64,            U32,                                            )
+OPCODE(CompositeInsertF64x4,                                F64x4,          F64x4,          F64,            U32,                                            )
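
The composite rows follow one shape per vector width: construct builds a
vector from scalars, extract takes (vector, U32 index) and returns one lane,
and insert takes (vector, scalar, U32 index). Assuming insert matches SPIR-V's
OpCompositeInsert, it produces a new vector rather than mutating its input:

#include <array>
#include <cstdint>

using U32x4 = std::array<uint32_t, 4>;

uint32_t CompositeExtractU32x4Analogue(const U32x4& vec, uint32_t index) {
    return vec[index];
}

U32x4 CompositeInsertU32x4Analogue(U32x4 vec, uint32_t value, uint32_t index) {
    vec[index] = value; // vec is a copy; the caller's vector is untouched
    return vec;
}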
 
 // Select operations
-OPCODE(SelectU1,                                            U1,             U1,             U1,             U1,                             )
-OPCODE(SelectU8,                                            U8,             U1,             U8,             U8,                             )
-OPCODE(SelectU16,                                           U16,            U1,             U16,            U16,                            )
-OPCODE(SelectU32,                                           U32,            U1,             U32,            U32,                            )
-OPCODE(SelectU64,                                           U64,            U1,             U64,            U64,                            )
-OPCODE(SelectF16,                                           F16,            U1,             F16,            F16,                            )
-OPCODE(SelectF32,                                           F32,            U1,             F32,            F32,                            )
+OPCODE(SelectU1,                                            U1,             U1,             U1,             U1,                                             )
+OPCODE(SelectU8,                                            U8,             U1,             U8,             U8,                                             )
+OPCODE(SelectU16,                                           U16,            U1,             U16,            U16,                                            )
+OPCODE(SelectU32,                                           U32,            U1,             U32,            U32,                                            )
+OPCODE(SelectU64,                                           U64,            U1,             U64,            U64,                                            )
+OPCODE(SelectF16,                                           F16,            U1,             F16,            F16,                                            )
+OPCODE(SelectF32,                                           F32,            U1,             F32,            F32,                                            )
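
Every select row has the same shape, (U1 condition, T on_true, T on_false)
returning T, which is a per-type ternary. As a generic C++ analogue:

// Equivalent of the Select* family for any value type T.
template <typename T>
T Select(bool condition, T on_true, T on_false) {
    return condition ? on_true : on_false;
}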
 
 // Bitwise conversions
-OPCODE(BitCastU16F16,                                       U16,            F16,                                                            )
-OPCODE(BitCastU32F32,                                       U32,            F32,                                                            )
-OPCODE(BitCastU64F64,                                       U64,            F64,                                                            )
-OPCODE(BitCastF16U16,                                       F16,            U16,                                                            )
-OPCODE(BitCastF32U32,                                       F32,            U32,                                                            )
-OPCODE(BitCastF64U64,                                       F64,            U64,                                                            )
-OPCODE(PackUint2x32,                                        U64,            U32x2,                                                          )
-OPCODE(UnpackUint2x32,                                      U32x2,          U64,                                                            )
-OPCODE(PackFloat2x16,                                       U32,            F16x2,                                                          )
-OPCODE(UnpackFloat2x16,                                     F16x2,          U32,                                                            )
-OPCODE(PackHalf2x16,                                        U32,            F32x2,                                                          )
-OPCODE(UnpackHalf2x16,                                      F32x2,          U32,                                                            )
-OPCODE(PackDouble2x32,                                      F64,            U32x2,                                                          )
-OPCODE(UnpackDouble2x32,                                    U32x2,          F64,                                                            )
+OPCODE(BitCastU16F16,                                       U16,            F16,                                                                            )
+OPCODE(BitCastU32F32,                                       U32,            F32,                                                                            )
+OPCODE(BitCastU64F64,                                       U64,            F64,                                                                            )
+OPCODE(BitCastF16U16,                                       F16,            U16,                                                                            )
+OPCODE(BitCastF32U32,                                       F32,            U32,                                                                            )
+OPCODE(BitCastF64U64,                                       F64,            U64,                                                                            )
+OPCODE(PackUint2x32,                                        U64,            U32x2,                                                                          )
+OPCODE(UnpackUint2x32,                                      U32x2,          U64,                                                                            )
+OPCODE(PackFloat2x16,                                       U32,            F16x2,                                                                          )
+OPCODE(UnpackFloat2x16,                                     F16x2,          U32,                                                                            )
+OPCODE(PackHalf2x16,                                        U32,            F32x2,                                                                          )
+OPCODE(UnpackHalf2x16,                                      F32x2,          U32,                                                                            )
+OPCODE(PackDouble2x32,                                      F64,            U32x2,                                                                          )
+OPCODE(UnpackDouble2x32,                                    U32x2,          F64,                                                                            )
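
The BitCast rows reinterpret bits between integer and float types of equal
width, and the Pack/Unpack rows join or split values of half the result's
width. PackUint2x32 is the simplest case; which element lands in the low half
is an assumption below (GLSL's packUint2x32 puts the first component there):

#include <cstdint>
#include <utility>

uint64_t PackUint2x32Analogue(uint32_t lo, uint32_t hi) {
    return static_cast<uint64_t>(lo) | (static_cast<uint64_t>(hi) << 32);
}

std::pair<uint32_t, uint32_t> UnpackUint2x32Analogue(uint64_t value) {
    return {static_cast<uint32_t>(value), static_cast<uint32_t>(value >> 32)};
}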
 
 // Pseudo-operations, handled specially at final emit
-OPCODE(GetZeroFromOp,                                       U1,             Opaque,                                                         )
-OPCODE(GetSignFromOp,                                       U1,             Opaque,                                                         )
-OPCODE(GetCarryFromOp,                                      U1,             Opaque,                                                         )
-OPCODE(GetOverflowFromOp,                                   U1,             Opaque,                                                         )
+OPCODE(GetZeroFromOp,                                       U1,             Opaque,                                                                         )
+OPCODE(GetSignFromOp,                                       U1,             Opaque,                                                                         )
+OPCODE(GetCarryFromOp,                                      U1,             Opaque,                                                                         )
+OPCODE(GetOverflowFromOp,                                   U1,             Opaque,                                                                         )
+OPCODE(GetSparseFromOp,                                     U1,             Opaque,                                                                         )
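
GetSparseFromOp is the one new row in this group; given the rest of this
change, it presumably surfaces the sparse-residency result of a texture
operation. Like its siblings it takes an Opaque reference to the producing
instruction and is resolved at final emit. For intuition, what GetCarryFromOp
means for a 32-bit add can be written directly:

#include <cstdint>

// The carry of IAdd32 in plain C++: unsigned addition wraps modulo 2^32,
// and wrap-around is exactly a carry out of bit 31.
bool CarryOfIAdd32(uint32_t a, uint32_t b) {
    const uint32_t sum = a + b;
    return sum < a;
}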
 
 // Floating-point operations
-OPCODE(FPAbs16,                                             F16,            F16,                                                            )
-OPCODE(FPAbs32,                                             F32,            F32,                                                            )
-OPCODE(FPAbs64,                                             F64,            F64,                                                            )
-OPCODE(FPAdd16,                                             F16,            F16,            F16,                                            )
-OPCODE(FPAdd32,                                             F32,            F32,            F32,                                            )
-OPCODE(FPAdd64,                                             F64,            F64,            F64,                                            )
-OPCODE(FPFma16,                                             F16,            F16,            F16,            F16,                            )
-OPCODE(FPFma32,                                             F32,            F32,            F32,            F32,                            )
-OPCODE(FPFma64,                                             F64,            F64,            F64,            F64,                            )
-OPCODE(FPMax32,                                             F32,            F32,            F32,                                            )
-OPCODE(FPMax64,                                             F64,            F64,            F64,                                            )
-OPCODE(FPMin32,                                             F32,            F32,            F32,                                            )
-OPCODE(FPMin64,                                             F64,            F64,            F64,                                            )
-OPCODE(FPMul16,                                             F16,            F16,            F16,                                            )
-OPCODE(FPMul32,                                             F32,            F32,            F32,                                            )
-OPCODE(FPMul64,                                             F64,            F64,            F64,                                            )
-OPCODE(FPNeg16,                                             F16,            F16,                                                            )
-OPCODE(FPNeg32,                                             F32,            F32,                                                            )
-OPCODE(FPNeg64,                                             F64,            F64,                                                            )
-OPCODE(FPRecip32,                                           F32,            F32,                                                            )
-OPCODE(FPRecip64,                                           F64,            F64,                                                            )
-OPCODE(FPRecipSqrt32,                                       F32,            F32,                                                            )
-OPCODE(FPRecipSqrt64,                                       F64,            F64,                                                            )
-OPCODE(FPSqrt,                                              F32,            F32,                                                            )
-OPCODE(FPSin,                                               F32,            F32,                                                            )
-OPCODE(FPExp2,                                              F32,            F32,                                                            )
-OPCODE(FPCos,                                               F32,            F32,                                                            )
-OPCODE(FPLog2,                                              F32,            F32,                                                            )
-OPCODE(FPSaturate16,                                        F16,            F16,                                                            )
-OPCODE(FPSaturate32,                                        F32,            F32,                                                            )
-OPCODE(FPSaturate64,                                        F64,            F64,                                                            )
-OPCODE(FPRoundEven16,                                       F16,            F16,                                                            )
-OPCODE(FPRoundEven32,                                       F32,            F32,                                                            )
-OPCODE(FPRoundEven64,                                       F64,            F64,                                                            )
-OPCODE(FPFloor16,                                           F16,            F16,                                                            )
-OPCODE(FPFloor32,                                           F32,            F32,                                                            )
-OPCODE(FPFloor64,                                           F64,            F64,                                                            )
-OPCODE(FPCeil16,                                            F16,            F16,                                                            )
-OPCODE(FPCeil32,                                            F32,            F32,                                                            )
-OPCODE(FPCeil64,                                            F64,            F64,                                                            )
-OPCODE(FPTrunc16,                                           F16,            F16,                                                            )
-OPCODE(FPTrunc32,                                           F32,            F32,                                                            )
-OPCODE(FPTrunc64,                                           F64,            F64,                                                            )
+OPCODE(FPAbs16,                                             F16,            F16,                                                                            )
+OPCODE(FPAbs32,                                             F32,            F32,                                                                            )
+OPCODE(FPAbs64,                                             F64,            F64,                                                                            )
+OPCODE(FPAdd16,                                             F16,            F16,            F16,                                                            )
+OPCODE(FPAdd32,                                             F32,            F32,            F32,                                                            )
+OPCODE(FPAdd64,                                             F64,            F64,            F64,                                                            )
+OPCODE(FPFma16,                                             F16,            F16,            F16,            F16,                                            )
+OPCODE(FPFma32,                                             F32,            F32,            F32,            F32,                                            )
+OPCODE(FPFma64,                                             F64,            F64,            F64,            F64,                                            )
+OPCODE(FPMax32,                                             F32,            F32,            F32,                                                            )
+OPCODE(FPMax64,                                             F64,            F64,            F64,                                                            )
+OPCODE(FPMin32,                                             F32,            F32,            F32,                                                            )
+OPCODE(FPMin64,                                             F64,            F64,            F64,                                                            )
+OPCODE(FPMul16,                                             F16,            F16,            F16,                                                            )
+OPCODE(FPMul32,                                             F32,            F32,            F32,                                                            )
+OPCODE(FPMul64,                                             F64,            F64,            F64,                                                            )
+OPCODE(FPNeg16,                                             F16,            F16,                                                                            )
+OPCODE(FPNeg32,                                             F32,            F32,                                                                            )
+OPCODE(FPNeg64,                                             F64,            F64,                                                                            )
+OPCODE(FPRecip32,                                           F32,            F32,                                                                            )
+OPCODE(FPRecip64,                                           F64,            F64,                                                                            )
+OPCODE(FPRecipSqrt32,                                       F32,            F32,                                                                            )
+OPCODE(FPRecipSqrt64,                                       F64,            F64,                                                                            )
+OPCODE(FPSqrt,                                              F32,            F32,                                                                            )
+OPCODE(FPSin,                                               F32,            F32,                                                                            )
+OPCODE(FPExp2,                                              F32,            F32,                                                                            )
+OPCODE(FPCos,                                               F32,            F32,                                                                            )
+OPCODE(FPLog2,                                              F32,            F32,                                                                            )
+OPCODE(FPSaturate16,                                        F16,            F16,                                                                            )
+OPCODE(FPSaturate32,                                        F32,            F32,                                                                            )
+OPCODE(FPSaturate64,                                        F64,            F64,                                                                            )
+OPCODE(FPRoundEven16,                                       F16,            F16,                                                                            )
+OPCODE(FPRoundEven32,                                       F32,            F32,                                                                            )
+OPCODE(FPRoundEven64,                                       F64,            F64,                                                                            )
+OPCODE(FPFloor16,                                           F16,            F16,                                                                            )
+OPCODE(FPFloor32,                                           F32,            F32,                                                                            )
+OPCODE(FPFloor64,                                           F64,            F64,                                                                            )
+OPCODE(FPCeil16,                                            F16,            F16,                                                                            )
+OPCODE(FPCeil32,                                            F32,            F32,                                                                            )
+OPCODE(FPCeil64,                                            F64,            F64,                                                                            )
+OPCODE(FPTrunc16,                                           F16,            F16,                                                                            )
+OPCODE(FPTrunc32,                                           F32,            F32,                                                                            )
+OPCODE(FPTrunc64,                                           F64,            F64,                                                                            )
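
FPFma{16,32,64} take three operands (a, b, c); assuming they carry the usual
fused multiply-add contract, the result is a * b + c with a single rounding
step, which std::fma demonstrates:

#include <cmath>

// One rounding at the end, unlike the two roundings in (a * b) + c.
float FPFma32Analogue(float a, float b, float c) {
    return std::fma(a, b, c);
}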
 
-OPCODE(FPOrdEqual16,                                        U1,             F16,            F16,                                            )
-OPCODE(FPOrdEqual32,                                        U1,             F32,            F32,                                            )
-OPCODE(FPOrdEqual64,                                        U1,             F64,            F64,                                            )
-OPCODE(FPUnordEqual16,                                      U1,             F16,            F16,                                            )
-OPCODE(FPUnordEqual32,                                      U1,             F32,            F32,                                            )
-OPCODE(FPUnordEqual64,                                      U1,             F64,            F64,                                            )
-OPCODE(FPOrdNotEqual16,                                     U1,             F16,            F16,                                            )
-OPCODE(FPOrdNotEqual32,                                     U1,             F32,            F32,                                            )
-OPCODE(FPOrdNotEqual64,                                     U1,             F64,            F64,                                            )
-OPCODE(FPUnordNotEqual16,                                   U1,             F16,            F16,                                            )
-OPCODE(FPUnordNotEqual32,                                   U1,             F32,            F32,                                            )
-OPCODE(FPUnordNotEqual64,                                   U1,             F64,            F64,                                            )
-OPCODE(FPOrdLessThan16,                                     U1,             F16,            F16,                                            )
-OPCODE(FPOrdLessThan32,                                     U1,             F32,            F32,                                            )
-OPCODE(FPOrdLessThan64,                                     U1,             F64,            F64,                                            )
-OPCODE(FPUnordLessThan16,                                   U1,             F16,            F16,                                            )
-OPCODE(FPUnordLessThan32,                                   U1,             F32,            F32,                                            )
-OPCODE(FPUnordLessThan64,                                   U1,             F64,            F64,                                            )
-OPCODE(FPOrdGreaterThan16,                                  U1,             F16,            F16,                                            )
-OPCODE(FPOrdGreaterThan32,                                  U1,             F32,            F32,                                            )
-OPCODE(FPOrdGreaterThan64,                                  U1,             F64,            F64,                                            )
-OPCODE(FPUnordGreaterThan16,                                U1,             F16,            F16,                                            )
-OPCODE(FPUnordGreaterThan32,                                U1,             F32,            F32,                                            )
-OPCODE(FPUnordGreaterThan64,                                U1,             F64,            F64,                                            )
-OPCODE(FPOrdLessThanEqual16,                                U1,             F16,            F16,                                            )
-OPCODE(FPOrdLessThanEqual32,                                U1,             F32,            F32,                                            )
-OPCODE(FPOrdLessThanEqual64,                                U1,             F64,            F64,                                            )
-OPCODE(FPUnordLessThanEqual16,                              U1,             F16,            F16,                                            )
-OPCODE(FPUnordLessThanEqual32,                              U1,             F32,            F32,                                            )
-OPCODE(FPUnordLessThanEqual64,                              U1,             F64,            F64,                                            )
-OPCODE(FPOrdGreaterThanEqual16,                             U1,             F16,            F16,                                            )
-OPCODE(FPOrdGreaterThanEqual32,                             U1,             F32,            F32,                                            )
-OPCODE(FPOrdGreaterThanEqual64,                             U1,             F64,            F64,                                            )
-OPCODE(FPUnordGreaterThanEqual16,                           U1,             F16,            F16,                                            )
-OPCODE(FPUnordGreaterThanEqual32,                           U1,             F32,            F32,                                            )
-OPCODE(FPUnordGreaterThanEqual64,                           U1,             F64,            F64,                                            )
+OPCODE(FPOrdEqual16,                                        U1,             F16,            F16,                                                            )
+OPCODE(FPOrdEqual32,                                        U1,             F32,            F32,                                                            )
+OPCODE(FPOrdEqual64,                                        U1,             F64,            F64,                                                            )
+OPCODE(FPUnordEqual16,                                      U1,             F16,            F16,                                                            )
+OPCODE(FPUnordEqual32,                                      U1,             F32,            F32,                                                            )
+OPCODE(FPUnordEqual64,                                      U1,             F64,            F64,                                                            )
+OPCODE(FPOrdNotEqual16,                                     U1,             F16,            F16,                                                            )
+OPCODE(FPOrdNotEqual32,                                     U1,             F32,            F32,                                                            )
+OPCODE(FPOrdNotEqual64,                                     U1,             F64,            F64,                                                            )
+OPCODE(FPUnordNotEqual16,                                   U1,             F16,            F16,                                                            )
+OPCODE(FPUnordNotEqual32,                                   U1,             F32,            F32,                                                            )
+OPCODE(FPUnordNotEqual64,                                   U1,             F64,            F64,                                                            )
+OPCODE(FPOrdLessThan16,                                     U1,             F16,            F16,                                                            )
+OPCODE(FPOrdLessThan32,                                     U1,             F32,            F32,                                                            )
+OPCODE(FPOrdLessThan64,                                     U1,             F64,            F64,                                                            )
+OPCODE(FPUnordLessThan16,                                   U1,             F16,            F16,                                                            )
+OPCODE(FPUnordLessThan32,                                   U1,             F32,            F32,                                                            )
+OPCODE(FPUnordLessThan64,                                   U1,             F64,            F64,                                                            )
+OPCODE(FPOrdGreaterThan16,                                  U1,             F16,            F16,                                                            )
+OPCODE(FPOrdGreaterThan32,                                  U1,             F32,            F32,                                                            )
+OPCODE(FPOrdGreaterThan64,                                  U1,             F64,            F64,                                                            )
+OPCODE(FPUnordGreaterThan16,                                U1,             F16,            F16,                                                            )
+OPCODE(FPUnordGreaterThan32,                                U1,             F32,            F32,                                                            )
+OPCODE(FPUnordGreaterThan64,                                U1,             F64,            F64,                                                            )
+OPCODE(FPOrdLessThanEqual16,                                U1,             F16,            F16,                                                            )
+OPCODE(FPOrdLessThanEqual32,                                U1,             F32,            F32,                                                            )
+OPCODE(FPOrdLessThanEqual64,                                U1,             F64,            F64,                                                            )
+OPCODE(FPUnordLessThanEqual16,                              U1,             F16,            F16,                                                            )
+OPCODE(FPUnordLessThanEqual32,                              U1,             F32,            F32,                                                            )
+OPCODE(FPUnordLessThanEqual64,                              U1,             F64,            F64,                                                            )
+OPCODE(FPOrdGreaterThanEqual16,                             U1,             F16,            F16,                                                            )
+OPCODE(FPOrdGreaterThanEqual32,                             U1,             F32,            F32,                                                            )
+OPCODE(FPOrdGreaterThanEqual64,                             U1,             F64,            F64,                                                            )
+OPCODE(FPUnordGreaterThanEqual16,                           U1,             F16,            F16,                                                            )
+OPCODE(FPUnordGreaterThanEqual32,                           U1,             F32,            F32,                                                            )
+OPCODE(FPUnordGreaterThanEqual64,                           U1,             F64,            F64,                                                            )
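
The Ord/Unord pairs differ only in NaN handling, matching the IEEE-754 (and
SPIR-V FOrd*/FUnord*) convention: an ordered compare is false when either
input is NaN, and its unordered twin is true in that case. For one pair:

#include <cmath>

bool FPOrdLessThan32Analogue(float lhs, float rhs) {
    return !std::isnan(lhs) && !std::isnan(rhs) && lhs < rhs;
}

bool FPUnordLessThan32Analogue(float lhs, float rhs) {
    return std::isnan(lhs) || std::isnan(rhs) || lhs < rhs;
}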
 
 // Integer operations
-OPCODE(IAdd32,                                              U32,            U32,            U32,                                            )
-OPCODE(IAdd64,                                              U64,            U64,            U64,                                            )
-OPCODE(ISub32,                                              U32,            U32,            U32,                                            )
-OPCODE(ISub64,                                              U64,            U64,            U64,                                            )
-OPCODE(IMul32,                                              U32,            U32,            U32,                                            )
-OPCODE(INeg32,                                              U32,            U32,                                                            )
-OPCODE(INeg64,                                              U64,            U64,                                                            )
-OPCODE(IAbs32,                                              U32,            U32,                                                            )
-OPCODE(ShiftLeftLogical32,                                  U32,            U32,            U32,                                            )
-OPCODE(ShiftLeftLogical64,                                  U64,            U64,            U32,                                            )
-OPCODE(ShiftRightLogical32,                                 U32,            U32,            U32,                                            )
-OPCODE(ShiftRightLogical64,                                 U64,            U64,            U32,                                            )
-OPCODE(ShiftRightArithmetic32,                              U32,            U32,            U32,                                            )
-OPCODE(ShiftRightArithmetic64,                              U64,            U64,            U32,                                            )
-OPCODE(BitwiseAnd32,                                        U32,            U32,            U32,                                            )
-OPCODE(BitwiseOr32,                                         U32,            U32,            U32,                                            )
-OPCODE(BitwiseXor32,                                        U32,            U32,            U32,                                            )
-OPCODE(BitFieldInsert,                                      U32,            U32,            U32,            U32,            U32,            )
-OPCODE(BitFieldSExtract,                                    U32,            U32,            U32,            U32,                            )
-OPCODE(BitFieldUExtract,                                    U32,            U32,            U32,            U32,                            )
-OPCODE(BitReverse32,                                        U32,            U32,                                                            )
-OPCODE(BitCount32,                                          U32,            U32,                                                            )
-OPCODE(BitwiseNot32,                                        U32,            U32,                                                            )
+OPCODE(IAdd32,                                              U32,            U32,            U32,                                                            )
+OPCODE(IAdd64,                                              U64,            U64,            U64,                                                            )
+OPCODE(ISub32,                                              U32,            U32,            U32,                                                            )
+OPCODE(ISub64,                                              U64,            U64,            U64,                                                            )
+OPCODE(IMul32,                                              U32,            U32,            U32,                                                            )
+OPCODE(INeg32,                                              U32,            U32,                                                                            )
+OPCODE(INeg64,                                              U64,            U64,                                                                            )
+OPCODE(IAbs32,                                              U32,            U32,                                                                            )
+OPCODE(ShiftLeftLogical32,                                  U32,            U32,            U32,                                                            )
+OPCODE(ShiftLeftLogical64,                                  U64,            U64,            U32,                                                            )
+OPCODE(ShiftRightLogical32,                                 U32,            U32,            U32,                                                            )
+OPCODE(ShiftRightLogical64,                                 U64,            U64,            U32,                                                            )
+OPCODE(ShiftRightArithmetic32,                              U32,            U32,            U32,                                                            )
+OPCODE(ShiftRightArithmetic64,                              U64,            U64,            U32,                                                            )
+OPCODE(BitwiseAnd32,                                        U32,            U32,            U32,                                                            )
+OPCODE(BitwiseOr32,                                         U32,            U32,            U32,                                                            )
+OPCODE(BitwiseXor32,                                        U32,            U32,            U32,                                                            )
+OPCODE(BitFieldInsert,                                      U32,            U32,            U32,            U32,            U32,                            )
+OPCODE(BitFieldSExtract,                                    U32,            U32,            U32,            U32,                                            )
+OPCODE(BitFieldUExtract,                                    U32,            U32,            U32,            U32,                                            )
+OPCODE(BitReverse32,                                        U32,            U32,                                                                            )
+OPCODE(BitCount32,                                          U32,            U32,                                                                            )
+OPCODE(BitwiseNot32,                                        U32,            U32,                                                                            )
 
-OPCODE(FindSMsb32,                                          U32,            U32,                                                            )
-OPCODE(FindUMsb32,                                          U32,            U32,                                                            )
-OPCODE(SMin32,                                              U32,            U32,            U32,                                            )
-OPCODE(UMin32,                                              U32,            U32,            U32,                                            )
-OPCODE(SMax32,                                              U32,            U32,            U32,                                            )
-OPCODE(UMax32,                                              U32,            U32,            U32,                                            )
-OPCODE(SLessThan,                                           U1,             U32,            U32,                                            )
-OPCODE(ULessThan,                                           U1,             U32,            U32,                                            )
-OPCODE(IEqual,                                              U1,             U32,            U32,                                            )
-OPCODE(SLessThanEqual,                                      U1,             U32,            U32,                                            )
-OPCODE(ULessThanEqual,                                      U1,             U32,            U32,                                            )
-OPCODE(SGreaterThan,                                        U1,             U32,            U32,                                            )
-OPCODE(UGreaterThan,                                        U1,             U32,            U32,                                            )
-OPCODE(INotEqual,                                           U1,             U32,            U32,                                            )
-OPCODE(SGreaterThanEqual,                                   U1,             U32,            U32,                                            )
-OPCODE(UGreaterThanEqual,                                   U1,             U32,            U32,                                            )
+OPCODE(FindSMsb32,                                          U32,            U32,                                                                            )
+OPCODE(FindUMsb32,                                          U32,            U32,                                                                            )
+OPCODE(SMin32,                                              U32,            U32,            U32,                                                            )
+OPCODE(UMin32,                                              U32,            U32,            U32,                                                            )
+OPCODE(SMax32,                                              U32,            U32,            U32,                                                            )
+OPCODE(UMax32,                                              U32,            U32,            U32,                                                            )
+OPCODE(SLessThan,                                           U1,             U32,            U32,                                                            )
+OPCODE(ULessThan,                                           U1,             U32,            U32,                                                            )
+OPCODE(IEqual,                                              U1,             U32,            U32,                                                            )
+OPCODE(SLessThanEqual,                                      U1,             U32,            U32,                                                            )
+OPCODE(ULessThanEqual,                                      U1,             U32,            U32,                                                            )
+OPCODE(SGreaterThan,                                        U1,             U32,            U32,                                                            )
+OPCODE(UGreaterThan,                                        U1,             U32,            U32,                                                            )
+OPCODE(INotEqual,                                           U1,             U32,            U32,                                                            )
+OPCODE(SGreaterThanEqual,                                   U1,             U32,            U32,                                                            )
+OPCODE(UGreaterThanEqual,                                   U1,             U32,            U32,                                                            )
 
 // Logical operations
-OPCODE(LogicalOr,                                           U1,             U1,             U1,                                             )
-OPCODE(LogicalAnd,                                          U1,             U1,             U1,                                             )
-OPCODE(LogicalXor,                                          U1,             U1,             U1,                                             )
-OPCODE(LogicalNot,                                          U1,             U1,                                                             )
+OPCODE(LogicalOr,                                           U1,             U1,             U1,                                                             )
+OPCODE(LogicalAnd,                                          U1,             U1,             U1,                                                             )
+OPCODE(LogicalXor,                                          U1,             U1,             U1,                                                             )
+OPCODE(LogicalNot,                                          U1,             U1,                                                                             )
 
 // Conversion operations
-OPCODE(ConvertS16F16,                                       U32,            F16,                                                            )
-OPCODE(ConvertS16F32,                                       U32,            F32,                                                            )
-OPCODE(ConvertS16F64,                                       U32,            F64,                                                            )
-OPCODE(ConvertS32F16,                                       U32,            F16,                                                            )
-OPCODE(ConvertS32F32,                                       U32,            F32,                                                            )
-OPCODE(ConvertS32F64,                                       U32,            F64,                                                            )
-OPCODE(ConvertS64F16,                                       U64,            F16,                                                            )
-OPCODE(ConvertS64F32,                                       U64,            F32,                                                            )
-OPCODE(ConvertS64F64,                                       U64,            F64,                                                            )
-OPCODE(ConvertU16F16,                                       U32,            F16,                                                            )
-OPCODE(ConvertU16F32,                                       U32,            F32,                                                            )
-OPCODE(ConvertU16F64,                                       U32,            F64,                                                            )
-OPCODE(ConvertU32F16,                                       U32,            F16,                                                            )
-OPCODE(ConvertU32F32,                                       U32,            F32,                                                            )
-OPCODE(ConvertU32F64,                                       U32,            F64,                                                            )
-OPCODE(ConvertU64F16,                                       U64,            F16,                                                            )
-OPCODE(ConvertU64F32,                                       U64,            F32,                                                            )
-OPCODE(ConvertU64F64,                                       U64,            F64,                                                            )
-OPCODE(ConvertU64U32,                                       U64,            U32,                                                            )
-OPCODE(ConvertU32U64,                                       U32,            U64,                                                            )
-OPCODE(ConvertF16F32,                                       F16,            F32,                                                            )
-OPCODE(ConvertF32F16,                                       F32,            F16,                                                            )
-OPCODE(ConvertF32F64,                                       F32,            F64,                                                            )
-OPCODE(ConvertF64F32,                                       F64,            F32,                                                            )
+OPCODE(ConvertS16F16,                                       U32,            F16,                                                                            )
+OPCODE(ConvertS16F32,                                       U32,            F32,                                                                            )
+OPCODE(ConvertS16F64,                                       U32,            F64,                                                                            )
+OPCODE(ConvertS32F16,                                       U32,            F16,                                                                            )
+OPCODE(ConvertS32F32,                                       U32,            F32,                                                                            )
+OPCODE(ConvertS32F64,                                       U32,            F64,                                                                            )
+OPCODE(ConvertS64F16,                                       U64,            F16,                                                                            )
+OPCODE(ConvertS64F32,                                       U64,            F32,                                                                            )
+OPCODE(ConvertS64F64,                                       U64,            F64,                                                                            )
+OPCODE(ConvertU16F16,                                       U32,            F16,                                                                            )
+OPCODE(ConvertU16F32,                                       U32,            F32,                                                                            )
+OPCODE(ConvertU16F64,                                       U32,            F64,                                                                            )
+OPCODE(ConvertU32F16,                                       U32,            F16,                                                                            )
+OPCODE(ConvertU32F32,                                       U32,            F32,                                                                            )
+OPCODE(ConvertU32F64,                                       U32,            F64,                                                                            )
+OPCODE(ConvertU64F16,                                       U64,            F16,                                                                            )
+OPCODE(ConvertU64F32,                                       U64,            F32,                                                                            )
+OPCODE(ConvertU64F64,                                       U64,            F64,                                                                            )
+OPCODE(ConvertU64U32,                                       U64,            U32,                                                                            )
+OPCODE(ConvertU32U64,                                       U32,            U64,                                                                            )
+OPCODE(ConvertF16F32,                                       F16,            F32,                                                                            )
+OPCODE(ConvertF32F16,                                       F32,            F16,                                                                            )
+OPCODE(ConvertF32F64,                                       F32,            F64,                                                                            )
+OPCODE(ConvertF64F32,                                       F64,            F32,                                                                            )
+OPCODE(ConvertF16S32,                                       F16,            U32,                                                                            )
+OPCODE(ConvertF16S64,                                       F16,            U64,                                                                            )
+OPCODE(ConvertF16U32,                                       F16,            U32,                                                                            )
+OPCODE(ConvertF16U64,                                       F16,            U64,                                                                            )
+OPCODE(ConvertF32S32,                                       F32,            U32,                                                                            )
+OPCODE(ConvertF32S64,                                       F32,            U64,                                                                            )
+OPCODE(ConvertF32U32,                                       F32,            U32,                                                                            )
+OPCODE(ConvertF32U64,                                       F32,            U64,                                                                            )
+OPCODE(ConvertF64S32,                                       F64,            U32,                                                                            )
+OPCODE(ConvertF64S64,                                       F64,            U64,                                                                            )
+OPCODE(ConvertF64U32,                                       F64,            U32,                                                                            )
+OPCODE(ConvertF64U64,                                       F64,            U64,                                                                            )
+
+// Image operations
+OPCODE(BindlessImageSampleImplicitLod,                      F32x4,          U32,            Opaque,         Opaque,         Opaque,                         )
+OPCODE(BindlessImageSampleExplicitLod,                      F32x4,          U32,            Opaque,         Opaque,         Opaque,                         )
+OPCODE(BindlessImageSampleDrefImplicitLod,                  F32,            U32,            Opaque,         F32,            Opaque,         Opaque,         )
+OPCODE(BindlessImageSampleDrefExplicitLod,                  F32,            U32,            Opaque,         F32,            Opaque,         Opaque,         )
+
+OPCODE(BoundImageSampleImplicitLod,                         F32x4,          U32,            Opaque,         Opaque,         Opaque,                         )
+OPCODE(BoundImageSampleExplicitLod,                         F32x4,          U32,            Opaque,         Opaque,         Opaque,                         )
+OPCODE(BoundImageSampleDrefImplicitLod,                     F32,            U32,            Opaque,         F32,            Opaque,         Opaque,         )
+OPCODE(BoundImageSampleDrefExplicitLod,                     F32,            U32,            Opaque,         F32,            Opaque,         Opaque,         )
+
+OPCODE(ImageSampleImplicitLod,                              F32x4,          U32,            Opaque,         Opaque,         Opaque,                         )
+OPCODE(ImageSampleExplicitLod,                              F32x4,          U32,            Opaque,         Opaque,         Opaque,                         )
+OPCODE(ImageSampleDrefImplicitLod,                          F32,            U32,            Opaque,         F32,            Opaque,         Opaque,         )
+OPCODE(ImageSampleDrefExplicitLod,                          F32,            U32,            Opaque,         F32,            Opaque,         Opaque,         )
diff --git a/src/shader_recompiler/frontend/ir/reg.h b/src/shader_recompiler/frontend/ir/reg.h
index 771094eb9a..8fea05f7be 100644
--- a/src/shader_recompiler/frontend/ir/reg.h
+++ b/src/shader_recompiler/frontend/ir/reg.h
@@ -293,6 +293,17 @@ constexpr size_t NUM_REGS = 256;
     return reg + (-num);
 }
 
+constexpr Reg operator++(Reg& reg) {
+    reg = reg + 1;
+    return reg;
+}
+
+[[nodiscard]] constexpr Reg operator++(Reg& reg, int) {
+    const Reg copy{reg};
+    reg = reg + 1;
+    return copy;
+}
+
 [[nodiscard]] constexpr size_t RegIndex(Reg reg) noexcept {
     return static_cast<size_t>(reg);
 }
diff --git a/src/shader_recompiler/frontend/ir/value.h b/src/shader_recompiler/frontend/ir/value.h
index 9b7e1480be..3602883d6f 100644
--- a/src/shader_recompiler/frontend/ir/value.h
+++ b/src/shader_recompiler/frontend/ir/value.h
@@ -75,6 +75,7 @@ private:
         f64 imm_f64;
     };
 };
+static_assert(std::is_trivially_copyable_v<Value>);
 
 template <IR::Type type_>
 class TypedValue : public Value {
diff --git a/src/shader_recompiler/frontend/maxwell/maxwell.inc b/src/shader_recompiler/frontend/maxwell/maxwell.inc
index 5d0b91598e..f2a2ff331e 100644
--- a/src/shader_recompiler/frontend/maxwell/maxwell.inc
+++ b/src/shader_recompiler/frontend/maxwell/maxwell.inc
@@ -249,8 +249,8 @@ INST(SULD,         "SULD",           "1110 1011 000- ----")
 INST(SURED,        "SURED",          "1110 1011 010- ----")
 INST(SUST,         "SUST",           "1110 1011 001- ----")
 INST(SYNC,         "SYNC",           "1111 0000 1111 1---")
-INST(TEX,          "TEX",            "1100 00-- --11 1---")
-INST(TEX_b,        "TEX (b)",        "1101 1110 1011 1---")
+INST(TEX,          "TEX",            "1100 0--- ---- ----")
+INST(TEX_b,        "TEX (b)",        "1101 1110 10-- ----")
 INST(TEXS,         "TEXS",           "1101 -00- ---- ----")
 INST(TLD,          "TLD",            "1101 1100 --11 1---")
 INST(TLD_b,        "TLD (b)",        "1101 1101 --11 1---")
diff --git a/src/shader_recompiler/frontend/maxwell/program.cpp b/src/shader_recompiler/frontend/maxwell/program.cpp
index dbfc04f75e..b270bbccdb 100644
--- a/src/shader_recompiler/frontend/maxwell/program.cpp
+++ b/src/shader_recompiler/frontend/maxwell/program.cpp
@@ -62,6 +62,7 @@ IR::Program TranslateProgram(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Blo
         Optimization::SsaRewritePass(function.post_order_blocks);
     }
     Optimization::GlobalMemoryToStorageBufferPass(program);
+    Optimization::TexturePass(env, program);
     for (IR::Function& function : functions) {
         Optimization::PostOrderInvoke(Optimization::ConstantPropagationPass, function);
         Optimization::PostOrderInvoke(Optimization::DeadCodeEliminationPass, function);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/not_implemented.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/not_implemented.cpp
index fc6030e04e..ff429c1263 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/not_implemented.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/not_implemented.cpp
@@ -585,14 +585,6 @@ void TranslatorVisitor::SYNC(u64) {
     ThrowNotImplemented(Opcode::SYNC);
 }
 
-void TranslatorVisitor::TEX(u64) {
-    ThrowNotImplemented(Opcode::TEX);
-}
-
-void TranslatorVisitor::TEX_b(u64) {
-    ThrowNotImplemented(Opcode::TEX_b);
-}
-
 void TranslatorVisitor::TEXS(u64) {
     ThrowNotImplemented(Opcode::TEXS);
 }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_sample.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_sample.cpp
new file mode 100644
index 0000000000..98d9f4c648
--- /dev/null
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_sample.cpp
@@ -0,0 +1,232 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <optional>
+
+#include "common/bit_field.h"
+#include "common/common_types.h"
+#include "shader_recompiler/frontend/ir/modifiers.h"
+#include "shader_recompiler/frontend/maxwell/translate/impl/impl.h"
+
+namespace Shader::Maxwell {
+namespace {
+enum class Blod : u64 {
+    None,
+    LZ,
+    LB,
+    LL,
+    INVALIDBLOD4,
+    INVALIDBLOD5,
+    LBA,
+    LLA,
+};
+
+enum class TextureType : u64 {
+    _1D,
+    ARRAY_1D,
+    _2D,
+    ARRAY_2D,
+    _3D,
+    ARRAY_3D,
+    CUBE,
+    ARRAY_CUBE,
+};
+
+Shader::TextureType GetType(TextureType type, bool dc) {
+    switch (type) {
+    case TextureType::_1D:
+        return dc ? Shader::TextureType::Shadow1D : Shader::TextureType::Color1D;
+    case TextureType::ARRAY_1D:
+        return dc ? Shader::TextureType::ShadowArray1D : Shader::TextureType::ColorArray1D;
+    case TextureType::_2D:
+        return dc ? Shader::TextureType::Shadow2D : Shader::TextureType::Color2D;
+    case TextureType::ARRAY_2D:
+        return dc ? Shader::TextureType::ShadowArray2D : Shader::TextureType::ColorArray2D;
+    case TextureType::_3D:
+        return dc ? Shader::TextureType::Shadow3D : Shader::TextureType::Color3D;
+    case TextureType::ARRAY_3D:
+        throw NotImplementedException("3D array texture type");
+    case TextureType::CUBE:
+        return dc ? Shader::TextureType::ShadowCube : Shader::TextureType::ColorCube;
+    case TextureType::ARRAY_CUBE:
+        return dc ? Shader::TextureType::ShadowArrayCube : Shader::TextureType::ColorArrayCube;
+    }
+    throw NotImplementedException("Invalid texture type {}", type);
+}
+
+IR::Value MakeCoords(TranslatorVisitor& v, IR::Reg reg, TextureType type) {
+    const auto read_array{[&]() -> IR::F32 { return v.ir.ConvertUToF(32, v.X(reg)); }};
+    switch (type) {
+    case TextureType::_1D:
+        return v.F(reg);
+    case TextureType::ARRAY_1D:
+        return v.ir.CompositeConstruct(read_array(), v.F(reg + 1));
+    case TextureType::_2D:
+        return v.ir.CompositeConstruct(v.F(reg), v.F(reg + 1));
+    case TextureType::ARRAY_2D:
+        return v.ir.CompositeConstruct(read_array(), v.F(reg + 1), v.F(reg + 2));
+    case TextureType::_3D:
+        return v.ir.CompositeConstruct(v.F(reg), v.F(reg + 1), v.F(reg + 2));
+    case TextureType::ARRAY_3D:
+        throw NotImplementedException("3D array texture type");
+    case TextureType::CUBE:
+        return v.ir.CompositeConstruct(v.F(reg), v.F(reg + 1), v.F(reg + 2));
+    case TextureType::ARRAY_CUBE:
+        return v.ir.CompositeConstruct(read_array(), v.F(reg + 1), v.F(reg + 2), v.F(reg + 3));
+    }
+    throw NotImplementedException("Invalid texture type {}", type);
+}
+
+IR::F32 MakeLod(TranslatorVisitor& v, IR::Reg& reg, Blod blod) {
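+    // The LB, LL, LBA, and LLA modes consume one extra register holding the bias or level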
+    switch (blod) {
+    case Blod::None:
+        return v.ir.Imm32(0.0f);
+    case Blod::LZ:
+        return v.ir.Imm32(0.0f);
+    case Blod::LB:
+    case Blod::LL:
+    case Blod::LBA:
+    case Blod::LLA:
+        return v.F(reg++);
+    case Blod::INVALIDBLOD4:
+    case Blod::INVALIDBLOD5:
+        break;
+    }
+    throw NotImplementedException("Invalid blod {}", blod);
+}
+
+IR::Value MakeOffset(TranslatorVisitor& v, IR::Reg& reg, TextureType type) {
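+    // AOFFI packs one 4-bit offset per coordinate component into a single register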
+    const IR::U32 value{v.X(reg++)};
+    switch (type) {
+    case TextureType::_1D:
+    case TextureType::ARRAY_1D:
+        return v.ir.BitFieldExtract(value, v.ir.Imm32(0), v.ir.Imm32(4));
+    case TextureType::_2D:
+    case TextureType::ARRAY_2D:
+        return v.ir.CompositeConstruct(v.ir.BitFieldExtract(value, v.ir.Imm32(0), v.ir.Imm32(4)),
+                                       v.ir.BitFieldExtract(value, v.ir.Imm32(4), v.ir.Imm32(4)));
+    case TextureType::_3D:
+    case TextureType::ARRAY_3D:
+        return v.ir.CompositeConstruct(v.ir.BitFieldExtract(value, v.ir.Imm32(0), v.ir.Imm32(4)),
+                                       v.ir.BitFieldExtract(value, v.ir.Imm32(4), v.ir.Imm32(4)),
+                                       v.ir.BitFieldExtract(value, v.ir.Imm32(8), v.ir.Imm32(4)));
+    case TextureType::CUBE:
+    case TextureType::ARRAY_CUBE:
+        throw NotImplementedException("Illegal offset on CUBE sample");
+    }
+    throw NotImplementedException("Invalid texture type {}", type);
+}
+
+bool HasExplicitLod(Blod blod) {
+    switch (blod) {
+    case Blod::LL:
+    case Blod::LLA:
+    case Blod::LZ:
+        return true;
+    default:
+        return false;
+    }
+}
+
+void Impl(TranslatorVisitor& v, u64 insn, bool aoffi, Blod blod, bool lc,
+          std::optional<u32> cbuf_offset) {
+    union {
+        u64 raw;
+        BitField<35, 1, u64> ndv;
+        BitField<49, 1, u64> nodep;
+        BitField<50, 1, u64> dc;
+        BitField<51, 3, IR::Pred> sparse_pred;
+        BitField<0, 8, IR::Reg> dest_reg;
+        BitField<8, 8, IR::Reg> coord_reg;
+        BitField<20, 8, IR::Reg> meta_reg;
+        BitField<28, 3, TextureType> type;
+        BitField<31, 4, u64> mask;
+    } const tex{insn};
+
+    if (lc) {
+        throw NotImplementedException("LC");
+    }
+    const IR::Value coords{MakeCoords(v, tex.coord_reg, tex.type)};
+
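+    // Meta registers are consumed in order: handle (bindless only), lod, offset, dref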
+    IR::Reg meta_reg{tex.meta_reg};
+    IR::Value handle;
+    IR::Value offset;
+    IR::F32 dref;
+    IR::F32 lod_clamp;
+    if (cbuf_offset) {
+        handle = v.ir.Imm32(*cbuf_offset);
+    } else {
+        handle = v.X(meta_reg++);
+    }
+    const IR::F32 lod{MakeLod(v, meta_reg, blod)};
+    if (aoffi) {
+        offset = MakeOffset(v, meta_reg, tex.type);
+    }
+    if (tex.dc != 0) {
+        dref = v.F(meta_reg++);
+    }
+    IR::TextureInstInfo info{};
+    info.type.Assign(GetType(tex.type, tex.dc != 0));
+    info.has_bias.Assign(blod == Blod::LB || blod == Blod::LBA ? 1 : 0);
+    info.has_lod_clamp.Assign(lc ? 1 : 0);
+
+    const IR::Value sample{[&]() -> IR::Value {
+        if (tex.dc == 0) {
+            if (HasExplicitLod(blod)) {
+                return v.ir.ImageSampleExplicitLod(handle, coords, lod, offset, lod_clamp, info);
+            } else {
+                return v.ir.ImageSampleImplicitLod(handle, coords, lod, offset, lod_clamp, info);
+            }
+        }
+        if (HasExplicitLod(blod)) {
+            return v.ir.ImageSampleDrefExplicitLod(handle, coords, dref, lod, offset, lod_clamp,
+                                                   info);
+        } else {
+            return v.ir.ImageSampleDrefImplicitLod(handle, coords, dref, lod, offset, lod_clamp,
+                                                   info);
+        }
+    }()};
+
+    for (int element = 0; element < 4; ++element) {
+        if (((tex.mask >> element) & 1) == 0) {
+            continue;
+        }
+        IR::F32 value;
+        if (tex.dc != 0) {
+            value = element < 3 ? IR::F32{sample} : v.ir.Imm32(1.0f);
+        } else {
+            value = IR::F32{v.ir.CompositeExtract(sample, element)};
+        }
+        v.F(tex.dest_reg + element, value);
+    }
+    if (tex.sparse_pred != IR::Pred::PT) {
+        v.ir.SetPred(tex.sparse_pred, v.ir.LogicalNot(v.ir.GetSparseFromOp(sample)));
+    }
+}
+} // Anonymous namespace
+
+void TranslatorVisitor::TEX(u64 insn) {
+    union {
+        u64 raw;
+        BitField<54, 1, u64> aoffi;
+        BitField<55, 3, Blod> blod;
+        BitField<58, 1, u64> lc;
+        BitField<36, 13, u64> cbuf_offset;
+    } const tex{insn};
+
+    Impl(*this, insn, tex.aoffi != 0, tex.blod, tex.lc != 0, static_cast<u32>(tex.cbuf_offset));
+}
+
+void TranslatorVisitor::TEX_b(u64 insn) {
+    union {
+        u64 raw;
+        BitField<36, 1, u64> aoffi;
+        BitField<37, 3, Blod> blod;
+        BitField<40, 1, u64> lc;
+    } const tex{insn};
+
+    Impl(*this, insn, tex.aoffi != 0, tex.blod, tex.lc != 0, std::nullopt);
+}
+
+} // namespace Shader::Maxwell
diff --git a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
index 6662ef4cdc..960beadd43 100644
--- a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
+++ b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
@@ -82,6 +82,25 @@ void VisitUsages(Info& info, IR::Inst& inst) {
             throw NotImplementedException("Constant buffer with non-immediate index");
         }
         break;
+    case IR::Opcode::BindlessImageSampleImplicitLod:
+    case IR::Opcode::BindlessImageSampleExplicitLod:
+    case IR::Opcode::BindlessImageSampleDrefImplicitLod:
+    case IR::Opcode::BindlessImageSampleDrefExplicitLod:
+    case IR::Opcode::BoundImageSampleImplicitLod:
+    case IR::Opcode::BoundImageSampleExplicitLod:
+    case IR::Opcode::BoundImageSampleDrefImplicitLod:
+    case IR::Opcode::BoundImageSampleDrefExplicitLod:
+    case IR::Opcode::ImageSampleImplicitLod:
+    case IR::Opcode::ImageSampleExplicitLod:
+    case IR::Opcode::ImageSampleDrefImplicitLod:
+    case IR::Opcode::ImageSampleDrefExplicitLod: {
+        const TextureType type{inst.Flags<IR::TextureInstInfo>().type};
+        info.uses_sampled_1d |= type == TextureType::Color1D || type == TextureType::ColorArray1D ||
+                                type == TextureType::Shadow1D || type == TextureType::ShadowArray1D;
+        info.uses_sparse_residency |=
+            inst.GetAssociatedPseudoOperation(IR::Opcode::GetSparseFromOp) != nullptr;
+        break;
+    }
     default:
         break;
     }
diff --git a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
index 965e521352..2625c0bb2b 100644
--- a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
+++ b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
@@ -226,6 +226,7 @@ std::optional<StorageBufferAddr> Track(IR::Block* block, const IR::Value& value,
     }
     // Reversed loops are more likely to find the right result
     for (size_t arg = inst->NumArgs(); arg--;) {
+        IR::Block* inst_block{block};
         if (inst->Opcode() == IR::Opcode::Phi) {
             // If we are going through a phi node, mark the current block as visited
             visited.insert(block);
@@ -235,15 +236,11 @@ std::optional<StorageBufferAddr> Track(IR::Block* block, const IR::Value& value,
                 // Already visited, skip
                 continue;
             }
-            const std::optional storage_buffer{Track(phi_block, inst->Arg(arg), bias, visited)};
-            if (storage_buffer) {
-                return *storage_buffer;
-            }
-        } else {
-            const std::optional storage_buffer{Track(block, inst->Arg(arg), bias, visited)};
-            if (storage_buffer) {
-                return *storage_buffer;
-            }
+            inst_block = phi_block;
+        }
+        const std::optional storage_buffer{Track(inst_block, inst->Arg(arg), bias, visited)};
+        if (storage_buffer) {
+            return *storage_buffer;
         }
     }
     return std::nullopt;
diff --git a/src/shader_recompiler/ir_opt/passes.h b/src/shader_recompiler/ir_opt/passes.h
index 38106308cb..3b7e7306bb 100644
--- a/src/shader_recompiler/ir_opt/passes.h
+++ b/src/shader_recompiler/ir_opt/passes.h
@@ -6,6 +6,7 @@
 
 #include <span>
 
+#include "shader_recompiler/environment.h"
 #include "shader_recompiler/frontend/ir/basic_block.h"
 #include "shader_recompiler/frontend/ir/function.h"
 #include "shader_recompiler/frontend/ir/program.h"
@@ -26,6 +27,7 @@ void GlobalMemoryToStorageBufferPass(IR::Program& program);
 void IdentityRemovalPass(IR::Function& function);
 void LowerFp16ToFp32(IR::Program& program);
 void SsaRewritePass(std::span<IR::Block* const> post_order_blocks);
+void TexturePass(Environment& env, IR::Program& program);
 void VerificationPass(const IR::Function& function);
 
 } // namespace Shader::Optimization
diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp
new file mode 100644
index 0000000000..80e4ad6a97
--- /dev/null
+++ b/src/shader_recompiler/ir_opt/texture_pass.cpp
@@ -0,0 +1,199 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <optional>
+
+#include <boost/container/flat_set.hpp>
+#include <boost/container/small_vector.hpp>
+
+#include "shader_recompiler/environment.h"
+#include "shader_recompiler/frontend/ir/basic_block.h"
+#include "shader_recompiler/frontend/ir/ir_emitter.h"
+#include "shader_recompiler/ir_opt/passes.h"
+#include "shader_recompiler/shader_info.h"
+
+namespace Shader::Optimization {
+namespace {
+struct ConstBufferAddr {
+    u32 index;
+    u32 offset;
+};
+
+struct TextureInst {
+    ConstBufferAddr cbuf;
+    IR::Inst* inst;
+    IR::Block* block;
+};
+
+using TextureInstVector = boost::container::small_vector<TextureInst, 24>;
+
+using VisitedBlocks = boost::container::flat_set<IR::Block*, std::less<IR::Block*>,
+                                                 boost::container::small_vector<IR::Block*, 2>>;
+
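+// Returns the indexed equivalent of a bindless or bound sample opcode, or Void when the
+// instruction is not a texture operation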
+IR::Opcode IndexedInstruction(const IR::Inst& inst) {
+    switch (inst.Opcode()) {
+    case IR::Opcode::BindlessImageSampleImplicitLod:
+    case IR::Opcode::BoundImageSampleImplicitLod:
+        return IR::Opcode::ImageSampleImplicitLod;
+    case IR::Opcode::BoundImageSampleExplicitLod:
+    case IR::Opcode::BindlessImageSampleExplicitLod:
+        return IR::Opcode::ImageSampleExplicitLod;
+    case IR::Opcode::BoundImageSampleDrefImplicitLod:
+    case IR::Opcode::BindlessImageSampleDrefImplicitLod:
+        return IR::Opcode::ImageSampleDrefImplicitLod;
+    case IR::Opcode::BoundImageSampleDrefExplicitLod:
+    case IR::Opcode::BindlessImageSampleDrefExplicitLod:
+        return IR::Opcode::ImageSampleDrefExplicitLod;
+    default:
+        return IR::Opcode::Void;
+    }
+}
+
+bool IsBindless(const IR::Inst& inst) {
+    switch (inst.Opcode()) {
+    case IR::Opcode::BindlessImageSampleImplicitLod:
+    case IR::Opcode::BindlessImageSampleExplicitLod:
+    case IR::Opcode::BindlessImageSampleDrefImplicitLod:
+    case IR::Opcode::BindlessImageSampleDrefExplicitLod:
+        return true;
+    case IR::Opcode::BoundImageSampleImplicitLod:
+    case IR::Opcode::BoundImageSampleExplicitLod:
+    case IR::Opcode::BoundImageSampleDrefImplicitLod:
+    case IR::Opcode::BoundImageSampleDrefExplicitLod:
+        return false;
+    default:
+        throw InvalidArgument("Invalid opcode {}", inst.Opcode());
+    }
+}
+
+bool IsTextureInstruction(const IR::Inst& inst) {
+    return IndexedInstruction(inst) != IR::Opcode::Void;
+}
+
+std::optional<ConstBufferAddr> Track(IR::Block* block, const IR::Value& value,
+                                     VisitedBlocks& visited) {
+    if (value.IsImmediate()) {
+        // Immediates can't be a constant buffer
+        return std::nullopt;
+    }
+    const IR::Inst* const inst{value.InstRecursive()};
+    if (inst->Opcode() == IR::Opcode::GetCbuf) {
+        const IR::Value index{inst->Arg(0)};
+        const IR::Value offset{inst->Arg(1)};
+        if (!index.IsImmediate()) {
+            // Reading a bindless texture from variable indices is valid
+            // but not supported here at the moment
+            return std::nullopt;
+        }
+        if (!offset.IsImmediate()) {
+            // TODO: Support arrays of textures
+            return std::nullopt;
+        }
+        return ConstBufferAddr{
+            .index{index.U32()},
+            .offset{offset.U32()},
+        };
+    }
+    // Reversed loops are more likely to find the right result
+    for (size_t arg = inst->NumArgs(); arg--;) {
+        IR::Block* inst_block{block};
+        if (inst->Opcode() == IR::Opcode::Phi) {
+            // If we are going through a phi node, mark the current block as visited
+            visited.insert(block);
+            // and skip already visited blocks to avoid looping forever
+            IR::Block* const phi_block{inst->PhiBlock(arg)};
+            if (visited.contains(phi_block)) {
+                // Already visited, skip
+                continue;
+            }
+            inst_block = phi_block;
+        }
+        const std::optional const_buffer{Track(inst_block, inst->Arg(arg), visited)};
+        if (const_buffer) {
+            return *const_buffer;
+        }
+    }
+    return std::nullopt;
+}
+
+TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
+    ConstBufferAddr addr;
+    if (IsBindless(inst)) {
+        VisitedBlocks visited;
+        const std::optional<ConstBufferAddr> track_addr{Track(block, IR::Value{&inst}, visited)};
+        if (!track_addr) {
+            throw NotImplementedException("Failed to track bindless texture constant buffer");
+        }
+        addr = *track_addr;
+    } else {
+        addr = ConstBufferAddr{
+            .index{env.TextureBoundBuffer()},
+            .offset{inst.Arg(0).U32()},
+        };
+    }
+    return TextureInst{
+        .cbuf{addr},
+        .inst{&inst},
+        .block{block},
+    };
+}
+
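+// Deduplicates texture descriptors: Add() returns the index of an existing matching
+// descriptor or appends a new one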
+class Descriptors {
+public:
+    explicit Descriptors(TextureDescriptors& descriptors_) : descriptors{descriptors_} {}
+
+    u32 Add(const TextureDescriptor& descriptor) {
+        // TODO: Handle arrays
+        auto it{std::ranges::find_if(descriptors, [&descriptor](const TextureDescriptor& existing) {
+            return descriptor.cbuf_index == existing.cbuf_index &&
+                   descriptor.cbuf_offset == existing.cbuf_offset &&
+                   descriptor.type == existing.type;
+        })};
+        if (it != descriptors.end()) {
+            return static_cast<u32>(std::distance(descriptors.begin(), it));
+        }
+        descriptors.push_back(descriptor);
+        return static_cast<u32>(descriptors.size()) - 1;
+    }
+
+private:
+    TextureDescriptors& descriptors;
+};
+} // Anonymous namespace
+
+void TexturePass(Environment& env, IR::Program& program) {
+    TextureInstVector to_replace;
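+    // Gather every texture instruction first so they can be sorted before descriptors are assigned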
+    for (IR::Function& function : program.functions) {
+        for (IR::Block* const block : function.post_order_blocks) {
+            for (IR::Inst& inst : block->Instructions()) {
+                if (!IsTextureInstruction(inst)) {
+                    continue;
+                }
+                to_replace.push_back(MakeInst(env, block, inst));
+            }
+        }
+    }
+    // Sort instructions to visit textures by constant buffer index, then by offset
+    std::ranges::sort(to_replace, [](const auto& lhs, const auto& rhs) {
+        return lhs.cbuf.offset < rhs.cbuf.offset;
+    });
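+    // A stable sort by index preserves the offset ordering established above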
+    std::stable_sort(to_replace.begin(), to_replace.end(), [](const auto& lhs, const auto& rhs) {
+        return lhs.cbuf.index < rhs.cbuf.index;
+    });
+    Descriptors descriptors{program.info.texture_descriptors};
+    for (TextureInst& texture_inst : to_replace) {
+        // TODO: Handle arrays
+        IR::Inst* const inst{texture_inst.inst};
+        const u32 index{descriptors.Add(TextureDescriptor{
+            .type{inst->Flags<IR::TextureInstInfo>().type},
+            .cbuf_index{texture_inst.cbuf.index},
+            .cbuf_offset{texture_inst.cbuf.offset},
+            .count{1},
+        })};
+        inst->ReplaceOpcode(IndexedInstruction(*inst));
+        inst->SetArg(0, IR::Value{index});
+    }
+}
+
+} // namespace Shader::Optimization
diff --git a/src/shader_recompiler/shader_info.h b/src/shader_recompiler/shader_info.h
index 8766bf13e9..103a2f0b43 100644
--- a/src/shader_recompiler/shader_info.h
+++ b/src/shader_recompiler/shader_info.h
@@ -8,25 +8,51 @@
 
 #include "common/common_types.h"
 
+#include <boost/container/small_vector.hpp>
 #include <boost/container/static_vector.hpp>
 
 namespace Shader {
 
+enum class TextureType : u32 {
+    Color1D,
+    ColorArray1D,
+    Color2D,
+    ColorArray2D,
+    Color3D,
+    ColorCube,
+    ColorArrayCube,
+    Shadow1D,
+    ShadowArray1D,
+    Shadow2D,
+    ShadowArray2D,
+    Shadow3D,
+    ShadowCube,
+    ShadowArrayCube,
+};
+
+struct TextureDescriptor {
+    TextureType type;
+    u32 cbuf_index;
+    u32 cbuf_offset;
+    u32 count;
+};
+using TextureDescriptors = boost::container::small_vector<TextureDescriptor, 12>;
+
+struct ConstantBufferDescriptor {
+    u32 index;
+    u32 count;
+};
+
+struct StorageBufferDescriptor {
+    u32 cbuf_index;
+    u32 cbuf_offset;
+    u32 count;
+};
+
 struct Info {
     static constexpr size_t MAX_CBUFS{18};
     static constexpr size_t MAX_SSBOS{16};
 
-    struct ConstantBufferDescriptor {
-        u32 index;
-        u32 count;
-    };
-
-    struct StorageBufferDescriptor {
-        u32 cbuf_index;
-        u32 cbuf_offset;
-        u32 count;
-    };
-
     bool uses_workgroup_id{};
     bool uses_local_invocation_id{};
     bool uses_fp16{};
@@ -35,12 +61,16 @@ struct Info {
     bool uses_fp16_denorms_preserve{};
     bool uses_fp32_denorms_flush{};
     bool uses_fp32_denorms_preserve{};
+    bool uses_image_1d{};
+    bool uses_sampled_1d{};
+    bool uses_sparse_residency{};
 
     u32 constant_buffer_mask{};
 
     boost::container::static_vector<ConstantBufferDescriptor, MAX_CBUFS>
         constant_buffer_descriptors;
     boost::container::static_vector<StorageBufferDescriptor, MAX_SSBOS> storage_buffers_descriptors;
+    TextureDescriptors texture_descriptors;
 };
 
 } // namespace Shader
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index a658a3276b..ef8bef6ffc 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -40,6 +40,16 @@ vk::DescriptorSetLayout CreateDescriptorSetLayout(const Device& device, const Sh
         });
         ++binding;
     }
+    for (const auto& desc : info.texture_descriptors) {
+        bindings.push_back({
+            .binding = binding,
+            .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+            .descriptorCount = 1,
+            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+            .pImmutableSamplers = nullptr,
+        });
+        ++binding;
+    }
     return device.GetLogical().CreateDescriptorSetLayout({
         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
         .pNext = nullptr,
@@ -79,6 +89,18 @@ vk::DescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplate(
         ++binding;
         offset += sizeof(DescriptorUpdateEntry);
     }
+    for (const auto& desc : info.texture_descriptors) {
+        entries.push_back({
+            .dstBinding = binding,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+            .offset = offset,
+            .stride = sizeof(DescriptorUpdateEntry),
+        });
+        ++binding;
+        offset += sizeof(DescriptorUpdateEntry);
+    }
     return device.GetLogical().CreateDescriptorUpdateTemplateKHR({
         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO,
         .pNext = nullptr,
@@ -92,6 +114,44 @@ vk::DescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplate(
         .set = 0,
     });
 }
+
+struct TextureHandle {
+    explicit TextureHandle(u32 data, bool via_header_index) {
+        const Tegra::Texture::TextureHandle handle{data};
+        image = handle.tic_id;
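+        // When handles come via the header index (linked TSC), the sampler id mirrors the image id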
+        sampler = via_header_index ? image : handle.tsc_id.Value();
+    }
+
+    u32 image;
+    u32 sampler;
+};
+
+VideoCommon::ImageViewType CastType(Shader::TextureType type) {
+    switch (type) {
+    case Shader::TextureType::Color1D:
+    case Shader::TextureType::Shadow1D:
+        return VideoCommon::ImageViewType::e1D;
+    case Shader::TextureType::ColorArray1D:
+    case Shader::TextureType::ShadowArray1D:
+        return VideoCommon::ImageViewType::e1DArray;
+    case Shader::TextureType::Color2D:
+    case Shader::TextureType::Shadow2D:
+        return VideoCommon::ImageViewType::e2D;
+    case Shader::TextureType::ColorArray2D:
+    case Shader::TextureType::ShadowArray2D:
+        return VideoCommon::ImageViewType::e2DArray;
+    case Shader::TextureType::Color3D:
+    case Shader::TextureType::Shadow3D:
+        return VideoCommon::ImageViewType::e3D;
+    case Shader::TextureType::ColorCube:
+    case Shader::TextureType::ShadowCube:
+        return VideoCommon::ImageViewType::Cube;
+    case Shader::TextureType::ColorArrayCube:
+    case Shader::TextureType::ShadowArrayCube:
+        return VideoCommon::ImageViewType::CubeArray;
+    }
+    UNREACHABLE_MSG("Invalid texture type {}", type);
+    // Fallback to keep control from reaching the end of a value-returning function
+    return VideoCommon::ImageViewType::e2D;
+}
 } // Anonymous namespace
 
 ComputePipeline::ComputePipeline(const Device& device, VKDescriptorPool& descriptor_pool,
@@ -143,6 +203,47 @@ void ComputePipeline::ConfigureBufferCache(BufferCache& buffer_cache) {
     buffer_cache.BindHostComputeBuffers();
 }
 
+void ComputePipeline::ConfigureTextureCache(Tegra::Engines::KeplerCompute& kepler_compute,
+                                            Tegra::MemoryManager& gpu_memory,
+                                            TextureCache& texture_cache) {
+    texture_cache.SynchronizeComputeDescriptors();
+
+    static constexpr size_t max_elements = 64;
+    std::array<ImageId, max_elements> image_view_ids;
+    boost::container::static_vector<u32, max_elements> image_view_indices;
+    boost::container::static_vector<VkSampler, max_elements> sampler_handles;
+
+    const auto& launch_desc{kepler_compute.launch_description};
+    const auto& cbufs{launch_desc.const_buffer_config};
+    const bool via_header_index{launch_desc.linked_tsc};
+    for (const auto& desc : info.texture_descriptors) {
+        const u32 cbuf_index{desc.cbuf_index};
+        const u32 cbuf_offset{desc.cbuf_offset};
+        ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0);
+
+        const GPUVAddr addr{cbufs[cbuf_index].Address() + cbuf_offset};
+        const u32 raw_handle{gpu_memory.Read<u32>(addr)};
+
+        const TextureHandle handle(raw_handle, via_header_index);
+        image_view_indices.push_back(handle.image);
+
+        Sampler* const sampler = texture_cache.GetComputeSampler(handle.sampler);
+        sampler_handles.push_back(sampler->Handle());
+    }
+
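+    // Resolve all image view indices in a single batch before pairing each view with its sampler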
+    const std::span indices_span(image_view_indices.data(), image_view_indices.size());
+    texture_cache.FillComputeImageViews(indices_span, image_view_ids);
+
+    size_t index{};
+    for (const auto& desc : info.texture_descriptors) {
+        const VkSampler vk_sampler{sampler_handles[index]};
+        ImageView& image_view{texture_cache.GetImageView(image_view_ids[index])};
+        const VkImageView vk_image_view{image_view.Handle(CastType(desc.type))};
+        update_descriptor_queue->AddSampledImage(vk_image_view, vk_sampler);
+        ++index;
+    }
+}
+
 VkDescriptorSet ComputePipeline::UpdateDescriptorSet() {
     const VkDescriptorSet descriptor_set{descriptor_allocator.Commit()};
     update_descriptor_queue->Send(*descriptor_update_template, descriptor_set);
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.h b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
index dc045d5245..08d73a2a4b 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
@@ -6,9 +6,11 @@
 
 #include "common/common_types.h"
 #include "shader_recompiler/shader_info.h"
+#include "video_core/memory_manager.h"
 #include "video_core/renderer_vulkan/vk_buffer_cache.h"
 #include "video_core/renderer_vulkan/vk_descriptor_pool.h"
 #include "video_core/renderer_vulkan/vk_pipeline.h"
+#include "video_core/renderer_vulkan/vk_texture_cache.h"
 #include "video_core/renderer_vulkan/vk_update_descriptor.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
 
@@ -30,6 +32,8 @@ public:
     ComputePipeline(const ComputePipeline&) = delete;
 
     void ConfigureBufferCache(BufferCache& buffer_cache);
+    void ConfigureTextureCache(Tegra::Engines::KeplerCompute& kepler_compute,
+                               Tegra::MemoryManager& gpu_memory, TextureCache& texture_cache);
 
     [[nodiscard]] VkDescriptorSet UpdateDescriptorSet();
 
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 2497c2385f..bcb7dd2eb4 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -76,6 +76,10 @@ public:
         return gpu_memory.Read<u64>(program_base + address);
     }
 
+    u32 TextureBoundBuffer() override {
+        return kepler_compute.regs.tex_cb_index;
+    }
+
     std::array<u32, 3> WorkgroupSize() override {
         const auto& qmd{kepler_compute.launch_description};
         return {qmd.block_dim_x, qmd.block_dim_y, qmd.block_dim_z};
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 1b662f9f3a..c94419d29c 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -241,9 +241,10 @@ void RasterizerVulkan::DispatchCompute() {
     if (!pipeline) {
         return;
     }
-    std::scoped_lock lock{buffer_cache.mutex};
+    std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex};
     update_descriptor_queue.Acquire();
     pipeline->ConfigureBufferCache(buffer_cache);
+    pipeline->ConfigureTextureCache(kepler_compute, gpu_memory, texture_cache);
     const VkDescriptorSet descriptor_set{pipeline->UpdateDescriptorSet()};
 
     const auto& qmd{kepler_compute.launch_description};
-- 
cgit v1.2.3-70-g09d2


From c826220733678198e9aef328a9808b062b06c5df Mon Sep 17 00:00:00 2001
From: ReinUsesLisp <reinuseslisp@airmail.cc>
Date: Wed, 31 Mar 2021 01:06:17 -0300
Subject: shader: Unroll "using enum" for opcode declarations
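
Clang (prior to version 13) does not implement P1099 "using enum", so
the enumerators are redeclared as constexpr constants instead. A
minimal sketch of the pattern, with hypothetical names:

    enum class Type { Void, U32 };

    // Instead of "using enum Type;", redeclare each enumerator:
    constexpr Type Void{Type::Void};
    constexpr Type U32{Type::U32};

    constexpr Type kTable[]{Void, U32}; // unqualified names stay terse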

---
 src/shader_recompiler/frontend/ir/opcodes.cpp | 28 ++++++++++++++++++++++++++-
 1 file changed, 27 insertions(+), 1 deletion(-)

(limited to 'src/shader_recompiler/frontend/ir/opcodes.cpp')

diff --git a/src/shader_recompiler/frontend/ir/opcodes.cpp b/src/shader_recompiler/frontend/ir/opcodes.cpp
index 8492a13d54..1cb9db6c9c 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.cpp
+++ b/src/shader_recompiler/frontend/ir/opcodes.cpp
@@ -17,7 +17,33 @@ struct OpcodeMeta {
     std::array<Type, 5> arg_types;
 };
 
-using enum Type;
+// Note: "using enum Type;" is not used here because some supported compilers lack P1099
+constexpr Type Void{Type::Void};
+constexpr Type Opaque{Type::Opaque};
+constexpr Type Label{Type::Label};
+constexpr Type Reg{Type::Reg};
+constexpr Type Pred{Type::Pred};
+constexpr Type Attribute{Type::Attribute};
+constexpr Type U1{Type::U1};
+constexpr Type U8{Type::U8};
+constexpr Type U16{Type::U16};
+constexpr Type U32{Type::U32};
+constexpr Type U64{Type::U64};
+constexpr Type F16{Type::F16};
+constexpr Type F32{Type::F32};
+constexpr Type F64{Type::F64};
+constexpr Type U32x2{Type::U32x2};
+constexpr Type U32x3{Type::U32x3};
+constexpr Type U32x4{Type::U32x4};
+constexpr Type F16x2{Type::F16x2};
+constexpr Type F16x3{Type::F16x3};
+constexpr Type F16x4{Type::F16x4};
+constexpr Type F32x2{Type::F32x2};
+constexpr Type F32x3{Type::F32x3};
+constexpr Type F32x4{Type::F32x4};
+constexpr Type F64x2{Type::F64x2};
+constexpr Type F64x3{Type::F64x3};
+constexpr Type F64x4{Type::F64x4};
 
 constexpr std::array META_TABLE{
 #define OPCODE(name_token, type_token, ...)                                                        \
-- 
cgit v1.2.3-70-g09d2


From 0bb85f6a753c769266c95c4ba146b25b9eaaaffd Mon Sep 17 00:00:00 2001
From: lat9nq <22451773+lat9nq@users.noreply.github.com>
Date: Mon, 5 Apr 2021 22:25:22 -0400
Subject: shader_recompiler,video_core: Cleanup some GCC and Clang errors

Mostly fixes for unused-* warnings, implicit conversions, braced scalar
initializers, -fpermissive violations, and a few others.

Some Clang errors likely remain in video_core, and std::ranges support
is still an outstanding issue in shader_recompiler.

shader_recompiler: cmake: Force bracket depth to 1024 on Clang
Increases the maximum fold-expression nesting depth.
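
As a rough illustration (not from this patch) of the kind of construct
that trips the limit, assuming it is instantiated with a very large
argument pack:

    template <typename... Args>
    constexpr unsigned long long Sum(Args... args) {
        // Clang counts each fold-expression expansion step against its
        // expression nesting limit (256 by default), so very large packs
        // need a higher -fbracket-depth.
        return (args + ... + 0ULL);
    }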

thread_worker: Include condition_variable

Don't use list initializers in control flow
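
A sketch of the pattern being replaced, with a hypothetical Compute()
helper:

    #include <optional>

    std::optional<int> Compute(); // hypothetical

    int Example() {
        // Avoided: list initializer inside a control-flow declaration
        // if (const std::optional value{Compute()}) { return *value; }

        // Used instead:
        if (const std::optional value = Compute()) {
            return *value;
        }
        return 0;
    }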

Co-authored-by: ReinUsesLisp <reinuseslisp@airmail.cc>
---
 src/common/thread_worker.h                         |   1 +
 src/shader_recompiler/CMakeLists.txt               |   2 +
 .../backend/spirv/emit_context.cpp                 |   4 +-
 src/shader_recompiler/backend/spirv/emit_spirv.cpp |  19 +--
 .../backend/spirv/emit_spirv_image.cpp             |  11 +-
 .../backend/spirv/emit_spirv_warp.cpp              |   2 +-
 src/shader_recompiler/file_environment.h           |   2 +-
 src/shader_recompiler/frontend/ir/attribute.cpp    |   4 +-
 src/shader_recompiler/frontend/ir/basic_block.cpp  |   2 +-
 src/shader_recompiler/frontend/ir/condition.cpp    |   6 +-
 src/shader_recompiler/frontend/ir/condition.h      |   4 +-
 src/shader_recompiler/frontend/ir/ir_emitter.cpp   |   4 +-
 .../frontend/ir/microinstruction.cpp               |  16 +--
 .../frontend/ir/microinstruction.h                 |   4 +-
 src/shader_recompiler/frontend/ir/opcodes.cpp      |   2 +-
 src/shader_recompiler/frontend/ir/program.cpp      |   2 -
 src/shader_recompiler/frontend/ir/value.cpp        |   4 +-
 src/shader_recompiler/frontend/ir/value.h          |   2 +-
 .../frontend/maxwell/control_flow.cpp              | 140 +++++++++------------
 src/shader_recompiler/frontend/maxwell/decode.cpp  |  10 +-
 .../maxwell/indirect_branch_table_track.cpp        |  10 +-
 .../frontend/maxwell/structured_control_flow.cpp   |   3 +-
 .../frontend/maxwell/translate/impl/double_add.cpp |   6 +-
 .../translate/impl/double_fused_multiply_add.cpp   |   6 +-
 .../maxwell/translate/impl/double_multiply.cpp     |   6 +-
 .../maxwell/translate/impl/floating_point_add.cpp  |   6 +-
 .../translate/impl/floating_point_compare.cpp      |   3 +-
 .../impl/floating_point_compare_and_set.cpp        |   6 +-
 .../floating_point_conversion_floating_point.cpp   |   6 +-
 .../impl/floating_point_conversion_integer.cpp     |  11 +-
 .../impl/floating_point_fused_multiply_add.cpp     |   6 +-
 .../translate/impl/floating_point_min_max.cpp      |   6 +-
 .../translate/impl/floating_point_multiply.cpp     |   8 +-
 .../impl/floating_point_set_predicate.cpp          |   6 +-
 .../translate/impl/floating_point_swizzled_add.cpp |   6 +-
 .../translate/impl/half_floating_point_add.cpp     |  11 +-
 .../half_floating_point_fused_multiply_add.cpp     |  11 +-
 .../impl/half_floating_point_multiply.cpp          |  11 +-
 .../translate/impl/half_floating_point_set.cpp     |  11 +-
 .../impl/half_floating_point_set_predicate.cpp     |  12 +-
 .../frontend/maxwell/translate/impl/impl.cpp       |   8 +-
 .../maxwell/translate/impl/integer_add.cpp         |   1 -
 .../impl/integer_floating_point_conversion.cpp     |   4 +-
 .../maxwell/translate/impl/load_constant.cpp       |   2 +-
 .../translate/impl/load_store_local_shared.cpp     |   9 +-
 .../maxwell/translate/impl/load_store_memory.cpp   |   4 +-
 .../maxwell/translate/impl/texture_fetch.cpp       |   2 +-
 .../translate/impl/texture_fetch_swizzled.cpp      |   2 +-
 .../translate/impl/texture_gather_swizzled.cpp     |   2 +-
 .../translate/impl/texture_load_swizzled.cpp       |   2 +-
 .../maxwell/translate/impl/texture_query.cpp       |   2 +-
 .../maxwell/translate/impl/video_set_predicate.cpp |   1 -
 .../ir_opt/collect_shader_info_pass.cpp            |  20 +--
 .../ir_opt/constant_propagation_pass.cpp           |  49 ++++----
 .../global_memory_to_storage_buffer_pass.cpp       |  42 +++----
 .../ir_opt/identity_removal_pass.cpp               |   3 +-
 .../ir_opt/lower_fp16_to_fp32.cpp                  |   2 +-
 src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp  |   4 +-
 src/shader_recompiler/ir_opt/texture_pass.cpp      |  32 ++---
 src/shader_recompiler/ir_opt/verification_pass.cpp |   4 +-
 src/tests/common/unique_function.cpp               |   2 +
 src/video_core/CMakeLists.txt                      |   2 +-
 .../renderer_vulkan/vk_graphics_pipeline.cpp       |  21 ++--
 .../renderer_vulkan/vk_pipeline_cache.cpp          |   5 +-
 .../renderer_vulkan/vk_render_pass_cache.cpp       |   2 -
 .../renderer_vulkan/vk_texture_cache.cpp           |   2 +-
 66 files changed, 308 insertions(+), 313 deletions(-)

(limited to 'src/shader_recompiler/frontend/ir/opcodes.cpp')

diff --git a/src/common/thread_worker.h b/src/common/thread_worker.h
index 0a975a869d..cd0017726f 100644
--- a/src/common/thread_worker.h
+++ b/src/common/thread_worker.h
@@ -5,6 +5,7 @@
 #pragma once
 
 #include <atomic>
+#include <condition_variable>
 #include <functional>
 #include <mutex>
 #include <stop_token>
diff --git a/src/shader_recompiler/CMakeLists.txt b/src/shader_recompiler/CMakeLists.txt
index 22639fe132..551bf1c582 100644
--- a/src/shader_recompiler/CMakeLists.txt
+++ b/src/shader_recompiler/CMakeLists.txt
@@ -196,6 +196,8 @@ else()
         $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
         $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
         -Werror=unused-variable
+
+        $<$<CXX_COMPILER_ID:Clang>:-fbracket-depth=1024>
     )
 endif()
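
Clang caps nested parentheses/brackets/braces at 256 by default, and
-fbracket-depth raises that cap. As the commit message notes, the issue is
fold-expression depth: a fold over a parameter pack expands to one nesting
level per element, so a pack with one entry per opcode easily exceeds the
default. Illustrative reduction:

    // With N arguments this expands to roughly N-1 nested parens:
    // (((a0 + a1) + a2) + ...)
    template <typename... Ts>
    constexpr auto Sum(Ts... ts) {
        return (... + ts);
    }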
 
diff --git a/src/shader_recompiler/backend/spirv/emit_context.cpp b/src/shader_recompiler/backend/spirv/emit_context.cpp
index b738e00cc2..0c114402b4 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_context.cpp
@@ -4,6 +4,7 @@
 
 #include <algorithm>
 #include <array>
+#include <climits>
 #include <string_view>
 
 #include <fmt/format.h>
@@ -116,7 +117,8 @@ void VectorTypes::Define(Sirit::Module& sirit_ctx, Id base_type, std::string_vie
         const std::string_view def_name_view(
             def_name.data(),
             fmt::format_to_n(def_name.data(), def_name.size(), "{}x{}", name, i + 1).size);
-        defs[i] = sirit_ctx.Name(sirit_ctx.TypeVector(base_type, i + 1), def_name_view);
+        defs[static_cast<size_t>(i)] =
+            sirit_ctx.Name(sirit_ctx.TypeVector(base_type, i + 1), def_name_view);
     }
 }
 
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.cpp b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
index 32512a0e5f..355cf0ca8a 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
@@ -16,7 +16,7 @@
 namespace Shader::Backend::SPIRV {
 namespace {
 template <class Func>
-struct FuncTraits : FuncTraits<Func> {};
+struct FuncTraits {};
 
 template <class ReturnType_, class... Args>
 struct FuncTraits<ReturnType_ (*)(Args...)> {
@@ -64,17 +64,20 @@ ArgType Arg(EmitContext& ctx, const IR::Value& arg) {
 template <auto func, bool is_first_arg_inst, size_t... I>
 void Invoke(EmitContext& ctx, IR::Inst* inst, std::index_sequence<I...>) {
     using Traits = FuncTraits<decltype(func)>;
-    if constexpr (std::is_same_v<Traits::ReturnType, Id>) {
+    if constexpr (std::is_same_v<typename Traits::ReturnType, Id>) {
         if constexpr (is_first_arg_inst) {
-            SetDefinition<func>(ctx, inst, inst, Arg<Traits::ArgType<I + 2>>(ctx, inst->Arg(I))...);
+            SetDefinition<func>(
+                ctx, inst, inst,
+                Arg<typename Traits::template ArgType<I + 2>>(ctx, inst->Arg(I))...);
         } else {
-            SetDefinition<func>(ctx, inst, Arg<Traits::ArgType<I + 1>>(ctx, inst->Arg(I))...);
+            SetDefinition<func>(
+                ctx, inst, Arg<typename Traits::template ArgType<I + 1>>(ctx, inst->Arg(I))...);
         }
     } else {
         if constexpr (is_first_arg_inst) {
-            func(ctx, inst, Arg<Traits::ArgType<I + 2>>(ctx, inst->Arg(I))...);
+            func(ctx, inst, Arg<typename Traits::template ArgType<I + 2>>(ctx, inst->Arg(I))...);
         } else {
-            func(ctx, Arg<Traits::ArgType<I + 1>>(ctx, inst->Arg(I))...);
+            func(ctx, Arg<typename Traits::template ArgType<I + 1>>(ctx, inst->Arg(I))...);
         }
     }
 }
@@ -94,14 +97,14 @@ void Invoke(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitInst(EmitContext& ctx, IR::Inst* inst) {
-    switch (inst->Opcode()) {
+    switch (inst->GetOpcode()) {
 #define OPCODE(name, result_type, ...)                                                             \
     case IR::Opcode::name:                                                                         \
         return Invoke<&Emit##name>(ctx, inst);
 #include "shader_recompiler/frontend/ir/opcodes.inc"
 #undef OPCODE
     }
-    throw LogicError("Invalid opcode {}", inst->Opcode());
+    throw LogicError("Invalid opcode {}", inst->GetOpcode());
 }
 
 Id TypeId(const EmitContext& ctx, IR::Type type) {
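
Two portability fixes meet in this diff. The old primary template declared
itself as its own base ("struct FuncTraits : FuncTraits<Func> {}"), which GCC
and Clang reject; since only the function-pointer specialization is ever used,
an empty primary template suffices. Separately, dependent names inside a
template require the typename/template disambiguators under standard two-phase
lookup, which MSVC historically did not enforce. A reduced sketch of the
intended shape (this ArgType definition is illustrative, not the project's
exact one):

    #include <cstddef>
    #include <tuple>

    template <class Func>
    struct FuncTraits {}; // empty primary template, never used directly

    template <class R, class... Args>
    struct FuncTraits<R (*)(Args...)> {
        using ReturnType = R;
        template <std::size_t I>
        using ArgType = std::tuple_element_t<I, std::tuple<Args...>>;
    };

    // Call sites inside another template then need the disambiguators:
    //   std::is_same_v<typename Traits::ReturnType, Id>
    //   Arg<typename Traits::template ArgType<I + 1>>(...)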
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
index f0f8db8c37..815ca62992 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -43,11 +43,13 @@ public:
             // LOG_WARNING("Not all arguments in PTP are immediate, STUBBING");
             return;
         }
-        const IR::Opcode opcode{values[0]->Opcode()};
-        if (opcode != values[1]->Opcode() || opcode != IR::Opcode::CompositeConstructU32x4) {
+        const IR::Opcode opcode{values[0]->GetOpcode()};
+        if (opcode != values[1]->GetOpcode() || opcode != IR::Opcode::CompositeConstructU32x4) {
             throw LogicError("Invalid PTP arguments");
         }
-        auto read{[&](int a, int b) { return ctx.Constant(ctx.U32[1], values[a]->Arg(b).U32()); }};
+        auto read{[&](unsigned int a, unsigned int b) {
+            return ctx.Constant(ctx.U32[1], values[a]->Arg(b).U32());
+        }};
 
         const Id offsets{
             ctx.ConstantComposite(ctx.TypeArray(ctx.U32[2], ctx.Constant(ctx.U32[1], 4)),
@@ -297,13 +299,14 @@ Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id
 
 Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
                        const IR::Value& offset, const IR::Value& offset2, Id dref) {
-    const auto info{inst->Flags<IR::TextureInstInfo>()};
     const ImageOperands operands(ctx, offset, offset2);
     return Emit(&EmitContext::OpImageSparseDrefGather, &EmitContext::OpImageDrefGather, ctx, inst,
                 ctx.F32[4], Texture(ctx, index), coords, dref, operands.Mask(), operands.Span());
 }
 
+#ifdef _WIN32
 #pragma optimize("", off)
+#endif
 
 Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id offset,
                   Id lod, Id ms) {
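
The guard is needed because #pragma optimize("", off) is MSVC-specific and
GCC/Clang diagnose unknown pragmas, which -Werror makes fatal. _WIN32 is a
loose proxy for "MSVC-like compiler"; if anything, _MSC_VER would name the
intent more precisely (a hypothetical tightening, not what the patch does):

    #ifdef _MSC_VER
    #pragma optimize("", off) // MSVC-only escape hatch around a codegen issue
    #endif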
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp
index c57bd291db..12a03ed6ed 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp
@@ -7,7 +7,7 @@
 namespace Shader::Backend::SPIRV {
 namespace {
 Id WarpExtract(EmitContext& ctx, Id value) {
-    const Id shift{ctx.Constant(ctx.U32[1], 5)};
+    [[maybe_unused]] const Id shift{ctx.Constant(ctx.U32[1], 5)};
     const Id local_index{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
     return ctx.OpVectorExtractDynamic(ctx.U32[1], value, local_index);
 }
diff --git a/src/shader_recompiler/file_environment.h b/src/shader_recompiler/file_environment.h
index 17640a6229..71601f8fd6 100644
--- a/src/shader_recompiler/file_environment.h
+++ b/src/shader_recompiler/file_environment.h
@@ -7,7 +7,7 @@
 
 namespace Shader {
 
-class FileEnvironment final : public Environment {
+class FileEnvironment : public Environment {
 public:
     explicit FileEnvironment(const char* path);
     ~FileEnvironment() override;
diff --git a/src/shader_recompiler/frontend/ir/attribute.cpp b/src/shader_recompiler/frontend/ir/attribute.cpp
index 4811242ea0..7993e5c436 100644
--- a/src/shader_recompiler/frontend/ir/attribute.cpp
+++ b/src/shader_recompiler/frontend/ir/attribute.cpp
@@ -17,7 +17,7 @@ u32 GenericAttributeIndex(Attribute attribute) {
     if (!IsGeneric(attribute)) {
         throw InvalidArgument("Attribute is not generic {}", attribute);
     }
-    return (static_cast<int>(attribute) - static_cast<int>(Attribute::Generic0X)) / 4;
+    return (static_cast<u32>(attribute) - static_cast<u32>(Attribute::Generic0X)) / 4u;
 }
 
 std::string NameOf(Attribute attribute) {
@@ -444,4 +444,4 @@ std::string NameOf(Attribute attribute) {
     return fmt::format("<reserved attribute {}>", static_cast<int>(attribute));
 }
 
-} // namespace Shader::IR
\ No newline at end of file
+} // namespace Shader::IR
diff --git a/src/shader_recompiler/frontend/ir/basic_block.cpp b/src/shader_recompiler/frontend/ir/basic_block.cpp
index ec029dfd6e..e1f0191f40 100644
--- a/src/shader_recompiler/frontend/ir/basic_block.cpp
+++ b/src/shader_recompiler/frontend/ir/basic_block.cpp
@@ -155,7 +155,7 @@ std::string DumpBlock(const Block& block, const std::map<const Block*, size_t>&
     ret += fmt::format(": begin={:04x} end={:04x}\n", block.LocationBegin(), block.LocationEnd());
 
     for (const Inst& inst : block) {
-        const Opcode op{inst.Opcode()};
+        const Opcode op{inst.GetOpcode()};
         ret += fmt::format("[{:016x}] ", reinterpret_cast<u64>(&inst));
         if (TypeOf(op) != Type::Void) {
             ret += fmt::format("%{:<5} = {}", InstIndex(inst_to_index, inst_index, &inst), op);
diff --git a/src/shader_recompiler/frontend/ir/condition.cpp b/src/shader_recompiler/frontend/ir/condition.cpp
index ec1659e2bc..fc18ea2a2f 100644
--- a/src/shader_recompiler/frontend/ir/condition.cpp
+++ b/src/shader_recompiler/frontend/ir/condition.cpp
@@ -12,10 +12,10 @@ namespace Shader::IR {
 
 std::string NameOf(Condition condition) {
     std::string ret;
-    if (condition.FlowTest() != FlowTest::T) {
-        ret = fmt::to_string(condition.FlowTest());
+    if (condition.GetFlowTest() != FlowTest::T) {
+        ret = fmt::to_string(condition.GetFlowTest());
     }
-    const auto [pred, negated]{condition.Pred()};
+    const auto [pred, negated]{condition.GetPred()};
     if (!ret.empty()) {
         ret += '&';
     }
diff --git a/src/shader_recompiler/frontend/ir/condition.h b/src/shader_recompiler/frontend/ir/condition.h
index 51c2f15cf5..aa8597c608 100644
--- a/src/shader_recompiler/frontend/ir/condition.h
+++ b/src/shader_recompiler/frontend/ir/condition.h
@@ -30,11 +30,11 @@ public:
 
     auto operator<=>(const Condition&) const noexcept = default;
 
-    [[nodiscard]] IR::FlowTest FlowTest() const noexcept {
+    [[nodiscard]] IR::FlowTest GetFlowTest() const noexcept {
         return static_cast<IR::FlowTest>(flow_test);
     }
 
-    [[nodiscard]] std::pair<IR::Pred, bool> Pred() const noexcept {
+    [[nodiscard]] std::pair<IR::Pred, bool> GetPred() const noexcept {
         return {static_cast<IR::Pred>(pred), pred_negated != 0};
     }
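
The Get prefix here is likely not a style preference: GCC (without
-fpermissive) rejects a member declaration that changes the meaning of a name
already used in the class, and Condition both refers to the type IR::FlowTest
and declared a member function named FlowTest. Renaming the accessors
sidesteps the lookup conflict. Reduced illustration:

    enum class FlowTest { T, F };

    struct Condition {
        FlowTest flow_test{};          // "FlowTest" names the enum here...
        // FlowTest FlowTest() const;  // ...so GCC: "declaration of
        //                             //    'FlowTest' changes meaning"
        FlowTest GetFlowTest() const { // distinct name, no conflict
            return flow_test;
        }
    };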
 
diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.cpp b/src/shader_recompiler/frontend/ir/ir_emitter.cpp
index 13eb2de4c4..a2104bdb31 100644
--- a/src/shader_recompiler/frontend/ir/ir_emitter.cpp
+++ b/src/shader_recompiler/frontend/ir/ir_emitter.cpp
@@ -290,8 +290,8 @@ static U1 GetFlowTest(IREmitter& ir, FlowTest flow_test) {
 }
 
 U1 IREmitter::Condition(IR::Condition cond) {
-    const FlowTest flow_test{cond.FlowTest()};
-    const auto [pred, is_negated]{cond.Pred()};
+    const FlowTest flow_test{cond.GetFlowTest()};
+    const auto [pred, is_negated]{cond.GetPred()};
     return LogicalAnd(GetPred(pred, is_negated), GetFlowTest(*this, flow_test));
 }
 
diff --git a/src/shader_recompiler/frontend/ir/microinstruction.cpp b/src/shader_recompiler/frontend/ir/microinstruction.cpp
index 481202d94b..ceb44e6042 100644
--- a/src/shader_recompiler/frontend/ir/microinstruction.cpp
+++ b/src/shader_recompiler/frontend/ir/microinstruction.cpp
@@ -12,7 +12,7 @@
 namespace Shader::IR {
 namespace {
 void CheckPseudoInstruction(IR::Inst* inst, IR::Opcode opcode) {
-    if (inst && inst->Opcode() != opcode) {
+    if (inst && inst->GetOpcode() != opcode) {
         throw LogicError("Invalid pseudo-instruction");
     }
 }
@@ -25,11 +25,17 @@ void SetPseudoInstruction(IR::Inst*& dest_inst, IR::Inst* pseudo_inst) {
 }
 
 void RemovePseudoInstruction(IR::Inst*& inst, IR::Opcode expected_opcode) {
-    if (inst->Opcode() != expected_opcode) {
+    if (inst->GetOpcode() != expected_opcode) {
         throw LogicError("Undoing use of invalid pseudo-op");
     }
     inst = nullptr;
 }
+
+void AllocAssociatedInsts(std::unique_ptr<AssociatedInsts>& associated_insts) {
+    if (!associated_insts) {
+        associated_insts = std::make_unique<AssociatedInsts>();
+    }
+}
 } // Anonymous namespace
 
 Inst::Inst(IR::Opcode op_, u32 flags_) noexcept : op{op_}, flags{flags_} {
@@ -249,12 +255,6 @@ void Inst::ReplaceOpcode(IR::Opcode opcode) {
     op = opcode;
 }
 
-void AllocAssociatedInsts(std::unique_ptr<AssociatedInsts>& associated_insts) {
-    if (!associated_insts) {
-        associated_insts = std::make_unique<AssociatedInsts>();
-    }
-}
-
 void Inst::Use(const Value& value) {
     Inst* const inst{value.Inst()};
     ++inst->use_count;
diff --git a/src/shader_recompiler/frontend/ir/microinstruction.h b/src/shader_recompiler/frontend/ir/microinstruction.h
index 6658dc674e..97dc91d855 100644
--- a/src/shader_recompiler/frontend/ir/microinstruction.h
+++ b/src/shader_recompiler/frontend/ir/microinstruction.h
@@ -46,7 +46,7 @@ public:
     }
 
     /// Get the opcode this microinstruction represents.
-    [[nodiscard]] IR::Opcode Opcode() const noexcept {
+    [[nodiscard]] IR::Opcode GetOpcode() const noexcept {
         return op;
     }
 
@@ -95,7 +95,7 @@ public:
     requires(sizeof(FlagsType) <= sizeof(u32) && std::is_trivially_copyable_v<FlagsType>)
         [[nodiscard]] FlagsType Flags() const noexcept {
         FlagsType ret;
-        std::memcpy(&ret, &flags, sizeof(ret));
+        std::memcpy(reinterpret_cast<char*>(&ret), &flags, sizeof(ret));
         return ret;
     }
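
Copying through a char* destination makes the bytewise write explicit and
keeps GCC's -Wclass-memaccess quiet when FlagsType is a class type; since the
function already requires flags to be trivially copyable and no larger than
the u32 storage, behavior is unchanged. A stand-alone sketch of the pattern:

    #include <cstring>
    #include <type_traits>

    // Stand-in for Inst::Flags(): extract a small trivially-copyable value
    // from the raw u32 flag storage.
    template <typename FlagsType>
    FlagsType ReadFlags(const unsigned& flags) {
        static_assert(std::is_trivially_copyable_v<FlagsType>);
        static_assert(sizeof(FlagsType) <= sizeof(flags));
        FlagsType ret;
        std::memcpy(reinterpret_cast<char*>(&ret), &flags, sizeof(ret));
        return ret;
    }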
 
diff --git a/src/shader_recompiler/frontend/ir/opcodes.cpp b/src/shader_recompiler/frontend/ir/opcodes.cpp
index 1cb9db6c9c..002dbf94e9 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.cpp
+++ b/src/shader_recompiler/frontend/ir/opcodes.cpp
@@ -49,7 +49,7 @@ constexpr std::array META_TABLE{
 #define OPCODE(name_token, type_token, ...)                                                        \
     OpcodeMeta{                                                                                    \
         .name{#name_token},                                                                        \
-        .type{type_token},                                                                         \
+        .type = type_token,                                                                         \
         .arg_types{__VA_ARGS__},                                                                   \
     },
 #include "opcodes.inc"
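
This is the "braced scalar init" item from the commit message: Type is an
enumeration, i.e. a scalar, and Clang's -Wbraced-scalar-init fires on the
redundant braces in .type{type_token}; the designated form .type = type_token
says the same thing without the warning. The same .member{value} to
.member = value rewrite recurs across the rest of this patch. Reduced form:

    enum class Type { Void };
    struct OpcodeMeta { Type type; };

    constexpr OpcodeMeta warned{.type{Type::Void}};  // -Wbraced-scalar-init
    constexpr OpcodeMeta clean{.type = Type::Void};  // equivalent, quiet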
diff --git a/src/shader_recompiler/frontend/ir/program.cpp b/src/shader_recompiler/frontend/ir/program.cpp
index 5f51aeb5f3..89a17fb1b4 100644
--- a/src/shader_recompiler/frontend/ir/program.cpp
+++ b/src/shader_recompiler/frontend/ir/program.cpp
@@ -2,8 +2,6 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-#pragma once
-
 #include <map>
 #include <string>
 
diff --git a/src/shader_recompiler/frontend/ir/value.cpp b/src/shader_recompiler/frontend/ir/value.cpp
index 837c1b487f..1e7ffb86d5 100644
--- a/src/shader_recompiler/frontend/ir/value.cpp
+++ b/src/shader_recompiler/frontend/ir/value.cpp
@@ -33,11 +33,11 @@ Value::Value(u64 value) noexcept : type{Type::U64}, imm_u64{value} {}
 Value::Value(f64 value) noexcept : type{Type::F64}, imm_f64{value} {}
 
 bool Value::IsIdentity() const noexcept {
-    return type == Type::Opaque && inst->Opcode() == Opcode::Identity;
+    return type == Type::Opaque && inst->GetOpcode() == Opcode::Identity;
 }
 
 bool Value::IsPhi() const noexcept {
-    return type == Type::Opaque && inst->Opcode() == Opcode::Phi;
+    return type == Type::Opaque && inst->GetOpcode() == Opcode::Phi;
 }
 
 bool Value::IsEmpty() const noexcept {
diff --git a/src/shader_recompiler/frontend/ir/value.h b/src/shader_recompiler/frontend/ir/value.h
index b27601e704..a0962863d8 100644
--- a/src/shader_recompiler/frontend/ir/value.h
+++ b/src/shader_recompiler/frontend/ir/value.h
@@ -94,7 +94,7 @@ public:
         }
     }
 
-    explicit TypedValue(IR::Inst* inst) : TypedValue(Value(inst)) {}
+    explicit TypedValue(IR::Inst* inst_) : TypedValue(Value(inst_)) {}
 };
 
 using U1 = TypedValue<Type::U1>;
diff --git a/src/shader_recompiler/frontend/maxwell/control_flow.cpp b/src/shader_recompiler/frontend/maxwell/control_flow.cpp
index 847bb19864..cb8ec7eaa3 100644
--- a/src/shader_recompiler/frontend/maxwell/control_flow.cpp
+++ b/src/shader_recompiler/frontend/maxwell/control_flow.cpp
@@ -34,41 +34,37 @@ struct Compare {
 };
 
 u32 BranchOffset(Location pc, Instruction inst) {
-    return pc.Offset() + inst.branch.Offset() + 8;
+    return pc.Offset() + static_cast<u32>(inst.branch.Offset()) + 8u;
 }
 
 void Split(Block* old_block, Block* new_block, Location pc) {
     if (pc <= old_block->begin || pc >= old_block->end) {
         throw InvalidArgument("Invalid address to split={}", pc);
     }
-    *new_block = Block{
-        .begin{pc},
-        .end{old_block->end},
-        .end_class{old_block->end_class},
-        .cond{old_block->cond},
-        .stack{old_block->stack},
-        .branch_true{old_block->branch_true},
-        .branch_false{old_block->branch_false},
-        .function_call{old_block->function_call},
-        .return_block{old_block->return_block},
-        .branch_reg{old_block->branch_reg},
-        .branch_offset{old_block->branch_offset},
-        .indirect_branches{std::move(old_block->indirect_branches)},
-    };
-    *old_block = Block{
-        .begin{old_block->begin},
-        .end{pc},
-        .end_class{EndClass::Branch},
-        .cond{true},
-        .stack{std::move(old_block->stack)},
-        .branch_true{new_block},
-        .branch_false{nullptr},
-        .function_call{},
-        .return_block{},
-        .branch_reg{},
-        .branch_offset{},
-        .indirect_branches{},
-    };
+    *new_block = Block{};
+    new_block->begin = pc;
+    new_block->end = old_block->end;
+    new_block->end_class = old_block->end_class;
+    new_block->cond = old_block->cond;
+    new_block->stack = old_block->stack;
+    new_block->branch_true = old_block->branch_true;
+    new_block->branch_false = old_block->branch_false;
+    new_block->function_call = old_block->function_call;
+    new_block->return_block = old_block->return_block;
+    new_block->branch_reg = old_block->branch_reg;
+    new_block->branch_offset = old_block->branch_offset;
+    new_block->indirect_branches = std::move(old_block->indirect_branches);
+
+    const Location old_begin{old_block->begin};
+    Stack old_stack{std::move(old_block->stack)};
+    *old_block = Block{};
+    old_block->begin = old_begin;
+    old_block->end = pc;
+    old_block->end_class = EndClass::Branch;
+    old_block->cond = IR::Condition(true);
+    old_block->stack = old_stack;
+    old_block->branch_true = new_block;
+    old_block->branch_false = nullptr;
 }
 
 Token OpcodeToken(Opcode opcode) {
@@ -141,7 +137,7 @@ std::string NameOf(const Block& block) {
 
 void Stack::Push(Token token, Location target) {
     entries.push_back({
-        .token{token},
+        .token = token,
         .target{target},
     });
 }
@@ -177,24 +173,17 @@ bool Block::Contains(Location pc) const noexcept {
 }
 
 Function::Function(ObjectPool<Block>& block_pool, Location start_address)
-    : entrypoint{start_address}, labels{{
-                                     .address{start_address},
-                                     .block{block_pool.Create(Block{
-                                         .begin{start_address},
-                                         .end{start_address},
-                                         .end_class{EndClass::Branch},
-                                         .cond{true},
-                                         .stack{},
-                                         .branch_true{nullptr},
-                                         .branch_false{nullptr},
-                                         .function_call{},
-                                         .return_block{},
-                                         .branch_reg{},
-                                         .branch_offset{},
-                                         .indirect_branches{},
-                                     })},
-                                     .stack{},
-                                 }} {}
+    : entrypoint{start_address} {
+    Label& label{labels.emplace_back()};
+    label.address = start_address;
+    label.block = block_pool.Create(Block{});
+    label.block->begin = start_address;
+    label.block->end = start_address;
+    label.block->end_class = EndClass::Branch;
+    label.block->cond = IR::Condition(true);
+    label.block->branch_true = nullptr;
+    label.block->branch_false = nullptr;
+}
 
 CFG::CFG(Environment& env_, ObjectPool<Block>& block_pool_, Location start_address)
     : env{env_}, block_pool{block_pool_}, program_start{start_address} {
@@ -327,7 +316,8 @@ CFG::AnalysisState CFG::AnalyzeInst(Block* block, FunctionId function_id, Locati
         // Insert the function into the list if it doesn't exist
         const auto it{std::ranges::find(functions, cal_pc, &Function::entrypoint)};
         const bool exists{it != functions.end()};
-        const FunctionId call_id{exists ? std::distance(functions.begin(), it) : functions.size()};
+        const FunctionId call_id{exists ? static_cast<size_t>(std::distance(functions.begin(), it))
+                                        : functions.size()};
         if (!exists) {
             functions.emplace_back(block_pool, cal_pc);
         }
@@ -362,20 +352,14 @@ void CFG::AnalyzeCondInst(Block* block, FunctionId function_id, Location pc,
     }
     // Create a virtual block and a conditional block
     Block* const conditional_block{block_pool.Create()};
-    Block virtual_block{
-        .begin{block->begin.Virtual()},
-        .end{block->begin.Virtual()},
-        .end_class{EndClass::Branch},
-        .cond{cond},
-        .stack{block->stack},
-        .branch_true{conditional_block},
-        .branch_false{nullptr},
-        .function_call{},
-        .return_block{},
-        .branch_reg{},
-        .branch_offset{},
-        .indirect_branches{},
-    };
+    Block virtual_block{};
+    virtual_block.begin = block->begin.Virtual();
+    virtual_block.end = block->begin.Virtual();
+    virtual_block.end_class = EndClass::Branch;
+    virtual_block.stack = block->stack;
+    virtual_block.cond = cond;
+    virtual_block.branch_true = conditional_block;
+    virtual_block.branch_false = nullptr;
     // Save the contents of the visited block in the conditional block
     *conditional_block = std::move(*block);
     // Impersonate the visited block with a virtual block
@@ -444,7 +428,7 @@ CFG::AnalysisState CFG::AnalyzeBRX(Block* block, Location pc, Instruction inst,
         if (!is_absolute) {
             target += pc.Offset();
         }
-        target += brx_table->branch_offset;
+        target += static_cast<unsigned int>(brx_table->branch_offset);
         target += 8;
         targets.push_back(target);
     }
@@ -455,8 +439,8 @@ CFG::AnalysisState CFG::AnalyzeBRX(Block* block, Location pc, Instruction inst,
     for (const u32 target : targets) {
         Block* const branch{AddLabel(block, block->stack, target, function_id)};
         block->indirect_branches.push_back({
-            .block{branch},
-            .address{target},
+            .block = branch,
+            .address = target,
         });
     }
     block->cond = IR::Condition{true};
@@ -523,23 +507,17 @@ Block* CFG::AddLabel(Block* block, Stack stack, Location pc, FunctionId function
     if (label_it != function.labels.end()) {
         return label_it->block;
     }
-    Block* const new_block{block_pool.Create(Block{
-        .begin{pc},
-        .end{pc},
-        .end_class{EndClass::Branch},
-        .cond{true},
-        .stack{stack},
-        .branch_true{nullptr},
-        .branch_false{nullptr},
-        .function_call{},
-        .return_block{},
-        .branch_reg{},
-        .branch_offset{},
-        .indirect_branches{},
-    })};
+    Block* const new_block{block_pool.Create()};
+    new_block->begin = pc;
+    new_block->end = pc;
+    new_block->end_class = EndClass::Branch;
+    new_block->cond = IR::Condition(true);
+    new_block->stack = stack;
+    new_block->branch_true = nullptr;
+    new_block->branch_false = nullptr;
     function.labels.push_back(Label{
         .address{pc},
-        .block{new_block},
+        .block = new_block,
         .stack{std::move(stack)},
     });
     return new_block;
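
The control-flow rewrites above all follow one recipe: replace a large
designated-initializer Block literal built inside control flow with default
construction plus member-wise assignment. Per the commit notes this avoids
both the braced-scalar-init warnings on members like .cond{true} and the
"list initializers in control flow" problem on the targeted compilers, while
producing the same end state. Reduced sketch (Block2 is a hypothetical
stand-in, not the real Block):

    struct Block2 {
        int begin{}, end{};
        bool cond{};
    };

    void Reset(Block2& b, int pc) {
        b = Block2{}; // default-construct, then assign what differs
        b.begin = pc;
        b.end = pc;
        b.cond = true; // instead of Block2{.begin{pc}, ..., .cond{true}}
    }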
diff --git a/src/shader_recompiler/frontend/maxwell/decode.cpp b/src/shader_recompiler/frontend/maxwell/decode.cpp
index bd85afa1e5..932d19c1d4 100644
--- a/src/shader_recompiler/frontend/maxwell/decode.cpp
+++ b/src/shader_recompiler/frontend/maxwell/decode.cpp
@@ -45,7 +45,7 @@ constexpr MaskValue MaskValueFromEncoding(const char* encoding) {
             bit >>= 1;
         }
     }
-    return MaskValue{.mask{mask}, .value{value}};
+    return MaskValue{.mask = mask, .value = value};
 }
 
 struct InstEncoding {
@@ -56,7 +56,7 @@ constexpr std::array UNORDERED_ENCODINGS{
 #define INST(name, cute, encode)                                                                   \
     InstEncoding{                                                                                  \
         .mask_value{MaskValueFromEncoding(encode)},                                                \
-        .opcode{Opcode::name},                                                                     \
+        .opcode = Opcode::name,                                                                     \
     },
 #include "maxwell.inc"
 #undef INST
@@ -116,9 +116,9 @@ constexpr auto MakeFastLookupTableIndex(size_t index) {
         const size_t value{ToFastLookupIndex(encoding.mask_value.value)};
         if ((index & mask) == value) {
             encodings.at(element) = InstInfo{
-                .high_mask{static_cast<u16>(encoding.mask_value.mask >> MASK_SHIFT)},
-                .high_value{static_cast<u16>(encoding.mask_value.value >> MASK_SHIFT)},
-                .opcode{encoding.opcode},
+                .high_mask = static_cast<u16>(encoding.mask_value.mask >> MASK_SHIFT),
+                .high_value = static_cast<u16>(encoding.mask_value.value >> MASK_SHIFT),
+                .opcode = encoding.opcode,
             };
             ++element;
         }
diff --git a/src/shader_recompiler/frontend/maxwell/indirect_branch_table_track.cpp b/src/shader_recompiler/frontend/maxwell/indirect_branch_table_track.cpp
index 96453509d5..008625cb37 100644
--- a/src/shader_recompiler/frontend/maxwell/indirect_branch_table_track.cpp
+++ b/src/shader_recompiler/frontend/maxwell/indirect_branch_table_track.cpp
@@ -97,11 +97,11 @@ std::optional<IndirectBranchTableInfo> TrackIndirectBranchTable(Environment& env
     }
     const u32 imnmx_immediate{static_cast<u32>(imnmx.immediate.Value())};
     return IndirectBranchTableInfo{
-        .cbuf_index{cbuf_index},
-        .cbuf_offset{cbuf_offset},
-        .num_entries{imnmx_immediate + 1},
-        .branch_offset{brx_offset},
-        .branch_reg{brx_reg},
+        .cbuf_index = cbuf_index,
+        .cbuf_offset = cbuf_offset,
+        .num_entries = imnmx_immediate + 1,
+        .branch_offset = brx_offset,
+        .branch_reg = brx_reg,
     };
 }
 
diff --git a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
index c804c2a8e9..02cef26455 100644
--- a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
+++ b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
@@ -558,7 +558,6 @@ private:
         const Node label{goto_stmt->label};
         const u32 label_id{label->id};
         const Node label_nested_stmt{FindStatementWithLabel(body, goto_stmt)};
-        const auto type{label_nested_stmt->type};
 
         Tree loop_body;
         loop_body.splice(loop_body.begin(), body, label_nested_stmt, goto_stmt);
@@ -566,7 +565,7 @@ private:
         Statement* const variable{pool.Create(Variable{}, label_id)};
         Statement* const loop_stmt{pool.Create(Loop{}, variable, std::move(loop_body), parent)};
         UpdateTreeUp(loop_stmt);
-        const Node loop_node{body.insert(goto_stmt, *loop_stmt)};
+        body.insert(goto_stmt, *loop_stmt);
 
         Statement* const new_goto{pool.Create(Goto{}, variable, label, loop_stmt)};
         loop_stmt->children.push_front(*new_goto);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/double_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/double_add.cpp
index ac1433dea7..5a1b3a8fcb 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/double_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/double_add.cpp
@@ -31,9 +31,9 @@ void DADD(TranslatorVisitor& v, u64 insn, const IR::F64& src_b) {
     const IR::F64 op_b{v.ir.FPAbsNeg(src_b, dadd.abs_b != 0, dadd.neg_b != 0)};
 
     const IR::FpControl control{
-        .no_contraction{true},
-        .rounding{CastFpRounding(dadd.fp_rounding)},
-        .fmz_mode{IR::FmzMode::None},
+        .no_contraction = true,
+        .rounding = CastFpRounding(dadd.fp_rounding),
+        .fmz_mode = IR::FmzMode::None,
     };
 
     v.D(dadd.dest_reg, v.ir.FPAdd(op_a, op_b, control));
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/double_fused_multiply_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/double_fused_multiply_add.cpp
index ff73218629..7238414962 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/double_fused_multiply_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/double_fused_multiply_add.cpp
@@ -25,9 +25,9 @@ void DFMA(TranslatorVisitor& v, u64 insn, const IR::F64& src_b, const IR::F64& s
     const IR::F64 op_c{v.ir.FPAbsNeg(src_c, false, dfma.neg_c != 0)};
 
     const IR::FpControl control{
-        .no_contraction{true},
-        .rounding{CastFpRounding(dfma.fp_rounding)},
-        .fmz_mode{IR::FmzMode::None},
+        .no_contraction = true,
+        .rounding = CastFpRounding(dfma.fp_rounding),
+        .fmz_mode = IR::FmzMode::None,
     };
 
     v.D(dfma.dest_reg, v.ir.FPFma(src_a, op_b, op_c, control));
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/double_multiply.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/double_multiply.cpp
index 3e83d1c95c..4a49299a0b 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/double_multiply.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/double_multiply.cpp
@@ -21,9 +21,9 @@ void DMUL(TranslatorVisitor& v, u64 insn, const IR::F64& src_b) {
 
     const IR::F64 src_a{v.ir.FPAbsNeg(v.D(dmul.src_a_reg), false, dmul.neg != 0)};
     const IR::FpControl control{
-        .no_contraction{true},
-        .rounding{CastFpRounding(dmul.fp_rounding)},
-        .fmz_mode{IR::FmzMode::None},
+        .no_contraction = true,
+        .rounding = CastFpRounding(dmul.fp_rounding),
+        .fmz_mode = IR::FmzMode::None,
     };
 
     v.D(dmul.dest_reg, v.ir.FPMul(src_a, src_b, control));
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_add.cpp
index b39950c849..b8c89810cb 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_add.cpp
@@ -23,9 +23,9 @@ void FADD(TranslatorVisitor& v, u64 insn, bool sat, bool cc, bool ftz, FpRoundin
     const IR::F32 op_a{v.ir.FPAbsNeg(v.F(fadd.src_a), abs_a, neg_a)};
     const IR::F32 op_b{v.ir.FPAbsNeg(src_b, abs_b, neg_b)};
     IR::FpControl control{
-        .no_contraction{true},
-        .rounding{CastFpRounding(fp_rounding)},
-        .fmz_mode{ftz ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = true,
+        .rounding = CastFpRounding(fp_rounding),
+        .fmz_mode = (ftz ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
     IR::F32 value{v.ir.FPAdd(op_a, op_b, control)};
     if (sat) {
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare.cpp
index c02a40209e..80109ca0e5 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare.cpp
@@ -19,8 +19,7 @@ void FCMP(TranslatorVisitor& v, u64 insn, const IR::U32& src_a, const IR::F32& o
     } const fcmp{insn};
 
     const IR::F32 zero{v.ir.Imm32(0.0f)};
-    const IR::F32 neg_zero{v.ir.Imm32(-0.0f)};
-    const IR::FpControl control{.fmz_mode{fcmp.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None}};
+    const IR::FpControl control{.fmz_mode = (fcmp.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None)};
     const IR::U1 cmp_result{FloatingPointCompare(v.ir, operand, zero, fcmp.compare_op, control)};
     const IR::U32 src_reg{v.X(fcmp.src_reg)};
     const IR::U32 result{v.ir.Select(cmp_result, src_reg, src_a)};
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare_and_set.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare_and_set.cpp
index c5417775e1..b9f4ee0d9b 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare_and_set.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare_and_set.cpp
@@ -29,9 +29,9 @@ void FSET(TranslatorVisitor& v, u64 insn, const IR::F32& src_b) {
     const IR::F32 op_a{v.ir.FPAbsNeg(v.F(fset.src_a_reg), fset.abs_a != 0, fset.negate_a != 0)};
     const IR::F32 op_b = v.ir.FPAbsNeg(src_b, fset.abs_b != 0, fset.negate_b != 0);
     const IR::FpControl control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{fset.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = (fset.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
 
     IR::U1 pred{v.ir.GetPred(fset.pred)};
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_floating_point.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_floating_point.cpp
index 1e366fde03..035f8782a7 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_floating_point.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_floating_point.cpp
@@ -57,9 +57,9 @@ void F2F(TranslatorVisitor& v, u64 insn, const IR::F16F32F64& src_a, bool abs) {
 
     const bool any_fp64{f2f.src_size == FloatFormat::F64 || f2f.dst_size == FloatFormat::F64};
     IR::FpControl fp_control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{f2f.ftz != 0 && !any_fp64 ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = (f2f.ftz != 0 && !any_fp64 ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
     if (f2f.src_size != f2f.dst_size) {
         fp_control.rounding = CastFpRounding(f2f.rounding);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_integer.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_integer.cpp
index 21ae92be1e..cf3cf1ba69 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_integer.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_integer.cpp
@@ -123,9 +123,9 @@ void TranslateF2I(TranslatorVisitor& v, u64 insn, const IR::F16F32F64& src_a) {
         fmz_mode = f2i.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None;
     }
     const IR::FpControl fp_control{
-        .no_contraction{true},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{fmz_mode},
+        .no_contraction = true,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = fmz_mode,
     };
     const IR::F16F32F64 op_a{v.ir.FPAbsNeg(src_a, f2i.abs != 0, f2i.neg != 0)};
     const IR::F16F32F64 rounded_value{[&] {
@@ -186,14 +186,14 @@ void TranslateF2I(TranslatorVisitor& v, u64 insn, const IR::F16F32F64& src_a) {
         } else if (f2i.dest_format == DestFormat::I64) {
             handled_special_case = true;
             result = IR::U64{
-                v.ir.Select(v.ir.FPIsNan(op_a), v.ir.Imm64(0x8000'0000'0000'0000ULL), result)};
+                v.ir.Select(v.ir.FPIsNan(op_a), v.ir.Imm64(0x8000'0000'0000'0000UL), result)};
         }
     }
     if (!handled_special_case && is_signed) {
         if (bitsize != 64) {
             result = IR::U32{v.ir.Select(v.ir.FPIsNan(op_a), v.ir.Imm32(0U), result)};
         } else {
-            result = IR::U64{v.ir.Select(v.ir.FPIsNan(op_a), v.ir.Imm64(0ULL), result)};
+            result = IR::U64{v.ir.Select(v.ir.FPIsNan(op_a), v.ir.Imm64(0UL), result)};
         }
     }
 
@@ -211,6 +211,7 @@ void TranslateF2I(TranslatorVisitor& v, u64 insn, const IR::F16F32F64& src_a) {
 
 void TranslatorVisitor::F2I_reg(u64 insn) {
     union {
+        u64 raw;
         F2I base;
         BitField<20, 8, IR::Reg> src_reg;
     } const f2i{insn};
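
Adding u64 raw as the first alternative changes what {insn} initializes:
brace-initializing a union initializes its first member, so the encoding now
starts as a plain integer instead of routing through the nested F2I union's
first member. Reduced form:

    union Encoding {
        unsigned long long raw; // {insn} now lands here
        // BitField views of the same bits would follow...
    };

    constexpr Encoding e{0x1234ull}; // initializes raw directly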
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_fused_multiply_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_fused_multiply_add.cpp
index 18561bc9c7..fa2a7807b7 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_fused_multiply_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_fused_multiply_add.cpp
@@ -24,9 +24,9 @@ void FFMA(TranslatorVisitor& v, u64 insn, const IR::F32& src_b, const IR::F32& s
     const IR::F32 op_b{v.ir.FPAbsNeg(src_b, false, neg_b)};
     const IR::F32 op_c{v.ir.FPAbsNeg(src_c, false, neg_c)};
     const IR::FpControl fp_control{
-        .no_contraction{true},
-        .rounding{CastFpRounding(fp_rounding)},
-        .fmz_mode{CastFmzMode(fmz_mode)},
+        .no_contraction = true,
+        .rounding = CastFpRounding(fp_rounding),
+        .fmz_mode = CastFmzMode(fmz_mode),
     };
     IR::F32 value{v.ir.FPFma(op_a, op_b, op_c, fp_control)};
     if (fmz_mode == FmzMode::FMZ && !sat) {
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_min_max.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_min_max.cpp
index 343d91032b..8ae4375287 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_min_max.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_min_max.cpp
@@ -27,9 +27,9 @@ void FMNMX(TranslatorVisitor& v, u64 insn, const IR::F32& src_b) {
     const IR::F32 op_b{v.ir.FPAbsNeg(src_b, fmnmx.abs_b != 0, fmnmx.negate_b != 0)};
 
     const IR::FpControl control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{fmnmx.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = (fmnmx.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
     IR::F32 max{v.ir.FPMax(op_a, op_b, control)};
     IR::F32 min{v.ir.FPMin(op_a, op_b, control)};
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_multiply.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_multiply.cpp
index 72f0a18ae8..06226b7ce2 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_multiply.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_multiply.cpp
@@ -64,9 +64,9 @@ void FMUL(TranslatorVisitor& v, u64 insn, const IR::F32& src_b, FmzMode fmz_mode
     }
     const IR::F32 op_b{v.ir.FPAbsNeg(src_b, false, neg_b)};
     const IR::FpControl fp_control{
-        .no_contraction{true},
-        .rounding{CastFpRounding(fp_rounding)},
-        .fmz_mode{CastFmzMode(fmz_mode)},
+        .no_contraction = true,
+        .rounding = CastFpRounding(fp_rounding),
+        .fmz_mode = CastFmzMode(fmz_mode),
     };
     IR::F32 value{v.ir.FPMul(op_a, op_b, fp_control)};
     if (fmz_mode == FmzMode::FMZ && !sat) {
@@ -124,4 +124,4 @@ void TranslatorVisitor::FMUL32I(u64 insn) {
          fmul32i.sat != 0, fmul32i.cc != 0, false);
 }
 
-} // namespace Shader::Maxwell
\ No newline at end of file
+} // namespace Shader::Maxwell
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_set_predicate.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_set_predicate.cpp
index 8ff9db8438..5f93a15130 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_set_predicate.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_set_predicate.cpp
@@ -29,9 +29,9 @@ void FSETP(TranslatorVisitor& v, u64 insn, const IR::F32& src_b) {
     const IR::F32 op_a{v.ir.FPAbsNeg(v.F(fsetp.src_a_reg), fsetp.abs_a != 0, fsetp.negate_a != 0)};
     const IR::F32 op_b = v.ir.FPAbsNeg(src_b, fsetp.abs_b != 0, fsetp.negate_b != 0);
     const IR::FpControl control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{fsetp.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = (fsetp.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
 
     const BooleanOp bop{fsetp.bop};
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_swizzled_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_swizzled_add.cpp
index e42921a216..7550a8d4c4 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_swizzled_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_swizzled_add.cpp
@@ -28,9 +28,9 @@ void TranslatorVisitor::FSWZADD(u64 insn) {
     const IR::U32 swizzle{ir.Imm32(static_cast<u32>(fswzadd.swizzle))};
 
     const IR::FpControl fp_control{
-        .no_contraction{false},
-        .rounding{CastFpRounding(fswzadd.round)},
-        .fmz_mode{fswzadd.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = false,
+        .rounding = CastFpRounding(fswzadd.round),
+        .fmz_mode = (fswzadd.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
 
     const IR::F32 result{ir.FSwizzleAdd(src_a, src_b, swizzle, fp_control)};
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_add.cpp
index 03e7bf047d..f2738a93b2 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_add.cpp
@@ -34,9 +34,9 @@ void HADD2(TranslatorVisitor& v, u64 insn, Merge merge, bool ftz, bool sat, bool
     rhs_b = v.ir.FPAbsNeg(rhs_b, abs_b, neg_b);
 
     const IR::FpControl fp_control{
-        .no_contraction{true},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{ftz ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = true,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = (ftz ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
     IR::F16F32F64 lhs{v.ir.FPAdd(lhs_a, lhs_b, fp_control)};
     IR::F16F32F64 rhs{v.ir.FPAdd(rhs_a, rhs_b, fp_control)};
@@ -102,8 +102,9 @@ void TranslatorVisitor::HADD2_imm(u64 insn) {
         BitField<20, 9, u64> low;
     } const hadd2{insn};
 
-    const u32 imm{static_cast<u32>(hadd2.low << 6) | ((hadd2.neg_low != 0 ? 1 : 0) << 15) |
-                  static_cast<u32>(hadd2.high << 22) | ((hadd2.neg_high != 0 ? 1 : 0) << 31)};
+    const u32 imm{
+        static_cast<u32>(hadd2.low << 6) | static_cast<u32>((hadd2.neg_low != 0 ? 1 : 0) << 15) |
+        static_cast<u32>(hadd2.high << 22) | static_cast<u32>((hadd2.neg_high != 0 ? 1 : 0) << 31)};
     HADD2(*this, insn, hadd2.sat != 0, false, false, Swizzle::H1_H0, ir.Imm32(imm));
 }
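
The reshaped immediate packing above (and its HFMA2/HMUL2/HSET2/HSETP2 twins
below) is a warning fix, not a behavior change: each term like
(neg_low != 0 ? 1 : 0) << 15 has type int, and OR-ing it into a u32
initializer is an implicit signed-to-unsigned conversion; casting every term
makes the conversion explicit. Minimal runnable form:

    #include <cstdint>

    std::uint32_t PackLow(std::uint64_t low, bool neg_low) {
        // Each operand of | is cast up front so the whole expression is
        // unsigned; previously the ?:/<< subterm was a plain int.
        return static_cast<std::uint32_t>(low << 6) |
               static_cast<std::uint32_t>((neg_low ? 1 : 0) << 15);
    }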
 
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_fused_multiply_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_fused_multiply_add.cpp
index 8b234bd6ae..fd79867016 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_fused_multiply_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_fused_multiply_add.cpp
@@ -41,9 +41,9 @@ void HFMA2(TranslatorVisitor& v, u64 insn, Merge merge, Swizzle swizzle_a, bool
     rhs_c = v.ir.FPAbsNeg(rhs_c, false, neg_c);
 
     const IR::FpControl fp_control{
-        .no_contraction{true},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{HalfPrecision2FmzMode(precision)},
+        .no_contraction = true,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = HalfPrecision2FmzMode(precision),
     };
     IR::F16F32F64 lhs{v.ir.FPFma(lhs_a, lhs_b, lhs_c, fp_control)};
     IR::F16F32F64 rhs{v.ir.FPFma(rhs_a, rhs_b, rhs_c, fp_control)};
@@ -143,8 +143,9 @@ void TranslatorVisitor::HFMA2_imm(u64 insn) {
         BitField<57, 2, HalfPrecision> precision;
     } const hfma2{insn};
 
-    const u32 imm{static_cast<u32>(hfma2.low << 6) | ((hfma2.neg_low != 0 ? 1 : 0) << 15) |
-                  static_cast<u32>(hfma2.high << 22) | ((hfma2.neg_high != 0 ? 1 : 0) << 31)};
+    const u32 imm{
+        static_cast<u32>(hfma2.low << 6) | static_cast<u32>((hfma2.neg_low != 0 ? 1 : 0) << 15) |
+        static_cast<u32>(hfma2.high << 22) | static_cast<u32>((hfma2.neg_high != 0 ? 1 : 0) << 31)};
 
     HFMA2(*this, insn, false, hfma2.neg_c != 0, Swizzle::H1_H0, hfma2.swizzle_c, ir.Imm32(imm),
           GetReg39(insn), hfma2.saturate != 0, hfma2.precision);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_multiply.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_multiply.cpp
index 2451a6ef68..3f548ce761 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_multiply.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_multiply.cpp
@@ -35,9 +35,9 @@ void HMUL2(TranslatorVisitor& v, u64 insn, Merge merge, bool sat, bool abs_a, bo
     rhs_b = v.ir.FPAbsNeg(rhs_b, abs_b, neg_b);
 
     const IR::FpControl fp_control{
-        .no_contraction{true},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{HalfPrecision2FmzMode(precision)},
+        .no_contraction = true,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = HalfPrecision2FmzMode(precision),
     };
     IR::F16F32F64 lhs{v.ir.FPMul(lhs_a, lhs_b, fp_control)};
     IR::F16F32F64 rhs{v.ir.FPMul(rhs_a, rhs_b, fp_control)};
@@ -119,8 +119,9 @@ void TranslatorVisitor::HMUL2_imm(u64 insn) {
         BitField<44, 1, u64> abs_a;
     } const hmul2{insn};
 
-    const u32 imm{static_cast<u32>(hmul2.low << 6) | ((hmul2.neg_low != 0 ? 1 : 0) << 15) |
-                  static_cast<u32>(hmul2.high << 22) | ((hmul2.neg_high != 0 ? 1 : 0) << 31)};
+    const u32 imm{
+        static_cast<u32>(hmul2.low << 6) | static_cast<u32>((hmul2.neg_low != 0 ? 1 : 0) << 15) |
+        static_cast<u32>(hmul2.high << 22) | static_cast<u32>((hmul2.neg_high != 0 ? 1 : 0) << 31)};
     HMUL2(*this, insn, hmul2.sat != 0, hmul2.abs_a != 0, hmul2.neg_a != 0, false, false,
           Swizzle::H1_H0, ir.Imm32(imm));
 }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set.cpp
index 7f1f4b88c8..cca5b831fd 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set.cpp
@@ -41,9 +41,9 @@ void HSET2(TranslatorVisitor& v, u64 insn, const IR::U32& src_b, bool bf, bool f
     rhs_b = v.ir.FPAbsNeg(rhs_b, abs_b, neg_b);
 
     const IR::FpControl control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{ftz ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = (ftz ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
 
     IR::U1 pred{v.ir.GetPred(hset2.pred)};
@@ -106,8 +106,9 @@ void TranslatorVisitor::HSET2_imm(u64 insn) {
         BitField<20, 9, u64> low;
     } const hset2{insn};
 
-    const u32 imm{static_cast<u32>(hset2.low << 6) | ((hset2.neg_low != 0 ? 1 : 0) << 15) |
-                  static_cast<u32>(hset2.high << 22) | ((hset2.neg_high != 0 ? 1 : 0) << 31)};
+    const u32 imm{
+        static_cast<u32>(hset2.low << 6) | static_cast<u32>((hset2.neg_low != 0 ? 1 : 0) << 15) |
+        static_cast<u32>(hset2.high << 22) | static_cast<u32>((hset2.neg_high != 0 ? 1 : 0) << 31)};
 
     HSET2(*this, insn, ir.Imm32(imm), hset2.bf != 0, hset2.ftz != 0, false, false, hset2.compare_op,
           Swizzle::H1_H0);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set_predicate.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set_predicate.cpp
index 3e2a23c92d..b3931dae32 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set_predicate.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set_predicate.cpp
@@ -43,9 +43,9 @@ void HSETP2(TranslatorVisitor& v, u64 insn, const IR::U32& src_b, bool neg_b, bo
     rhs_b = v.ir.FPAbsNeg(rhs_b, abs_b, neg_b);
 
     const IR::FpControl control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{hsetp2.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = (hsetp2.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
 
     IR::U1 pred{v.ir.GetPred(hsetp2.pred)};
@@ -106,8 +106,10 @@ void TranslatorVisitor::HSETP2_imm(u64 insn) {
         BitField<20, 9, u64> low;
     } const hsetp2{insn};
 
-    const u32 imm{static_cast<u32>(hsetp2.low << 6) | ((hsetp2.neg_low != 0 ? 1 : 0) << 15) |
-                  static_cast<u32>(hsetp2.high << 22) | ((hsetp2.neg_high != 0 ? 1 : 0) << 31)};
+    const u32 imm{static_cast<u32>(hsetp2.low << 6) |
+                  static_cast<u32>((hsetp2.neg_low != 0 ? 1 : 0) << 15) |
+                  static_cast<u32>(hsetp2.high << 22) |
+                  static_cast<u32>((hsetp2.neg_high != 0 ? 1 : 0) << 31)};
 
     HSETP2(*this, insn, ir.Imm32(imm), false, false, Swizzle::H1_H0, hsetp2.compare_op,
            hsetp2.h_and != 0);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/impl.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/impl.cpp
index 30b570ce4d..88bbac0a50 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/impl.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/impl.cpp
@@ -49,7 +49,7 @@ void TranslatorVisitor::L(IR::Reg dest_reg, const IR::U64& value) {
     }
     const IR::Value result{ir.UnpackUint2x32(value)};
     for (int i = 0; i < 2; i++) {
-        X(dest_reg + i, IR::U32{ir.CompositeExtract(result, i)});
+        X(dest_reg + i, IR::U32{ir.CompositeExtract(result, static_cast<size_t>(i))});
     }
 }
 
@@ -63,7 +63,7 @@ void TranslatorVisitor::D(IR::Reg dest_reg, const IR::F64& value) {
     }
     const IR::Value result{ir.UnpackDouble2x32(value)};
     for (int i = 0; i < 2; i++) {
-        X(dest_reg + i, IR::U32{ir.CompositeExtract(result, i)});
+        X(dest_reg + i, IR::U32{ir.CompositeExtract(result, static_cast<size_t>(i))});
     }
 }
 
@@ -156,7 +156,7 @@ IR::F64 TranslatorVisitor::GetDoubleCbuf(u64 insn) {
     const auto [binding, offset_value]{CbufAddr(insn)};
     const bool unaligned{cbuf.unaligned != 0};
     const u32 offset{offset_value.U32()};
-    const IR::Value addr{unaligned ? offset | 4 : (offset & ~7) | 4};
+    const IR::Value addr{unaligned ? offset | 4u : (offset & ~7u) | 4u};
 
     const IR::U32 value{ir.GetCbuf(binding, IR::U32{addr})};
     const IR::U32 lower_bits{CbufLowerBits(ir, unaligned, binding, offset)};
@@ -200,7 +200,7 @@ IR::F32 TranslatorVisitor::GetFloatImm20(u64 insn) {
         BitField<20, 19, u64> value;
         BitField<56, 1, u64> is_negative;
     } const imm{insn};
-    const u32 sign_bit{imm.is_negative != 0 ? (1ULL << 31) : 0};
+    const u32 sign_bit{static_cast<u32>(imm.is_negative != 0 ? (1ULL << 31) : 0)};
     const u32 value{static_cast<u32>(imm.value) << 12};
     return ir.Imm32(Common::BitCast<f32>(value | sign_bit));
 }
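As an aside on the GetDoubleCbuf hunk above: with the unsigned literals in place, (offset & ~7u) | 4u rounds the offset down to its 8-byte slot and then selects that slot's upper 32-bit word, while the unaligned path keeps the base offset and only sets bit 2. A standalone sketch of that address math (offsets are illustrative, and this reading of the aligned path is an interpretation, not taken from the patch):

    #include <cstdint>
    #include <cstdio>

    // Prints which 32-bit word the aligned path of GetDoubleCbuf would read
    // for a few byte offsets into a constant buffer.
    int main() {
        for (const std::uint32_t offset : {0x10u, 0x14u, 0x17u}) {
            std::printf("offset %#x -> high word at %#x\n", offset, (offset & ~7u) | 4u);
        }
    }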
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/integer_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/integer_add.cpp
index 1493e18151..8ffd84867d 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/integer_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/integer_add.cpp
@@ -68,7 +68,6 @@ void IADD(TranslatorVisitor& v, u64 insn, IR::U32 op_b) {
     } const iadd{insn};
 
     const bool po{iadd.three_for_po == 3};
-    const bool neg_a{!po && iadd.neg_a != 0};
     if (!po && iadd.neg_b != 0) {
         op_b = v.ir.INeg(op_b);
     }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/integer_floating_point_conversion.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/integer_floating_point_conversion.cpp
index e8b5ae1d2d..5a0fc36a03 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/integer_floating_point_conversion.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/integer_floating_point_conversion.cpp
@@ -131,7 +131,7 @@ void I2F(TranslatorVisitor& v, u64 insn, IR::U32U64 src) {
         }
         const IR::Value vector{v.ir.UnpackDouble2x32(value)};
         for (int i = 0; i < 2; ++i) {
-            v.X(i2f.dest_reg + i, IR::U32{v.ir.CompositeExtract(vector, i)});
+            v.X(i2f.dest_reg + i, IR::U32{v.ir.CompositeExtract(vector, static_cast<size_t>(i))});
         }
         break;
     }
@@ -170,4 +170,4 @@ void TranslatorVisitor::I2F_imm(u64 insn) {
     }
 }
 
-} // namespace Shader::Maxwell
\ No newline at end of file
+} // namespace Shader::Maxwell
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/load_constant.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/load_constant.cpp
index ae3ecea325..2300088e38 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/load_constant.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/load_constant.cpp
@@ -50,7 +50,7 @@ void TranslatorVisitor::LDC(u64 insn) {
         }
         const IR::Value vector{ir.GetCbuf(index, offset, 64, false)};
         for (int i = 0; i < 2; ++i) {
-            X(ldc.dest_reg + i, IR::U32{ir.CompositeExtract(vector, i)});
+            X(ldc.dest_reg + i, IR::U32{ir.CompositeExtract(vector, static_cast<size_t>(i))});
         }
         break;
     }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_local_shared.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_local_shared.cpp
index 68963c8ea6..e24b497210 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_local_shared.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_local_shared.cpp
@@ -40,7 +40,6 @@ std::pair<int, bool> GetSize(u64 insn) {
         BitField<48, 3, Size> size;
     } const encoding{insn};
 
-    const Size nnn = encoding.size;
     switch (encoding.size) {
     case Size::U8:
         return {8, false};
@@ -99,7 +98,7 @@ void TranslatorVisitor::LDL(u64 insn) {
     case 32:
     case 64:
     case 128:
-        if (!IR::IsAligned(dest, bit_size / 32)) {
+        if (!IR::IsAligned(dest, static_cast<size_t>(bit_size / 32))) {
             throw NotImplementedException("Unaligned destination register {}", dest);
         }
         X(dest, ir.LoadLocal(word_offset));
@@ -123,11 +122,11 @@ void TranslatorVisitor::LDS(u64 insn) {
         break;
     case 64:
     case 128:
-        if (!IR::IsAligned(dest, bit_size / 32)) {
+        if (!IR::IsAligned(dest, static_cast<size_t>(bit_size / 32))) {
             throw NotImplementedException("Unaligned destination register {}", dest);
         }
         for (int element = 0; element < bit_size / 32; ++element) {
-            X(dest + element, IR::U32{ir.CompositeExtract(value, element)});
+            X(dest + element, IR::U32{ir.CompositeExtract(value, static_cast<size_t>(element))});
         }
         break;
     }
@@ -156,7 +155,7 @@ void TranslatorVisitor::STL(u64 insn) {
     case 32:
     case 64:
     case 128:
-        if (!IR::IsAligned(reg, bit_size / 32)) {
+        if (!IR::IsAligned(reg, static_cast<size_t>(bit_size / 32))) {
             throw NotImplementedException("Unaligned source register");
         }
         ir.WriteLocal(word_offset, src);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_memory.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_memory.cpp
index 71688b1d78..36c5cff2f1 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_memory.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_memory.cpp
@@ -114,7 +114,7 @@ void TranslatorVisitor::LDG(u64 insn) {
         }
         const IR::Value vector{ir.LoadGlobal64(address)};
         for (int i = 0; i < 2; ++i) {
-            X(dest_reg + i, IR::U32{ir.CompositeExtract(vector, i)});
+            X(dest_reg + i, IR::U32{ir.CompositeExtract(vector, static_cast<size_t>(i))});
         }
         break;
     }
@@ -125,7 +125,7 @@ void TranslatorVisitor::LDG(u64 insn) {
         }
         const IR::Value vector{ir.LoadGlobal128(address)};
         for (int i = 0; i < 4; ++i) {
-            X(dest_reg + i, IR::U32{ir.CompositeExtract(vector, i)});
+            X(dest_reg + i, IR::U32{ir.CompositeExtract(vector, static_cast<size_t>(i))});
         }
         break;
     }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch.cpp
index b2da079f9c..95d4165863 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch.cpp
@@ -199,7 +199,7 @@ void Impl(TranslatorVisitor& v, u64 insn, bool aoffi, Blod blod, bool lc,
         if (tex.dc != 0) {
             value = element < 3 ? IR::F32{sample} : v.ir.Imm32(1.0f);
         } else {
-            value = IR::F32{v.ir.CompositeExtract(sample, element)};
+            value = IR::F32{v.ir.CompositeExtract(sample, static_cast<size_t>(element))};
         }
         v.F(dest_reg, value);
         ++dest_reg;
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch_swizzled.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch_swizzled.cpp
index d5fda20f42..fe2c7db85d 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch_swizzled.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch_swizzled.cpp
@@ -53,7 +53,7 @@ constexpr std::array RGBA_LUT{
     R | G | B | A, //
 };
 
-void CheckAlignment(IR::Reg reg, int alignment) {
+void CheckAlignment(IR::Reg reg, size_t alignment) {
     if (!IR::IsAligned(reg, alignment)) {
         throw NotImplementedException("Unaligned source register {}", reg);
     }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_gather_swizzled.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_gather_swizzled.cpp
index beab515ad9..2ba9c1018a 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_gather_swizzled.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_gather_swizzled.cpp
@@ -37,7 +37,7 @@ union Encoding {
     BitField<36, 13, u64> cbuf_offset;
 };
 
-void CheckAlignment(IR::Reg reg, int alignment) {
+void CheckAlignment(IR::Reg reg, size_t alignment) {
     if (!IR::IsAligned(reg, alignment)) {
         throw NotImplementedException("Unaligned source register {}", reg);
     }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_load_swizzled.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_load_swizzled.cpp
index 623b8fc23b..0863bdfcd4 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_load_swizzled.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_load_swizzled.cpp
@@ -56,7 +56,7 @@ union Encoding {
     BitField<53, 4, u64> encoding;
 };
 
-void CheckAlignment(IR::Reg reg, int alignment) {
+void CheckAlignment(IR::Reg reg, size_t alignment) {
     if (!IR::IsAligned(reg, alignment)) {
         throw NotImplementedException("Unaligned source register {}", reg);
     }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_query.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_query.cpp
index 8c7e04bcab..0459e5473e 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_query.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_query.cpp
@@ -54,7 +54,7 @@ void Impl(TranslatorVisitor& v, u64 insn, std::optional<u32> cbuf_offset) {
         if (((txq.mask >> element) & 1) == 0) {
             continue;
         }
-        v.X(dest_reg, IR::U32{v.ir.CompositeExtract(query, element)});
+        v.X(dest_reg, IR::U32{v.ir.CompositeExtract(query, static_cast<size_t>(element))});
         ++dest_reg;
     }
 }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/video_set_predicate.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/video_set_predicate.cpp
index af13b3fccf..ec5e74f6d8 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/video_set_predicate.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/video_set_predicate.cpp
@@ -69,7 +69,6 @@ void TranslatorVisitor::VSETP(u64 insn) {
     const IR::U32 src_b{is_b_imm ? ir.Imm32(static_cast<u32>(vsetp.src_b_imm)) : GetReg20(insn)};
 
     const u32 a_selector{static_cast<u32>(vsetp.src_a_selector)};
-    const u32 b_selector{is_b_imm ? 0U : static_cast<u32>(vsetp.src_b_selector)};
     const VideoWidth a_width{vsetp.src_a_width};
     const VideoWidth b_width{GetVideoSourceWidth(vsetp.src_b_width, is_b_imm)};
 
diff --git a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
index 1c03ee82af..edbfcd3082 100644
--- a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
+++ b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
@@ -6,6 +6,7 @@
 #include "shader_recompiler/frontend/ir/microinstruction.h"
 #include "shader_recompiler/frontend/ir/modifiers.h"
 #include "shader_recompiler/frontend/ir/program.h"
+#include "shader_recompiler/ir_opt/passes.h"
 #include "shader_recompiler/shader_info.h"
 
 namespace Shader::Optimization {
@@ -22,8 +23,8 @@ void AddConstantBufferDescriptor(Info& info, u32 index, u32 count) {
     auto& cbufs{info.constant_buffer_descriptors};
     cbufs.insert(std::ranges::lower_bound(cbufs, index, {}, &ConstantBufferDescriptor::index),
                  ConstantBufferDescriptor{
-                     .index{index},
-                     .count{1},
+                     .index = index,
+                     .count = 1,
                  });
 }
 
@@ -91,7 +92,7 @@ void SetAttribute(Info& info, IR::Attribute attribute) {
 }
 
 void VisitUsages(Info& info, IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::CompositeConstructF16x2:
     case IR::Opcode::CompositeConstructF16x3:
     case IR::Opcode::CompositeConstructF16x4:
@@ -209,7 +210,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
     default:
         break;
     }
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::GetCbufU8:
     case IR::Opcode::GetCbufS8:
     case IR::Opcode::UndefU8:
@@ -236,7 +237,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
     default:
         break;
     }
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::GetCbufU16:
     case IR::Opcode::GetCbufS16:
     case IR::Opcode::UndefU16:
@@ -271,7 +272,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
     default:
         break;
     }
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::UndefU64:
     case IR::Opcode::LoadGlobalU8:
     case IR::Opcode::LoadGlobalS8:
@@ -314,7 +315,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
     default:
         break;
     }
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::DemoteToHelperInvocation:
         info.uses_demote_to_helper_invocation = true;
         break;
@@ -361,7 +362,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
         } else {
             throw NotImplementedException("Constant buffer with non-immediate index");
         }
-        switch (inst.Opcode()) {
+        switch (inst.GetOpcode()) {
         case IR::Opcode::GetCbufU8:
         case IR::Opcode::GetCbufS8:
             info.used_constant_buffer_types |= IR::Type::U8;
@@ -443,7 +444,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
 }
 
 void VisitFpModifiers(Info& info, IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::FPAdd16:
     case IR::Opcode::FPFma16:
     case IR::Opcode::FPMul16:
@@ -540,7 +541,6 @@ void GatherInfoFromHeader(Environment& env, Info& info) {
         info.stores_position |= header.vtg.omap_systemb.position != 0;
     }
 }
-
 } // Anonymous namespace
 
 void CollectShaderInfoPass(Environment& env, IR::Program& program) {
diff --git a/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp b/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp
index 1720d7a092..61fbbe04cb 100644
--- a/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp
+++ b/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp
@@ -58,7 +58,7 @@ bool FoldCommutative(IR::Inst& inst, ImmFn&& imm_fn) {
     }
     if (is_lhs_immediate && !is_rhs_immediate) {
         IR::Inst* const rhs_inst{rhs.InstRecursive()};
-        if (rhs_inst->Opcode() == inst.Opcode() && rhs_inst->Arg(1).IsImmediate()) {
+        if (rhs_inst->GetOpcode() == inst.GetOpcode() && rhs_inst->Arg(1).IsImmediate()) {
             const auto combined{imm_fn(Arg<T>(lhs), Arg<T>(rhs_inst->Arg(1)))};
             inst.SetArg(0, rhs_inst->Arg(0));
             inst.SetArg(1, IR::Value{combined});
@@ -70,7 +70,7 @@ bool FoldCommutative(IR::Inst& inst, ImmFn&& imm_fn) {
     }
     if (!is_lhs_immediate && is_rhs_immediate) {
         const IR::Inst* const lhs_inst{lhs.InstRecursive()};
-        if (lhs_inst->Opcode() == inst.Opcode() && lhs_inst->Arg(1).IsImmediate()) {
+        if (lhs_inst->GetOpcode() == inst.GetOpcode() && lhs_inst->Arg(1).IsImmediate()) {
             const auto combined{imm_fn(Arg<T>(rhs), Arg<T>(lhs_inst->Arg(1)))};
             inst.SetArg(0, lhs_inst->Arg(0));
             inst.SetArg(1, IR::Value{combined});
@@ -123,7 +123,8 @@ bool FoldXmadMultiply(IR::Block& block, IR::Inst& inst) {
         return false;
     }
     IR::Inst* const lhs_shl{lhs_arg.InstRecursive()};
-    if (lhs_shl->Opcode() != IR::Opcode::ShiftLeftLogical32 || lhs_shl->Arg(1) != IR::Value{16U}) {
+    if (lhs_shl->GetOpcode() != IR::Opcode::ShiftLeftLogical32 ||
+        lhs_shl->Arg(1) != IR::Value{16U}) {
         return false;
     }
     if (lhs_shl->Arg(0).IsImmediate()) {
@@ -131,7 +132,7 @@ bool FoldXmadMultiply(IR::Block& block, IR::Inst& inst) {
     }
     IR::Inst* const lhs_mul{lhs_shl->Arg(0).InstRecursive()};
     IR::Inst* const rhs_mul{rhs_arg.InstRecursive()};
-    if (lhs_mul->Opcode() != IR::Opcode::IMul32 || rhs_mul->Opcode() != IR::Opcode::IMul32) {
+    if (lhs_mul->GetOpcode() != IR::Opcode::IMul32 || rhs_mul->GetOpcode() != IR::Opcode::IMul32) {
         return false;
     }
     if (lhs_mul->Arg(1).Resolve() != rhs_mul->Arg(1).Resolve()) {
@@ -143,10 +144,10 @@ bool FoldXmadMultiply(IR::Block& block, IR::Inst& inst) {
     }
     IR::Inst* const lhs_bfe{lhs_mul->Arg(0).InstRecursive()};
     IR::Inst* const rhs_bfe{rhs_mul->Arg(0).InstRecursive()};
-    if (lhs_bfe->Opcode() != IR::Opcode::BitFieldUExtract) {
+    if (lhs_bfe->GetOpcode() != IR::Opcode::BitFieldUExtract) {
         return false;
     }
-    if (rhs_bfe->Opcode() != IR::Opcode::BitFieldUExtract) {
+    if (rhs_bfe->GetOpcode() != IR::Opcode::BitFieldUExtract) {
         return false;
     }
     if (lhs_bfe->Arg(1) != IR::Value{16U} || lhs_bfe->Arg(2) != IR::Value{16U}) {
@@ -194,8 +195,9 @@ void FoldISub32(IR::Inst& inst) {
     // ISub32 is generally used to subtract two constant buffers, compare and replace this with
     // zero if they equal.
     const auto equal_cbuf{[](IR::Inst* a, IR::Inst* b) {
-        return a->Opcode() == IR::Opcode::GetCbufU32 && b->Opcode() == IR::Opcode::GetCbufU32 &&
-               a->Arg(0) == b->Arg(0) && a->Arg(1) == b->Arg(1);
+        return a->GetOpcode() == IR::Opcode::GetCbufU32 &&
+               b->GetOpcode() == IR::Opcode::GetCbufU32 && a->Arg(0) == b->Arg(0) &&
+               a->Arg(1) == b->Arg(1);
     }};
     IR::Inst* op_a{inst.Arg(0).InstRecursive()};
     IR::Inst* op_b{inst.Arg(1).InstRecursive()};
@@ -204,15 +206,15 @@ void FoldISub32(IR::Inst& inst) {
         return;
     }
     // It's also possible a value is being added to a cbuf and then subtracted
-    if (op_b->Opcode() == IR::Opcode::IAdd32) {
+    if (op_b->GetOpcode() == IR::Opcode::IAdd32) {
         // Canonicalize local variables to simplify the following logic
         std::swap(op_a, op_b);
     }
-    if (op_b->Opcode() != IR::Opcode::GetCbufU32) {
+    if (op_b->GetOpcode() != IR::Opcode::GetCbufU32) {
         return;
     }
     IR::Inst* const inst_cbuf{op_b};
-    if (op_a->Opcode() != IR::Opcode::IAdd32) {
+    if (op_a->GetOpcode() != IR::Opcode::IAdd32) {
         return;
     }
     IR::Value add_op_a{op_a->Arg(0)};
@@ -250,7 +252,8 @@ void FoldFPMul32(IR::Inst& inst) {
     }
     IR::Inst* const lhs_op{lhs_value.InstRecursive()};
     IR::Inst* const rhs_op{rhs_value.InstRecursive()};
-    if (lhs_op->Opcode() != IR::Opcode::FPMul32 || rhs_op->Opcode() != IR::Opcode::FPRecip32) {
+    if (lhs_op->GetOpcode() != IR::Opcode::FPMul32 ||
+        rhs_op->GetOpcode() != IR::Opcode::FPRecip32) {
         return;
     }
     const IR::Value recip_source{rhs_op->Arg(0)};
@@ -260,8 +263,8 @@ void FoldFPMul32(IR::Inst& inst) {
     }
     IR::Inst* const attr_a{recip_source.InstRecursive()};
     IR::Inst* const attr_b{lhs_mul_source.InstRecursive()};
-    if (attr_a->Opcode() != IR::Opcode::GetAttribute ||
-        attr_b->Opcode() != IR::Opcode::GetAttribute) {
+    if (attr_a->GetOpcode() != IR::Opcode::GetAttribute ||
+        attr_b->GetOpcode() != IR::Opcode::GetAttribute) {
         return;
     }
     if (attr_a->Arg(0).Attribute() == attr_b->Arg(0).Attribute()) {
@@ -304,7 +307,7 @@ void FoldLogicalNot(IR::Inst& inst) {
         return;
     }
     IR::Inst* const arg{value.InstRecursive()};
-    if (arg->Opcode() == IR::Opcode::LogicalNot) {
+    if (arg->GetOpcode() == IR::Opcode::LogicalNot) {
         inst.ReplaceUsesWith(arg->Arg(0));
     }
 }
@@ -317,12 +320,12 @@ void FoldBitCast(IR::Inst& inst, IR::Opcode reverse) {
         return;
     }
     IR::Inst* const arg_inst{value.InstRecursive()};
-    if (arg_inst->Opcode() == reverse) {
+    if (arg_inst->GetOpcode() == reverse) {
         inst.ReplaceUsesWith(arg_inst->Arg(0));
         return;
     }
     if constexpr (op == IR::Opcode::BitCastF32U32) {
-        if (arg_inst->Opcode() == IR::Opcode::GetCbufU32) {
+        if (arg_inst->GetOpcode() == IR::Opcode::GetCbufU32) {
             // Replace the bitcast with a typed constant buffer read
             inst.ReplaceOpcode(IR::Opcode::GetCbufF32);
             inst.SetArg(0, arg_inst->Arg(0));
@@ -338,7 +341,7 @@ void FoldInverseFunc(IR::Inst& inst, IR::Opcode reverse) {
         return;
     }
     IR::Inst* const arg_inst{value.InstRecursive()};
-    if (arg_inst->Opcode() == reverse) {
+    if (arg_inst->GetOpcode() == reverse) {
         inst.ReplaceUsesWith(arg_inst->Arg(0));
         return;
     }
@@ -347,7 +350,7 @@ void FoldInverseFunc(IR::Inst& inst, IR::Opcode reverse) {
 template <typename Func, size_t... I>
 IR::Value EvalImmediates(const IR::Inst& inst, Func&& func, std::index_sequence<I...>) {
     using Traits = LambdaTraits<decltype(func)>;
-    return IR::Value{func(Arg<Traits::ArgType<I>>(inst.Arg(I))...)};
+    return IR::Value{func(Arg<typename Traits::template ArgType<I>>(inst.Arg(I))...)};
 }
 
 void FoldBranchConditional(IR::Inst& inst) {
@@ -357,7 +360,7 @@ void FoldBranchConditional(IR::Inst& inst) {
         return;
     }
     const IR::Inst* cond_inst{cond.InstRecursive()};
-    if (cond_inst->Opcode() == IR::Opcode::LogicalNot) {
+    if (cond_inst->GetOpcode() == IR::Opcode::LogicalNot) {
         const IR::Value true_label{inst.Arg(1)};
         const IR::Value false_label{inst.Arg(2)};
         // Remove negation on the conditional (take the parameter out of LogicalNot) and swap
@@ -371,10 +374,10 @@ void FoldBranchConditional(IR::Inst& inst) {
 std::optional<IR::Value> FoldCompositeExtractImpl(IR::Value inst_value, IR::Opcode insert,
                                                   IR::Opcode construct, u32 first_index) {
     IR::Inst* const inst{inst_value.InstRecursive()};
-    if (inst->Opcode() == construct) {
+    if (inst->GetOpcode() == construct) {
         return inst->Arg(first_index);
     }
-    if (inst->Opcode() != insert) {
+    if (inst->GetOpcode() != insert) {
         return std::nullopt;
     }
     IR::Value value_index{inst->Arg(2)};
@@ -410,7 +413,7 @@ void FoldCompositeExtract(IR::Inst& inst, IR::Opcode construct, IR::Opcode inser
 }
 
 void ConstantPropagation(IR::Block& block, IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::GetRegister:
         return FoldGetRegister(inst);
     case IR::Opcode::GetPred:
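The FoldXmadMultiply change above is only cosmetic, but the fold itself rests on a modular identity worth spelling out: splitting one operand into 16-bit halves, multiplying each half, and recombining with a shift reproduces the full 32-bit product modulo 2^32. A self-contained check (the operand layout here is illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
        const std::uint32_t a = 0xDEADBEEF;
        const std::uint32_t b = 0x12345678;
        const std::uint32_t lo = a & 0xFFFF;  // low 16 bits, as BitFieldUExtract(a, 0, 16)
        const std::uint32_t hi = a >> 16;     // high 16 bits, as BitFieldUExtract(a, 16, 16)
        // Unsigned arithmetic wraps, so both sides agree modulo 2^32.
        assert(((hi * b) << 16) + lo * b == a * b);
    }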
diff --git a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
index 0858a0bddd..90a65dd167 100644
--- a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
+++ b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
@@ -57,7 +57,7 @@ struct StorageInfo {
 
 /// Returns true when the instruction is a global memory instruction
 bool IsGlobalMemory(const IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::LoadGlobalS8:
     case IR::Opcode::LoadGlobalU8:
     case IR::Opcode::LoadGlobalS16:
@@ -80,7 +80,7 @@ bool IsGlobalMemory(const IR::Inst& inst) {
 
 /// Returns true when the instruction is a global memory instruction
 bool IsGlobalMemoryWrite(const IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::WriteGlobalS8:
     case IR::Opcode::WriteGlobalU8:
     case IR::Opcode::WriteGlobalS16:
@@ -140,7 +140,7 @@ bool MeetsBias(const StorageBufferAddr& storage_buffer, const Bias& bias) noexce
 void DiscardGlobalMemory(IR::Block& block, IR::Inst& inst) {
     IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
     const IR::Value zero{u32{0}};
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::LoadGlobalS8:
     case IR::Opcode::LoadGlobalU8:
     case IR::Opcode::LoadGlobalS16:
@@ -164,7 +164,7 @@ void DiscardGlobalMemory(IR::Block& block, IR::Inst& inst) {
         inst.Invalidate();
         break;
     default:
-        throw LogicError("Invalid opcode to discard its global memory operation {}", inst.Opcode());
+        throw LogicError("Invalid opcode to discard its global memory operation {}", inst.GetOpcode());
     }
 }
 
@@ -184,7 +184,7 @@ std::optional<LowAddrInfo> TrackLowAddress(IR::Inst* inst) {
     // This address is expected to either be a PackUint2x32 or a IAdd64
     IR::Inst* addr_inst{addr.InstRecursive()};
     s32 imm_offset{0};
-    if (addr_inst->Opcode() == IR::Opcode::IAdd64) {
+    if (addr_inst->GetOpcode() == IR::Opcode::IAdd64) {
         // If it's an IAdd64, get the immediate offset it is applying and grab the address
         // instruction. This expects for the instruction to be canonicalized having the address on
         // the first argument and the immediate offset on the second one.
@@ -200,7 +200,7 @@ std::optional<LowAddrInfo> TrackLowAddress(IR::Inst* inst) {
         addr_inst = iadd_addr.Inst();
     }
     // With IAdd64 handled, now PackUint2x32 is expected without exceptions
-    if (addr_inst->Opcode() != IR::Opcode::PackUint2x32) {
+    if (addr_inst->GetOpcode() != IR::Opcode::PackUint2x32) {
         return std::nullopt;
     }
     // PackUint2x32 is expected to be generated from a vector
@@ -210,20 +210,20 @@ std::optional<LowAddrInfo> TrackLowAddress(IR::Inst* inst) {
     }
     // This vector is expected to be a CompositeConstructU32x2
     IR::Inst* const vector_inst{vector.InstRecursive()};
-    if (vector_inst->Opcode() != IR::Opcode::CompositeConstructU32x2) {
+    if (vector_inst->GetOpcode() != IR::Opcode::CompositeConstructU32x2) {
         return std::nullopt;
     }
     // Grab the first argument from the CompositeConstructU32x2, this is the low address.
     return LowAddrInfo{
         .value{IR::U32{vector_inst->Arg(0)}},
-        .imm_offset{imm_offset},
+        .imm_offset = imm_offset,
     };
 }
 
 /// Tries to track the storage buffer address used by a global memory instruction
 std::optional<StorageBufferAddr> Track(const IR::Value& value, const Bias* bias) {
     const auto pred{[bias](const IR::Inst* inst) -> std::optional<StorageBufferAddr> {
-        if (inst->Opcode() != IR::Opcode::GetCbufU32) {
+        if (inst->GetOpcode() != IR::Opcode::GetCbufU32) {
             return std::nullopt;
         }
         const IR::Value index{inst->Arg(0)};
@@ -256,9 +256,9 @@ void CollectStorageBuffers(IR::Block& block, IR::Inst& inst, StorageInfo& info)
     // NVN puts storage buffers in a specific range, we have to bias towards these addresses to
     // avoid getting false positives
     static constexpr Bias nvn_bias{
-        .index{0},
-        .offset_begin{0x110},
-        .offset_end{0x610},
+        .index = 0,
+        .offset_begin = 0x110,
+        .offset_end = 0x610,
     };
     // Track the low address of the instruction
     const std::optional<LowAddrInfo> low_addr_info{TrackLowAddress(&inst)};
@@ -286,8 +286,8 @@ void CollectStorageBuffers(IR::Block& block, IR::Inst& inst, StorageInfo& info)
     info.set.insert(*storage_buffer);
     info.to_replace.push_back(StorageInst{
         .storage_buffer{*storage_buffer},
-        .inst{&inst},
-        .block{&block},
+        .inst = &inst,
+        .block = &block,
     });
 }
 
@@ -312,7 +312,7 @@ IR::U32 StorageOffset(IR::Block& block, IR::Inst& inst, StorageBufferAddr buffer
 /// Replace a global memory load instruction with its storage buffer equivalent
 void ReplaceLoad(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
                  const IR::U32& offset) {
-    const IR::Opcode new_opcode{GlobalToStorage(inst.Opcode())};
+    const IR::Opcode new_opcode{GlobalToStorage(inst.GetOpcode())};
     const auto it{IR::Block::InstructionList::s_iterator_to(inst)};
     const IR::Value value{&*block.PrependNewInst(it, new_opcode, {storage_index, offset})};
     inst.ReplaceUsesWith(value);
@@ -321,7 +321,7 @@ void ReplaceLoad(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
 /// Replace a global memory write instruction with its storage buffer equivalent
 void ReplaceWrite(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
                   const IR::U32& offset) {
-    const IR::Opcode new_opcode{GlobalToStorage(inst.Opcode())};
+    const IR::Opcode new_opcode{GlobalToStorage(inst.GetOpcode())};
     const auto it{IR::Block::InstructionList::s_iterator_to(inst)};
     block.PrependNewInst(it, new_opcode, {storage_index, offset, inst.Arg(1)});
     inst.Invalidate();
@@ -330,7 +330,7 @@ void ReplaceWrite(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index
 /// Replace a global memory instruction with its storage buffer equivalent
 void Replace(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
              const IR::U32& offset) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::LoadGlobalS8:
     case IR::Opcode::LoadGlobalU8:
     case IR::Opcode::LoadGlobalS16:
@@ -348,7 +348,7 @@ void Replace(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
     case IR::Opcode::WriteGlobal128:
         return ReplaceWrite(block, inst, storage_index, offset);
     default:
-        throw InvalidArgument("Invalid global memory opcode {}", inst.Opcode());
+        throw InvalidArgument("Invalid global memory opcode {}", inst.GetOpcode());
     }
 }
 } // Anonymous namespace
@@ -366,9 +366,9 @@ void GlobalMemoryToStorageBufferPass(IR::Program& program) {
     u32 storage_index{};
     for (const StorageBufferAddr& storage_buffer : info.set) {
         program.info.storage_buffers_descriptors.push_back({
-            .cbuf_index{storage_buffer.index},
-            .cbuf_offset{storage_buffer.offset},
-            .count{1},
+            .cbuf_index = storage_buffer.index,
+            .cbuf_offset = storage_buffer.offset,
+            .count = 1,
             .is_written{info.writes.contains(storage_buffer)},
         });
         ++storage_index;
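For context on the nvn_bias constants further up in this diff: per the comment in the pass, a tracked pointer is only trusted as a storage buffer when it is loaded from constant buffer 0 inside the range where NVN places storage buffers. A compile-time sketch of that predicate, assuming offset_end is exclusive as the begin/end naming suggests:

    #include <cstdint>

    // Mirrors the nvn_bias heuristic: constant buffer 0, offsets in [0x110, 0x610).
    constexpr bool MeetsNvnBias(std::uint32_t cbuf_index, std::uint32_t cbuf_offset) {
        return cbuf_index == 0 && cbuf_offset >= 0x110 && cbuf_offset < 0x610;
    }

    static_assert(MeetsNvnBias(0, 0x110));   // first offset in the biased range
    static_assert(!MeetsNvnBias(0, 0x610));  // one past the end
    static_assert(!MeetsNvnBias(1, 0x200));  // wrong constant buffer index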
diff --git a/src/shader_recompiler/ir_opt/identity_removal_pass.cpp b/src/shader_recompiler/ir_opt/identity_removal_pass.cpp
index 8790b48f21..38af72dfea 100644
--- a/src/shader_recompiler/ir_opt/identity_removal_pass.cpp
+++ b/src/shader_recompiler/ir_opt/identity_removal_pass.cpp
@@ -22,7 +22,8 @@ void IdentityRemovalPass(IR::Program& program) {
                     inst->SetArg(i, arg.Inst()->Arg(0));
                 }
             }
-            if (inst->Opcode() == IR::Opcode::Identity || inst->Opcode() == IR::Opcode::Void) {
+            if (inst->GetOpcode() == IR::Opcode::Identity ||
+                inst->GetOpcode() == IR::Opcode::Void) {
                 to_invalidate.push_back(&*inst);
                 inst = block->Instructions().erase(inst);
             } else {
diff --git a/src/shader_recompiler/ir_opt/lower_fp16_to_fp32.cpp b/src/shader_recompiler/ir_opt/lower_fp16_to_fp32.cpp
index 0d2c91ed61..52576b07fc 100644
--- a/src/shader_recompiler/ir_opt/lower_fp16_to_fp32.cpp
+++ b/src/shader_recompiler/ir_opt/lower_fp16_to_fp32.cpp
@@ -123,7 +123,7 @@ IR::Opcode Replace(IR::Opcode op) {
 void LowerFp16ToFp32(IR::Program& program) {
     for (IR::Block* const block : program.blocks) {
         for (IR::Inst& inst : block->Instructions()) {
-            inst.ReplaceOpcode(Replace(inst.Opcode()));
+            inst.ReplaceOpcode(Replace(inst.GetOpcode()));
         }
     }
 }
diff --git a/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp b/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp
index ca36253d14..346fcc3774 100644
--- a/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp
+++ b/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp
@@ -116,7 +116,7 @@ IR::Opcode UndefOpcode(IndirectBranchVariable) noexcept {
 }
 
 [[nodiscard]] bool IsPhi(const IR::Inst& inst) noexcept {
-    return inst.Opcode() == IR::Opcode::Phi;
+    return inst.GetOpcode() == IR::Opcode::Phi;
 }
 
 enum class Status {
@@ -278,7 +278,7 @@ private:
 };
 
 void VisitInst(Pass& pass, IR::Block* block, IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::SetRegister:
         if (const IR::Reg reg{inst.Arg(0).Reg()}; reg != IR::Reg::RZ) {
             pass.WriteVariable(reg, block, inst.Arg(1));
diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp
index 290ce41791..c8aee3d3d5 100644
--- a/src/shader_recompiler/ir_opt/texture_pass.cpp
+++ b/src/shader_recompiler/ir_opt/texture_pass.cpp
@@ -30,7 +30,7 @@ struct TextureInst {
 using TextureInstVector = boost::container::small_vector<TextureInst, 24>;
 
 IR::Opcode IndexedInstruction(const IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::BindlessImageSampleImplicitLod:
     case IR::Opcode::BoundImageSampleImplicitLod:
         return IR::Opcode::ImageSampleImplicitLod;
@@ -67,7 +67,7 @@ IR::Opcode IndexedInstruction(const IR::Inst& inst) {
 }
 
 bool IsBindless(const IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::BindlessImageSampleImplicitLod:
     case IR::Opcode::BindlessImageSampleExplicitLod:
     case IR::Opcode::BindlessImageSampleDrefImplicitLod:
@@ -91,7 +91,7 @@ bool IsBindless(const IR::Inst& inst) {
     case IR::Opcode::BoundImageGradient:
         return false;
     default:
-        throw InvalidArgument("Invalid opcode {}", inst.Opcode());
+        throw InvalidArgument("Invalid opcode {}", inst.GetOpcode());
     }
 }
 
@@ -100,7 +100,7 @@ bool IsTextureInstruction(const IR::Inst& inst) {
 }
 
 std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
-    if (inst->Opcode() != IR::Opcode::GetCbufU32) {
+    if (inst->GetOpcode() != IR::Opcode::GetCbufU32) {
         return std::nullopt;
     }
     const IR::Value index{inst->Arg(0)};
@@ -134,14 +134,14 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
         addr = *track_addr;
     } else {
         addr = ConstBufferAddr{
-            .index{env.TextureBoundBuffer()},
-            .offset{inst.Arg(0).U32()},
+            .index = env.TextureBoundBuffer(),
+            .offset = inst.Arg(0).U32(),
         };
     }
     return TextureInst{
         .cbuf{addr},
-        .inst{&inst},
-        .block{block},
+        .inst = &inst,
+        .block = block,
     };
 }
 
@@ -211,7 +211,7 @@ void TexturePass(Environment& env, IR::Program& program) {
 
         const auto& cbuf{texture_inst.cbuf};
         auto flags{inst->Flags<IR::TextureInstInfo>()};
-        switch (inst->Opcode()) {
+        switch (inst->GetOpcode()) {
         case IR::Opcode::ImageQueryDimensions:
             flags.type.Assign(env.ReadTextureType(cbuf.index, cbuf.offset));
             inst->SetFlags(flags);
@@ -235,16 +235,16 @@ void TexturePass(Environment& env, IR::Program& program) {
         u32 index;
         if (flags.type == TextureType::Buffer) {
             index = descriptors.Add(TextureBufferDescriptor{
-                .cbuf_index{cbuf.index},
-                .cbuf_offset{cbuf.offset},
-                .count{1},
+                .cbuf_index = cbuf.index,
+                .cbuf_offset = cbuf.offset,
+                .count = 1,
             });
         } else {
             index = descriptors.Add(TextureDescriptor{
-                .type{flags.type},
-                .cbuf_index{cbuf.index},
-                .cbuf_offset{cbuf.offset},
-                .count{1},
+                .type = flags.type,
+                .cbuf_index = cbuf.index,
+                .cbuf_offset = cbuf.offset,
+                .count = 1,
             });
         }
         inst->SetArg(0, IR::Value{index});
diff --git a/src/shader_recompiler/ir_opt/verification_pass.cpp b/src/shader_recompiler/ir_opt/verification_pass.cpp
index 4080b37cca..dbec96d84a 100644
--- a/src/shader_recompiler/ir_opt/verification_pass.cpp
+++ b/src/shader_recompiler/ir_opt/verification_pass.cpp
@@ -14,14 +14,14 @@ namespace Shader::Optimization {
 static void ValidateTypes(const IR::Program& program) {
     for (const auto& block : program.blocks) {
         for (const IR::Inst& inst : *block) {
-            if (inst.Opcode() == IR::Opcode::Phi) {
+            if (inst.GetOpcode() == IR::Opcode::Phi) {
                 // Skip validation on phi nodes
                 continue;
             }
             const size_t num_args{inst.NumArgs()};
             for (size_t i = 0; i < num_args; ++i) {
                 const IR::Type t1{inst.Arg(i).Type()};
-                const IR::Type t2{IR::ArgTypeOf(inst.Opcode(), i)};
+                const IR::Type t2{IR::ArgTypeOf(inst.GetOpcode(), i)};
                 if (!IR::AreTypesCompatible(t1, t2)) {
                     throw LogicError("Invalid types in block:\n{}", IR::DumpBlock(*block));
                 }
diff --git a/src/tests/common/unique_function.cpp b/src/tests/common/unique_function.cpp
index ac9912738a..aa6e865934 100644
--- a/src/tests/common/unique_function.cpp
+++ b/src/tests/common/unique_function.cpp
@@ -17,10 +17,12 @@ struct Noisy {
     Noisy& operator=(Noisy&& rhs) noexcept {
         state = "Move assigned";
         rhs.state = "Moved away";
+        return *this;
     }
     Noisy(const Noisy&) : state{"Copied constructed"} {}
     Noisy& operator=(const Noisy&) {
         state = "Copied assigned";
+        return *this;
     }
 
     std::string state;
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 71b07c1940..3166a69dc1 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -203,7 +203,7 @@ add_library(video_core STATIC
 create_target_directory_groups(video_core)
 
 target_link_libraries(video_core PUBLIC common core)
-target_link_libraries(video_core PRIVATE glad shader_recompiler xbyak)
+target_link_libraries(video_core PUBLIC glad shader_recompiler xbyak)
 
 if (YUZU_USE_BUNDLED_FFMPEG AND NOT WIN32)
     add_dependencies(video_core ffmpeg-build)
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 893258b4aa..57e2d569c2 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -447,7 +447,7 @@ void GraphicsPipeline::MakePipeline(const Device& device, VkRenderPass render_pa
         .dynamicStateCount = static_cast<u32>(dynamic_states.size()),
         .pDynamicStates = dynamic_states.data(),
     };
-    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
+    [[maybe_unused]] const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
         .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
         .pNext = nullptr,
         .requiredSubgroupSize = GuestWarpSize,
@@ -457,15 +457,16 @@ void GraphicsPipeline::MakePipeline(const Device& device, VkRenderPass render_pa
         if (!spv_modules[stage]) {
             continue;
         }
-        [[maybe_unused]] auto& stage_ci = shader_stages.emplace_back(VkPipelineShaderStageCreateInfo{
-            .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
-            .pNext = nullptr,
-            .flags = 0,
-            .stage = MaxwellToVK::ShaderStage(static_cast<Tegra::Engines::ShaderType>(stage)),
-            .module = *spv_modules[stage],
-            .pName = "main",
-            .pSpecializationInfo = nullptr,
-        });
+        [[maybe_unused]] auto& stage_ci =
+            shader_stages.emplace_back(VkPipelineShaderStageCreateInfo{
+                .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+                .pNext = nullptr,
+                .flags = 0,
+                .stage = MaxwellToVK::ShaderStage(static_cast<Tegra::Engines::ShaderType>(stage)),
+                .module = *spv_modules[stage],
+                .pName = "main",
+                .pSpecializationInfo = nullptr,
+            });
         /*
         if (program[stage]->entries.uses_warps && device.IsGuestWarpSizeSupported(stage_ci.stage)) {
             stage_ci.pNext = &subgroup_size_ci;
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 23bf84a92f..fcebb8f6e2 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -47,7 +47,7 @@ auto MakeSpan(Container& container) {
     return std::span(container.data(), container.size());
 }
 
-u64 MakeCbufKey(u32 index, u32 offset) {
+static u64 MakeCbufKey(u32 index, u32 offset) {
     return (static_cast<u64>(index) << 32) | offset;
 }
 
@@ -638,6 +638,7 @@ PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::GPU& gpu_,
         .warp_size_potentially_larger_than_guest = device.IsWarpSizePotentiallyBiggerThanGuest(),
         .has_broken_spirv_clamp = driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR,
         .generic_input_types{},
+        .fixed_state_point_size{},
     };
 }
 
@@ -748,7 +749,7 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
         Shader::Environment& env{*envs[env_index]};
         ++env_index;
 
-        const u32 cfg_offset{env.StartAddress() + sizeof(Shader::ProgramHeader)};
+        const u32 cfg_offset{static_cast<u32>(env.StartAddress() + sizeof(Shader::ProgramHeader))};
         Shader::Maxwell::Flow::CFG cfg(env, pools.flow_block, cfg_offset);
         programs[index] = TranslateProgram(pools.inst, pools.block, env, cfg);
     }
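The MakeCbufKey helper made static at the top of this file's diff packs a constant buffer index and byte offset into a single 64-bit lookup key; recovering the parts is a shift and a truncation. A small illustration (the values are arbitrary):

    #include <cassert>
    #include <cstdint>

    int main() {
        const std::uint64_t key = (static_cast<std::uint64_t>(25) << 32) | 0x110u;
        const auto index = static_cast<std::uint32_t>(key >> 32);  // high half
        const auto offset = static_cast<std::uint32_t>(key);       // low half
        assert(index == 25 && offset == 0x110);
    }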
diff --git a/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp b/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp
index b2dcd74ab9..991afe521e 100644
--- a/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp
@@ -2,8 +2,6 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-#pragma once
-
 #include <unordered_map>
 
 #include <boost/container/static_vector.hpp>
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index e42b091c5f..70328680dd 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -279,7 +279,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
     };
 }
 
-[[nodiscard]] std::vector<VkBufferCopy> TransformBufferCopies(
+[[maybe_unused]] [[nodiscard]] std::vector<VkBufferCopy> TransformBufferCopies(
     std::span<const VideoCommon::BufferCopy> copies, size_t buffer_offset) {
     std::vector<VkBufferCopy> result(copies.size());
     std::ranges::transform(
-- 
cgit v1.2.3-70-g09d2


From 5bfcafa0a21619e8cd82c38ec51e260838f42042 Mon Sep 17 00:00:00 2001
From: lat9nq <22451773+lat9nq@users.noreply.github.com>
Date: Sat, 10 Apr 2021 02:32:55 -0400
Subject: shader: Address feedback + clang format

---
 src/shader_recompiler/CMakeLists.txt                     |  2 ++
 src/shader_recompiler/backend/spirv/emit_spirv_image.cpp |  4 ----
 src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp  |  1 -
 src/shader_recompiler/frontend/ir/opcodes.cpp            |  2 +-
 src/shader_recompiler/frontend/maxwell/control_flow.cpp  |  4 ++--
 src/shader_recompiler/frontend/maxwell/control_flow.h    | 16 ++++++++--------
 src/shader_recompiler/frontend/maxwell/decode.cpp        |  2 +-
 .../frontend/maxwell/translate/impl/common_funcs.cpp     |  5 +++--
 .../frontend/maxwell/translate/impl/not_implemented.cpp  |  1 -
 .../ir_opt/global_memory_to_storage_buffer_pass.cpp      |  3 ++-
 src/shader_recompiler/object_pool.h                      |  2 +-
 src/video_core/renderer_vulkan/vk_compute_pipeline.h     |  4 ++--
 12 files changed, 22 insertions(+), 24 deletions(-)

(limited to 'src/shader_recompiler/frontend/ir/opcodes.cpp')

diff --git a/src/shader_recompiler/CMakeLists.txt b/src/shader_recompiler/CMakeLists.txt
index 551bf1c582..6b5df23e29 100644
--- a/src/shader_recompiler/CMakeLists.txt
+++ b/src/shader_recompiler/CMakeLists.txt
@@ -197,6 +197,8 @@ else()
         $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
         -Werror=unused-variable
 
+        # Bracket depth determines the maximum size of a fold expression in Clang since 9c9974c3ccb6.
+        # And this in turn limits the size of a std::array.
         $<$<CXX_COMPILER_ID:Clang>:-fbracket-depth=1024>
     )
 endif()
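The new comment is worth unpacking: a binary fold over a parameter pack expands into one nested parenthesized expression per element, so Clang's default -fbracket-depth of 256 caps how long the pack, and hence a fold-built std::array, can get. A minimal reproduction sketch (the pack size of 512 is illustrative):

    #include <cstddef>
    #include <utility>

    // Each pack element adds one level of parenthesization to the expanded fold.
    template <std::size_t... I>
    constexpr std::size_t Sum(std::index_sequence<I...>) {
        return (std::size_t{0} + ... + I);
    }

    // On Clang this needs a raised limit such as -fbracket-depth=1024 to compile.
    static_assert(Sum(std::make_index_sequence<512>{}) == 512 * 511 / 2);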
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
index 815ca62992..6a89c0f795 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -304,10 +304,6 @@ Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
                 ctx.F32[4], Texture(ctx, index), coords, dref, operands.Mask(), operands.Span());
 }
 
-#ifdef _WIN32
-#pragma optimize("", off)
-#endif
-
 Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id offset,
                   Id lod, Id ms) {
     const auto info{inst->Flags<IR::TextureInstInfo>()};
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp
index 12a03ed6ed..f6196653a9 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp
@@ -7,7 +7,6 @@
 namespace Shader::Backend::SPIRV {
 namespace {
 Id WarpExtract(EmitContext& ctx, Id value) {
-    [[maybe_unused]] const Id shift{ctx.Constant(ctx.U32[1], 5)};
     const Id local_index{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
     return ctx.OpVectorExtractDynamic(ctx.U32[1], value, local_index);
 }
diff --git a/src/shader_recompiler/frontend/ir/opcodes.cpp b/src/shader_recompiler/frontend/ir/opcodes.cpp
index 002dbf94e9..7d3e0b2ab5 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.cpp
+++ b/src/shader_recompiler/frontend/ir/opcodes.cpp
@@ -49,7 +49,7 @@ constexpr std::array META_TABLE{
 #define OPCODE(name_token, type_token, ...)                                                        \
     OpcodeMeta{                                                                                    \
         .name{#name_token},                                                                        \
-        .type = type_token,                                                                         \
+        .type = type_token,                                                                        \
         .arg_types{__VA_ARGS__},                                                                   \
     },
 #include "opcodes.inc"
diff --git a/src/shader_recompiler/frontend/maxwell/control_flow.cpp b/src/shader_recompiler/frontend/maxwell/control_flow.cpp
index cb8ec7eaa3..9811183f12 100644
--- a/src/shader_recompiler/frontend/maxwell/control_flow.cpp
+++ b/src/shader_recompiler/frontend/maxwell/control_flow.cpp
@@ -44,7 +44,7 @@ void Split(Block* old_block, Block* new_block, Location pc) {
     *new_block = Block{};
     new_block->begin = pc;
     new_block->end = old_block->end;
-    new_block->end_class = old_block->end_class,
+    new_block->end_class = old_block->end_class;
     new_block->cond = old_block->cond;
     new_block->stack = old_block->stack;
     new_block->branch_true = old_block->branch_true;
@@ -428,7 +428,7 @@ CFG::AnalysisState CFG::AnalyzeBRX(Block* block, Location pc, Instruction inst,
         if (!is_absolute) {
             target += pc.Offset();
         }
-        target += static_cast<unsigned int>(brx_table->branch_offset);
+        target += static_cast<u32>(brx_table->branch_offset);
         target += 8;
         targets.push_back(target);
     }
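The Split fix above is one character: the end_class line used to end in a comma, so the following assignment was silently folded into a single comma expression. It still compiled, and in this pattern even behaved identically, which is how it slipped through. A standalone illustration:

    #include <cassert>

    int main() {
        int end_class = 0;
        int cond = 0;
        // Comma operator: both assignments execute left to right, so the
        // mistake is invisible at runtime here.
        end_class = 7, cond = 9;
        assert(end_class == 7 && cond == 9);
    }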
diff --git a/src/shader_recompiler/frontend/maxwell/control_flow.h b/src/shader_recompiler/frontend/maxwell/control_flow.h
index 9f570fbb50..89966b16aa 100644
--- a/src/shader_recompiler/frontend/maxwell/control_flow.h
+++ b/src/shader_recompiler/frontend/maxwell/control_flow.h
@@ -78,15 +78,15 @@ struct Block : boost::intrusive::set_base_hook<
 
     Location begin;
     Location end;
-    EndClass end_class;
-    IR::Condition cond;
+    EndClass end_class{};
+    IR::Condition cond{};
     Stack stack;
-    Block* branch_true;
-    Block* branch_false;
-    FunctionId function_call;
-    Block* return_block;
-    IR::Reg branch_reg;
-    s32 branch_offset;
+    Block* branch_true{};
+    Block* branch_false{};
+    FunctionId function_call{};
+    Block* return_block{};
+    IR::Reg branch_reg{};
+    s32 branch_offset{};
     std::vector<IndirectBranch> indirect_branches;
 };
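The braces added to Block's members are default member initializers, so a default-constructed Block now value-initializes its branch pointers and scalars instead of leaving them indeterminate. A minimal before-and-after sketch (struct names are illustrative):

    #include <cassert>

    struct BeforeBlock {
        int* branch_true;    // indeterminate after `BeforeBlock b;`
    };
    struct AfterBlock {
        int* branch_true{};  // guaranteed nullptr after `AfterBlock a;`
    };

    int main() {
        AfterBlock a;
        assert(a.branch_true == nullptr);
    }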
 
diff --git a/src/shader_recompiler/frontend/maxwell/decode.cpp b/src/shader_recompiler/frontend/maxwell/decode.cpp
index 932d19c1d4..972f677dc9 100644
--- a/src/shader_recompiler/frontend/maxwell/decode.cpp
+++ b/src/shader_recompiler/frontend/maxwell/decode.cpp
@@ -56,7 +56,7 @@ constexpr std::array UNORDERED_ENCODINGS{
 #define INST(name, cute, encode)                                                                   \
     InstEncoding{                                                                                  \
         .mask_value{MaskValueFromEncoding(encode)},                                                \
-        .opcode = Opcode::name,                                                                     \
+        .opcode = Opcode::name,                                                                    \
     },
 #include "maxwell.inc"
 #undef INST
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/common_funcs.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/common_funcs.cpp
index d30e82b10e..10bb01d99d 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/common_funcs.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/common_funcs.cpp
@@ -72,8 +72,9 @@ bool IsCompareOpOrdered(FPCompareOp op) {
     }
 }
 
-IR::U1 FloatingPointCompare(IR::IREmitter& ir, const IR::F16F32F64& operand_1, const IR::F16F32F64& operand_2,
-                            FPCompareOp compare_op, IR::FpControl control) {
+IR::U1 FloatingPointCompare(IR::IREmitter& ir, const IR::F16F32F64& operand_1,
+                            const IR::F16F32F64& operand_2, FPCompareOp compare_op,
+                            IR::FpControl control) {
     const bool ordered{IsCompareOpOrdered(compare_op)};
     switch (compare_op) {
     case FPCompareOp::F:
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/not_implemented.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/not_implemented.cpp
index ba0cfa673b..c239010527 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/not_implemented.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/not_implemented.cpp
@@ -65,7 +65,6 @@ void TranslatorVisitor::CS2R(u64) {
     ThrowNotImplemented(Opcode::CS2R);
 }
 
-
 void TranslatorVisitor::FCHK_reg(u64) {
     ThrowNotImplemented(Opcode::FCHK_reg);
 }
diff --git a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
index 90a65dd167..afe871505e 100644
--- a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
+++ b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
@@ -164,7 +164,8 @@ void DiscardGlobalMemory(IR::Block& block, IR::Inst& inst) {
         inst.Invalidate();
         break;
     default:
-        throw LogicError("Invalid opcode to discard its global memory operation {}", inst.GetOpcode());
+        throw LogicError("Invalid opcode to discard its global memory operation {}",
+                         inst.GetOpcode());
     }
 }
 
diff --git a/src/shader_recompiler/object_pool.h b/src/shader_recompiler/object_pool.h
index 4242816344..f8b255b66c 100644
--- a/src/shader_recompiler/object_pool.h
+++ b/src/shader_recompiler/object_pool.h
@@ -18,7 +18,7 @@ public:
     }
 
     template <typename... Args>
-    requires std::is_constructible_v<T, Args...> [[nodiscard]] T* Create(Args&&... args) {
+    requires std::is_constructible_v<T, Args...>[[nodiscard]] T* Create(Args&&... args) {
         return std::construct_at(Memory(), std::forward<Args>(args)...);
     }
 
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.h b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
index 104e6cc850..8efdc29260 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
@@ -4,9 +4,9 @@
 
 #pragma once
 
-#include <mutex>
-#include <condition_variable>
 #include <atomic>
+#include <condition_variable>
+#include <mutex>
 
 #include "common/common_types.h"
 #include "common/thread_worker.h"
-- 
cgit v1.2.3-70-g09d2


From 183855e396cc6918d36fbf3e38ea426e934b4e3e Mon Sep 17 00:00:00 2001
From: ReinUsesLisp <reinuseslisp@airmail.cc>
Date: Thu, 15 Apr 2021 22:46:11 -0300
Subject: shader: Implement tessellation shaders, polygon mode and invocation
 id

---
 src/shader_recompiler/CMakeLists.txt               |   2 +
 .../backend/spirv/emit_context.cpp                 | 147 ++++++++++++++------
 src/shader_recompiler/backend/spirv/emit_context.h |  10 +-
 src/shader_recompiler/backend/spirv/emit_spirv.cpp |  39 ++++++
 src/shader_recompiler/backend/spirv/emit_spirv.h   |   3 +
 .../backend/spirv/emit_spirv_context_get_set.cpp   |  88 ++++++++++--
 src/shader_recompiler/frontend/ir/ir_emitter.cpp   |  12 ++
 src/shader_recompiler/frontend/ir/ir_emitter.h     |   4 +
 .../frontend/ir/microinstruction.cpp               |   1 +
 src/shader_recompiler/frontend/ir/opcodes.cpp      |   1 +
 src/shader_recompiler/frontend/ir/opcodes.inc      |   3 +
 src/shader_recompiler/frontend/ir/patch.cpp        |  28 ++++
 src/shader_recompiler/frontend/ir/patch.h          | 149 +++++++++++++++++++++
 src/shader_recompiler/frontend/ir/type.h           |  41 +++---
 src/shader_recompiler/frontend/ir/value.cpp        |   9 ++
 src/shader_recompiler/frontend/ir/value.h          |   4 +
 src/shader_recompiler/frontend/maxwell/program.cpp |   5 +
 .../translate/impl/load_store_attribute.cpp        |  33 +++--
 .../translate/impl/move_special_register.cpp       |   2 +
 .../ir_opt/collect_shader_info_pass.cpp            |  41 ++++++
 src/shader_recompiler/profile.h                    |  16 +++
 src/shader_recompiler/shader_info.h                |   5 +
 src/video_core/renderer_vulkan/maxwell_to_vk.cpp   |  13 ++
 src/video_core/renderer_vulkan/maxwell_to_vk.h     |   2 +
 .../renderer_vulkan/vk_graphics_pipeline.cpp       |   3 +-
 .../renderer_vulkan/vk_pipeline_cache.cpp          |  30 +++++
 .../renderer_vulkan/vk_staging_buffer_pool.cpp     |   2 +-
 src/video_core/vulkan_common/vulkan_device.cpp     |   3 +-
 28 files changed, 605 insertions(+), 91 deletions(-)
 create mode 100644 src/shader_recompiler/frontend/ir/patch.cpp
 create mode 100644 src/shader_recompiler/frontend/ir/patch.h

(limited to 'src/shader_recompiler/frontend/ir/opcodes.cpp')

diff --git a/src/shader_recompiler/CMakeLists.txt b/src/shader_recompiler/CMakeLists.txt
index bbbfa98a3f..7c11d15bfd 100644
--- a/src/shader_recompiler/CMakeLists.txt
+++ b/src/shader_recompiler/CMakeLists.txt
@@ -41,6 +41,8 @@ add_library(shader_recompiler STATIC
     frontend/ir/opcodes.cpp
     frontend/ir/opcodes.h
     frontend/ir/opcodes.inc
+    frontend/ir/patch.cpp
+    frontend/ir/patch.h
     frontend/ir/post_order.cpp
     frontend/ir/post_order.h
     frontend/ir/pred.h
diff --git a/src/shader_recompiler/backend/spirv/emit_context.cpp b/src/shader_recompiler/backend/spirv/emit_context.cpp
index 032cf5e03e..067f616137 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_context.cpp
@@ -125,19 +125,36 @@ u32 NumVertices(InputTopology input_topology) {
     throw InvalidArgument("Invalid input topology {}", input_topology);
 }
 
-Id DefineInput(EmitContext& ctx, Id type, std::optional<spv::BuiltIn> builtin = std::nullopt) {
-    if (ctx.stage == Stage::Geometry) {
-        const u32 num_vertices{NumVertices(ctx.profile.input_topology)};
-        type = ctx.TypeArray(type, ctx.Constant(ctx.U32[1], num_vertices));
+Id DefineInput(EmitContext& ctx, Id type, bool per_invocation,
+               std::optional<spv::BuiltIn> builtin = std::nullopt) {
+    switch (ctx.stage) {
+    case Stage::TessellationControl:
+    case Stage::TessellationEval:
+        if (per_invocation) {
+            type = ctx.TypeArray(type, ctx.Constant(ctx.U32[1], 32u));
+        }
+        break;
+    case Stage::Geometry:
+        if (per_invocation) {
+            const u32 num_vertices{NumVertices(ctx.profile.input_topology)};
+            type = ctx.TypeArray(type, ctx.Constant(ctx.U32[1], num_vertices));
+        }
+        break;
+    default:
+        break;
     }
     return DefineVariable(ctx, type, builtin, spv::StorageClass::Input);
 }
 
-Id DefineOutput(EmitContext& ctx, Id type, std::optional<spv::BuiltIn> builtin = std::nullopt) {
+Id DefineOutput(EmitContext& ctx, Id type, std::optional<u32> invocations,
+                std::optional<spv::BuiltIn> builtin = std::nullopt) {
+    if (invocations && ctx.stage == Stage::TessellationControl) {
+        type = ctx.TypeArray(type, ctx.Constant(ctx.U32[1], *invocations));
+    }
     return DefineVariable(ctx, type, builtin, spv::StorageClass::Output);
 }
 
-void DefineGenericOutput(EmitContext& ctx, size_t index) {
+void DefineGenericOutput(EmitContext& ctx, size_t index, std::optional<u32> invocations) {
     static constexpr std::string_view swizzle{"xyzw"};
     const size_t base_attr_index{static_cast<size_t>(IR::Attribute::Generic0X) + index * 4};
     u32 element{0};
@@ -150,7 +167,7 @@ void DefineGenericOutput(EmitContext& ctx, size_t index) {
         }
         const u32 num_components{xfb_varying ? xfb_varying->components : remainder};
 
-        const Id id{DefineOutput(ctx, ctx.F32[num_components])};
+        const Id id{DefineOutput(ctx, ctx.F32[num_components], invocations)};
         ctx.Decorate(id, spv::Decoration::Location, static_cast<u32>(index));
         if (element > 0) {
             ctx.Decorate(id, spv::Decoration::Component, element);
@@ -161,10 +178,10 @@ void DefineGenericOutput(EmitContext& ctx, size_t index) {
             ctx.Decorate(id, spv::Decoration::Offset, xfb_varying->offset);
         }
         if (num_components < 4 || element > 0) {
-            ctx.Name(id, fmt::format("out_attr{}", index));
-        } else {
             const std::string_view subswizzle{swizzle.substr(element, num_components)};
             ctx.Name(id, fmt::format("out_attr{}_{}", index, subswizzle));
+        } else {
+            ctx.Name(id, fmt::format("out_attr{}", index));
         }
         const GenericElementInfo info{
             .id = id,
@@ -383,7 +400,7 @@ EmitContext::EmitContext(const Profile& profile_, IR::Program& program, u32& bin
     AddCapability(spv::Capability::Shader);
     DefineCommonTypes(program.info);
     DefineCommonConstants();
-    DefineInterfaces(program.info);
+    DefineInterfaces(program);
     DefineLocalMemory(program);
     DefineSharedMemory(program);
     DefineSharedMemoryFunctions(program);
@@ -472,9 +489,9 @@ void EmitContext::DefineCommonConstants() {
     f32_zero_value = Constant(F32[1], 0.0f);
 }
 
-void EmitContext::DefineInterfaces(const Info& info) {
-    DefineInputs(info);
-    DefineOutputs(info);
+void EmitContext::DefineInterfaces(const IR::Program& program) {
+    DefineInputs(program.info);
+    DefineOutputs(program);
 }
 
 void EmitContext::DefineLocalMemory(const IR::Program& program) {
@@ -972,26 +989,29 @@ void EmitContext::DefineLabels(IR::Program& program) {
 
 void EmitContext::DefineInputs(const Info& info) {
     if (info.uses_workgroup_id) {
-        workgroup_id = DefineInput(*this, U32[3], spv::BuiltIn::WorkgroupId);
+        workgroup_id = DefineInput(*this, U32[3], false, spv::BuiltIn::WorkgroupId);
     }
     if (info.uses_local_invocation_id) {
-        local_invocation_id = DefineInput(*this, U32[3], spv::BuiltIn::LocalInvocationId);
+        local_invocation_id = DefineInput(*this, U32[3], false, spv::BuiltIn::LocalInvocationId);
+    }
+    if (info.uses_invocation_id) {
+        invocation_id = DefineInput(*this, U32[1], false, spv::BuiltIn::InvocationId);
     }
     if (info.uses_is_helper_invocation) {
-        is_helper_invocation = DefineInput(*this, U1, spv::BuiltIn::HelperInvocation);
+        is_helper_invocation = DefineInput(*this, U1, false, spv::BuiltIn::HelperInvocation);
     }
     if (info.uses_subgroup_mask) {
-        subgroup_mask_eq = DefineInput(*this, U32[4], spv::BuiltIn::SubgroupEqMaskKHR);
-        subgroup_mask_lt = DefineInput(*this, U32[4], spv::BuiltIn::SubgroupLtMaskKHR);
-        subgroup_mask_le = DefineInput(*this, U32[4], spv::BuiltIn::SubgroupLeMaskKHR);
-        subgroup_mask_gt = DefineInput(*this, U32[4], spv::BuiltIn::SubgroupGtMaskKHR);
-        subgroup_mask_ge = DefineInput(*this, U32[4], spv::BuiltIn::SubgroupGeMaskKHR);
+        subgroup_mask_eq = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupEqMaskKHR);
+        subgroup_mask_lt = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupLtMaskKHR);
+        subgroup_mask_le = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupLeMaskKHR);
+        subgroup_mask_gt = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupGtMaskKHR);
+        subgroup_mask_ge = DefineInput(*this, U32[4], false, spv::BuiltIn::SubgroupGeMaskKHR);
     }
     if (info.uses_subgroup_invocation_id ||
         (profile.warp_size_potentially_larger_than_guest &&
          (info.uses_subgroup_vote || info.uses_subgroup_mask))) {
         subgroup_local_invocation_id =
-            DefineInput(*this, U32[1], spv::BuiltIn::SubgroupLocalInvocationId);
+            DefineInput(*this, U32[1], false, spv::BuiltIn::SubgroupLocalInvocationId);
     }
     if (info.uses_fswzadd) {
         const Id f32_one{Constant(F32[1], 1.0f)};
@@ -1004,29 +1024,32 @@ void EmitContext::DefineInputs(const Info& info) {
     if (info.loads_position) {
         const bool is_fragment{stage == Stage::Fragment};
         const spv::BuiltIn built_in{is_fragment ? spv::BuiltIn::FragCoord : spv::BuiltIn::Position};
-        input_position = DefineInput(*this, F32[4], built_in);
+        input_position = DefineInput(*this, F32[4], true, built_in);
     }
     if (info.loads_instance_id) {
         if (profile.support_vertex_instance_id) {
-            instance_id = DefineInput(*this, U32[1], spv::BuiltIn::InstanceId);
+            instance_id = DefineInput(*this, U32[1], true, spv::BuiltIn::InstanceId);
         } else {
-            instance_index = DefineInput(*this, U32[1], spv::BuiltIn::InstanceIndex);
-            base_instance = DefineInput(*this, U32[1], spv::BuiltIn::BaseInstance);
+            instance_index = DefineInput(*this, U32[1], true, spv::BuiltIn::InstanceIndex);
+            base_instance = DefineInput(*this, U32[1], true, spv::BuiltIn::BaseInstance);
         }
     }
     if (info.loads_vertex_id) {
         if (profile.support_vertex_instance_id) {
-            vertex_id = DefineInput(*this, U32[1], spv::BuiltIn::VertexId);
+            vertex_id = DefineInput(*this, U32[1], true, spv::BuiltIn::VertexId);
         } else {
-            vertex_index = DefineInput(*this, U32[1], spv::BuiltIn::VertexIndex);
-            base_vertex = DefineInput(*this, U32[1], spv::BuiltIn::BaseVertex);
+            vertex_index = DefineInput(*this, U32[1], true, spv::BuiltIn::VertexIndex);
+            base_vertex = DefineInput(*this, U32[1], true, spv::BuiltIn::BaseVertex);
         }
     }
     if (info.loads_front_face) {
-        front_face = DefineInput(*this, U1, spv::BuiltIn::FrontFacing);
+        front_face = DefineInput(*this, U1, true, spv::BuiltIn::FrontFacing);
     }
     if (info.loads_point_coord) {
-        point_coord = DefineInput(*this, F32[2], spv::BuiltIn::PointCoord);
+        point_coord = DefineInput(*this, F32[2], true, spv::BuiltIn::PointCoord);
+    }
+    if (info.loads_tess_coord) {
+        tess_coord = DefineInput(*this, F32[3], false, spv::BuiltIn::TessCoord);
     }
     for (size_t index = 0; index < info.input_generics.size(); ++index) {
         const InputVarying generic{info.input_generics[index]};
@@ -1038,7 +1061,7 @@ void EmitContext::DefineInputs(const Info& info) {
             continue;
         }
         const Id type{GetAttributeType(*this, input_type)};
-        const Id id{DefineInput(*this, type)};
+        const Id id{DefineInput(*this, type, true)};
         Decorate(id, spv::Decoration::Location, static_cast<u32>(index));
         Name(id, fmt::format("in_attr{}", index));
         input_generics[index] = id;
@@ -1059,58 +1082,98 @@ void EmitContext::DefineInputs(const Info& info) {
             break;
         }
     }
+    if (stage == Stage::TessellationEval) {
+        for (size_t index = 0; index < info.uses_patches.size(); ++index) {
+            if (!info.uses_patches[index]) {
+                continue;
+            }
+            const Id id{DefineInput(*this, F32[4], false)};
+            Decorate(id, spv::Decoration::Patch);
+            Decorate(id, spv::Decoration::Location, static_cast<u32>(index));
+            patches[index] = id;
+        }
+    }
 }
 
-void EmitContext::DefineOutputs(const Info& info) {
+void EmitContext::DefineOutputs(const IR::Program& program) {
+    const Info& info{program.info};
+    const std::optional<u32> invocations{program.invocations};
     if (info.stores_position || stage == Stage::VertexB) {
-        output_position = DefineOutput(*this, F32[4], spv::BuiltIn::Position);
+        output_position = DefineOutput(*this, F32[4], invocations, spv::BuiltIn::Position);
     }
     if (info.stores_point_size || profile.fixed_state_point_size) {
         if (stage == Stage::Fragment) {
             throw NotImplementedException("Storing PointSize in fragment stage");
         }
-        output_point_size = DefineOutput(*this, F32[1], spv::BuiltIn::PointSize);
+        output_point_size = DefineOutput(*this, F32[1], invocations, spv::BuiltIn::PointSize);
     }
     if (info.stores_clip_distance) {
         if (stage == Stage::Fragment) {
             throw NotImplementedException("Storing ClipDistance in fragment stage");
         }
         const Id type{TypeArray(F32[1], Constant(U32[1], 8U))};
-        clip_distances = DefineOutput(*this, type, spv::BuiltIn::ClipDistance);
+        clip_distances = DefineOutput(*this, type, invocations, spv::BuiltIn::ClipDistance);
     }
     if (info.stores_layer &&
         (profile.support_viewport_index_layer_non_geometry || stage == Stage::Geometry)) {
         if (stage == Stage::Fragment) {
             throw NotImplementedException("Storing Layer in fragment stage");
         }
-        layer = DefineOutput(*this, U32[1], spv::BuiltIn::Layer);
+        layer = DefineOutput(*this, U32[1], invocations, spv::BuiltIn::Layer);
     }
     if (info.stores_viewport_index &&
         (profile.support_viewport_index_layer_non_geometry || stage == Stage::Geometry)) {
         if (stage == Stage::Fragment) {
             throw NotImplementedException("Storing ViewportIndex in fragment stage");
         }
-        viewport_index = DefineOutput(*this, U32[1], spv::BuiltIn::ViewportIndex);
+        viewport_index = DefineOutput(*this, U32[1], invocations, spv::BuiltIn::ViewportIndex);
     }
     for (size_t index = 0; index < info.stores_generics.size(); ++index) {
         if (info.stores_generics[index]) {
-            DefineGenericOutput(*this, index);
+            DefineGenericOutput(*this, index, invocations);
         }
     }
-    if (stage == Stage::Fragment) {
+    switch (stage) {
+    case Stage::TessellationControl:
+        if (info.stores_tess_level_outer) {
+            const Id type{TypeArray(F32[1], Constant(U32[1], 4))};
+            output_tess_level_outer =
+                DefineOutput(*this, type, std::nullopt, spv::BuiltIn::TessLevelOuter);
+            Decorate(output_tess_level_outer, spv::Decoration::Patch);
+        }
+        if (info.stores_tess_level_inner) {
+            const Id type{TypeArray(F32[1], Constant(U32[1], 2))};
+            output_tess_level_inner =
+                DefineOutput(*this, type, std::nullopt, spv::BuiltIn::TessLevelInner);
+            Decorate(output_tess_level_inner, spv::Decoration::Patch);
+        }
+        for (size_t index = 0; index < info.uses_patches.size(); ++index) {
+            if (!info.uses_patches[index]) {
+                continue;
+            }
+            const Id id{DefineOutput(*this, F32[4], std::nullopt)};
+            Decorate(id, spv::Decoration::Patch);
+            Decorate(id, spv::Decoration::Location, static_cast<u32>(index));
+            patches[index] = id;
+        }
+        break;
+    case Stage::Fragment:
         for (u32 index = 0; index < 8; ++index) {
             if (!info.stores_frag_color[index]) {
                 continue;
             }
-            frag_color[index] = DefineOutput(*this, F32[4]);
+            frag_color[index] = DefineOutput(*this, F32[4], std::nullopt);
             Decorate(frag_color[index], spv::Decoration::Location, index);
             Name(frag_color[index], fmt::format("frag_color{}", index));
         }
         if (info.stores_frag_depth) {
-            frag_depth = DefineOutput(*this, F32[1]);
+            frag_depth = DefineOutput(*this, F32[1], std::nullopt);
             Decorate(frag_depth, spv::Decoration::BuiltIn, spv::BuiltIn::FragDepth);
             Name(frag_depth, "frag_depth");
         }
+        break;
+    default:
+        break;
     }
 }
 
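Note on the arraying rule above: tessellation and geometry stages read per-invocation inputs as arrays with one slot per incoming vertex (the tessellation stages use a fixed bound of 32), while per-patch inputs and builtins such as TessCoord stay unarrayed. A minimal, self-contained sketch of the rule, using a hypothetical stand-in for SPIR-V type handles:

    #include <cstdint>

    enum class Stage { VertexB, TessellationControl, TessellationEval, Geometry, Fragment };

    // Hypothetical stand-in for a SPIR-V type handle; array_size == 0 means "not arrayed".
    struct TypeHandle {
        std::uint32_t element{};
        std::uint32_t array_size{};
    };

    constexpr TypeHandle MakeArray(TypeHandle element, std::uint32_t size) {
        return {element.element, size};
    }

    constexpr TypeHandle InputType(Stage stage, TypeHandle type, bool per_invocation,
                                   std::uint32_t num_vertices) {
        switch (stage) {
        case Stage::TessellationControl:
        case Stage::TessellationEval:
            return per_invocation ? MakeArray(type, 32) : type; // 32 mirrors the bound above
        case Stage::Geometry:
            return per_invocation ? MakeArray(type, num_vertices) : type;
        default:
            return type;
        }
    }

    static_assert(InputType(Stage::Geometry, {1, 0}, true, 3).array_size == 3);
    static_assert(InputType(Stage::Fragment, {1, 0}, true, 0).array_size == 0);

DefineOutput applies the same idea on the output side, but only for tessellation control, with the bound taken from the program's declared invocation count.
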
diff --git a/src/shader_recompiler/backend/spirv/emit_context.h b/src/shader_recompiler/backend/spirv/emit_context.h
index 0da14d5f8e..ba0a253b35 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.h
+++ b/src/shader_recompiler/backend/spirv/emit_context.h
@@ -147,6 +147,7 @@ public:
 
     Id workgroup_id{};
     Id local_invocation_id{};
+    Id invocation_id{};
     Id is_helper_invocation{};
     Id subgroup_local_invocation_id{};
     Id subgroup_mask_eq{};
@@ -162,6 +163,7 @@ public:
     Id base_vertex{};
     Id front_face{};
     Id point_coord{};
+    Id tess_coord{};
     Id clip_distances{};
     Id layer{};
     Id viewport_index{};
@@ -204,6 +206,10 @@ public:
     Id output_position{};
     std::array<std::array<GenericElementInfo, 4>, 32> output_generics{};
 
+    Id output_tess_level_outer{};
+    Id output_tess_level_inner{};
+    std::array<Id, 30> patches{};
+
     std::array<Id, 8> frag_color{};
     Id frag_depth{};
 
@@ -212,7 +218,7 @@ public:
 private:
     void DefineCommonTypes(const Info& info);
     void DefineCommonConstants();
-    void DefineInterfaces(const Info& info);
+    void DefineInterfaces(const IR::Program& program);
     void DefineLocalMemory(const IR::Program& program);
     void DefineSharedMemory(const IR::Program& program);
     void DefineSharedMemoryFunctions(const IR::Program& program);
@@ -226,7 +232,7 @@ private:
     void DefineLabels(IR::Program& program);
 
     void DefineInputs(const Info& info);
-    void DefineOutputs(const Info& info);
+    void DefineOutputs(const IR::Program& program);
 };
 
 } // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.cpp b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
index 3bf4c6a9ec..105602ccf5 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
@@ -45,6 +45,8 @@ ArgType Arg(EmitContext& ctx, const IR::Value& arg) {
         return arg.Label();
     } else if constexpr (std::is_same_v<ArgType, IR::Attribute>) {
         return arg.Attribute();
+    } else if constexpr (std::is_same_v<ArgType, IR::Patch>) {
+        return arg.Patch();
     } else if constexpr (std::is_same_v<ArgType, IR::Reg>) {
         return arg.Reg();
     }
@@ -120,6 +122,30 @@ Id DefineMain(EmitContext& ctx, IR::Program& program) {
     return main;
 }
 
+spv::ExecutionMode ExecutionMode(TessPrimitive primitive) {
+    switch (primitive) {
+    case TessPrimitive::Isolines:
+        return spv::ExecutionMode::Isolines;
+    case TessPrimitive::Triangles:
+        return spv::ExecutionMode::Triangles;
+    case TessPrimitive::Quads:
+        return spv::ExecutionMode::Quads;
+    }
+    throw InvalidArgument("Tessellation primitive {}", primitive);
+}
+
+spv::ExecutionMode ExecutionMode(TessSpacing spacing) {
+    switch (spacing) {
+    case TessSpacing::Equal:
+        return spv::ExecutionMode::SpacingEqual;
+    case TessSpacing::FractionalOdd:
+        return spv::ExecutionMode::SpacingFractionalOdd;
+    case TessSpacing::FractionalEven:
+        return spv::ExecutionMode::SpacingFractionalEven;
+    }
+    throw InvalidArgument("Tessellation spacing {}", spacing);
+}
+
 void DefineEntryPoint(const IR::Program& program, EmitContext& ctx, Id main) {
     const std::span interfaces(ctx.interfaces.data(), ctx.interfaces.size());
     spv::ExecutionModel execution_model{};
@@ -134,6 +160,19 @@ void DefineEntryPoint(const IR::Program& program, EmitContext& ctx, Id main) {
     case Stage::VertexB:
         execution_model = spv::ExecutionModel::Vertex;
         break;
+    case Stage::TessellationControl:
+        execution_model = spv::ExecutionModel::TessellationControl;
+        ctx.AddCapability(spv::Capability::Tessellation);
+        ctx.AddExecutionMode(main, spv::ExecutionMode::OutputVertices, program.invocations);
+        break;
+    case Stage::TessellationEval:
+        execution_model = spv::ExecutionModel::TessellationEvaluation;
+        ctx.AddCapability(spv::Capability::Tessellation);
+        ctx.AddExecutionMode(main, ExecutionMode(ctx.profile.tess_primitive));
+        ctx.AddExecutionMode(main, ExecutionMode(ctx.profile.tess_spacing));
+        ctx.AddExecutionMode(main, ctx.profile.tess_clockwise ? spv::ExecutionMode::VertexOrderCw
+                                                              : spv::ExecutionMode::VertexOrderCcw);
+        break;
     case Stage::Geometry:
         execution_model = spv::ExecutionModel::Geometry;
         ctx.AddCapability(spv::Capability::Geometry);
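
Note the split between the two tessellation stages: OutputVertices is declared on the control stage from program.invocations, while primitive, spacing and winding are execution modes of the evaluation stage taken from the profile. As a concrete example, an assumed quads / fractional-odd / clockwise profile would expand to:

    // Hypothetical evaluation-stage profile: quads, fractional-odd spacing, clockwise.
    ctx.AddExecutionMode(main, spv::ExecutionMode::Quads);
    ctx.AddExecutionMode(main, spv::ExecutionMode::SpacingFractionalOdd);
    ctx.AddExecutionMode(main, spv::ExecutionMode::VertexOrderCw);
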
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.h b/src/shader_recompiler/backend/spirv/emit_spirv.h
index 55b2edba0c..8caf30f1b0 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.h
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.h
@@ -55,6 +55,8 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex);
 void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, Id vertex);
 Id EmitGetAttributeIndexed(EmitContext& ctx, Id offset, Id vertex);
 void EmitSetAttributeIndexed(EmitContext& ctx, Id offset, Id value, Id vertex);
+Id EmitGetPatch(EmitContext& ctx, IR::Patch patch);
+void EmitSetPatch(EmitContext& ctx, IR::Patch patch, Id value);
 void EmitSetFragColor(EmitContext& ctx, u32 index, u32 component, Id value);
 void EmitSetFragDepth(EmitContext& ctx, Id value);
 void EmitGetZFlag(EmitContext& ctx);
@@ -67,6 +69,7 @@ void EmitSetCFlag(EmitContext& ctx);
 void EmitSetOFlag(EmitContext& ctx);
 Id EmitWorkgroupId(EmitContext& ctx);
 Id EmitLocalInvocationId(EmitContext& ctx);
+Id EmitInvocationId(EmitContext& ctx);
 Id EmitIsHelperInvocation(EmitContext& ctx);
 Id EmitLoadLocal(EmitContext& ctx, Id word_offset);
 void EmitWriteLocal(EmitContext& ctx, Id word_offset, Id value);
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
index 59c56c5ba8..4a1aeece5a 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
@@ -32,13 +32,26 @@ std::optional<AttrInfo> AttrTypes(EmitContext& ctx, u32 index) {
 
 template <typename... Args>
 Id AttrPointer(EmitContext& ctx, Id pointer_type, Id vertex, Id base, Args&&... args) {
-    if (ctx.stage == Stage::Geometry) {
+    switch (ctx.stage) {
+    case Stage::TessellationControl:
+    case Stage::TessellationEval:
+    case Stage::Geometry:
         return ctx.OpAccessChain(pointer_type, base, vertex, std::forward<Args>(args)...);
-    } else {
+    default:
         return ctx.OpAccessChain(pointer_type, base, std::forward<Args>(args)...);
     }
 }
 
+template <typename... Args>
+Id OutputAccessChain(EmitContext& ctx, Id result_type, Id base, Args&&... args) {
+    if (ctx.stage == Stage::TessellationControl) {
+        const Id invocation_id{ctx.OpLoad(ctx.U32[1], ctx.invocation_id)};
+        return ctx.OpAccessChain(result_type, base, invocation_id, std::forward<Args>(args)...);
+    } else {
+        return ctx.OpAccessChain(result_type, base, std::forward<Args>(args)...);
+    }
+}
+
 std::optional<Id> OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
     if (IR::IsGeneric(attr)) {
         const u32 index{IR::GenericAttributeIndex(attr)};
@@ -49,7 +62,7 @@ std::optional<Id> OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
         } else {
             const u32 index_element{element - info.first_element};
             const Id index_id{ctx.Constant(ctx.U32[1], index_element)};
-            return ctx.OpAccessChain(ctx.output_f32, info.id, index_id);
+            return OutputAccessChain(ctx, ctx.output_f32, info.id, index_id);
         }
     }
     switch (attr) {
@@ -61,7 +74,7 @@ std::optional<Id> OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
     case IR::Attribute::PositionW: {
         const u32 element{static_cast<u32>(attr) % 4};
         const Id element_id{ctx.Constant(ctx.U32[1], element)};
-        return ctx.OpAccessChain(ctx.output_f32, ctx.output_position, element_id);
+        return OutputAccessChain(ctx, ctx.output_f32, ctx.output_position, element_id);
     }
     case IR::Attribute::ClipDistance0:
     case IR::Attribute::ClipDistance1:
@@ -74,7 +87,7 @@ std::optional<Id> OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
         const u32 base{static_cast<u32>(IR::Attribute::ClipDistance0)};
         const u32 index{static_cast<u32>(attr) - base};
         const Id clip_num{ctx.Constant(ctx.U32[1], index)};
-        return ctx.OpAccessChain(ctx.output_f32, ctx.clip_distances, clip_num);
+        return OutputAccessChain(ctx, ctx.output_f32, ctx.clip_distances, clip_num);
     }
     case IR::Attribute::Layer:
         return ctx.profile.support_viewport_index_layer_non_geometry ||
@@ -222,11 +235,18 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
                             ctx.Constant(ctx.U32[1], std::numeric_limits<u32>::max()),
                             ctx.u32_zero_value);
     case IR::Attribute::PointSpriteS:
-        return ctx.OpLoad(ctx.F32[1], AttrPointer(ctx, ctx.input_f32, vertex, ctx.point_coord,
-                                                  ctx.u32_zero_value));
+        return ctx.OpLoad(ctx.F32[1],
+                          ctx.OpAccessChain(ctx.input_f32, ctx.point_coord, ctx.u32_zero_value));
     case IR::Attribute::PointSpriteT:
-        return ctx.OpLoad(ctx.F32[1], AttrPointer(ctx, ctx.input_f32, vertex, ctx.point_coord,
-                                                  ctx.Constant(ctx.U32[1], 1U)));
+        return ctx.OpLoad(ctx.F32[1], ctx.OpAccessChain(ctx.input_f32, ctx.point_coord,
+                                                        ctx.Constant(ctx.U32[1], 1U)));
+    case IR::Attribute::TessellationEvaluationPointU:
+        return ctx.OpLoad(ctx.F32[1],
+                          ctx.OpAccessChain(ctx.input_f32, ctx.tess_coord, ctx.u32_zero_value));
+    case IR::Attribute::TessellationEvaluationPointV:
+        return ctx.OpLoad(ctx.F32[1], ctx.OpAccessChain(ctx.input_f32, ctx.tess_coord,
+                                                        ctx.Constant(ctx.U32[1], 1U)));
+
     default:
         throw NotImplementedException("Read attribute {}", attr);
     }
@@ -240,9 +260,12 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, [[maybe_un
 }
 
 Id EmitGetAttributeIndexed(EmitContext& ctx, Id offset, Id vertex) {
-    if (ctx.stage == Stage::Geometry) {
+    switch (ctx.stage) {
+    case Stage::TessellationControl:
+    case Stage::TessellationEval:
+    case Stage::Geometry:
         return ctx.OpFunctionCall(ctx.F32[1], ctx.indexed_load_func, offset, vertex);
-    } else {
+    default:
         return ctx.OpFunctionCall(ctx.F32[1], ctx.indexed_load_func, offset);
     }
 }
@@ -251,6 +274,45 @@ void EmitSetAttributeIndexed(EmitContext& ctx, Id offset, Id value, [[maybe_unus
     ctx.OpFunctionCall(ctx.void_id, ctx.indexed_store_func, offset, value);
 }
 
+Id EmitGetPatch(EmitContext& ctx, IR::Patch patch) {
+    if (!IR::IsGeneric(patch)) {
+        throw NotImplementedException("Non-generic patch load");
+    }
+    const u32 index{IR::GenericPatchIndex(patch)};
+    const Id element{ctx.Constant(ctx.U32[1], IR::GenericPatchElement(patch))};
+    const Id pointer{ctx.OpAccessChain(ctx.input_f32, ctx.patches.at(index), element)};
+    return ctx.OpLoad(ctx.F32[1], pointer);
+}
+
+void EmitSetPatch(EmitContext& ctx, IR::Patch patch, Id value) {
+    const Id pointer{[&] {
+        if (IR::IsGeneric(patch)) {
+            const u32 index{IR::GenericPatchIndex(patch)};
+            const Id element{ctx.Constant(ctx.U32[1], IR::GenericPatchElement(patch))};
+            return ctx.OpAccessChain(ctx.output_f32, ctx.patches.at(index), element);
+        }
+        switch (patch) {
+        case IR::Patch::TessellationLodLeft:
+        case IR::Patch::TessellationLodRight:
+        case IR::Patch::TessellationLodTop:
+        case IR::Patch::TessellationLodBottom: {
+            const u32 index{static_cast<u32>(patch) - u32(IR::Patch::TessellationLodLeft)};
+            const Id index_id{ctx.Constant(ctx.U32[1], index)};
+            return ctx.OpAccessChain(ctx.output_f32, ctx.output_tess_level_outer, index_id);
+        }
+        case IR::Patch::TessellationLodInteriorU:
+            return ctx.OpAccessChain(ctx.output_f32, ctx.output_tess_level_inner,
+                                     ctx.u32_zero_value);
+        case IR::Patch::TessellationLodInteriorV:
+            return ctx.OpAccessChain(ctx.output_f32, ctx.output_tess_level_inner,
+                                     ctx.Constant(ctx.U32[1], 1u));
+        default:
+            throw NotImplementedException("Patch {}", patch);
+        }
+    }()};
+    ctx.OpStore(pointer, value);
+}
+
 void EmitSetFragColor(EmitContext& ctx, u32 index, u32 component, Id value) {
     const Id component_id{ctx.Constant(ctx.U32[1], component)};
     const Id pointer{ctx.OpAccessChain(ctx.output_f32, ctx.frag_color.at(index), component_id)};
@@ -301,6 +363,10 @@ Id EmitLocalInvocationId(EmitContext& ctx) {
     return ctx.OpLoad(ctx.U32[3], ctx.local_invocation_id);
 }
 
+Id EmitInvocationId(EmitContext& ctx) {
+    return ctx.OpLoad(ctx.U32[1], ctx.invocation_id);
+}
+
 Id EmitIsHelperInvocation(EmitContext& ctx) {
     return ctx.OpLoad(ctx.U1, ctx.is_helper_invocation);
 }
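
Two details worth calling out in the store path above. First, OutputAccessChain routes every tessellation control output store through an extra index loaded from the InvocationId builtin, since those outputs were declared as per-invocation arrays. Second, the six non-generic patches map onto the two tessellation level builtins: the four edge LODs index TessLevelOuter in enum order and the two interior LODs index TessLevelInner. A standalone sketch of that routing, with the enum values copied from patch.h below:

    #include <cstdint>
    #include <stdexcept>

    enum class Patch : std::uint64_t {
        TessellationLodLeft,      // -> TessLevelOuter[0]
        TessellationLodTop,       // -> TessLevelOuter[1]
        TessellationLodRight,     // -> TessLevelOuter[2]
        TessellationLodBottom,    // -> TessLevelOuter[3]
        TessellationLodInteriorU, // -> TessLevelInner[0]
        TessellationLodInteriorV, // -> TessLevelInner[1]
    };

    struct TessLevelSlot {
        bool outer;
        std::uint32_t index;
    };

    constexpr TessLevelSlot SlotOf(Patch patch) {
        if (patch <= Patch::TessellationLodBottom) {
            return {true, static_cast<std::uint32_t>(patch)};
        }
        if (patch <= Patch::TessellationLodInteriorV) {
            return {false, static_cast<std::uint32_t>(patch) - 4u};
        }
        throw std::invalid_argument("not a tessellation level patch");
    }

    static_assert(SlotOf(Patch::TessellationLodTop).index == 1);
    static_assert(SlotOf(Patch::TessellationLodInteriorV).index == 1);
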
diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.cpp b/src/shader_recompiler/frontend/ir/ir_emitter.cpp
index d66eb17a6e..b821d9f476 100644
--- a/src/shader_recompiler/frontend/ir/ir_emitter.cpp
+++ b/src/shader_recompiler/frontend/ir/ir_emitter.cpp
@@ -331,6 +331,14 @@ void IREmitter::SetAttributeIndexed(const U32& phys_address, const F32& value, c
     Inst(Opcode::SetAttributeIndexed, phys_address, value, vertex);
 }
 
+F32 IREmitter::GetPatch(Patch patch) {
+    return Inst<F32>(Opcode::GetPatch, patch);
+}
+
+void IREmitter::SetPatch(Patch patch, const F32& value) {
+    Inst(Opcode::SetPatch, patch, value);
+}
+
 void IREmitter::SetFragColor(u32 index, u32 component, const F32& value) {
     Inst(Opcode::SetFragColor, Imm32(index), Imm32(component), value);
 }
@@ -363,6 +371,10 @@ U32 IREmitter::LocalInvocationIdZ() {
     return U32{CompositeExtract(Inst(Opcode::LocalInvocationId), 2)};
 }
 
+U32 IREmitter::InvocationId() {
+    return Inst<U32>(Opcode::InvocationId);
+}
+
 U1 IREmitter::IsHelperInvocation() {
     return Inst<U1>(Opcode::IsHelperInvocation);
 }
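
A short usage sketch of the two new emitter entry points, assuming an IR::IREmitter named ir in scope, as in the Maxwell translators:

    const IR::F32 value{ir.GetPatch(IR::Patch::Component0)};  // generic patch attribute 0, element x
    ir.SetPatch(IR::Patch::TessellationLodInteriorU, value);  // ends up in TessLevelInner[0]
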
diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.h b/src/shader_recompiler/frontend/ir/ir_emitter.h
index e70359eb11..7f8f1ad426 100644
--- a/src/shader_recompiler/frontend/ir/ir_emitter.h
+++ b/src/shader_recompiler/frontend/ir/ir_emitter.h
@@ -84,6 +84,9 @@ public:
     [[nodiscard]] F32 GetAttributeIndexed(const U32& phys_address, const U32& vertex);
     void SetAttributeIndexed(const U32& phys_address, const F32& value, const U32& vertex);
 
+    [[nodiscard]] F32 GetPatch(Patch patch);
+    void SetPatch(Patch patch, const F32& value);
+
     void SetFragColor(u32 index, u32 component, const F32& value);
     void SetFragDepth(const F32& value);
 
@@ -95,6 +98,7 @@ public:
     [[nodiscard]] U32 LocalInvocationIdY();
     [[nodiscard]] U32 LocalInvocationIdZ();
 
+    [[nodiscard]] U32 InvocationId();
     [[nodiscard]] U1 IsHelperInvocation();
 
     [[nodiscard]] U32 LaneId();
diff --git a/src/shader_recompiler/frontend/ir/microinstruction.cpp b/src/shader_recompiler/frontend/ir/microinstruction.cpp
index 204c55fa85..b2d7573d99 100644
--- a/src/shader_recompiler/frontend/ir/microinstruction.cpp
+++ b/src/shader_recompiler/frontend/ir/microinstruction.cpp
@@ -73,6 +73,7 @@ bool Inst::MayHaveSideEffects() const noexcept {
     case Opcode::EndPrimitive:
     case Opcode::SetAttribute:
     case Opcode::SetAttributeIndexed:
+    case Opcode::SetPatch:
     case Opcode::SetFragColor:
     case Opcode::SetFragDepth:
     case Opcode::WriteGlobalU8:
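
Adding SetPatch here matters because passes treat MayHaveSideEffects() as the only reason to keep an instruction with no uses. A sketch of the dead-code test this flag feeds (the actual pass lives in ir_opt and is assumed here):

    // Hypothetical reduction of the dead code elimination check:
    if (!inst.HasUses() && !inst.MayHaveSideEffects()) {
        inst.Invalidate(); // SetPatch no longer reaches this branch
    }
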
diff --git a/src/shader_recompiler/frontend/ir/opcodes.cpp b/src/shader_recompiler/frontend/ir/opcodes.cpp
index 7d3e0b2ab5..7f04b647b0 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.cpp
+++ b/src/shader_recompiler/frontend/ir/opcodes.cpp
@@ -24,6 +24,7 @@ constexpr Type Label{Type::Label};
 constexpr Type Reg{Type::Reg};
 constexpr Type Pred{Type::Pred};
 constexpr Type Attribute{Type::Attribute};
+constexpr Type Patch{Type::Patch};
 constexpr Type U1{Type::U1};
 constexpr Type U8{Type::U8};
 constexpr Type U16{Type::U16};
diff --git a/src/shader_recompiler/frontend/ir/opcodes.inc b/src/shader_recompiler/frontend/ir/opcodes.inc
index 7a21fe7465..a86542cd8d 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.inc
+++ b/src/shader_recompiler/frontend/ir/opcodes.inc
@@ -48,6 +48,8 @@ OPCODE(GetAttribute,                                        F32,            Attr
 OPCODE(SetAttribute,                                        Void,           Attribute,      F32,            U32,                                            )
 OPCODE(GetAttributeIndexed,                                 F32,            U32,            U32,                                                            )
 OPCODE(SetAttributeIndexed,                                 Void,           U32,            F32,            U32,                                            )
+OPCODE(GetPatch,                                            F32,            Patch,                                                                          )
+OPCODE(SetPatch,                                            Void,           Patch,          F32,                                                            )
 OPCODE(SetFragColor,                                        Void,           U32,            U32,            F32,                                            )
 OPCODE(SetFragDepth,                                        Void,           F32,                                                                            )
 OPCODE(GetZFlag,                                            U1,             Void,                                                                           )
@@ -60,6 +62,7 @@ OPCODE(SetCFlag,                                            Void,           U1,
 OPCODE(SetOFlag,                                            Void,           U1,                                                                             )
 OPCODE(WorkgroupId,                                         U32x3,                                                                                          )
 OPCODE(LocalInvocationId,                                   U32x3,                                                                                          )
+OPCODE(InvocationId,                                        U32,                                                                                            )
 OPCODE(IsHelperInvocation,                                  U1,                                                                                             )
 
 // Undefined
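
Arity is implicit in this table: unused argument columns stay Void, and NumArgsOf counts argument types up to the first Void (see opcodes.cpp). A hypothetical check of the two new rows:

    #include <cassert>

    void CheckPatchArity() {
        assert(NumArgsOf(Opcode::GetPatch) == 1); // Patch
        assert(NumArgsOf(Opcode::SetPatch) == 2); // Patch, F32
    }
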
diff --git a/src/shader_recompiler/frontend/ir/patch.cpp b/src/shader_recompiler/frontend/ir/patch.cpp
new file mode 100644
index 0000000000..1f770bc488
--- /dev/null
+++ b/src/shader_recompiler/frontend/ir/patch.cpp
@@ -0,0 +1,28 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "shader_recompiler/frontend/ir/patch.h"
+#include "shader_recompiler/exception.h"
+
+namespace Shader::IR {
+
+bool IsGeneric(Patch patch) noexcept {
+    return patch >= Patch::Component0 && patch <= Patch::Component119;
+}
+
+u32 GenericPatchIndex(Patch patch) {
+    if (!IsGeneric(patch)) {
+        throw InvalidArgument("Patch {} is not generic", patch);
+    }
+    return (static_cast<u32>(patch) - static_cast<u32>(Patch::Component0)) / 4;
+}
+
+u32 GenericPatchElement(Patch patch) {
+    if (!IsGeneric(patch)) {
+        throw InvalidArgument("Patch {} is not generic", patch);
+    }
+    return (static_cast<u32>(patch) - static_cast<u32>(Patch::Component0)) % 4;
+}
+
+} // namespace Shader::IR
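
The patch space decoded here is 128 32-bit words: six tessellation levels, two padding slots, then 120 generic components packed four per patch attribute (30 vec4 patches, matching the uses_patches and patches arrays elsewhere in this change). A worked example of the decomposition:

    // Patch::Component13 has raw value 8 + 13 = 21:
    static_assert((21 - 8) / 4 == 3); // GenericPatchIndex   -> generic patch attribute 3
    static_assert((21 - 8) % 4 == 1); // GenericPatchElement -> element .y
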
diff --git a/src/shader_recompiler/frontend/ir/patch.h b/src/shader_recompiler/frontend/ir/patch.h
new file mode 100644
index 0000000000..6d66ff0d6c
--- /dev/null
+++ b/src/shader_recompiler/frontend/ir/patch.h
@@ -0,0 +1,149 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Shader::IR {
+
+enum class Patch : u64 {
+    TessellationLodLeft,
+    TessellationLodTop,
+    TessellationLodRight,
+    TessellationLodBottom,
+    TessellationLodInteriorU,
+    TessellationLodInteriorV,
+    ComponentPadding0,
+    ComponentPadding1,
+    Component0,
+    Component1,
+    Component2,
+    Component3,
+    Component4,
+    Component5,
+    Component6,
+    Component7,
+    Component8,
+    Component9,
+    Component10,
+    Component11,
+    Component12,
+    Component13,
+    Component14,
+    Component15,
+    Component16,
+    Component17,
+    Component18,
+    Component19,
+    Component20,
+    Component21,
+    Component22,
+    Component23,
+    Component24,
+    Component25,
+    Component26,
+    Component27,
+    Component28,
+    Component29,
+    Component30,
+    Component31,
+    Component32,
+    Component33,
+    Component34,
+    Component35,
+    Component36,
+    Component37,
+    Component38,
+    Component39,
+    Component40,
+    Component41,
+    Component42,
+    Component43,
+    Component44,
+    Component45,
+    Component46,
+    Component47,
+    Component48,
+    Component49,
+    Component50,
+    Component51,
+    Component52,
+    Component53,
+    Component54,
+    Component55,
+    Component56,
+    Component57,
+    Component58,
+    Component59,
+    Component60,
+    Component61,
+    Component62,
+    Component63,
+    Component64,
+    Component65,
+    Component66,
+    Component67,
+    Component68,
+    Component69,
+    Component70,
+    Component71,
+    Component72,
+    Component73,
+    Component74,
+    Component75,
+    Component76,
+    Component77,
+    Component78,
+    Component79,
+    Component80,
+    Component81,
+    Component82,
+    Component83,
+    Component84,
+    Component85,
+    Component86,
+    Component87,
+    Component88,
+    Component89,
+    Component90,
+    Component91,
+    Component92,
+    Component93,
+    Component94,
+    Component95,
+    Component96,
+    Component97,
+    Component98,
+    Component99,
+    Component100,
+    Component101,
+    Component102,
+    Component103,
+    Component104,
+    Component105,
+    Component106,
+    Component107,
+    Component108,
+    Component109,
+    Component110,
+    Component111,
+    Component112,
+    Component113,
+    Component114,
+    Component115,
+    Component116,
+    Component117,
+    Component118,
+    Component119,
+};
+static_assert(static_cast<u64>(Patch::Component119) == 127);
+
+[[nodiscard]] bool IsGeneric(Patch patch) noexcept;
+
+[[nodiscard]] u32 GenericPatchIndex(Patch patch);
+
+[[nodiscard]] u32 GenericPatchElement(Patch patch);
+
+} // namespace Shader::IR
diff --git a/src/shader_recompiler/frontend/ir/type.h b/src/shader_recompiler/frontend/ir/type.h
index 9a32ca1e8a..8b3b338528 100644
--- a/src/shader_recompiler/frontend/ir/type.h
+++ b/src/shader_recompiler/frontend/ir/type.h
@@ -20,26 +20,27 @@ enum class Type {
     Reg = 1 << 2,
     Pred = 1 << 3,
     Attribute = 1 << 4,
-    U1 = 1 << 5,
-    U8 = 1 << 6,
-    U16 = 1 << 7,
-    U32 = 1 << 8,
-    U64 = 1 << 9,
-    F16 = 1 << 10,
-    F32 = 1 << 11,
-    F64 = 1 << 12,
-    U32x2 = 1 << 13,
-    U32x3 = 1 << 14,
-    U32x4 = 1 << 15,
-    F16x2 = 1 << 16,
-    F16x3 = 1 << 17,
-    F16x4 = 1 << 18,
-    F32x2 = 1 << 19,
-    F32x3 = 1 << 20,
-    F32x4 = 1 << 21,
-    F64x2 = 1 << 22,
-    F64x3 = 1 << 23,
-    F64x4 = 1 << 24,
+    Patch = 1 << 5,
+    U1 = 1 << 6,
+    U8 = 1 << 7,
+    U16 = 1 << 8,
+    U32 = 1 << 9,
+    U64 = 1 << 10,
+    F16 = 1 << 11,
+    F32 = 1 << 12,
+    F64 = 1 << 13,
+    U32x2 = 1 << 14,
+    U32x3 = 1 << 15,
+    U32x4 = 1 << 16,
+    F16x2 = 1 << 17,
+    F16x3 = 1 << 18,
+    F16x4 = 1 << 19,
+    F32x2 = 1 << 20,
+    F32x3 = 1 << 21,
+    F32x4 = 1 << 22,
+    F64x2 = 1 << 23,
+    F64x3 = 1 << 24,
+    F64x4 = 1 << 25,
 };
 DECLARE_ENUM_FLAG_OPERATORS(Type)
 
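Since Type is a set of one-hot flags (note every value above Patch shifted up by one), DECLARE_ENUM_FLAG_OPERATORS lets one mask describe several acceptable types at once. A small illustration, assuming the generated operators are constexpr as usual:

    constexpr Type numeric{Type::U32 | Type::F32};
    static_assert((static_cast<int>(numeric) & static_cast<int>(Type::F32)) != 0);
    static_assert((static_cast<int>(numeric) & static_cast<int>(Type::Patch)) == 0);
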
diff --git a/src/shader_recompiler/frontend/ir/value.cpp b/src/shader_recompiler/frontend/ir/value.cpp
index 1e7ffb86d5..bf5f8c0c20 100644
--- a/src/shader_recompiler/frontend/ir/value.cpp
+++ b/src/shader_recompiler/frontend/ir/value.cpp
@@ -18,6 +18,8 @@ Value::Value(IR::Pred value) noexcept : type{Type::Pred}, pred{value} {}
 
 Value::Value(IR::Attribute value) noexcept : type{Type::Attribute}, attribute{value} {}
 
+Value::Value(IR::Patch value) noexcept : type{Type::Patch}, patch{value} {}
+
 Value::Value(bool value) noexcept : type{Type::U1}, imm_u1{value} {}
 
 Value::Value(u8 value) noexcept : type{Type::U8}, imm_u8{value} {}
@@ -109,6 +111,11 @@ IR::Attribute Value::Attribute() const {
     return attribute;
 }
 
+IR::Patch Value::Patch() const {
+    ValidateAccess(Type::Patch);
+    return patch;
+}
+
 bool Value::U1() const {
     if (IsIdentity()) {
         return inst->Arg(0).U1();
@@ -182,6 +189,8 @@ bool Value::operator==(const Value& other) const {
         return pred == other.pred;
     case Type::Attribute:
         return attribute == other.attribute;
+    case Type::Patch:
+        return patch == other.patch;
     case Type::U1:
         return imm_u1 == other.imm_u1;
     case Type::U8:
diff --git a/src/shader_recompiler/frontend/ir/value.h b/src/shader_recompiler/frontend/ir/value.h
index a0962863d8..3037455632 100644
--- a/src/shader_recompiler/frontend/ir/value.h
+++ b/src/shader_recompiler/frontend/ir/value.h
@@ -9,6 +9,7 @@
 #include "shader_recompiler/frontend/ir/attribute.h"
+#include "shader_recompiler/frontend/ir/patch.h"
 #include "shader_recompiler/frontend/ir/pred.h"
 #include "shader_recompiler/frontend/ir/reg.h"
 #include "shader_recompiler/frontend/ir/type.h"
 
 namespace Shader::IR {
@@ -24,6 +25,7 @@ public:
     explicit Value(IR::Reg value) noexcept;
     explicit Value(IR::Pred value) noexcept;
     explicit Value(IR::Attribute value) noexcept;
+    explicit Value(IR::Patch value) noexcept;
     explicit Value(bool value) noexcept;
     explicit Value(u8 value) noexcept;
     explicit Value(u16 value) noexcept;
@@ -46,6 +48,7 @@ public:
     [[nodiscard]] IR::Reg Reg() const;
     [[nodiscard]] IR::Pred Pred() const;
     [[nodiscard]] IR::Attribute Attribute() const;
+    [[nodiscard]] IR::Patch Patch() const;
     [[nodiscard]] bool U1() const;
     [[nodiscard]] u8 U8() const;
     [[nodiscard]] u16 U16() const;
@@ -67,6 +70,7 @@ private:
         IR::Reg reg;
         IR::Pred pred;
         IR::Attribute attribute;
+        IR::Patch patch;
         bool imm_u1;
         u8 imm_u8;
         u16 imm_u16;
diff --git a/src/shader_recompiler/frontend/maxwell/program.cpp b/src/shader_recompiler/frontend/maxwell/program.cpp
index ab67446c80..20a1d61cc4 100644
--- a/src/shader_recompiler/frontend/maxwell/program.cpp
+++ b/src/shader_recompiler/frontend/maxwell/program.cpp
@@ -70,6 +70,11 @@ IR::Program TranslateProgram(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Blo
     program.stage = env.ShaderStage();
     program.local_memory_size = env.LocalMemorySize();
     switch (program.stage) {
+    case Stage::TessellationControl: {
+        const ProgramHeader& sph{env.SPH()};
+        program.invocations = sph.common2.threads_per_input_primitive;
+        break;
+    }
     case Stage::Geometry: {
         const ProgramHeader& sph{env.SPH()};
         program.output_topology = sph.common3.output_topology;
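
The invocation count read here from the shader program header is consumed twice in the SPIR-V backend earlier in this patch: it becomes the OutputVertices execution mode and the array bound for per-invocation tessellation control outputs:

    ctx.AddExecutionMode(main, spv::ExecutionMode::OutputVertices, program.invocations);
    // ...
    type = ctx.TypeArray(type, ctx.Constant(ctx.U32[1], *invocations));
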
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_attribute.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_attribute.cpp
index eb6a80de23..7d7dcc3cbd 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_attribute.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_attribute.cpp
@@ -70,12 +70,6 @@ void TranslatorVisitor::ALD(u64 insn) {
         BitField<47, 2, Size> size;
     } const ald{insn};
 
-    if (ald.o != 0) {
-        throw NotImplementedException("O");
-    }
-    if (ald.patch != 0) {
-        throw NotImplementedException("P");
-    }
     const u64 offset{ald.absolute_offset.Value()};
     if (offset % 4 != 0) {
         throw NotImplementedException("Unaligned absolute offset {}", offset);
@@ -84,11 +78,19 @@ void TranslatorVisitor::ALD(u64 insn) {
     const u32 num_elements{NumElements(ald.size)};
     if (ald.index_reg == IR::Reg::RZ) {
         for (u32 element = 0; element < num_elements; ++element) {
-            const IR::Attribute attr{offset / 4 + element};
-            F(ald.dest_reg + element, ir.GetAttribute(attr, vertex));
+            if (ald.patch != 0) {
+                const IR::Patch patch{offset / 4 + element};
+                F(ald.dest_reg + element, ir.GetPatch(patch));
+            } else {
+                const IR::Attribute attr{offset / 4 + element};
+                F(ald.dest_reg + element, ir.GetAttribute(attr, vertex));
+            }
         }
         return;
     }
+    if (ald.patch != 0) {
+        throw NotImplementedException("Indirect patch read");
+    }
     HandleIndexed(*this, ald.index_reg, num_elements, [&](u32 element, IR::U32 final_offset) {
         F(ald.dest_reg + element, ir.GetAttributeIndexed(final_offset, vertex));
     });
@@ -106,9 +108,6 @@ void TranslatorVisitor::AST(u64 insn) {
         BitField<47, 2, Size> size;
     } const ast{insn};
 
-    if (ast.patch != 0) {
-        throw NotImplementedException("P");
-    }
     if (ast.index_reg != IR::Reg::RZ) {
         throw NotImplementedException("Indexed store");
     }
@@ -120,11 +119,19 @@ void TranslatorVisitor::AST(u64 insn) {
     const u32 num_elements{NumElements(ast.size)};
     if (ast.index_reg == IR::Reg::RZ) {
         for (u32 element = 0; element < num_elements; ++element) {
-            const IR::Attribute attr{offset / 4 + element};
-            ir.SetAttribute(attr, F(ast.src_reg + element), vertex);
+            if (ast.patch != 0) {
+                const IR::Patch patch{offset / 4 + element};
+                ir.SetPatch(patch, F(ast.src_reg + element));
+            } else {
+                const IR::Attribute attr{offset / 4 + element};
+                ir.SetAttribute(attr, F(ast.src_reg + element), vertex);
+            }
         }
         return;
     }
+    if (ast.patch != 0) {
+        throw NotImplementedException("Indexed tessellation patch store");
+    }
     HandleIndexed(*this, ast.index_reg, num_elements, [&](u32 element, IR::U32 final_offset) {
         ir.SetAttributeIndexed(final_offset, F(ast.src_reg + element), vertex);
     });
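
With the P bit set, the attribute offset addresses patch space directly: the 32-bit word at byte offset off maps to the IR::Patch with raw value off / 4. Worked example:

    // ALD.P at absolute offset 0x24 reads raw patch value 0x24 / 4 = 9,
    // i.e. Patch::Component1 (generic patch attribute 0, element .y):
    static_assert(0x24 / 4 == 9);
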
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp
index bc822d585e..660b84c20b 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp
@@ -113,6 +113,8 @@ enum class SpecialRegister : u64 {
 
 [[nodiscard]] IR::U32 Read(IR::IREmitter& ir, SpecialRegister special_register) {
     switch (special_register) {
+    case SpecialRegister::SR_INVOCATION_ID:
+        return ir.InvocationId();
     case SpecialRegister::SR_THREAD_KILL:
         return IR::U32{ir.Select(ir.IsHelperInvocation(), ir.Imm32(-1), ir.Imm32(0))};
     case SpecialRegister::SR_INVOCATION_INFO:
diff --git a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
index 617ec05cee..aadcf7999c 100644
--- a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
+++ b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
@@ -53,6 +53,10 @@ void GetAttribute(Info& info, IR::Attribute attribute) {
     case IR::Attribute::PointSpriteT:
         info.loads_point_coord = true;
         break;
+    case IR::Attribute::TessellationEvaluationPointU:
+    case IR::Attribute::TessellationEvaluationPointV:
+        info.loads_tess_coord = true;
+        break;
     default:
         throw NotImplementedException("Get attribute {}", attribute);
     }
@@ -94,6 +98,34 @@ void SetAttribute(Info& info, IR::Attribute attribute) {
     }
 }
 
+void GetPatch(Info& info, IR::Patch patch) {
+    if (!IR::IsGeneric(patch)) {
+        throw NotImplementedException("Reading non-generic patch {}", patch);
+    }
+    info.uses_patches.at(IR::GenericPatchIndex(patch)) = true;
+}
+
+void SetPatch(Info& info, IR::Patch patch) {
+    if (IR::IsGeneric(patch)) {
+        info.uses_patches.at(IR::GenericPatchIndex(patch)) = true;
+        return;
+    }
+    switch (patch) {
+    case IR::Patch::TessellationLodLeft:
+    case IR::Patch::TessellationLodTop:
+    case IR::Patch::TessellationLodRight:
+    case IR::Patch::TessellationLodBottom:
+        info.stores_tess_level_outer = true;
+        break;
+    case IR::Patch::TessellationLodInteriorU:
+    case IR::Patch::TessellationLodInteriorV:
+        info.stores_tess_level_inner = true;
+        break;
+    default:
+        throw NotImplementedException("Set patch {}", patch);
+    }
+}
+
 void VisitUsages(Info& info, IR::Inst& inst) {
     switch (inst.GetOpcode()) {
     case IR::Opcode::CompositeConstructF16x2:
@@ -350,6 +382,12 @@ void VisitUsages(Info& info, IR::Inst& inst) {
     case IR::Opcode::SetAttribute:
         SetAttribute(info, inst.Arg(0).Attribute());
         break;
+    case IR::Opcode::GetPatch:
+        GetPatch(info, inst.Arg(0).Patch());
+        break;
+    case IR::Opcode::SetPatch:
+        SetPatch(info, inst.Arg(0).Patch());
+        break;
     case IR::Opcode::GetAttributeIndexed:
         info.loads_indexed_attributes = true;
         break;
@@ -368,6 +406,9 @@ void VisitUsages(Info& info, IR::Inst& inst) {
     case IR::Opcode::LocalInvocationId:
         info.uses_local_invocation_id = true;
         break;
+    case IR::Opcode::InvocationId:
+        info.uses_invocation_id = true;
+        break;
     case IR::Opcode::IsHelperInvocation:
         info.uses_is_helper_invocation = true;
         break;
diff --git a/src/shader_recompiler/profile.h b/src/shader_recompiler/profile.h
index c26017d75f..3a04f075ee 100644
--- a/src/shader_recompiler/profile.h
+++ b/src/shader_recompiler/profile.h
@@ -38,6 +38,18 @@ enum class CompareFunction {
     Always,
 };
 
+enum class TessPrimitive {
+    Isolines,
+    Triangles,
+    Quads,
+};
+
+enum class TessSpacing {
+    Equal,
+    FractionalOdd,
+    FractionalEven,
+};
+
 struct TransformFeedbackVarying {
     u32 buffer{};
     u32 stride{};
@@ -74,6 +86,10 @@ struct Profile {
     bool convert_depth_mode{};
     bool force_early_z{};
 
+    TessPrimitive tess_primitive{};
+    TessSpacing tess_spacing{};
+    bool tess_clockwise{};
+
     InputTopology input_topology{};
 
     std::optional<float> fixed_state_point_size;
diff --git a/src/shader_recompiler/shader_info.h b/src/shader_recompiler/shader_info.h
index 336c6131ab..4dbf9ed12a 100644
--- a/src/shader_recompiler/shader_info.h
+++ b/src/shader_recompiler/shader_info.h
@@ -101,8 +101,10 @@ struct Info {
 
     bool uses_workgroup_id{};
     bool uses_local_invocation_id{};
+    bool uses_invocation_id{};
     bool uses_is_helper_invocation{};
     bool uses_subgroup_invocation_id{};
+    std::array<bool, 30> uses_patches{};
 
     std::array<InputVarying, 32> input_generics{};
     bool loads_position{};
@@ -110,6 +112,7 @@ struct Info {
     bool loads_vertex_id{};
     bool loads_front_face{};
     bool loads_point_coord{};
+    bool loads_tess_coord{};
     bool loads_indexed_attributes{};
 
     std::array<bool, 8> stores_frag_color{};
@@ -120,6 +123,8 @@ struct Info {
     bool stores_clip_distance{};
     bool stores_layer{};
     bool stores_viewport_index{};
+    bool stores_tess_level_outer{};
+    bool stores_tess_level_inner{};
     bool stores_indexed_attributes{};
 
     bool uses_fp16{};
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index dc4ff0da2b..8f0b0b8ecc 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -685,6 +685,19 @@ VkCullModeFlagBits CullFace(Maxwell::CullFace cull_face) {
     return {};
 }
 
+VkPolygonMode PolygonMode(Maxwell::PolygonMode polygon_mode) {
+    switch (polygon_mode) {
+    case Maxwell::PolygonMode::Point:
+        return VK_POLYGON_MODE_POINT;
+    case Maxwell::PolygonMode::Line:
+        return VK_POLYGON_MODE_LINE;
+    case Maxwell::PolygonMode::Fill:
+        return VK_POLYGON_MODE_FILL;
+    }
+    UNIMPLEMENTED_MSG("Unimplemented polygon mode={}", polygon_mode);
+    return {};
+}
+
 VkComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle) {
     switch (swizzle) {
     case Tegra::Texture::SwizzleSource::Zero:
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.h b/src/video_core/renderer_vulkan/maxwell_to_vk.h
index 9f78e15b6a..50a599c116 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.h
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.h
@@ -65,6 +65,8 @@ VkFrontFace FrontFace(Maxwell::FrontFace front_face);
 
 VkCullModeFlagBits CullFace(Maxwell::CullFace cull_face);
 
+VkPolygonMode PolygonMode(Maxwell::PolygonMode polygon_mode);
+
 VkComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle);
 
 VkViewportCoordinateSwizzleNV ViewportSwizzle(Maxwell::ViewportSwizzle swizzle);
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 84720a6f92..d5e9dae0f7 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -355,7 +355,8 @@ void GraphicsPipeline::MakePipeline(const Device& device, VkRenderPass render_pa
             static_cast<VkBool32>(state.depth_clamp_disabled == 0 ? VK_TRUE : VK_FALSE),
         .rasterizerDiscardEnable =
             static_cast<VkBool32>(state.rasterize_enable == 0 ? VK_TRUE : VK_FALSE),
-        .polygonMode = VK_POLYGON_MODE_FILL,
+        .polygonMode =
+            MaxwellToVK::PolygonMode(FixedPipelineState::UnpackPolygonMode(state.polygon_mode)),
         .cullMode = static_cast<VkCullModeFlags>(
             dynamic.cull_enable ? MaxwellToVK::CullFace(dynamic.CullFace()) : VK_CULL_MODE_NONE),
         .frontFace = MaxwellToVK::FrontFace(dynamic.FrontFace()),
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index ee22255bfa..0bccc640ac 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -1040,6 +1040,36 @@ Shader::Profile PipelineCache::MakeProfile(const GraphicsPipelineCacheKey& key,
         std::ranges::transform(key.state.attributes, profile.generic_input_types.begin(),
                                &CastAttributeType);
         break;
+    case Shader::Stage::TessellationEval:
+        // The guest's clockwise flag is inverted relative to SPIR-V's vertex order; flip it.
+        profile.tess_clockwise = key.state.tessellation_clockwise == 0;
+        profile.tess_primitive = [&key] {
+            const u32 raw{key.state.tessellation_primitive.Value()};
+            switch (static_cast<Maxwell::TessellationPrimitive>(raw)) {
+            case Maxwell::TessellationPrimitive::Isolines:
+                return Shader::TessPrimitive::Isolines;
+            case Maxwell::TessellationPrimitive::Triangles:
+                return Shader::TessPrimitive::Triangles;
+            case Maxwell::TessellationPrimitive::Quads:
+                return Shader::TessPrimitive::Quads;
+            }
+            UNREACHABLE();
+            return Shader::TessPrimitive::Triangles;
+        }();
+        profile.tess_spacing = [&] {
+            const u32 raw{key.state.tessellation_spacing};
+            switch (static_cast<Maxwell::TessellationSpacing>(raw)) {
+            case Maxwell::TessellationSpacing::Equal:
+                return Shader::TessSpacing::Equal;
+            case Maxwell::TessellationSpacing::FractionalOdd:
+                return Shader::TessSpacing::FractionalOdd;
+            case Maxwell::TessellationSpacing::FractionalEven:
+                return Shader::TessSpacing::FractionalEven;
+            }
+            UNREACHABLE();
+            return Shader::TessSpacing::Equal;
+        }();
+        break;
     case Shader::Stage::Geometry:
         if (program.output_topology == Shader::OutputTopology::PointList) {
             profile.fixed_state_point_size = point_size;
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 0412b52343..555b12ed72 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -91,7 +91,7 @@ StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& mem
         .flags = 0,
         .size = STREAM_BUFFER_SIZE,
         .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
-                 VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
+                 VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
         .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
         .queueFamilyIndexCount = 0,
         .pQueueFamilyIndices = nullptr,
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index 87cfe6312d..f0de19ba11 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -225,7 +225,7 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
             .drawIndirectFirstInstance = false,
             .depthClamp = true,
             .depthBiasClamp = true,
-            .fillModeNonSolid = false,
+            .fillModeNonSolid = true,
             .depthBounds = false,
             .wideLines = false,
             .largePoints = true,
@@ -670,6 +670,7 @@ void Device::CheckSuitability(bool requires_swapchain) const {
         std::make_pair(features.largePoints, "largePoints"),
         std::make_pair(features.multiViewport, "multiViewport"),
         std::make_pair(features.depthBiasClamp, "depthBiasClamp"),
+        std::make_pair(features.fillModeNonSolid, "fillModeNonSolid"),
         std::make_pair(features.geometryShader, "geometryShader"),
         std::make_pair(features.tessellationShader, "tessellationShader"),
         std::make_pair(features.occlusionQueryPrecise, "occlusionQueryPrecise"),
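
These two device changes back the PolygonMode translation added above: point and line polygon modes are only legal when the fillModeNonSolid feature is enabled, so it is now both requested and part of the suitability check. A sketch of the rule the validation layers enforce:

    const bool non_solid{polygon_mode != VK_POLYGON_MODE_FILL};
    if (non_solid && !features.fillModeNonSolid) {
        // building a pipeline with this rasterization state is invalid usage
    }
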
-- 
cgit v1.2.3-70-g09d2


From 79c2e43fcd5a254121d48e6957ac159041c4fac0 Mon Sep 17 00:00:00 2001
From: ReinUsesLisp <reinuseslisp@airmail.cc>
Date: Tue, 20 Apr 2021 22:20:46 -0300
Subject: shader: Calculate number of arguments in an opcode at compile time

---
 src/shader_recompiler/frontend/ir/opcodes.cpp | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

(limited to 'src/shader_recompiler/frontend/ir/opcodes.cpp')

diff --git a/src/shader_recompiler/frontend/ir/opcodes.cpp b/src/shader_recompiler/frontend/ir/opcodes.cpp
index 7f04b647b0..4207d548c5 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.cpp
+++ b/src/shader_recompiler/frontend/ir/opcodes.cpp
@@ -57,6 +57,17 @@ constexpr std::array META_TABLE{
 #undef OPCODE
 };
 
+constexpr size_t CalculateNumArgsOf(Opcode op) {
+    const auto& arg_types{META_TABLE[static_cast<size_t>(op)].arg_types};
+    return std::distance(arg_types.begin(), std::ranges::find(arg_types, Type::Void));
+}
+
+constexpr std::array NUM_ARGS{
+#define OPCODE(name_token, type_token, ...) CalculateNumArgsOf(Opcode::name_token),
+#include "opcodes.inc"
+#undef OPCODE
+};
+
 void ValidateOpcode(Opcode op) {
     const size_t raw{static_cast<size_t>(op)};
     if (raw >= META_TABLE.size()) {
@@ -72,9 +83,7 @@ Type TypeOf(Opcode op) {
 
 size_t NumArgsOf(Opcode op) {
     ValidateOpcode(op);
-    const auto& arg_types{META_TABLE[static_cast<size_t>(op)].arg_types};
-    const auto distance{std::distance(arg_types.begin(), std::ranges::find(arg_types, Type::Void))};
-    return static_cast<size_t>(distance);
+    return NUM_ARGS[static_cast<size_t>(op)];
 }
 
 Type ArgTypeOf(Opcode op, size_t arg_index) {
-- 
cgit v1.2.3-70-g09d2

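The commit above replaces the per-call std::ranges::find walk in NumArgsOf with a lookup into NUM_ARGS, a second X-macro-generated table whose entries are computed during constant evaluation. The following self-contained sketch shows the technique with a toy three-entry opcode list standing in for opcodes.inc; the enum values, types, and names are simplified stand-ins, not the real IR definitions.

    #include <algorithm>
    #include <array>
    #include <cstddef>

    enum class Type { Void, U1, U32, F32 };

    // Toy stand-in for opcodes.inc: name, result type, argument types.
    #define OPCODE_LIST(OPCODE)                                                 \
        OPCODE(Void, Type::Void)                                                \
        OPCODE(IAdd32, Type::U32, Type::U32, Type::U32)                         \
        OPCODE(SelectU32, Type::U32, Type::U1, Type::U32, Type::U32)

    enum class Opcode {
    #define OPCODE(name, ...) name,
        OPCODE_LIST(OPCODE)
    #undef OPCODE
    };

    struct OpcodeMeta {
        Type type;
        std::array<Type, 4> arg_types; // Unused slots stay Type::Void
    };

    constexpr std::array META_TABLE{
    #define OPCODE(name, type, ...) OpcodeMeta{type, {__VA_ARGS__}},
        OPCODE_LIST(OPCODE)
    #undef OPCODE
    };

    // Arguments end at the first Type::Void slot, so the count is the
    // distance to it; constexpr lets the search run at compile time.
    constexpr size_t CalculateNumArgsOf(Opcode op) {
        const auto& arg_types{META_TABLE[static_cast<size_t>(op)].arg_types};
        return static_cast<size_t>(
            std::distance(arg_types.begin(), std::ranges::find(arg_types, Type::Void)));
    }

    constexpr std::array NUM_ARGS{
    #define OPCODE(name, ...) CalculateNumArgsOf(Opcode::name),
        OPCODE_LIST(OPCODE)
    #undef OPCODE
    };

    static_assert(NUM_ARGS[static_cast<size_t>(Opcode::IAdd32)] == 2);
    static_assert(NUM_ARGS[static_cast<size_t>(Opcode::SelectU32)] == 3);

The static_asserts demonstrate that the counts are constants baked into the binary, so the runtime search in NumArgsOf disappears entirely.
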

From 6944cabb899c4367a63cde97ae2bc2eb1a0fb790 Mon Sep 17 00:00:00 2001
From: ReinUsesLisp <reinuseslisp@airmail.cc>
Date: Wed, 21 Apr 2021 00:25:46 -0300
Subject: shader: Inline common Opcode and Inst functions

---
 .../frontend/ir/microinstruction.cpp               | 18 -----
 .../frontend/ir/microinstruction.h                 | 13 +++-
 src/shader_recompiler/frontend/ir/opcodes.cpp      | 90 +---------------------
 src/shader_recompiler/frontend/ir/opcodes.h        | 74 +++++++++++++++++-
 4 files changed, 83 insertions(+), 112 deletions(-)

(limited to 'src/shader_recompiler/frontend/ir/opcodes.cpp')

diff --git a/src/shader_recompiler/frontend/ir/microinstruction.cpp b/src/shader_recompiler/frontend/ir/microinstruction.cpp
index 7555ac00a1..41f9fa0cd8 100644
--- a/src/shader_recompiler/frontend/ir/microinstruction.cpp
+++ b/src/shader_recompiler/frontend/ir/microinstruction.cpp
@@ -221,28 +221,10 @@ Inst* Inst::GetAssociatedPseudoOperation(IR::Opcode opcode) {
     }
 }
 
-size_t Inst::NumArgs() const {
-    return op == Opcode::Phi ? phi_args.size() : NumArgsOf(op);
-}
-
 IR::Type Inst::Type() const {
     return TypeOf(op);
 }
 
-Value Inst::Arg(size_t index) const {
-    if (op == Opcode::Phi) {
-        if (index >= phi_args.size()) {
-            throw InvalidArgument("Out of bounds argument index {} in phi instruction", index);
-        }
-        return phi_args[index].second;
-    } else {
-        if (index >= NumArgsOf(op)) {
-            throw InvalidArgument("Out of bounds argument index {} in opcode {}", index, op);
-        }
-        return args[index];
-    }
-}
-
 void Inst::SetArg(size_t index, Value value) {
     if (index >= NumArgs()) {
         throw InvalidArgument("Out of bounds argument index {} in opcode {}", index, op);
diff --git a/src/shader_recompiler/frontend/ir/microinstruction.h b/src/shader_recompiler/frontend/ir/microinstruction.h
index dc9f683fe5..ea55fc29cc 100644
--- a/src/shader_recompiler/frontend/ir/microinstruction.h
+++ b/src/shader_recompiler/frontend/ir/microinstruction.h
@@ -73,10 +73,19 @@ public:
     [[nodiscard]] IR::Type Type() const;
 
     /// Get the number of arguments this instruction has.
-    [[nodiscard]] size_t NumArgs() const;
+    [[nodiscard]] size_t NumArgs() const {
+        return op == Opcode::Phi ? phi_args.size() : NumArgsOf(op);
+    }
 
     /// Get the value of a given argument index.
-    [[nodiscard]] Value Arg(size_t index) const;
+    [[nodiscard]] Value Arg(size_t index) const noexcept {
+        if (op == Opcode::Phi) {
+            return phi_args[index].second;
+        } else {
+            return args[index];
+        }
+    }
+
     /// Set the value of a given argument index.
     void SetArg(size_t index, Value value);
 
diff --git a/src/shader_recompiler/frontend/ir/opcodes.cpp b/src/shader_recompiler/frontend/ir/opcodes.cpp
index 4207d548c5..24d024ad7c 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.cpp
+++ b/src/shader_recompiler/frontend/ir/opcodes.cpp
@@ -2,102 +2,14 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-#include <algorithm>
-#include <array>
 #include <string_view>
 
-#include "shader_recompiler/exception.h"
 #include "shader_recompiler/frontend/ir/opcodes.h"
 
 namespace Shader::IR {
-namespace {
-struct OpcodeMeta {
-    std::string_view name;
-    Type type;
-    std::array<Type, 5> arg_types;
-};
-
-// using enum Type;
-constexpr Type Void{Type::Void};
-constexpr Type Opaque{Type::Opaque};
-constexpr Type Label{Type::Label};
-constexpr Type Reg{Type::Reg};
-constexpr Type Pred{Type::Pred};
-constexpr Type Attribute{Type::Attribute};
-constexpr Type Patch{Type::Patch};
-constexpr Type U1{Type::U1};
-constexpr Type U8{Type::U8};
-constexpr Type U16{Type::U16};
-constexpr Type U32{Type::U32};
-constexpr Type U64{Type::U64};
-constexpr Type F16{Type::F16};
-constexpr Type F32{Type::F32};
-constexpr Type F64{Type::F64};
-constexpr Type U32x2{Type::U32x2};
-constexpr Type U32x3{Type::U32x3};
-constexpr Type U32x4{Type::U32x4};
-constexpr Type F16x2{Type::F16x2};
-constexpr Type F16x3{Type::F16x3};
-constexpr Type F16x4{Type::F16x4};
-constexpr Type F32x2{Type::F32x2};
-constexpr Type F32x3{Type::F32x3};
-constexpr Type F32x4{Type::F32x4};
-constexpr Type F64x2{Type::F64x2};
-constexpr Type F64x3{Type::F64x3};
-constexpr Type F64x4{Type::F64x4};
-
-constexpr std::array META_TABLE{
-#define OPCODE(name_token, type_token, ...)                                                        \
-    OpcodeMeta{                                                                                    \
-        .name{#name_token},                                                                        \
-        .type = type_token,                                                                        \
-        .arg_types{__VA_ARGS__},                                                                   \
-    },
-#include "opcodes.inc"
-#undef OPCODE
-};
-
-constexpr size_t CalculateNumArgsOf(Opcode op) {
-    const auto& arg_types{META_TABLE[static_cast<size_t>(op)].arg_types};
-    return std::distance(arg_types.begin(), std::ranges::find(arg_types, Type::Void));
-}
-
-constexpr std::array NUM_ARGS{
-#define OPCODE(name_token, type_token, ...) CalculateNumArgsOf(Opcode::name_token),
-#include "opcodes.inc"
-#undef OPCODE
-};
-
-void ValidateOpcode(Opcode op) {
-    const size_t raw{static_cast<size_t>(op)};
-    if (raw >= META_TABLE.size()) {
-        throw InvalidArgument("Invalid opcode with raw value {}", raw);
-    }
-}
-} // Anonymous namespace
-
-Type TypeOf(Opcode op) {
-    ValidateOpcode(op);
-    return META_TABLE[static_cast<size_t>(op)].type;
-}
-
-size_t NumArgsOf(Opcode op) {
-    ValidateOpcode(op);
-    return NUM_ARGS[static_cast<size_t>(op)];
-}
-
-Type ArgTypeOf(Opcode op, size_t arg_index) {
-    ValidateOpcode(op);
-    const auto& arg_types{META_TABLE[static_cast<size_t>(op)].arg_types};
-    if (arg_index >= arg_types.size() || arg_types[arg_index] == Type::Void) {
-        throw InvalidArgument("Out of bounds argument");
-    }
-    return arg_types[arg_index];
-}
 
 std::string_view NameOf(Opcode op) {
-    ValidateOpcode(op);
-    return META_TABLE[static_cast<size_t>(op)].name;
+    return Detail::META_TABLE[static_cast<size_t>(op)].name;
 }
 
 } // namespace Shader::IR
diff --git a/src/shader_recompiler/frontend/ir/opcodes.h b/src/shader_recompiler/frontend/ir/opcodes.h
index 999fb2e775..b5697c7f97 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.h
+++ b/src/shader_recompiler/frontend/ir/opcodes.h
@@ -4,6 +4,8 @@
 
 #pragma once
 
+#include <algorithm>
+#include <array>
 #include <string_view>
 
 #include <fmt/format.h>
@@ -18,14 +20,80 @@ enum class Opcode {
 #undef OPCODE
 };
 
+namespace Detail {
+
+struct OpcodeMeta {
+    std::string_view name;
+    Type type;
+    std::array<Type, 5> arg_types;
+};
+
+// TODO: Replace these constants with "using enum Type;" once all target compilers support it
+constexpr Type Void{Type::Void};
+constexpr Type Opaque{Type::Opaque};
+constexpr Type Label{Type::Label};
+constexpr Type Reg{Type::Reg};
+constexpr Type Pred{Type::Pred};
+constexpr Type Attribute{Type::Attribute};
+constexpr Type Patch{Type::Patch};
+constexpr Type U1{Type::U1};
+constexpr Type U8{Type::U8};
+constexpr Type U16{Type::U16};
+constexpr Type U32{Type::U32};
+constexpr Type U64{Type::U64};
+constexpr Type F16{Type::F16};
+constexpr Type F32{Type::F32};
+constexpr Type F64{Type::F64};
+constexpr Type U32x2{Type::U32x2};
+constexpr Type U32x3{Type::U32x3};
+constexpr Type U32x4{Type::U32x4};
+constexpr Type F16x2{Type::F16x2};
+constexpr Type F16x3{Type::F16x3};
+constexpr Type F16x4{Type::F16x4};
+constexpr Type F32x2{Type::F32x2};
+constexpr Type F32x3{Type::F32x3};
+constexpr Type F32x4{Type::F32x4};
+constexpr Type F64x2{Type::F64x2};
+constexpr Type F64x3{Type::F64x3};
+constexpr Type F64x4{Type::F64x4};
+
+constexpr std::array META_TABLE{
+#define OPCODE(name_token, type_token, ...)                                                        \
+    OpcodeMeta{                                                                                    \
+        .name{#name_token},                                                                        \
+        .type = type_token,                                                                        \
+        .arg_types{__VA_ARGS__},                                                                   \
+    },
+#include "opcodes.inc"
+#undef OPCODE
+};
+
+constexpr size_t CalculateNumArgsOf(Opcode op) {
+    const auto& arg_types{META_TABLE[static_cast<size_t>(op)].arg_types};
+    return std::distance(arg_types.begin(), std::ranges::find(arg_types, Type::Void));
+}
+
+constexpr std::array NUM_ARGS{
+#define OPCODE(name_token, type_token, ...) CalculateNumArgsOf(Opcode::name_token),
+#include "opcodes.inc"
+#undef OPCODE
+};
+} // namespace Detail
+
 /// Get return type of an opcode
-[[nodiscard]] Type TypeOf(Opcode op);
+[[nodiscard]] inline Type TypeOf(Opcode op) noexcept {
+    return Detail::META_TABLE[static_cast<size_t>(op)].type;
+}
 
 /// Get the number of arguments an opcode accepts
-[[nodiscard]] size_t NumArgsOf(Opcode op);
+[[nodiscard]] inline size_t NumArgsOf(Opcode op) noexcept {
+    return Detail::NUM_ARGS[static_cast<size_t>(op)];
+}
 
 /// Get the required type of an argument of an opcode
-[[nodiscard]] Type ArgTypeOf(Opcode op, size_t arg_index);
+[[nodiscard]] inline Type ArgTypeOf(Opcode op, size_t arg_index) noexcept {
+    return Detail::META_TABLE[static_cast<size_t>(op)].arg_types[arg_index];
+}
 
 /// Get the name of an opcode
 [[nodiscard]] std::string_view NameOf(Opcode op);
-- 
cgit v1.2.3-70-g09d2
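
With the tables exposed from the header through the Detail namespace, TypeOf, NumArgsOf, and ArgTypeOf collapse to inline noexcept array lookups, and Inst::NumArgs and Inst::Arg can be defined directly in the class body. The trade-off is that the old bounds checks and InvalidArgument throws are gone, so index validity becomes a caller precondition on these hot paths. A reduced sketch of the resulting contract follows; the member types are simplified and NumArgsOf is a stub standing in for the Detail::NUM_ARGS lookup.

    #include <cstddef>
    #include <utility>
    #include <vector>

    enum class Opcode { Phi, IAdd32 };

    // Stub standing in for the Detail::NUM_ARGS table lookup in opcodes.h.
    constexpr size_t NumArgsOf(Opcode) noexcept {
        return 2;
    }

    struct Value {};
    struct Block;

    class Inst {
    public:
        explicit Inst(Opcode op_) : op{op_} {}

        // Inline, non-throwing accessors: passing a valid index is now the
        // caller's responsibility, which lets hot loops skip the checks the
        // old out-of-line definitions performed.
        [[nodiscard]] size_t NumArgs() const noexcept {
            return op == Opcode::Phi ? phi_args.size() : NumArgsOf(op);
        }

        [[nodiscard]] Value Arg(size_t index) const noexcept {
            // Phi instructions carry a variable-length argument list; every
            // other opcode uses the fixed array sized by the opcode table.
            return op == Opcode::Phi ? phi_args[index].second : args[index];
        }

    private:
        Opcode op;
        std::vector<std::pair<Block*, Value>> phi_args;
        Value args[4]{};
    };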