From 2d48a7b4d0666ad16d03a22d85712617a0849046 Mon Sep 17 00:00:00 2001
From: ReinUsesLisp <reinuseslisp@airmail.cc>
Date: Sat, 9 Jan 2021 03:30:07 -0300
Subject: shader: Initial recompiler work

---
 src/shader_recompiler/frontend/ir/attribute.cpp | 447 ++++++++++++++++++++++++
 1 file changed, 447 insertions(+)
 create mode 100644 src/shader_recompiler/frontend/ir/attribute.cpp

(limited to 'src/shader_recompiler/frontend/ir/attribute.cpp')

diff --git a/src/shader_recompiler/frontend/ir/attribute.cpp b/src/shader_recompiler/frontend/ir/attribute.cpp
new file mode 100644
index 0000000000..2fb7d576ff
--- /dev/null
+++ b/src/shader_recompiler/frontend/ir/attribute.cpp
@@ -0,0 +1,447 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <fmt/format.h>
+
+#include "shader_recompiler/exception.h"
+#include "shader_recompiler/frontend/ir/attribute.h"
+
+namespace Shader::IR {
+
+bool IsGeneric(Attribute attribute) noexcept {
+    return attribute >= Attribute::Generic0X && attribute <= Attribute::Generic31W;
+}
+
+int GenericAttributeIndex(Attribute attribute) {
+    if (!IsGeneric(attribute)) {
+        throw InvalidArgument("Attribute is not generic {}", attribute);
+    }
+    return (static_cast<int>(attribute) - static_cast<int>(Attribute::Generic0X)) / 4;
+}
+
+std::string NameOf(Attribute attribute) {
+    switch (attribute) {
+    case Attribute::PrimitiveId:
+        return "PrimitiveId";
+    case Attribute::Layer:
+        return "Layer";
+    case Attribute::ViewportIndex:
+        return "ViewportIndex";
+    case Attribute::PointSize:
+        return "PointSize";
+    case Attribute::PositionX:
+        return "Position.X";
+    case Attribute::PositionY:
+        return "Position.Y";
+    case Attribute::PositionZ:
+        return "Position.Z";
+    case Attribute::PositionW:
+        return "Position.W";
+    case Attribute::Generic0X:
+        return "Generic[0].X";
+    case Attribute::Generic0Y:
+        return "Generic[0].Y";
+    case Attribute::Generic0Z:
+        return "Generic[0].Z";
+    case Attribute::Generic0W:
+        return "Generic[0].W";
+    case Attribute::Generic1X:
+        return "Generic[1].X";
+    case Attribute::Generic1Y:
+        return "Generic[1].Y";
+    case Attribute::Generic1Z:
+        return "Generic[1].Z";
+    case Attribute::Generic1W:
+        return "Generic[1].W";
+    case Attribute::Generic2X:
+        return "Generic[2].X";
+    case Attribute::Generic2Y:
+        return "Generic[2].Y";
+    case Attribute::Generic2Z:
+        return "Generic[2].Z";
+    case Attribute::Generic2W:
+        return "Generic[2].W";
+    case Attribute::Generic3X:
+        return "Generic[3].X";
+    case Attribute::Generic3Y:
+        return "Generic[3].Y";
+    case Attribute::Generic3Z:
+        return "Generic[3].Z";
+    case Attribute::Generic3W:
+        return "Generic[3].W";
+    case Attribute::Generic4X:
+        return "Generic[4].X";
+    case Attribute::Generic4Y:
+        return "Generic[4].Y";
+    case Attribute::Generic4Z:
+        return "Generic[4].Z";
+    case Attribute::Generic4W:
+        return "Generic[4].W";
+    case Attribute::Generic5X:
+        return "Generic[5].X";
+    case Attribute::Generic5Y:
+        return "Generic[5].Y";
+    case Attribute::Generic5Z:
+        return "Generic[5].Z";
+    case Attribute::Generic5W:
+        return "Generic[5].W";
+    case Attribute::Generic6X:
+        return "Generic[6].X";
+    case Attribute::Generic6Y:
+        return "Generic[6].Y";
+    case Attribute::Generic6Z:
+        return "Generic[6].Z";
+    case Attribute::Generic6W:
+        return "Generic[6].W";
+    case Attribute::Generic7X:
+        return "Generic[7].X";
+    case Attribute::Generic7Y:
+        return "Generic[7].Y";
+    case Attribute::Generic7Z:
+        return "Generic[7].Z";
+    case Attribute::Generic7W:
+        return "Generic[7].W";
+    case Attribute::Generic8X:
+        return "Generic[8].X";
+    case Attribute::Generic8Y:
+        return "Generic[8].Y";
+    case Attribute::Generic8Z:
+        return "Generic[8].Z";
+    case Attribute::Generic8W:
+        return "Generic[8].W";
+    case Attribute::Generic9X:
+        return "Generic[9].X";
+    case Attribute::Generic9Y:
+        return "Generic[9].Y";
+    case Attribute::Generic9Z:
+        return "Generic[9].Z";
+    case Attribute::Generic9W:
+        return "Generic[9].W";
+    case Attribute::Generic10X:
+        return "Generic[10].X";
+    case Attribute::Generic10Y:
+        return "Generic[10].Y";
+    case Attribute::Generic10Z:
+        return "Generic[10].Z";
+    case Attribute::Generic10W:
+        return "Generic[10].W";
+    case Attribute::Generic11X:
+        return "Generic[11].X";
+    case Attribute::Generic11Y:
+        return "Generic[11].Y";
+    case Attribute::Generic11Z:
+        return "Generic[11].Z";
+    case Attribute::Generic11W:
+        return "Generic[11].W";
+    case Attribute::Generic12X:
+        return "Generic[12].X";
+    case Attribute::Generic12Y:
+        return "Generic[12].Y";
+    case Attribute::Generic12Z:
+        return "Generic[12].Z";
+    case Attribute::Generic12W:
+        return "Generic[12].W";
+    case Attribute::Generic13X:
+        return "Generic[13].X";
+    case Attribute::Generic13Y:
+        return "Generic[13].Y";
+    case Attribute::Generic13Z:
+        return "Generic[13].Z";
+    case Attribute::Generic13W:
+        return "Generic[13].W";
+    case Attribute::Generic14X:
+        return "Generic[14].X";
+    case Attribute::Generic14Y:
+        return "Generic[14].Y";
+    case Attribute::Generic14Z:
+        return "Generic[14].Z";
+    case Attribute::Generic14W:
+        return "Generic[14].W";
+    case Attribute::Generic15X:
+        return "Generic[15].X";
+    case Attribute::Generic15Y:
+        return "Generic[15].Y";
+    case Attribute::Generic15Z:
+        return "Generic[15].Z";
+    case Attribute::Generic15W:
+        return "Generic[15].W";
+    case Attribute::Generic16X:
+        return "Generic[16].X";
+    case Attribute::Generic16Y:
+        return "Generic[16].Y";
+    case Attribute::Generic16Z:
+        return "Generic[16].Z";
+    case Attribute::Generic16W:
+        return "Generic[16].W";
+    case Attribute::Generic17X:
+        return "Generic[17].X";
+    case Attribute::Generic17Y:
+        return "Generic[17].Y";
+    case Attribute::Generic17Z:
+        return "Generic[17].Z";
+    case Attribute::Generic17W:
+        return "Generic[17].W";
+    case Attribute::Generic18X:
+        return "Generic[18].X";
+    case Attribute::Generic18Y:
+        return "Generic[18].Y";
+    case Attribute::Generic18Z:
+        return "Generic[18].Z";
+    case Attribute::Generic18W:
+        return "Generic[18].W";
+    case Attribute::Generic19X:
+        return "Generic[19].X";
+    case Attribute::Generic19Y:
+        return "Generic[19].Y";
+    case Attribute::Generic19Z:
+        return "Generic[19].Z";
+    case Attribute::Generic19W:
+        return "Generic[19].W";
+    case Attribute::Generic20X:
+        return "Generic[20].X";
+    case Attribute::Generic20Y:
+        return "Generic[20].Y";
+    case Attribute::Generic20Z:
+        return "Generic[20].Z";
+    case Attribute::Generic20W:
+        return "Generic[20].W";
+    case Attribute::Generic21X:
+        return "Generic[21].X";
+    case Attribute::Generic21Y:
+        return "Generic[21].Y";
+    case Attribute::Generic21Z:
+        return "Generic[21].Z";
+    case Attribute::Generic21W:
+        return "Generic[21].W";
+    case Attribute::Generic22X:
+        return "Generic[22].X";
+    case Attribute::Generic22Y:
+        return "Generic[22].Y";
+    case Attribute::Generic22Z:
+        return "Generic[22].Z";
+    case Attribute::Generic22W:
+        return "Generic[22].W";
+    case Attribute::Generic23X:
+        return "Generic[23].X";
+    case Attribute::Generic23Y:
+        return "Generic[23].Y";
+    case Attribute::Generic23Z:
+        return "Generic[23].Z";
+    case Attribute::Generic23W:
+        return "Generic[23].W";
+    case Attribute::Generic24X:
+        return "Generic[24].X";
+    case Attribute::Generic24Y:
+        return "Generic[24].Y";
+    case Attribute::Generic24Z:
+        return "Generic[24].Z";
+    case Attribute::Generic24W:
+        return "Generic[24].W";
+    case Attribute::Generic25X:
+        return "Generic[25].X";
+    case Attribute::Generic25Y:
+        return "Generic[25].Y";
+    case Attribute::Generic25Z:
+        return "Generic[25].Z";
+    case Attribute::Generic25W:
+        return "Generic[25].W";
+    case Attribute::Generic26X:
+        return "Generic[26].X";
+    case Attribute::Generic26Y:
+        return "Generic[26].Y";
+    case Attribute::Generic26Z:
+        return "Generic[26].Z";
+    case Attribute::Generic26W:
+        return "Generic[26].W";
+    case Attribute::Generic27X:
+        return "Generic[27].X";
+    case Attribute::Generic27Y:
+        return "Generic[27].Y";
+    case Attribute::Generic27Z:
+        return "Generic[27].Z";
+    case Attribute::Generic27W:
+        return "Generic[27].W";
+    case Attribute::Generic28X:
+        return "Generic[28].X";
+    case Attribute::Generic28Y:
+        return "Generic[28].Y";
+    case Attribute::Generic28Z:
+        return "Generic[28].Z";
+    case Attribute::Generic28W:
+        return "Generic[28].W";
+    case Attribute::Generic29X:
+        return "Generic[29].X";
+    case Attribute::Generic29Y:
+        return "Generic[29].Y";
+    case Attribute::Generic29Z:
+        return "Generic[29].Z";
+    case Attribute::Generic29W:
+        return "Generic[29].W";
+    case Attribute::Generic30X:
+        return "Generic[30].X";
+    case Attribute::Generic30Y:
+        return "Generic[30].Y";
+    case Attribute::Generic30Z:
+        return "Generic[30].Z";
+    case Attribute::Generic30W:
+        return "Generic[30].W";
+    case Attribute::Generic31X:
+        return "Generic[31].X";
+    case Attribute::Generic31Y:
+        return "Generic[31].Y";
+    case Attribute::Generic31Z:
+        return "Generic[31].Z";
+    case Attribute::Generic31W:
+        return "Generic[31].W";
+    case Attribute::ColorFrontDiffuseR:
+        return "ColorFrontDiffuse.R";
+    case Attribute::ColorFrontDiffuseG:
+        return "ColorFrontDiffuse.G";
+    case Attribute::ColorFrontDiffuseB:
+        return "ColorFrontDiffuse.B";
+    case Attribute::ColorFrontDiffuseA:
+        return "ColorFrontDiffuse.A";
+    case Attribute::ColorFrontSpecularR:
+        return "ColorFrontSpecular.R";
+    case Attribute::ColorFrontSpecularG:
+        return "ColorFrontSpecular.G";
+    case Attribute::ColorFrontSpecularB:
+        return "ColorFrontSpecular.B";
+    case Attribute::ColorFrontSpecularA:
+        return "ColorFrontSpecular.A";
+    case Attribute::ColorBackDiffuseR:
+        return "ColorBackDiffuse.R";
+    case Attribute::ColorBackDiffuseG:
+        return "ColorBackDiffuse.G";
+    case Attribute::ColorBackDiffuseB:
+        return "ColorBackDiffuse.B";
+    case Attribute::ColorBackDiffuseA:
+        return "ColorBackDiffuse.A";
+    case Attribute::ColorBackSpecularR:
+        return "ColorBackSpecular.R";
+    case Attribute::ColorBackSpecularG:
+        return "ColorBackSpecular.G";
+    case Attribute::ColorBackSpecularB:
+        return "ColorBackSpecular.B";
+    case Attribute::ColorBackSpecularA:
+        return "ColorBackSpecular.A";
+    case Attribute::ClipDistance0:
+        return "ClipDistance[0]";
+    case Attribute::ClipDistance1:
+        return "ClipDistance[1]";
+    case Attribute::ClipDistance2:
+        return "ClipDistance[2]";
+    case Attribute::ClipDistance3:
+        return "ClipDistance[3]";
+    case Attribute::ClipDistance4:
+        return "ClipDistance[4]";
+    case Attribute::ClipDistance5:
+        return "ClipDistance[5]";
+    case Attribute::ClipDistance6:
+        return "ClipDistance[6]";
+    case Attribute::ClipDistance7:
+        return "ClipDistance[7]";
+    case Attribute::PointSpriteS:
+        return "PointSprite.S";
+    case Attribute::PointSpriteT:
+        return "PointSprite.T";
+    case Attribute::FogCoordinate:
+        return "FogCoordinate";
+    case Attribute::TessellationEvaluationPointU:
+        return "TessellationEvaluationPoint.U";
+    case Attribute::TessellationEvaluationPointV:
+        return "TessellationEvaluationPoint.V";
+    case Attribute::InstanceId:
+        return "InstanceId";
+    case Attribute::VertexId:
+        return "VertexId";
+    case Attribute::FixedFncTexture0S:
+        return "FixedFncTexture[0].S";
+    case Attribute::FixedFncTexture0T:
+        return "FixedFncTexture[0].T";
+    case Attribute::FixedFncTexture0R:
+        return "FixedFncTexture[0].R";
+    case Attribute::FixedFncTexture0Q:
+        return "FixedFncTexture[0].Q";
+    case Attribute::FixedFncTexture1S:
+        return "FixedFncTexture[1].S";
+    case Attribute::FixedFncTexture1T:
+        return "FixedFncTexture[1].T";
+    case Attribute::FixedFncTexture1R:
+        return "FixedFncTexture[1].R";
+    case Attribute::FixedFncTexture1Q:
+        return "FixedFncTexture[1].Q";
+    case Attribute::FixedFncTexture2S:
+        return "FixedFncTexture[2].S";
+    case Attribute::FixedFncTexture2T:
+        return "FixedFncTexture[2].T";
+    case Attribute::FixedFncTexture2R:
+        return "FixedFncTexture[2].R";
+    case Attribute::FixedFncTexture2Q:
+        return "FixedFncTexture[2].Q";
+    case Attribute::FixedFncTexture3S:
+        return "FixedFncTexture[3].S";
+    case Attribute::FixedFncTexture3T:
+        return "FixedFncTexture[3].T";
+    case Attribute::FixedFncTexture3R:
+        return "FixedFncTexture[3].R";
+    case Attribute::FixedFncTexture3Q:
+        return "FixedFncTexture[3].Q";
+    case Attribute::FixedFncTexture4S:
+        return "FixedFncTexture[4].S";
+    case Attribute::FixedFncTexture4T:
+        return "FixedFncTexture[4].T";
+    case Attribute::FixedFncTexture4R:
+        return "FixedFncTexture[4].R";
+    case Attribute::FixedFncTexture4Q:
+        return "FixedFncTexture[4].Q";
+    case Attribute::FixedFncTexture5S:
+        return "FixedFncTexture[5].S";
+    case Attribute::FixedFncTexture5T:
+        return "FixedFncTexture[5].T";
+    case Attribute::FixedFncTexture5R:
+        return "FixedFncTexture[5].R";
+    case Attribute::FixedFncTexture5Q:
+        return "FixedFncTexture[5].Q";
+    case Attribute::FixedFncTexture6S:
+        return "FixedFncTexture[6].S";
+    case Attribute::FixedFncTexture6T:
+        return "FixedFncTexture[6].T";
+    case Attribute::FixedFncTexture6R:
+        return "FixedFncTexture[6].R";
+    case Attribute::FixedFncTexture6Q:
+        return "FixedFncTexture[6].Q";
+    case Attribute::FixedFncTexture7S:
+        return "FixedFncTexture[7].S";
+    case Attribute::FixedFncTexture7T:
+        return "FixedFncTexture[7].T";
+    case Attribute::FixedFncTexture7R:
+        return "FixedFncTexture[7].R";
+    case Attribute::FixedFncTexture7Q:
+        return "FixedFncTexture[7].Q";
+    case Attribute::FixedFncTexture8S:
+        return "FixedFncTexture[8].S";
+    case Attribute::FixedFncTexture8T:
+        return "FixedFncTexture[8].T";
+    case Attribute::FixedFncTexture8R:
+        return "FixedFncTexture[8].R";
+    case Attribute::FixedFncTexture8Q:
+        return "FixedFncTexture[8].Q";
+    case Attribute::FixedFncTexture9S:
+        return "FixedFncTexture[9].S";
+    case Attribute::FixedFncTexture9T:
+        return "FixedFncTexture[9].T";
+    case Attribute::FixedFncTexture9R:
+        return "FixedFncTexture[9].R";
+    case Attribute::FixedFncTexture9Q:
+        return "FixedFncTexture[9].Q";
+    case Attribute::ViewportMask:
+        return "ViewportMask";
+    case Attribute::FrontFace:
+        return "FrontFace";
+    }
+    return fmt::format("<reserved attribute {}>", static_cast<int>(attribute));
+}
+
+} // namespace Shader::IR
\ No newline at end of file
-- 
cgit v1.2.3-70-g09d2
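
A quick usage sketch of the helpers this commit introduces. It assumes the
Attribute enum lays out each generic attribute as four consecutive X/Y/Z/W
entries, which is why GenericAttributeIndex divides by 4; PrintAttribute is a
hypothetical caller, not part of the patch:

    #include <fmt/format.h>

    #include "shader_recompiler/frontend/ir/attribute.h"

    // Hypothetical caller: classify and print an attribute with the new helpers.
    void PrintAttribute(Shader::IR::Attribute attr) {
        using namespace Shader::IR;
        if (IsGeneric(attr)) {
            // e.g. Generic3Y -> index 3, NameOf -> "Generic[3].Y"
            fmt::print("generic attribute {}: {}\n", GenericAttributeIndex(attr), NameOf(attr));
        } else {
            fmt::print("attribute: {}\n", NameOf(attr));
        }
    }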


From 260743f371236f7c57b01334b1c3474b15a47c39 Mon Sep 17 00:00:00 2001
From: ReinUsesLisp <reinuseslisp@airmail.cc>
Date: Fri, 19 Mar 2021 19:28:31 -0300
Subject: shader: Add partial rasterizer integration

---
 src/shader_recompiler/CMakeLists.txt               |   4 +-
 .../backend/spirv/emit_context.cpp                 |  64 ++-
 src/shader_recompiler/backend/spirv/emit_context.h |  18 +-
 src/shader_recompiler/backend/spirv/emit_spirv.cpp |  44 +-
 src/shader_recompiler/backend/spirv/emit_spirv.h   |  18 +-
 .../backend/spirv/emit_spirv_context_get_set.cpp   |  55 ++-
 .../backend/spirv/emit_spirv_control_flow.cpp      |  23 +-
 src/shader_recompiler/environment.h                |  14 +
 src/shader_recompiler/frontend/ir/attribute.cpp    |   2 +-
 src/shader_recompiler/frontend/ir/attribute.h      |   2 +-
 src/shader_recompiler/frontend/ir/ir_emitter.cpp   |  14 +
 src/shader_recompiler/frontend/ir/ir_emitter.h     |   4 +
 .../frontend/ir/microinstruction.cpp               |   3 +
 src/shader_recompiler/frontend/ir/opcodes.inc      |  11 +-
 src/shader_recompiler/frontend/ir/program.h        |   2 +
 src/shader_recompiler/frontend/ir/reg.h            |   4 +-
 .../frontend/maxwell/control_flow.cpp              |  31 +-
 .../frontend/maxwell/control_flow.h                |   3 +-
 src/shader_recompiler/frontend/maxwell/program.cpp |   1 +
 .../frontend/maxwell/structured_control_flow.cpp   |  18 +
 .../frontend/maxwell/translate/impl/exit.cpp       |  15 -
 .../maxwell/translate/impl/exit_program.cpp        |  43 ++
 .../frontend/maxwell/translate/impl/impl.h         |   4 +-
 .../translate/impl/load_store_attribute.cpp        |  86 +++-
 .../maxwell/translate/impl/not_implemented.cpp     |  16 +-
 .../maxwell/translate/impl/texture_fetch.cpp       |   2 +-
 .../translate/impl/texture_fetch_swizzled.cpp      |   2 +-
 .../ir_opt/collect_shader_info_pass.cpp            |  60 ++-
 src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp  |   2 +-
 src/shader_recompiler/program_header.h             | 143 +++++++
 src/shader_recompiler/recompiler.cpp               |  28 --
 src/shader_recompiler/recompiler.h                 |  20 -
 src/shader_recompiler/shader_info.h                |  10 +
 src/shader_recompiler/stage.h                      |  19 +
 src/video_core/CMakeLists.txt                      |   6 +-
 .../renderer_vulkan/fixed_pipeline_state.cpp       |   4 +
 .../renderer_vulkan/fixed_pipeline_state.h         |   9 +-
 src/video_core/renderer_vulkan/maxwell_to_vk.cpp   |  24 ++
 src/video_core/renderer_vulkan/maxwell_to_vk.h     |   2 +
 src/video_core/renderer_vulkan/pipeline_helper.h   | 162 ++++++++
 .../renderer_vulkan/vk_compute_pipeline.cpp        | 209 ++--------
 .../renderer_vulkan/vk_compute_pipeline.h          |   3 +-
 .../renderer_vulkan/vk_graphics_pipeline.cpp       | 445 +++++++++++++++++++++
 .../renderer_vulkan/vk_graphics_pipeline.h         |  66 +++
 src/video_core/renderer_vulkan/vk_pipeline.h       |  36 --
 .../renderer_vulkan/vk_pipeline_cache.cpp          | 346 ++++++++++++----
 src/video_core/renderer_vulkan/vk_pipeline_cache.h |  82 +++-
 src/video_core/renderer_vulkan/vk_rasterizer.cpp   |  47 ++-
 src/video_core/renderer_vulkan/vk_rasterizer.h     |   2 +
 .../renderer_vulkan/vk_render_pass_cache.cpp       | 100 +++++
 .../renderer_vulkan/vk_render_pass_cache.h         |  53 +++
 .../renderer_vulkan/vk_texture_cache.cpp           |  68 +---
 src/video_core/renderer_vulkan/vk_texture_cache.h  |  29 +-
 src/video_core/vulkan_common/vulkan_device.cpp     |  15 +
 54 files changed, 1927 insertions(+), 566 deletions(-)
 delete mode 100644 src/shader_recompiler/frontend/maxwell/translate/impl/exit.cpp
 create mode 100644 src/shader_recompiler/frontend/maxwell/translate/impl/exit_program.cpp
 create mode 100644 src/shader_recompiler/program_header.h
 delete mode 100644 src/shader_recompiler/recompiler.cpp
 delete mode 100644 src/shader_recompiler/recompiler.h
 create mode 100644 src/shader_recompiler/stage.h
 create mode 100644 src/video_core/renderer_vulkan/pipeline_helper.h
 create mode 100644 src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
 create mode 100644 src/video_core/renderer_vulkan/vk_graphics_pipeline.h
 delete mode 100644 src/video_core/renderer_vulkan/vk_pipeline.h
 create mode 100644 src/video_core/renderer_vulkan/vk_render_pass_cache.cpp
 create mode 100644 src/video_core/renderer_vulkan/vk_render_pass_cache.h

(limited to 'src/shader_recompiler/frontend/ir/attribute.cpp')

diff --git a/src/shader_recompiler/CMakeLists.txt b/src/shader_recompiler/CMakeLists.txt
index b870e99378..31c3941064 100644
--- a/src/shader_recompiler/CMakeLists.txt
+++ b/src/shader_recompiler/CMakeLists.txt
@@ -65,6 +65,7 @@ add_library(shader_recompiler STATIC
     frontend/maxwell/translate/impl/common_funcs.h
     frontend/maxwell/translate/impl/condition_code_set.cpp
     frontend/maxwell/translate/impl/double_add.cpp
+    frontend/maxwell/translate/impl/exit_program.cpp
     frontend/maxwell/translate/impl/find_leading_one.cpp
     frontend/maxwell/translate/impl/floating_point_add.cpp
     frontend/maxwell/translate/impl/floating_point_compare.cpp
@@ -121,9 +122,8 @@ add_library(shader_recompiler STATIC
     ir_opt/texture_pass.cpp
     ir_opt/verification_pass.cpp
     object_pool.h
+    program_header.h
     profile.h
-    recompiler.cpp
-    recompiler.h
     shader_info.h
 )
 
diff --git a/src/shader_recompiler/backend/spirv/emit_context.cpp b/src/shader_recompiler/backend/spirv/emit_context.cpp
index 204389d749..6c79b611bf 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_context.cpp
@@ -62,18 +62,15 @@ void VectorTypes::Define(Sirit::Module& sirit_ctx, Id base_type, std::string_vie
     }
 }
 
-EmitContext::EmitContext(const Profile& profile_, IR::Program& program)
+EmitContext::EmitContext(const Profile& profile_, IR::Program& program, u32& binding)
     : Sirit::Module(0x00010000), profile{profile_} {
     AddCapability(spv::Capability::Shader);
     DefineCommonTypes(program.info);
     DefineCommonConstants();
-    DefineSpecialVariables(program.info);
-
-    u32 binding{};
+    DefineInterfaces(program.info, program.stage);
     DefineConstantBuffers(program.info, binding);
     DefineStorageBuffers(program.info, binding);
     DefineTextures(program.info, binding);
-
     DefineLabels(program);
 }
 
@@ -96,6 +93,8 @@ Id EmitContext::Def(const IR::Value& value) {
         return Constant(F32[1], value.F32());
     case IR::Type::F64:
         return Constant(F64[1], value.F64());
+    case IR::Type::Label:
+        return value.Label()->Definition<Id>();
     default:
         throw NotImplementedException("Immediate type {}", value.Type());
     }
@@ -109,6 +108,9 @@ void EmitContext::DefineCommonTypes(const Info& info) {
     F32.Define(*this, TypeFloat(32), "f32");
     U32.Define(*this, TypeInt(32, false), "u32");
 
+    input_f32 = Name(TypePointer(spv::StorageClass::Input, F32[1]), "input_f32");
+    output_f32 = Name(TypePointer(spv::StorageClass::Output, F32[1]), "output_f32");
+
     if (info.uses_int8) {
         AddCapability(spv::Capability::Int8);
         U8 = Name(TypeInt(8, false), "u8");
@@ -139,15 +141,20 @@ void EmitContext::DefineCommonConstants() {
     u32_zero_value = Constant(U32[1], 0U);
 }
 
-void EmitContext::DefineSpecialVariables(const Info& info) {
-    const auto define{[this](Id type, spv::BuiltIn builtin, spv::StorageClass storage_class) {
-        const Id pointer_type{TypePointer(storage_class, type)};
-        const Id id{AddGlobalVariable(pointer_type, spv::StorageClass::Input)};
-        Decorate(id, spv::Decoration::BuiltIn, builtin);
-        return id;
-    }};
+void EmitContext::DefineInterfaces(const Info& info, Stage stage) {
+    const auto define{
+        [this](Id type, std::optional<spv::BuiltIn> builtin, spv::StorageClass storage_class) {
+            const Id pointer_type{TypePointer(storage_class, type)};
+            const Id id{AddGlobalVariable(pointer_type, storage_class)};
+            if (builtin) {
+                Decorate(id, spv::Decoration::BuiltIn, *builtin);
+            }
+            interfaces.push_back(id);
+            return id;
+        }};
     using namespace std::placeholders;
     const auto define_input{std::bind(define, _1, _2, spv::StorageClass::Input)};
+    const auto define_output{std::bind(define, _1, _2, spv::StorageClass::Output)};
 
     if (info.uses_workgroup_id) {
         workgroup_id = define_input(U32[3], spv::BuiltIn::WorkgroupId);
@@ -155,6 +162,39 @@ void EmitContext::DefineSpecialVariables(const Info& info) {
     if (info.uses_local_invocation_id) {
         local_invocation_id = define_input(U32[3], spv::BuiltIn::LocalInvocationId);
     }
+    if (info.loads_position) {
+        const bool is_fragment{stage == Stage::Fragment};
+        const spv::BuiltIn built_in{is_fragment ? spv::BuiltIn::FragCoord : spv::BuiltIn::Position};
+        input_position = define_input(F32[4], built_in);
+    }
+    for (size_t i = 0; i < info.loads_generics.size(); ++i) {
+        if (info.loads_generics[i]) {
+            // FIXME: Declare size from input
+            input_generics[i] = define_input(F32[4], std::nullopt);
+            Decorate(input_generics[i], spv::Decoration::Location, static_cast<u32>(i));
+            Name(input_generics[i], fmt::format("in_attr{}", i));
+        }
+    }
+    if (info.stores_position) {
+        output_position = define_output(F32[4], spv::BuiltIn::Position);
+    }
+    for (size_t i = 0; i < info.stores_generics.size(); ++i) {
+        if (info.stores_generics[i]) {
+            output_generics[i] = define_output(F32[4], std::nullopt);
+            Decorate(output_generics[i], spv::Decoration::Location, static_cast<u32>(i));
+            Name(output_generics[i], fmt::format("out_attr{}", i));
+        }
+    }
+    if (stage == Stage::Fragment) {
+        for (size_t i = 0; i < 8; ++i) {
+            if (!info.stores_frag_color[i]) {
+                continue;
+            }
+            frag_color[i] = define_output(F32[4], std::nullopt);
+            Decorate(frag_color[i], spv::Decoration::Location, static_cast<u32>(i));
+            Name(frag_color[i], fmt::format("frag_color{}", i));
+        }
+    }
 }
 
 void EmitContext::DefineConstantBuffers(const Info& info, u32& binding) {
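
The key change in this file is the binding out-parameter: descriptor bindings
are no longer reset per shader. A minimal sketch of the caller side this
implies, assuming a pipeline with several stages (the loop and variable names
are not from this patch):

    // One counter shared across all stages, so constant buffers, storage
    // buffers and textures never collide on a descriptor binding.
    u32 binding{0};
    std::vector<std::vector<u32>> modules;
    for (IR::Program& program : programs) {
        // Each stage keeps allocating where the previous one stopped
        modules.push_back(EmitSPIRV(profile, env, program, binding));
    }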
diff --git a/src/shader_recompiler/backend/spirv/emit_context.h b/src/shader_recompiler/backend/spirv/emit_context.h
index 35eca258a8..2d7961ac3b 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.h
+++ b/src/shader_recompiler/backend/spirv/emit_context.h
@@ -46,7 +46,7 @@ struct UniformDefinitions {
 
 class EmitContext final : public Sirit::Module {
 public:
-    explicit EmitContext(const Profile& profile, IR::Program& program);
+    explicit EmitContext(const Profile& profile, IR::Program& program, u32& binding);
     ~EmitContext();
 
     [[nodiscard]] Id Def(const IR::Value& value);
@@ -71,6 +71,9 @@ public:
 
     UniformDefinitions uniform_types;
 
+    Id input_f32{};
+    Id output_f32{};
+
     Id storage_u32{};
 
     std::array<UniformDefinitions, Info::MAX_CBUFS> cbufs{};
@@ -80,10 +83,21 @@ public:
     Id workgroup_id{};
     Id local_invocation_id{};
 
+    Id input_position{};
+    std::array<Id, 32> input_generics{};
+
+    Id output_position{};
+    std::array<Id, 32> output_generics{};
+
+    std::array<Id, 8> frag_color{};
+    Id frag_depth{};
+
+    std::vector<Id> interfaces;
+
 private:
     void DefineCommonTypes(const Info& info);
     void DefineCommonConstants();
-    void DefineSpecialVariables(const Info& info);
+    void DefineInterfaces(const Info& info, Stage stage);
     void DefineConstantBuffers(const Info& info, u32& binding);
     void DefineConstantBuffers(const Info& info, Id UniformDefinitions::*member_type, u32 binding,
                                Id type, char type_char, u32 element_size);
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.cpp b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
index 50c0f72437..b8978b94a4 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
@@ -54,6 +54,8 @@ ArgType Arg(EmitContext& ctx, const IR::Value& arg) {
         return arg.U32();
     } else if constexpr (std::is_same_v<ArgType, IR::Block*>) {
         return arg.Label();
+    } else if constexpr (std::is_same_v<ArgType, IR::Attribute>) {
+        return arg.Attribute();
     }
 }
 
@@ -197,8 +199,9 @@ Id PhiArgDef(EmitContext& ctx, IR::Inst* inst, size_t index) {
 }
 } // Anonymous namespace
 
-std::vector<u32> EmitSPIRV(const Profile& profile, Environment& env, IR::Program& program) {
-    EmitContext ctx{profile, program};
+std::vector<u32> EmitSPIRV(const Profile& profile, Environment& env, IR::Program& program,
+                           u32& binding) {
+    EmitContext ctx{profile, program, binding};
     const Id void_function{ctx.TypeFunction(ctx.void_id)};
     const Id func{ctx.OpFunction(ctx.void_id, spv::FunctionControlMask::MaskNone, void_function)};
     for (IR::Block* const block : program.blocks) {
@@ -208,28 +211,41 @@ std::vector<u32> EmitSPIRV(const Profile& profile, Environment& env, IR::Program
         }
     }
     ctx.OpFunctionEnd();
-    boost::container::small_vector<Id, 32> interfaces;
-    const Info& info{program.info};
-    if (info.uses_workgroup_id) {
-        interfaces.push_back(ctx.workgroup_id);
+
+    const std::span interfaces(ctx.interfaces.data(), ctx.interfaces.size());
+    spv::ExecutionModel execution_model{};
+    switch (env.ShaderStage()) {
+    case Shader::Stage::Compute: {
+        const std::array<u32, 3> workgroup_size{env.WorkgroupSize()};
+        execution_model = spv::ExecutionModel::GLCompute;
+        ctx.AddExecutionMode(func, spv::ExecutionMode::LocalSize, workgroup_size[0],
+                             workgroup_size[1], workgroup_size[2]);
+        break;
     }
-    if (info.uses_local_invocation_id) {
-        interfaces.push_back(ctx.local_invocation_id);
+    case Shader::Stage::VertexB:
+        execution_model = spv::ExecutionModel::Vertex;
+        break;
+    case Shader::Stage::Fragment:
+        execution_model = spv::ExecutionModel::Fragment;
+        ctx.AddExecutionMode(func, spv::ExecutionMode::OriginUpperLeft);
+        break;
+    default:
+        throw NotImplementedException("Stage {}", env.ShaderStage());
     }
-    const std::span interfaces_span(interfaces.data(), interfaces.size());
-    ctx.AddEntryPoint(spv::ExecutionModel::GLCompute, func, "main", interfaces_span);
-
-    const std::array<u32, 3> workgroup_size{env.WorkgroupSize()};
-    ctx.AddExecutionMode(func, spv::ExecutionMode::LocalSize, workgroup_size[0], workgroup_size[1],
-                         workgroup_size[2]);
+    ctx.AddEntryPoint(execution_model, func, "main", interfaces);
 
     SetupDenormControl(profile, program, ctx, func);
+    const Info& info{program.info};
     if (info.uses_sampled_1d) {
         ctx.AddCapability(spv::Capability::Sampled1D);
     }
     if (info.uses_sparse_residency) {
         ctx.AddCapability(spv::Capability::SparseResidency);
     }
+    if (info.uses_demote_to_helper_invocation) {
+        ctx.AddExtension("SPV_EXT_demote_to_helper_invocation");
+        ctx.AddCapability(spv::Capability::DemoteToHelperInvocationEXT);
+    }
     // TODO: Track this usage
     ctx.AddCapability(spv::Capability::ImageGatherExtended);
 
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.h b/src/shader_recompiler/backend/spirv/emit_spirv.h
index 89566c83d0..ae121f5344 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.h
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.h
@@ -16,18 +16,18 @@
 namespace Shader::Backend::SPIRV {
 
 [[nodiscard]] std::vector<u32> EmitSPIRV(const Profile& profile, Environment& env,
-                                         IR::Program& program);
+                                         IR::Program& program, u32& binding);
 
 // Microinstruction emitters
 Id EmitPhi(EmitContext& ctx, IR::Inst* inst);
 void EmitVoid(EmitContext& ctx);
 Id EmitIdentity(EmitContext& ctx, const IR::Value& value);
-void EmitBranch(EmitContext& ctx, IR::Block* label);
-void EmitBranchConditional(EmitContext& ctx, Id condition, IR::Block* true_label,
-                           IR::Block* false_label);
-void EmitLoopMerge(EmitContext& ctx, IR::Block* merge_label, IR::Block* continue_label);
-void EmitSelectionMerge(EmitContext& ctx, IR::Block* merge_label);
+void EmitBranch(EmitContext& ctx, Id label);
+void EmitBranchConditional(EmitContext& ctx, Id condition, Id true_label, Id false_label);
+void EmitLoopMerge(EmitContext& ctx, Id merge_label, Id continue_label);
+void EmitSelectionMerge(EmitContext& ctx, Id merge_label);
 void EmitReturn(EmitContext& ctx);
+void EmitDemoteToHelperInvocation(EmitContext& ctx, Id continue_label);
 void EmitGetRegister(EmitContext& ctx);
 void EmitSetRegister(EmitContext& ctx);
 void EmitGetPred(EmitContext& ctx);
@@ -41,10 +41,12 @@ Id EmitGetCbufS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& o
 Id EmitGetCbufU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
 Id EmitGetCbufF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
 Id EmitGetCbufU64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
-void EmitGetAttribute(EmitContext& ctx);
-void EmitSetAttribute(EmitContext& ctx);
+Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr);
+void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value);
 void EmitGetAttributeIndexed(EmitContext& ctx);
 void EmitSetAttributeIndexed(EmitContext& ctx);
+void EmitSetFragColor(EmitContext& ctx, u32 index, u32 component, Id value);
+void EmitSetFragDepth(EmitContext& ctx, Id value);
 void EmitGetZFlag(EmitContext& ctx);
 void EmitGetSFlag(EmitContext& ctx);
 void EmitGetCFlag(EmitContext& ctx);
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
index 125b58cf74..02d1157403 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
@@ -5,6 +5,43 @@
 #include "shader_recompiler/backend/spirv/emit_spirv.h"
 
 namespace Shader::Backend::SPIRV {
+namespace {
+Id InputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
+    const u32 element{static_cast<u32>(attr) % 4};
+    const auto element_id{[&] { return ctx.Constant(ctx.U32[1], element); }};
+    if (IR::IsGeneric(attr)) {
+        const u32 index{IR::GenericAttributeIndex(attr)};
+        return ctx.OpAccessChain(ctx.input_f32, ctx.input_generics.at(index), element_id());
+    }
+    switch (attr) {
+    case IR::Attribute::PositionX:
+    case IR::Attribute::PositionY:
+    case IR::Attribute::PositionZ:
+    case IR::Attribute::PositionW:
+        return ctx.OpAccessChain(ctx.input_f32, ctx.input_position, element_id());
+    default:
+        throw NotImplementedException("Read attribute {}", attr);
+    }
+}
+
+Id OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
+    const u32 element{static_cast<u32>(attr) % 4};
+    const auto element_id{[&] { return ctx.Constant(ctx.U32[1], element); }};
+    if (IR::IsGeneric(attr)) {
+        const u32 index{IR::GenericAttributeIndex(attr)};
+        return ctx.OpAccessChain(ctx.output_f32, ctx.output_generics.at(index), element_id());
+    }
+    switch (attr) {
+    case IR::Attribute::PositionX:
+    case IR::Attribute::PositionY:
+    case IR::Attribute::PositionZ:
+    case IR::Attribute::PositionW:
+        return ctx.OpAccessChain(ctx.output_f32, ctx.output_position, element_id());
+    default:
+        throw NotImplementedException("Read attribute {}", attr);
+    }
+}
+} // Anonymous namespace
 
 void EmitGetRegister(EmitContext&) {
     throw NotImplementedException("SPIR-V Instruction");
@@ -87,12 +124,12 @@ Id EmitGetCbufU64(EmitContext& ctx, const IR::Value& binding, const IR::Value& o
     return GetCbuf(ctx, ctx.U64, &UniformDefinitions::U64, sizeof(u64), binding, offset);
 }
 
-void EmitGetAttribute(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
+Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr) {
+    return ctx.OpLoad(ctx.F32[1], InputAttrPointer(ctx, attr));
 }
 
-void EmitSetAttribute(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
+void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value) {
+    ctx.OpStore(OutputAttrPointer(ctx, attr), value);
 }
 
 void EmitGetAttributeIndexed(EmitContext&) {
@@ -103,6 +140,16 @@ void EmitSetAttributeIndexed(EmitContext&) {
     throw NotImplementedException("SPIR-V Instruction");
 }
 
+void EmitSetFragColor(EmitContext& ctx, u32 index, u32 component, Id value) {
+    const Id component_id{ctx.Constant(ctx.U32[1], component)};
+    const Id pointer{ctx.OpAccessChain(ctx.output_f32, ctx.frag_color.at(index), component_id)};
+    ctx.OpStore(pointer, value);
+}
+
+void EmitSetFragDepth(EmitContext& ctx, Id value) {
+    ctx.OpStore(ctx.frag_depth, value);
+}
+
 void EmitGetZFlag(EmitContext&) {
     throw NotImplementedException("SPIR-V Instruction");
 }
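
A hedged usage sketch of the new fragment-output emitter; the surrounding
setup is assumed, and `ctx` must belong to a fragment shader whose info
declared render target 0 so that ctx.frag_color.at(0) is a valid Id:

    // Store 1.0f into frag_color[0].x (render target 0, component 0).
    const Id one{ctx.Constant(ctx.F32[1], 1.0f)};
    EmitSetFragColor(ctx, 0, 0, one);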
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_control_flow.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_control_flow.cpp
index 48755b8278..6b81f01694 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_control_flow.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_control_flow.cpp
@@ -6,26 +6,29 @@
 
 namespace Shader::Backend::SPIRV {
 
-void EmitBranch(EmitContext& ctx, IR::Block* label) {
-    ctx.OpBranch(label->Definition<Id>());
+void EmitBranch(EmitContext& ctx, Id label) {
+    ctx.OpBranch(label);
 }
 
-void EmitBranchConditional(EmitContext& ctx, Id condition, IR::Block* true_label,
-                           IR::Block* false_label) {
-    ctx.OpBranchConditional(condition, true_label->Definition<Id>(), false_label->Definition<Id>());
+void EmitBranchConditional(EmitContext& ctx, Id condition, Id true_label, Id false_label) {
+    ctx.OpBranchConditional(condition, true_label, false_label);
 }
 
-void EmitLoopMerge(EmitContext& ctx, IR::Block* merge_label, IR::Block* continue_label) {
-    ctx.OpLoopMerge(merge_label->Definition<Id>(), continue_label->Definition<Id>(),
-                    spv::LoopControlMask::MaskNone);
+void EmitLoopMerge(EmitContext& ctx, Id merge_label, Id continue_label) {
+    ctx.OpLoopMerge(merge_label, continue_label, spv::LoopControlMask::MaskNone);
 }
 
-void EmitSelectionMerge(EmitContext& ctx, IR::Block* merge_label) {
-    ctx.OpSelectionMerge(merge_label->Definition<Id>(), spv::SelectionControlMask::MaskNone);
+void EmitSelectionMerge(EmitContext& ctx, Id merge_label) {
+    ctx.OpSelectionMerge(merge_label, spv::SelectionControlMask::MaskNone);
 }
 
 void EmitReturn(EmitContext& ctx) {
     ctx.OpReturn();
 }
 
+void EmitDemoteToHelperInvocation(EmitContext& ctx, Id continue_label) {
+    ctx.OpDemoteToHelperInvocationEXT();
+    ctx.OpBranch(continue_label);
+}
+
 } // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/environment.h b/src/shader_recompiler/environment.h
index 0fcb68050d..1fcaa56dda 100644
--- a/src/shader_recompiler/environment.h
+++ b/src/shader_recompiler/environment.h
@@ -3,6 +3,8 @@
 #include <array>
 
 #include "common/common_types.h"
+#include "shader_recompiler/stage.h"
+#include "shader_recompiler/program_header.h"
 
 namespace Shader {
 
@@ -15,6 +17,18 @@ public:
     [[nodiscard]] virtual u32 TextureBoundBuffer() = 0;
 
     [[nodiscard]] virtual std::array<u32, 3> WorkgroupSize() = 0;
+
+    [[nodiscard]] const ProgramHeader& SPH() const noexcept {
+        return sph;
+    }
+
+    [[nodiscard]] Stage ShaderStage() const noexcept {
+        return stage;
+    }
+
+protected:
+    ProgramHeader sph{};
+    Stage stage{};
 };
 
 } // namespace Shader
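
A minimal sketch of how a frontend is now expected to populate the protected
state; the class name and return values are assumptions, and only the two pure
virtual members visible in this hunk are overridden (the real interface has
more):

    #include "shader_recompiler/environment.h"

    class ComputeEnvironment final : public Shader::Environment {
    public:
        ComputeEnvironment() {
            stage = Shader::Stage::Compute; // later read back through ShaderStage()
        }

        u32 TextureBoundBuffer() override {
            return 0; // placeholder value for the sketch
        }

        std::array<u32, 3> WorkgroupSize() override {
            return {8, 8, 1}; // placeholder workgroup dimensions
        }
    };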
diff --git a/src/shader_recompiler/frontend/ir/attribute.cpp b/src/shader_recompiler/frontend/ir/attribute.cpp
index 2fb7d576ff..4811242ea0 100644
--- a/src/shader_recompiler/frontend/ir/attribute.cpp
+++ b/src/shader_recompiler/frontend/ir/attribute.cpp
@@ -13,7 +13,7 @@ bool IsGeneric(Attribute attribute) noexcept {
     return attribute >= Attribute::Generic0X && attribute <= Attribute::Generic31W;
 }
 
-int GenericAttributeIndex(Attribute attribute) {
+u32 GenericAttributeIndex(Attribute attribute) {
     if (!IsGeneric(attribute)) {
         throw InvalidArgument("Attribute is not generic {}", attribute);
     }
diff --git a/src/shader_recompiler/frontend/ir/attribute.h b/src/shader_recompiler/frontend/ir/attribute.h
index bb2cad6afd..34ec7e0cd0 100644
--- a/src/shader_recompiler/frontend/ir/attribute.h
+++ b/src/shader_recompiler/frontend/ir/attribute.h
@@ -224,7 +224,7 @@ enum class Attribute : u64 {
 
 [[nodiscard]] bool IsGeneric(Attribute attribute) noexcept;
 
-[[nodiscard]] int GenericAttributeIndex(Attribute attribute);
+[[nodiscard]] u32 GenericAttributeIndex(Attribute attribute);
 
 [[nodiscard]] std::string NameOf(Attribute attribute);
 
diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.cpp b/src/shader_recompiler/frontend/ir/ir_emitter.cpp
index 958282160d..672836c0b7 100644
--- a/src/shader_recompiler/frontend/ir/ir_emitter.cpp
+++ b/src/shader_recompiler/frontend/ir/ir_emitter.cpp
@@ -82,6 +82,12 @@ void IREmitter::Return() {
     Inst(Opcode::Return);
 }
 
+void IREmitter::DemoteToHelperInvocation(Block* continue_label) {
+    block->SetBranch(continue_label);
+    continue_label->AddImmediatePredecessor(block);
+    Inst(Opcode::DemoteToHelperInvocation, continue_label);
+}
+
 U32 IREmitter::GetReg(IR::Reg reg) {
     return Inst<U32>(Opcode::GetRegister, reg);
 }
@@ -248,6 +254,14 @@ void IREmitter::SetAttribute(IR::Attribute attribute, const F32& value) {
     Inst(Opcode::SetAttribute, attribute, value);
 }
 
+void IREmitter::SetFragColor(u32 index, u32 component, const F32& value) {
+    Inst(Opcode::SetFragColor, Imm32(index), Imm32(component), value);
+}
+
+void IREmitter::SetFragDepth(const F32& value) {
+    Inst(Opcode::SetFragDepth, value);
+}
+
 U32 IREmitter::WorkgroupIdX() {
     return U32{CompositeExtract(Inst(Opcode::WorkgroupId), 0)};
 }
diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.h b/src/shader_recompiler/frontend/ir/ir_emitter.h
index 05263fe8b4..72af5db377 100644
--- a/src/shader_recompiler/frontend/ir/ir_emitter.h
+++ b/src/shader_recompiler/frontend/ir/ir_emitter.h
@@ -36,6 +36,7 @@ public:
     void LoopMerge(Block* merge_block, Block* continue_target);
     void SelectionMerge(Block* merge_block);
     void Return();
+    void DemoteToHelperInvocation(Block* continue_label);
 
     [[nodiscard]] U32 GetReg(IR::Reg reg);
     void SetReg(IR::Reg reg, const U32& value);
@@ -67,6 +68,9 @@ public:
     [[nodiscard]] F32 GetAttribute(IR::Attribute attribute);
     void SetAttribute(IR::Attribute attribute, const F32& value);
 
+    void SetFragColor(u32 index, u32 component, const F32& value);
+    void SetFragDepth(const F32& value);
+
     [[nodiscard]] U32 WorkgroupIdX();
     [[nodiscard]] U32 WorkgroupIdY();
     [[nodiscard]] U32 WorkgroupIdZ();
diff --git a/src/shader_recompiler/frontend/ir/microinstruction.cpp b/src/shader_recompiler/frontend/ir/microinstruction.cpp
index 5946105d2f..21b7d8a9f4 100644
--- a/src/shader_recompiler/frontend/ir/microinstruction.cpp
+++ b/src/shader_recompiler/frontend/ir/microinstruction.cpp
@@ -55,8 +55,11 @@ bool Inst::MayHaveSideEffects() const noexcept {
     case Opcode::LoopMerge:
     case Opcode::SelectionMerge:
     case Opcode::Return:
+    case Opcode::DemoteToHelperInvocation:
     case Opcode::SetAttribute:
     case Opcode::SetAttributeIndexed:
+    case Opcode::SetFragColor:
+    case Opcode::SetFragDepth:
     case Opcode::WriteGlobalU8:
     case Opcode::WriteGlobalS8:
     case Opcode::WriteGlobalU16:
diff --git a/src/shader_recompiler/frontend/ir/opcodes.inc b/src/shader_recompiler/frontend/ir/opcodes.inc
index 9052a4903d..593faca528 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.inc
+++ b/src/shader_recompiler/frontend/ir/opcodes.inc
@@ -13,6 +13,7 @@ OPCODE(BranchConditional,                                   Void,           U1,
 OPCODE(LoopMerge,                                           Void,           Label,          Label,                                                          )
 OPCODE(SelectionMerge,                                      Void,           Label,                                                                          )
 OPCODE(Return,                                              Void,                                                                                           )
+OPCODE(DemoteToHelperInvocation,                            Void,           Label,                                                                          )
 
 // Context getters/setters
 OPCODE(GetRegister,                                         U32,            Reg,                                                                            )
@@ -28,10 +29,12 @@ OPCODE(GetCbufS16,                                          U32,            U32,
 OPCODE(GetCbufU32,                                          U32,            U32,            U32,                                                            )
 OPCODE(GetCbufF32,                                          F32,            U32,            U32,                                                            )
 OPCODE(GetCbufU64,                                          U64,            U32,            U32,                                                            )
-OPCODE(GetAttribute,                                        U32,            Attribute,                                                                      )
-OPCODE(SetAttribute,                                        Void,           Attribute,      U32,                                                            )
-OPCODE(GetAttributeIndexed,                                 U32,            U32,                                                                            )
-OPCODE(SetAttributeIndexed,                                 Void,           U32,            U32,                                                            )
+OPCODE(GetAttribute,                                        F32,            Attribute,                                                                      )
+OPCODE(SetAttribute,                                        Void,           Attribute,      F32,                                                            )
+OPCODE(GetAttributeIndexed,                                 F32,            U32,                                                                            )
+OPCODE(SetAttributeIndexed,                                 Void,           U32,            F32,                                                            )
+OPCODE(SetFragColor,                                        Void,           U32,            U32,            F32,                                            )
+OPCODE(SetFragDepth,                                        Void,           F32,                                                                            )
 OPCODE(GetZFlag,                                            U1,             Void,                                                                           )
 OPCODE(GetSFlag,                                            U1,             Void,                                                                           )
 OPCODE(GetCFlag,                                            U1,             Void,                                                                           )
diff --git a/src/shader_recompiler/frontend/ir/program.h b/src/shader_recompiler/frontend/ir/program.h
index bce8b19b3a..733513c8b1 100644
--- a/src/shader_recompiler/frontend/ir/program.h
+++ b/src/shader_recompiler/frontend/ir/program.h
@@ -10,6 +10,7 @@
 
 #include "shader_recompiler/frontend/ir/basic_block.h"
 #include "shader_recompiler/shader_info.h"
+#include "shader_recompiler/stage.h"
 
 namespace Shader::IR {
 
@@ -17,6 +18,7 @@ struct Program {
     BlockList blocks;
     BlockList post_order_blocks;
     Info info;
+    Stage stage{};
 };
 
 [[nodiscard]] std::string DumpProgram(const Program& program);
diff --git a/src/shader_recompiler/frontend/ir/reg.h b/src/shader_recompiler/frontend/ir/reg.h
index 8fea05f7be..3845ec5fb2 100644
--- a/src/shader_recompiler/frontend/ir/reg.h
+++ b/src/shader_recompiler/frontend/ir/reg.h
@@ -293,12 +293,12 @@ constexpr size_t NUM_REGS = 256;
     return reg + (-num);
 }
 
-[[nodiscard]] constexpr Reg operator++(Reg& reg) {
+constexpr Reg operator++(Reg& reg) {
     reg = reg + 1;
     return reg;
 }
 
-[[nodiscard]] constexpr Reg operator++(Reg& reg, int) {
+constexpr Reg operator++(Reg& reg, int) {
     const Reg copy{reg};
     reg = reg + 1;
     return copy;
diff --git a/src/shader_recompiler/frontend/maxwell/control_flow.cpp b/src/shader_recompiler/frontend/maxwell/control_flow.cpp
index 715c0e92d8..4f6707fae4 100644
--- a/src/shader_recompiler/frontend/maxwell/control_flow.cpp
+++ b/src/shader_recompiler/frontend/maxwell/control_flow.cpp
@@ -104,6 +104,7 @@ bool HasFlowTest(Opcode opcode) {
     case Opcode::EXIT:
     case Opcode::JMP:
     case Opcode::JMX:
+    case Opcode::KIL:
     case Opcode::BRK:
     case Opcode::CONT:
     case Opcode::LONGJMP:
@@ -287,6 +288,13 @@ CFG::AnalysisState CFG::AnalyzeInst(Block* block, FunctionId function_id, Locati
         block->end = pc;
         return AnalysisState::Branch;
     }
+    case Opcode::KIL: {
+        const Predicate pred{inst.Pred()};
+        const auto ir_pred{static_cast<IR::Pred>(pred.index)};
+        const IR::Condition cond{inst.branch.flow_test, ir_pred, pred.negated};
+        AnalyzeCondInst(block, function_id, pc, EndClass::Kill, cond);
+        return AnalysisState::Branch;
+    }
     case Opcode::PBK:
     case Opcode::PCNT:
     case Opcode::PEXIT:
@@ -324,13 +332,12 @@ CFG::AnalysisState CFG::AnalyzeInst(Block* block, FunctionId function_id, Locati
         return AnalysisState::Continue;
     }
     const IR::Condition cond{static_cast<IR::Pred>(pred.index), pred.negated};
-    AnalyzeCondInst(block, function_id, pc, EndClass::Branch, cond, true);
+    AnalyzeCondInst(block, function_id, pc, EndClass::Branch, cond);
     return AnalysisState::Branch;
 }
 
 void CFG::AnalyzeCondInst(Block* block, FunctionId function_id, Location pc,
-                          EndClass insn_end_class, IR::Condition cond,
-                          bool visit_conditional_inst) {
+                          EndClass insn_end_class, IR::Condition cond) {
     if (block->begin != pc) {
         // If the block doesn't start in the conditional instruction
         // mark it as a label to visit it later
@@ -356,14 +363,16 @@ void CFG::AnalyzeCondInst(Block* block, FunctionId function_id, Location pc,
     // Impersonate the visited block with a virtual block
     *block = std::move(virtual_block);
     // Set the end properties of the conditional instruction
-    conditional_block->end = visit_conditional_inst ? (pc + 1) : pc;
+    conditional_block->end = pc + 1;
     conditional_block->end_class = insn_end_class;
     // Add a label to the instruction after the conditional instruction
     Block* const endif_block{AddLabel(conditional_block, block->stack, pc + 1, function_id)};
     // Branch to the next instruction from the virtual block
     block->branch_false = endif_block;
-    // And branch to it from the conditional instruction if it is a branch
-    if (insn_end_class == EndClass::Branch) {
+    // And branch to it from the conditional instruction if it is a branch or a kill instruction.
+    // Kill instructions are considered branches because they demote to a helper invocation and
+    // execution may continue.
+    if (insn_end_class == EndClass::Branch || insn_end_class == EndClass::Kill) {
         conditional_block->cond = IR::Condition{true};
         conditional_block->branch_true = endif_block;
         conditional_block->branch_false = nullptr;
@@ -415,7 +424,7 @@ CFG::AnalysisState CFG::AnalyzeEXIT(Block* block, FunctionId function_id, Locati
             throw NotImplementedException("Conditional EXIT with PEXIT token");
         }
         const IR::Condition cond{flow_test, static_cast<IR::Pred>(pred.index), pred.negated};
-        AnalyzeCondInst(block, function_id, pc, EndClass::Exit, cond, false);
+        AnalyzeCondInst(block, function_id, pc, EndClass::Exit, cond);
         return AnalysisState::Branch;
     }
     if (const std::optional<Location> exit_pc{block->stack.Peek(Token::PEXIT)}) {
@@ -425,7 +434,7 @@ CFG::AnalysisState CFG::AnalyzeEXIT(Block* block, FunctionId function_id, Locati
         block->branch_false = nullptr;
         return AnalysisState::Branch;
     }
-    block->end = pc;
+    block->end = pc + 1;
     block->end_class = EndClass::Exit;
     return AnalysisState::Branch;
 }
@@ -505,6 +514,12 @@ std::string CFG::Dot() const {
                                    node_uid);
                 ++node_uid;
                 break;
+            case EndClass::Kill:
+                dot += fmt::format("\t\t{}->N{};\n", name, node_uid);
+                dot += fmt::format("\t\tN{} [label=\"Kill\"][shape=square][style=stripped];\n",
+                                   node_uid);
+                ++node_uid;
+                break;
             }
         }
         if (function.entrypoint == 8) {
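
Concretely, the new Kill end class models a predicated KIL as a branch rather
than a real terminator; the guest snippet and labels below are illustrative
assumptions, not from the patch:

    // @P0 KIL          <- conditional block ends with EndClass::Kill
    // FADD R0, R1, R2  <- endif block: execution resumes here either way
    //
    // After structurization this becomes, in IR terms:
    //
    //   if (P0) { DemoteToHelperInvocation(%endif); }
    //   %endif: ...
    //
    // because the demote only clears the invocation's coverage; the thread
    // itself keeps running, so both paths must rejoin at the next instruction.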
diff --git a/src/shader_recompiler/frontend/maxwell/control_flow.h b/src/shader_recompiler/frontend/maxwell/control_flow.h
index fe74f210fb..22f1341944 100644
--- a/src/shader_recompiler/frontend/maxwell/control_flow.h
+++ b/src/shader_recompiler/frontend/maxwell/control_flow.h
@@ -29,6 +29,7 @@ enum class EndClass {
     Call,
     Exit,
     Return,
+    Kill,
 };
 
 enum class Token {
@@ -130,7 +131,7 @@ private:
     AnalysisState AnalyzeInst(Block* block, FunctionId function_id, Location pc);
 
     void AnalyzeCondInst(Block* block, FunctionId function_id, Location pc, EndClass insn_end_class,
-                         IR::Condition cond, bool visit_conditional_inst);
+                         IR::Condition cond);
 
     /// Return true when the branch instruction is confirmed to be a branch
     bool AnalyzeBranch(Block* block, FunctionId function_id, Location pc, Instruction inst,
diff --git a/src/shader_recompiler/frontend/maxwell/program.cpp b/src/shader_recompiler/frontend/maxwell/program.cpp
index 8bfa643268..0074eb89b6 100644
--- a/src/shader_recompiler/frontend/maxwell/program.cpp
+++ b/src/shader_recompiler/frontend/maxwell/program.cpp
@@ -32,6 +32,7 @@ IR::Program TranslateProgram(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Blo
     IR::Program program;
     program.blocks = VisitAST(inst_pool, block_pool, env, cfg);
     program.post_order_blocks = PostOrder(program.blocks);
+    program.stage = env.ShaderStage();
     RemoveUnreachableBlocks(program);
 
     // Replace instructions before the SSA rewrite
diff --git a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
index 5f5d9cf173..cec03e73ee 100644
--- a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
+++ b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
@@ -45,6 +45,7 @@ enum class StatementType {
     Loop,
     Break,
     Return,
+    Kill,
     Function,
     Identity,
     Not,
@@ -70,6 +71,7 @@ struct If {};
 struct Loop {};
 struct Break {};
 struct Return {};
+struct Kill {};
 struct FunctionTag {};
 struct Identity {};
 struct Not {};
@@ -93,6 +95,7 @@ struct Statement : ListBaseHook {
     Statement(Break, Statement* cond_, Statement* up_)
         : cond{cond_}, up{up_}, type{StatementType::Break} {}
     Statement(Return) : type{StatementType::Return} {}
+    Statement(Kill) : type{StatementType::Kill} {}
     Statement(FunctionTag) : children{}, type{StatementType::Function} {}
     Statement(Identity, IR::Condition cond_) : guest_cond{cond_}, type{StatementType::Identity} {}
     Statement(Not, Statement* op_) : op{op_}, type{StatementType::Not} {}
@@ -174,6 +177,9 @@ std::string DumpTree(const Tree& tree, u32 indentation = 0) {
         case StatementType::Return:
             ret += fmt::format("{}    return;\n", indent);
             break;
+        case StatementType::Kill:
+            ret += fmt::format("{}    kill;\n", indent);
+            break;
         case StatementType::SetVariable:
             ret += fmt::format("{}    goto_L{} = {};\n", indent, stmt->id, DumpExpr(stmt->op));
             break;
@@ -424,6 +430,9 @@ private:
                 gotos.push_back(root.insert(ip, *goto_stmt));
                 break;
             }
+            case Flow::EndClass::Kill:
+                root.insert(ip, *pool.Create(Kill{}));
+                break;
             }
         }
     }
@@ -729,6 +738,15 @@ private:
                 current_block = nullptr;
                 break;
             }
+            case StatementType::Kill: {
+                if (!current_block) {
+                    current_block = block_pool.Create(inst_pool);
+                    block_list.push_back(current_block);
+                }
+                IR::IREmitter{*current_block}.DemoteToHelperInvocation(continue_block);
+                current_block = nullptr;
+                break;
+            }
             default:
                 throw NotImplementedException("Statement type {}", stmt.type);
             }
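
Note on the Kill lowering above: a discard no longer aborts translation. The CFG records
EndClass::Kill, the structured pass turns it into a Kill statement, and the tree visitor
emits a demote. A minimal sketch of what the StatementType::Kill case effectively
produces, assuming the IREmitter interface used in this file:

    // Sketch only: the demote keeps the invocation alive as a helper so quad
    // derivatives stay defined, then control resumes at continue_block.
    IR::Block* const block{block_pool.Create(inst_pool)};
    IR::IREmitter ir{*block};
    ir.DemoteToHelperInvocation(continue_block);
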
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/exit.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/exit.cpp
deleted file mode 100644
index e98bbd0d18..0000000000
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/exit.cpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include "common/common_types.h"
-#include "shader_recompiler/exception.h"
-#include "shader_recompiler/frontend/maxwell/translate/impl/impl.h"
-
-namespace Shader::Maxwell {
-
-void TranslatorVisitor::EXIT(u64) {
-    ir.Exit();
-}
-
-} // namespace Shader::Maxwell
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/exit_program.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/exit_program.cpp
new file mode 100644
index 0000000000..ea9b33da9b
--- /dev/null
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/exit_program.cpp
@@ -0,0 +1,43 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/common_types.h"
+#include "shader_recompiler/exception.h"
+#include "shader_recompiler/frontend/maxwell/translate/impl/impl.h"
+
+namespace Shader::Maxwell {
+namespace {
+void ExitFragment(TranslatorVisitor& v) {
+    const ProgramHeader sph{v.env.SPH()};
+    IR::Reg src_reg{IR::Reg::R0};
+    for (u32 render_target = 0; render_target < 8; ++render_target) {
+        const std::array<bool, 4> mask{sph.ps.EnabledOutputComponents(render_target)};
+        for (u32 component = 0; component < 4; ++component) {
+            if (!mask[component]) {
+                continue;
+            }
+            v.ir.SetFragColor(render_target, component, v.F(src_reg));
+            ++src_reg;
+        }
+    }
+    if (sph.ps.omap.sample_mask != 0) {
+        throw NotImplementedException("Sample mask");
+    }
+    if (sph.ps.omap.depth != 0) {
+        throw NotImplementedException("Fragment depth");
+    }
+}
+} // Anonymous namespace
+
+void TranslatorVisitor::EXIT() {
+    switch (env.ShaderStage()) {
+    case Stage::Fragment:
+        ExitFragment(*this);
+        break;
+    default:
+        break;
+    }
+}
+
+} // namespace Shader::Maxwell
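
The register packing in ExitFragment is dense over enabled components: src_reg advances
once per enabled component, never once per render target. A standalone sketch of the
rule, with hypothetical masks not taken from any real SPH:

    #include <cstdio>
    int main() {
        // RT0 enables RGBA, RT1 enables only R (hypothetical values)
        const bool mask[2][4]{{true, true, true, true}, {true, false, false, false}};
        int src{0};
        for (int rt = 0; rt < 2; ++rt) {
            for (int comp = 0; comp < 4; ++comp) {
                if (mask[rt][comp]) {
                    std::printf("RT%d.%d <- R%d\n", rt, comp, src++);
                }
            }
        }
        // Prints RT0.0 <- R0 ... RT0.3 <- R3, then RT1.0 <- R4.
    }
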
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/impl.h b/src/shader_recompiler/frontend/maxwell/translate/impl/impl.h
index e3e298c3b6..ed81d9c369 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/impl.h
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/impl.h
@@ -108,7 +108,7 @@ public:
     void DSETP_reg(u64 insn);
     void DSETP_cbuf(u64 insn);
     void DSETP_imm(u64 insn);
-    void EXIT(u64 insn);
+    void EXIT();
     void F2F_reg(u64 insn);
     void F2F_cbuf(u64 insn);
     void F2F_imm(u64 insn);
@@ -220,7 +220,7 @@ public:
     void JCAL(u64 insn);
     void JMP(u64 insn);
     void JMX(u64 insn);
-    void KIL(u64 insn);
+    void KIL();
     void LD(u64 insn);
     void LDC(u64 insn);
     void LDG(u64 insn);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_attribute.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_attribute.cpp
index ad97786d4e..2922145eec 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_attribute.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_attribute.cpp
@@ -11,6 +11,13 @@
 
 namespace Shader::Maxwell {
 namespace {
+enum class Size : u64 {
+    B32,
+    B64,
+    B96,
+    B128,
+};
+
 enum class InterpolationMode : u64 {
     Pass,
     Multiply,
@@ -23,8 +30,85 @@ enum class SampleMode : u64 {
     Centroid,
     Offset,
 };
+
+int NumElements(Size size) {
+    switch (size) {
+    case Size::B32:
+        return 1;
+    case Size::B64:
+        return 2;
+    case Size::B96:
+        return 3;
+    case Size::B128:
+        return 4;
+    }
+    throw InvalidArgument("Invalid size {}", size);
+}
 } // Anonymous namespace
 
+void TranslatorVisitor::ALD(u64 insn) {
+    union {
+        u64 raw;
+        BitField<0, 8, IR::Reg> dest_reg;
+        BitField<8, 8, IR::Reg> index_reg;
+        BitField<20, 10, u64> absolute_offset;
+        BitField<20, 11, s64> relative_offset;
+        BitField<39, 8, IR::Reg> stream_reg;
+        BitField<32, 1, u64> o;
+        BitField<31, 1, u64> patch;
+        BitField<47, 2, Size> size;
+    } const ald{insn};
+
+    if (ald.o != 0) {
+        throw NotImplementedException("O");
+    }
+    if (ald.patch != 0) {
+        throw NotImplementedException("P");
+    }
+    if (ald.index_reg != IR::Reg::RZ) {
+        throw NotImplementedException("Indexed");
+    }
+    const u64 offset{ald.absolute_offset.Value()};
+    if (offset % 4 != 0) {
+        throw NotImplementedException("Unaligned absolute offset {}", offset);
+    }
+    const int num_elements{NumElements(ald.size)};
+    for (int element = 0; element < num_elements; ++element) {
+        F(ald.dest_reg + element, ir.GetAttribute(IR::Attribute{offset / 4 + element}));
+    }
+}
+
+void TranslatorVisitor::AST(u64 insn) {
+    union {
+        u64 raw;
+        BitField<0, 8, IR::Reg> src_reg;
+        BitField<8, 8, IR::Reg> index_reg;
+        BitField<20, 10, u64> absolute_offset;
+        BitField<20, 11, s64> relative_offset;
+        BitField<31, 1, u64> patch;
+        BitField<39, 8, IR::Reg> stream_reg;
+        BitField<47, 2, Size> size;
+    } const ast{insn};
+
+    if (ast.patch != 0) {
+        throw NotImplementedException("P");
+    }
+    if (ast.stream_reg != IR::Reg::RZ) {
+        throw NotImplementedException("Stream store");
+    }
+    if (ast.index_reg != IR::Reg::RZ) {
+        throw NotImplementedException("Indexed store");
+    }
+    const u64 offset{ast.absolute_offset.Value()};
+    if (offset % 4 != 0) {
+        throw NotImplementedException("Unaligned absolute offset {}", offset);
+    }
+    const int num_elements{NumElements(ast.size)};
+    for (int element = 0; element < num_elements; ++element) {
+        ir.SetAttribute(IR::Attribute{offset / 4 + element}, F(ast.src_reg + element));
+    }
+}
+
 void TranslatorVisitor::IPA(u64 insn) {
     // IPA is the instruction used to read varyings in a fragment shader.
     // gl_FragCoord is mapped to the gl_Position attribute.
@@ -51,7 +135,7 @@ void TranslatorVisitor::IPA(u64 insn) {
     // }
     const bool is_indexed{ipa.idx != 0 && ipa.index_reg != IR::Reg::RZ};
     if (is_indexed) {
-        throw NotImplementedException("IPA.IDX");
+        throw NotImplementedException("IDX");
     }
 
     const IR::Attribute attribute{ipa.attribute};
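
The ALD/AST decoding relies on IR::Attribute values mirroring the hardware attribute
map divided by four, which is what makes IR::Attribute{offset / 4 + element} valid. A
worked example under that assumption, using the conventional Maxwell layout where
generic attributes start at offset 0x80:

    // ALD.B128 R4, a[0x80]:
    //   offset = 0x80, NumElements(Size::B128) = 4
    //   element 0: R4 <- IR::Attribute{0x20} = Generic[0].X
    //   element 1: R5 <- Generic[0].Y
    //   element 2: R6 <- Generic[0].Z
    //   element 3: R7 <- Generic[0].W
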
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/not_implemented.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/not_implemented.cpp
index 9675cef541..59252bcc50 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/not_implemented.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/not_implemented.cpp
@@ -17,14 +17,6 @@ void TranslatorVisitor::AL2P(u64) {
     ThrowNotImplemented(Opcode::AL2P);
 }
 
-void TranslatorVisitor::ALD(u64) {
-    ThrowNotImplemented(Opcode::ALD);
-}
-
-void TranslatorVisitor::AST(u64) {
-    ThrowNotImplemented(Opcode::AST);
-}
-
 void TranslatorVisitor::ATOM_cas(u64) {
     ThrowNotImplemented(Opcode::ATOM_cas);
 }
@@ -153,10 +145,6 @@ void TranslatorVisitor::DSETP_imm(u64) {
     ThrowNotImplemented(Opcode::DSETP_imm);
 }
 
-void TranslatorVisitor::EXIT(u64) {
-    throw LogicError("Visting EXIT instruction");
-}
-
 void TranslatorVisitor::F2F_reg(u64) {
     ThrowNotImplemented(Opcode::F2F_reg);
 }
@@ -345,8 +333,8 @@ void TranslatorVisitor::JMX(u64) {
     ThrowNotImplemented(Opcode::JMX);
 }
 
-void TranslatorVisitor::KIL(u64) {
-    ThrowNotImplemented(Opcode::KIL);
+void TranslatorVisitor::KIL() {
+    // KIL is handled during control flow analysis (EndClass::Kill), so no IR is emitted here
 }
 
 void TranslatorVisitor::LD(u64) {
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch.cpp
index 98d9f4c648..0fbb87ec43 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch.cpp
@@ -215,7 +215,7 @@ void TranslatorVisitor::TEX(u64 insn) {
         BitField<36, 13, u64> cbuf_offset;
     } const tex{insn};
 
-    Impl(*this, insn, tex.aoffi != 0, tex.blod, tex.lc != 0, static_cast<u32>(tex.cbuf_offset));
+    Impl(*this, insn, tex.aoffi != 0, tex.blod, tex.lc != 0, static_cast<u32>(tex.cbuf_offset * 4));
 }
 
 void TranslatorVisitor::TEX_b(u64 insn) {
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch_swizzled.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch_swizzled.cpp
index ac1615b007..54f0df7547 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch_swizzled.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch_swizzled.cpp
@@ -70,7 +70,7 @@ IR::F32 ReadArray(TranslatorVisitor& v, const IR::U32& value) {
 
 IR::Value Sample(TranslatorVisitor& v, u64 insn) {
     const Encoding texs{insn};
-    const IR::U32 handle{v.ir.Imm32(static_cast<u32>(texs.cbuf_offset))};
+    const IR::U32 handle{v.ir.Imm32(static_cast<u32>(texs.cbuf_offset * 4))};
     const IR::F32 zero{v.ir.Imm32(0.0f)};
     const IR::Reg reg_a{texs.src_reg_a};
     const IR::Reg reg_b{texs.src_reg_b};
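
Both TEX and TEXS now scale the encoded cbuf_offset by four before using it, which
suggests the field counts 32-bit words while the texture handle is fetched with a byte
offset (compare the cbuf address arithmetic in the pipeline code later in this patch).
Illustration of the unit conversion only, with hypothetical values:

    constexpr u32 encoded_offset{0x10};                    // in 32-bit words
    constexpr u32 handle_byte_offset{encoded_offset * 4};  // 0x40 bytes
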
diff --git a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
index 708b6b2672..fbbe286322 100644
--- a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
+++ b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
@@ -17,10 +17,47 @@ void AddConstantBufferDescriptor(Info& info, u32 index, u32 count) {
         return;
     }
     info.constant_buffer_mask |= 1U << index;
-    info.constant_buffer_descriptors.push_back({
-        .index{index},
-        .count{1},
-    });
+
+    auto& cbufs{info.constant_buffer_descriptors};
+    cbufs.insert(std::ranges::lower_bound(cbufs, index, {}, &ConstantBufferDescriptor::index),
+                 ConstantBufferDescriptor{
+                     .index{index},
+                     .count{1},
+                 });
+}
+
+void GetAttribute(Info& info, IR::Attribute attribute) {
+    if (IR::IsGeneric(attribute)) {
+        info.loads_generics.at(IR::GenericAttributeIndex(attribute)) = true;
+        return;
+    }
+    switch (attribute) {
+    case IR::Attribute::PositionX:
+    case IR::Attribute::PositionY:
+    case IR::Attribute::PositionZ:
+    case IR::Attribute::PositionW:
+        info.loads_position = true;
+        break;
+    default:
+        throw NotImplementedException("Get attribute {}", attribute);
+    }
+}
+
+void SetAttribute(Info& info, IR::Attribute attribute) {
+    if (IR::IsGeneric(attribute)) {
+        info.stores_generics.at(IR::GenericAttributeIndex(attribute)) = true;
+        return;
+    }
+    switch (attribute) {
+    case IR::Attribute::PositionX:
+    case IR::Attribute::PositionY:
+    case IR::Attribute::PositionZ:
+    case IR::Attribute::PositionW:
+        info.stores_position = true;
+        break;
+    default:
+        throw NotImplementedException("Set attribute {}", attribute);
+    }
 }
 
 void VisitUsages(Info& info, IR::Inst& inst) {
@@ -162,6 +199,21 @@ void VisitUsages(Info& info, IR::Inst& inst) {
         break;
     }
     switch (inst.Opcode()) {
+    case IR::Opcode::DemoteToHelperInvocation:
+        info.uses_demote_to_helper_invocation = true;
+        break;
+    case IR::Opcode::GetAttribute:
+        GetAttribute(info, inst.Arg(0).Attribute());
+        break;
+    case IR::Opcode::SetAttribute:
+        SetAttribute(info, inst.Arg(0).Attribute());
+        break;
+    case IR::Opcode::SetFragColor:
+        info.stores_frag_color[inst.Arg(0).U32()] = true;
+        break;
+    case IR::Opcode::SetFragDepth:
+        info.stores_frag_depth = true;
+        break;
     case IR::Opcode::WorkgroupId:
         info.uses_workgroup_id = true;
         break;
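
AddConstantBufferDescriptor now keeps constant_buffer_descriptors sorted by index no
matter the order in which cbufs are first seen, using lower_bound with the
&ConstantBufferDescriptor::index projection; the mask check still deduplicates. Sketch
of the invariant with a hypothetical call order:

    AddConstantBufferDescriptor(info, 3, 1); // descriptors: [3]
    AddConstantBufferDescriptor(info, 0, 1); // descriptors: [0, 3]
    AddConstantBufferDescriptor(info, 3, 1); // mask bit already set: early return
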
diff --git a/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp b/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp
index d09bcec366..bab7ca1868 100644
--- a/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp
+++ b/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp
@@ -169,7 +169,7 @@ private:
         const size_t num_args{phi.NumArgs()};
         for (size_t arg_index = 0; arg_index < num_args; ++arg_index) {
             const IR::Value& op{phi.Arg(arg_index)};
-            if (op == same || op == IR::Value{&phi}) {
+            if (op.Resolve() == same.Resolve() || op == IR::Value{&phi}) {
                 // Unique value or self-reference
                 continue;
             }
diff --git a/src/shader_recompiler/program_header.h b/src/shader_recompiler/program_header.h
new file mode 100644
index 0000000000..1544bfa427
--- /dev/null
+++ b/src/shader_recompiler/program_header.h
@@ -0,0 +1,143 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <optional>
+
+#include "common/bit_field.h"
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+
+namespace Shader {
+
+enum class OutputTopology : u32 {
+    PointList = 1,
+    LineStrip = 6,
+    TriangleStrip = 7,
+};
+
+enum class PixelImap : u8 {
+    Unused = 0,
+    Constant = 1,
+    Perspective = 2,
+    ScreenLinear = 3,
+};
+
+// Documentation in:
+// http://download.nvidia.com/open-gpu-doc/Shader-Program-Header/1/Shader-Program-Header.html
+struct ProgramHeader {
+    union {
+        BitField<0, 5, u32> sph_type;
+        BitField<5, 5, u32> version;
+        BitField<10, 4, u32> shader_type;
+        BitField<14, 1, u32> mrt_enable;
+        BitField<15, 1, u32> kills_pixels;
+        BitField<16, 1, u32> does_global_store;
+        BitField<17, 4, u32> sass_version;
+        BitField<21, 5, u32> reserved;
+        BitField<26, 1, u32> does_load_or_store;
+        BitField<27, 1, u32> does_fp64;
+        BitField<28, 4, u32> stream_out_mask;
+    } common0;
+
+    union {
+        BitField<0, 24, u32> shader_local_memory_low_size;
+        BitField<24, 8, u32> per_patch_attribute_count;
+    } common1;
+
+    union {
+        BitField<0, 24, u32> shader_local_memory_high_size;
+        BitField<24, 8, u32> threads_per_input_primitive;
+    } common2;
+
+    union {
+        BitField<0, 24, u32> shader_local_memory_crs_size;
+        BitField<24, 4, OutputTopology> output_topology;
+        BitField<28, 4, u32> reserved;
+    } common3;
+
+    union {
+        BitField<0, 12, u32> max_output_vertices;
+        BitField<12, 8, u32> store_req_start; // NOTE: not used by geometry shaders.
+        BitField<20, 4, u32> reserved;
+        BitField<24, 8, u32> store_req_end; // NOTE: not used by geometry shaders.
+    } common4;
+
+    union {
+        struct {
+            INSERT_PADDING_BYTES_NOINIT(3);  // ImapSystemValuesA
+            INSERT_PADDING_BYTES_NOINIT(1);  // ImapSystemValuesB
+            INSERT_PADDING_BYTES_NOINIT(16); // ImapGenericVector[32]
+            INSERT_PADDING_BYTES_NOINIT(2);  // ImapColor
+            union {
+                BitField<0, 8, u16> clip_distances;
+                BitField<8, 1, u16> point_sprite_s;
+                BitField<9, 1, u16> point_sprite_t;
+                BitField<10, 1, u16> fog_coordinate;
+                BitField<12, 1, u16> tessellation_eval_point_u;
+                BitField<13, 1, u16> tessellation_eval_point_v;
+                BitField<14, 1, u16> instance_id;
+                BitField<15, 1, u16> vertex_id;
+            };
+            INSERT_PADDING_BYTES_NOINIT(5);  // ImapFixedFncTexture[10]
+            INSERT_PADDING_BYTES_NOINIT(1);  // ImapReserved
+            INSERT_PADDING_BYTES_NOINIT(3);  // OmapSystemValuesA
+            INSERT_PADDING_BYTES_NOINIT(1);  // OmapSystemValuesB
+            INSERT_PADDING_BYTES_NOINIT(16); // OmapGenericVector[32]
+            INSERT_PADDING_BYTES_NOINIT(2);  // OmapColor
+            INSERT_PADDING_BYTES_NOINIT(2);  // OmapSystemValuesC
+            INSERT_PADDING_BYTES_NOINIT(5);  // OmapFixedFncTexture[10]
+            INSERT_PADDING_BYTES_NOINIT(1);  // OmapReserved
+        } vtg;
+
+        struct {
+            INSERT_PADDING_BYTES_NOINIT(3); // ImapSystemValuesA
+            INSERT_PADDING_BYTES_NOINIT(1); // ImapSystemValuesB
+
+            union {
+                BitField<0, 2, PixelImap> x;
+                BitField<2, 2, PixelImap> y;
+                BitField<4, 2, PixelImap> z;
+                BitField<6, 2, PixelImap> w;
+                u8 raw;
+            } imap_generic_vector[32];
+
+            INSERT_PADDING_BYTES_NOINIT(2);  // ImapColor
+            INSERT_PADDING_BYTES_NOINIT(2);  // ImapSystemValuesC
+            INSERT_PADDING_BYTES_NOINIT(10); // ImapFixedFncTexture[10]
+            INSERT_PADDING_BYTES_NOINIT(2);  // ImapReserved
+
+            struct {
+                u32 target;
+                union {
+                    BitField<0, 1, u32> sample_mask;
+                    BitField<1, 1, u32> depth;
+                    BitField<2, 30, u32> reserved;
+                };
+            } omap;
+
+            [[nodiscard]] std::array<bool, 4> EnabledOutputComponents(u32 rt) const noexcept {
+                const u32 bits{omap.target >> (rt * 4)};
+                return {(bits & 1) != 0, (bits & 2) != 0, (bits & 4) != 0, (bits & 8) != 0};
+            }
+
+            [[nodiscard]] std::array<PixelImap, 4> GenericInputMap(u32 attribute) const {
+                const auto& vector{imap_generic_vector[attribute]};
+                return {vector.x, vector.y, vector.z, vector.w};
+            }
+        } ps;
+
+        std::array<u32, 0xf> raw;
+    };
+
+    [[nodiscard]] u64 LocalMemorySize() const noexcept {
+        return common1.shader_local_memory_low_size |
+               (static_cast<u64>(common2.shader_local_memory_high_size) << 24);
+    }
+};
+static_assert(sizeof(ProgramHeader) == 0x50, "Incorrect structure size");
+
+} // namespace Shader
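
omap.target packs one RGBA nibble per render target, which is what
EnabledOutputComponents unpacks. A worked example with a hypothetical header value:

    // omap.target = 0x0000'001F:
    //   EnabledOutputComponents(0) -> {true, true, true, true}    (nibble 0xF)
    //   EnabledOutputComponents(1) -> {true, false, false, false} (nibble 0x1)
    // The accessor shifts by rt * 4 and tests bits 0..3 of the resulting nibble.
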
diff --git a/src/shader_recompiler/recompiler.cpp b/src/shader_recompiler/recompiler.cpp
deleted file mode 100644
index 527e19c272..0000000000
--- a/src/shader_recompiler/recompiler.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <vector>
-
-#include "common/common_types.h"
-#include "shader_recompiler/backend/spirv/emit_spirv.h"
-#include "shader_recompiler/environment.h"
-#include "shader_recompiler/frontend/maxwell/control_flow.h"
-#include "shader_recompiler/frontend/maxwell/program.h"
-#include "shader_recompiler/object_pool.h"
-#include "shader_recompiler/recompiler.h"
-
-namespace Shader {
-
-std::pair<Info, std::vector<u32>> RecompileSPIRV(const Profile& profile, Environment& env,
-                                                 u32 start_address) {
-    ObjectPool<Maxwell::Flow::Block> flow_block_pool;
-    ObjectPool<IR::Inst> inst_pool;
-    ObjectPool<IR::Block> block_pool;
-
-    Maxwell::Flow::CFG cfg{env, flow_block_pool, start_address};
-    IR::Program program{Maxwell::TranslateProgram(inst_pool, block_pool, env, cfg)};
-    return {std::move(program.info), Backend::SPIRV::EmitSPIRV(profile, env, program)};
-}
-
-} // namespace Shader
diff --git a/src/shader_recompiler/recompiler.h b/src/shader_recompiler/recompiler.h
deleted file mode 100644
index 2529463aec..0000000000
--- a/src/shader_recompiler/recompiler.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <utility>
-#include <vector>
-
-#include "common/common_types.h"
-#include "shader_recompiler/environment.h"
-#include "shader_recompiler/profile.h"
-#include "shader_recompiler/shader_info.h"
-
-namespace Shader {
-
-[[nodiscard]] std::pair<Info, std::vector<u32>> RecompileSPIRV(const Profile& profile,
-                                                               Environment& env, u32 start_address);
-
-} // namespace Shader
diff --git a/src/shader_recompiler/shader_info.h b/src/shader_recompiler/shader_info.h
index adc1d9a64a..6eff762e2c 100644
--- a/src/shader_recompiler/shader_info.h
+++ b/src/shader_recompiler/shader_info.h
@@ -56,6 +56,15 @@ struct Info {
 
     bool uses_workgroup_id{};
     bool uses_local_invocation_id{};
+
+    std::array<bool, 32> loads_generics{};
+    bool loads_position{};
+
+    std::array<bool, 8> stores_frag_color{};
+    bool stores_frag_depth{};
+    std::array<bool, 32> stores_generics{};
+    bool stores_position{};
+
     bool uses_fp16{};
     bool uses_fp64{};
     bool uses_fp16_denorms_flush{};
@@ -68,6 +77,7 @@ struct Info {
     bool uses_image_1d{};
     bool uses_sampled_1d{};
     bool uses_sparse_residency{};
+    bool uses_demote_to_helper_invocation{};
 
     IR::Type used_constant_buffer_types{};
 
diff --git a/src/shader_recompiler/stage.h b/src/shader_recompiler/stage.h
new file mode 100644
index 0000000000..fc6ce60435
--- /dev/null
+++ b/src/shader_recompiler/stage.h
@@ -0,0 +1,19 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+namespace Shader {
+
+enum class Stage {
+    Compute,
+    VertexA,
+    VertexB,
+    TessellationControl,
+    TessellationEval,
+    Geometry,
+    Fragment,
+};
+
+} // namespace Shader
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 3323e69169..71b07c1940 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -100,6 +100,7 @@ add_library(video_core STATIC
     renderer_vulkan/fixed_pipeline_state.h
     renderer_vulkan/maxwell_to_vk.cpp
     renderer_vulkan/maxwell_to_vk.h
+    renderer_vulkan/pipeline_helper.h
     renderer_vulkan/renderer_vulkan.h
     renderer_vulkan/renderer_vulkan.cpp
     renderer_vulkan/vk_blit_screen.cpp
@@ -116,15 +117,18 @@ add_library(video_core STATIC
     renderer_vulkan/vk_descriptor_pool.h
     renderer_vulkan/vk_fence_manager.cpp
     renderer_vulkan/vk_fence_manager.h
+    renderer_vulkan/vk_graphics_pipeline.cpp
+    renderer_vulkan/vk_graphics_pipeline.h
     renderer_vulkan/vk_master_semaphore.cpp
     renderer_vulkan/vk_master_semaphore.h
     renderer_vulkan/vk_pipeline_cache.cpp
     renderer_vulkan/vk_pipeline_cache.h
-    renderer_vulkan/vk_pipeline.h
     renderer_vulkan/vk_query_cache.cpp
     renderer_vulkan/vk_query_cache.h
     renderer_vulkan/vk_rasterizer.cpp
     renderer_vulkan/vk_rasterizer.h
+    renderer_vulkan/vk_render_pass_cache.cpp
+    renderer_vulkan/vk_render_pass_cache.h
     renderer_vulkan/vk_resource_pool.cpp
     renderer_vulkan/vk_resource_pool.h
     renderer_vulkan/vk_scheduler.cpp
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
index 362278f015..d8f6839072 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -72,6 +72,10 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
         regs.alpha_test_enabled != 0 ? regs.alpha_test_func : Maxwell::ComparisonOp::Always;
     alpha_test_func.Assign(PackComparisonOp(test_func));
     early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0);
+    depth_enabled.Assign(regs.zeta_enable != 0 ? 1 : 0);
+    depth_format.Assign(static_cast<u32>(regs.zeta.format));
+    std::ranges::transform(regs.rt, color_formats.begin(),
+                           [](const auto& rt) { return static_cast<u8>(rt.format); });
 
     alpha_test_ref = Common::BitCast<u32>(regs.alpha_test_ref);
     point_size = Common::BitCast<u32>(regs.point_size);
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
index a0eb83a68d..348f1d6ce6 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.h
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
@@ -60,7 +60,7 @@ struct FixedPipelineState {
 
         void Refresh(const Maxwell& regs, size_t index);
 
-        constexpr std::array<bool, 4> Mask() const noexcept {
+        std::array<bool, 4> Mask() const noexcept {
             return {mask_r != 0, mask_g != 0, mask_b != 0, mask_a != 0};
         }
 
@@ -97,11 +97,11 @@ struct FixedPipelineState {
         BitField<20, 3, u32> type;
         BitField<23, 6, u32> size;
 
-        constexpr Maxwell::VertexAttribute::Type Type() const noexcept {
+        Maxwell::VertexAttribute::Type Type() const noexcept {
             return static_cast<Maxwell::VertexAttribute::Type>(type.Value());
         }
 
-        constexpr Maxwell::VertexAttribute::Size Size() const noexcept {
+        Maxwell::VertexAttribute::Size Size() const noexcept {
             return static_cast<Maxwell::VertexAttribute::Size>(size.Value());
         }
     };
@@ -187,7 +187,10 @@ struct FixedPipelineState {
         u32 raw2;
         BitField<0, 3, u32> alpha_test_func;
         BitField<3, 1, u32> early_z;
+        BitField<4, 1, u32> depth_enabled;
+        BitField<5, 5, u32> depth_format;
     };
+    std::array<u8, Maxwell::NumRenderTargets> color_formats;
 
     u32 alpha_test_ref;
     u32 point_size;
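
The new depth fields ride in the existing raw2 word and color_formats is a
trivially-copyable array, so both are covered by the byte-wise hashing and comparison
FixedPipelineState already uses as a pipeline cache key. For reference, the
producer/consumer pair introduced by this patch:

    // Refresh() packs:  depth_enabled (1 bit), depth_format (5 bits), rt formats (u8 each)
    // MakeRenderPassKey() in vk_graphics_pipeline.cpp decodes them back into PixelFormats.
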
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index f088447e94..dc4ff0da2b 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -741,4 +741,28 @@ VkSamplerReductionMode SamplerReduction(Tegra::Texture::SamplerReduction reducti
     return VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT;
 }
 
+VkSampleCountFlagBits MsaaMode(Tegra::Texture::MsaaMode msaa_mode) {
+    switch (msaa_mode) {
+    case Tegra::Texture::MsaaMode::Msaa1x1:
+        return VK_SAMPLE_COUNT_1_BIT;
+    case Tegra::Texture::MsaaMode::Msaa2x1:
+    case Tegra::Texture::MsaaMode::Msaa2x1_D3D:
+        return VK_SAMPLE_COUNT_2_BIT;
+    case Tegra::Texture::MsaaMode::Msaa2x2:
+    case Tegra::Texture::MsaaMode::Msaa2x2_VC4:
+    case Tegra::Texture::MsaaMode::Msaa2x2_VC12:
+        return VK_SAMPLE_COUNT_4_BIT;
+    case Tegra::Texture::MsaaMode::Msaa4x2:
+    case Tegra::Texture::MsaaMode::Msaa4x2_D3D:
+    case Tegra::Texture::MsaaMode::Msaa4x2_VC8:
+    case Tegra::Texture::MsaaMode::Msaa4x2_VC24:
+        return VK_SAMPLE_COUNT_8_BIT;
+    case Tegra::Texture::MsaaMode::Msaa4x4:
+        return VK_SAMPLE_COUNT_16_BIT;
+    default:
+        UNREACHABLE_MSG("Invalid msaa_mode={}", static_cast<int>(msaa_mode));
+        return VK_SAMPLE_COUNT_1_BIT;
+    }
+}
+
 } // namespace Vulkan::MaxwellToVK
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.h b/src/video_core/renderer_vulkan/maxwell_to_vk.h
index e3e06ba38a..9f78e15b6a 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.h
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.h
@@ -71,4 +71,6 @@ VkViewportCoordinateSwizzleNV ViewportSwizzle(Maxwell::ViewportSwizzle swizzle);
 
 VkSamplerReductionMode SamplerReduction(Tegra::Texture::SamplerReduction reduction);
 
+VkSampleCountFlagBits MsaaMode(Tegra::Texture::MsaaMode msaa_mode);
+
 } // namespace Vulkan::MaxwellToVK
diff --git a/src/video_core/renderer_vulkan/pipeline_helper.h b/src/video_core/renderer_vulkan/pipeline_helper.h
new file mode 100644
index 0000000000..0a59aa6593
--- /dev/null
+++ b/src/video_core/renderer_vulkan/pipeline_helper.h
@@ -0,0 +1,162 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <cstddef>
+
+#include <boost/container/small_vector.hpp>
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "shader_recompiler/shader_info.h"
+#include "video_core/renderer_vulkan/vk_texture_cache.h"
+#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/texture_cache/texture_cache.h"
+#include "video_core/texture_cache/types.h"
+#include "video_core/textures/texture.h"
+
+namespace Vulkan {
+
+struct TextureHandle {
+    explicit TextureHandle(u32 data, bool via_header_index) {
+        if (via_header_index) [[likely]] {
+            image = data;
+            sampler = data;
+        } else {
+            const Tegra::Texture::TextureHandle handle{data};
+            image = handle.tic_id;
+            sampler = handle.tsc_id.Value();
+        }
+    }
+
+    u32 image;
+    u32 sampler;
+};
+
+struct DescriptorLayoutTuple {
+    vk::DescriptorSetLayout descriptor_set_layout;
+    vk::PipelineLayout pipeline_layout;
+    vk::DescriptorUpdateTemplateKHR descriptor_update_template;
+};
+
+class DescriptorLayoutBuilder {
+public:
+    DescriptorLayoutTuple Create(const vk::Device& device) {
+        DescriptorLayoutTuple result;
+        if (!bindings.empty()) {
+            result.descriptor_set_layout = device.CreateDescriptorSetLayout({
+                .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+                .pNext = nullptr,
+                .flags = 0,
+                .bindingCount = static_cast<u32>(bindings.size()),
+                .pBindings = bindings.data(),
+            });
+        }
+        result.pipeline_layout = device.CreatePipelineLayout({
+            .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+            .pNext = nullptr,
+            .flags = 0,
+            .setLayoutCount = result.descriptor_set_layout ? 1U : 0U,
+            .pSetLayouts =
+                result.descriptor_set_layout ? result.descriptor_set_layout.address() : nullptr,
+            .pushConstantRangeCount = 0,
+            .pPushConstantRanges = nullptr,
+        });
+        if (!entries.empty()) {
+            result.descriptor_update_template = device.CreateDescriptorUpdateTemplateKHR({
+                .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
+                .pNext = nullptr,
+                .flags = 0,
+                .descriptorUpdateEntryCount = static_cast<u32>(entries.size()),
+                .pDescriptorUpdateEntries = entries.data(),
+                .templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR,
+                .descriptorSetLayout = *result.descriptor_set_layout,
+                .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+                .pipelineLayout = *result.pipeline_layout,
+                .set = 0,
+            });
+        }
+        return result;
+    }
+
+    void Add(const Shader::Info& info, VkShaderStageFlags stage) {
+        for ([[maybe_unused]] const auto& desc : info.constant_buffer_descriptors) {
+            Add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, stage);
+        }
+        for ([[maybe_unused]] const auto& desc : info.storage_buffers_descriptors) {
+            Add(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, stage);
+        }
+        for ([[maybe_unused]] const auto& desc : info.texture_descriptors) {
+            Add(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, stage);
+        }
+    }
+
+private:
+    void Add(VkDescriptorType type, VkShaderStageFlags stage) {
+        bindings.push_back({
+            .binding = binding,
+            .descriptorType = type,
+            .descriptorCount = 1,
+            .stageFlags = stage,
+            .pImmutableSamplers = nullptr,
+        });
+        entries.push_back(VkDescriptorUpdateTemplateEntryKHR{
+            .dstBinding = binding,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = type,
+            .offset = offset,
+            .stride = sizeof(DescriptorUpdateEntry),
+        });
+        ++binding;
+        offset += sizeof(DescriptorUpdateEntry);
+    }
+
+    boost::container::small_vector<VkDescriptorSetLayoutBinding, 32> bindings;
+    boost::container::small_vector<VkDescriptorUpdateTemplateEntryKHR, 32> entries;
+    u32 binding{};
+    size_t offset{};
+};
+
+inline VideoCommon::ImageViewType CastType(Shader::TextureType type) {
+    switch (type) {
+    case Shader::TextureType::Color1D:
+    case Shader::TextureType::Shadow1D:
+        return VideoCommon::ImageViewType::e1D;
+    case Shader::TextureType::ColorArray1D:
+    case Shader::TextureType::ShadowArray1D:
+        return VideoCommon::ImageViewType::e1DArray;
+    case Shader::TextureType::Color2D:
+    case Shader::TextureType::Shadow2D:
+        return VideoCommon::ImageViewType::e2D;
+    case Shader::TextureType::ColorArray2D:
+    case Shader::TextureType::ShadowArray2D:
+        return VideoCommon::ImageViewType::e2DArray;
+    case Shader::TextureType::Color3D:
+    case Shader::TextureType::Shadow3D:
+        return VideoCommon::ImageViewType::e3D;
+    case Shader::TextureType::ColorCube:
+    case Shader::TextureType::ShadowCube:
+        return VideoCommon::ImageViewType::Cube;
+    case Shader::TextureType::ColorArrayCube:
+    case Shader::TextureType::ShadowArrayCube:
+        return VideoCommon::ImageViewType::CubeArray;
+    }
+    UNREACHABLE_MSG("Invalid texture type {}", type);
+    return {};
+}
+
+inline void PushImageDescriptors(const Shader::Info& info, const VkSampler* samplers,
+                                 const ImageId* image_view_ids, TextureCache& texture_cache,
+                                 VKUpdateDescriptorQueue& update_descriptor_queue, size_t& index) {
+    for (const auto& desc : info.texture_descriptors) {
+        const VkSampler sampler{samplers[index]};
+        ImageView& image_view{texture_cache.GetImageView(image_view_ids[index])};
+        const VkImageView vk_image_view{image_view.Handle(CastType(desc.type))};
+        update_descriptor_queue.AddSampledImage(vk_image_view, sampler);
+        ++index;
+    }
+}
+
+} // namespace Vulkan
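
DescriptorLayoutBuilder centralizes what the compute pipeline previously did by hand:
bindings and update-template entries are appended in lockstep, so descriptor writes
stay in binding order across stages. Minimal usage, mirroring CreateLayout in
vk_compute_pipeline.cpp below:

    DescriptorLayoutBuilder builder;
    builder.Add(info, VK_SHADER_STAGE_COMPUTE_BIT); // one binding per descriptor
    DescriptorLayoutTuple tuple{builder.Create(device.GetLogical())};
    // tuple.descriptor_set_layout / pipeline_layout / descriptor_update_template are
    // now owned handles; a shader with no descriptors yields a layout with no sets.
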
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index ef8bef6ffc..6684d37a6e 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -6,6 +6,7 @@
 
 #include <boost/container/small_vector.hpp>
 
+#include "video_core/renderer_vulkan/pipeline_helper.h"
 #include "video_core/renderer_vulkan/vk_buffer_cache.h"
 #include "video_core/renderer_vulkan/vk_compute_pipeline.h"
 #include "video_core/renderer_vulkan/vk_descriptor_pool.h"
@@ -17,140 +18,10 @@
 
 namespace Vulkan {
 namespace {
-vk::DescriptorSetLayout CreateDescriptorSetLayout(const Device& device, const Shader::Info& info) {
-    boost::container::small_vector<VkDescriptorSetLayoutBinding, 24> bindings;
-    u32 binding{};
-    for ([[maybe_unused]] const auto& desc : info.constant_buffer_descriptors) {
-        bindings.push_back({
-            .binding = binding,
-            .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
-            .descriptorCount = 1,
-            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
-            .pImmutableSamplers = nullptr,
-        });
-        ++binding;
-    }
-    for ([[maybe_unused]] const auto& desc : info.storage_buffers_descriptors) {
-        bindings.push_back({
-            .binding = binding,
-            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
-            .descriptorCount = 1,
-            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
-            .pImmutableSamplers = nullptr,
-        });
-        ++binding;
-    }
-    for (const auto& desc : info.texture_descriptors) {
-        bindings.push_back({
-            .binding = binding,
-            .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
-            .descriptorCount = 1,
-            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
-            .pImmutableSamplers = nullptr,
-        });
-        ++binding;
-    }
-    return device.GetLogical().CreateDescriptorSetLayout({
-        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
-        .pNext = nullptr,
-        .flags = 0,
-        .bindingCount = static_cast<u32>(bindings.size()),
-        .pBindings = bindings.data(),
-    });
-}
-
-vk::DescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplate(
-    const Device& device, const Shader::Info& info, VkDescriptorSetLayout descriptor_set_layout,
-    VkPipelineLayout pipeline_layout) {
-    boost::container::small_vector<VkDescriptorUpdateTemplateEntry, 24> entries;
-    size_t offset{};
-    u32 binding{};
-    for ([[maybe_unused]] const auto& desc : info.constant_buffer_descriptors) {
-        entries.push_back({
-            .dstBinding = binding,
-            .dstArrayElement = 0,
-            .descriptorCount = 1,
-            .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
-            .offset = offset,
-            .stride = sizeof(DescriptorUpdateEntry),
-        });
-        ++binding;
-        offset += sizeof(DescriptorUpdateEntry);
-    }
-    for ([[maybe_unused]] const auto& desc : info.storage_buffers_descriptors) {
-        entries.push_back({
-            .dstBinding = binding,
-            .dstArrayElement = 0,
-            .descriptorCount = 1,
-            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
-            .offset = offset,
-            .stride = sizeof(DescriptorUpdateEntry),
-        });
-        ++binding;
-        offset += sizeof(DescriptorUpdateEntry);
-    }
-    for (const auto& desc : info.texture_descriptors) {
-        entries.push_back({
-            .dstBinding = binding,
-            .dstArrayElement = 0,
-            .descriptorCount = 1,
-            .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
-            .offset = offset,
-            .stride = sizeof(DescriptorUpdateEntry),
-        });
-        ++binding;
-        offset += sizeof(DescriptorUpdateEntry);
-    }
-    return device.GetLogical().CreateDescriptorUpdateTemplateKHR({
-        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO,
-        .pNext = nullptr,
-        .flags = 0,
-        .descriptorUpdateEntryCount = static_cast<u32>(entries.size()),
-        .pDescriptorUpdateEntries = entries.data(),
-        .templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,
-        .descriptorSetLayout = descriptor_set_layout,
-        .pipelineBindPoint = VK_PIPELINE_BIND_POINT_COMPUTE,
-        .pipelineLayout = pipeline_layout,
-        .set = 0,
-    });
-}
-
-struct TextureHandle {
-    explicit TextureHandle(u32 data, bool via_header_index) {
-        const Tegra::Texture::TextureHandle handle{data};
-        image = handle.tic_id;
-        sampler = via_header_index ? image : handle.tsc_id.Value();
-    }
-
-    u32 image;
-    u32 sampler;
-};
-
-VideoCommon::ImageViewType CastType(Shader::TextureType type) {
-    switch (type) {
-    case Shader::TextureType::Color1D:
-    case Shader::TextureType::Shadow1D:
-        return VideoCommon::ImageViewType::e1D;
-    case Shader::TextureType::ColorArray1D:
-    case Shader::TextureType::ShadowArray1D:
-        return VideoCommon::ImageViewType::e1DArray;
-    case Shader::TextureType::Color2D:
-    case Shader::TextureType::Shadow2D:
-        return VideoCommon::ImageViewType::e2D;
-    case Shader::TextureType::ColorArray2D:
-    case Shader::TextureType::ShadowArray2D:
-        return VideoCommon::ImageViewType::e2DArray;
-    case Shader::TextureType::Color3D:
-    case Shader::TextureType::Shadow3D:
-        return VideoCommon::ImageViewType::e3D;
-    case Shader::TextureType::ColorCube:
-    case Shader::TextureType::ShadowCube:
-        return VideoCommon::ImageViewType::Cube;
-    case Shader::TextureType::ColorArrayCube:
-    case Shader::TextureType::ShadowArrayCube:
-        return VideoCommon::ImageViewType::CubeArray;
-    }
-    UNREACHABLE_MSG("Invalid texture type {}", type);
+DescriptorLayoutTuple CreateLayout(const Device& device, const Shader::Info& info) {
+    DescriptorLayoutBuilder builder;
+    builder.Add(info, VK_SHADER_STAGE_COMPUTE_BIT);
+    return builder.Create(device.GetLogical());
 }
 } // Anonymous namespace
 
@@ -158,37 +29,31 @@ ComputePipeline::ComputePipeline(const Device& device, VKDescriptorPool& descrip
                                  VKUpdateDescriptorQueue& update_descriptor_queue_,
                                  const Shader::Info& info_, vk::ShaderModule spv_module_)
     : update_descriptor_queue{&update_descriptor_queue_}, info{info_},
-      spv_module(std::move(spv_module_)),
-      descriptor_set_layout(CreateDescriptorSetLayout(device, info)),
-      descriptor_allocator(descriptor_pool, *descriptor_set_layout),
-      pipeline_layout{device.GetLogical().CreatePipelineLayout({
-          .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
-          .pNext = nullptr,
-          .flags = 0,
-          .setLayoutCount = 1,
-          .pSetLayouts = descriptor_set_layout.address(),
-          .pushConstantRangeCount = 0,
-          .pPushConstantRanges = nullptr,
-      })},
-      descriptor_update_template{
-          CreateDescriptorUpdateTemplate(device, info, *descriptor_set_layout, *pipeline_layout)},
-      pipeline{device.GetLogical().CreateComputePipeline({
-          .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
-          .pNext = nullptr,
-          .flags = 0,
-          .stage{
-              .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
-              .pNext = nullptr,
-              .flags = 0,
-              .stage = VK_SHADER_STAGE_COMPUTE_BIT,
-              .module = *spv_module,
-              .pName = "main",
-              .pSpecializationInfo = nullptr,
-          },
-          .layout = *pipeline_layout,
-          .basePipelineHandle = 0,
-          .basePipelineIndex = 0,
-      })} {}
+      spv_module(std::move(spv_module_)) {
+    DescriptorLayoutTuple tuple{CreateLayout(device, info)};
+    descriptor_set_layout = std::move(tuple.descriptor_set_layout);
+    pipeline_layout = std::move(tuple.pipeline_layout);
+    descriptor_update_template = std::move(tuple.descriptor_update_template);
+    descriptor_allocator = DescriptorAllocator(descriptor_pool, *descriptor_set_layout);
+
+    pipeline = device.GetLogical().CreateComputePipeline({
+        .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .stage{
+            .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+            .pNext = nullptr,
+            .flags = 0,
+            .stage = VK_SHADER_STAGE_COMPUTE_BIT,
+            .module = *spv_module,
+            .pName = "main",
+            .pSpecializationInfo = nullptr,
+        },
+        .layout = *pipeline_layout,
+        .basePipelineHandle = 0,
+        .basePipelineIndex = 0,
+    });
+}
 
 void ComputePipeline::ConfigureBufferCache(BufferCache& buffer_cache) {
     buffer_cache.SetEnabledComputeUniformBuffers(info.constant_buffer_mask);
@@ -211,7 +76,7 @@ void ComputePipeline::ConfigureTextureCache(Tegra::Engines::KeplerCompute& keple
     static constexpr size_t max_elements = 64;
     std::array<ImageId, max_elements> image_view_ids;
     boost::container::static_vector<u32, max_elements> image_view_indices;
-    boost::container::static_vector<VkSampler, max_elements> sampler_handles;
+    boost::container::static_vector<VkSampler, max_elements> samplers;
 
     const auto& launch_desc{kepler_compute.launch_description};
     const auto& cbufs{launch_desc.const_buffer_config};
@@ -228,20 +93,14 @@ void ComputePipeline::ConfigureTextureCache(Tegra::Engines::KeplerCompute& keple
         image_view_indices.push_back(handle.image);
 
         Sampler* const sampler = texture_cache.GetComputeSampler(handle.sampler);
-        sampler_handles.push_back(sampler->Handle());
+        samplers.push_back(sampler->Handle());
     }
-
     const std::span indices_span(image_view_indices.data(), image_view_indices.size());
     texture_cache.FillComputeImageViews(indices_span, image_view_ids);
 
     size_t index{};
-    for (const auto& desc : info.texture_descriptors) {
-        const VkSampler vk_sampler{sampler_handles[index]};
-        ImageView& image_view{texture_cache.GetImageView(image_view_ids[index])};
-        const VkImageView vk_image_view{image_view.Handle(CastType(desc.type))};
-        update_descriptor_queue->AddSampledImage(vk_image_view, vk_sampler);
-        ++index;
-    }
+    PushImageDescriptors(info, samplers.data(), image_view_ids.data(), texture_cache,
+                         *update_descriptor_queue, index);
 }
 
 VkDescriptorSet ComputePipeline::UpdateDescriptorSet() {
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.h b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
index 08d73a2a4b..e82e5816b8 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
@@ -9,7 +9,6 @@
 #include "video_core/memory_manager.h"
 #include "video_core/renderer_vulkan/vk_buffer_cache.h"
 #include "video_core/renderer_vulkan/vk_descriptor_pool.h"
-#include "video_core/renderer_vulkan/vk_pipeline.h"
 #include "video_core/renderer_vulkan/vk_texture_cache.h"
 #include "video_core/renderer_vulkan/vk_update_descriptor.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
@@ -18,7 +17,7 @@ namespace Vulkan {
 
 class Device;
 
-class ComputePipeline : public Pipeline {
+class ComputePipeline {
 public:
     explicit ComputePipeline() = default;
     explicit ComputePipeline(const Device& device, VKDescriptorPool& descriptor_pool,
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
new file mode 100644
index 0000000000..a2ec418b12
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -0,0 +1,445 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <span>
+
+#include <boost/container/small_vector.hpp>
+#include <boost/container/static_vector.hpp>
+
+#include "common/bit_field.h"
+#include "video_core/renderer_vulkan/maxwell_to_vk.h"
+#include "video_core/renderer_vulkan/pipeline_helper.h"
+#include "video_core/renderer_vulkan/vk_buffer_cache.h"
+#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
+#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/vk_texture_cache.h"
+#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/vulkan_common/vulkan_device.h"
+
+namespace Vulkan {
+namespace {
+using boost::container::small_vector;
+using boost::container::static_vector;
+using VideoCore::Surface::PixelFormat;
+using VideoCore::Surface::PixelFormatFromDepthFormat;
+using VideoCore::Surface::PixelFormatFromRenderTargetFormat;
+
+DescriptorLayoutTuple CreateLayout(const Device& device, std::span<const Shader::Info> infos) {
+    DescriptorLayoutBuilder builder;
+    for (size_t index = 0; index < infos.size(); ++index) {
+        static constexpr std::array stages{
+            VK_SHADER_STAGE_VERTEX_BIT,
+            VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
+            VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
+            VK_SHADER_STAGE_GEOMETRY_BIT,
+            VK_SHADER_STAGE_FRAGMENT_BIT,
+        };
+        builder.Add(infos[index], stages.at(index));
+    }
+    return builder.Create(device.GetLogical());
+}
+
+template <class StencilFace>
+VkStencilOpState GetStencilFaceState(const StencilFace& face) {
+    return {
+        .failOp = MaxwellToVK::StencilOp(face.ActionStencilFail()),
+        .passOp = MaxwellToVK::StencilOp(face.ActionDepthPass()),
+        .depthFailOp = MaxwellToVK::StencilOp(face.ActionDepthFail()),
+        .compareOp = MaxwellToVK::ComparisonOp(face.TestFunc()),
+        .compareMask = 0,
+        .writeMask = 0,
+        .reference = 0,
+    };
+}
+
+bool SupportsPrimitiveRestart(VkPrimitiveTopology topology) {
+    static constexpr std::array unsupported_topologies{
+        VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
+        VK_PRIMITIVE_TOPOLOGY_LINE_LIST,
+        VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
+        VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY,
+        VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY,
+        VK_PRIMITIVE_TOPOLOGY_PATCH_LIST,
+        // VK_PRIMITIVE_TOPOLOGY_QUAD_LIST_EXT,
+    };
+    return std::ranges::find(unsupported_topologies, topology) == unsupported_topologies.end();
+}
+
+VkViewportSwizzleNV UnpackViewportSwizzle(u16 swizzle) {
+    union Swizzle {
+        u32 raw;
+        BitField<0, 3, Maxwell::ViewportSwizzle> x;
+        BitField<4, 3, Maxwell::ViewportSwizzle> y;
+        BitField<8, 3, Maxwell::ViewportSwizzle> z;
+        BitField<12, 3, Maxwell::ViewportSwizzle> w;
+    };
+    const Swizzle unpacked{swizzle};
+    return VkViewportSwizzleNV{
+        .x = MaxwellToVK::ViewportSwizzle(unpacked.x),
+        .y = MaxwellToVK::ViewportSwizzle(unpacked.y),
+        .z = MaxwellToVK::ViewportSwizzle(unpacked.z),
+        .w = MaxwellToVK::ViewportSwizzle(unpacked.w),
+    };
+}
+
+PixelFormat DecodeFormat(u8 encoded_format) {
+    const auto format{static_cast<Tegra::RenderTargetFormat>(encoded_format)};
+    if (format == Tegra::RenderTargetFormat::NONE) {
+        return PixelFormat::Invalid;
+    }
+    return PixelFormatFromRenderTargetFormat(format);
+}
+
+RenderPassKey MakeRenderPassKey(const FixedPipelineState& state) {
+    RenderPassKey key;
+    std::ranges::transform(state.color_formats, key.color_formats.begin(), DecodeFormat);
+    if (state.depth_enabled != 0) {
+        const auto depth_format{static_cast<Tegra::DepthFormat>(state.depth_format.Value())};
+        key.depth_format = PixelFormatFromDepthFormat(depth_format);
+    } else {
+        key.depth_format = PixelFormat::Invalid;
+    }
+    key.samples = MaxwellToVK::MsaaMode(state.msaa_mode);
+    return key;
+}
+} // Anonymous namespace
+
+GraphicsPipeline::GraphicsPipeline(Tegra::Engines::Maxwell3D& maxwell3d_,
+                                   Tegra::MemoryManager& gpu_memory_, VKScheduler& scheduler_,
+                                   BufferCache& buffer_cache_, TextureCache& texture_cache_,
+                                   const Device& device, VKDescriptorPool& descriptor_pool,
+                                   VKUpdateDescriptorQueue& update_descriptor_queue_,
+                                   RenderPassCache& render_pass_cache,
+                                   const FixedPipelineState& state,
+                                   std::array<vk::ShaderModule, NUM_STAGES> stages,
+                                   const std::array<const Shader::Info*, NUM_STAGES>& infos)
+    : maxwell3d{&maxwell3d_}, gpu_memory{&gpu_memory_}, texture_cache{&texture_cache_},
+      buffer_cache{&buffer_cache_}, scheduler{&scheduler_},
+      update_descriptor_queue{&update_descriptor_queue_}, spv_modules{std::move(stages)} {
+    std::ranges::transform(infos, stage_infos.begin(),
+                           [](const Shader::Info* info) { return info ? *info : Shader::Info{}; });
+
+    DescriptorLayoutTuple tuple{CreateLayout(device, stage_infos)};
+    descriptor_set_layout = std::move(tuple.descriptor_set_layout);
+    pipeline_layout = std::move(tuple.pipeline_layout);
+    descriptor_update_template = std::move(tuple.descriptor_update_template);
+    descriptor_allocator = DescriptorAllocator(descriptor_pool, *descriptor_set_layout);
+
+    const VkRenderPass render_pass{render_pass_cache.Get(MakeRenderPassKey(state))};
+    MakePipeline(device, state, render_pass);
+}
+
+void GraphicsPipeline::Configure(bool is_indexed) {
+    static constexpr size_t max_images_elements = 64;
+    std::array<ImageId, max_images_elements> image_view_ids;
+    static_vector<u32, max_images_elements> image_view_indices;
+    static_vector<VkSampler, max_images_elements> samplers;
+
+    texture_cache->SynchronizeGraphicsDescriptors();
+    texture_cache->UpdateRenderTargets(false);
+
+    const auto& regs{maxwell3d->regs};
+    const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex};
+    for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
+        const Shader::Info& info{stage_infos[stage]};
+        buffer_cache->SetEnabledUniformBuffers(stage, info.constant_buffer_mask);
+        buffer_cache->UnbindGraphicsStorageBuffers(stage);
+        size_t index{};
+        for (const auto& desc : info.storage_buffers_descriptors) {
+            ASSERT(desc.count == 1);
+            buffer_cache->BindGraphicsStorageBuffer(stage, index, desc.cbuf_index, desc.cbuf_offset,
+                                                    true);
+            ++index;
+        }
+        const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers};
+        for (const auto& desc : info.texture_descriptors) {
+            const u32 cbuf_index{desc.cbuf_index};
+            const u32 cbuf_offset{desc.cbuf_offset};
+            ASSERT(cbufs[cbuf_index].enabled);
+            const GPUVAddr addr{cbufs[cbuf_index].address + cbuf_offset};
+            const u32 raw_handle{gpu_memory->Read<u32>(addr)};
+
+            const TextureHandle handle(raw_handle, via_header_index);
+            image_view_indices.push_back(handle.image);
+
+            Sampler* const sampler{texture_cache->GetGraphicsSampler(handle.sampler)};
+            samplers.push_back(sampler->Handle());
+        }
+    }
+    const std::span indices_span(image_view_indices.data(), image_view_indices.size());
+    buffer_cache->UpdateGraphicsBuffers(is_indexed);
+    texture_cache->FillGraphicsImageViews(indices_span, image_view_ids);
+
+    buffer_cache->BindHostGeometryBuffers(is_indexed);
+
+    size_t index{};
+    for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
+        buffer_cache->BindHostStageBuffers(stage);
+        PushImageDescriptors(stage_infos[stage], samplers.data(), image_view_ids.data(),
+                             *texture_cache, *update_descriptor_queue, index);
+    }
+    const VkDescriptorSet descriptor_set{descriptor_allocator.Commit()};
+    update_descriptor_queue->Send(*descriptor_update_template, descriptor_set);
+
+    scheduler->BindGraphicsPipeline(*pipeline);
+    scheduler->Record([descriptor_set, layout = *pipeline_layout](vk::CommandBuffer cmdbuf) {
+        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set,
+                                  nullptr);
+    });
+}
+
+void GraphicsPipeline::MakePipeline(const Device& device, const FixedPipelineState& state,
+                                    VkRenderPass render_pass) {
+    FixedPipelineState::DynamicState dynamic{};
+    if (!device.IsExtExtendedDynamicStateSupported()) {
+        dynamic = state.dynamic_state;
+    }
+    static_vector<VkVertexInputBindingDescription, 32> vertex_bindings;
+    static_vector<VkVertexInputBindingDivisorDescriptionEXT, 32> vertex_binding_divisors;
+    for (size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
+        const bool instanced = state.binding_divisors[index] != 0;
+        const auto rate = instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
+        vertex_bindings.push_back({
+            .binding = static_cast<u32>(index),
+            .stride = dynamic.vertex_strides[index],
+            .inputRate = rate,
+        });
+        if (instanced) {
+            vertex_binding_divisors.push_back({
+                .binding = static_cast<u32>(index),
+                .divisor = state.binding_divisors[index],
+            });
+        }
+    }
+    static_vector<VkVertexInputAttributeDescription, 32> vertex_attributes;
+    const auto& input_attributes = stage_infos[0].loads_generics;
+    for (size_t index = 0; index < state.attributes.size(); ++index) {
+        const auto& attribute = state.attributes[index];
+        if (!attribute.enabled || !input_attributes[index]) {
+            continue;
+        }
+        vertex_attributes.push_back({
+            .location = static_cast<u32>(index),
+            .binding = attribute.buffer,
+            .format = MaxwellToVK::VertexFormat(attribute.Type(), attribute.Size()),
+            .offset = attribute.offset,
+        });
+    }
+    VkPipelineVertexInputStateCreateInfo vertex_input_ci{
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .vertexBindingDescriptionCount = static_cast<u32>(vertex_bindings.size()),
+        .pVertexBindingDescriptions = vertex_bindings.data(),
+        .vertexAttributeDescriptionCount = static_cast<u32>(vertex_attributes.size()),
+        .pVertexAttributeDescriptions = vertex_attributes.data(),
+    };
+    const VkPipelineVertexInputDivisorStateCreateInfoEXT input_divisor_ci{
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT,
+        .pNext = nullptr,
+        .vertexBindingDivisorCount = static_cast<u32>(vertex_binding_divisors.size()),
+        .pVertexBindingDivisors = vertex_binding_divisors.data(),
+    };
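+    // Chain the divisor state only when at least one binding is instanced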
+    if (!vertex_binding_divisors.empty()) {
+        vertex_input_ci.pNext = &input_divisor_ci;
+    }
+    const auto input_assembly_topology = MaxwellToVK::PrimitiveTopology(device, state.topology);
+    const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci{
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .topology = input_assembly_topology,
+        .primitiveRestartEnable = state.primitive_restart_enable != 0 &&
+                                  SupportsPrimitiveRestart(input_assembly_topology),
+    };
+    const VkPipelineTessellationStateCreateInfo tessellation_ci{
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .patchControlPoints = state.patch_control_points_minus_one.Value() + 1,
+    };
+    VkPipelineViewportStateCreateInfo viewport_ci{
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .viewportCount = Maxwell::NumViewports,
+        .pViewports = nullptr,
+        .scissorCount = Maxwell::NumViewports,
+        .pScissors = nullptr,
+    };
+    std::array<VkViewportSwizzleNV, Maxwell::NumViewports> swizzles;
+    std::ranges::transform(state.viewport_swizzles, swizzles.begin(), UnpackViewportSwizzle);
+    VkPipelineViewportSwizzleStateCreateInfoNV swizzle_ci{
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV,
+        .pNext = nullptr,
+        .flags = 0,
+        .viewportCount = Maxwell::NumViewports,
+        .pViewportSwizzles = swizzles.data(),
+    };
+    if (device.IsNvViewportSwizzleSupported()) {
+        viewport_ci.pNext = &swizzle_ci;
+    }
+
+    const VkPipelineRasterizationStateCreateInfo rasterization_ci{
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
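+        // The guest registers are inverted here: clamp is stored as "disabled" and
+        // discard as "rasterize enable"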
+        .depthClampEnable =
+            static_cast<VkBool32>(state.depth_clamp_disabled == 0 ? VK_TRUE : VK_FALSE),
+        .rasterizerDiscardEnable =
+            static_cast<VkBool32>(state.rasterize_enable == 0 ? VK_TRUE : VK_FALSE),
+        .polygonMode = VK_POLYGON_MODE_FILL,
+        .cullMode = static_cast<VkCullModeFlags>(
+            dynamic.cull_enable ? MaxwellToVK::CullFace(dynamic.CullFace()) : VK_CULL_MODE_NONE),
+        .frontFace = MaxwellToVK::FrontFace(dynamic.FrontFace()),
+        .depthBiasEnable = state.depth_bias_enable,
+        .depthBiasConstantFactor = 0.0f,
+        .depthBiasClamp = 0.0f,
+        .depthBiasSlopeFactor = 0.0f,
+        .lineWidth = 1.0f,
+    };
+    const VkPipelineMultisampleStateCreateInfo multisample_ci{
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .rasterizationSamples = MaxwellToVK::MsaaMode(state.msaa_mode),
+        .sampleShadingEnable = VK_FALSE,
+        .minSampleShading = 0.0f,
+        .pSampleMask = nullptr,
+        .alphaToCoverageEnable = VK_FALSE,
+        .alphaToOneEnable = VK_FALSE,
+    };
+    const VkPipelineDepthStencilStateCreateInfo depth_stencil_ci{
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .depthTestEnable = dynamic.depth_test_enable,
+        .depthWriteEnable = dynamic.depth_write_enable,
+        .depthCompareOp = dynamic.depth_test_enable
+                              ? MaxwellToVK::ComparisonOp(dynamic.DepthTestFunc())
+                              : VK_COMPARE_OP_ALWAYS,
+        .depthBoundsTestEnable = dynamic.depth_bounds_enable,
+        .stencilTestEnable = dynamic.stencil_enable,
+        .front = GetStencilFaceState(dynamic.front),
+        .back = GetStencilFaceState(dynamic.back),
+        .minDepthBounds = 0.0f,
+        .maxDepthBounds = 0.0f,
+    };
+    static_vector<VkPipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments;
+    for (size_t index = 0; index < Maxwell::NumRenderTargets; ++index) {
+        static constexpr std::array mask_table{
+            VK_COLOR_COMPONENT_R_BIT,
+            VK_COLOR_COMPONENT_G_BIT,
+            VK_COLOR_COMPONENT_B_BIT,
+            VK_COLOR_COMPONENT_A_BIT,
+        };
+        const auto format{static_cast<Tegra::RenderTargetFormat>(state.color_formats[index])};
+        if (format == Tegra::RenderTargetFormat::NONE) {
+            continue;
+        }
+        const auto& blend{state.attachments[index]};
+        const std::array mask{blend.Mask()};
+        VkColorComponentFlags write_mask{};
+        for (size_t i = 0; i < mask_table.size(); ++i) {
+            write_mask |= mask[i] ? mask_table[i] : 0;
+        }
+        cb_attachments.push_back({
+            .blendEnable = blend.enable != 0,
+            .srcColorBlendFactor = MaxwellToVK::BlendFactor(blend.SourceRGBFactor()),
+            .dstColorBlendFactor = MaxwellToVK::BlendFactor(blend.DestRGBFactor()),
+            .colorBlendOp = MaxwellToVK::BlendEquation(blend.EquationRGB()),
+            .srcAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.SourceAlphaFactor()),
+            .dstAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.DestAlphaFactor()),
+            .alphaBlendOp = MaxwellToVK::BlendEquation(blend.EquationAlpha()),
+            .colorWriteMask = write_mask,
+        });
+    }
+    const VkPipelineColorBlendStateCreateInfo color_blend_ci{
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .logicOpEnable = VK_FALSE,
+        .logicOp = VK_LOGIC_OP_COPY,
+        .attachmentCount = static_cast<u32>(cb_attachments.size()),
+        .pAttachments = cb_attachments.data(),
+        .blendConstants = {},
+    };
+    static_vector<VkDynamicState, 17> dynamic_states{
+        VK_DYNAMIC_STATE_VIEWPORT,           VK_DYNAMIC_STATE_SCISSOR,
+        VK_DYNAMIC_STATE_DEPTH_BIAS,         VK_DYNAMIC_STATE_BLEND_CONSTANTS,
+        VK_DYNAMIC_STATE_DEPTH_BOUNDS,       VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
+        VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+    };
+    if (device.IsExtExtendedDynamicStateSupported()) {
+        static constexpr std::array extended{
+            VK_DYNAMIC_STATE_CULL_MODE_EXT,
+            VK_DYNAMIC_STATE_FRONT_FACE_EXT,
+            VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT,
+            VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT,
+            VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT,
+            VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT,
+            VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT,
+            VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT,
+            VK_DYNAMIC_STATE_STENCIL_OP_EXT,
+        };
+        dynamic_states.insert(dynamic_states.end(), extended.begin(), extended.end());
+    }
+    const VkPipelineDynamicStateCreateInfo dynamic_state_ci{
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .dynamicStateCount = static_cast<u32>(dynamic_states.size()),
+        .pDynamicStates = dynamic_states.data(),
+    };
+    [[maybe_unused]] const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+        .pNext = nullptr,
+        .requiredSubgroupSize = GuestWarpSize,
+    };
+    static_vector<VkPipelineShaderStageCreateInfo, 5> shader_stages;
+    for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
+        if (!spv_modules[stage]) {
+            continue;
+        }
+        [[maybe_unused]] auto& stage_ci = shader_stages.emplace_back(VkPipelineShaderStageCreateInfo{
+            .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+            .pNext = nullptr,
+            .flags = 0,
+            .stage = MaxwellToVK::ShaderStage(static_cast<Tegra::Engines::ShaderType>(stage)),
+            .module = *spv_modules[stage],
+            .pName = "main",
+            .pSpecializationInfo = nullptr,
+        });
+        /*
+        if (program[stage]->entries.uses_warps && device.IsGuestWarpSizeSupported(stage_ci.stage)) {
+            stage_ci.pNext = &subgroup_size_ci;
+        }
+        */
+    }
+    pipeline = device.GetLogical().CreateGraphicsPipeline({
+        .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .stageCount = static_cast<u32>(shader_stages.size()),
+        .pStages = shader_stages.data(),
+        .pVertexInputState = &vertex_input_ci,
+        .pInputAssemblyState = &input_assembly_ci,
+        .pTessellationState = &tessellation_ci,
+        .pViewportState = &viewport_ci,
+        .pRasterizationState = &rasterization_ci,
+        .pMultisampleState = &multisample_ci,
+        .pDepthStencilState = &depth_stencil_ci,
+        .pColorBlendState = &color_blend_ci,
+        .pDynamicState = &dynamic_state_ci,
+        .layout = *pipeline_layout,
+        .renderPass = render_pass,
+        .subpass = 0,
+        .basePipelineHandle = nullptr,
+        .basePipelineIndex = 0,
+    });
+}
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
new file mode 100644
index 0000000000..ba1d34a837
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -0,0 +1,66 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+
+#include "shader_recompiler/shader_info.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
+#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
+#include "video_core/renderer_vulkan/vk_texture_cache.h"
+#include "video_core/renderer_vulkan/vk_buffer_cache.h"
+#include "video_core/vulkan_common/vulkan_wrapper.h"
+
+namespace Vulkan {
+
+class Device;
+class RenderPassCache;
+class VKScheduler;
+class VKUpdateDescriptorQueue;
+
+class GraphicsPipeline {
+    static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage;
+
+public:
+    explicit GraphicsPipeline() = default;
+    explicit GraphicsPipeline(Tegra::Engines::Maxwell3D& maxwell3d,
+                              Tegra::MemoryManager& gpu_memory, VKScheduler& scheduler,
+                              BufferCache& buffer_cache, TextureCache& texture_cache,
+                              const Device& device, VKDescriptorPool& descriptor_pool,
+                              VKUpdateDescriptorQueue& update_descriptor_queue,
+                              RenderPassCache& render_pass_cache, const FixedPipelineState& state,
+                              std::array<vk::ShaderModule, NUM_STAGES> stages,
+                              const std::array<const Shader::Info*, NUM_STAGES>& infos);
+
+    void Configure(bool is_indexed);
+
+    GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = default;
+    GraphicsPipeline(GraphicsPipeline&&) noexcept = default;
+
+    GraphicsPipeline& operator=(const GraphicsPipeline&) = delete;
+    GraphicsPipeline(const GraphicsPipeline&) = delete;
+
+private:
+    void MakePipeline(const Device& device, const FixedPipelineState& state,
+                      VkRenderPass render_pass);
+
+    Tegra::Engines::Maxwell3D* maxwell3d{};
+    Tegra::MemoryManager* gpu_memory{};
+    TextureCache* texture_cache{};
+    BufferCache* buffer_cache{};
+    VKScheduler* scheduler{};
+    VKUpdateDescriptorQueue* update_descriptor_queue{};
+
+    std::array<vk::ShaderModule, NUM_STAGES> spv_modules;
+    std::array<Shader::Info, NUM_STAGES> stage_infos;
+    vk::DescriptorSetLayout descriptor_set_layout;
+    DescriptorAllocator descriptor_allocator;
+    vk::PipelineLayout pipeline_layout;
+    vk::DescriptorUpdateTemplateKHR descriptor_update_template;
+    vk::Pipeline pipeline;
+};
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_pipeline.h b/src/video_core/renderer_vulkan/vk_pipeline.h
deleted file mode 100644
index b062884035..0000000000
--- a/src/video_core/renderer_vulkan/vk_pipeline.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <cstddef>
-
-#include "video_core/vulkan_common/vulkan_wrapper.h"
-
-namespace Vulkan {
-
-class Pipeline {
-public:
-    /// Add a reference count to the pipeline
-    void AddRef() noexcept {
-        ++ref_count;
-    }
-
-    [[nodiscard]] bool RemoveRef() noexcept {
-        --ref_count;
-        return ref_count == 0;
-    }
-
-    [[nodiscard]] u64 UsageTick() const noexcept {
-        return usage_tick;
-    }
-
-protected:
-    u64 usage_tick{};
-
-private:
-    size_t ref_count{};
-};
-
-} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 5477a2903b..c9da2080d4 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -12,8 +12,11 @@
 #include "common/microprofile.h"
 #include "core/core.h"
 #include "core/memory.h"
+#include "shader_recompiler/backend/spirv/emit_spirv.h"
 #include "shader_recompiler/environment.h"
-#include "shader_recompiler/recompiler.h"
+#include "shader_recompiler/frontend/maxwell/control_flow.h"
+#include "shader_recompiler/frontend/maxwell/program.h"
+#include "shader_recompiler/program_header.h"
 #include "video_core/engines/kepler_compute.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/memory_manager.h"
@@ -34,18 +37,18 @@
 namespace Vulkan {
 MICROPROFILE_DECLARE(Vulkan_PipelineCache);
 
-using Tegra::Engines::ShaderType;
-
 namespace {
-class Environment final : public Shader::Environment {
+using Shader::Backend::SPIRV::EmitSPIRV;
+
+class GenericEnvironment : public Shader::Environment {
 public:
-    explicit Environment(Tegra::Engines::KeplerCompute& kepler_compute_,
-                         Tegra::MemoryManager& gpu_memory_, GPUVAddr program_base_)
-        : kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, program_base{program_base_} {}
+    explicit GenericEnvironment() = default;
+    explicit GenericEnvironment(Tegra::MemoryManager& gpu_memory_, GPUVAddr program_base_)
+        : gpu_memory{&gpu_memory_}, program_base{program_base_} {}
 
-    ~Environment() override = default;
+    ~GenericEnvironment() override = default;
 
-    [[nodiscard]] std::optional<u128> Analyze(u32 start_address) {
+    std::optional<u128> Analyze(u32 start_address) {
-        const std::optional<u64> size{TryFindSize(start_address)};
+        const std::optional<u64> size{TryFindSize(program_base + start_address)};
         if (!size) {
             return std::nullopt;
@@ -55,52 +58,50 @@ public:
         return Common::CityHash128(reinterpret_cast<const char*>(code.data()), code.size());
     }
 
-    [[nodiscard]] size_t ShaderSize() const noexcept {
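+    // Size in bytes of the code region discovered by Analyze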
+    [[nodiscard]] size_t CachedSize() const noexcept {
+        return cached_highest - cached_lowest + INST_SIZE;
+    }
+
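+    // Size in bytes of everything actually read, which may exceed CachedSize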
+    [[nodiscard]] size_t ReadSize() const noexcept {
         return read_highest - read_lowest + INST_SIZE;
     }
 
-    [[nodiscard]] u128 ComputeHash() const {
-        const size_t size{ShaderSize()};
+    [[nodiscard]] u128 CalculateHash() const {
+        const size_t size{ReadSize()};
         auto data = std::make_unique<u64[]>(size);
-        gpu_memory.ReadBlock(program_base + read_lowest, data.get(), size);
+        gpu_memory->ReadBlock(program_base + read_lowest, data.get(), size);
         return Common::CityHash128(reinterpret_cast<const char*>(data.get()), size);
     }
 
-    u64 ReadInstruction(u32 address) override {
+    u64 ReadInstruction(u32 address) final {
         read_lowest = std::min(read_lowest, address);
         read_highest = std::max(read_highest, address);
 
         if (address >= cached_lowest && address < cached_highest) {
             return code[address / INST_SIZE];
         }
-        return gpu_memory.Read<u64>(program_base + address);
-    }
-
-    u32 TextureBoundBuffer() override {
-        return kepler_compute.regs.tex_cb_index;
-    }
-
-    std::array<u32, 3> WorkgroupSize() override {
-        const auto& qmd{kepler_compute.launch_description};
-        return {qmd.block_dim_x, qmd.block_dim_y, qmd.block_dim_z};
+        return gpu_memory->Read<u64>(program_base + address);
     }
 
-private:
+protected:
     static constexpr size_t INST_SIZE = sizeof(u64);
-    static constexpr size_t BLOCK_SIZE = 0x1000;
-    static constexpr size_t MAXIMUM_SIZE = 0x100000;
 
-    static constexpr u64 SELF_BRANCH_A = 0xE2400FFFFF87000FULL;
-    static constexpr u64 SELF_BRANCH_B = 0xE2400FFFFF07000FULL;
+    std::optional<u64> TryFindSize(GPUVAddr guest_addr) {
+        constexpr size_t BLOCK_SIZE = 0x1000;
+        constexpr size_t MAXIMUM_SIZE = 0x100000;
+
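+        // Guest shaders end with a branch-to-self; scan for its known encodings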
+        constexpr u64 SELF_BRANCH_A = 0xE2400FFFFF87000FULL;
+        constexpr u64 SELF_BRANCH_B = 0xE2400FFFFF07000FULL;
 
-    std::optional<u64> TryFindSize(u32 start_address) {
-        GPUVAddr guest_addr = program_base + start_address;
         size_t offset = 0;
         size_t size = BLOCK_SIZE;
         while (size <= MAXIMUM_SIZE) {
             code.resize(size / INST_SIZE);
             u64* const data = code.data() + offset / INST_SIZE;
-            gpu_memory.ReadBlock(guest_addr, data, BLOCK_SIZE);
+            gpu_memory->ReadBlock(guest_addr, data, BLOCK_SIZE);
             for (size_t i = 0; i < BLOCK_SIZE; i += INST_SIZE) {
                 const u64 inst = data[i / INST_SIZE];
                 if (inst == SELF_BRANCH_A || inst == SELF_BRANCH_B) {
@@ -114,17 +112,87 @@ private:
         return std::nullopt;
     }
 
-    Tegra::Engines::KeplerCompute& kepler_compute;
-    Tegra::MemoryManager& gpu_memory;
-    GPUVAddr program_base;
+    Tegra::MemoryManager* gpu_memory{};
+    GPUVAddr program_base{};
+
+    std::vector<u64> code;
 
-    u32 read_lowest = 0;
+    u32 read_lowest = std::numeric_limits<u32>::max();
     u32 read_highest = 0;
 
-    std::vector<u64> code;
     u32 cached_lowest = std::numeric_limits<u32>::max();
     u32 cached_highest = 0;
 };
+
+class GraphicsEnvironment final : public GenericEnvironment {
+public:
+    explicit GraphicsEnvironment() = default;
+    explicit GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_,
+                                 Tegra::MemoryManager& gpu_memory_, Maxwell::ShaderProgram program,
+                                 GPUVAddr program_base_, u32 start_offset)
+        : GenericEnvironment{gpu_memory_, program_base_}, maxwell3d{&maxwell3d_} {
+        gpu_memory->ReadBlock(program_base + start_offset, &sph, sizeof(sph));
+        switch (program) {
+        case Maxwell::ShaderProgram::VertexA:
+            stage = Shader::Stage::VertexA;
+            break;
+        case Maxwell::ShaderProgram::VertexB:
+            stage = Shader::Stage::VertexB;
+            break;
+        case Maxwell::ShaderProgram::TesselationControl:
+            stage = Shader::Stage::TessellationControl;
+            break;
+        case Maxwell::ShaderProgram::TesselationEval:
+            stage = Shader::Stage::TessellationEval;
+            break;
+        case Maxwell::ShaderProgram::Geometry:
+            stage = Shader::Stage::Geometry;
+            break;
+        case Maxwell::ShaderProgram::Fragment:
+            stage = Shader::Stage::Fragment;
+            break;
+        default:
+            UNREACHABLE_MSG("Invalid program={}", program);
+        }
+    }
+
+    ~GraphicsEnvironment() override = default;
+
+    u32 TextureBoundBuffer() override {
+        return maxwell3d->regs.tex_cb_index;
+    }
+
+    std::array<u32, 3> WorkgroupSize() override {
+        throw Shader::LogicError("Requesting workgroup size in a graphics stage");
+    }
+
+private:
+    Tegra::Engines::Maxwell3D* maxwell3d{};
+};
+
+class ComputeEnvironment final : public GenericEnvironment {
+public:
+    explicit ComputeEnvironment() = default;
+    explicit ComputeEnvironment(Tegra::Engines::KeplerCompute& kepler_compute_,
+                                Tegra::MemoryManager& gpu_memory_, GPUVAddr program_base_)
+        : GenericEnvironment{gpu_memory_, program_base_}, kepler_compute{&kepler_compute_} {
+        stage = Shader::Stage::Compute;
+    }
+
+    ~ComputeEnvironment() override = default;
+
+    u32 TextureBoundBuffer() override {
+        return kepler_compute->regs.tex_cb_index;
+    }
+
+    std::array<u32, 3> WorkgroupSize() override {
+        const auto& qmd{kepler_compute->launch_description};
+        return {qmd.block_dim_x, qmd.block_dim_y, qmd.block_dim_z};
+    }
+
+private:
+    Tegra::Engines::KeplerCompute* kepler_compute{};
+};
 } // Anonymous namespace
 
 size_t ComputePipelineCacheKey::Hash() const noexcept {
@@ -136,19 +204,68 @@ bool ComputePipelineCacheKey::operator==(const ComputePipelineCacheKey& rhs) con
     return std::memcmp(&rhs, this, sizeof *this) == 0;
 }
 
+size_t GraphicsPipelineCacheKey::Hash() const noexcept {
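+    // Hashing the key as raw bytes is safe: unique object representations are
+    // static_asserted in the header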
+    const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), Size());
+    return static_cast<size_t>(hash);
+}
+
+bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) const noexcept {
+    return std::memcmp(&rhs, this, Size()) == 0;
+}
+
 PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::GPU& gpu_,
                              Tegra::Engines::Maxwell3D& maxwell3d_,
                              Tegra::Engines::KeplerCompute& kepler_compute_,
                              Tegra::MemoryManager& gpu_memory_, const Device& device_,
                              VKScheduler& scheduler_, VKDescriptorPool& descriptor_pool_,
-                             VKUpdateDescriptorQueue& update_descriptor_queue_)
+                             VKUpdateDescriptorQueue& update_descriptor_queue_,
+                             RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_,
+                             TextureCache& texture_cache_)
     : VideoCommon::ShaderCache<ShaderInfo>{rasterizer_}, gpu{gpu_}, maxwell3d{maxwell3d_},
       kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, device{device_},
-      scheduler{scheduler_}, descriptor_pool{descriptor_pool_}, update_descriptor_queue{
-                                                                    update_descriptor_queue_} {}
+      scheduler{scheduler_}, descriptor_pool{descriptor_pool_},
+      update_descriptor_queue{update_descriptor_queue_}, render_pass_cache{render_pass_cache_},
+      buffer_cache{buffer_cache_}, texture_cache{texture_cache_} {
+    const auto& float_control{device.FloatControlProperties()};
+    profile = Shader::Profile{
+        .unified_descriptor_binding = true,
+        .support_float_controls = true,
+        .support_separate_denorm_behavior = float_control.denormBehaviorIndependence ==
+                                            VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR,
+        .support_separate_rounding_mode =
+            float_control.roundingModeIndependence == VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR,
+        .support_fp16_denorm_preserve = float_control.shaderDenormPreserveFloat16 != VK_FALSE,
+        .support_fp32_denorm_preserve = float_control.shaderDenormPreserveFloat32 != VK_FALSE,
+        .support_fp16_denorm_flush = float_control.shaderDenormFlushToZeroFloat16 != VK_FALSE,
+        .support_fp32_denorm_flush = float_control.shaderDenormFlushToZeroFloat32 != VK_FALSE,
+        .support_fp16_signed_zero_nan_preserve =
+            float_control.shaderSignedZeroInfNanPreserveFloat16 != VK_FALSE,
+        .support_fp32_signed_zero_nan_preserve =
+            float_control.shaderSignedZeroInfNanPreserveFloat32 != VK_FALSE,
+        .has_broken_spirv_clamp = true, // TODO: is_intel
+    };
+}
 
 PipelineCache::~PipelineCache() = default;
 
+GraphicsPipeline* PipelineCache::CurrentGraphicsPipeline() {
+    MICROPROFILE_SCOPE(Vulkan_PipelineCache);
+
+    if (!RefreshStages()) {
+        return nullptr;
+    }
+    graphics_key.state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported());
+
+    const auto [pair, is_new]{graphics_cache.try_emplace(graphics_key)};
+    auto& pipeline{pair->second};
+    if (!is_new) {
+        return &pipeline;
+    }
+    pipeline = CreateGraphicsPipeline();
+    return &pipeline;
+}
+
 ComputePipeline* PipelineCache::CurrentComputePipeline() {
     MICROPROFILE_SCOPE(Vulkan_PipelineCache);
 
@@ -170,45 +286,132 @@ ComputePipeline* PipelineCache::CurrentComputePipeline() {
         return &pipeline;
     }
     pipeline = CreateComputePipeline(shader);
-    shader->compute_users.push_back(key);
     return &pipeline;
 }
 
+bool PipelineCache::RefreshStages() {
+    const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()};
+    for (size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
+        if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
+            graphics_key.unique_hashes[index] = u128{};
+            continue;
+        }
+        const auto& shader_config{maxwell3d.regs.shader_config[index]};
+        const auto program{static_cast<Maxwell::ShaderProgram>(index)};
+        const GPUVAddr shader_addr{base_addr + shader_config.offset};
+        const std::optional<VAddr> cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)};
+        if (!cpu_shader_addr) {
+            LOG_ERROR(Render_Vulkan, "Invalid GPU address for shader 0x{:016x}", shader_addr);
+            return false;
+        }
+        const ShaderInfo* shader_info{TryGet(*cpu_shader_addr)};
+        if (!shader_info) {
+            const u32 offset{shader_config.offset};
+            shader_info = MakeShaderInfo(program, base_addr, offset, *cpu_shader_addr);
+        }
+        graphics_key.unique_hashes[index] = shader_info->unique_hash;
+    }
+    return true;
+}
+
+const ShaderInfo* PipelineCache::MakeShaderInfo(Maxwell::ShaderProgram program, GPUVAddr base_addr,
+                                                u32 start_address, VAddr cpu_addr) {
+    GraphicsEnvironment env{maxwell3d, gpu_memory, program, base_addr, start_address};
+    auto info = std::make_unique<ShaderInfo>();
+    if (const std::optional<u128> cached_hash{env.Analyze(start_address)}) {
+        info->unique_hash = *cached_hash;
+        info->size_bytes = env.CachedSize();
+    } else {
+        // Slow path, rarely hit in commercial games:
+        // build a control flow graph to discover the real shader size
+        flow_block_pool.ReleaseContents();
+        Shader::Maxwell::Flow::CFG cfg{env, flow_block_pool, start_address};
+        info->unique_hash = env.CalculateHash();
+        info->size_bytes = env.ReadSize();
+    }
+    const size_t size_bytes{info->size_bytes};
+    const ShaderInfo* const result{info.get()};
+    Register(std::move(info), cpu_addr, size_bytes);
+    return result;
+}
+
+GraphicsPipeline PipelineCache::CreateGraphicsPipeline() {
+    flow_block_pool.ReleaseContents();
+    inst_pool.ReleaseContents();
+    block_pool.ReleaseContents();
+
+    std::array<GraphicsEnvironment, Maxwell::MaxShaderProgram> envs;
+    std::array<Shader::IR::Program, Maxwell::MaxShaderProgram> programs;
+
+    const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()};
+    for (size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
+        if (graphics_key.unique_hashes[index] == u128{}) {
+            continue;
+        }
+        const auto program{static_cast<Maxwell::ShaderProgram>(index)};
+        GraphicsEnvironment& env{envs[index]};
+        const u32 start_address{maxwell3d.regs.shader_config[index].offset};
+        env = GraphicsEnvironment{maxwell3d, gpu_memory, program, base_addr, start_address};
+
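+        // Graphics shaders begin with a program header (SPH); control flow starts past it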
+        const u32 cfg_offset = start_address + sizeof(Shader::ProgramHeader);
+        Shader::Maxwell::Flow::CFG cfg(env, flow_block_pool, cfg_offset);
+        programs[index] = Shader::Maxwell::TranslateProgram(inst_pool, block_pool, env, cfg);
+    }
+    std::array<const Shader::Info*, Maxwell::MaxShaderStage> infos{};
+    std::array<vk::ShaderModule, Maxwell::MaxShaderStage> modules;
+
+    u32 binding{0};
+    for (size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
+        if (graphics_key.unique_hashes[index] == u128{}) {
+            continue;
+        }
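+        // VertexA (index 0) is unimplemented; stage indices shift down one slot
+        // because VertexA and VertexB share the vertex stage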
+        UNIMPLEMENTED_IF(index == 0);
+
+        GraphicsEnvironment& env{envs[index]};
+        Shader::IR::Program& program{programs[index]};
+
+        const size_t stage_index{index - 1};
+        infos[stage_index] = &program.info;
+        std::vector<u32> code{EmitSPIRV(profile, env, program, binding)};
+
+        // FILE* file = fopen("D:\\shader.spv", "wb");
+        // fwrite(code.data(), 4, code.size(), file);
+        // fclose(file);
+        // std::system("spirv-cross --vulkan-semantics D:\\shader.spv");
+
+        modules[stage_index] = BuildShader(device, code);
+    }
+    return GraphicsPipeline(maxwell3d, gpu_memory, scheduler, buffer_cache, texture_cache, device,
+                            descriptor_pool, update_descriptor_queue, render_pass_cache,
+                            graphics_key.state, std::move(modules), infos);
+}
+
 ComputePipeline PipelineCache::CreateComputePipeline(ShaderInfo* shader_info) {
     const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()};
     const auto& qmd{kepler_compute.launch_description};
-    Environment env{kepler_compute, gpu_memory, program_base};
+    ComputeEnvironment env{kepler_compute, gpu_memory, program_base};
     if (const std::optional<u128> cached_hash{env.Analyze(qmd.program_start)}) {
         // TODO: Load from cache
     }
-    const auto& float_control{device.FloatControlProperties()};
-    const Shader::Profile profile{
-        .unified_descriptor_binding = true,
-        .support_float_controls = true,
-        .support_separate_denorm_behavior = float_control.denormBehaviorIndependence ==
-                                            VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR,
-        .support_separate_rounding_mode =
-            float_control.roundingModeIndependence == VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR,
-        .support_fp16_denorm_preserve = float_control.shaderDenormPreserveFloat16 != VK_FALSE,
-        .support_fp32_denorm_preserve = float_control.shaderDenormPreserveFloat32 != VK_FALSE,
-        .support_fp16_denorm_flush = float_control.shaderDenormFlushToZeroFloat16 != VK_FALSE,
-        .support_fp32_denorm_flush = float_control.shaderDenormFlushToZeroFloat32 != VK_FALSE,
-        .support_fp16_signed_zero_nan_preserve =
-            float_control.shaderSignedZeroInfNanPreserveFloat16 != VK_FALSE,
-        .support_fp32_signed_zero_nan_preserve =
-            float_control.shaderSignedZeroInfNanPreserveFloat32 != VK_FALSE,
-        .has_broken_spirv_clamp = true, // TODO: is_intel
-    };
-    const auto [info, code]{Shader::RecompileSPIRV(profile, env, qmd.program_start)};
+    flow_block_pool.ReleaseContents();
+    inst_pool.ReleaseContents();
+    block_pool.ReleaseContents();
+
+    Shader::Maxwell::Flow::CFG cfg{env, flow_block_pool, qmd.program_start};
+    Shader::IR::Program program{Shader::Maxwell::TranslateProgram(inst_pool, block_pool, env, cfg)};
+    u32 binding{0};
+    std::vector<u32> code{EmitSPIRV(profile, env, program, binding)};
     /*
     FILE* file = fopen("D:\\shader.spv", "wb");
     fwrite(code.data(), 4, code.size(), file);
     fclose(file);
     std::system("spirv-dis D:\\shader.spv");
     */
-    shader_info->unique_hash = env.ComputeHash();
-    shader_info->size_bytes = env.ShaderSize();
-    return ComputePipeline{device, descriptor_pool, update_descriptor_queue, info,
+    shader_info->unique_hash = env.CalculateHash();
+    shader_info->size_bytes = env.ReadSize();
+    return ComputePipeline{device, descriptor_pool, update_descriptor_queue, program.info,
                            BuildShader(device, code)};
 }
 
@@ -216,9 +417,6 @@ ComputePipeline* PipelineCache::CreateComputePipelineWithoutShader(VAddr shader_
     ShaderInfo shader;
     ComputePipeline pipeline{CreateComputePipeline(&shader)};
     const ComputePipelineCacheKey key{MakeComputePipelineKey(shader.unique_hash)};
-    shader.compute_users.push_back(key);
-    pipeline.AddRef();
-
     const size_t size_bytes{shader.size_bytes};
     Register(std::make_unique<ShaderInfo>(std::move(shader)), shader_cpu_addr, size_bytes);
     return &compute_cache.emplace(key, std::move(pipeline)).first->second;
@@ -233,18 +431,4 @@ ComputePipelineCacheKey PipelineCache::MakeComputePipelineKey(u128 unique_hash)
     };
 }
 
-void PipelineCache::OnShaderRemoval(ShaderInfo* shader) {
-    for (const ComputePipelineCacheKey& key : shader->compute_users) {
-        const auto it = compute_cache.find(key);
-        ASSERT(it != compute_cache.end());
-
-        Pipeline& pipeline = it->second;
-        if (pipeline.RemoveRef()) {
-            // Wait for the pipeline to be free of GPU usage before destroying it
-            scheduler.Wait(pipeline.UsageTick());
-            compute_cache.erase(it);
-        }
-    }
-}
-
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index eb35abc27f..60fb976dfa 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -12,11 +12,18 @@
 #include <utility>
 #include <vector>
 
-#include <boost/functional/hash.hpp>
-
 #include "common/common_types.h"
+#include "shader_recompiler/frontend/ir/basic_block.h"
+#include "shader_recompiler/frontend/ir/microinstruction.h"
+#include "shader_recompiler/frontend/maxwell/control_flow.h"
+#include "shader_recompiler/object_pool.h"
+#include "shader_recompiler/profile.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/renderer_vulkan/fixed_pipeline_state.h"
+#include "video_core/renderer_vulkan/vk_buffer_cache.h"
+#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
+#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
+#include "video_core/renderer_vulkan/vk_texture_cache.h"
 #include "video_core/shader_cache.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
 
@@ -26,13 +33,6 @@ class System;
 
 namespace Vulkan {
 
-class Device;
-class RasterizerVulkan;
-class ComputePipeline;
-class VKDescriptorPool;
-class VKScheduler;
-class VKUpdateDescriptorQueue;
-
 using Maxwell = Tegra::Engines::Maxwell3D::Regs;
 
 struct ComputePipelineCacheKey {
@@ -52,6 +52,26 @@ static_assert(std::has_unique_object_representations_v<ComputePipelineCacheKey>)
 static_assert(std::is_trivially_copyable_v<ComputePipelineCacheKey>);
 static_assert(std::is_trivially_constructible_v<ComputePipelineCacheKey>);
 
+struct GraphicsPipelineCacheKey {
+    std::array<u128, Maxwell::MaxShaderProgram> unique_hashes;
+    FixedPipelineState state;
+
+    size_t Hash() const noexcept;
+
+    bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept;
+
+    bool operator!=(const GraphicsPipelineCacheKey& rhs) const noexcept {
+        return !operator==(rhs);
+    }
+
+    size_t Size() const noexcept {
+        return sizeof(unique_hashes) + state.Size();
+    }
+};
+static_assert(std::has_unique_object_representations_v<GraphicsPipelineCacheKey>);
+static_assert(std::is_trivially_copyable_v<GraphicsPipelineCacheKey>);
+static_assert(std::is_trivially_constructible_v<GraphicsPipelineCacheKey>);
+
 } // namespace Vulkan
 
 namespace std {
@@ -63,14 +83,28 @@ struct hash<Vulkan::ComputePipelineCacheKey> {
     }
 };
 
+template <>
+struct hash<Vulkan::GraphicsPipelineCacheKey> {
+    size_t operator()(const Vulkan::GraphicsPipelineCacheKey& k) const noexcept {
+        return k.Hash();
+    }
+};
+
 } // namespace std
 
 namespace Vulkan {
 
+class ComputePipeline;
+class Device;
+class RasterizerVulkan;
+class RenderPassCache;
+class VKDescriptorPool;
+class VKScheduler;
+class VKUpdateDescriptorQueue;
+
 struct ShaderInfo {
     u128 unique_hash{};
     size_t size_bytes{};
-    std::vector<ComputePipelineCacheKey> compute_users;
 };
 
 class PipelineCache final : public VideoCommon::ShaderCache<ShaderInfo> {
@@ -80,15 +114,23 @@ public:
                            Tegra::Engines::KeplerCompute& kepler_compute,
                            Tegra::MemoryManager& gpu_memory, const Device& device,
                            VKScheduler& scheduler, VKDescriptorPool& descriptor_pool,
-                           VKUpdateDescriptorQueue& update_descriptor_queue);
+                           VKUpdateDescriptorQueue& update_descriptor_queue,
+                           RenderPassCache& render_pass_cache, BufferCache& buffer_cache,
+                           TextureCache& texture_cache);
     ~PipelineCache() override;
 
-    [[nodiscard]] ComputePipeline* CurrentComputePipeline();
+    [[nodiscard]] GraphicsPipeline* CurrentGraphicsPipeline();
 
-protected:
-    void OnShaderRemoval(ShaderInfo* shader) override;
+    [[nodiscard]] ComputePipeline* CurrentComputePipeline();
 
 private:
+    bool RefreshStages();
+
+    const ShaderInfo* MakeShaderInfo(Maxwell::ShaderProgram program, GPUVAddr base_addr,
+                                     u32 start_address, VAddr cpu_addr);
+
+    GraphicsPipeline CreateGraphicsPipeline();
+
     ComputePipeline CreateComputePipeline(ShaderInfo* shader);
 
     ComputePipeline* CreateComputePipelineWithoutShader(VAddr shader_cpu_addr);
@@ -104,8 +146,20 @@ private:
     VKScheduler& scheduler;
     VKDescriptorPool& descriptor_pool;
     VKUpdateDescriptorQueue& update_descriptor_queue;
+    RenderPassCache& render_pass_cache;
+    BufferCache& buffer_cache;
+    TextureCache& texture_cache;
+
+    GraphicsPipelineCacheKey graphics_key{};
 
     std::unordered_map<ComputePipelineCacheKey, ComputePipeline> compute_cache;
+    std::unordered_map<GraphicsPipelineCacheKey, GraphicsPipeline> graphics_cache;
+
+    Shader::ObjectPool<Shader::IR::Inst> inst_pool;
+    Shader::ObjectPool<Shader::IR::Block> block_pool;
+    Shader::ObjectPool<Shader::Maxwell::Flow::Block> flow_block_pool;
+
+    Shader::Profile profile;
 };
 
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index c94419d29c..036b531b92 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -141,15 +141,18 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
       blit_image(device, scheduler, state_tracker, descriptor_pool),
       astc_decoder_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue,
                         memory_allocator),
-      texture_cache_runtime{device,       scheduler,  memory_allocator,
-                            staging_pool, blit_image, astc_decoder_pass},
+      render_pass_cache(device), texture_cache_runtime{device,           scheduler,
+                                                       memory_allocator, staging_pool,
+                                                       blit_image,       astc_decoder_pass,
+                                                       render_pass_cache},
       texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory),
       buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool,
                            update_descriptor_queue, descriptor_pool),
       buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime),
       pipeline_cache(*this, gpu, maxwell3d, kepler_compute, gpu_memory, device, scheduler,
-                     descriptor_pool, update_descriptor_queue),
-      query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, accelerate_dma{buffer_cache},
+                     descriptor_pool, update_descriptor_queue, render_pass_cache, buffer_cache,
+                     texture_cache),
+      query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, accelerate_dma{buffer_cache},
       fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
       wfi_event(device.GetLogical().CreateEvent()) {
     scheduler.SetQueryCache(query_cache);
@@ -158,7 +161,40 @@
 RasterizerVulkan::~RasterizerVulkan() = default;
 
 void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
-    UNREACHABLE_MSG("Rendering not implemented {} {}", is_indexed, is_instanced);
+    MICROPROFILE_SCOPE(Vulkan_Drawing);
+
+    SCOPE_EXIT({ gpu.TickWork(); });
+    FlushWork();
+
+    query_cache.UpdateCounters();
+
+    GraphicsPipeline* const pipeline{pipeline_cache.CurrentGraphicsPipeline()};
+    if (!pipeline) {
+        return;
+    }
+    update_descriptor_queue.Acquire();
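+    // Hold both cache mutexes while Configure binds buffer and texture state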
+    std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+    pipeline->Configure(is_indexed);
+
+    BeginTransformFeedback();
+
+    scheduler.RequestRenderpass(texture_cache.GetFramebuffer());
+    UpdateDynamicStates();
+
+    const auto& regs{maxwell3d.regs};
+    const u32 num_instances{maxwell3d.mme_draw.instance_count};
+    const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)};
+    scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) {
+        if (draw_params.is_indexed) {
+            cmdbuf.DrawIndexed(draw_params.num_vertices, draw_params.num_instances, 0,
+                               draw_params.base_vertex, draw_params.base_instance);
+        } else {
+            cmdbuf.Draw(draw_params.num_vertices, draw_params.num_instances,
+                        draw_params.base_vertex, draw_params.base_instance);
+        }
+    });
+    EndTransformFeedback();
 }
 
 void RasterizerVulkan::Clear() {
@@ -487,13 +522,11 @@ void RasterizerVulkan::FlushWork() {
     if ((++draw_counter & 7) != 7) {
         return;
     }
-
     if (draw_counter < DRAWS_TO_DISPATCH) {
         // Send recorded tasks to the worker thread
         scheduler.DispatchWork();
         return;
     }
-
     // Otherwise (every certain number of draws) flush execution.
     // This submits commands to the Vulkan driver.
     scheduler.Flush();
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 3fd03b9155..88dbd753b0 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -23,6 +23,7 @@
 #include "video_core/renderer_vulkan/vk_fence_manager.h"
 #include "video_core/renderer_vulkan/vk_pipeline_cache.h"
 #include "video_core/renderer_vulkan/vk_query_cache.h"
+#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
 #include "video_core/renderer_vulkan/vk_texture_cache.h"
@@ -148,6 +149,7 @@ private:
     VKUpdateDescriptorQueue update_descriptor_queue;
     BlitImageHelper blit_image;
     ASTCDecoderPass astc_decoder_pass;
+    RenderPassCache render_pass_cache;
 
     TextureCacheRuntime texture_cache_runtime;
     TextureCache texture_cache;
diff --git a/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp b/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp
new file mode 100644
index 0000000000..7e5ae43ea9
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp
@@ -0,0 +1,97 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <unordered_map>
+
+#include <boost/container/static_vector.hpp>
+
+#include "video_core/renderer_vulkan/maxwell_to_vk.h"
+#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
+#include "video_core/surface.h"
+#include "video_core/vulkan_common/vulkan_device.h"
+#include "video_core/vulkan_common/vulkan_wrapper.h"
+
+namespace Vulkan {
+namespace {
+using VideoCore::Surface::PixelFormat;
+
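+// Attachments stay in VK_IMAGE_LAYOUT_GENERAL so cached render passes never
+// depend on image layout transitions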
+constexpr std::array ATTACHMENT_REFERENCES{
+    VkAttachmentReference{0, VK_IMAGE_LAYOUT_GENERAL},
+    VkAttachmentReference{1, VK_IMAGE_LAYOUT_GENERAL},
+    VkAttachmentReference{2, VK_IMAGE_LAYOUT_GENERAL},
+    VkAttachmentReference{3, VK_IMAGE_LAYOUT_GENERAL},
+    VkAttachmentReference{4, VK_IMAGE_LAYOUT_GENERAL},
+    VkAttachmentReference{5, VK_IMAGE_LAYOUT_GENERAL},
+    VkAttachmentReference{6, VK_IMAGE_LAYOUT_GENERAL},
+    VkAttachmentReference{7, VK_IMAGE_LAYOUT_GENERAL},
+    VkAttachmentReference{8, VK_IMAGE_LAYOUT_GENERAL},
+};
+
+VkAttachmentDescription AttachmentDescription(const Device& device, PixelFormat format,
+                                              VkSampleCountFlagBits samples) {
+    using MaxwellToVK::SurfaceFormat;
+    return {
+        .flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT,
+        .format = SurfaceFormat(device, FormatType::Optimal, true, format).format,
+        .samples = samples,
+        .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
+        .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+        .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
+        .stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
+        .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
+        .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
+    };
+}
+} // Anonymous namespace
+
+RenderPassCache::RenderPassCache(const Device& device_) : device{&device_} {}
+
+VkRenderPass RenderPassCache::Get(const RenderPassKey& key) {
+    const auto [pair, is_new] = cache.try_emplace(key);
+    if (!is_new) {
+        return *pair->second;
+    }
+    boost::container::static_vector<VkAttachmentDescription, 9> descriptions;
+
+    for (size_t index = 0; index < key.color_formats.size(); ++index) {
+        const PixelFormat format{key.color_formats[index]};
+        if (format == PixelFormat::Invalid) {
+            continue;
+        }
+        descriptions.push_back(AttachmentDescription(*device, format, key.samples));
+    }
+    const size_t num_colors{descriptions.size()};
+    const VkAttachmentReference* depth_attachment{};
+    if (key.depth_format != PixelFormat::Invalid) {
+        depth_attachment = &ATTACHMENT_REFERENCES[num_colors];
+        descriptions.push_back(AttachmentDescription(*device, key.depth_format, key.samples));
+    }
+    const VkSubpassDescription subpass{
+        .flags = 0,
+        .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+        .inputAttachmentCount = 0,
+        .pInputAttachments = nullptr,
+        .colorAttachmentCount = static_cast<u32>(num_colors),
+        .pColorAttachments = num_colors != 0 ? ATTACHMENT_REFERENCES.data() : nullptr,
+        .pResolveAttachments = nullptr,
+        .pDepthStencilAttachment = depth_attachment,
+        .preserveAttachmentCount = 0,
+        .pPreserveAttachments = nullptr,
+    };
+    pair->second = device->GetLogical().CreateRenderPass({
+        .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .attachmentCount = static_cast<u32>(descriptions.size()),
+        .pAttachments = descriptions.data(),
+        .subpassCount = 1,
+        .pSubpasses = &subpass,
+        .dependencyCount = 0,
+        .pDependencies = nullptr,
+    });
+    return *pair->second;
+}
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_render_pass_cache.h b/src/video_core/renderer_vulkan/vk_render_pass_cache.h
new file mode 100644
index 0000000000..db8e83f1aa
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_render_pass_cache.h
@@ -0,0 +1,54 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <unordered_map>
+
+#include "video_core/surface.h"
+#include "video_core/vulkan_common/vulkan_wrapper.h"
+
+namespace Vulkan {
+
+struct RenderPassKey {
+    auto operator<=>(const RenderPassKey&) const noexcept = default;
+
+    std::array<VideoCore::Surface::PixelFormat, 8> color_formats;
+    VideoCore::Surface::PixelFormat depth_format;
+    VkSampleCountFlagBits samples;
+};
+
+} // namespace Vulkan
+
+namespace std {
+template <>
+struct hash<Vulkan::RenderPassKey> {
+    [[nodiscard]] size_t operator()(const Vulkan::RenderPassKey& key) const noexcept {
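+        // Pack the depth format and sample count into the high bits and scatter
+        // the color formats at 6-bit strides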
+        size_t value = static_cast<size_t>(key.depth_format) << 48;
+        value ^= static_cast<size_t>(key.samples) << 52;
+        for (size_t i = 0; i < key.color_formats.size(); ++i) {
+            value ^= static_cast<size_t>(key.color_formats[i]) << (i * 6);
+        }
+        return value;
+    }
+};
+} // namespace std
+
+namespace Vulkan {
+
+class Device;
+
+class RenderPassCache {
+public:
+    explicit RenderPassCache(const Device& device_);
+
+    VkRenderPass Get(const RenderPassKey& key);
+
+private:
+    const Device* device{};
+    std::unordered_map<RenderPassKey, vk::RenderPass> cache;
+};
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 88ccf96f51..1bbc542a1c 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -18,6 +18,7 @@
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
 #include "video_core/renderer_vulkan/vk_texture_cache.h"
+#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
 #include "video_core/vulkan_common/vulkan_device.h"
 #include "video_core/vulkan_common/vulkan_memory_allocator.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
@@ -34,19 +35,6 @@ using VideoCommon::SubresourceRange;
 using VideoCore::Surface::IsPixelFormatASTC;
 
 namespace {
-
-constexpr std::array ATTACHMENT_REFERENCES{
-    VkAttachmentReference{0, VK_IMAGE_LAYOUT_GENERAL},
-    VkAttachmentReference{1, VK_IMAGE_LAYOUT_GENERAL},
-    VkAttachmentReference{2, VK_IMAGE_LAYOUT_GENERAL},
-    VkAttachmentReference{3, VK_IMAGE_LAYOUT_GENERAL},
-    VkAttachmentReference{4, VK_IMAGE_LAYOUT_GENERAL},
-    VkAttachmentReference{5, VK_IMAGE_LAYOUT_GENERAL},
-    VkAttachmentReference{6, VK_IMAGE_LAYOUT_GENERAL},
-    VkAttachmentReference{7, VK_IMAGE_LAYOUT_GENERAL},
-    VkAttachmentReference{8, VK_IMAGE_LAYOUT_GENERAL},
-};
-
 constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
     if (color == std::array<float, 4>{0, 0, 0, 0}) {
         return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
@@ -226,23 +214,6 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
     }
 }
 
-[[nodiscard]] VkAttachmentDescription AttachmentDescription(const Device& device,
-                                                            const ImageView* image_view) {
-    using MaxwellToVK::SurfaceFormat;
-    const PixelFormat pixel_format = image_view->format;
-    return VkAttachmentDescription{
-        .flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT,
-        .format = SurfaceFormat(device, FormatType::Optimal, true, pixel_format).format,
-        .samples = image_view->Samples(),
-        .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
-        .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
-        .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
-        .stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
-        .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
-        .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
-    };
-}
-
 [[nodiscard]] VkComponentSwizzle ComponentSwizzle(SwizzleSource swizzle) {
     switch (swizzle) {
     case SwizzleSource::Zero:
@@ -1164,7 +1135,6 @@ Sampler::Sampler(TextureCacheRuntime& runtime, const Tegra::Texture::TSCEntry& t
 
 Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM_RT> color_buffers,
                          ImageView* depth_buffer, const VideoCommon::RenderTargets& key) {
-    std::vector<VkAttachmentDescription> descriptions;
     std::vector<VkImageView> attachments;
     RenderPassKey renderpass_key{};
     s32 num_layers = 1;
@@ -1175,7 +1145,6 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
             renderpass_key.color_formats[index] = PixelFormat::Invalid;
             continue;
         }
-        descriptions.push_back(AttachmentDescription(runtime.device, color_buffer));
         attachments.push_back(color_buffer->RenderTarget());
         renderpass_key.color_formats[index] = color_buffer->format;
         num_layers = std::max(num_layers, color_buffer->range.extent.layers);
@@ -1185,10 +1154,7 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
         ++num_images;
     }
     const size_t num_colors = attachments.size();
-    const VkAttachmentReference* depth_attachment =
-        depth_buffer ? &ATTACHMENT_REFERENCES[num_colors] : nullptr;
     if (depth_buffer) {
-        descriptions.push_back(AttachmentDescription(runtime.device, depth_buffer));
         attachments.push_back(depth_buffer->RenderTarget());
         renderpass_key.depth_format = depth_buffer->format;
         num_layers = std::max(num_layers, depth_buffer->range.extent.layers);
@@ -1201,40 +1167,14 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
     }
     renderpass_key.samples = samples;
 
-    const auto& device = runtime.device.GetLogical();
-    const auto [cache_pair, is_new] = runtime.renderpass_cache.try_emplace(renderpass_key);
-    if (is_new) {
-        const VkSubpassDescription subpass{
-            .flags = 0,
-            .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
-            .inputAttachmentCount = 0,
-            .pInputAttachments = nullptr,
-            .colorAttachmentCount = static_cast<u32>(num_colors),
-            .pColorAttachments = num_colors != 0 ? ATTACHMENT_REFERENCES.data() : nullptr,
-            .pResolveAttachments = nullptr,
-            .pDepthStencilAttachment = depth_attachment,
-            .preserveAttachmentCount = 0,
-            .pPreserveAttachments = nullptr,
-        };
-        cache_pair->second = device.CreateRenderPass(VkRenderPassCreateInfo{
-            .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
-            .pNext = nullptr,
-            .flags = 0,
-            .attachmentCount = static_cast<u32>(descriptions.size()),
-            .pAttachments = descriptions.data(),
-            .subpassCount = 1,
-            .pSubpasses = &subpass,
-            .dependencyCount = 0,
-            .pDependencies = nullptr,
-        });
-    }
-    renderpass = *cache_pair->second;
+    renderpass = runtime.render_pass_cache.Get(renderpass_key);
+
     render_area = VkExtent2D{
         .width = key.size.width,
         .height = key.size.height,
     };
     num_color_buffers = static_cast<u32>(num_colors);
-    framebuffer = device.CreateFramebuffer(VkFramebufferCreateInfo{
+    framebuffer = runtime.device.GetLogical().CreateFramebuffer({
         .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
         .pNext = nullptr,
         .flags = 0,
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 172bcdf98f..189ee5a68e 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -26,35 +26,10 @@ class Device;
 class Image;
 class ImageView;
 class Framebuffer;
+class RenderPassCache;
 class StagingBufferPool;
 class VKScheduler;
 
-struct RenderPassKey {
-    constexpr auto operator<=>(const RenderPassKey&) const noexcept = default;
-
-    std::array<PixelFormat, NUM_RT> color_formats;
-    PixelFormat depth_format;
-    VkSampleCountFlagBits samples;
-};
-
-} // namespace Vulkan
-
-namespace std {
-template <>
-struct hash<Vulkan::RenderPassKey> {
-    [[nodiscard]] constexpr size_t operator()(const Vulkan::RenderPassKey& key) const noexcept {
-        size_t value = static_cast<size_t>(key.depth_format) << 48;
-        value ^= static_cast<size_t>(key.samples) << 52;
-        for (size_t i = 0; i < key.color_formats.size(); ++i) {
-            value ^= static_cast<size_t>(key.color_formats[i]) << (i * 6);
-        }
-        return value;
-    }
-};
-} // namespace std
-
-namespace Vulkan {
-
 struct TextureCacheRuntime {
     const Device& device;
     VKScheduler& scheduler;
@@ -62,7 +37,7 @@ struct TextureCacheRuntime {
     StagingBufferPool& staging_buffer_pool;
     BlitImageHelper& blit_image_helper;
     ASTCDecoderPass& astc_decoder_pass;
-    std::unordered_map<RenderPassKey, vk::RenderPass> renderpass_cache{};
+    RenderPassCache& render_pass_cache;
 
     void Finish();
 
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index 4887d6fd9a..f0e5b098c0 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -49,6 +49,7 @@ constexpr std::array REQUIRED_EXTENSIONS{
     VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME,
     VK_EXT_ROBUSTNESS_2_EXTENSION_NAME,
     VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME,
+    VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME,
 #ifdef _WIN32
     VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME,
 #endif
@@ -312,6 +313,13 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
     };
     SetNext(next, host_query_reset);
 
+    VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT demote{
+        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT,
+        .pNext = nullptr,
+        .shaderDemoteToHelperInvocation = true,
+    };
+    SetNext(next, demote);
+
     VkPhysicalDeviceFloat16Int8FeaturesKHR float16_int8;
     if (is_float16_supported) {
         float16_int8 = {
@@ -597,8 +605,14 @@ void Device::CheckSuitability(bool requires_swapchain) const {
             throw vk::Exception(VK_ERROR_FEATURE_NOT_PRESENT);
         }
     }
+    VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT demote{};
+    demote.sType =
+        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT;
+    demote.pNext = nullptr;
+
     VkPhysicalDeviceRobustness2FeaturesEXT robustness2{};
     robustness2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT;
+    robustness2.pNext = &demote;
 
     VkPhysicalDeviceFeatures2KHR features2{};
     features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
@@ -625,6 +639,7 @@ void Device::CheckSuitability(bool requires_swapchain) const {
         std::make_pair(features.shaderImageGatherExtended, "shaderImageGatherExtended"),
         std::make_pair(features.shaderStorageImageWriteWithoutFormat,
                        "shaderStorageImageWriteWithoutFormat"),
+        std::make_pair(demote.shaderDemoteToHelperInvocation, "shaderDemoteToHelperInvocation"),
         std::make_pair(robustness2.robustBufferAccess2, "robustBufferAccess2"),
         std::make_pair(robustness2.robustImageAccess2, "robustImageAccess2"),
         std::make_pair(robustness2.nullDescriptor, "nullDescriptor"),
-- 
cgit v1.2.3-70-g09d2


From 0bb85f6a753c769266c95c4ba146b25b9eaaaffd Mon Sep 17 00:00:00 2001
From: lat9nq <22451773+lat9nq@users.noreply.github.com>
Date: Mon, 5 Apr 2021 22:25:22 -0400
Subject: shader_recompiler,video_core: Clean up some GCC and Clang errors

Mostly fixes unused-* warnings, implicit conversions, braced scalar
initializers, -fpermissive errors, and some others.

Some Clang errors likely remain in video_core, and std::ranges is still
an outstanding issue in shader_recompiler.
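
As an illustration of the braced-scalar-init fixes, a condensed sketch
(the enums here are stand-ins; the struct mirrors the IR::FpControl
call sites in the hunks below):

    enum class FpRounding { DontCare };
    enum class FmzMode { None };
    struct FpControl {
        bool no_contraction{};
        FpRounding rounding{};
        FmzMode fmz_mode{};
    };
    // Clang flags ".no_contraction{true}" with -Wbraced-scalar-init,
    // which warnings-as-errors builds reject; '=' designated
    // initializers compile cleanly on GCC and Clang alike:
    const FpControl control{
        .no_contraction = true,
        .rounding = FpRounding::DontCare,
        .fmz_mode = FmzMode::None,
    };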

shader_recompiler: cmake: Force bracket depth to 1024 on Clang
Increases the maximum fold expression depth

thread_worker: Include condition_variable

Don't use list initializers in control flow
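
A condensed sketch of the FuncTraits repair in emit_spirv.cpp: the
self-inheriting primary template was ill-formed, and dependent names
now spell out their disambiguators (ArgOf is an illustrative alias,
not code from the tree):

    #include <cstddef>
    #include <tuple>

    template <class Func>
    struct FuncTraits {};

    template <class ReturnType_, class... Args>
    struct FuncTraits<ReturnType_ (*)(Args...)> {
        using ReturnType = ReturnType_;

        template <std::size_t I>
        using ArgType = std::tuple_element_t<I, std::tuple<Args...>>;
    };

    // GCC without -fpermissive (and Clang) require 'typename' and
    // 'template' on dependent names:
    template <auto func, std::size_t I>
    using ArgOf = typename FuncTraits<decltype(func)>::template ArgType<I>;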

Co-authored-by: ReinUsesLisp <reinuseslisp@airmail.cc>
---
---
 src/common/thread_worker.h                         |   1 +
 src/shader_recompiler/CMakeLists.txt               |   2 +
 .../backend/spirv/emit_context.cpp                 |   4 +-
 src/shader_recompiler/backend/spirv/emit_spirv.cpp |  19 +--
 .../backend/spirv/emit_spirv_image.cpp             |  11 +-
 .../backend/spirv/emit_spirv_warp.cpp              |   2 +-
 src/shader_recompiler/file_environment.h           |   2 +-
 src/shader_recompiler/frontend/ir/attribute.cpp    |   4 +-
 src/shader_recompiler/frontend/ir/basic_block.cpp  |   2 +-
 src/shader_recompiler/frontend/ir/condition.cpp    |   6 +-
 src/shader_recompiler/frontend/ir/condition.h      |   4 +-
 src/shader_recompiler/frontend/ir/ir_emitter.cpp   |   4 +-
 .../frontend/ir/microinstruction.cpp               |  16 +--
 .../frontend/ir/microinstruction.h                 |   4 +-
 src/shader_recompiler/frontend/ir/opcodes.cpp      |   2 +-
 src/shader_recompiler/frontend/ir/program.cpp      |   2 -
 src/shader_recompiler/frontend/ir/value.cpp        |   4 +-
 src/shader_recompiler/frontend/ir/value.h          |   2 +-
 .../frontend/maxwell/control_flow.cpp              | 140 +++++++++------------
 src/shader_recompiler/frontend/maxwell/decode.cpp  |  10 +-
 .../maxwell/indirect_branch_table_track.cpp        |  10 +-
 .../frontend/maxwell/structured_control_flow.cpp   |   3 +-
 .../frontend/maxwell/translate/impl/double_add.cpp |   6 +-
 .../translate/impl/double_fused_multiply_add.cpp   |   6 +-
 .../maxwell/translate/impl/double_multiply.cpp     |   6 +-
 .../maxwell/translate/impl/floating_point_add.cpp  |   6 +-
 .../translate/impl/floating_point_compare.cpp      |   3 +-
 .../impl/floating_point_compare_and_set.cpp        |   6 +-
 .../floating_point_conversion_floating_point.cpp   |   6 +-
 .../impl/floating_point_conversion_integer.cpp     |  11 +-
 .../impl/floating_point_fused_multiply_add.cpp     |   6 +-
 .../translate/impl/floating_point_min_max.cpp      |   6 +-
 .../translate/impl/floating_point_multiply.cpp     |   8 +-
 .../impl/floating_point_set_predicate.cpp          |   6 +-
 .../translate/impl/floating_point_swizzled_add.cpp |   6 +-
 .../translate/impl/half_floating_point_add.cpp     |  11 +-
 .../half_floating_point_fused_multiply_add.cpp     |  11 +-
 .../impl/half_floating_point_multiply.cpp          |  11 +-
 .../translate/impl/half_floating_point_set.cpp     |  11 +-
 .../impl/half_floating_point_set_predicate.cpp     |  12 +-
 .../frontend/maxwell/translate/impl/impl.cpp       |   8 +-
 .../maxwell/translate/impl/integer_add.cpp         |   1 -
 .../impl/integer_floating_point_conversion.cpp     |   4 +-
 .../maxwell/translate/impl/load_constant.cpp       |   2 +-
 .../translate/impl/load_store_local_shared.cpp     |   9 +-
 .../maxwell/translate/impl/load_store_memory.cpp   |   4 +-
 .../maxwell/translate/impl/texture_fetch.cpp       |   2 +-
 .../translate/impl/texture_fetch_swizzled.cpp      |   2 +-
 .../translate/impl/texture_gather_swizzled.cpp     |   2 +-
 .../translate/impl/texture_load_swizzled.cpp       |   2 +-
 .../maxwell/translate/impl/texture_query.cpp       |   2 +-
 .../maxwell/translate/impl/video_set_predicate.cpp |   1 -
 .../ir_opt/collect_shader_info_pass.cpp            |  20 +--
 .../ir_opt/constant_propagation_pass.cpp           |  49 ++++----
 .../global_memory_to_storage_buffer_pass.cpp       |  42 +++----
 .../ir_opt/identity_removal_pass.cpp               |   3 +-
 .../ir_opt/lower_fp16_to_fp32.cpp                  |   2 +-
 src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp  |   4 +-
 src/shader_recompiler/ir_opt/texture_pass.cpp      |  32 ++---
 src/shader_recompiler/ir_opt/verification_pass.cpp |   4 +-
 src/tests/common/unique_function.cpp               |   2 +
 src/video_core/CMakeLists.txt                      |   2 +-
 .../renderer_vulkan/vk_graphics_pipeline.cpp       |  21 ++--
 .../renderer_vulkan/vk_pipeline_cache.cpp          |   5 +-
 .../renderer_vulkan/vk_render_pass_cache.cpp       |   2 -
 .../renderer_vulkan/vk_texture_cache.cpp           |   2 +-
 66 files changed, 308 insertions(+), 313 deletions(-)

(limited to 'src/shader_recompiler/frontend/ir/attribute.cpp')

diff --git a/src/common/thread_worker.h b/src/common/thread_worker.h
index 0a975a869d..cd0017726f 100644
--- a/src/common/thread_worker.h
+++ b/src/common/thread_worker.h
@@ -5,6 +5,7 @@
 #pragma once
 
 #include <atomic>
+#include <condition_variable>
 #include <functional>
 #include <mutex>
 #include <stop_token>
diff --git a/src/shader_recompiler/CMakeLists.txt b/src/shader_recompiler/CMakeLists.txt
index 22639fe132..551bf1c582 100644
--- a/src/shader_recompiler/CMakeLists.txt
+++ b/src/shader_recompiler/CMakeLists.txt
@@ -196,6 +196,8 @@ else()
         $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
         $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
         -Werror=unused-variable
+
+        $<$<CXX_COMPILER_ID:Clang>:-fbracket-depth=1024>
     )
 endif()
 
diff --git a/src/shader_recompiler/backend/spirv/emit_context.cpp b/src/shader_recompiler/backend/spirv/emit_context.cpp
index b738e00cc2..0c114402b4 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_context.cpp
@@ -4,6 +4,7 @@
 
 #include <algorithm>
 #include <array>
+#include <climits>
 #include <string_view>
 
 #include <fmt/format.h>
@@ -116,7 +117,8 @@ void VectorTypes::Define(Sirit::Module& sirit_ctx, Id base_type, std::string_vie
         const std::string_view def_name_view(
             def_name.data(),
             fmt::format_to_n(def_name.data(), def_name.size(), "{}x{}", name, i + 1).size);
-        defs[i] = sirit_ctx.Name(sirit_ctx.TypeVector(base_type, i + 1), def_name_view);
+        defs[static_cast<size_t>(i)] =
+            sirit_ctx.Name(sirit_ctx.TypeVector(base_type, i + 1), def_name_view);
     }
 }
 
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.cpp b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
index 32512a0e5f..355cf0ca8a 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
@@ -16,7 +16,7 @@
 namespace Shader::Backend::SPIRV {
 namespace {
 template <class Func>
-struct FuncTraits : FuncTraits<Func> {};
+struct FuncTraits {};
 
 template <class ReturnType_, class... Args>
 struct FuncTraits<ReturnType_ (*)(Args...)> {
@@ -64,17 +64,20 @@ ArgType Arg(EmitContext& ctx, const IR::Value& arg) {
 template <auto func, bool is_first_arg_inst, size_t... I>
 void Invoke(EmitContext& ctx, IR::Inst* inst, std::index_sequence<I...>) {
     using Traits = FuncTraits<decltype(func)>;
-    if constexpr (std::is_same_v<Traits::ReturnType, Id>) {
+    if constexpr (std::is_same_v<typename Traits::ReturnType, Id>) {
         if constexpr (is_first_arg_inst) {
-            SetDefinition<func>(ctx, inst, inst, Arg<Traits::ArgType<I + 2>>(ctx, inst->Arg(I))...);
+            SetDefinition<func>(
+                ctx, inst, inst,
+                Arg<typename Traits::template ArgType<I + 2>>(ctx, inst->Arg(I))...);
         } else {
-            SetDefinition<func>(ctx, inst, Arg<Traits::ArgType<I + 1>>(ctx, inst->Arg(I))...);
+            SetDefinition<func>(
+                ctx, inst, Arg<typename Traits::template ArgType<I + 1>>(ctx, inst->Arg(I))...);
         }
     } else {
         if constexpr (is_first_arg_inst) {
-            func(ctx, inst, Arg<Traits::ArgType<I + 2>>(ctx, inst->Arg(I))...);
+            func(ctx, inst, Arg<typename Traits::template ArgType<I + 2>>(ctx, inst->Arg(I))...);
         } else {
-            func(ctx, Arg<Traits::ArgType<I + 1>>(ctx, inst->Arg(I))...);
+            func(ctx, Arg<typename Traits::template ArgType<I + 1>>(ctx, inst->Arg(I))...);
         }
     }
 }
@@ -94,14 +97,14 @@ void Invoke(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitInst(EmitContext& ctx, IR::Inst* inst) {
-    switch (inst->Opcode()) {
+    switch (inst->GetOpcode()) {
 #define OPCODE(name, result_type, ...)                                                             \
     case IR::Opcode::name:                                                                         \
         return Invoke<&Emit##name>(ctx, inst);
 #include "shader_recompiler/frontend/ir/opcodes.inc"
 #undef OPCODE
     }
-    throw LogicError("Invalid opcode {}", inst->Opcode());
+    throw LogicError("Invalid opcode {}", inst->GetOpcode());
 }
 
 Id TypeId(const EmitContext& ctx, IR::Type type) {
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
index f0f8db8c37..815ca62992 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -43,11 +43,13 @@ public:
             // LOG_WARNING("Not all arguments in PTP are immediate, STUBBING");
             return;
         }
-        const IR::Opcode opcode{values[0]->Opcode()};
-        if (opcode != values[1]->Opcode() || opcode != IR::Opcode::CompositeConstructU32x4) {
+        const IR::Opcode opcode{values[0]->GetOpcode()};
+        if (opcode != values[1]->GetOpcode() || opcode != IR::Opcode::CompositeConstructU32x4) {
             throw LogicError("Invalid PTP arguments");
         }
-        auto read{[&](int a, int b) { return ctx.Constant(ctx.U32[1], values[a]->Arg(b).U32()); }};
+        auto read{[&](unsigned int a, unsigned int b) {
+            return ctx.Constant(ctx.U32[1], values[a]->Arg(b).U32());
+        }};
 
         const Id offsets{
             ctx.ConstantComposite(ctx.TypeArray(ctx.U32[2], ctx.Constant(ctx.U32[1], 4)),
@@ -297,13 +299,14 @@ Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id
 
 Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
                        const IR::Value& offset, const IR::Value& offset2, Id dref) {
-    const auto info{inst->Flags<IR::TextureInstInfo>()};
     const ImageOperands operands(ctx, offset, offset2);
     return Emit(&EmitContext::OpImageSparseDrefGather, &EmitContext::OpImageDrefGather, ctx, inst,
                 ctx.F32[4], Texture(ctx, index), coords, dref, operands.Mask(), operands.Span());
 }
 
+#ifdef _WIN32
 #pragma optimize("", off)
+#endif
 
 Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id offset,
                   Id lod, Id ms) {
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp
index c57bd291db..12a03ed6ed 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_warp.cpp
@@ -7,7 +7,7 @@
 namespace Shader::Backend::SPIRV {
 namespace {
 Id WarpExtract(EmitContext& ctx, Id value) {
-    const Id shift{ctx.Constant(ctx.U32[1], 5)};
+    [[maybe_unused]] const Id shift{ctx.Constant(ctx.U32[1], 5)};
     const Id local_index{ctx.OpLoad(ctx.U32[1], ctx.subgroup_local_invocation_id)};
     return ctx.OpVectorExtractDynamic(ctx.U32[1], value, local_index);
 }
diff --git a/src/shader_recompiler/file_environment.h b/src/shader_recompiler/file_environment.h
index 17640a6229..71601f8fd6 100644
--- a/src/shader_recompiler/file_environment.h
+++ b/src/shader_recompiler/file_environment.h
@@ -7,7 +7,7 @@
 
 namespace Shader {
 
-class FileEnvironment final : public Environment {
+class FileEnvironment : public Environment {
 public:
     explicit FileEnvironment(const char* path);
     ~FileEnvironment() override;
diff --git a/src/shader_recompiler/frontend/ir/attribute.cpp b/src/shader_recompiler/frontend/ir/attribute.cpp
index 4811242ea0..7993e5c436 100644
--- a/src/shader_recompiler/frontend/ir/attribute.cpp
+++ b/src/shader_recompiler/frontend/ir/attribute.cpp
@@ -17,7 +17,7 @@ u32 GenericAttributeIndex(Attribute attribute) {
     if (!IsGeneric(attribute)) {
         throw InvalidArgument("Attribute is not generic {}", attribute);
     }
-    return (static_cast<int>(attribute) - static_cast<int>(Attribute::Generic0X)) / 4;
+    return (static_cast<u32>(attribute) - static_cast<u32>(Attribute::Generic0X)) / 4u;
 }
 
 std::string NameOf(Attribute attribute) {
@@ -444,4 +444,4 @@ std::string NameOf(Attribute attribute) {
     return fmt::format("<reserved attribute {}>", static_cast<int>(attribute));
 }
 
-} // namespace Shader::IR
\ No newline at end of file
+} // namespace Shader::IR
diff --git a/src/shader_recompiler/frontend/ir/basic_block.cpp b/src/shader_recompiler/frontend/ir/basic_block.cpp
index ec029dfd6e..e1f0191f40 100644
--- a/src/shader_recompiler/frontend/ir/basic_block.cpp
+++ b/src/shader_recompiler/frontend/ir/basic_block.cpp
@@ -155,7 +155,7 @@ std::string DumpBlock(const Block& block, const std::map<const Block*, size_t>&
     ret += fmt::format(": begin={:04x} end={:04x}\n", block.LocationBegin(), block.LocationEnd());
 
     for (const Inst& inst : block) {
-        const Opcode op{inst.Opcode()};
+        const Opcode op{inst.GetOpcode()};
         ret += fmt::format("[{:016x}] ", reinterpret_cast<u64>(&inst));
         if (TypeOf(op) != Type::Void) {
             ret += fmt::format("%{:<5} = {}", InstIndex(inst_to_index, inst_index, &inst), op);
diff --git a/src/shader_recompiler/frontend/ir/condition.cpp b/src/shader_recompiler/frontend/ir/condition.cpp
index ec1659e2bc..fc18ea2a2f 100644
--- a/src/shader_recompiler/frontend/ir/condition.cpp
+++ b/src/shader_recompiler/frontend/ir/condition.cpp
@@ -12,10 +12,10 @@ namespace Shader::IR {
 
 std::string NameOf(Condition condition) {
     std::string ret;
-    if (condition.FlowTest() != FlowTest::T) {
-        ret = fmt::to_string(condition.FlowTest());
+    if (condition.GetFlowTest() != FlowTest::T) {
+        ret = fmt::to_string(condition.GetFlowTest());
     }
-    const auto [pred, negated]{condition.Pred()};
+    const auto [pred, negated]{condition.GetPred()};
     if (!ret.empty()) {
         ret += '&';
     }
diff --git a/src/shader_recompiler/frontend/ir/condition.h b/src/shader_recompiler/frontend/ir/condition.h
index 51c2f15cf5..aa8597c608 100644
--- a/src/shader_recompiler/frontend/ir/condition.h
+++ b/src/shader_recompiler/frontend/ir/condition.h
@@ -30,11 +30,11 @@ public:
 
     auto operator<=>(const Condition&) const noexcept = default;
 
-    [[nodiscard]] IR::FlowTest FlowTest() const noexcept {
+    [[nodiscard]] IR::FlowTest GetFlowTest() const noexcept {
         return static_cast<IR::FlowTest>(flow_test);
     }
 
-    [[nodiscard]] std::pair<IR::Pred, bool> Pred() const noexcept {
+    [[nodiscard]] std::pair<IR::Pred, bool> GetPred() const noexcept {
         return {static_cast<IR::Pred>(pred), pred_negated != 0};
     }
 
diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.cpp b/src/shader_recompiler/frontend/ir/ir_emitter.cpp
index 13eb2de4c4..a2104bdb31 100644
--- a/src/shader_recompiler/frontend/ir/ir_emitter.cpp
+++ b/src/shader_recompiler/frontend/ir/ir_emitter.cpp
@@ -290,8 +290,8 @@ static U1 GetFlowTest(IREmitter& ir, FlowTest flow_test) {
 }
 
 U1 IREmitter::Condition(IR::Condition cond) {
-    const FlowTest flow_test{cond.FlowTest()};
-    const auto [pred, is_negated]{cond.Pred()};
+    const FlowTest flow_test{cond.GetFlowTest()};
+    const auto [pred, is_negated]{cond.GetPred()};
     return LogicalAnd(GetPred(pred, is_negated), GetFlowTest(*this, flow_test));
 }
 
diff --git a/src/shader_recompiler/frontend/ir/microinstruction.cpp b/src/shader_recompiler/frontend/ir/microinstruction.cpp
index 481202d94b..ceb44e6042 100644
--- a/src/shader_recompiler/frontend/ir/microinstruction.cpp
+++ b/src/shader_recompiler/frontend/ir/microinstruction.cpp
@@ -12,7 +12,7 @@
 namespace Shader::IR {
 namespace {
 void CheckPseudoInstruction(IR::Inst* inst, IR::Opcode opcode) {
-    if (inst && inst->Opcode() != opcode) {
+    if (inst && inst->GetOpcode() != opcode) {
         throw LogicError("Invalid pseudo-instruction");
     }
 }
@@ -25,11 +25,17 @@ void SetPseudoInstruction(IR::Inst*& dest_inst, IR::Inst* pseudo_inst) {
 }
 
 void RemovePseudoInstruction(IR::Inst*& inst, IR::Opcode expected_opcode) {
-    if (inst->Opcode() != expected_opcode) {
+    if (inst->GetOpcode() != expected_opcode) {
         throw LogicError("Undoing use of invalid pseudo-op");
     }
     inst = nullptr;
 }
+
+void AllocAssociatedInsts(std::unique_ptr<AssociatedInsts>& associated_insts) {
+    if (!associated_insts) {
+        associated_insts = std::make_unique<AssociatedInsts>();
+    }
+}
 } // Anonymous namespace
 
 Inst::Inst(IR::Opcode op_, u32 flags_) noexcept : op{op_}, flags{flags_} {
@@ -249,12 +255,6 @@ void Inst::ReplaceOpcode(IR::Opcode opcode) {
     op = opcode;
 }
 
-void AllocAssociatedInsts(std::unique_ptr<AssociatedInsts>& associated_insts) {
-    if (!associated_insts) {
-        associated_insts = std::make_unique<AssociatedInsts>();
-    }
-}
-
 void Inst::Use(const Value& value) {
     Inst* const inst{value.Inst()};
     ++inst->use_count;
diff --git a/src/shader_recompiler/frontend/ir/microinstruction.h b/src/shader_recompiler/frontend/ir/microinstruction.h
index 6658dc674e..97dc91d855 100644
--- a/src/shader_recompiler/frontend/ir/microinstruction.h
+++ b/src/shader_recompiler/frontend/ir/microinstruction.h
@@ -46,7 +46,7 @@ public:
     }
 
     /// Get the opcode this microinstruction represents.
-    [[nodiscard]] IR::Opcode Opcode() const noexcept {
+    [[nodiscard]] IR::Opcode GetOpcode() const noexcept {
         return op;
     }
 
@@ -95,7 +95,7 @@ public:
     requires(sizeof(FlagsType) <= sizeof(u32) && std::is_trivially_copyable_v<FlagsType>)
         [[nodiscard]] FlagsType Flags() const noexcept {
         FlagsType ret;
-        std::memcpy(&ret, &flags, sizeof(ret));
+        std::memcpy(reinterpret_cast<char*>(&ret), &flags, sizeof(ret));
         return ret;
     }
 
diff --git a/src/shader_recompiler/frontend/ir/opcodes.cpp b/src/shader_recompiler/frontend/ir/opcodes.cpp
index 1cb9db6c9c..002dbf94e9 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.cpp
+++ b/src/shader_recompiler/frontend/ir/opcodes.cpp
@@ -49,7 +49,7 @@ constexpr std::array META_TABLE{
 #define OPCODE(name_token, type_token, ...)                                                        \
     OpcodeMeta{                                                                                    \
         .name{#name_token},                                                                        \
-        .type{type_token},                                                                         \
+        .type = type_token,                                                                         \
         .arg_types{__VA_ARGS__},                                                                   \
     },
 #include "opcodes.inc"
diff --git a/src/shader_recompiler/frontend/ir/program.cpp b/src/shader_recompiler/frontend/ir/program.cpp
index 5f51aeb5f3..89a17fb1b4 100644
--- a/src/shader_recompiler/frontend/ir/program.cpp
+++ b/src/shader_recompiler/frontend/ir/program.cpp
@@ -2,8 +2,6 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-#pragma once
-
 #include <map>
 #include <string>
 
diff --git a/src/shader_recompiler/frontend/ir/value.cpp b/src/shader_recompiler/frontend/ir/value.cpp
index 837c1b487f..1e7ffb86d5 100644
--- a/src/shader_recompiler/frontend/ir/value.cpp
+++ b/src/shader_recompiler/frontend/ir/value.cpp
@@ -33,11 +33,11 @@ Value::Value(u64 value) noexcept : type{Type::U64}, imm_u64{value} {}
 Value::Value(f64 value) noexcept : type{Type::F64}, imm_f64{value} {}
 
 bool Value::IsIdentity() const noexcept {
-    return type == Type::Opaque && inst->Opcode() == Opcode::Identity;
+    return type == Type::Opaque && inst->GetOpcode() == Opcode::Identity;
 }
 
 bool Value::IsPhi() const noexcept {
-    return type == Type::Opaque && inst->Opcode() == Opcode::Phi;
+    return type == Type::Opaque && inst->GetOpcode() == Opcode::Phi;
 }
 
 bool Value::IsEmpty() const noexcept {
diff --git a/src/shader_recompiler/frontend/ir/value.h b/src/shader_recompiler/frontend/ir/value.h
index b27601e704..a0962863d8 100644
--- a/src/shader_recompiler/frontend/ir/value.h
+++ b/src/shader_recompiler/frontend/ir/value.h
@@ -94,7 +94,7 @@ public:
         }
     }
 
-    explicit TypedValue(IR::Inst* inst) : TypedValue(Value(inst)) {}
+    explicit TypedValue(IR::Inst* inst_) : TypedValue(Value(inst_)) {}
 };
 
 using U1 = TypedValue<Type::U1>;
diff --git a/src/shader_recompiler/frontend/maxwell/control_flow.cpp b/src/shader_recompiler/frontend/maxwell/control_flow.cpp
index 847bb19864..cb8ec7eaa3 100644
--- a/src/shader_recompiler/frontend/maxwell/control_flow.cpp
+++ b/src/shader_recompiler/frontend/maxwell/control_flow.cpp
@@ -34,41 +34,37 @@ struct Compare {
 };
 
 u32 BranchOffset(Location pc, Instruction inst) {
-    return pc.Offset() + inst.branch.Offset() + 8;
+    return pc.Offset() + static_cast<u32>(inst.branch.Offset()) + 8u;
 }
 
 void Split(Block* old_block, Block* new_block, Location pc) {
     if (pc <= old_block->begin || pc >= old_block->end) {
         throw InvalidArgument("Invalid address to split={}", pc);
     }
-    *new_block = Block{
-        .begin{pc},
-        .end{old_block->end},
-        .end_class{old_block->end_class},
-        .cond{old_block->cond},
-        .stack{old_block->stack},
-        .branch_true{old_block->branch_true},
-        .branch_false{old_block->branch_false},
-        .function_call{old_block->function_call},
-        .return_block{old_block->return_block},
-        .branch_reg{old_block->branch_reg},
-        .branch_offset{old_block->branch_offset},
-        .indirect_branches{std::move(old_block->indirect_branches)},
-    };
-    *old_block = Block{
-        .begin{old_block->begin},
-        .end{pc},
-        .end_class{EndClass::Branch},
-        .cond{true},
-        .stack{std::move(old_block->stack)},
-        .branch_true{new_block},
-        .branch_false{nullptr},
-        .function_call{},
-        .return_block{},
-        .branch_reg{},
-        .branch_offset{},
-        .indirect_branches{},
-    };
+    *new_block = Block{};
+    new_block->begin = pc;
+    new_block->end = old_block->end;
+    new_block->end_class = old_block->end_class;
+    new_block->cond = old_block->cond;
+    new_block->stack = old_block->stack;
+    new_block->branch_true = old_block->branch_true;
+    new_block->branch_false = old_block->branch_false;
+    new_block->function_call = old_block->function_call;
+    new_block->return_block = old_block->return_block;
+    new_block->branch_reg = old_block->branch_reg;
+    new_block->branch_offset = old_block->branch_offset;
+    new_block->indirect_branches = std::move(old_block->indirect_branches);
+
+    const Location old_begin{old_block->begin};
+    Stack old_stack{std::move(old_block->stack)};
+    *old_block = Block{};
+    old_block->begin = old_begin;
+    old_block->end = pc;
+    old_block->end_class = EndClass::Branch;
+    old_block->cond = IR::Condition(true);
+    old_block->stack = std::move(old_stack);
+    old_block->branch_true = new_block;
+    old_block->branch_false = nullptr;
 }
 
 Token OpcodeToken(Opcode opcode) {
@@ -141,7 +137,7 @@ std::string NameOf(const Block& block) {
 
 void Stack::Push(Token token, Location target) {
     entries.push_back({
-        .token{token},
+        .token = token,
         .target{target},
     });
 }
@@ -177,24 +173,17 @@ bool Block::Contains(Location pc) const noexcept {
 }
 
 Function::Function(ObjectPool<Block>& block_pool, Location start_address)
-    : entrypoint{start_address}, labels{{
-                                     .address{start_address},
-                                     .block{block_pool.Create(Block{
-                                         .begin{start_address},
-                                         .end{start_address},
-                                         .end_class{EndClass::Branch},
-                                         .cond{true},
-                                         .stack{},
-                                         .branch_true{nullptr},
-                                         .branch_false{nullptr},
-                                         .function_call{},
-                                         .return_block{},
-                                         .branch_reg{},
-                                         .branch_offset{},
-                                         .indirect_branches{},
-                                     })},
-                                     .stack{},
-                                 }} {}
+    : entrypoint{start_address} {
+    Label& label{labels.emplace_back()};
+    label.address = start_address;
+    label.block = block_pool.Create(Block{});
+    label.block->begin = start_address;
+    label.block->end = start_address;
+    label.block->end_class = EndClass::Branch;
+    label.block->cond = IR::Condition(true);
+    label.block->branch_true = nullptr;
+    label.block->branch_false = nullptr;
+}
 
 CFG::CFG(Environment& env_, ObjectPool<Block>& block_pool_, Location start_address)
     : env{env_}, block_pool{block_pool_}, program_start{start_address} {
@@ -327,7 +316,8 @@ CFG::AnalysisState CFG::AnalyzeInst(Block* block, FunctionId function_id, Locati
         // Insert the function into the list if it doesn't exist
         const auto it{std::ranges::find(functions, cal_pc, &Function::entrypoint)};
         const bool exists{it != functions.end()};
-        const FunctionId call_id{exists ? std::distance(functions.begin(), it) : functions.size()};
+        const FunctionId call_id{exists ? static_cast<size_t>(std::distance(functions.begin(), it))
+                                        : functions.size()};
         if (!exists) {
             functions.emplace_back(block_pool, cal_pc);
         }
@@ -362,20 +352,14 @@ void CFG::AnalyzeCondInst(Block* block, FunctionId function_id, Location pc,
     }
     // Create a virtual block and a conditional block
     Block* const conditional_block{block_pool.Create()};
-    Block virtual_block{
-        .begin{block->begin.Virtual()},
-        .end{block->begin.Virtual()},
-        .end_class{EndClass::Branch},
-        .cond{cond},
-        .stack{block->stack},
-        .branch_true{conditional_block},
-        .branch_false{nullptr},
-        .function_call{},
-        .return_block{},
-        .branch_reg{},
-        .branch_offset{},
-        .indirect_branches{},
-    };
+    Block virtual_block{};
+    virtual_block.begin = block->begin.Virtual();
+    virtual_block.end = block->begin.Virtual();
+    virtual_block.end_class = EndClass::Branch;
+    virtual_block.stack = block->stack;
+    virtual_block.cond = cond;
+    virtual_block.branch_true = conditional_block;
+    virtual_block.branch_false = nullptr;
     // Save the contents of the visited block in the conditional block
     *conditional_block = std::move(*block);
     // Impersonate the visited block with a virtual block
@@ -444,7 +428,7 @@ CFG::AnalysisState CFG::AnalyzeBRX(Block* block, Location pc, Instruction inst,
         if (!is_absolute) {
             target += pc.Offset();
         }
-        target += brx_table->branch_offset;
+        target += static_cast<unsigned int>(brx_table->branch_offset);
         target += 8;
         targets.push_back(target);
     }
@@ -455,8 +439,8 @@ CFG::AnalysisState CFG::AnalyzeBRX(Block* block, Location pc, Instruction inst,
     for (const u32 target : targets) {
         Block* const branch{AddLabel(block, block->stack, target, function_id)};
         block->indirect_branches.push_back({
-            .block{branch},
-            .address{target},
+            .block = branch,
+            .address = target,
         });
     }
     block->cond = IR::Condition{true};
@@ -523,23 +507,17 @@ Block* CFG::AddLabel(Block* block, Stack stack, Location pc, FunctionId function
     if (label_it != function.labels.end()) {
         return label_it->block;
     }
-    Block* const new_block{block_pool.Create(Block{
-        .begin{pc},
-        .end{pc},
-        .end_class{EndClass::Branch},
-        .cond{true},
-        .stack{stack},
-        .branch_true{nullptr},
-        .branch_false{nullptr},
-        .function_call{},
-        .return_block{},
-        .branch_reg{},
-        .branch_offset{},
-        .indirect_branches{},
-    })};
+    Block* const new_block{block_pool.Create()};
+    new_block->begin = pc;
+    new_block->end = pc;
+    new_block->end_class = EndClass::Branch;
+    new_block->cond = IR::Condition(true);
+    new_block->stack = stack;
+    new_block->branch_true = nullptr;
+    new_block->branch_false = nullptr;
     function.labels.push_back(Label{
         .address{pc},
-        .block{new_block},
+        .block = new_block,
         .stack{std::move(stack)},
     });
     return new_block;
diff --git a/src/shader_recompiler/frontend/maxwell/decode.cpp b/src/shader_recompiler/frontend/maxwell/decode.cpp
index bd85afa1e5..932d19c1d4 100644
--- a/src/shader_recompiler/frontend/maxwell/decode.cpp
+++ b/src/shader_recompiler/frontend/maxwell/decode.cpp
@@ -45,7 +45,7 @@ constexpr MaskValue MaskValueFromEncoding(const char* encoding) {
             bit >>= 1;
         }
     }
-    return MaskValue{.mask{mask}, .value{value}};
+    return MaskValue{.mask = mask, .value = value};
 }
 
 struct InstEncoding {
@@ -56,7 +56,7 @@ constexpr std::array UNORDERED_ENCODINGS{
 #define INST(name, cute, encode)                                                                   \
     InstEncoding{                                                                                  \
         .mask_value{MaskValueFromEncoding(encode)},                                                \
-        .opcode{Opcode::name},                                                                     \
+        .opcode = Opcode::name,                                                                     \
     },
 #include "maxwell.inc"
 #undef INST
@@ -116,9 +116,9 @@ constexpr auto MakeFastLookupTableIndex(size_t index) {
         const size_t value{ToFastLookupIndex(encoding.mask_value.value)};
         if ((index & mask) == value) {
             encodings.at(element) = InstInfo{
-                .high_mask{static_cast<u16>(encoding.mask_value.mask >> MASK_SHIFT)},
-                .high_value{static_cast<u16>(encoding.mask_value.value >> MASK_SHIFT)},
-                .opcode{encoding.opcode},
+                .high_mask = static_cast<u16>(encoding.mask_value.mask >> MASK_SHIFT),
+                .high_value = static_cast<u16>(encoding.mask_value.value >> MASK_SHIFT),
+                .opcode = encoding.opcode,
             };
             ++element;
         }
diff --git a/src/shader_recompiler/frontend/maxwell/indirect_branch_table_track.cpp b/src/shader_recompiler/frontend/maxwell/indirect_branch_table_track.cpp
index 96453509d5..008625cb37 100644
--- a/src/shader_recompiler/frontend/maxwell/indirect_branch_table_track.cpp
+++ b/src/shader_recompiler/frontend/maxwell/indirect_branch_table_track.cpp
@@ -97,11 +97,11 @@ std::optional<IndirectBranchTableInfo> TrackIndirectBranchTable(Environment& env
     }
     const u32 imnmx_immediate{static_cast<u32>(imnmx.immediate.Value())};
     return IndirectBranchTableInfo{
-        .cbuf_index{cbuf_index},
-        .cbuf_offset{cbuf_offset},
-        .num_entries{imnmx_immediate + 1},
-        .branch_offset{brx_offset},
-        .branch_reg{brx_reg},
+        .cbuf_index = cbuf_index,
+        .cbuf_offset = cbuf_offset,
+        .num_entries = imnmx_immediate + 1,
+        .branch_offset = brx_offset,
+        .branch_reg = brx_reg,
     };
 }
 
diff --git a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
index c804c2a8e9..02cef26455 100644
--- a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
+++ b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
@@ -558,7 +558,6 @@ private:
         const Node label{goto_stmt->label};
         const u32 label_id{label->id};
         const Node label_nested_stmt{FindStatementWithLabel(body, goto_stmt)};
-        const auto type{label_nested_stmt->type};
 
         Tree loop_body;
         loop_body.splice(loop_body.begin(), body, label_nested_stmt, goto_stmt);
@@ -566,7 +565,7 @@ private:
         Statement* const variable{pool.Create(Variable{}, label_id)};
         Statement* const loop_stmt{pool.Create(Loop{}, variable, std::move(loop_body), parent)};
         UpdateTreeUp(loop_stmt);
-        const Node loop_node{body.insert(goto_stmt, *loop_stmt)};
+        body.insert(goto_stmt, *loop_stmt);
 
         Statement* const new_goto{pool.Create(Goto{}, variable, label, loop_stmt)};
         loop_stmt->children.push_front(*new_goto);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/double_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/double_add.cpp
index ac1433dea7..5a1b3a8fcb 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/double_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/double_add.cpp
@@ -31,9 +31,9 @@ void DADD(TranslatorVisitor& v, u64 insn, const IR::F64& src_b) {
     const IR::F64 op_b{v.ir.FPAbsNeg(src_b, dadd.abs_b != 0, dadd.neg_b != 0)};
 
     const IR::FpControl control{
-        .no_contraction{true},
-        .rounding{CastFpRounding(dadd.fp_rounding)},
-        .fmz_mode{IR::FmzMode::None},
+        .no_contraction = true,
+        .rounding = CastFpRounding(dadd.fp_rounding),
+        .fmz_mode = IR::FmzMode::None,
     };
 
     v.D(dadd.dest_reg, v.ir.FPAdd(op_a, op_b, control));
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/double_fused_multiply_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/double_fused_multiply_add.cpp
index ff73218629..7238414962 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/double_fused_multiply_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/double_fused_multiply_add.cpp
@@ -25,9 +25,9 @@ void DFMA(TranslatorVisitor& v, u64 insn, const IR::F64& src_b, const IR::F64& s
     const IR::F64 op_c{v.ir.FPAbsNeg(src_c, false, dfma.neg_c != 0)};
 
     const IR::FpControl control{
-        .no_contraction{true},
-        .rounding{CastFpRounding(dfma.fp_rounding)},
-        .fmz_mode{IR::FmzMode::None},
+        .no_contraction = true,
+        .rounding = CastFpRounding(dfma.fp_rounding),
+        .fmz_mode = IR::FmzMode::None,
     };
 
     v.D(dfma.dest_reg, v.ir.FPFma(src_a, op_b, op_c, control));
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/double_multiply.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/double_multiply.cpp
index 3e83d1c95c..4a49299a0b 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/double_multiply.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/double_multiply.cpp
@@ -21,9 +21,9 @@ void DMUL(TranslatorVisitor& v, u64 insn, const IR::F64& src_b) {
 
     const IR::F64 src_a{v.ir.FPAbsNeg(v.D(dmul.src_a_reg), false, dmul.neg != 0)};
     const IR::FpControl control{
-        .no_contraction{true},
-        .rounding{CastFpRounding(dmul.fp_rounding)},
-        .fmz_mode{IR::FmzMode::None},
+        .no_contraction = true,
+        .rounding = CastFpRounding(dmul.fp_rounding),
+        .fmz_mode = IR::FmzMode::None,
     };
 
     v.D(dmul.dest_reg, v.ir.FPMul(src_a, src_b, control));
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_add.cpp
index b39950c849..b8c89810cb 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_add.cpp
@@ -23,9 +23,9 @@ void FADD(TranslatorVisitor& v, u64 insn, bool sat, bool cc, bool ftz, FpRoundin
     const IR::F32 op_a{v.ir.FPAbsNeg(v.F(fadd.src_a), abs_a, neg_a)};
     const IR::F32 op_b{v.ir.FPAbsNeg(src_b, abs_b, neg_b)};
     IR::FpControl control{
-        .no_contraction{true},
-        .rounding{CastFpRounding(fp_rounding)},
-        .fmz_mode{ftz ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = true,
+        .rounding = CastFpRounding(fp_rounding),
+        .fmz_mode = (ftz ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
     IR::F32 value{v.ir.FPAdd(op_a, op_b, control)};
     if (sat) {
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare.cpp
index c02a40209e..80109ca0e5 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare.cpp
@@ -19,8 +19,7 @@ void FCMP(TranslatorVisitor& v, u64 insn, const IR::U32& src_a, const IR::F32& o
     } const fcmp{insn};
 
     const IR::F32 zero{v.ir.Imm32(0.0f)};
-    const IR::F32 neg_zero{v.ir.Imm32(-0.0f)};
-    const IR::FpControl control{.fmz_mode{fcmp.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None}};
+    const IR::FpControl control{.fmz_mode = (fcmp.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None)};
     const IR::U1 cmp_result{FloatingPointCompare(v.ir, operand, zero, fcmp.compare_op, control)};
     const IR::U32 src_reg{v.X(fcmp.src_reg)};
     const IR::U32 result{v.ir.Select(cmp_result, src_reg, src_a)};
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare_and_set.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare_and_set.cpp
index c5417775e1..b9f4ee0d9b 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare_and_set.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_compare_and_set.cpp
@@ -29,9 +29,9 @@ void FSET(TranslatorVisitor& v, u64 insn, const IR::F32& src_b) {
     const IR::F32 op_a{v.ir.FPAbsNeg(v.F(fset.src_a_reg), fset.abs_a != 0, fset.negate_a != 0)};
     const IR::F32 op_b = v.ir.FPAbsNeg(src_b, fset.abs_b != 0, fset.negate_b != 0);
     const IR::FpControl control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{fset.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = (fset.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
 
     IR::U1 pred{v.ir.GetPred(fset.pred)};
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_floating_point.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_floating_point.cpp
index 1e366fde03..035f8782a7 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_floating_point.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_floating_point.cpp
@@ -57,9 +57,9 @@ void F2F(TranslatorVisitor& v, u64 insn, const IR::F16F32F64& src_a, bool abs) {
 
     const bool any_fp64{f2f.src_size == FloatFormat::F64 || f2f.dst_size == FloatFormat::F64};
     IR::FpControl fp_control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{f2f.ftz != 0 && !any_fp64 ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = (f2f.ftz != 0 && !any_fp64 ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
     if (f2f.src_size != f2f.dst_size) {
         fp_control.rounding = CastFpRounding(f2f.rounding);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_integer.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_integer.cpp
index 21ae92be1e..cf3cf1ba69 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_integer.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_conversion_integer.cpp
@@ -123,9 +123,9 @@ void TranslateF2I(TranslatorVisitor& v, u64 insn, const IR::F16F32F64& src_a) {
         fmz_mode = f2i.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None;
     }
     const IR::FpControl fp_control{
-        .no_contraction{true},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{fmz_mode},
+        .no_contraction = true,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = fmz_mode,
     };
     const IR::F16F32F64 op_a{v.ir.FPAbsNeg(src_a, f2i.abs != 0, f2i.neg != 0)};
     const IR::F16F32F64 rounded_value{[&] {
@@ -186,14 +186,14 @@ void TranslateF2I(TranslatorVisitor& v, u64 insn, const IR::F16F32F64& src_a) {
         } else if (f2i.dest_format == DestFormat::I64) {
             handled_special_case = true;
             result = IR::U64{
-                v.ir.Select(v.ir.FPIsNan(op_a), v.ir.Imm64(0x8000'0000'0000'0000ULL), result)};
+                v.ir.Select(v.ir.FPIsNan(op_a), v.ir.Imm64(0x8000'0000'0000'0000UL), result)};
         }
     }
     if (!handled_special_case && is_signed) {
         if (bitsize != 64) {
             result = IR::U32{v.ir.Select(v.ir.FPIsNan(op_a), v.ir.Imm32(0U), result)};
         } else {
-            result = IR::U64{v.ir.Select(v.ir.FPIsNan(op_a), v.ir.Imm64(0ULL), result)};
+            result = IR::U64{v.ir.Select(v.ir.FPIsNan(op_a), v.ir.Imm64(0UL), result)};
         }
     }
 
@@ -211,6 +211,7 @@ void TranslateF2I(TranslatorVisitor& v, u64 insn, const IR::F16F32F64& src_a) {
 
 void TranslatorVisitor::F2I_reg(u64 insn) {
     union {
+        u64 raw;
         F2I base;
         BitField<20, 8, IR::Reg> src_reg;
     } const f2i{insn};
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_fused_multiply_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_fused_multiply_add.cpp
index 18561bc9c7..fa2a7807b7 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_fused_multiply_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_fused_multiply_add.cpp
@@ -24,9 +24,9 @@ void FFMA(TranslatorVisitor& v, u64 insn, const IR::F32& src_b, const IR::F32& s
     const IR::F32 op_b{v.ir.FPAbsNeg(src_b, false, neg_b)};
     const IR::F32 op_c{v.ir.FPAbsNeg(src_c, false, neg_c)};
     const IR::FpControl fp_control{
-        .no_contraction{true},
-        .rounding{CastFpRounding(fp_rounding)},
-        .fmz_mode{CastFmzMode(fmz_mode)},
+        .no_contraction = true,
+        .rounding = CastFpRounding(fp_rounding),
+        .fmz_mode = CastFmzMode(fmz_mode),
     };
     IR::F32 value{v.ir.FPFma(op_a, op_b, op_c, fp_control)};
     if (fmz_mode == FmzMode::FMZ && !sat) {
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_min_max.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_min_max.cpp
index 343d91032b..8ae4375287 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_min_max.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_min_max.cpp
@@ -27,9 +27,9 @@ void FMNMX(TranslatorVisitor& v, u64 insn, const IR::F32& src_b) {
     const IR::F32 op_b{v.ir.FPAbsNeg(src_b, fmnmx.abs_b != 0, fmnmx.negate_b != 0)};
 
     const IR::FpControl control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{fmnmx.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = (fmnmx.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
     IR::F32 max{v.ir.FPMax(op_a, op_b, control)};
     IR::F32 min{v.ir.FPMin(op_a, op_b, control)};
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_multiply.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_multiply.cpp
index 72f0a18ae8..06226b7ce2 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_multiply.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_multiply.cpp
@@ -64,9 +64,9 @@ void FMUL(TranslatorVisitor& v, u64 insn, const IR::F32& src_b, FmzMode fmz_mode
     }
     const IR::F32 op_b{v.ir.FPAbsNeg(src_b, false, neg_b)};
     const IR::FpControl fp_control{
-        .no_contraction{true},
-        .rounding{CastFpRounding(fp_rounding)},
-        .fmz_mode{CastFmzMode(fmz_mode)},
+        .no_contraction = true,
+        .rounding = CastFpRounding(fp_rounding),
+        .fmz_mode = CastFmzMode(fmz_mode),
     };
     IR::F32 value{v.ir.FPMul(op_a, op_b, fp_control)};
     if (fmz_mode == FmzMode::FMZ && !sat) {
@@ -124,4 +124,4 @@ void TranslatorVisitor::FMUL32I(u64 insn) {
          fmul32i.sat != 0, fmul32i.cc != 0, false);
 }
 
-} // namespace Shader::Maxwell
\ No newline at end of file
+} // namespace Shader::Maxwell
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_set_predicate.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_set_predicate.cpp
index 8ff9db8438..5f93a15130 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_set_predicate.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_set_predicate.cpp
@@ -29,9 +29,9 @@ void FSETP(TranslatorVisitor& v, u64 insn, const IR::F32& src_b) {
     const IR::F32 op_a{v.ir.FPAbsNeg(v.F(fsetp.src_a_reg), fsetp.abs_a != 0, fsetp.negate_a != 0)};
     const IR::F32 op_b = v.ir.FPAbsNeg(src_b, fsetp.abs_b != 0, fsetp.negate_b != 0);
     const IR::FpControl control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{fsetp.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = (fsetp.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
 
     const BooleanOp bop{fsetp.bop};
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_swizzled_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_swizzled_add.cpp
index e42921a216..7550a8d4c4 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_swizzled_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/floating_point_swizzled_add.cpp
@@ -28,9 +28,9 @@ void TranslatorVisitor::FSWZADD(u64 insn) {
     const IR::U32 swizzle{ir.Imm32(static_cast<u32>(fswzadd.swizzle))};
 
     const IR::FpControl fp_control{
-        .no_contraction{false},
-        .rounding{CastFpRounding(fswzadd.round)},
-        .fmz_mode{fswzadd.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = false,
+        .rounding = CastFpRounding(fswzadd.round),
+        .fmz_mode = (fswzadd.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
 
     const IR::F32 result{ir.FSwizzleAdd(src_a, src_b, swizzle, fp_control)};
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_add.cpp
index 03e7bf047d..f2738a93b2 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_add.cpp
@@ -34,9 +34,9 @@ void HADD2(TranslatorVisitor& v, u64 insn, Merge merge, bool ftz, bool sat, bool
     rhs_b = v.ir.FPAbsNeg(rhs_b, abs_b, neg_b);
 
     const IR::FpControl fp_control{
-        .no_contraction{true},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{ftz ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = true,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = (ftz ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
     IR::F16F32F64 lhs{v.ir.FPAdd(lhs_a, lhs_b, fp_control)};
     IR::F16F32F64 rhs{v.ir.FPAdd(rhs_a, rhs_b, fp_control)};
@@ -102,8 +102,9 @@ void TranslatorVisitor::HADD2_imm(u64 insn) {
         BitField<20, 9, u64> low;
     } const hadd2{insn};
 
-    const u32 imm{static_cast<u32>(hadd2.low << 6) | ((hadd2.neg_low != 0 ? 1 : 0) << 15) |
-                  static_cast<u32>(hadd2.high << 22) | ((hadd2.neg_high != 0 ? 1 : 0) << 31)};
+    const u32 imm{
+        static_cast<u32>(hadd2.low << 6) | static_cast<u32>((hadd2.neg_low != 0 ? 1 : 0) << 15) |
+        static_cast<u32>(hadd2.high << 22) | static_cast<u32>((hadd2.neg_high != 0 ? 1 : 0) << 31)};
     HADD2(*this, insn, hadd2.sat != 0, false, false, Swizzle::H1_H0, ir.Imm32(imm));
 }
 
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_fused_multiply_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_fused_multiply_add.cpp
index 8b234bd6ae..fd79867016 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_fused_multiply_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_fused_multiply_add.cpp
@@ -41,9 +41,9 @@ void HFMA2(TranslatorVisitor& v, u64 insn, Merge merge, Swizzle swizzle_a, bool
     rhs_c = v.ir.FPAbsNeg(rhs_c, false, neg_c);
 
     const IR::FpControl fp_control{
-        .no_contraction{true},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{HalfPrecision2FmzMode(precision)},
+        .no_contraction = true,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = HalfPrecision2FmzMode(precision),
     };
     IR::F16F32F64 lhs{v.ir.FPFma(lhs_a, lhs_b, lhs_c, fp_control)};
     IR::F16F32F64 rhs{v.ir.FPFma(rhs_a, rhs_b, rhs_c, fp_control)};
@@ -143,8 +143,9 @@ void TranslatorVisitor::HFMA2_imm(u64 insn) {
         BitField<57, 2, HalfPrecision> precision;
     } const hfma2{insn};
 
-    const u32 imm{static_cast<u32>(hfma2.low << 6) | ((hfma2.neg_low != 0 ? 1 : 0) << 15) |
-                  static_cast<u32>(hfma2.high << 22) | ((hfma2.neg_high != 0 ? 1 : 0) << 31)};
+    const u32 imm{
+        static_cast<u32>(hfma2.low << 6) | static_cast<u32>((hfma2.neg_low != 0 ? 1 : 0) << 15) |
+        static_cast<u32>(hfma2.high << 22) | static_cast<u32>((hfma2.neg_high != 0 ? 1 : 0) << 31)};
 
     HFMA2(*this, insn, false, hfma2.neg_c != 0, Swizzle::H1_H0, hfma2.swizzle_c, ir.Imm32(imm),
           GetReg39(insn), hfma2.saturate != 0, hfma2.precision);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_multiply.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_multiply.cpp
index 2451a6ef68..3f548ce761 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_multiply.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_multiply.cpp
@@ -35,9 +35,9 @@ void HMUL2(TranslatorVisitor& v, u64 insn, Merge merge, bool sat, bool abs_a, bo
     rhs_b = v.ir.FPAbsNeg(rhs_b, abs_b, neg_b);
 
     const IR::FpControl fp_control{
-        .no_contraction{true},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{HalfPrecision2FmzMode(precision)},
+        .no_contraction = true,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = HalfPrecision2FmzMode(precision),
     };
     IR::F16F32F64 lhs{v.ir.FPMul(lhs_a, lhs_b, fp_control)};
     IR::F16F32F64 rhs{v.ir.FPMul(rhs_a, rhs_b, fp_control)};
@@ -119,8 +119,9 @@ void TranslatorVisitor::HMUL2_imm(u64 insn) {
         BitField<44, 1, u64> abs_a;
     } const hmul2{insn};
 
-    const u32 imm{static_cast<u32>(hmul2.low << 6) | ((hmul2.neg_low != 0 ? 1 : 0) << 15) |
-                  static_cast<u32>(hmul2.high << 22) | ((hmul2.neg_high != 0 ? 1 : 0) << 31)};
+    const u32 imm{
+        static_cast<u32>(hmul2.low << 6) | static_cast<u32>((hmul2.neg_low != 0 ? 1 : 0) << 15) |
+        static_cast<u32>(hmul2.high << 22) | static_cast<u32>((hmul2.neg_high != 0 ? 1 : 0) << 31)};
     HMUL2(*this, insn, hmul2.sat != 0, hmul2.abs_a != 0, hmul2.neg_a != 0, false, false,
           Swizzle::H1_H0, ir.Imm32(imm));
 }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set.cpp
index 7f1f4b88c8..cca5b831fd 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set.cpp
@@ -41,9 +41,9 @@ void HSET2(TranslatorVisitor& v, u64 insn, const IR::U32& src_b, bool bf, bool f
     rhs_b = v.ir.FPAbsNeg(rhs_b, abs_b, neg_b);
 
     const IR::FpControl control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{ftz ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = (ftz ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
 
     IR::U1 pred{v.ir.GetPred(hset2.pred)};
@@ -106,8 +106,9 @@ void TranslatorVisitor::HSET2_imm(u64 insn) {
         BitField<20, 9, u64> low;
     } const hset2{insn};
 
-    const u32 imm{static_cast<u32>(hset2.low << 6) | ((hset2.neg_low != 0 ? 1 : 0) << 15) |
-                  static_cast<u32>(hset2.high << 22) | ((hset2.neg_high != 0 ? 1 : 0) << 31)};
+    const u32 imm{
+        static_cast<u32>(hset2.low << 6) | static_cast<u32>((hset2.neg_low != 0 ? 1 : 0) << 15) |
+        static_cast<u32>(hset2.high << 22) | static_cast<u32>((hset2.neg_high != 0 ? 1 : 0) << 31)};
 
     HSET2(*this, insn, ir.Imm32(imm), hset2.bf != 0, hset2.ftz != 0, false, false, hset2.compare_op,
           Swizzle::H1_H0);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set_predicate.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set_predicate.cpp
index 3e2a23c92d..b3931dae32 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set_predicate.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/half_floating_point_set_predicate.cpp
@@ -43,9 +43,9 @@ void HSETP2(TranslatorVisitor& v, u64 insn, const IR::U32& src_b, bool neg_b, bo
     rhs_b = v.ir.FPAbsNeg(rhs_b, abs_b, neg_b);
 
     const IR::FpControl control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::DontCare},
-        .fmz_mode{hsetp2.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::DontCare,
+        .fmz_mode = (hsetp2.ftz != 0 ? IR::FmzMode::FTZ : IR::FmzMode::None),
     };
 
     IR::U1 pred{v.ir.GetPred(hsetp2.pred)};
@@ -106,8 +106,10 @@ void TranslatorVisitor::HSETP2_imm(u64 insn) {
         BitField<20, 9, u64> low;
     } const hsetp2{insn};
 
-    const u32 imm{static_cast<u32>(hsetp2.low << 6) | ((hsetp2.neg_low != 0 ? 1 : 0) << 15) |
-                  static_cast<u32>(hsetp2.high << 22) | ((hsetp2.neg_high != 0 ? 1 : 0) << 31)};
+    const u32 imm{static_cast<u32>(hsetp2.low << 6) |
+                  static_cast<u32>((hsetp2.neg_low != 0 ? 1 : 0) << 15) |
+                  static_cast<u32>(hsetp2.high << 22) |
+                  static_cast<u32>((hsetp2.neg_high != 0 ? 1 : 0) << 31)};
 
     HSETP2(*this, insn, ir.Imm32(imm), false, false, Swizzle::H1_H0, hsetp2.compare_op,
            hsetp2.h_and != 0);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/impl.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/impl.cpp
index 30b570ce4d..88bbac0a50 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/impl.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/impl.cpp
@@ -49,7 +49,7 @@ void TranslatorVisitor::L(IR::Reg dest_reg, const IR::U64& value) {
     }
     const IR::Value result{ir.UnpackUint2x32(value)};
     for (int i = 0; i < 2; i++) {
-        X(dest_reg + i, IR::U32{ir.CompositeExtract(result, i)});
+        X(dest_reg + i, IR::U32{ir.CompositeExtract(result, static_cast<size_t>(i))});
     }
 }
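
[Editorial note] CompositeExtract takes its element index as size_t, so the signed loop counter needs a widening cast to build cleanly with sign-conversion warnings enabled. A self-contained stand-in (CompositeExtract here is a local model, not the real emitter API):

    #include <cstddef>

    int CompositeExtract(const int* vector, std::size_t element) {
        return vector[element];
    }

    int main() {
        const int vector[2]{10, 20};
        int sum{0};
        for (int i = 0; i < 2; ++i) {
            // Without the cast, -Wsign-conversion flags the int -> size_t argument.
            sum += CompositeExtract(vector, static_cast<std::size_t>(i));
        }
        return sum == 30 ? 0 : 1;
    }

The same cast shows up below in I2F, LDC, LDS, LDG, the texture fetch and the texture query hunks.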
 
@@ -63,7 +63,7 @@ void TranslatorVisitor::D(IR::Reg dest_reg, const IR::F64& value) {
     }
     const IR::Value result{ir.UnpackDouble2x32(value)};
     for (int i = 0; i < 2; i++) {
-        X(dest_reg + i, IR::U32{ir.CompositeExtract(result, i)});
+        X(dest_reg + i, IR::U32{ir.CompositeExtract(result, static_cast<size_t>(i))});
     }
 }
 
@@ -156,7 +156,7 @@ IR::F64 TranslatorVisitor::GetDoubleCbuf(u64 insn) {
     const auto [binding, offset_value]{CbufAddr(insn)};
     const bool unaligned{cbuf.unaligned != 0};
     const u32 offset{offset_value.U32()};
-    const IR::Value addr{unaligned ? offset | 4 : (offset & ~7) | 4};
+    const IR::Value addr{unaligned ? offset | 4u : (offset & ~7u) | 4u};
 
     const IR::U32 value{ir.GetCbuf(binding, IR::U32{addr})};
     const IR::U32 lower_bits{CbufLowerBits(ir, unaligned, binding, offset)};
@@ -200,7 +200,7 @@ IR::F32 TranslatorVisitor::GetFloatImm20(u64 insn) {
         BitField<20, 19, u64> value;
         BitField<56, 1, u64> is_negative;
     } const imm{insn};
-    const u32 sign_bit{imm.is_negative != 0 ? (1ULL << 31) : 0};
+    const u32 sign_bit{static_cast<u32>(imm.is_negative != 0 ? (1ULL << 31) : 0)};
     const u32 value{static_cast<u32>(imm.value) << 12};
     return ir.Imm32(Common::BitCast<f32>(value | sign_bit));
 }
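
[Editorial note] In GetFloatImm20, the ternary `imm.is_negative != 0 ? (1ULL << 31) : 0` yields an unsigned long long, and brace-initializing a u32 from it is an ill-formed narrowing conversion; the added cast makes the truncation explicit. A reduced sketch (the u32 alias is a stand-in):

    #include <cstdint>

    using u32 = std::uint32_t; // stand-in alias

    constexpr u32 SignBit(bool is_negative) {
        // u32 sign_bit{is_negative ? (1ULL << 31) : 0}; would be rejected (narrowing)
        return static_cast<u32>(is_negative ? (1ULL << 31) : 0);
    }

    static_assert(SignBit(true) == 0x8000'0000u);
    static_assert(SignBit(false) == 0u);
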
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/integer_add.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/integer_add.cpp
index 1493e18151..8ffd84867d 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/integer_add.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/integer_add.cpp
@@ -68,7 +68,6 @@ void IADD(TranslatorVisitor& v, u64 insn, IR::U32 op_b) {
     } const iadd{insn};
 
     const bool po{iadd.three_for_po == 3};
-    const bool neg_a{!po && iadd.neg_a != 0};
     if (!po && iadd.neg_b != 0) {
         op_b = v.ir.INeg(op_b);
     }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/integer_floating_point_conversion.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/integer_floating_point_conversion.cpp
index e8b5ae1d2d..5a0fc36a03 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/integer_floating_point_conversion.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/integer_floating_point_conversion.cpp
@@ -131,7 +131,7 @@ void I2F(TranslatorVisitor& v, u64 insn, IR::U32U64 src) {
         }
         const IR::Value vector{v.ir.UnpackDouble2x32(value)};
         for (int i = 0; i < 2; ++i) {
-            v.X(i2f.dest_reg + i, IR::U32{v.ir.CompositeExtract(vector, i)});
+            v.X(i2f.dest_reg + i, IR::U32{v.ir.CompositeExtract(vector, static_cast<size_t>(i))});
         }
         break;
     }
@@ -170,4 +170,4 @@ void TranslatorVisitor::I2F_imm(u64 insn) {
     }
 }
 
-} // namespace Shader::Maxwell
\ No newline at end of file
+} // namespace Shader::Maxwell
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/load_constant.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/load_constant.cpp
index ae3ecea325..2300088e38 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/load_constant.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/load_constant.cpp
@@ -50,7 +50,7 @@ void TranslatorVisitor::LDC(u64 insn) {
         }
         const IR::Value vector{ir.GetCbuf(index, offset, 64, false)};
         for (int i = 0; i < 2; ++i) {
-            X(ldc.dest_reg + i, IR::U32{ir.CompositeExtract(vector, i)});
+            X(ldc.dest_reg + i, IR::U32{ir.CompositeExtract(vector, static_cast<size_t>(i))});
         }
         break;
     }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_local_shared.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_local_shared.cpp
index 68963c8ea6..e24b497210 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_local_shared.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_local_shared.cpp
@@ -40,7 +40,6 @@ std::pair<int, bool> GetSize(u64 insn) {
         BitField<48, 3, Size> size;
     } const encoding{insn};
 
-    const Size nnn = encoding.size;
     switch (encoding.size) {
     case Size::U8:
         return {8, false};
@@ -99,7 +98,7 @@ void TranslatorVisitor::LDL(u64 insn) {
     case 32:
     case 64:
     case 128:
-        if (!IR::IsAligned(dest, bit_size / 32)) {
+        if (!IR::IsAligned(dest, static_cast<size_t>(bit_size / 32))) {
             throw NotImplementedException("Unaligned destination register {}", dest);
         }
         X(dest, ir.LoadLocal(word_offset));
@@ -123,11 +122,11 @@ void TranslatorVisitor::LDS(u64 insn) {
         break;
     case 64:
     case 128:
-        if (!IR::IsAligned(dest, bit_size / 32)) {
+        if (!IR::IsAligned(dest, static_cast<size_t>(bit_size / 32))) {
             throw NotImplementedException("Unaligned destination register {}", dest);
         }
         for (int element = 0; element < bit_size / 32; ++element) {
-            X(dest + element, IR::U32{ir.CompositeExtract(value, element)});
+            X(dest + element, IR::U32{ir.CompositeExtract(value, static_cast<size_t>(element))});
         }
         break;
     }
@@ -156,7 +155,7 @@ void TranslatorVisitor::STL(u64 insn) {
     case 32:
     case 64:
     case 128:
-        if (!IR::IsAligned(reg, bit_size / 32)) {
+        if (!IR::IsAligned(reg, static_cast<size_t>(bit_size / 32))) {
             throw NotImplementedException("Unaligned source register");
         }
         ir.WriteLocal(word_offset, src);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_memory.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_memory.cpp
index 71688b1d78..36c5cff2f1 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_memory.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_memory.cpp
@@ -114,7 +114,7 @@ void TranslatorVisitor::LDG(u64 insn) {
         }
         const IR::Value vector{ir.LoadGlobal64(address)};
         for (int i = 0; i < 2; ++i) {
-            X(dest_reg + i, IR::U32{ir.CompositeExtract(vector, i)});
+            X(dest_reg + i, IR::U32{ir.CompositeExtract(vector, static_cast<size_t>(i))});
         }
         break;
     }
@@ -125,7 +125,7 @@ void TranslatorVisitor::LDG(u64 insn) {
         }
         const IR::Value vector{ir.LoadGlobal128(address)};
         for (int i = 0; i < 4; ++i) {
-            X(dest_reg + i, IR::U32{ir.CompositeExtract(vector, i)});
+            X(dest_reg + i, IR::U32{ir.CompositeExtract(vector, static_cast<size_t>(i))});
         }
         break;
     }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch.cpp
index b2da079f9c..95d4165863 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch.cpp
@@ -199,7 +199,7 @@ void Impl(TranslatorVisitor& v, u64 insn, bool aoffi, Blod blod, bool lc,
         if (tex.dc != 0) {
             value = element < 3 ? IR::F32{sample} : v.ir.Imm32(1.0f);
         } else {
-            value = IR::F32{v.ir.CompositeExtract(sample, element)};
+            value = IR::F32{v.ir.CompositeExtract(sample, static_cast<size_t>(element))};
         }
         v.F(dest_reg, value);
         ++dest_reg;
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch_swizzled.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch_swizzled.cpp
index d5fda20f42..fe2c7db85d 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch_swizzled.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_fetch_swizzled.cpp
@@ -53,7 +53,7 @@ constexpr std::array RGBA_LUT{
     R | G | B | A, //
 };
 
-void CheckAlignment(IR::Reg reg, int alignment) {
+void CheckAlignment(IR::Reg reg, size_t alignment) {
     if (!IR::IsAligned(reg, alignment)) {
         throw NotImplementedException("Unaligned source register {}", reg);
     }
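
[Editorial note] Widening CheckAlignment's parameter to size_t matches IR::IsAligned, which the earlier load/store hunks also now call with `static_cast<size_t>(bit_size / 32)`. A hedged sketch of the invariant, assuming IsAligned checks the register index modulo the alignment (Reg here is a stand-in enum, not the real one):

    #include <cstddef>

    enum class Reg : int { R0, R1, R2, R3, R4 }; // stand-in register enum

    constexpr bool IsAligned(Reg reg, std::size_t alignment) {
        return static_cast<std::size_t>(reg) % alignment == 0;
    }

    static_assert(IsAligned(Reg::R4, 2));  // R4:R5 is a valid 64-bit pair
    static_assert(!IsAligned(Reg::R3, 2)); // R3:R4 is not
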
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_gather_swizzled.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_gather_swizzled.cpp
index beab515ad9..2ba9c1018a 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_gather_swizzled.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_gather_swizzled.cpp
@@ -37,7 +37,7 @@ union Encoding {
     BitField<36, 13, u64> cbuf_offset;
 };
 
-void CheckAlignment(IR::Reg reg, int alignment) {
+void CheckAlignment(IR::Reg reg, size_t alignment) {
     if (!IR::IsAligned(reg, alignment)) {
         throw NotImplementedException("Unaligned source register {}", reg);
     }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_load_swizzled.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_load_swizzled.cpp
index 623b8fc23b..0863bdfcd4 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_load_swizzled.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_load_swizzled.cpp
@@ -56,7 +56,7 @@ union Encoding {
     BitField<53, 4, u64> encoding;
 };
 
-void CheckAlignment(IR::Reg reg, int alignment) {
+void CheckAlignment(IR::Reg reg, size_t alignment) {
     if (!IR::IsAligned(reg, alignment)) {
         throw NotImplementedException("Unaligned source register {}", reg);
     }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_query.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_query.cpp
index 8c7e04bcab..0459e5473e 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_query.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_query.cpp
@@ -54,7 +54,7 @@ void Impl(TranslatorVisitor& v, u64 insn, std::optional<u32> cbuf_offset) {
         if (((txq.mask >> element) & 1) == 0) {
             continue;
         }
-        v.X(dest_reg, IR::U32{v.ir.CompositeExtract(query, element)});
+        v.X(dest_reg, IR::U32{v.ir.CompositeExtract(query, static_cast<size_t>(element))});
         ++dest_reg;
     }
 }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/video_set_predicate.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/video_set_predicate.cpp
index af13b3fccf..ec5e74f6d8 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/video_set_predicate.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/video_set_predicate.cpp
@@ -69,7 +69,6 @@ void TranslatorVisitor::VSETP(u64 insn) {
     const IR::U32 src_b{is_b_imm ? ir.Imm32(static_cast<u32>(vsetp.src_b_imm)) : GetReg20(insn)};
 
     const u32 a_selector{static_cast<u32>(vsetp.src_a_selector)};
-    const u32 b_selector{is_b_imm ? 0U : static_cast<u32>(vsetp.src_b_selector)};
     const VideoWidth a_width{vsetp.src_a_width};
     const VideoWidth b_width{GetVideoSourceWidth(vsetp.src_b_width, is_b_imm)};
 
diff --git a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
index 1c03ee82af..edbfcd3082 100644
--- a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
+++ b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
@@ -6,6 +6,7 @@
 #include "shader_recompiler/frontend/ir/microinstruction.h"
 #include "shader_recompiler/frontend/ir/modifiers.h"
 #include "shader_recompiler/frontend/ir/program.h"
+#include "shader_recompiler/ir_opt/passes.h"
 #include "shader_recompiler/shader_info.h"
 
 namespace Shader::Optimization {
@@ -22,8 +23,8 @@ void AddConstantBufferDescriptor(Info& info, u32 index, u32 count) {
     auto& cbufs{info.constant_buffer_descriptors};
     cbufs.insert(std::ranges::lower_bound(cbufs, index, {}, &ConstantBufferDescriptor::index),
                  ConstantBufferDescriptor{
-                     .index{index},
-                     .count{1},
+                     .index = index,
+                     .count = 1,
                  });
 }
 
@@ -91,7 +92,7 @@ void SetAttribute(Info& info, IR::Attribute attribute) {
 }
 
 void VisitUsages(Info& info, IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::CompositeConstructF16x2:
     case IR::Opcode::CompositeConstructF16x3:
     case IR::Opcode::CompositeConstructF16x4:
@@ -209,7 +210,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
     default:
         break;
     }
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::GetCbufU8:
     case IR::Opcode::GetCbufS8:
     case IR::Opcode::UndefU8:
@@ -236,7 +237,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
     default:
         break;
     }
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::GetCbufU16:
     case IR::Opcode::GetCbufS16:
     case IR::Opcode::UndefU16:
@@ -271,7 +272,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
     default:
         break;
     }
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::UndefU64:
     case IR::Opcode::LoadGlobalU8:
     case IR::Opcode::LoadGlobalS8:
@@ -314,7 +315,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
     default:
         break;
     }
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::DemoteToHelperInvocation:
         info.uses_demote_to_helper_invocation = true;
         break;
@@ -361,7 +362,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
         } else {
             throw NotImplementedException("Constant buffer with non-immediate index");
         }
-        switch (inst.Opcode()) {
+        switch (inst.GetOpcode()) {
         case IR::Opcode::GetCbufU8:
         case IR::Opcode::GetCbufS8:
             info.used_constant_buffer_types |= IR::Type::U8;
@@ -443,7 +444,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
 }
 
 void VisitFpModifiers(Info& info, IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::FPAdd16:
     case IR::Opcode::FPFma16:
     case IR::Opcode::FPMul16:
@@ -540,7 +541,6 @@ void GatherInfoFromHeader(Environment& env, Info& info) {
         info.stores_position |= header.vtg.omap_systemb.position != 0;
     }
 }
-
 } // Anonymous namespace
 
 void CollectShaderInfoPass(Environment& env, IR::Program& program) {
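
[Editorial note] The mechanical `Opcode()` to `GetOpcode()` rename that dominates the remaining hunks gives the accessor a name distinct from its return type; a getter spelled exactly like the unqualified type name `Opcode` is easy to misread and awkward in unqualified lookup. The shape, with stand-in types:

    enum class Opcode { Identity, IAdd32 }; // stand-in for IR::Opcode

    class Inst { // reduced stand-in for IR::Inst
    public:
        explicit Inst(Opcode op) : op_{op} {}
        [[nodiscard]] Opcode GetOpcode() const noexcept { return op_; }

    private:
        Opcode op_;
    };

    int main() {
        const Inst inst{Opcode::IAdd32};
        return inst.GetOpcode() == Opcode::IAdd32 ? 0 : 1;
    }
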
diff --git a/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp b/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp
index 1720d7a092..61fbbe04cb 100644
--- a/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp
+++ b/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp
@@ -58,7 +58,7 @@ bool FoldCommutative(IR::Inst& inst, ImmFn&& imm_fn) {
     }
     if (is_lhs_immediate && !is_rhs_immediate) {
         IR::Inst* const rhs_inst{rhs.InstRecursive()};
-        if (rhs_inst->Opcode() == inst.Opcode() && rhs_inst->Arg(1).IsImmediate()) {
+        if (rhs_inst->GetOpcode() == inst.GetOpcode() && rhs_inst->Arg(1).IsImmediate()) {
             const auto combined{imm_fn(Arg<T>(lhs), Arg<T>(rhs_inst->Arg(1)))};
             inst.SetArg(0, rhs_inst->Arg(0));
             inst.SetArg(1, IR::Value{combined});
@@ -70,7 +70,7 @@ bool FoldCommutative(IR::Inst& inst, ImmFn&& imm_fn) {
     }
     if (!is_lhs_immediate && is_rhs_immediate) {
         const IR::Inst* const lhs_inst{lhs.InstRecursive()};
-        if (lhs_inst->Opcode() == inst.Opcode() && lhs_inst->Arg(1).IsImmediate()) {
+        if (lhs_inst->GetOpcode() == inst.GetOpcode() && lhs_inst->Arg(1).IsImmediate()) {
             const auto combined{imm_fn(Arg<T>(rhs), Arg<T>(lhs_inst->Arg(1)))};
             inst.SetArg(0, lhs_inst->Arg(0));
             inst.SetArg(1, IR::Value{combined});
@@ -123,7 +123,8 @@ bool FoldXmadMultiply(IR::Block& block, IR::Inst& inst) {
         return false;
     }
     IR::Inst* const lhs_shl{lhs_arg.InstRecursive()};
-    if (lhs_shl->Opcode() != IR::Opcode::ShiftLeftLogical32 || lhs_shl->Arg(1) != IR::Value{16U}) {
+    if (lhs_shl->GetOpcode() != IR::Opcode::ShiftLeftLogical32 ||
+        lhs_shl->Arg(1) != IR::Value{16U}) {
         return false;
     }
     if (lhs_shl->Arg(0).IsImmediate()) {
@@ -131,7 +132,7 @@ bool FoldXmadMultiply(IR::Block& block, IR::Inst& inst) {
     }
     IR::Inst* const lhs_mul{lhs_shl->Arg(0).InstRecursive()};
     IR::Inst* const rhs_mul{rhs_arg.InstRecursive()};
-    if (lhs_mul->Opcode() != IR::Opcode::IMul32 || rhs_mul->Opcode() != IR::Opcode::IMul32) {
+    if (lhs_mul->GetOpcode() != IR::Opcode::IMul32 || rhs_mul->GetOpcode() != IR::Opcode::IMul32) {
         return false;
     }
     if (lhs_mul->Arg(1).Resolve() != rhs_mul->Arg(1).Resolve()) {
@@ -143,10 +144,10 @@ bool FoldXmadMultiply(IR::Block& block, IR::Inst& inst) {
     }
     IR::Inst* const lhs_bfe{lhs_mul->Arg(0).InstRecursive()};
     IR::Inst* const rhs_bfe{rhs_mul->Arg(0).InstRecursive()};
-    if (lhs_bfe->Opcode() != IR::Opcode::BitFieldUExtract) {
+    if (lhs_bfe->GetOpcode() != IR::Opcode::BitFieldUExtract) {
         return false;
     }
-    if (rhs_bfe->Opcode() != IR::Opcode::BitFieldUExtract) {
+    if (rhs_bfe->GetOpcode() != IR::Opcode::BitFieldUExtract) {
         return false;
     }
     if (lhs_bfe->Arg(1) != IR::Value{16U} || lhs_bfe->Arg(2) != IR::Value{16U}) {
@@ -194,8 +195,9 @@ void FoldISub32(IR::Inst& inst) {
     // ISub32 is generally used to subtract two constant buffers; compare and replace this with
     // zero if they are equal.
     const auto equal_cbuf{[](IR::Inst* a, IR::Inst* b) {
-        return a->Opcode() == IR::Opcode::GetCbufU32 && b->Opcode() == IR::Opcode::GetCbufU32 &&
-               a->Arg(0) == b->Arg(0) && a->Arg(1) == b->Arg(1);
+        return a->GetOpcode() == IR::Opcode::GetCbufU32 &&
+               b->GetOpcode() == IR::Opcode::GetCbufU32 && a->Arg(0) == b->Arg(0) &&
+               a->Arg(1) == b->Arg(1);
     }};
     IR::Inst* op_a{inst.Arg(0).InstRecursive()};
     IR::Inst* op_b{inst.Arg(1).InstRecursive()};
@@ -204,15 +206,15 @@ void FoldISub32(IR::Inst& inst) {
         return;
     }
     // It's also possible a value is being added to a cbuf and then subtracted
-    if (op_b->Opcode() == IR::Opcode::IAdd32) {
+    if (op_b->GetOpcode() == IR::Opcode::IAdd32) {
         // Canonicalize local variables to simplify the following logic
         std::swap(op_a, op_b);
     }
-    if (op_b->Opcode() != IR::Opcode::GetCbufU32) {
+    if (op_b->GetOpcode() != IR::Opcode::GetCbufU32) {
         return;
     }
     IR::Inst* const inst_cbuf{op_b};
-    if (op_a->Opcode() != IR::Opcode::IAdd32) {
+    if (op_a->GetOpcode() != IR::Opcode::IAdd32) {
         return;
     }
     IR::Value add_op_a{op_a->Arg(0)};
@@ -250,7 +252,8 @@ void FoldFPMul32(IR::Inst& inst) {
     }
     IR::Inst* const lhs_op{lhs_value.InstRecursive()};
     IR::Inst* const rhs_op{rhs_value.InstRecursive()};
-    if (lhs_op->Opcode() != IR::Opcode::FPMul32 || rhs_op->Opcode() != IR::Opcode::FPRecip32) {
+    if (lhs_op->GetOpcode() != IR::Opcode::FPMul32 ||
+        rhs_op->GetOpcode() != IR::Opcode::FPRecip32) {
         return;
     }
     const IR::Value recip_source{rhs_op->Arg(0)};
@@ -260,8 +263,8 @@ void FoldFPMul32(IR::Inst& inst) {
     }
     IR::Inst* const attr_a{recip_source.InstRecursive()};
     IR::Inst* const attr_b{lhs_mul_source.InstRecursive()};
-    if (attr_a->Opcode() != IR::Opcode::GetAttribute ||
-        attr_b->Opcode() != IR::Opcode::GetAttribute) {
+    if (attr_a->GetOpcode() != IR::Opcode::GetAttribute ||
+        attr_b->GetOpcode() != IR::Opcode::GetAttribute) {
         return;
     }
     if (attr_a->Arg(0).Attribute() == attr_b->Arg(0).Attribute()) {
@@ -304,7 +307,7 @@ void FoldLogicalNot(IR::Inst& inst) {
         return;
     }
     IR::Inst* const arg{value.InstRecursive()};
-    if (arg->Opcode() == IR::Opcode::LogicalNot) {
+    if (arg->GetOpcode() == IR::Opcode::LogicalNot) {
         inst.ReplaceUsesWith(arg->Arg(0));
     }
 }
@@ -317,12 +320,12 @@ void FoldBitCast(IR::Inst& inst, IR::Opcode reverse) {
         return;
     }
     IR::Inst* const arg_inst{value.InstRecursive()};
-    if (arg_inst->Opcode() == reverse) {
+    if (arg_inst->GetOpcode() == reverse) {
         inst.ReplaceUsesWith(arg_inst->Arg(0));
         return;
     }
     if constexpr (op == IR::Opcode::BitCastF32U32) {
-        if (arg_inst->Opcode() == IR::Opcode::GetCbufU32) {
+        if (arg_inst->GetOpcode() == IR::Opcode::GetCbufU32) {
             // Replace the bitcast with a typed constant buffer read
             inst.ReplaceOpcode(IR::Opcode::GetCbufF32);
             inst.SetArg(0, arg_inst->Arg(0));
@@ -338,7 +341,7 @@ void FoldInverseFunc(IR::Inst& inst, IR::Opcode reverse) {
         return;
     }
     IR::Inst* const arg_inst{value.InstRecursive()};
-    if (arg_inst->Opcode() == reverse) {
+    if (arg_inst->GetOpcode() == reverse) {
         inst.ReplaceUsesWith(arg_inst->Arg(0));
         return;
     }
@@ -347,7 +350,7 @@ void FoldInverseFunc(IR::Inst& inst, IR::Opcode reverse) {
 template <typename Func, size_t... I>
 IR::Value EvalImmediates(const IR::Inst& inst, Func&& func, std::index_sequence<I...>) {
     using Traits = LambdaTraits<decltype(func)>;
-    return IR::Value{func(Arg<Traits::ArgType<I>>(inst.Arg(I))...)};
+    return IR::Value{func(Arg<typename Traits::template ArgType<I>>(inst.Arg(I))...)};
 }
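
[Editorial note] The EvalImmediates fix above is a classic dependent-name repair: inside a template, `Traits::ArgType<I>` is a dependent name, so the compiler needs `typename` to know it names a type and `template` to parse `ArgType` as a member template (some front ends accept the shorthand in permissive modes; conforming ones do not). A self-contained reduction:

    #include <cstddef>
    #include <tuple>
    #include <type_traits>
    #include <utility>

    template <typename... Args>
    struct LambdaTraits { // reduced stand-in for the pass's LambdaTraits
        template <std::size_t I>
        using ArgType = std::tuple_element_t<I, std::tuple<Args...>>;
    };

    template <typename Traits, std::size_t... I>
    constexpr bool AllIntegral(std::index_sequence<I...>) {
        // Both keywords are required: Traits is a template parameter, so ArgType
        // is a dependent member template and its result a dependent type.
        return (std::is_integral_v<typename Traits::template ArgType<I>> && ...);
    }

    static_assert(AllIntegral<LambdaTraits<int, long>>(std::make_index_sequence<2>{}));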
 
 void FoldBranchConditional(IR::Inst& inst) {
@@ -357,7 +360,7 @@ void FoldBranchConditional(IR::Inst& inst) {
         return;
     }
     const IR::Inst* cond_inst{cond.InstRecursive()};
-    if (cond_inst->Opcode() == IR::Opcode::LogicalNot) {
+    if (cond_inst->GetOpcode() == IR::Opcode::LogicalNot) {
         const IR::Value true_label{inst.Arg(1)};
         const IR::Value false_label{inst.Arg(2)};
         // Remove negation on the conditional (take the parameter out of LogicalNot) and swap
@@ -371,10 +374,10 @@ void FoldBranchConditional(IR::Inst& inst) {
 std::optional<IR::Value> FoldCompositeExtractImpl(IR::Value inst_value, IR::Opcode insert,
                                                   IR::Opcode construct, u32 first_index) {
     IR::Inst* const inst{inst_value.InstRecursive()};
-    if (inst->Opcode() == construct) {
+    if (inst->GetOpcode() == construct) {
         return inst->Arg(first_index);
     }
-    if (inst->Opcode() != insert) {
+    if (inst->GetOpcode() != insert) {
         return std::nullopt;
     }
     IR::Value value_index{inst->Arg(2)};
@@ -410,7 +413,7 @@ void FoldCompositeExtract(IR::Inst& inst, IR::Opcode construct, IR::Opcode inser
 }
 
 void ConstantPropagation(IR::Block& block, IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::GetRegister:
         return FoldGetRegister(inst);
     case IR::Opcode::GetPred:
diff --git a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
index 0858a0bddd..90a65dd167 100644
--- a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
+++ b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
@@ -57,7 +57,7 @@ struct StorageInfo {
 
 /// Returns true when the instruction is a global memory instruction
 bool IsGlobalMemory(const IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::LoadGlobalS8:
     case IR::Opcode::LoadGlobalU8:
     case IR::Opcode::LoadGlobalS16:
@@ -80,7 +80,7 @@ bool IsGlobalMemory(const IR::Inst& inst) {
 
 /// Returns true when the instruction writes to global memory
 bool IsGlobalMemoryWrite(const IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::WriteGlobalS8:
     case IR::Opcode::WriteGlobalU8:
     case IR::Opcode::WriteGlobalS16:
@@ -140,7 +140,7 @@ bool MeetsBias(const StorageBufferAddr& storage_buffer, const Bias& bias) noexce
 void DiscardGlobalMemory(IR::Block& block, IR::Inst& inst) {
     IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
     const IR::Value zero{u32{0}};
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::LoadGlobalS8:
     case IR::Opcode::LoadGlobalU8:
     case IR::Opcode::LoadGlobalS16:
@@ -164,7 +164,7 @@ void DiscardGlobalMemory(IR::Block& block, IR::Inst& inst) {
         inst.Invalidate();
         break;
     default:
-        throw LogicError("Invalid opcode to discard its global memory operation {}", inst.Opcode());
+        throw LogicError("Invalid opcode to discard its global memory operation {}", inst.GetOpcode());
     }
 }
 
@@ -184,7 +184,7 @@ std::optional<LowAddrInfo> TrackLowAddress(IR::Inst* inst) {
     // This address is expected to be either a PackUint2x32 or an IAdd64
     IR::Inst* addr_inst{addr.InstRecursive()};
     s32 imm_offset{0};
-    if (addr_inst->Opcode() == IR::Opcode::IAdd64) {
+    if (addr_inst->GetOpcode() == IR::Opcode::IAdd64) {
         // If it's an IAdd64, get the immediate offset it is applying and grab the address
     // instruction. This expects the instruction to be canonicalized, with the address in the
     // first argument and the immediate offset in the second one.
@@ -200,7 +200,7 @@ std::optional<LowAddrInfo> TrackLowAddress(IR::Inst* inst) {
         addr_inst = iadd_addr.Inst();
     }
     // With IAdd64 handled, now PackUint2x32 is expected without exceptions
-    if (addr_inst->Opcode() != IR::Opcode::PackUint2x32) {
+    if (addr_inst->GetOpcode() != IR::Opcode::PackUint2x32) {
         return std::nullopt;
     }
     // PackUint2x32 is expected to be generated from a vector
@@ -210,20 +210,20 @@ std::optional<LowAddrInfo> TrackLowAddress(IR::Inst* inst) {
     }
     // This vector is expected to be a CompositeConstructU32x2
     IR::Inst* const vector_inst{vector.InstRecursive()};
-    if (vector_inst->Opcode() != IR::Opcode::CompositeConstructU32x2) {
+    if (vector_inst->GetOpcode() != IR::Opcode::CompositeConstructU32x2) {
         return std::nullopt;
     }
     // Grab the first argument from the CompositeConstructU32x2; this is the low address.
     return LowAddrInfo{
         .value{IR::U32{vector_inst->Arg(0)}},
-        .imm_offset{imm_offset},
+        .imm_offset = imm_offset,
     };
 }
 
 /// Tries to track the storage buffer address used by a global memory instruction
 std::optional<StorageBufferAddr> Track(const IR::Value& value, const Bias* bias) {
     const auto pred{[bias](const IR::Inst* inst) -> std::optional<StorageBufferAddr> {
-        if (inst->Opcode() != IR::Opcode::GetCbufU32) {
+        if (inst->GetOpcode() != IR::Opcode::GetCbufU32) {
             return std::nullopt;
         }
         const IR::Value index{inst->Arg(0)};
@@ -256,9 +256,9 @@ void CollectStorageBuffers(IR::Block& block, IR::Inst& inst, StorageInfo& info)
     // NVN puts storage buffers in a specific range, so we have to bias towards these addresses
     // to avoid getting false positives
     static constexpr Bias nvn_bias{
-        .index{0},
-        .offset_begin{0x110},
-        .offset_end{0x610},
+        .index = 0,
+        .offset_begin = 0x110,
+        .offset_end = 0x610,
     };
     // Track the low address of the instruction
     const std::optional<LowAddrInfo> low_addr_info{TrackLowAddress(&inst)};
@@ -286,8 +286,8 @@ void CollectStorageBuffers(IR::Block& block, IR::Inst& inst, StorageInfo& info)
     info.set.insert(*storage_buffer);
     info.to_replace.push_back(StorageInst{
         .storage_buffer{*storage_buffer},
-        .inst{&inst},
-        .block{&block},
+        .inst = &inst,
+        .block = &block,
     });
 }
 
@@ -312,7 +312,7 @@ IR::U32 StorageOffset(IR::Block& block, IR::Inst& inst, StorageBufferAddr buffer
 /// Replace a global memory load instruction with its storage buffer equivalent
 void ReplaceLoad(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
                  const IR::U32& offset) {
-    const IR::Opcode new_opcode{GlobalToStorage(inst.Opcode())};
+    const IR::Opcode new_opcode{GlobalToStorage(inst.GetOpcode())};
     const auto it{IR::Block::InstructionList::s_iterator_to(inst)};
     const IR::Value value{&*block.PrependNewInst(it, new_opcode, {storage_index, offset})};
     inst.ReplaceUsesWith(value);
@@ -321,7 +321,7 @@ void ReplaceLoad(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
 /// Replace a global memory write instruction with its storage buffer equivalent
 void ReplaceWrite(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
                   const IR::U32& offset) {
-    const IR::Opcode new_opcode{GlobalToStorage(inst.Opcode())};
+    const IR::Opcode new_opcode{GlobalToStorage(inst.GetOpcode())};
     const auto it{IR::Block::InstructionList::s_iterator_to(inst)};
     block.PrependNewInst(it, new_opcode, {storage_index, offset, inst.Arg(1)});
     inst.Invalidate();
@@ -330,7 +330,7 @@ void ReplaceWrite(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index
 /// Replace a global memory instruction with its storage buffer equivalent
 void Replace(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
              const IR::U32& offset) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::LoadGlobalS8:
     case IR::Opcode::LoadGlobalU8:
     case IR::Opcode::LoadGlobalS16:
@@ -348,7 +348,7 @@ void Replace(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
     case IR::Opcode::WriteGlobal128:
         return ReplaceWrite(block, inst, storage_index, offset);
     default:
-        throw InvalidArgument("Invalid global memory opcode {}", inst.Opcode());
+        throw InvalidArgument("Invalid global memory opcode {}", inst.GetOpcode());
     }
 }
 } // Anonymous namespace
@@ -366,9 +366,9 @@ void GlobalMemoryToStorageBufferPass(IR::Program& program) {
     u32 storage_index{};
     for (const StorageBufferAddr& storage_buffer : info.set) {
         program.info.storage_buffers_descriptors.push_back({
-            .cbuf_index{storage_buffer.index},
-            .cbuf_offset{storage_buffer.offset},
-            .count{1},
+            .cbuf_index = storage_buffer.index,
+            .cbuf_offset = storage_buffer.offset,
+            .count = 1,
             .is_written{info.writes.contains(storage_buffer)},
         });
         ++storage_index;
diff --git a/src/shader_recompiler/ir_opt/identity_removal_pass.cpp b/src/shader_recompiler/ir_opt/identity_removal_pass.cpp
index 8790b48f21..38af72dfea 100644
--- a/src/shader_recompiler/ir_opt/identity_removal_pass.cpp
+++ b/src/shader_recompiler/ir_opt/identity_removal_pass.cpp
@@ -22,7 +22,8 @@ void IdentityRemovalPass(IR::Program& program) {
                     inst->SetArg(i, arg.Inst()->Arg(0));
                 }
             }
-            if (inst->Opcode() == IR::Opcode::Identity || inst->Opcode() == IR::Opcode::Void) {
+            if (inst->GetOpcode() == IR::Opcode::Identity ||
+                inst->GetOpcode() == IR::Opcode::Void) {
                 to_invalidate.push_back(&*inst);
                 inst = block->Instructions().erase(inst);
             } else {
diff --git a/src/shader_recompiler/ir_opt/lower_fp16_to_fp32.cpp b/src/shader_recompiler/ir_opt/lower_fp16_to_fp32.cpp
index 0d2c91ed61..52576b07fc 100644
--- a/src/shader_recompiler/ir_opt/lower_fp16_to_fp32.cpp
+++ b/src/shader_recompiler/ir_opt/lower_fp16_to_fp32.cpp
@@ -123,7 +123,7 @@ IR::Opcode Replace(IR::Opcode op) {
 void LowerFp16ToFp32(IR::Program& program) {
     for (IR::Block* const block : program.blocks) {
         for (IR::Inst& inst : block->Instructions()) {
-            inst.ReplaceOpcode(Replace(inst.Opcode()));
+            inst.ReplaceOpcode(Replace(inst.GetOpcode()));
         }
     }
 }
diff --git a/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp b/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp
index ca36253d14..346fcc3774 100644
--- a/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp
+++ b/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp
@@ -116,7 +116,7 @@ IR::Opcode UndefOpcode(IndirectBranchVariable) noexcept {
 }
 
 [[nodiscard]] bool IsPhi(const IR::Inst& inst) noexcept {
-    return inst.Opcode() == IR::Opcode::Phi;
+    return inst.GetOpcode() == IR::Opcode::Phi;
 }
 
 enum class Status {
@@ -278,7 +278,7 @@ private:
 };
 
 void VisitInst(Pass& pass, IR::Block* block, IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::SetRegister:
         if (const IR::Reg reg{inst.Arg(0).Reg()}; reg != IR::Reg::RZ) {
             pass.WriteVariable(reg, block, inst.Arg(1));
diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp
index 290ce41791..c8aee3d3d5 100644
--- a/src/shader_recompiler/ir_opt/texture_pass.cpp
+++ b/src/shader_recompiler/ir_opt/texture_pass.cpp
@@ -30,7 +30,7 @@ struct TextureInst {
 using TextureInstVector = boost::container::small_vector<TextureInst, 24>;
 
 IR::Opcode IndexedInstruction(const IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::BindlessImageSampleImplicitLod:
     case IR::Opcode::BoundImageSampleImplicitLod:
         return IR::Opcode::ImageSampleImplicitLod;
@@ -67,7 +67,7 @@ IR::Opcode IndexedInstruction(const IR::Inst& inst) {
 }
 
 bool IsBindless(const IR::Inst& inst) {
-    switch (inst.Opcode()) {
+    switch (inst.GetOpcode()) {
     case IR::Opcode::BindlessImageSampleImplicitLod:
     case IR::Opcode::BindlessImageSampleExplicitLod:
     case IR::Opcode::BindlessImageSampleDrefImplicitLod:
@@ -91,7 +91,7 @@ bool IsBindless(const IR::Inst& inst) {
     case IR::Opcode::BoundImageGradient:
         return false;
     default:
-        throw InvalidArgument("Invalid opcode {}", inst.Opcode());
+        throw InvalidArgument("Invalid opcode {}", inst.GetOpcode());
     }
 }
 
@@ -100,7 +100,7 @@ bool IsTextureInstruction(const IR::Inst& inst) {
 }
 
 std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
-    if (inst->Opcode() != IR::Opcode::GetCbufU32) {
+    if (inst->GetOpcode() != IR::Opcode::GetCbufU32) {
         return std::nullopt;
     }
     const IR::Value index{inst->Arg(0)};
@@ -134,14 +134,14 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
         addr = *track_addr;
     } else {
         addr = ConstBufferAddr{
-            .index{env.TextureBoundBuffer()},
-            .offset{inst.Arg(0).U32()},
+            .index = env.TextureBoundBuffer(),
+            .offset = inst.Arg(0).U32(),
         };
     }
     return TextureInst{
         .cbuf{addr},
-        .inst{&inst},
-        .block{block},
+        .inst = &inst,
+        .block = block,
     };
 }
 
@@ -211,7 +211,7 @@ void TexturePass(Environment& env, IR::Program& program) {
 
         const auto& cbuf{texture_inst.cbuf};
         auto flags{inst->Flags<IR::TextureInstInfo>()};
-        switch (inst->Opcode()) {
+        switch (inst->GetOpcode()) {
         case IR::Opcode::ImageQueryDimensions:
             flags.type.Assign(env.ReadTextureType(cbuf.index, cbuf.offset));
             inst->SetFlags(flags);
@@ -235,16 +235,16 @@ void TexturePass(Environment& env, IR::Program& program) {
         u32 index;
         if (flags.type == TextureType::Buffer) {
             index = descriptors.Add(TextureBufferDescriptor{
-                .cbuf_index{cbuf.index},
-                .cbuf_offset{cbuf.offset},
-                .count{1},
+                .cbuf_index = cbuf.index,
+                .cbuf_offset = cbuf.offset,
+                .count = 1,
             });
         } else {
             index = descriptors.Add(TextureDescriptor{
-                .type{flags.type},
-                .cbuf_index{cbuf.index},
-                .cbuf_offset{cbuf.offset},
-                .count{1},
+                .type = flags.type,
+                .cbuf_index = cbuf.index,
+                .cbuf_offset = cbuf.offset,
+                .count = 1,
             });
         }
         inst->SetArg(0, IR::Value{index});
diff --git a/src/shader_recompiler/ir_opt/verification_pass.cpp b/src/shader_recompiler/ir_opt/verification_pass.cpp
index 4080b37cca..dbec96d84a 100644
--- a/src/shader_recompiler/ir_opt/verification_pass.cpp
+++ b/src/shader_recompiler/ir_opt/verification_pass.cpp
@@ -14,14 +14,14 @@ namespace Shader::Optimization {
 static void ValidateTypes(const IR::Program& program) {
     for (const auto& block : program.blocks) {
         for (const IR::Inst& inst : *block) {
-            if (inst.Opcode() == IR::Opcode::Phi) {
+            if (inst.GetOpcode() == IR::Opcode::Phi) {
                 // Skip validation on phi nodes
                 continue;
             }
             const size_t num_args{inst.NumArgs()};
             for (size_t i = 0; i < num_args; ++i) {
                 const IR::Type t1{inst.Arg(i).Type()};
-                const IR::Type t2{IR::ArgTypeOf(inst.Opcode(), i)};
+                const IR::Type t2{IR::ArgTypeOf(inst.GetOpcode(), i)};
                 if (!IR::AreTypesCompatible(t1, t2)) {
                     throw LogicError("Invalid types in block:\n{}", IR::DumpBlock(*block));
                 }
diff --git a/src/tests/common/unique_function.cpp b/src/tests/common/unique_function.cpp
index ac9912738a..aa6e865934 100644
--- a/src/tests/common/unique_function.cpp
+++ b/src/tests/common/unique_function.cpp
@@ -17,10 +17,12 @@ struct Noisy {
     Noisy& operator=(Noisy&& rhs) noexcept {
         state = "Move assigned";
         rhs.state = "Moved away";
+        return *this;
     }
     Noisy(const Noisy&) : state{"Copied constructed"} {}
     Noisy& operator=(const Noisy&) {
         state = "Copied assigned";
+        return *this;
     }
 
     std::string state;
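
[Editorial note] Both assignment operators in this test helper previously fell off the end without a return statement, which is undefined behavior for a value-returning function (compilers flag it with -Wreturn-type). The conventional shape:

    struct Widget { // minimal illustration, unrelated to Noisy's test bookkeeping
        int state{};

        Widget& operator=(const Widget& rhs) {
            state = rhs.state;
            return *this; // omitting this line is undefined behavior
        }
    };

    int main() {
        Widget a{1};
        Widget b{};
        b = a;
        return b.state == 1 ? 0 : 1;
    }
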
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 71b07c1940..3166a69dc1 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -203,7 +203,7 @@ add_library(video_core STATIC
 create_target_directory_groups(video_core)
 
 target_link_libraries(video_core PUBLIC common core)
-target_link_libraries(video_core PRIVATE glad shader_recompiler xbyak)
+target_link_libraries(video_core PUBLIC glad shader_recompiler xbyak)
 
 if (YUZU_USE_BUNDLED_FFMPEG AND NOT WIN32)
     add_dependencies(video_core ffmpeg-build)
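
[Editorial note] Switching these libraries from PRIVATE to PUBLIC changes CMake usage-requirement propagation: PRIVATE dependencies link into video_core only, while PUBLIC ones also become part of its link interface, so every target that links video_core now transitively links (and inherits the usage requirements of) shader_recompiler, glad and xbyak. Illustrative consumer (hypothetical target name):

    # Hypothetical downstream target; with the PUBLIC change it picks up
    # shader_recompiler, glad and xbyak transitively through video_core.
    add_executable(yuzu_like main.cpp)
    target_link_libraries(yuzu_like PRIVATE video_core)
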
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 893258b4aa..57e2d569c2 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -447,7 +447,7 @@ void GraphicsPipeline::MakePipeline(const Device& device, VkRenderPass render_pa
         .dynamicStateCount = static_cast<u32>(dynamic_states.size()),
         .pDynamicStates = dynamic_states.data(),
     };
-    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
+    [[maybe_unused]] const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
         .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
         .pNext = nullptr,
         .requiredSubgroupSize = GuestWarpSize,
@@ -457,15 +457,16 @@ void GraphicsPipeline::MakePipeline(const Device& device, VkRenderPass render_pa
         if (!spv_modules[stage]) {
             continue;
         }
-        [[maybe_unused]] auto& stage_ci = shader_stages.emplace_back(VkPipelineShaderStageCreateInfo{
-            .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
-            .pNext = nullptr,
-            .flags = 0,
-            .stage = MaxwellToVK::ShaderStage(static_cast<Tegra::Engines::ShaderType>(stage)),
-            .module = *spv_modules[stage],
-            .pName = "main",
-            .pSpecializationInfo = nullptr,
-        });
+        [[maybe_unused]] auto& stage_ci =
+            shader_stages.emplace_back(VkPipelineShaderStageCreateInfo{
+                .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+                .pNext = nullptr,
+                .flags = 0,
+                .stage = MaxwellToVK::ShaderStage(static_cast<Tegra::Engines::ShaderType>(stage)),
+                .module = *spv_modules[stage],
+                .pName = "main",
+                .pSpecializationInfo = nullptr,
+            });
         /*
         if (program[stage]->entries.uses_warps && device.IsGuestWarpSizeSupported(stage_ci.stage)) {
             stage_ci.pNext = &subgroup_size_ci;
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 23bf84a92f..fcebb8f6e2 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -47,7 +47,7 @@ auto MakeSpan(Container& container) {
     return std::span(container.data(), container.size());
 }
 
-u64 MakeCbufKey(u32 index, u32 offset) {
+static u64 MakeCbufKey(u32 index, u32 offset) {
     return (static_cast<u64>(index) << 32) | offset;
 }
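
[Editorial note] Marking MakeCbufKey static gives it internal linkage, keeping the helper local to this translation unit. The key itself packs the constant-buffer index into the high half of a u64 and the byte offset into the low half, so one integer serves as a map key. A sketch with stand-in aliases:

    #include <cstdint>

    using u32 = std::uint32_t; // stand-in aliases
    using u64 = std::uint64_t;

    constexpr u64 MakeCbufKey(u32 index, u32 offset) {
        return (static_cast<u64>(index) << 32) | offset;
    }

    static_assert(MakeCbufKey(3, 0x110) == 0x0000'0003'0000'0110ull);
    static_assert(static_cast<u32>(MakeCbufKey(3, 0x110) >> 32) == 3); // recover index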
 
@@ -638,6 +638,7 @@ PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::GPU& gpu_,
         .warp_size_potentially_larger_than_guest = device.IsWarpSizePotentiallyBiggerThanGuest(),
         .has_broken_spirv_clamp = driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR,
         .generic_input_types{},
+        .fixed_state_point_size{},
     };
 }
 
@@ -748,7 +749,7 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
         Shader::Environment& env{*envs[env_index]};
         ++env_index;
 
-        const u32 cfg_offset{env.StartAddress() + sizeof(Shader::ProgramHeader)};
+        const u32 cfg_offset{static_cast<u32>(env.StartAddress() + sizeof(Shader::ProgramHeader))};
         Shader::Maxwell::Flow::CFG cfg(env, pools.flow_block, cfg_offset);
         programs[index] = TranslateProgram(pools.inst, pools.block, env, cfg);
     }
diff --git a/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp b/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp
index b2dcd74ab9..991afe521e 100644
--- a/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_render_pass_cache.cpp
@@ -2,8 +2,6 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-#pragma once
-
 #include <unordered_map>
 
 #include <boost/container/static_vector.hpp>
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index e42b091c5f..70328680dd 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -279,7 +279,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
     };
 }
 
-[[nodiscard]] std::vector<VkBufferCopy> TransformBufferCopies(
+[[maybe_unused]] [[nodiscard]] std::vector<VkBufferCopy> TransformBufferCopies(
     std::span<const VideoCommon::BufferCopy> copies, size_t buffer_offset) {
     std::vector<VkBufferCopy> result(copies.size());
     std::ranges::transform(
-- 
cgit v1.2.3-70-g09d2


From b126987c59964d81ae3705ad7ad6c0ace8714e19 Mon Sep 17 00:00:00 2001
From: ReinUsesLisp <reinuseslisp@airmail.cc>
Date: Wed, 14 Apr 2021 01:04:59 -0300
Subject: shader: Implement transform feedbacks and define file format

---
 .../backend/spirv/emit_context.cpp                 |  54 ++++++++--
 src/shader_recompiler/backend/spirv/emit_context.h |   8 +-
 src/shader_recompiler/backend/spirv/emit_spirv.cpp |   3 +
 .../backend/spirv/emit_spirv_context_get_set.cpp   |  19 +++-
 .../backend/spirv/emit_spirv_special.cpp           |  29 ++++-
 src/shader_recompiler/frontend/ir/attribute.cpp    |   7 ++
 src/shader_recompiler/frontend/ir/attribute.h      |   2 +
 src/shader_recompiler/profile.h                    |  10 ++
 .../renderer_vulkan/fixed_pipeline_state.cpp       |  19 +++-
 .../renderer_vulkan/fixed_pipeline_state.h         |  26 ++++-
 .../renderer_vulkan/vk_pipeline_cache.cpp          | 118 ++++++++++++++++++++-
 11 files changed, 272 insertions(+), 23 deletions(-)

(limited to 'src/shader_recompiler/frontend/ir/attribute.cpp')

diff --git a/src/shader_recompiler/backend/spirv/emit_context.cpp b/src/shader_recompiler/backend/spirv/emit_context.cpp
index df53e58a88..74c42233d7 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_context.cpp
@@ -135,6 +135,45 @@ Id DefineOutput(EmitContext& ctx, Id type, std::optional<spv::BuiltIn> builtin =
     return DefineVariable(ctx, type, builtin, spv::StorageClass::Output);
 }
 
+void DefineGenericOutput(EmitContext& ctx, size_t index) {
+    static constexpr std::string_view swizzle{"xyzw"};
+    const size_t base_attr_index{static_cast<size_t>(IR::Attribute::Generic0X) + index * 4};
+    u32 element{0};
+    while (element < 4) {
+        const u32 remainder{4 - element};
+        const TransformFeedbackVarying* xfb_varying{};
+        if (!ctx.profile.xfb_varyings.empty()) {
+            xfb_varying = &ctx.profile.xfb_varyings[base_attr_index + element];
+            xfb_varying = xfb_varying->components > 0 ? xfb_varying : nullptr;
+        }
+        const u32 num_components{xfb_varying ? xfb_varying->components : remainder};
+
+        const Id id{DefineOutput(ctx, ctx.F32[num_components])};
+        ctx.Decorate(id, spv::Decoration::Location, static_cast<u32>(index));
+        if (element > 0) {
+            ctx.Decorate(id, spv::Decoration::Component, element);
+        }
+        if (xfb_varying) {
+            ctx.Decorate(id, spv::Decoration::XfbBuffer, xfb_varying->buffer);
+            ctx.Decorate(id, spv::Decoration::XfbStride, xfb_varying->stride);
+            ctx.Decorate(id, spv::Decoration::Offset, xfb_varying->offset);
+        }
+        if (num_components < 4 || element > 0) {
+            const std::string_view subswizzle{swizzle.substr(element, num_components)};
+            ctx.Name(id, fmt::format("out_attr{}_{}", index, subswizzle));
+        } else {
+            ctx.Name(id, fmt::format("out_attr{}", index));
+        }
+        const GenericElementInfo info{
+            .id = id,
+            .first_element = element,
+            .num_components = num_components,
+        };
+        std::fill_n(ctx.output_generics[index].begin() + element, num_components, info);
+        element += num_components;
+    }
+}
+
 Id GetAttributeType(EmitContext& ctx, AttributeType type) {
     switch (type) {
     case AttributeType::Float:
@@ -663,12 +702,15 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
             OpReturn();
             ++label_index;
         }
-        for (size_t i = 0; i < info.stores_generics.size(); i++) {
+        for (size_t i = 0; i < info.stores_generics.size(); ++i) {
             if (!info.stores_generics[i]) {
                 continue;
             }
+            if (output_generics[i][0].num_components != 4) {
+                throw NotImplementedException("Physical stores and transform feedbacks");
+            }
             AddLabel(labels[label_index]);
-            const Id generic_id{output_generics.at(i)};
+            const Id generic_id{output_generics[i][0].id};
             const Id pointer{OpAccessChain(output_f32, generic_id, masked_index)};
             OpStore(pointer, store_value);
             OpReturn();
@@ -1015,11 +1057,9 @@ void EmitContext::DefineOutputs(const Info& info) {
         }
         viewport_index = DefineOutput(*this, U32[1], spv::BuiltIn::ViewportIndex);
     }
-    for (size_t i = 0; i < info.stores_generics.size(); ++i) {
-        if (info.stores_generics[i]) {
-            output_generics[i] = DefineOutput(*this, F32[4]);
-            Decorate(output_generics[i], spv::Decoration::Location, static_cast<u32>(i));
-            Name(output_generics[i], fmt::format("out_attr{}", i));
+    for (size_t index = 0; index < info.stores_generics.size(); ++index) {
+        if (info.stores_generics[index]) {
+            DefineGenericOutput(*this, index);
         }
     }
     if (stage == Stage::Fragment) {
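
[Editorial note] DefineGenericOutput splits one generic attribute (a vec4 worth of components at one location) into as many output variables as the captured transform-feedback varyings require, decorating each slice with Location, Component where needed, and the Xfb* decorations. A reduced model of the splitting loop (values hypothetical, types stand-ins):

    #include <cstdio>

    struct Varying {
        unsigned components; // stand-in for TransformFeedbackVarying::components
    };

    // Reduced model of the loop: one 4-component location split into variables
    // sized by whichever varyings capture it.
    void SplitLocation(const Varying (&varyings)[4]) {
        unsigned element = 0;
        while (element < 4) {
            const unsigned remainder = 4 - element;
            const unsigned captured = varyings[element].components;
            const unsigned num = captured > 0 ? captured : remainder;
            std::printf("variable at component %u, %u component(s)\n", element, num);
            element += num;
        }
    }

    int main() {
        // Hypothetical: a vec3 is captured starting at .x; .w stays uncaptured.
        const Varying varyings[4]{{3}, {0}, {0}, {0}};
        SplitLocation(varyings); // -> component 0 (3 wide), then component 3 (1 wide)
        return 0;
    }
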
diff --git a/src/shader_recompiler/backend/spirv/emit_context.h b/src/shader_recompiler/backend/spirv/emit_context.h
index cade1fa0d6..b27e5540c9 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.h
+++ b/src/shader_recompiler/backend/spirv/emit_context.h
@@ -79,6 +79,12 @@ struct StorageDefinitions {
     Id U32x4{};
 };
 
+struct GenericElementInfo {
+    Id id{};
+    u32 first_element{};
+    u32 num_components{};
+};
+
 class EmitContext final : public Sirit::Module {
 public:
     explicit EmitContext(const Profile& profile, IR::Program& program, u32& binding);
@@ -189,7 +195,7 @@ public:
 
     Id output_point_size{};
     Id output_position{};
-    std::array<Id, 32> output_generics{};
+    std::array<std::array<GenericElementInfo, 4>, 32> output_generics{};
 
     std::array<Id, 8> frag_color{};
     Id frag_depth{};
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.cpp b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
index 7ad00c4343..444ba276f7 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
@@ -288,6 +288,9 @@ void SetupCapabilities(const Profile& profile, const Info& info, EmitContext& ct
     if (info.uses_typeless_image_writes) {
         ctx.AddCapability(spv::Capability::StorageImageWriteWithoutFormat);
     }
+    if (!ctx.profile.xfb_varyings.empty()) {
+        ctx.AddCapability(spv::Capability::TransformFeedback);
+    }
     // TODO: Track this usage
     ctx.AddCapability(spv::Capability::ImageGatherExtended);
     ctx.AddCapability(spv::Capability::ImageQuery);
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
index a91b4c212e..f9c151a5c5 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
@@ -40,11 +40,17 @@ Id AttrPointer(EmitContext& ctx, Id pointer_type, Id vertex, Id base, Args&&...
 }
 
 std::optional<Id> OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
-    const u32 element{static_cast<u32>(attr) % 4};
-    const auto element_id{[&] { return ctx.Constant(ctx.U32[1], element); }};
     if (IR::IsGeneric(attr)) {
         const u32 index{IR::GenericAttributeIndex(attr)};
-        return ctx.OpAccessChain(ctx.output_f32, ctx.output_generics.at(index), element_id());
+        const u32 element{IR::GenericAttributeElement(attr)};
+        const GenericElementInfo& info{ctx.output_generics.at(index).at(element)};
+        if (info.num_components == 1) {
+            return info.id;
+        } else {
+            const u32 index_element{element - info.first_element};
+            const Id index_id{ctx.Constant(ctx.U32[1], index_element)};
+            return ctx.OpAccessChain(ctx.output_f32, info.id, index_id);
+        }
     }
     switch (attr) {
     case IR::Attribute::PointSize:
@@ -52,8 +58,11 @@ std::optional<Id> OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
     case IR::Attribute::PositionX:
     case IR::Attribute::PositionY:
     case IR::Attribute::PositionZ:
-    case IR::Attribute::PositionW:
-        return ctx.OpAccessChain(ctx.output_f32, ctx.output_position, element_id());
+    case IR::Attribute::PositionW: {
+        const u32 element{static_cast<u32>(attr) % 4};
+        const Id element_id{ctx.Constant(ctx.U32[1], element)};
+        return ctx.OpAccessChain(ctx.output_f32, ctx.output_position, element_id);
+    }
     case IR::Attribute::ClipDistance0:
     case IR::Attribute::ClipDistance1:
     case IR::Attribute::ClipDistance2:
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp
index fee740c084..7af29e4dd7 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp
@@ -22,6 +22,21 @@ void SetFixedPipelinePointSize(EmitContext& ctx) {
         ctx.OpStore(ctx.output_point_size, ctx.Constant(ctx.F32[1], point_size));
     }
 }
+
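+// Default value for an output varying that covers num_components elements
+// starting at the given element: zero for X/Y/Z and one for W. For example, a
+// two-component variable declared at element 2 (covering Z and W) is
+// initialized to (0.0, 1.0).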
+Id DefaultVarying(EmitContext& ctx, u32 num_components, u32 element, Id zero, Id one,
+                  Id default_vector) {
+    switch (num_components) {
+    case 1:
+        return element == 3 ? one : zero;
+    case 2:
+        return ctx.ConstantComposite(ctx.F32[2], zero, element + 1 == 3 ? one : zero);
+    case 3:
+        return ctx.ConstantComposite(ctx.F32[3], zero, zero, element + 2 == 3 ? one : zero);
+    case 4:
+        return default_vector;
+    }
+    throw InvalidArgument("Invalid number of components {}", num_components);
+}
 } // Anonymous namespace
 
 void EmitPrologue(EmitContext& ctx) {
@@ -30,9 +45,17 @@ void EmitPrologue(EmitContext& ctx) {
         const Id one{ctx.Constant(ctx.F32[1], 1.0f)};
         const Id default_vector{ctx.ConstantComposite(ctx.F32[4], zero, zero, zero, one)};
         ctx.OpStore(ctx.output_position, default_vector);
-        for (const Id generic_id : ctx.output_generics) {
-            if (Sirit::ValidId(generic_id)) {
-                ctx.OpStore(generic_id, default_vector);
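+        // Generic outputs may be split across several variables, each covering
+        // one to four consecutive elements; advance by each variable's width.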
+        for (const auto& info : ctx.output_generics) {
+            if (info[0].num_components == 0) {
+                continue;
+            }
+            u32 element{0};
+            while (element < 4) {
+                const auto& element_info{info[element]};
+                const u32 num{element_info.num_components};
+                const Id value{DefaultVarying(ctx, num, element, zero, one, default_vector)};
+                ctx.OpStore(element_info.id, value);
+                element += num;
             }
         }
     }
diff --git a/src/shader_recompiler/frontend/ir/attribute.cpp b/src/shader_recompiler/frontend/ir/attribute.cpp
index 7993e5c436..4d0b8b8e5c 100644
--- a/src/shader_recompiler/frontend/ir/attribute.cpp
+++ b/src/shader_recompiler/frontend/ir/attribute.cpp
@@ -20,6 +20,13 @@ u32 GenericAttributeIndex(Attribute attribute) {
     return (static_cast<u32>(attribute) - static_cast<u32>(Attribute::Generic0X)) / 4u;
 }
 
+u32 GenericAttributeElement(Attribute attribute) {
+    if (!IsGeneric(attribute)) {
+        throw InvalidArgument("Attribute is not generic {}", attribute);
+    }
+    return static_cast<u32>(attribute) % 4;
+}
+
 std::string NameOf(Attribute attribute) {
     switch (attribute) {
     case Attribute::PrimitiveId:
diff --git a/src/shader_recompiler/frontend/ir/attribute.h b/src/shader_recompiler/frontend/ir/attribute.h
index 34ec7e0cd0..8bf2ddf30d 100644
--- a/src/shader_recompiler/frontend/ir/attribute.h
+++ b/src/shader_recompiler/frontend/ir/attribute.h
@@ -226,6 +226,8 @@ enum class Attribute : u64 {
 
 [[nodiscard]] u32 GenericAttributeIndex(Attribute attribute);
 
+[[nodiscard]] u32 GenericAttributeElement(Attribute attribute);
+
 [[nodiscard]] std::string NameOf(Attribute attribute);
 
 } // namespace Shader::IR
diff --git a/src/shader_recompiler/profile.h b/src/shader_recompiler/profile.h
index 919bec4e2c..5ecae71b95 100644
--- a/src/shader_recompiler/profile.h
+++ b/src/shader_recompiler/profile.h
@@ -5,6 +5,7 @@
 #pragma once
 
 #include <array>
 #include <optional>
+#include <vector>
 
 #include "common/common_types.h"
@@ -26,6 +27,13 @@ enum class InputTopology {
     TrianglesAdjacency,
 };
 
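+// One transform feedback output as consumed by the shader recompiler: the
+// destination buffer, that buffer's stride, the byte offset of the varying,
+// and the number of consecutive components it spans.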
+struct TransformFeedbackVarying {
+    u32 buffer{};
+    u32 stride{};
+    u32 offset{};
+    u32 components{};
+};
+
 struct Profile {
     u32 supported_spirv{0x00010000};
 
@@ -58,6 +66,8 @@ struct Profile {
     InputTopology input_topology{};
 
     std::optional<float> fixed_state_point_size;
+
+    std::vector<TransformFeedbackVarying> xfb_varyings;
 };
 
 } // namespace Shader
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
index d8f6839072..6a3baf837a 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -52,6 +52,8 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
     const u32 topology_index = static_cast<u32>(regs.draw.topology.Value());
 
     raw1 = 0;
+    no_extended_dynamic_state.Assign(has_extended_dynamic_state ? 0 : 1);
+    xfb_enabled.Assign(regs.tfb_enabled != 0 ? 1 : 0);
     primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0);
     depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0);
     depth_clamp_disabled.Assign(regs.view_volume_clip_control.depth_clamp_disabled.Value());
@@ -113,10 +115,12 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
             return static_cast<u16>(viewport.swizzle.raw);
         });
     }
-    if (!has_extended_dynamic_state) {
-        no_extended_dynamic_state.Assign(1);
+    if (no_extended_dynamic_state != 0) {
         dynamic_state.Refresh(regs);
     }
+    if (xfb_enabled != 0) {
+        xfb_state.Refresh(regs);
+    }
 }
 
 void FixedPipelineState::BlendingAttachment::Refresh(const Maxwell& regs, size_t index) {
@@ -158,6 +162,17 @@ void FixedPipelineState::BlendingAttachment::Refresh(const Maxwell& regs, size_t
     enable.Assign(1);
 }
 
+void FixedPipelineState::TransformFeedbackState::Refresh(const Maxwell& regs) {
+    std::ranges::transform(regs.tfb_layouts, layouts.begin(), [](const auto& layout) {
+        return Layout{
+            .stream = layout.stream,
+            .varying_count = layout.varying_count,
+            .stride = layout.stride,
+        };
+    });
+    varyings = regs.tfb_varying_locs;
+}
+
 void FixedPipelineState::DynamicState::Refresh(const Maxwell& regs) {
     u32 packed_front_face = PackFrontFace(regs.front_face);
     if (regs.screen_y_control.triangle_rast_flip != 0) {
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
index 348f1d6ce6..5568c4f728 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.h
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
@@ -130,6 +130,18 @@ struct FixedPipelineState {
         }
     };
 
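+    // Transform feedback registers copied from Maxwell3D so that the enabled
+    // varyings are hashed as part of the pipeline cache key.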
+    struct TransformFeedbackState {
+        struct Layout {
+            u32 stream;
+            u32 varying_count;
+            u32 stride;
+        };
+        std::array<Layout, Maxwell::NumTransformFeedbackBuffers> layouts;
+        std::array<std::array<u8, 128>, Maxwell::NumTransformFeedbackBuffers> varyings;
+
+        void Refresh(const Maxwell& regs);
+    };
+
     struct DynamicState {
         union {
             u32 raw1;
@@ -168,6 +180,7 @@ struct FixedPipelineState {
     union {
         u32 raw1;
         BitField<0, 1, u32> no_extended_dynamic_state;
+        BitField<1, 1, u32> xfb_enabled;
         BitField<2, 1, u32> primitive_restart_enable;
         BitField<3, 1, u32> depth_bias_enable;
         BitField<4, 1, u32> depth_clamp_disabled;
@@ -199,6 +212,7 @@ struct FixedPipelineState {
     std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments;
     std::array<u16, Maxwell::NumViewports> viewport_swizzles;
     DynamicState dynamic_state;
+    TransformFeedbackState xfb_state;
 
     void Refresh(Tegra::Engines::Maxwell3D& maxwell3d, bool has_extended_dynamic_state);
 
@@ -211,8 +225,16 @@ struct FixedPipelineState {
     }
 
     size_t Size() const noexcept {
-        const size_t total_size = sizeof *this;
-        return total_size - (no_extended_dynamic_state != 0 ? 0 : sizeof(DynamicState));
+        if (xfb_enabled != 0) {
+            // When transform feedback is enabled, use the whole struct
+            return sizeof(*this);
+        } else if (no_extended_dynamic_state != 0) {
+            // No transform feedback, but dynamic state is baked into the key
+            return offsetof(FixedPipelineState, xfb_state);
+        } else {
+            // No transform feedback and extended dynamic state is available
+            return offsetof(FixedPipelineState, dynamic_state);
+        }
     }
 };
 static_assert(std::has_unique_object_representations_v<FixedPipelineState>);
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 8a59a26112..de52d0f306 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -248,6 +248,10 @@ namespace {
 using Shader::Backend::SPIRV::EmitSPIRV;
 using Shader::Maxwell::TranslateProgram;
 
+// TODO: Move this to a separate file
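+// On-disk pipeline cache header: an 8-byte magic ("yuzucach") followed by a
+// format version. CACHE_VERSION is bumped whenever the serialized layout
+// changes, invalidating older caches.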
+constexpr std::array<char, 8> MAGIC_NUMBER{'y', 'u', 'z', 'u', 'c', 'a', 'c', 'h'};
+constexpr u32 CACHE_VERSION{1};
+
 class GraphicsEnvironment final : public GenericEnvironment {
 public:
     explicit GraphicsEnvironment() = default;
@@ -379,13 +383,14 @@ void SerializePipeline(const Key& key, const Envs& envs, const std::string& file
     try {
         std::ofstream file;
         file.exceptions(std::ifstream::failbit);
-        Common::FS::OpenFStream(file, filename, std::ios::binary | std::ios::app);
+        Common::FS::OpenFStream(file, filename, std::ios::binary | std::ios::ate | std::ios::app);
         if (!file.is_open()) {
             LOG_ERROR(Common_Filesystem, "Failed to open pipeline cache file {}", filename);
             return;
         }
         if (file.tellp() == 0) {
-            // Write header...
+            file.write(MAGIC_NUMBER.data(), MAGIC_NUMBER.size())
+                .write(reinterpret_cast<const char*>(&CACHE_VERSION), sizeof(CACHE_VERSION));
         }
         const std::span key_span(reinterpret_cast<const char*>(&key), sizeof(key));
         SerializePipeline(key_span, MakeSpan(envs), file);
@@ -520,8 +525,27 @@ void PipelineCache::LoadDiskResources(u64 title_id, std::stop_token stop_loading
     file.exceptions(std::ifstream::failbit);
     const auto end{file.tellg()};
     file.seekg(0, std::ios::beg);
-    // Read header...
 
+    std::array<char, 8> magic_number;
+    u32 cache_version;
+    file.read(magic_number.data(), magic_number.size())
+        .read(reinterpret_cast<char*>(&cache_version), sizeof(cache_version));
+    if (magic_number != MAGIC_NUMBER || cache_version != CACHE_VERSION) {
+        file.close();
+        if (Common::FS::Delete(pipeline_cache_filename)) {
+            if (magic_number != MAGIC_NUMBER) {
+                LOG_ERROR(Render_Vulkan, "Invalid pipeline cache file");
+            }
+            if (cache_version != CACHE_VERSION) {
+                LOG_INFO(Render_Vulkan, "Deleting old pipeline cache");
+            }
+        } else {
+            LOG_ERROR(Render_Vulkan,
+                      "Invalid pipeline cache file and failed to delete it in \"{}\"",
+                      pipeline_cache_filename);
+        }
+        return;
+    }
     while (file.tellg() != end) {
         if (stop_loading) {
             return;
@@ -879,6 +903,88 @@ static Shader::AttributeType CastAttributeType(const FixedPipelineState::VertexA
     return Shader::AttributeType::Float;
 }
 
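+// Builds one entry per transform feedback location from the raw Maxwell
+// registers. Consecutive locations that fall inside the same vector (e.g. the
+// four components of a generic attribute) are merged into a single varying.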
+static std::vector<Shader::TransformFeedbackVarying> MakeTransformFeedbackVaryings(
+    const GraphicsPipelineCacheKey& key) {
+    static constexpr std::array VECTORS{
+        28,  // gl_Position
+        32,  // Generic 0
+        36,  // Generic 1
+        40,  // Generic 2
+        44,  // Generic 3
+        48,  // Generic 4
+        52,  // Generic 5
+        56,  // Generic 6
+        60,  // Generic 7
+        64,  // Generic 8
+        68,  // Generic 9
+        72,  // Generic 10
+        76,  // Generic 11
+        80,  // Generic 12
+        84,  // Generic 13
+        88,  // Generic 14
+        92,  // Generic 15
+        96,  // Generic 16
+        100, // Generic 17
+        104, // Generic 18
+        108, // Generic 19
+        112, // Generic 20
+        116, // Generic 21
+        120, // Generic 22
+        124, // Generic 23
+        128, // Generic 24
+        132, // Generic 25
+        136, // Generic 26
+        140, // Generic 27
+        144, // Generic 28
+        148, // Generic 29
+        152, // Generic 30
+        156, // Generic 31
+        160, // gl_FrontColor
+        164, // gl_FrontSecondaryColor
+        160, // gl_BackColor
+        164, // gl_BackSecondaryColor
+        192, // gl_TexCoord[0]
+        196, // gl_TexCoord[1]
+        200, // gl_TexCoord[2]
+        204, // gl_TexCoord[3]
+        208, // gl_TexCoord[4]
+        212, // gl_TexCoord[5]
+        216, // gl_TexCoord[6]
+        220, // gl_TexCoord[7]
+    };
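+    // Locations are encoded in a byte, so there are at most 256 of them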
+    std::vector<Shader::TransformFeedbackVarying> xfb(256);
+    for (size_t buffer = 0; buffer < Maxwell::NumTransformFeedbackBuffers; ++buffer) {
+        const auto& locations = key.state.xfb_state.varyings[buffer];
+        const auto& layout = key.state.xfb_state.layouts[buffer];
+        const u32 varying_count = layout.varying_count;
+        u32 highest = 0;
+        for (u32 offset = 0; offset < varying_count; ++offset) {
+            const u32 base_offset = offset;
+            const u8 location = locations[offset];
+
+            Shader::TransformFeedbackVarying varying;
+            varying.buffer = layout.stream;
+            varying.stride = layout.stride;
+            varying.offset = offset * 4;
+            varying.components = 1;
+
+            if (std::ranges::find(VECTORS, Common::AlignDown(location, 4)) != VECTORS.end()) {
+                UNIMPLEMENTED_IF_MSG(location % 4 != 0, "Unaligned transform feedback varying");
+
+                const u8 base_index = location / 4;
+                while (offset + 1 < varying_count && base_index == locations[offset + 1] / 4) {
+                    ++offset;
+                    ++varying.components;
+                }
+            }
+            xfb[location] = varying;
+            highest = std::max(highest, (base_offset + varying.components) * 4);
+        }
+        UNIMPLEMENTED_IF(highest != layout.stride);
+    }
+    return xfb;
+}
+
 Shader::Profile PipelineCache::MakeProfile(const GraphicsPipelineCacheKey& key,
                                            const Shader::IR::Program& program) {
     Shader::Profile profile{base_profile};
@@ -893,6 +999,9 @@ Shader::Profile PipelineCache::MakeProfile(const GraphicsPipelineCacheKey& key,
             if (key.state.topology == Maxwell::PrimitiveTopology::Points) {
                 profile.fixed_state_point_size = point_size;
             }
+            if (key.state.xfb_enabled != 0) {
+                profile.xfb_varyings = MakeTransformFeedbackVaryings(key);
+            }
             profile.convert_depth_mode = gl_ndc;
         }
         std::ranges::transform(key.state.attributes, profile.generic_input_types.begin(),
@@ -902,6 +1011,9 @@ Shader::Profile PipelineCache::MakeProfile(const GraphicsPipelineCacheKey& key,
         if (program.output_topology == Shader::OutputTopology::PointList) {
             profile.fixed_state_point_size = point_size;
         }
+        if (key.state.xfb_enabled != 0) {
+            profile.xfb_varyings = MakeTransformFeedbackVaryings(key);
+        }
         profile.convert_depth_mode = gl_ndc;
         break;
     default:
-- 