author     ReinUsesLisp <reinuseslisp@airmail.cc>               2021-05-14 00:40:54 -0300
committer  ameerj <52414509+ameerj@users.noreply.github.com>    2021-07-22 21:51:31 -0400
commit     d54d7de40e7295827b0e4e4026441b53d3fc9569 (patch)
tree       29b5074f851292dace7aeb5da7716675544b3735 /src
parent     7ff5851608031baca2adceb9f72e7c75eda9b3a9 (diff)
glasm: Rework control flow introducing a syntax list
This commit regresses VertexA shaders; their transformation pass has to be
adapted to the new control flow.
Diffstat (limited to 'src')
33 files changed, 437 insertions, 505 deletions
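The patch below removes the per-block control-flow opcodes (Branch, BranchConditional, LoopMerge, SelectionMerge, Return, Unreachable) in favour of a flat IR::AbstractSyntaxList: a vector of tagged nodes recording the structured control flow, which each backend walks linearly instead of chasing block branch pointers. As a rough illustration only, the following standalone sketch uses hypothetical, simplified types (not the real Shader::IR classes) to show the shape of such a walk, loosely modelled on the GLASM EmitCode switch added in this patch:

```cpp
// Standalone sketch only: simplified stand-ins for IR::AbstractSyntaxNode /
// IR::AbstractSyntaxList, illustrating a linear walk over the syntax list.
#include <cstdio>
#include <vector>

struct Block;  // stand-in for IR::Block; only pointers are used here

struct AbstractSyntaxNode {
    enum class Type { Block, If, EndIf, Loop, Repeat, Break, Return, Unreachable };
    Type type{};
    Block* block{};  // the real node keeps per-type payloads (cond/body/merge/...) in a union
};
using AbstractSyntaxList = std::vector<AbstractSyntaxNode>;

// A backend no longer follows branch targets: it visits the flat list in order
// and emits one structured construct per node.
void Walk(const AbstractSyntaxList& syntax_list) {
    for (const AbstractSyntaxNode& node : syntax_list) {
        switch (node.type) {
        case AbstractSyntaxNode::Type::Block:       std::puts("emit block instructions"); break;
        case AbstractSyntaxNode::Type::If:          std::puts("IF cond");                 break;
        case AbstractSyntaxNode::Type::EndIf:       std::puts("ENDIF");                   break;
        case AbstractSyntaxNode::Type::Loop:        std::puts("REP");                     break;
        case AbstractSyntaxNode::Type::Repeat:      std::puts("BRK cond; ENDREP");        break;
        case AbstractSyntaxNode::Type::Break:       std::puts("BRK cond");                break;
        case AbstractSyntaxNode::Type::Return:
        case AbstractSyntaxNode::Type::Unreachable: std::puts("RET");                     break;
        }
    }
}

int main() {
    // Roughly: block; if (cond) { block } block; return
    const AbstractSyntaxList list{
        {AbstractSyntaxNode::Type::Block}, {AbstractSyntaxNode::Type::If},
        {AbstractSyntaxNode::Type::Block}, {AbstractSyntaxNode::Type::EndIf},
        {AbstractSyntaxNode::Type::Block}, {AbstractSyntaxNode::Type::Return},
    };
    Walk(list);
}
```

In the actual diff, Block nodes point at IR::Block instruction lists, while the If/Loop/Repeat/Break nodes carry the condition value plus body/merge/continue-block pointers in a union, as defined in the new frontend/ir/abstract_syntax_list.h; the structured nodes map directly onto GLASM's IF/ENDIF/REP/ENDREP and onto SPIR-V's selection/loop merge instructions.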
diff --git a/src/shader_recompiler/CMakeLists.txt b/src/shader_recompiler/CMakeLists.txt index f829b8d32c..0d55924a7c 100644 --- a/src/shader_recompiler/CMakeLists.txt +++ b/src/shader_recompiler/CMakeLists.txt @@ -51,6 +51,7 @@ add_library(shader_recompiler STATIC backend/spirv/emit_spirv_warp.cpp environment.h exception.h + frontend/ir/abstract_syntax_list.h frontend/ir/attribute.cpp frontend/ir/attribute.h frontend/ir/basic_block.cpp diff --git a/src/shader_recompiler/backend/glasm/emit_glasm.cpp b/src/shader_recompiler/backend/glasm/emit_glasm.cpp index 056d8cbf8d..51ca83d189 100644 --- a/src/shader_recompiler/backend/glasm/emit_glasm.cpp +++ b/src/shader_recompiler/backend/glasm/emit_glasm.cpp @@ -117,8 +117,6 @@ auto Arg(EmitContext& ctx, const IR::Value& arg) { return Identity<const IR::Value&>{arg}; } else if constexpr (std::is_same_v<ArgType, u32>) { return Identity{arg.U32()}; - } else if constexpr (std::is_same_v<ArgType, IR::Block*>) { - return Identity{arg.Label()}; } else if constexpr (std::is_same_v<ArgType, IR::Attribute>) { return Identity{arg.Attribute()}; } else if constexpr (std::is_same_v<ArgType, IR::Patch>) { @@ -177,6 +175,39 @@ void EmitInst(EmitContext& ctx, IR::Inst* inst) { throw LogicError("Invalid opcode {}", inst->GetOpcode()); } +void EmitCode(EmitContext& ctx, const IR::Program& program) { + const auto eval{ + [&](const IR::U1& cond) { return ScalarS32{ctx.reg_alloc.Consume(IR::Value{cond})}; }}; + for (const IR::AbstractSyntaxNode& node : program.syntax_list) { + switch (node.type) { + case IR::AbstractSyntaxNode::Type::Block: + for (IR::Inst& inst : node.block->Instructions()) { + EmitInst(ctx, &inst); + } + break; + case IR::AbstractSyntaxNode::Type::If: + ctx.Add("MOV.S.CC RC,{};IF NE.x;", eval(node.if_node.cond)); + break; + case IR::AbstractSyntaxNode::Type::EndIf: + ctx.Add("ENDIF;"); + break; + case IR::AbstractSyntaxNode::Type::Loop: + ctx.Add("REP;"); + break; + case IR::AbstractSyntaxNode::Type::Repeat: + ctx.Add("MOV.S.CC RC,{};BRK NE.x;ENDREP;", eval(node.repeat.cond)); + break; + case IR::AbstractSyntaxNode::Type::Break: + ctx.Add("MOV.S.CC RC,{};BRK NE.x;", eval(node.repeat.cond)); + break; + case IR::AbstractSyntaxNode::Type::Return: + case IR::AbstractSyntaxNode::Type::Unreachable: + ctx.Add("RET;"); + break; + } + } +} + void SetupOptions(std::string& header, Info info) { if (info.uses_int64_bit_atomics) { header += "OPTION NV_shader_atomic_int64;"; @@ -201,11 +232,7 @@ void SetupOptions(std::string& header, Info info) { std::string EmitGLASM(const Profile&, IR::Program& program, Bindings&) { EmitContext ctx{program}; - for (IR::Block* const block : program.blocks) { - for (IR::Inst& inst : block->Instructions()) { - EmitInst(ctx, &inst); - } - } + EmitCode(ctx, program); std::string header = "!!NVcp5.0\n" "OPTION NV_internal;"; SetupOptions(header, program.info); diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h b/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h index 8202354fea..0f7f16e6e2 100644 --- a/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h +++ b/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h @@ -22,13 +22,8 @@ class EmitContext; void EmitPhi(EmitContext& ctx, IR::Inst& inst); void EmitVoid(EmitContext& ctx); void EmitIdentity(EmitContext& ctx, IR::Inst& inst, const IR::Value& value); -void EmitBranch(EmitContext& ctx); -void EmitBranchConditional(EmitContext& ctx); -void EmitLoopMerge(EmitContext& ctx); -void EmitSelectionMerge(EmitContext& ctx); -void 
EmitReturn(EmitContext& ctx); +void EmitBranchConditionRef(EmitContext&); void EmitJoin(EmitContext& ctx); -void EmitUnreachable(EmitContext& ctx); void EmitDemoteToHelperInvocation(EmitContext& ctx); void EmitBarrier(EmitContext& ctx); void EmitWorkgroupMemoryBarrier(EmitContext& ctx); diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_integer.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_integer.cpp index 15fd233560..adcc0404b1 100644 --- a/src/shader_recompiler/backend/glasm/emit_glasm_integer.cpp +++ b/src/shader_recompiler/backend/glasm/emit_glasm_integer.cpp @@ -91,7 +91,8 @@ void EmitBitFieldInsert(EmitContext& ctx, IR::Inst& inst, ScalarS32 base, Scalar if (count.type != Type::Register && offset.type != Type::Register) { ctx.Add("BFI.S {},{{{},{},0,0}},{},{};", ret, count, offset, insert, base); } else { - ctx.Add("MOV.S RC.x,{};MOV.U RC.y,{};" + ctx.Add("MOV.S RC.x,{};" + "MOV.S RC.y,{};" "BFI.S {},RC,{},{};", count, offset, ret, insert, base); } @@ -103,7 +104,8 @@ void EmitBitFieldSExtract(EmitContext& ctx, IR::Inst& inst, ScalarS32 base, Scal if (count.type != Type::Register && offset.type != Type::Register) { ctx.Add("BFE.S {},{{{},{},0,0}},{};", ret, count, offset, base); } else { - ctx.Add("MOV.S RC.x,{};MOV.U RC.y,{};" + ctx.Add("MOV.S RC.x,{};" + "MOV.S RC.y,{};" "BFE.S {},RC,{};", count, offset, ret, base); } @@ -115,7 +117,8 @@ void EmitBitFieldUExtract(EmitContext& ctx, IR::Inst& inst, ScalarU32 base, Scal if (count.type != Type::Register && offset.type != Type::Register) { ctx.Add("BFE.U {},{{{},{},0,0}},{};", ret, count, offset, base); } else { - ctx.Add("MOV.U RC.x,{};MOV.U RC.y,{};" + ctx.Add("MOV.U RC.x,{};" + "MOV.U RC.y,{};" "BFE.U {},RC,{};", count, offset, ret, base); } diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp index b40d09f8c1..f37ad55879 100644 --- a/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp +++ b/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp @@ -23,34 +23,12 @@ void EmitPhi(EmitContext& ctx, IR::Inst& inst) { void EmitVoid(EmitContext&) {} -void EmitBranch(EmitContext& ctx) { - NotImplemented(); -} - -void EmitBranchConditional(EmitContext& ctx) { - NotImplemented(); -} - -void EmitLoopMerge(EmitContext& ctx) { - NotImplemented(); -} - -void EmitSelectionMerge(EmitContext& ctx) { - NotImplemented(); -} - -void EmitReturn(EmitContext& ctx) { - ctx.Add("RET;"); -} +void EmitBranchConditionRef(EmitContext&) {} void EmitJoin(EmitContext& ctx) { NotImplemented(); } -void EmitUnreachable(EmitContext& ctx) { - NotImplemented(); -} - void EmitDemoteToHelperInvocation(EmitContext& ctx) { NotImplemented(); } diff --git a/src/shader_recompiler/backend/spirv/emit_context.cpp b/src/shader_recompiler/backend/spirv/emit_context.cpp index 9759591bdd..a98e08392c 100644 --- a/src/shader_recompiler/backend/spirv/emit_context.cpp +++ b/src/shader_recompiler/backend/spirv/emit_context.cpp @@ -463,7 +463,6 @@ EmitContext::EmitContext(const Profile& profile_, IR::Program& program, Bindings DefineImages(program.info, image_binding); DefineAttributeMemAccess(program.info); DefineGlobalMemoryFunctions(program.info); - DefineLabels(program); } EmitContext::~EmitContext() = default; @@ -487,8 +486,6 @@ Id EmitContext::Def(const IR::Value& value) { return Const(value.F32()); case IR::Type::F64: return Constant(F64[1], value.F64()); - case IR::Type::Label: - return value.Label()->Definition<Id>(); default: throw 
NotImplementedException("Immediate type {}", value.Type()); } @@ -1139,12 +1136,6 @@ void EmitContext::DefineImages(const Info& info, u32& binding) { } } -void EmitContext::DefineLabels(IR::Program& program) { - for (IR::Block* const block : program.blocks) { - block->SetDefinition(OpLabel()); - } -} - void EmitContext::DefineInputs(const Info& info) { if (info.uses_workgroup_id) { workgroup_id = DefineInput(*this, U32[3], false, spv::BuiltIn::WorkgroupId); diff --git a/src/shader_recompiler/backend/spirv/emit_context.h b/src/shader_recompiler/backend/spirv/emit_context.h index 8b000f1ec8..d2b79f6c17 100644 --- a/src/shader_recompiler/backend/spirv/emit_context.h +++ b/src/shader_recompiler/backend/spirv/emit_context.h @@ -296,7 +296,6 @@ private: void DefineImages(const Info& info, u32& binding); void DefineAttributeMemAccess(const Info& info); void DefineGlobalMemoryFunctions(const Info& info); - void DefineLabels(IR::Program& program); void DefineInputs(const Info& info); void DefineOutputs(const IR::Program& program); diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.cpp b/src/shader_recompiler/backend/spirv/emit_spirv.cpp index 2dad87e872..c22edfec2a 100644 --- a/src/shader_recompiler/backend/spirv/emit_spirv.cpp +++ b/src/shader_recompiler/backend/spirv/emit_spirv.cpp @@ -41,8 +41,6 @@ ArgType Arg(EmitContext& ctx, const IR::Value& arg) { return arg; } else if constexpr (std::is_same_v<ArgType, u32>) { return arg.U32(); - } else if constexpr (std::is_same_v<ArgType, IR::Block*>) { - return arg.Label(); } else if constexpr (std::is_same_v<ArgType, IR::Attribute>) { return arg.Attribute(); } else if constexpr (std::is_same_v<ArgType, IR::Patch>) { @@ -109,15 +107,74 @@ Id TypeId(const EmitContext& ctx, IR::Type type) { } } +void Traverse(EmitContext& ctx, IR::Program& program) { + IR::Block* current_block{}; + for (const IR::AbstractSyntaxNode& node : program.syntax_list) { + switch (node.type) { + case IR::AbstractSyntaxNode::Type::Block: + const Id label{node.block->Definition<Id>()}; + if (current_block) { + ctx.OpBranch(label); + } + current_block = node.block; + ctx.AddLabel(label); + for (IR::Inst& inst : node.block->Instructions()) { + EmitInst(ctx, &inst); + } + break; + case IR::AbstractSyntaxNode::Type::If: { + const Id if_label{node.if_node.body->Definition<Id>()}; + const Id endif_label{node.if_node.merge->Definition<Id>()}; + ctx.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone); + ctx.OpBranchConditional(ctx.Def(node.if_node.cond), if_label, endif_label); + break; + } + case IR::AbstractSyntaxNode::Type::Loop: { + const Id body_label{node.loop.body->Definition<Id>()}; + const Id continue_label{node.loop.continue_block->Definition<Id>()}; + const Id endloop_label{node.loop.merge->Definition<Id>()}; + + ctx.OpLoopMerge(endloop_label, continue_label, spv::LoopControlMask::MaskNone); + ctx.OpBranch(node.loop.body->Definition<Id>()); + break; + } + case IR::AbstractSyntaxNode::Type::Break: { + const Id break_label{node.break_node.merge->Definition<Id>()}; + const Id skip_label{node.break_node.skip->Definition<Id>()}; + ctx.OpBranchConditional(ctx.Def(node.break_node.cond), break_label, skip_label); + break; + } + case IR::AbstractSyntaxNode::Type::EndIf: + if (current_block) { + ctx.OpBranch(node.end_if.merge->Definition<Id>()); + } + break; + case IR::AbstractSyntaxNode::Type::Repeat: { + const Id loop_header_label{node.repeat.loop_header->Definition<Id>()}; + const Id merge_label{node.repeat.merge->Definition<Id>()}; + 
ctx.OpBranchConditional(ctx.Def(node.repeat.cond), loop_header_label, merge_label); + break; + } + case IR::AbstractSyntaxNode::Type::Return: + ctx.OpReturn(); + break; + case IR::AbstractSyntaxNode::Type::Unreachable: + ctx.OpUnreachable(); + break; + } + if (node.type != IR::AbstractSyntaxNode::Type::Block) { + current_block = nullptr; + } + } +} + Id DefineMain(EmitContext& ctx, IR::Program& program) { const Id void_function{ctx.TypeFunction(ctx.void_id)}; const Id main{ctx.OpFunction(ctx.void_id, spv::FunctionControlMask::MaskNone, void_function)}; for (IR::Block* const block : program.blocks) { - ctx.AddLabel(block->Definition<Id>()); - for (IR::Inst& inst : block->Instructions()) { - EmitInst(ctx, &inst); - } + block->SetDefinition(ctx.OpLabel()); } + Traverse(ctx, program); ctx.OpFunctionEnd(); return main; } @@ -411,6 +468,8 @@ Id EmitIdentity(EmitContext& ctx, const IR::Value& value) { return id; } +void EmitBranchConditionRef(EmitContext&) {} + void EmitGetZeroFromOp(EmitContext&) { throw LogicError("Unreachable instruction"); } diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_control_flow.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_control_flow.cpp index 6154c46be4..d33486f282 100644 --- a/src/shader_recompiler/backend/spirv/emit_spirv_control_flow.cpp +++ b/src/shader_recompiler/backend/spirv/emit_spirv_control_flow.cpp @@ -7,40 +7,21 @@ namespace Shader::Backend::SPIRV { -void EmitBranch(EmitContext& ctx, Id label) { - ctx.OpBranch(label); -} - -void EmitBranchConditional(EmitContext& ctx, Id condition, Id true_label, Id false_label) { - ctx.OpBranchConditional(condition, true_label, false_label); -} - -void EmitLoopMerge(EmitContext& ctx, Id merge_label, Id continue_label) { - ctx.OpLoopMerge(merge_label, continue_label, spv::LoopControlMask::MaskNone); -} - -void EmitSelectionMerge(EmitContext& ctx, Id merge_label) { - ctx.OpSelectionMerge(merge_label, spv::SelectionControlMask::MaskNone); -} - -void EmitReturn(EmitContext& ctx) { - ctx.OpReturn(); -} - void EmitJoin(EmitContext&) { throw NotImplementedException("Join shouldn't be emitted"); } -void EmitUnreachable(EmitContext& ctx) { - ctx.OpUnreachable(); -} - -void EmitDemoteToHelperInvocation(EmitContext& ctx, Id continue_label) { +void EmitDemoteToHelperInvocation(EmitContext& ctx) { if (ctx.profile.support_demote_to_helper_invocation) { ctx.OpDemoteToHelperInvocationEXT(); - ctx.OpBranch(continue_label); } else { + const Id kill_label{ctx.OpLabel()}; + const Id impossible_label{ctx.OpLabel()}; + ctx.OpSelectionMerge(impossible_label, spv::SelectionControlMask::MaskNone); + ctx.OpBranchConditional(ctx.true_value, kill_label, impossible_label); + ctx.AddLabel(kill_label); ctx.OpKill(); + ctx.AddLabel(impossible_label); } } diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h index a1ca3f43de..2f4f6e59ed 100644 --- a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h +++ b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h @@ -23,14 +23,9 @@ class EmitContext; Id EmitPhi(EmitContext& ctx, IR::Inst* inst); void EmitVoid(EmitContext& ctx); Id EmitIdentity(EmitContext& ctx, const IR::Value& value); -void EmitBranch(EmitContext& ctx, Id label); -void EmitBranchConditional(EmitContext& ctx, Id condition, Id true_label, Id false_label); -void EmitLoopMerge(EmitContext& ctx, Id merge_label, Id continue_label); -void EmitSelectionMerge(EmitContext& ctx, Id merge_label); -void EmitReturn(EmitContext& 
ctx); +void EmitBranchConditionRef(EmitContext&); void EmitJoin(EmitContext& ctx); -void EmitUnreachable(EmitContext& ctx); -void EmitDemoteToHelperInvocation(EmitContext& ctx, Id continue_label); +void EmitDemoteToHelperInvocation(EmitContext& ctx); void EmitBarrier(EmitContext& ctx); void EmitWorkgroupMemoryBarrier(EmitContext& ctx); void EmitDeviceMemoryBarrier(EmitContext& ctx); diff --git a/src/shader_recompiler/frontend/ir/abstract_syntax_list.h b/src/shader_recompiler/frontend/ir/abstract_syntax_list.h new file mode 100644 index 0000000000..1366414c25 --- /dev/null +++ b/src/shader_recompiler/frontend/ir/abstract_syntax_list.h @@ -0,0 +1,56 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <vector> + +#include "shader_recompiler/frontend/ir/value.h" + +namespace Shader::IR { + +class Block; + +struct AbstractSyntaxNode { + enum class Type { + Block, + If, + EndIf, + Loop, + Repeat, + Break, + Return, + Unreachable, + }; + Type type{}; + union { + Block* block{}; + struct { + U1 cond; + Block* body; + Block* merge; + } if_node; + struct { + Block* merge; + } end_if; + struct { + Block* body; + Block* continue_block; + Block* merge; + } loop; + struct { + U1 cond; + Block* loop_header; + Block* merge; + } repeat; + struct { + U1 cond; + Block* merge; + Block* skip; + } break_node; + }; +}; +using AbstractSyntaxList = std::vector<AbstractSyntaxNode>; + +} // namespace Shader::IR diff --git a/src/shader_recompiler/frontend/ir/basic_block.cpp b/src/shader_recompiler/frontend/ir/basic_block.cpp index f92fc2571c..7c08b25ce2 100644 --- a/src/shader_recompiler/frontend/ir/basic_block.cpp +++ b/src/shader_recompiler/frontend/ir/basic_block.cpp @@ -14,10 +14,7 @@ namespace Shader::IR { -Block::Block(ObjectPool<Inst>& inst_pool_, u32 begin, u32 end) - : inst_pool{&inst_pool_}, location_begin{begin}, location_end{end} {} - -Block::Block(ObjectPool<Inst>& inst_pool_) : Block{inst_pool_, 0, 0} {} +Block::Block(ObjectPool<Inst>& inst_pool_) : inst_pool{&inst_pool_} {} Block::~Block() = default; @@ -40,39 +37,15 @@ Block::iterator Block::PrependNewInst(iterator insertion_point, Opcode op, return result_it; } -void Block::SetBranches(Condition cond, Block* branch_true_, Block* branch_false_) { - branch_cond = cond; - branch_true = branch_true_; - branch_false = branch_false_; -} - -void Block::SetBranch(Block* branch) { - branch_cond = Condition{true}; - branch_true = branch; -} - -void Block::SetReturn() { - branch_cond = Condition{true}; - branch_true = nullptr; - branch_false = nullptr; -} - -bool Block::IsVirtual() const noexcept { - return location_begin == location_end; -} - -u32 Block::LocationBegin() const noexcept { - return location_begin; -} - -u32 Block::LocationEnd() const noexcept { - return location_end; -} - -void Block::AddImmediatePredecessor(Block* block) { - if (std::ranges::find(imm_predecessors, block) == imm_predecessors.end()) { - imm_predecessors.push_back(block); +void Block::AddBranch(Block* block) { + if (std::ranges::find(imm_successors, block) != imm_successors.end()) { + throw LogicError("Successor already inserted"); + } + if (std::ranges::find(block->imm_predecessors, this) != block->imm_predecessors.end()) { + throw LogicError("Predecessor already inserted"); } + imm_successors.push_back(block); + block->imm_predecessors.push_back(this); } static std::string BlockToIndex(const std::map<const Block*, size_t>& block_to_index, @@ -92,15 +65,11 @@ static size_t 
InstIndex(std::map<const Inst*, size_t>& inst_to_index, size_t& in return it->second; } -static std::string ArgToIndex(const std::map<const Block*, size_t>& block_to_index, - std::map<const Inst*, size_t>& inst_to_index, size_t& inst_index, +static std::string ArgToIndex(std::map<const Inst*, size_t>& inst_to_index, size_t& inst_index, const Value& arg) { if (arg.IsEmpty()) { return "<null>"; } - if (arg.IsLabel()) { - return BlockToIndex(block_to_index, arg.Label()); - } if (!arg.IsImmediate() || arg.IsIdentity()) { return fmt::format("%{}", InstIndex(inst_to_index, inst_index, arg.Inst())); } @@ -140,8 +109,7 @@ std::string DumpBlock(const Block& block, const std::map<const Block*, size_t>& if (const auto it{block_to_index.find(&block)}; it != block_to_index.end()) { ret += fmt::format(" ${}", it->second); } - ret += fmt::format(": begin={:04x} end={:04x}\n", block.LocationBegin(), block.LocationEnd()); - + ret += '\n'; for (const Inst& inst : block) { const Opcode op{inst.GetOpcode()}; ret += fmt::format("[{:016x}] ", reinterpret_cast<u64>(&inst)); @@ -153,7 +121,7 @@ std::string DumpBlock(const Block& block, const std::map<const Block*, size_t>& const size_t arg_count{inst.NumArgs()}; for (size_t arg_index = 0; arg_index < arg_count; ++arg_index) { const Value arg{inst.Arg(arg_index)}; - const std::string arg_str{ArgToIndex(block_to_index, inst_to_index, inst_index, arg)}; + const std::string arg_str{ArgToIndex(inst_to_index, inst_index, arg)}; ret += arg_index != 0 ? ", " : " "; if (op == Opcode::Phi) { ret += fmt::format("[ {}, {} ]", arg_str, diff --git a/src/shader_recompiler/frontend/ir/basic_block.h b/src/shader_recompiler/frontend/ir/basic_block.h index 0b0c97af6b..7e134b4c78 100644 --- a/src/shader_recompiler/frontend/ir/basic_block.h +++ b/src/shader_recompiler/frontend/ir/basic_block.h @@ -12,6 +12,7 @@ #include <boost/intrusive/list.hpp> #include "common/bit_cast.h" +#include "common/common_types.h" #include "shader_recompiler/frontend/ir/condition.h" #include "shader_recompiler/frontend/ir/value.h" #include "shader_recompiler/object_pool.h" @@ -27,7 +28,6 @@ public: using reverse_iterator = InstructionList::reverse_iterator; using const_reverse_iterator = InstructionList::const_reverse_iterator; - explicit Block(ObjectPool<Inst>& inst_pool_, u32 begin, u32 end); explicit Block(ObjectPool<Inst>& inst_pool_); ~Block(); @@ -44,22 +44,8 @@ public: iterator PrependNewInst(iterator insertion_point, Opcode op, std::initializer_list<Value> args = {}, u32 flags = 0); - /// Set the branches to jump to when all instructions have executed. - void SetBranches(Condition cond, Block* branch_true, Block* branch_false); - /// Set the branch to unconditionally jump to when all instructions have executed. - void SetBranch(Block* branch); - /// Mark the block as a return block. - void SetReturn(); - - /// Returns true when the block does not implement any guest instructions directly. - [[nodiscard]] bool IsVirtual() const noexcept; - /// Gets the starting location of this basic block. - [[nodiscard]] u32 LocationBegin() const noexcept; - /// Gets the end location for this basic block. - [[nodiscard]] u32 LocationEnd() const noexcept; - - /// Adds a new immediate predecessor to this basic block. - void AddImmediatePredecessor(Block* block); + /// Adds a new branch to this basic block. + void AddBranch(Block* block); /// Gets a mutable reference to the instruction list for this basic block. 
[[nodiscard]] InstructionList& Instructions() noexcept { @@ -71,9 +57,13 @@ public: } /// Gets an immutable span to the immediate predecessors. - [[nodiscard]] std::span<Block* const> ImmediatePredecessors() const noexcept { + [[nodiscard]] std::span<Block* const> ImmPredecessors() const noexcept { return imm_predecessors; } + /// Gets an immutable span to the immediate successors. + [[nodiscard]] std::span<Block* const> ImmSuccessors() const noexcept { + return imm_successors; + } /// Intrusively store the host definition of this instruction. template <typename DefinitionType> @@ -87,19 +77,6 @@ public: return Common::BitCast<DefinitionType>(definition); } - [[nodiscard]] Condition BranchCondition() const noexcept { - return branch_cond; - } - [[nodiscard]] bool IsTerminationBlock() const noexcept { - return !branch_true && !branch_false; - } - [[nodiscard]] Block* TrueBranch() const noexcept { - return branch_true; - } - [[nodiscard]] Block* FalseBranch() const noexcept { - return branch_false; - } - void SetSsaRegValue(IR::Reg reg, const Value& value) noexcept { ssa_reg_values[RegIndex(reg)] = value; } @@ -178,22 +155,14 @@ public: private: /// Memory pool for instruction list ObjectPool<Inst>* inst_pool; - /// Starting location of this block - u32 location_begin; - /// End location of this block - u32 location_end; /// List of instructions in this block InstructionList instructions; - /// Condition to choose the branch to take - Condition branch_cond{true}; - /// Block to jump into when the branch condition evaluates as true - Block* branch_true{nullptr}; - /// Block to jump into when the branch condition evaluates as false - Block* branch_false{nullptr}; /// Block immediate predecessors std::vector<Block*> imm_predecessors; + /// Block immediate successors + std::vector<Block*> imm_successors; /// Intrusively store the value of a register in the block. 
std::array<Value, NUM_REGS> ssa_reg_values; diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.cpp b/src/shader_recompiler/frontend/ir/ir_emitter.cpp index ce6c9af073..eb45aa4772 100644 --- a/src/shader_recompiler/frontend/ir/ir_emitter.cpp +++ b/src/shader_recompiler/frontend/ir/ir_emitter.cpp @@ -61,25 +61,28 @@ F64 IREmitter::Imm64(f64 value) const { return F64{Value{value}}; } -void IREmitter::Branch(Block* label) { - label->AddImmediatePredecessor(block); - block->SetBranch(label); - Inst(Opcode::Branch, label); +void IREmitter::Prologue() { + Inst(Opcode::Prologue); } -void IREmitter::BranchConditional(const U1& condition, Block* true_label, Block* false_label) { - block->SetBranches(IR::Condition{true}, true_label, false_label); - true_label->AddImmediatePredecessor(block); - false_label->AddImmediatePredecessor(block); - Inst(Opcode::BranchConditional, condition, true_label, false_label); +void IREmitter::Epilogue() { + Inst(Opcode::Epilogue); } -void IREmitter::LoopMerge(Block* merge_block, Block* continue_target) { - Inst(Opcode::LoopMerge, merge_block, continue_target); +void IREmitter::BranchConditionRef(const U1& cond) { + Inst(Opcode::BranchConditionRef, cond); } -void IREmitter::SelectionMerge(Block* merge_block) { - Inst(Opcode::SelectionMerge, merge_block); +void IREmitter::DemoteToHelperInvocation() { + Inst(Opcode::DemoteToHelperInvocation); +} + +void IREmitter::EmitVertex(const U32& stream) { + Inst(Opcode::EmitVertex, stream); +} + +void IREmitter::EndPrimitive(const U32& stream) { + Inst(Opcode::EndPrimitive, stream); } void IREmitter::Barrier() { @@ -94,37 +97,6 @@ void IREmitter::DeviceMemoryBarrier() { Inst(Opcode::DeviceMemoryBarrier); } -void IREmitter::Return() { - block->SetReturn(); - Inst(Opcode::Return); -} - -void IREmitter::Unreachable() { - Inst(Opcode::Unreachable); -} - -void IREmitter::DemoteToHelperInvocation(Block* continue_label) { - block->SetBranch(continue_label); - continue_label->AddImmediatePredecessor(block); - Inst(Opcode::DemoteToHelperInvocation, continue_label); -} - -void IREmitter::Prologue() { - Inst(Opcode::Prologue); -} - -void IREmitter::Epilogue() { - Inst(Opcode::Epilogue); -} - -void IREmitter::EmitVertex(const U32& stream) { - Inst(Opcode::EmitVertex, stream); -} - -void IREmitter::EndPrimitive(const U32& stream) { - Inst(Opcode::EndPrimitive, stream); -} - U32 IREmitter::GetReg(IR::Reg reg) { return Inst<U32>(Opcode::GetRegister, reg); } diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.h b/src/shader_recompiler/frontend/ir/ir_emitter.h index fd41b7e893..7a83c33d33 100644 --- a/src/shader_recompiler/frontend/ir/ir_emitter.h +++ b/src/shader_recompiler/frontend/ir/ir_emitter.h @@ -32,17 +32,10 @@ public: [[nodiscard]] U64 Imm64(s64 value) const; [[nodiscard]] F64 Imm64(f64 value) const; - void Branch(Block* label); - void BranchConditional(const U1& condition, Block* true_label, Block* false_label); - void LoopMerge(Block* merge_block, Block* continue_target); - void SelectionMerge(Block* merge_block); - void Return(); - void Unreachable(); - void DemoteToHelperInvocation(Block* continue_label); - void Prologue(); void Epilogue(); - + void BranchConditionRef(const U1& cond); + void DemoteToHelperInvocation(); void EmitVertex(const U32& stream); void EndPrimitive(const U32& stream); diff --git a/src/shader_recompiler/frontend/ir/microinstruction.cpp b/src/shader_recompiler/frontend/ir/microinstruction.cpp index 616ef17d4e..3645742401 100644 --- a/src/shader_recompiler/frontend/ir/microinstruction.cpp +++ 
b/src/shader_recompiler/frontend/ir/microinstruction.cpp @@ -56,19 +56,14 @@ Inst::~Inst() { bool Inst::MayHaveSideEffects() const noexcept { switch (op) { - case Opcode::Branch: - case Opcode::BranchConditional: - case Opcode::LoopMerge: - case Opcode::SelectionMerge: - case Opcode::Return: + case Opcode::Prologue: + case Opcode::Epilogue: + case Opcode::BranchConditionRef: case Opcode::Join: - case Opcode::Unreachable: case Opcode::DemoteToHelperInvocation: case Opcode::Barrier: case Opcode::WorkgroupMemoryBarrier: case Opcode::DeviceMemoryBarrier: - case Opcode::Prologue: - case Opcode::Epilogue: case Opcode::EmitVertex: case Opcode::EndPrimitive: case Opcode::SetAttribute: diff --git a/src/shader_recompiler/frontend/ir/opcodes.h b/src/shader_recompiler/frontend/ir/opcodes.h index 2b9c0ed8cc..56b001902c 100644 --- a/src/shader_recompiler/frontend/ir/opcodes.h +++ b/src/shader_recompiler/frontend/ir/opcodes.h @@ -30,7 +30,6 @@ struct OpcodeMeta { // using enum Type; constexpr Type Void{Type::Void}; constexpr Type Opaque{Type::Opaque}; -constexpr Type Label{Type::Label}; constexpr Type Reg{Type::Reg}; constexpr Type Pred{Type::Pred}; constexpr Type Attribute{Type::Attribute}; diff --git a/src/shader_recompiler/frontend/ir/opcodes.inc b/src/shader_recompiler/frontend/ir/opcodes.inc index 9165421f89..75ddb6b6f8 100644 --- a/src/shader_recompiler/frontend/ir/opcodes.inc +++ b/src/shader_recompiler/frontend/ir/opcodes.inc @@ -7,27 +7,20 @@ OPCODE(Phi, Opaque, OPCODE(Identity, Opaque, Opaque, ) OPCODE(Void, Void, ) -// Control flow -OPCODE(Branch, Void, Label, ) -OPCODE(BranchConditional, Void, U1, Label, Label, ) -OPCODE(LoopMerge, Void, Label, Label, ) -OPCODE(SelectionMerge, Void, Label, ) -OPCODE(Return, Void, ) +// Special operations +OPCODE(Prologue, Void, ) +OPCODE(Epilogue, Void, ) +OPCODE(BranchConditionRef, Void, U1, ) OPCODE(Join, Void, ) -OPCODE(Unreachable, Void, ) -OPCODE(DemoteToHelperInvocation, Void, Label, ) +OPCODE(DemoteToHelperInvocation, Void, ) +OPCODE(EmitVertex, Void, U32, ) +OPCODE(EndPrimitive, Void, U32, ) // Barriers OPCODE(Barrier, Void, ) OPCODE(WorkgroupMemoryBarrier, Void, ) OPCODE(DeviceMemoryBarrier, Void, ) -// Special operations -OPCODE(Prologue, Void, ) -OPCODE(Epilogue, Void, ) -OPCODE(EmitVertex, Void, U32, ) -OPCODE(EndPrimitive, Void, U32, ) - // Context getters/setters OPCODE(GetRegister, U32, Reg, ) OPCODE(SetRegister, Void, Reg, U32, ) diff --git a/src/shader_recompiler/frontend/ir/post_order.cpp b/src/shader_recompiler/frontend/ir/post_order.cpp index 8709a2ea1e..1a28df7fbe 100644 --- a/src/shader_recompiler/frontend/ir/post_order.cpp +++ b/src/shader_recompiler/frontend/ir/post_order.cpp @@ -2,6 +2,8 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
+#include <algorithm> + #include <boost/container/flat_set.hpp> #include <boost/container/small_vector.hpp> @@ -10,35 +12,31 @@ namespace Shader::IR { -BlockList PostOrder(const BlockList& blocks) { +BlockList PostOrder(const AbstractSyntaxNode& root) { boost::container::small_vector<Block*, 16> block_stack; boost::container::flat_set<Block*> visited; - BlockList post_order_blocks; - post_order_blocks.reserve(blocks.size()); - Block* const first_block{blocks.front()}; + if (root.type != AbstractSyntaxNode::Type::Block) { + throw LogicError("First node in abstract syntax list root is not a block"); + } + Block* const first_block{root.block}; visited.insert(first_block); block_stack.push_back(first_block); - const auto visit_branch = [&](Block* block, Block* branch) { - if (!branch) { - return false; - } - if (!visited.insert(branch).second) { - return false; - } - // Calling push_back twice is faster than insert on MSVC - block_stack.push_back(block); - block_stack.push_back(branch); - return true; - }; while (!block_stack.empty()) { Block* const block{block_stack.back()}; + const auto visit{[&](Block* branch) { + if (!visited.insert(branch).second) { + return false; + } + // Calling push_back twice is faster than insert on MSVC + block_stack.push_back(block); + block_stack.push_back(branch); + return true; + }}; block_stack.pop_back(); - - if (!visit_branch(block, block->TrueBranch()) && - !visit_branch(block, block->FalseBranch())) { + if (std::ranges::none_of(block->ImmSuccessors(), visit)) { post_order_blocks.push_back(block); } } diff --git a/src/shader_recompiler/frontend/ir/post_order.h b/src/shader_recompiler/frontend/ir/post_order.h index 30137ff57a..58a0467a03 100644 --- a/src/shader_recompiler/frontend/ir/post_order.h +++ b/src/shader_recompiler/frontend/ir/post_order.h @@ -5,9 +5,10 @@ #pragma once #include "shader_recompiler/frontend/ir/basic_block.h" +#include "shader_recompiler/frontend/ir/abstract_syntax_list.h" namespace Shader::IR { -BlockList PostOrder(const BlockList& blocks); +BlockList PostOrder(const AbstractSyntaxNode& root); } // namespace Shader::IR diff --git a/src/shader_recompiler/frontend/ir/program.h b/src/shader_recompiler/frontend/ir/program.h index 51e1a8c779..9ede5b48d9 100644 --- a/src/shader_recompiler/frontend/ir/program.h +++ b/src/shader_recompiler/frontend/ir/program.h @@ -7,8 +7,7 @@ #include <array> #include <string> -#include <boost/container/small_vector.hpp> - +#include "shader_recompiler/frontend/ir/abstract_syntax_list.h" #include "shader_recompiler/frontend/ir/basic_block.h" #include "shader_recompiler/program_header.h" #include "shader_recompiler/shader_info.h" @@ -17,6 +16,7 @@ namespace Shader::IR { struct Program { + AbstractSyntaxList syntax_list; BlockList blocks; BlockList post_order_blocks; Info info; diff --git a/src/shader_recompiler/frontend/ir/type.h b/src/shader_recompiler/frontend/ir/type.h index 8b3b338528..294b230c49 100644 --- a/src/shader_recompiler/frontend/ir/type.h +++ b/src/shader_recompiler/frontend/ir/type.h @@ -16,31 +16,30 @@ namespace Shader::IR { enum class Type { Void = 0, Opaque = 1 << 0, - Label = 1 << 1, - Reg = 1 << 2, - Pred = 1 << 3, - Attribute = 1 << 4, - Patch = 1 << 5, - U1 = 1 << 6, - U8 = 1 << 7, - U16 = 1 << 8, - U32 = 1 << 9, - U64 = 1 << 10, - F16 = 1 << 11, - F32 = 1 << 12, - F64 = 1 << 13, - U32x2 = 1 << 14, - U32x3 = 1 << 15, - U32x4 = 1 << 16, - F16x2 = 1 << 17, - F16x3 = 1 << 18, - F16x4 = 1 << 19, - F32x2 = 1 << 20, - F32x3 = 1 << 21, - F32x4 = 1 << 22, - F64x2 = 1 << 23, - F64x3 = 1 << 24, - 
F64x4 = 1 << 25, + Reg = 1 << 1, + Pred = 1 << 2, + Attribute = 1 << 3, + Patch = 1 << 4, + U1 = 1 << 5, + U8 = 1 << 6, + U16 = 1 << 7, + U32 = 1 << 8, + U64 = 1 << 9, + F16 = 1 << 10, + F32 = 1 << 11, + F64 = 1 << 12, + U32x2 = 1 << 13, + U32x3 = 1 << 14, + U32x4 = 1 << 15, + F16x2 = 1 << 16, + F16x3 = 1 << 17, + F16x4 = 1 << 18, + F32x2 = 1 << 19, + F32x3 = 1 << 20, + F32x4 = 1 << 21, + F64x2 = 1 << 22, + F64x3 = 1 << 23, + F64x4 = 1 << 24, }; DECLARE_ENUM_FLAG_OPERATORS(Type) diff --git a/src/shader_recompiler/frontend/ir/value.cpp b/src/shader_recompiler/frontend/ir/value.cpp index b962f170d7..d365ea1bcb 100644 --- a/src/shader_recompiler/frontend/ir/value.cpp +++ b/src/shader_recompiler/frontend/ir/value.cpp @@ -9,8 +9,6 @@ namespace Shader::IR { Value::Value(IR::Inst* value) noexcept : type{Type::Opaque}, inst{value} {} -Value::Value(IR::Block* value) noexcept : type{Type::Label}, label{value} {} - Value::Value(IR::Reg value) noexcept : type{Type::Reg}, reg{value} {} Value::Value(IR::Pred value) noexcept : type{Type::Pred}, pred{value} {} @@ -33,10 +31,6 @@ Value::Value(u64 value) noexcept : type{Type::U64}, imm_u64{value} {} Value::Value(f64 value) noexcept : type{Type::F64}, imm_f64{value} {} -bool Value::IsLabel() const noexcept { - return type == Type::Label; -} - IR::Type Value::Type() const noexcept { if (IsPhi()) { // The type of a phi node is stored in its flags @@ -60,8 +54,6 @@ bool Value::operator==(const Value& other) const { return true; case Type::Opaque: return inst == other.inst; - case Type::Label: - return label == other.label; case Type::Reg: return reg == other.reg; case Type::Pred: diff --git a/src/shader_recompiler/frontend/ir/value.h b/src/shader_recompiler/frontend/ir/value.h index beaf149f3f..2ce49f953d 100644 --- a/src/shader_recompiler/frontend/ir/value.h +++ b/src/shader_recompiler/frontend/ir/value.h @@ -37,7 +37,6 @@ class Value { public: Value() noexcept = default; explicit Value(IR::Inst* value) noexcept; - explicit Value(IR::Block* value) noexcept; explicit Value(IR::Reg value) noexcept; explicit Value(IR::Pred value) noexcept; explicit Value(IR::Attribute value) noexcept; @@ -54,11 +53,9 @@ public: [[nodiscard]] bool IsPhi() const noexcept; [[nodiscard]] bool IsEmpty() const noexcept; [[nodiscard]] bool IsImmediate() const noexcept; - [[nodiscard]] bool IsLabel() const noexcept; [[nodiscard]] IR::Type Type() const noexcept; [[nodiscard]] IR::Inst* Inst() const; - [[nodiscard]] IR::Block* Label() const; [[nodiscard]] IR::Inst* InstRecursive() const; [[nodiscard]] IR::Value Resolve() const; [[nodiscard]] IR::Reg Reg() const; @@ -80,7 +77,6 @@ private: IR::Type type{}; union { IR::Inst* inst{}; - IR::Block* label; IR::Reg reg; IR::Pred pred; IR::Attribute attribute; @@ -304,11 +300,6 @@ inline IR::Inst* Value::Inst() const { return inst; } -inline IR::Block* Value::Label() const { - DEBUG_ASSERT(type == Type::Label); - return label; -} - inline IR::Inst* Value::InstRecursive() const { DEBUG_ASSERT(type == Type::Opaque); if (IsIdentity()) { diff --git a/src/shader_recompiler/frontend/maxwell/program.cpp b/src/shader_recompiler/frontend/maxwell/program.cpp index 0d3f006991..017c4b8fdc 100644 --- a/src/shader_recompiler/frontend/maxwell/program.cpp +++ b/src/shader_recompiler/frontend/maxwell/program.cpp @@ -4,6 +4,7 @@ #include <algorithm> #include <memory> +#include <ranges> #include <vector> #include "shader_recompiler/frontend/ir/basic_block.h" @@ -15,6 +16,16 @@ namespace Shader::Maxwell { namespace { +IR::BlockList GenerateBlocks(const 
IR::AbstractSyntaxList& syntax_list) { + auto syntax_blocks{syntax_list | std::views::filter([](const auto& node) { + return node.type == IR::AbstractSyntaxNode::Type::Block; + })}; + IR::BlockList blocks(std::ranges::distance(syntax_blocks)); + std::ranges::transform(syntax_blocks, blocks.begin(), + [](const IR::AbstractSyntaxNode& node) { return node.block; }); + return blocks; +} + void RemoveUnreachableBlocks(IR::Program& program) { // Some blocks might be unreachable if a function call exists unconditionally // If this happens the number of blocks and post order blocks will mismatch @@ -23,7 +34,7 @@ void RemoveUnreachableBlocks(IR::Program& program) { } const auto begin{program.blocks.begin() + 1}; const auto end{program.blocks.end()}; - const auto pred{[](IR::Block* block) { return block->ImmediatePredecessors().empty(); }}; + const auto pred{[](IR::Block* block) { return block->ImmPredecessors().empty(); }}; program.blocks.erase(std::remove_if(begin, end, pred), end); } @@ -110,8 +121,9 @@ void AddNVNStorageBuffers(IR::Program& program) { IR::Program TranslateProgram(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Block>& block_pool, Environment& env, Flow::CFG& cfg) { IR::Program program; - program.blocks = VisitAST(inst_pool, block_pool, env, cfg); - program.post_order_blocks = PostOrder(program.blocks); + program.syntax_list = BuildASL(inst_pool, block_pool, env, cfg); + program.blocks = GenerateBlocks(program.syntax_list); + program.post_order_blocks = PostOrder(program.syntax_list.front()); program.stage = env.ShaderStage(); program.local_memory_size = env.LocalMemorySize(); switch (program.stage) { @@ -159,9 +171,7 @@ IR::Program MergeDualVertexPrograms(IR::Program& vertex_a, IR::Program& vertex_b Optimization::VertexATransformPass(vertex_a); Optimization::VertexBTransformPass(vertex_b); std::swap(result.blocks, vertex_a.blocks); - for (IR::Block* block : vertex_b.blocks) { - result.blocks.push_back(block); - } + result.blocks.insert(result.blocks.end(), vertex_b.blocks.begin(), vertex_b.blocks.end()); result.stage = Stage::VertexB; result.info = vertex_a.info; result.local_memory_size = std::max(vertex_a.local_memory_size, vertex_b.local_memory_size); @@ -173,7 +183,7 @@ IR::Program MergeDualVertexPrograms(IR::Program& vertex_a, IR::Program& vertex_b Optimization::JoinTextureInfo(result.info, vertex_b.info); Optimization::JoinStorageInfo(result.info, vertex_b.info); Optimization::DualVertexJoinPass(result); - result.post_order_blocks = PostOrder(result.blocks); + result.post_order_blocks = PostOrder(result.syntax_list.front()); Optimization::DeadCodeEliminationPass(result); Optimization::VerificationPass(result); Optimization::CollectShaderInfoPass(env_vertex_b, result); diff --git a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp index cc5410c6df..e7e2e9c826 100644 --- a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp +++ b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp @@ -36,7 +36,6 @@ using Tree = boost::intrusive::list<Statement, // Avoid linear complexity on splice, size is never called boost::intrusive::constant_time_size<false>>; using Node = Tree::iterator; -using ConstNode = Tree::const_iterator; enum class StatementType { Code, @@ -91,7 +90,8 @@ struct IndirectBranchCond {}; #pragma warning(disable : 26495) // Always initialize a member variable, expected in Statement #endif struct Statement : ListBaseHook { - Statement(IR::Block* code_, 
Statement* up_) : code{code_}, up{up_}, type{StatementType::Code} {} + Statement(const Flow::Block* block_, Statement* up_) + : block{block_}, up{up_}, type{StatementType::Code} {} Statement(Goto, Statement* cond_, Node label_, Statement* up_) : label{label_}, cond{cond_}, up{up_}, type{StatementType::Goto} {} Statement(Label, u32 id_, Statement* up_) : id{id_}, up{up_}, type{StatementType::Label} {} @@ -125,7 +125,7 @@ struct Statement : ListBaseHook { } union { - IR::Block* code; + const Flow::Block* block; Node label; Tree children; IR::Condition guest_cond; @@ -171,8 +171,8 @@ std::string DumpTree(const Tree& tree, u32 indentation = 0) { switch (stmt->type) { case StatementType::Code: ret += fmt::format("{} Block {:04x} -> {:04x} (0x{:016x});\n", indent, - stmt->code->LocationBegin(), stmt->code->LocationEnd(), - reinterpret_cast<uintptr_t>(stmt->code)); + stmt->block->begin, stmt->block->end, + reinterpret_cast<uintptr_t>(stmt->block)); break; case StatementType::Goto: ret += fmt::format("{} if ({}) goto L{};\n", indent, DumpExpr(stmt->cond), @@ -407,11 +407,7 @@ private: }}; root.push_front(make_reset_variable()); root.insert(ip, make_reset_variable()); - - const u32 begin_offset{block.begin.Offset()}; - const u32 end_offset{block.end.Offset()}; - IR::Block* const ir_block{block_pool.Create(inst_pool, begin_offset, end_offset)}; - root.insert(ip, *pool.Create(ir_block, &root_stmt)); + root.insert(ip, *pool.Create(&block, &root_stmt)); switch (block.end_class) { case Flow::EndClass::Branch: { @@ -620,13 +616,13 @@ private: Statement root_stmt{FunctionTag{}}; }; -IR::Block* TryFindForwardBlock(const Statement& stmt) { - const Tree& tree{stmt.up->children}; - const ConstNode end{tree.cend()}; - ConstNode forward_node{std::next(Tree::s_iterator_to(stmt))}; +[[nodiscard]] Statement* TryFindForwardBlock(Statement& stmt) { + Tree& tree{stmt.up->children}; + const Node end{tree.end()}; + Node forward_node{std::next(Tree::s_iterator_to(stmt))}; while (forward_node != end && !HasChildren(forward_node->type)) { if (forward_node->type == StatementType::Code) { - return forward_node->code; + return &*forward_node; } ++forward_node; } @@ -654,21 +650,29 @@ class TranslatePass { public: TranslatePass(ObjectPool<IR::Inst>& inst_pool_, ObjectPool<IR::Block>& block_pool_, ObjectPool<Statement>& stmt_pool_, Environment& env_, Statement& root_stmt, - IR::BlockList& block_list_) + IR::AbstractSyntaxList& syntax_list_) : stmt_pool{stmt_pool_}, inst_pool{inst_pool_}, block_pool{block_pool_}, env{env_}, - block_list{block_list_} { + syntax_list{syntax_list_} { Visit(root_stmt, nullptr, nullptr); - IR::Block& first_block{*block_list.front()}; + IR::Block& first_block{*syntax_list.front().block}; IR::IREmitter ir{first_block, first_block.begin()}; ir.Prologue(); } private: - void Visit(Statement& parent, IR::Block* continue_block, IR::Block* break_block) { + void Visit(Statement& parent, IR::Block* break_block, IR::Block* fallthrough_block) { + IR::Block* current_block{}; + const auto ensure_block{[&] { + if (current_block) { + return; + } + current_block = block_pool.Create(inst_pool); + auto& node{syntax_list.emplace_back()}; + node.type = IR::AbstractSyntaxNode::Type::Block; + node.block = current_block; + }}; Tree& tree{parent.children}; - IR::Block* current_block{nullptr}; - for (auto it = tree.begin(); it != tree.end(); ++it) { Statement& stmt{*it}; switch (stmt.type) { @@ -676,124 +680,157 @@ private: // Labels can be ignored break; case StatementType::Code: { - if (current_block && current_block != 
stmt.code) { - IR::IREmitter{*current_block}.Branch(stmt.code); - } - current_block = stmt.code; - Translate(env, stmt.code); - block_list.push_back(stmt.code); + ensure_block(); + Translate(env, current_block, stmt.block->begin.Offset(), stmt.block->end.Offset()); break; } case StatementType::SetVariable: { - if (!current_block) { - current_block = MergeBlock(parent, stmt); - } + ensure_block(); IR::IREmitter ir{*current_block}; ir.SetGotoVariable(stmt.id, VisitExpr(ir, *stmt.op)); break; } case StatementType::SetIndirectBranchVariable: { - if (!current_block) { - current_block = MergeBlock(parent, stmt); - } + ensure_block(); IR::IREmitter ir{*current_block}; IR::U32 address{ir.IAdd(ir.GetReg(stmt.branch_reg), ir.Imm32(stmt.branch_offset))}; ir.SetIndirectBranchVariable(address); break; } case StatementType::If: { - if (!current_block) { - current_block = block_pool.Create(inst_pool); - block_list.push_back(current_block); - } + ensure_block(); IR::Block* const merge_block{MergeBlock(parent, stmt)}; - // Visit children - const size_t first_block_index{block_list.size()}; - Visit(stmt, merge_block, break_block); - // Implement if header block - IR::Block* const first_if_block{block_list.at(first_block_index)}; IR::IREmitter ir{*current_block}; const IR::U1 cond{VisitExpr(ir, *stmt.cond)}; - ir.SelectionMerge(merge_block); - ir.BranchConditional(cond, first_if_block, merge_block); + ir.BranchConditionRef(cond); + const size_t if_node_index{syntax_list.size()}; + syntax_list.emplace_back(); + + // Visit children + const size_t then_block_index{syntax_list.size()}; + Visit(stmt, break_block, merge_block); + + IR::Block* const then_block{syntax_list.at(then_block_index).block}; + current_block->AddBranch(then_block); + current_block->AddBranch(merge_block); current_block = merge_block; + + auto& if_node{syntax_list[if_node_index]}; + if_node.type = IR::AbstractSyntaxNode::Type::If; + if_node.if_node.cond = cond; + if_node.if_node.body = then_block; + if_node.if_node.merge = merge_block; + + auto& endif_node{syntax_list.emplace_back()}; + endif_node.type = IR::AbstractSyntaxNode::Type::EndIf; + endif_node.end_if.merge = merge_block; + + auto& merge{syntax_list.emplace_back()}; + merge.type = IR::AbstractSyntaxNode::Type::Block; + merge.block = merge_block; break; } case StatementType::Loop: { IR::Block* const loop_header_block{block_pool.Create(inst_pool)}; if (current_block) { - IR::IREmitter{*current_block}.Branch(loop_header_block); + current_block->AddBranch(loop_header_block); } - block_list.push_back(loop_header_block); + auto& header_node{syntax_list.emplace_back()}; + header_node.type = IR::AbstractSyntaxNode::Type::Block; + header_node.block = loop_header_block; - IR::Block* const new_continue_block{block_pool.Create(inst_pool)}; + IR::Block* const continue_block{block_pool.Create(inst_pool)}; IR::Block* const merge_block{MergeBlock(parent, stmt)}; + const size_t loop_node_index{syntax_list.size()}; + syntax_list.emplace_back(); + // Visit children - const size_t first_block_index{block_list.size()}; - Visit(stmt, new_continue_block, merge_block); + const size_t body_block_index{syntax_list.size()}; + Visit(stmt, merge_block, continue_block); // The continue block is located at the end of the loop - block_list.push_back(new_continue_block); + IR::IREmitter ir{*continue_block}; + const IR::U1 cond{VisitExpr(ir, *stmt.cond)}; + ir.BranchConditionRef(cond); - // Implement loop header block - IR::Block* const first_loop_block{block_list.at(first_block_index)}; - IR::IREmitter 
ir{*loop_header_block}; - ir.LoopMerge(merge_block, new_continue_block); - ir.Branch(first_loop_block); + IR::Block* const body_block{syntax_list.at(body_block_index).block}; + loop_header_block->AddBranch(body_block); - // Implement continue block - IR::IREmitter continue_ir{*new_continue_block}; - const IR::U1 continue_cond{VisitExpr(continue_ir, *stmt.cond)}; - continue_ir.BranchConditional(continue_cond, ir.block, merge_block); + continue_block->AddBranch(loop_header_block); + continue_block->AddBranch(merge_block); current_block = merge_block; + + auto& loop{syntax_list[loop_node_index]}; + loop.type = IR::AbstractSyntaxNode::Type::Loop; + loop.loop.body = body_block; + loop.loop.continue_block = continue_block; + loop.loop.merge = merge_block; + + auto& continue_block_node{syntax_list.emplace_back()}; + continue_block_node.type = IR::AbstractSyntaxNode::Type::Block; + continue_block_node.block = continue_block; + + auto& repeat{syntax_list.emplace_back()}; + repeat.type = IR::AbstractSyntaxNode::Type::Repeat; + repeat.repeat.cond = cond; + repeat.repeat.loop_header = loop_header_block; + repeat.repeat.merge = merge_block; + + auto& merge{syntax_list.emplace_back()}; + merge.type = IR::AbstractSyntaxNode::Type::Block; + merge.block = merge_block; break; } case StatementType::Break: { - if (!current_block) { - current_block = block_pool.Create(inst_pool); - block_list.push_back(current_block); - } + ensure_block(); IR::Block* const skip_block{MergeBlock(parent, stmt)}; IR::IREmitter ir{*current_block}; - ir.BranchConditional(VisitExpr(ir, *stmt.cond), break_block, skip_block); - + const IR::U1 cond{VisitExpr(ir, *stmt.cond)}; + ir.BranchConditionRef(cond); + current_block->AddBranch(break_block); + current_block->AddBranch(skip_block); current_block = skip_block; + + auto& break_node{syntax_list.emplace_back()}; + break_node.type = IR::AbstractSyntaxNode::Type::Break; + break_node.break_node.cond = cond; + break_node.break_node.merge = break_block; + break_node.break_node.skip = skip_block; + + auto& merge{syntax_list.emplace_back()}; + merge.type = IR::AbstractSyntaxNode::Type::Block; + merge.block = skip_block; break; } case StatementType::Return: { - if (!current_block) { - current_block = block_pool.Create(inst_pool); - block_list.push_back(current_block); - } - IR::IREmitter ir{*current_block}; - ir.Epilogue(); - ir.Return(); + ensure_block(); + IR::IREmitter{*current_block}.Epilogue(); current_block = nullptr; + syntax_list.emplace_back().type = IR::AbstractSyntaxNode::Type::Return; break; } case StatementType::Kill: { - if (!current_block) { - current_block = block_pool.Create(inst_pool); - block_list.push_back(current_block); - } + ensure_block(); IR::Block* demote_block{MergeBlock(parent, stmt)}; - IR::IREmitter{*current_block}.DemoteToHelperInvocation(demote_block); + IR::IREmitter{*current_block}.DemoteToHelperInvocation(); + current_block->AddBranch(demote_block); current_block = demote_block; + + auto& merge{syntax_list.emplace_back()}; + merge.type = IR::AbstractSyntaxNode::Type::Block; + merge.block = demote_block; break; } case StatementType::Unreachable: { - if (!current_block) { - current_block = block_pool.Create(inst_pool); - block_list.push_back(current_block); - } - IR::IREmitter{*current_block}.Unreachable(); + ensure_block(); current_block = nullptr; + syntax_list.emplace_back().type = IR::AbstractSyntaxNode::Type::Unreachable; break; } default: @@ -801,42 +838,42 @@ private: } } if (current_block) { - IR::IREmitter ir{*current_block}; - if (continue_block) { - 
ir.Branch(continue_block); + if (fallthrough_block) { + current_block->AddBranch(fallthrough_block); } else { - ir.Unreachable(); + syntax_list.emplace_back().type = IR::AbstractSyntaxNode::Type::Unreachable; } } } IR::Block* MergeBlock(Statement& parent, Statement& stmt) { - if (IR::Block* const block{TryFindForwardBlock(stmt)}) { - return block; + Statement* merge_stmt{TryFindForwardBlock(stmt)}; + if (!merge_stmt) { + // Create a merge block we can visit later + merge_stmt = stmt_pool.Create(&dummy_flow_block, &parent); + parent.children.insert(std::next(Tree::s_iterator_to(stmt)), *merge_stmt); } - // Create a merge block we can visit later - IR::Block* const block{block_pool.Create(inst_pool)}; - Statement* const merge_stmt{stmt_pool.Create(block, &parent)}; - parent.children.insert(std::next(Tree::s_iterator_to(stmt)), *merge_stmt); - return block; + return block_pool.Create(inst_pool); } ObjectPool<Statement>& stmt_pool; ObjectPool<IR::Inst>& inst_pool; ObjectPool<IR::Block>& block_pool; Environment& env; - IR::BlockList& block_list; + IR::AbstractSyntaxList& syntax_list; + // TODO: Make this constexpr when std::vector is constexpr + const Flow::Block dummy_flow_block; }; } // Anonymous namespace -IR::BlockList VisitAST(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Block>& block_pool, - Environment& env, Flow::CFG& cfg) { +IR::AbstractSyntaxList BuildASL(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Block>& block_pool, + Environment& env, Flow::CFG& cfg) { ObjectPool<Statement> stmt_pool{64}; GotoPass goto_pass{cfg, inst_pool, block_pool, stmt_pool}; Statement& root{goto_pass.RootStatement()}; - IR::BlockList block_list; - TranslatePass{inst_pool, block_pool, stmt_pool, env, root, block_list}; - return block_list; + IR::AbstractSyntaxList syntax_list; + TranslatePass{inst_pool, block_pool, stmt_pool, env, root, syntax_list}; + return syntax_list; } } // namespace Shader::Maxwell diff --git a/src/shader_recompiler/frontend/maxwell/structured_control_flow.h b/src/shader_recompiler/frontend/maxwell/structured_control_flow.h index a6be12ba2e..88b0836491 100644 --- a/src/shader_recompiler/frontend/maxwell/structured_control_flow.h +++ b/src/shader_recompiler/frontend/maxwell/structured_control_flow.h @@ -4,12 +4,8 @@ #pragma once -#include <functional> -#include <span> - -#include <boost/intrusive/list.hpp> - #include "shader_recompiler/environment.h" +#include "shader_recompiler/frontend/ir/abstract_syntax_list.h" #include "shader_recompiler/frontend/ir/basic_block.h" #include "shader_recompiler/frontend/ir/value.h" #include "shader_recompiler/frontend/maxwell/control_flow.h" @@ -17,8 +13,8 @@ namespace Shader::Maxwell { -[[nodiscard]] IR::BlockList VisitAST(ObjectPool<IR::Inst>& inst_pool, - ObjectPool<IR::Block>& block_pool, Environment& env, - Flow::CFG& cfg); +[[nodiscard]] IR::AbstractSyntaxList BuildASL(ObjectPool<IR::Inst>& inst_pool, + ObjectPool<IR::Block>& block_pool, Environment& env, + Flow::CFG& cfg); } // namespace Shader::Maxwell diff --git a/src/shader_recompiler/frontend/maxwell/translate/translate.cpp b/src/shader_recompiler/frontend/maxwell/translate/translate.cpp index f1230f58fe..0f4e7a251a 100644 --- a/src/shader_recompiler/frontend/maxwell/translate/translate.cpp +++ b/src/shader_recompiler/frontend/maxwell/translate/translate.cpp @@ -23,13 +23,12 @@ static void Invoke(TranslatorVisitor& visitor, Location pc, u64 insn) { } } -void Translate(Environment& env, IR::Block* block) { - if (block->IsVirtual()) { +void Translate(Environment& env, IR::Block* block, u32 
location_begin, u32 location_end) { + if (location_begin == location_end) { return; } TranslatorVisitor visitor{env, *block}; - const Location pc_end{block->LocationEnd()}; - for (Location pc = block->LocationBegin(); pc != pc_end; ++pc) { + for (Location pc = location_begin; pc != location_end; ++pc) { const u64 insn{env.ReadInstruction(pc.Offset())}; const Opcode opcode{Decode(insn)}; switch (opcode) { diff --git a/src/shader_recompiler/frontend/maxwell/translate/translate.h b/src/shader_recompiler/frontend/maxwell/translate/translate.h index e1aa2e0f4b..a3edd2e466 100644 --- a/src/shader_recompiler/frontend/maxwell/translate/translate.h +++ b/src/shader_recompiler/frontend/maxwell/translate/translate.h @@ -9,6 +9,6 @@ namespace Shader::Maxwell { -void Translate(Environment& env, IR::Block* block); +void Translate(Environment& env, IR::Block* block, u32 location_begin, u32 location_end); } // namespace Shader::Maxwell diff --git a/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp b/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp index b1c45d13a8..66f1391dbf 100644 --- a/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp +++ b/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp @@ -353,24 +353,6 @@ IR::Value EvalImmediates(const IR::Inst& inst, Func&& func, std::index_sequence< return IR::Value{func(Arg<typename Traits::template ArgType<I>>(inst.Arg(I))...)}; } -void FoldBranchConditional(IR::Inst& inst) { - const IR::U1 cond{inst.Arg(0)}; - if (cond.IsImmediate()) { - // TODO: Convert to Branch - return; - } - const IR::Inst* cond_inst{cond.InstRecursive()}; - if (cond_inst->GetOpcode() == IR::Opcode::LogicalNot) { - const IR::Value true_label{inst.Arg(1)}; - const IR::Value false_label{inst.Arg(2)}; - // Remove negation on the conditional (take the parameter out of LogicalNot) and swap - // the branches - inst.SetArg(0, cond_inst->Arg(0)); - inst.SetArg(1, false_label); - inst.SetArg(2, true_label); - } -} - std::optional<IR::Value> FoldCompositeExtractImpl(IR::Value inst_value, IR::Opcode insert, IR::Opcode construct, u32 first_index) { IR::Inst* const inst{inst_value.InstRecursive()}; @@ -581,8 +563,6 @@ void ConstantPropagation(IR::Block& block, IR::Inst& inst) { return (base & ~(~(~0u << bits) << offset)) | (insert << offset); }); return; - case IR::Opcode::BranchConditional: - return FoldBranchConditional(inst); case IR::Opcode::CompositeExtractF32x2: return FoldCompositeExtract(inst, IR::Opcode::CompositeConstructF32x2, IR::Opcode::CompositeInsertF32x2); diff --git a/src/shader_recompiler/ir_opt/dual_vertex_pass.cpp b/src/shader_recompiler/ir_opt/dual_vertex_pass.cpp index f2d7db0e6b..b0a9f5258e 100644 --- a/src/shader_recompiler/ir_opt/dual_vertex_pass.cpp +++ b/src/shader_recompiler/ir_opt/dual_vertex_pass.cpp @@ -13,60 +13,16 @@ namespace Shader::Optimization { -void VertexATransformPass(IR::Program& program) { - bool replaced_join{}; - bool eliminated_epilogue{}; - for (IR::Block* const block : program.post_order_blocks) { - for (IR::Inst& inst : block->Instructions()) { - switch (inst.GetOpcode()) { - case IR::Opcode::Return: - inst.ReplaceOpcode(IR::Opcode::Join); - replaced_join = true; - break; - case IR::Opcode::Epilogue: - inst.Invalidate(); - eliminated_epilogue = true; - break; - default: - break; - } - if (replaced_join && eliminated_epilogue) { - return; - } - } - } +void VertexATransformPass(IR::Program&) { + throw NotImplementedException("VertexA pass"); } -void VertexBTransformPass(IR::Program& program) { - for (IR::Block* const 
block : program.blocks) { - for (IR::Inst& inst : block->Instructions()) { - if (inst.GetOpcode() == IR::Opcode::Prologue) { - return inst.Invalidate(); - } - } - } +void VertexBTransformPass(IR::Program&) { + throw NotImplementedException("VertexA pass"); } -void DualVertexJoinPass(IR::Program& program) { - const auto& blocks = program.blocks; - const s64 sub_size = static_cast<s64>(blocks.size()) - 1; - if (sub_size < 1) { - throw LogicError("Dual Vertex Join pass failed, expected atleast 2 blocks"); - } - for (s64 index = 0; index < sub_size; ++index) { - IR::Block* const current_block{blocks[index]}; - IR::Block* const next_block{blocks[index + 1]}; - for (IR::Inst& inst : current_block->Instructions()) { - if (inst.GetOpcode() == IR::Opcode::Join) { - IR::IREmitter ir{*current_block, IR::Block::InstructionList::s_iterator_to(inst)}; - ir.Branch(next_block); - inst.Invalidate(); - // Only 1 join should exist - return; - } - } - } - throw LogicError("Dual Vertex Join pass failed, no join present"); +void DualVertexJoinPass(IR::Program&) { + throw NotImplementedException("VertexA pass"); } } // namespace Shader::Optimization diff --git a/src/shader_recompiler/ir_opt/identity_removal_pass.cpp b/src/shader_recompiler/ir_opt/identity_removal_pass.cpp index 6afbe24f7f..e9b55f8358 100644 --- a/src/shader_recompiler/ir_opt/identity_removal_pass.cpp +++ b/src/shader_recompiler/ir_opt/identity_removal_pass.cpp @@ -12,7 +12,6 @@ namespace Shader::Optimization { void IdentityRemovalPass(IR::Program& program) { std::vector<IR::Inst*> to_invalidate; - for (IR::Block* const block : program.blocks) { for (auto inst = block->begin(); inst != block->end();) { const size_t num_args{inst->NumArgs()}; diff --git a/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp b/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp index a8064a5d00..26eb3a3abc 100644 --- a/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp +++ b/src/shader_recompiler/ir_opt/ssa_rewrite_pass.cpp @@ -202,7 +202,7 @@ public: incomplete_phis[block].insert_or_assign(variable, phi); stack.back().result = IR::Value{&*phi}; - } else if (const std::span imm_preds{block->ImmediatePredecessors()}; + } else if (const std::span imm_preds = block->ImmPredecessors(); imm_preds.size() == 1) { // Optimize the common case of one predecessor: no phi needed stack.back().pc = Status::SetValue; @@ -257,7 +257,7 @@ public: private: template <typename Type> IR::Value AddPhiOperands(Type variable, IR::Inst& phi, IR::Block* block) { - for (IR::Block* const imm_pred : block->ImmediatePredecessors()) { + for (IR::Block* const imm_pred : block->ImmPredecessors()) { phi.AddPhiOperand(imm_pred, ReadVariable(variable, imm_pred)); } return TryRemoveTrivialPhi(phi, block, UndefOpcode(variable)); |
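Alongside the syntax list, the diff replaces Block's TrueBranch/FalseBranch pointers with explicit immediate successor/predecessor lists maintained by Block::AddBranch, and IR::PostOrder now performs a depth-first walk over ImmSuccessors starting from the root syntax node. A rough standalone sketch of that successor-based post-order follows, using a hypothetical simplified Block type rather than the real Shader::IR::Block:

```cpp
// Standalone sketch only: simplified Block type; shows an iterative DFS that
// appends a block to the output once all of its successors have been visited.
#include <algorithm>
#include <cstdio>
#include <set>
#include <vector>

struct Block {
    int id{};
    std::vector<Block*> imm_successors;
};

std::vector<Block*> PostOrder(Block* first_block) {
    std::vector<Block*> block_stack{first_block};
    std::set<Block*> visited{first_block};
    std::vector<Block*> post_order_blocks;
    while (!block_stack.empty()) {
        Block* const block{block_stack.back()};
        const auto visit{[&](Block* succ) {
            if (!visited.insert(succ).second) {
                return false;  // successor already visited
            }
            block_stack.push_back(block);  // revisit the parent after the successor
            block_stack.push_back(succ);
            return true;
        }};
        block_stack.pop_back();
        if (std::ranges::none_of(block->imm_successors, visit)) {
            post_order_blocks.push_back(block);  // no pending successors left
        }
    }
    return post_order_blocks;
}

int main() {
    // Diamond CFG: a -> {b, c}, b -> d, c -> d
    Block a{0}, b{1}, c{2}, d{3};
    a.imm_successors = {&b, &c};
    b.imm_successors = {&d};
    c.imm_successors = {&d};
    for (const Block* block : PostOrder(&a)) {
        std::printf("%d ", block->id);  // prints "3 1 2 0": d first, a last
    }
    std::puts("");
}
```

A block is only emitted once none of its successors still needs visiting, so every block appears after the blocks reachable from it; this is the ordering the pipeline consumes through program.post_order_blocks, which TranslateProgram now builds from the front of the syntax list.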