author     gdkchan <gab.dark.100@gmail.com>  2019-08-08 15:56:22 -0300
committer  emmauss <emmausssss@gmail.com>    2019-08-08 21:56:22 +0300
commit     a731ab3a2aad56e6ceb8b4e2444a61353246295c
tree       c7f13f51bfec6b19431e62167811ae31e9d2fea9 /ARMeilleure/Instructions/InstEmitAlu.cs
parent     1ba58e9942e54175e3f3a0e1d57a48537f4888b1
Add a new JIT compiler for CPU code (#693)
* Start of the ARMeilleure project
* Refactoring around the old IRAdapter, now renamed to PreAllocator
* Optimize the LowestBitSet method
* Add CLZ support and fix CLS implementation (the CLS identity is sketched just after this list)
* Add missing Equals and GetHashCode overrides on some structs, misc small tweaks
* Implement the ByteSwap IR instruction, and some refactoring on the assembler
* Implement the DivideUI IR instruction and fix 64-bits IDIV
* Correct constant operand type on CSINC
* Move division instructions implementation to InstEmitDiv
* Fix destination type for the ConditionalSelect IR instruction
* Implement UMULH and SMULH, with new IR instructions
* Fix some issues with shift instructions
* Fix constant types for BFM instructions
* Fix up new tests using the new V128 struct
* Update tests
* Move DIV tests to a separate file
* Add support for calls, and some instructions that depend on them
* Start adding support for SIMD & FP types, along with some of the related ARM instructions
* Fix some typos and the divide instruction with FP operands
* Fix wrong method call on Clz_V
* Implement ARM FP & SIMD move instructions, Saddlv_V, and misc. fixes
* Implement SIMD logical instructions and more misc. fixes
* Fix PSRAD x86 instruction encoding, TRN, UABD and UABDL implementations
* Implement float conversion instruction, merge in LDj3SNuD fixes, and some other misc. fixes
* Implement SIMD shift instruction and fix Dup_V
* Add SCVTF and UCVTF (vector, fixed-point) variants to the opcode table
* Fix check with tolerance on tester
* Implement FP & SIMD comparison instructions, and some fixes
* Update FCVT (Scalar) encoding on the table to support the Half-float variants
* Support passing V128 structs, some cleanup on the register allocator, merge LDj3SNuD fixes
* Use old memory access methods, made a start on SIMD memory insts support, some fixes
* Fix float constant passed to functions, save and restore non-volatile XMM registers, other fixes
* Fix arguments count with struct return values, other fixes
* More instructions
* Misc. fixes and integrate LDj3SNuD fixes
* Update tests
* Add a faster linear scan allocator, unwinding support on windows, and other changes
* Update Ryujinx.HLE
* Update Ryujinx.Graphics
* Fix V128 return pointer passing, RCX is clobbered
* Update Ryujinx.Tests
* Update ITimeZoneService
* Stop using GetFunctionPointer as that can't be called from native code, misc. fixes and tweaks
* Use generic GetFunctionPointerForDelegate method and other tweaks
* Some refactoring on the code generator, assert on invalid operations and use a separate enum for intrinsics
* Remove some unused code on the assembler
* Fix REX.W prefix regression on float conversion instructions, add some sort of profiler
* Add hardware capability detection
* Fix regression on Sha1h and revert Fcm** changes
* Add SSE2-only paths on vector extract and insert, some refactoring on the pre-allocator
* Fix silly mistake introduced on last commit on CpuId
* Generate inline stack probes when the stack allocation is too large
* Initial support for the System-V ABI
* Support multiple destination operands
* Fix SSE2 VectorInsert8 path, and other fixes
* Change placement of XMM callee save and restore code to match other compilers
* Rename Dest to Destination and Inst to Instruction
* Fix a regression related to calls and the V128 type
* Add an extra space on comments to match code style
* Some refactoring
* Fix vector insert FP32 SSE2 path
* Port over the ARM32 instructions
* Avoid memory protection races on JIT Cache
* Another fix on VectorInsert FP32 (thanks to LDj3SNuD)
* Float operands don't need to use the same register when VEX is supported
* Add a new register allocator, higher quality code for hot code (tier up), and other tweaks
* Some nits, small improvements on the pre allocator
* CpuThreadState is gone
* Allow changing CPU emulators with a config entry
* Add runtime identifiers on the ARMeilleure project
* Allow switching between CPUs through a config entry (pt. 2)
* Change win10-x64 to win-x64 on projects
* Update the Ryujinx project to use ARMeilleure
* Ensure that the selected register is valid on the hybrid allocator
* Allow exiting on returns to 0 (should fix test regression)
* Remove register assignments for most used variables on the hybrid allocator
* Do not use fixed registers as spill temp
* Add missing namespace and remove unneeded using
* Address PR feedback
* Fix types, etc
* Enable AssumeStrictAbiCompliance by default
* Ensure that Spill and Fill don't load or store any more than necessary
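For reference, the CLS fix listed above reduces "count leading sign bits" to a single count-leading-zeros: shift the value right by one, clear the sign bit of the original, and XOR the two, so the most significant set bit of the result is the first bit that disagrees with the sign bit. A minimal standalone sketch of that identity in plain C# follows; the ClsSketch and Cls32 names are illustrative only, and it uses System.Numerics.BitOperations rather than the project's IR helpers:

    using System.Numerics;

    static class ClsSketch
    {
        // Illustrative helper: counts how many bits directly below the
        // sign bit are equal to it (AArch64 CLS semantics).
        public static int Cls32(uint n)
        {
            uint nHigh = n >> 1;          // each bit moves next to its lower neighbor
            uint nLow  = n & 0x7FFFFFFFu; // same value with the sign bit cleared
            // The top set bit of (nHigh ^ nLow) is the first bit that differs
            // from the sign bit; the sign bit itself is not counted, hence -1.
            return BitOperations.LeadingZeroCount(nHigh ^ nLow) - 1;
        }
    }

The Cls emitter in the diff below is the same computation expressed over the IR: ShiftRightUI, BitwiseAnd with int.MaxValue or long.MaxValue, BitwiseExclusiveOr, CountLeadingZeros, then Subtract by one.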
Diffstat (limited to 'ARMeilleure/Instructions/InstEmitAlu.cs')
-rw-r--r-- | ARMeilleure/Instructions/InstEmitAlu.cs | 369
1 file changed, 369 insertions(+), 0 deletions(-)
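One note before the diff itself: the core of the file is the pair of add-with-carry and subtract-with-carry emitters, which compute d = n + m + C and d = n - m - (1 - C) respectively, since on AArch64 the carry flag acts as an inverted borrow for subtraction. Below is a plain C# sketch of the arithmetic the emitted IR performs; the CarrySketch class and method names are illustrative, not part of the project:

    static class CarrySketch
    {
        // ADC: plain add plus the carry-in.
        public static ulong Adc(ulong n, ulong m, bool carry)
        {
            return n + m + (carry ? 1UL : 0UL);
        }

        // SBC: subtract with inverted borrow; a clear carry flag
        // subtracts one extra, matching the BitwiseExclusiveOr of the
        // C flag with Const(1) in EmitSbc below.
        public static ulong Sbc(ulong n, ulong m, bool carry)
        {
            return n - m - (carry ? 0UL : 1UL);
        }
    }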
diff --git a/ARMeilleure/Instructions/InstEmitAlu.cs b/ARMeilleure/Instructions/InstEmitAlu.cs
new file mode 100644
index 00000000..947c9f70
--- /dev/null
+++ b/ARMeilleure/Instructions/InstEmitAlu.cs
@@ -0,0 +1,369 @@
+using ARMeilleure.Decoders;
+using ARMeilleure.IntermediateRepresentation;
+using ARMeilleure.State;
+using ARMeilleure.Translation;
+
+using static ARMeilleure.Instructions.InstEmitAluHelper;
+using static ARMeilleure.Instructions.InstEmitHelper;
+using static ARMeilleure.IntermediateRepresentation.OperandHelper;
+
+namespace ARMeilleure.Instructions
+{
+    static partial class InstEmit
+    {
+        public static void Adc(ArmEmitterContext context)  => EmitAdc(context, setFlags: false);
+        public static void Adcs(ArmEmitterContext context) => EmitAdc(context, setFlags: true);
+
+        private static void EmitAdc(ArmEmitterContext context, bool setFlags)
+        {
+            Operand n = GetAluN(context);
+            Operand m = GetAluM(context);
+
+            Operand d = context.Add(n, m);
+
+            Operand carry = GetFlag(PState.CFlag);
+
+            if (context.CurrOp.RegisterSize == RegisterSize.Int64)
+            {
+                carry = context.ZeroExtend32(OperandType.I64, carry);
+            }
+
+            d = context.Add(d, carry);
+
+            if (setFlags)
+            {
+                EmitNZFlagsCheck(context, d);
+
+                EmitAdcsCCheck(context, n, d);
+                EmitAddsVCheck(context, n, m, d);
+            }
+
+            SetAluDOrZR(context, d);
+        }
+
+        public static void Add(ArmEmitterContext context)
+        {
+            SetAluD(context, context.Add(GetAluN(context), GetAluM(context)));
+        }
+
+        public static void Adds(ArmEmitterContext context)
+        {
+            Operand n = GetAluN(context);
+            Operand m = GetAluM(context);
+
+            context.MarkComparison(n, m);
+
+            Operand d = context.Add(n, m);
+
+            EmitNZFlagsCheck(context, d);
+
+            EmitAddsCCheck(context, n, d);
+            EmitAddsVCheck(context, n, m, d);
+
+            SetAluDOrZR(context, d);
+        }
+
+        public static void And(ArmEmitterContext context)
+        {
+            SetAluD(context, context.BitwiseAnd(GetAluN(context), GetAluM(context)));
+        }
+
+        public static void Ands(ArmEmitterContext context)
+        {
+            Operand n = GetAluN(context);
+            Operand m = GetAluM(context);
+
+            Operand d = context.BitwiseAnd(n, m);
+
+            EmitNZFlagsCheck(context, d);
+            EmitCVFlagsClear(context);
+
+            SetAluDOrZR(context, d);
+        }
+
+        public static void Asrv(ArmEmitterContext context)
+        {
+            SetAluDOrZR(context, context.ShiftRightSI(GetAluN(context), GetAluMShift(context)));
+        }
+
+        public static void Bic(ArmEmitterContext context)  => EmitBic(context, setFlags: false);
+        public static void Bics(ArmEmitterContext context) => EmitBic(context, setFlags: true);
+
+        private static void EmitBic(ArmEmitterContext context, bool setFlags)
+        {
+            Operand n = GetAluN(context);
+            Operand m = GetAluM(context);
+
+            Operand d = context.BitwiseAnd(n, context.BitwiseNot(m));
+
+            if (setFlags)
+            {
+                EmitNZFlagsCheck(context, d);
+                EmitCVFlagsClear(context);
+            }
+
+            SetAluD(context, d, setFlags);
+        }
+
+        public static void Cls(ArmEmitterContext context)
+        {
+            OpCodeAlu op = (OpCodeAlu)context.CurrOp;
+
+            Operand n = GetIntOrZR(context, op.Rn);
+
+            Operand nHigh = context.ShiftRightUI(n, Const(1));
+
+            bool is32Bits = op.RegisterSize == RegisterSize.Int32;
+
+            Operand mask = is32Bits ? Const(int.MaxValue) : Const(long.MaxValue);
+
+            Operand nLow = context.BitwiseAnd(n, mask);
+
+            Operand res = context.CountLeadingZeros(context.BitwiseExclusiveOr(nHigh, nLow));
+
+            res = context.Subtract(res, Const(res.Type, 1));
+
+            SetAluDOrZR(context, res);
+        }
+
+        public static void Clz(ArmEmitterContext context)
+        {
+            OpCodeAlu op = (OpCodeAlu)context.CurrOp;
+
+            Operand n = GetIntOrZR(context, op.Rn);
+
+            Operand d = context.CountLeadingZeros(n);
+
+            SetAluDOrZR(context, d);
+        }
+
+        public static void Eon(ArmEmitterContext context)
+        {
+            Operand n = GetAluN(context);
+            Operand m = GetAluM(context);
+
+            Operand d = context.BitwiseExclusiveOr(n, context.BitwiseNot(m));
+
+            SetAluD(context, d);
+        }
+
+        public static void Eor(ArmEmitterContext context)
+        {
+            SetAluD(context, context.BitwiseExclusiveOr(GetAluN(context), GetAluM(context)));
+        }
+
+        public static void Extr(ArmEmitterContext context)
+        {
+            OpCodeAluRs op = (OpCodeAluRs)context.CurrOp;
+
+            Operand res = GetIntOrZR(context, op.Rm);
+
+            if (op.Shift != 0)
+            {
+                if (op.Rn == op.Rm)
+                {
+                    res = context.RotateRight(res, Const(op.Shift));
+                }
+                else
+                {
+                    res = context.ShiftRightUI(res, Const(op.Shift));
+
+                    Operand n = GetIntOrZR(context, op.Rn);
+
+                    int invShift = op.GetBitsCount() - op.Shift;
+
+                    res = context.BitwiseOr(res, context.ShiftLeft(n, Const(invShift)));
+                }
+            }
+
+            SetAluDOrZR(context, res);
+        }
+
+        public static void Lslv(ArmEmitterContext context)
+        {
+            SetAluDOrZR(context, context.ShiftLeft(GetAluN(context), GetAluMShift(context)));
+        }
+
+        public static void Lsrv(ArmEmitterContext context)
+        {
+            SetAluDOrZR(context, context.ShiftRightUI(GetAluN(context), GetAluMShift(context)));
+        }
+
+        public static void Sbc(ArmEmitterContext context)  => EmitSbc(context, setFlags: false);
+        public static void Sbcs(ArmEmitterContext context) => EmitSbc(context, setFlags: true);
+
+        private static void EmitSbc(ArmEmitterContext context, bool setFlags)
+        {
+            Operand n = GetAluN(context);
+            Operand m = GetAluM(context);
+
+            Operand d = context.Subtract(n, m);
+
+            Operand borrow = context.BitwiseExclusiveOr(GetFlag(PState.CFlag), Const(1));
+
+            if (context.CurrOp.RegisterSize == RegisterSize.Int64)
+            {
+                borrow = context.ZeroExtend32(OperandType.I64, borrow);
+            }
+
+            d = context.Subtract(d, borrow);
+
+            if (setFlags)
+            {
+                EmitNZFlagsCheck(context, d);
+
+                EmitSbcsCCheck(context, n, m);
+                EmitSubsVCheck(context, n, m, d);
+            }
+
+            SetAluDOrZR(context, d);
+        }
+
+        public static void Sub(ArmEmitterContext context)
+        {
+            SetAluD(context, context.Subtract(GetAluN(context), GetAluM(context)));
+        }
+
+        public static void Subs(ArmEmitterContext context)
+        {
+            Operand n = GetAluN(context);
+            Operand m = GetAluM(context);
+
+            context.MarkComparison(n, m);
+
+            Operand d = context.Subtract(n, m);
+
+            EmitNZFlagsCheck(context, d);
+
+            EmitSubsCCheck(context, n, m);
+            EmitSubsVCheck(context, n, m, d);
+
+            SetAluDOrZR(context, d);
+        }
+
+        public static void Orn(ArmEmitterContext context)
+        {
+            Operand n = GetAluN(context);
+            Operand m = GetAluM(context);
+
+            Operand d = context.BitwiseOr(n, context.BitwiseNot(m));
+
+            SetAluD(context, d);
+        }
+
+        public static void Orr(ArmEmitterContext context)
+        {
+            SetAluD(context, context.BitwiseOr(GetAluN(context), GetAluM(context)));
+        }
+
+        public static void Rbit(ArmEmitterContext context)
+        {
+            OpCodeAlu op = (OpCodeAlu)context.CurrOp;
+
+            Operand n = GetIntOrZR(context, op.Rn);
+            Operand d;
+
+            if (op.RegisterSize == RegisterSize.Int32)
+            {
+                d = context.Call(new _U32_U32(SoftFallback.ReverseBits32), n);
+            }
+            else
+            {
+                d = context.Call(new _U64_U64(SoftFallback.ReverseBits64), n);
+            }
+
+            SetAluDOrZR(context, d);
+        }
+
+        public static void Rev16(ArmEmitterContext context)
+        {
+            OpCodeAlu op = (OpCodeAlu)context.CurrOp;
+
+            Operand n = GetIntOrZR(context, op.Rn);
+            Operand d;
+
+            if (op.RegisterSize == RegisterSize.Int32)
+            {
+                d = context.Call(new _U32_U32(SoftFallback.ReverseBytes16_32), n);
+            }
+            else
+            {
+                d = context.Call(new _U64_U64(SoftFallback.ReverseBytes16_64), n);
+            }
+
+            SetAluDOrZR(context, d);
+        }
+
+        public static void Rev32(ArmEmitterContext context)
+        {
+            OpCodeAlu op = (OpCodeAlu)context.CurrOp;
+
+            Operand n = GetIntOrZR(context, op.Rn);
+
+            if (op.RegisterSize == RegisterSize.Int32)
+            {
+                SetAluDOrZR(context, context.ByteSwap(n));
+            }
+            else
+            {
+                Operand d = context.Call(new _U64_U64(SoftFallback.ReverseBytes32_64), n);
+
+                SetAluDOrZR(context, d);
+            }
+        }
+
+        public static void Rev64(ArmEmitterContext context)
+        {
+            OpCodeAlu op = (OpCodeAlu)context.CurrOp;
+
+            SetAluDOrZR(context, context.ByteSwap(GetIntOrZR(context, op.Rn)));
+        }
+
+        public static void Rorv(ArmEmitterContext context)
+        {
+            SetAluDOrZR(context, context.RotateRight(GetAluN(context), GetAluMShift(context)));
+        }
+
+        private static Operand GetAluMShift(ArmEmitterContext context)
+        {
+            IOpCodeAluRs op = (IOpCodeAluRs)context.CurrOp;
+
+            Operand m = GetIntOrZR(context, op.Rm);
+
+            if (op.RegisterSize == RegisterSize.Int64)
+            {
+                m = context.ConvertI64ToI32(m);
+            }
+
+            return context.BitwiseAnd(m, Const(context.CurrOp.GetBitsCount() - 1));
+        }
+
+        private static void EmitCVFlagsClear(ArmEmitterContext context)
+        {
+            SetFlag(context, PState.CFlag, Const(0));
+            SetFlag(context, PState.VFlag, Const(0));
+        }
+
+        public static void SetAluD(ArmEmitterContext context, Operand d)
+        {
+            SetAluD(context, d, x31IsZR: false);
+        }
+
+        public static void SetAluDOrZR(ArmEmitterContext context, Operand d)
+        {
+            SetAluD(context, d, x31IsZR: true);
+        }
+
+        public static void SetAluD(ArmEmitterContext context, Operand d, bool x31IsZR)
+        {
+            IOpCodeAlu op = (IOpCodeAlu)context.CurrOp;
+
+            if ((x31IsZR || op is IOpCodeAluRs) && op.Rd == RegisterConsts.ZeroIndex)
+            {
+                return;
+            }
+
+            SetIntOrSP(context, op.Rd, d);
+        }
+    }
+}
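Two details of the emitters above are easy to miss. Extr concatenates n:m into a double-width value and extracts the register width starting at the shift amount, degenerating to a rotate right when Rn == Rm; and the variable-shift instructions (LSLV, LSRV, ASRV, RORV) only honor the low bits of the shift register, which GetAluMShift implements as a BitwiseAnd with GetBitsCount() - 1. A plain C# sketch of both behaviors, using illustrative names (ShiftSketch, Extr64, Lslv64) that are not part of the project:

    static class ShiftSketch
    {
        // EXTR: bits [shift + 63 : shift] of the double-width value n:m.
        public static ulong Extr64(ulong n, ulong m, int shift)
        {
            if (shift == 0) return m;
            return (m >> shift) | (n << (64 - shift)); // with n == m this is a rotate right
        }

        // LSLV: the shift amount is taken modulo the register size,
        // mirroring the BitwiseAnd with (GetBitsCount() - 1) above.
        public static ulong Lslv64(ulong n, ulong shiftReg)
        {
            return n << (int)(shiftReg & 63);
        }
    }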