aboutsummaryrefslogtreecommitdiff
path: root/ARMeilleure/Instructions/InstEmitSimdArithmetic.cs
diff options
context:
space:
mode:
authorLDj3SNuD <35856442+LDj3SNuD@users.noreply.github.com>2020-06-16 20:28:02 +0200
committerGitHub <noreply@github.com>2020-06-16 20:28:02 +0200
commit5e724cf24e3d696b95be859c055a617e5d37bf80 (patch)
treebda92e3f1618cfad804f31ea3b00d7569501bc96 /ARMeilleure/Instructions/InstEmitSimdArithmetic.cs
parentfa286d353595aa91ad0bda6671d8d87e0700846f (diff)
Add Profiled Persistent Translation Cache. (#769)
* Delete DelegateTypes.cs * Delete DelegateCache.cs * Add files via upload * Update Horizon.cs * Update Program.cs * Update MainWindow.cs * Update Aot.cs * Update RelocEntry.cs * Update Translator.cs * Update MemoryManager.cs * Update InstEmitMemoryHelper.cs * Update Delegates.cs * Nit. * Nit. * Nit. * 10 fewer MSIL bytes for us * Add comment. Nits. * Update Translator.cs * Update Aot.cs * Nits. * Opt.. * Opt.. * Opt.. * Opt.. * Allow to change compression level. * Update MemoryManager.cs * Update Translator.cs * Manage corner cases during the save phase. Nits. * Update Aot.cs * Translator response tweak for Aot disabled. Nit. * Nit. * Nits. * Create DelegateHelpers.cs * Update Delegates.cs * Nit. * Nit. * Nits. * Fix due to #784. * Fixes due to #757 & #841. * Fix due to #846. * Fix due to #847. * Use MethodInfo for managed method calls. Use IR methods instead of managed methods about Max/Min (S/U). Follow-ups & Nits. * Add missing exception messages. Reintroduce slow path for Fmov_Vi. Implement slow path for Fmov_Si. * Switch to the new folder structure. Nits. * Impl. index-based relocation information. Impl. cache file version field. * Nit. * Address gdkchan comments. Mainly: - fixed cache file corruption issue on exit; - exposed a way to disable AOT on the GUI. * Address AcK77 comment. * Address Thealexbarney, jduncanator & emmauss comments. Header magic, CpuId (FI) & Aot -> Ptc. * Adaptation to the new application reloading system. Improvements to the call system of managed methods. Follow-ups. Nits. * Get the same boot times as on master when PTC is disabled. * Profiled Aot. * A32 support (#897). * #975 support (1 of 2). * #975 support (2 of 2). * Rebase fix & nits. * Some fixes and nits (still one bug left). * One fix & nits. * Tests fix (by gdk) & nits. * Support translations not only in high quality and rejit. Nits. * Added possibility to skip translations and continue execution, using `ESC` key. 
* Update SettingsWindow.cs * Update GLRenderer.cs * Update Ptc.cs * Disabled Profiled PTC by default as requested in the past by gdk. * Fix rejit bug. Increased number of parallel translations. Add stack unwinding stuff support (1 of 2). Nits. * Add stack unwinding stuff support (2 of 2). Tuned number of parallel translations. * Restored the ability to assemble jumps with 8-bit offset when Profiled PTC is disabled or during profiling. Modifications due to rebase. Nits. * Limited profiling of the functions to be translated to the addresses belonging to the range of static objects only. * Nits. * Nits. * Update Delegates.cs * Nit. * Update InstEmitSimdArithmetic.cs * Address riperiperi comments. * Fixed the issue of unjustifiably longer boot times at the second boot than at the first boot, measured at the same time or reference point and with the same number of translated functions. * Implemented a simple redundant load/save mechanism. Halved the value of Decoder.MaxInstsPerFunction more appropriate for the current performance of the Translator. Replaced Logger.PrintError with Logger.PrintDebug in TexturePool.cs about the supposed invalid texture format to avoid the spawn of the log. Nits. * Nit. Improved Logger.PrintError in TexturePool.cs to avoid log spawn. Added missing code for FZ handling (in output) for fp max/min instructions (slow paths). * Add configuration migration for PTC Co-authored-by: Thog <me@thog.eu>
Diffstat (limited to 'ARMeilleure/Instructions/InstEmitSimdArithmetic.cs')
-rw-r--r--ARMeilleure/Instructions/InstEmitSimdArithmetic.cs266
1 file changed, 124 insertions, 142 deletions
diff --git a/ARMeilleure/Instructions/InstEmitSimdArithmetic.cs b/ARMeilleure/Instructions/InstEmitSimdArithmetic.cs
index 8c2d604c..b3041aac 100644
--- a/ARMeilleure/Instructions/InstEmitSimdArithmetic.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdArithmetic.cs
@@ -6,6 +6,7 @@ using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.State;
using ARMeilleure.Translation;
using System;
+using System.Diagnostics;
using static ARMeilleure.Instructions.InstEmitHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper;
@@ -106,7 +107,7 @@ namespace ARMeilleure.Instructions
{
Operand ne = EmitVectorExtractZx(context, op.Rn, index, op.Size);
- Operand de = context.Call(new _U64_U64_S32(SoftFallback.CountLeadingSigns), ne, Const(eSize));
+ Operand de = context.Call(typeof(SoftFallback).GetMethod(nameof(SoftFallback.CountLeadingSigns)), ne, Const(eSize));
res = EmitVectorInsert(context, res, de, index, op.Size);
}
@@ -128,16 +129,7 @@ namespace ARMeilleure.Instructions
{
Operand ne = EmitVectorExtractZx(context, op.Rn, index, op.Size);
- Operand de;
-
- if (eSize == 64)
- {
- de = context.CountLeadingZeros(ne);
- }
- else
- {
- de = context.Call(new _U64_U64_S32(SoftFallback.CountLeadingZeros), ne, Const(eSize));
- }
+ Operand de = context.Call(typeof(SoftFallback).GetMethod(nameof(SoftFallback.CountLeadingZeros)), ne, Const(eSize));
res = EmitVectorInsert(context, res, de, index, op.Size);
}
@@ -165,7 +157,7 @@ namespace ARMeilleure.Instructions
}
else
{
- de = context.Call(new _U64_U64(SoftFallback.CountSetBits8), ne);
+ de = context.Call(typeof(SoftFallback).GetMethod(nameof(SoftFallback.CountSetBits8)), ne);
}
res = EmitVectorInsert(context, res, de, index, 0);
@@ -203,9 +195,9 @@ namespace ARMeilleure.Instructions
{
EmitScalarBinaryOpF(context, (op1, op2) =>
{
- Operand res = EmitSoftFloatCall(context, SoftFloat32.FPSub, SoftFloat64.FPSub, op1, op2);
+ Operand res = EmitSoftFloatCall(context, nameof(SoftFloat32.FPSub), op1, op2);
- return EmitUnaryMathCall(context, MathF.Abs, Math.Abs, res);
+ return EmitUnaryMathCall(context, nameof(Math.Abs), res);
});
}
}
@@ -244,9 +236,9 @@ namespace ARMeilleure.Instructions
{
EmitVectorBinaryOpF(context, (op1, op2) =>
{
- Operand res = EmitSoftFloatCall(context, SoftFloat32.FPSub, SoftFloat64.FPSub, op1, op2);
+ Operand res = EmitSoftFloatCall(context, nameof(SoftFloat32.FPSub), op1, op2);
- return EmitUnaryMathCall(context, MathF.Abs, Math.Abs, res);
+ return EmitUnaryMathCall(context, nameof(Math.Abs), res);
});
}
}
@@ -274,7 +266,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarUnaryOpF(context, (op1) =>
{
- return EmitUnaryMathCall(context, MathF.Abs, Math.Abs, op1);
+ return EmitUnaryMathCall(context, nameof(Math.Abs), op1);
});
}
}
@@ -309,7 +301,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorUnaryOpF(context, (op1) =>
{
- return EmitUnaryMathCall(context, MathF.Abs, Math.Abs, op1);
+ return EmitUnaryMathCall(context, nameof(Math.Abs), op1);
});
}
}
@@ -328,7 +320,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPAdd, SoftFloat64.FPAdd, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPAdd), op1, op2);
});
}
}
@@ -347,7 +339,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPAdd, SoftFloat64.FPAdd, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPAdd), op1, op2);
});
}
}
@@ -380,7 +372,7 @@ namespace ARMeilleure.Instructions
Operand ne0 = context.VectorExtract(type, GetVec(op.Rn), 0);
Operand ne1 = context.VectorExtract(type, GetVec(op.Rn), 1);
- Operand res = EmitSoftFloatCall(context, SoftFloat32.FPAdd, SoftFloat64.FPAdd, ne0, ne1);
+ Operand res = EmitSoftFloatCall(context, nameof(SoftFloat32.FPAdd), ne0, ne1);
context.Copy(GetVec(op.Rd), context.VectorInsert(context.VectorZero(), res, 0));
}
@@ -396,7 +388,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorPairwiseOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPAdd, SoftFloat64.FPAdd, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPAdd), op1, op2);
});
}
}
@@ -415,7 +407,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPDiv, SoftFloat64.FPDiv, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPDiv), op1, op2);
});
}
}
@@ -434,7 +426,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPDiv, SoftFloat64.FPDiv, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPDiv), op1, op2);
});
}
}
@@ -469,7 +461,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarTernaryRaOpF(context, (op1, op2, op3) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMulAdd, SoftFloat64.FPMulAdd, op1, op2, op3);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMulAdd), op1, op2, op3);
});
}
}
@@ -484,7 +476,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMax, SoftFloat64.FPMax, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMax), op1, op2);
});
}
}
@@ -499,7 +491,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMax, SoftFloat64.FPMax, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMax), op1, op2);
});
}
}
@@ -514,7 +506,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMaxNum, SoftFloat64.FPMaxNum, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMaxNum), op1, op2);
});
}
}
@@ -529,7 +521,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMaxNum, SoftFloat64.FPMaxNum, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMaxNum), op1, op2);
});
}
}
@@ -538,7 +530,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorAcrossVectorOpF(context, (op1, op2) =>
{
- return context.Call(new _F32_F32_F32(SoftFloat32.FPMaxNum), op1, op2);
+ return context.Call(typeof(SoftFloat32).GetMethod(nameof(SoftFloat32.FPMaxNum)), op1, op2);
});
}
@@ -552,7 +544,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorPairwiseOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMax, SoftFloat64.FPMax, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMax), op1, op2);
});
}
}
@@ -567,7 +559,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMin, SoftFloat64.FPMin, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMin), op1, op2);
});
}
}
@@ -582,7 +574,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMin, SoftFloat64.FPMin, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMin), op1, op2);
});
}
}
@@ -597,7 +589,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMinNum, SoftFloat64.FPMinNum, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMinNum), op1, op2);
});
}
}
@@ -612,7 +604,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMinNum, SoftFloat64.FPMinNum, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMinNum), op1, op2);
});
}
}
@@ -621,7 +613,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorAcrossVectorOpF(context, (op1, op2) =>
{
- return context.Call(new _F32_F32_F32(SoftFloat32.FPMinNum), op1, op2);
+ return context.Call(typeof(SoftFloat32).GetMethod(nameof(SoftFloat32.FPMinNum)), op1, op2);
});
}
@@ -635,7 +627,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorPairwiseOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMin, SoftFloat64.FPMin, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMin), op1, op2);
});
}
}
@@ -686,7 +678,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorTernaryOpF(context, (op1, op2, op3) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMulAdd, SoftFloat64.FPMulAdd, op1, op2, op3);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMulAdd), op1, op2, op3);
});
}
}
@@ -735,7 +727,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorTernaryOpByElemF(context, (op1, op2, op3) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMulAdd, SoftFloat64.FPMulAdd, op1, op2, op3);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMulAdd), op1, op2, op3);
});
}
}
@@ -786,7 +778,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorTernaryOpF(context, (op1, op2, op3) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMulSub, SoftFloat64.FPMulSub, op1, op2, op3);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMulSub), op1, op2, op3);
});
}
}
@@ -835,7 +827,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorTernaryOpByElemF(context, (op1, op2, op3) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMulSub, SoftFloat64.FPMulSub, op1, op2, op3);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMulSub), op1, op2, op3);
});
}
}
@@ -870,7 +862,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarTernaryRaOpF(context, (op1, op2, op3) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMulSub, SoftFloat64.FPMulSub, op1, op2, op3);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMulSub), op1, op2, op3);
});
}
}
@@ -889,7 +881,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMul, SoftFloat64.FPMul, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMul), op1, op2);
});
}
}
@@ -913,7 +905,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMul, SoftFloat64.FPMul, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMul), op1, op2);
});
}
}
@@ -963,7 +955,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorBinaryOpByElemF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMul, SoftFloat64.FPMul, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMul), op1, op2);
});
}
}
@@ -972,7 +964,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMulX, SoftFloat64.FPMulX, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMulX), op1, op2);
});
}
@@ -980,7 +972,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarBinaryOpByElemF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMulX, SoftFloat64.FPMulX, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMulX), op1, op2);
});
}
@@ -988,7 +980,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMulX, SoftFloat64.FPMulX, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMulX), op1, op2);
});
}
@@ -996,7 +988,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorBinaryOpByElemF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMulX, SoftFloat64.FPMulX, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMulX), op1, op2);
});
}
@@ -1103,7 +1095,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarTernaryRaOpF(context, (op1, op2, op3) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPNegMulAdd, SoftFloat64.FPNegMulAdd, op1, op2, op3);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPNegMulAdd), op1, op2, op3);
});
}
}
@@ -1146,7 +1138,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarTernaryRaOpF(context, (op1, op2, op3) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPNegMulSub, SoftFloat64.FPNegMulSub, op1, op2, op3);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPNegMulSub), op1, op2, op3);
});
}
}
@@ -1170,7 +1162,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarUnaryOpF(context, (op1) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPRecipEstimate, SoftFloat64.FPRecipEstimate, op1);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPRecipEstimate), op1);
});
}
}
@@ -1189,7 +1181,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorUnaryOpF(context, (op1) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPRecipEstimate, SoftFloat64.FPRecipEstimate, op1);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPRecipEstimate), op1);
});
}
}
@@ -1227,7 +1219,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPRecipStepFused, SoftFloat64.FPRecipStepFused, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPRecipStepFused), op1, op2);
});
}
}
@@ -1270,7 +1262,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPRecipStepFused, SoftFloat64.FPRecipStepFused, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPRecipStepFused), op1, op2);
});
}
}
@@ -1279,7 +1271,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarUnaryOpF(context, (op1) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPRecpX, SoftFloat64.FPRecpX, op1);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPRecpX), op1);
});
}
@@ -1307,11 +1299,11 @@ namespace ARMeilleure.Instructions
{
if (op.Size == 0)
{
- return context.Call(new _F32_F32(SoftFallback.RoundF), op1);
+ return context.Call(typeof(SoftFallback).GetMethod(nameof(SoftFallback.RoundF)), op1);
}
else /* if (op.Size == 1) */
{
- return context.Call(new _F64_F64(SoftFallback.Round), op1);
+ return context.Call(typeof(SoftFallback).GetMethod(nameof(SoftFallback.Round)), op1);
}
});
}
@@ -1326,11 +1318,11 @@ namespace ARMeilleure.Instructions
{
if (sizeF == 0)
{
- return context.Call(new _F32_F32(SoftFallback.RoundF), op1);
+ return context.Call(typeof(SoftFallback).GetMethod(nameof(SoftFallback.RoundF)), op1);
}
else /* if (sizeF == 1) */
{
- return context.Call(new _F64_F64(SoftFallback.Round), op1);
+ return context.Call(typeof(SoftFallback).GetMethod(nameof(SoftFallback.Round)), op1);
}
});
}
@@ -1345,7 +1337,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarUnaryOpF(context, (op1) =>
{
- return EmitUnaryMathCall(context, MathF.Floor, Math.Floor, op1);
+ return EmitUnaryMathCall(context, nameof(Math.Floor), op1);
});
}
}
@@ -1360,7 +1352,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorUnaryOpF(context, (op1) =>
{
- return EmitUnaryMathCall(context, MathF.Floor, Math.Floor, op1);
+ return EmitUnaryMathCall(context, nameof(Math.Floor), op1);
});
}
}
@@ -1405,7 +1397,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarUnaryOpF(context, (op1) =>
{
- return EmitUnaryMathCall(context, MathF.Ceiling, Math.Ceiling, op1);
+ return EmitUnaryMathCall(context, nameof(Math.Ceiling), op1);
});
}
}
@@ -1420,7 +1412,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorUnaryOpF(context, (op1) =>
{
- return EmitUnaryMathCall(context, MathF.Ceiling, Math.Ceiling, op1);
+ return EmitUnaryMathCall(context, nameof(Math.Ceiling), op1);
});
}
}
@@ -1433,11 +1425,11 @@ namespace ARMeilleure.Instructions
{
if (op.Size == 0)
{
- return context.Call(new _F32_F32(SoftFallback.RoundF), op1);
+ return context.Call(typeof(SoftFallback).GetMethod(nameof(SoftFallback.RoundF)), op1);
}
else /* if (op.Size == 1) */
{
- return context.Call(new _F64_F64(SoftFallback.Round), op1);
+ return context.Call(typeof(SoftFallback).GetMethod(nameof(SoftFallback.Round)), op1);
}
});
}
@@ -1452,11 +1444,11 @@ namespace ARMeilleure.Instructions
{
if (sizeF == 0)
{
- return context.Call(new _F32_F32(SoftFallback.RoundF), op1);
+ return context.Call(typeof(SoftFallback).GetMethod(nameof(SoftFallback.RoundF)), op1);
}
else /* if (sizeF == 1) */
{
- return context.Call(new _F64_F64(SoftFallback.Round), op1);
+ return context.Call(typeof(SoftFallback).GetMethod(nameof(SoftFallback.Round)), op1);
}
});
}
@@ -1471,7 +1463,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarUnaryOpF(context, (op1) =>
{
- return EmitUnaryMathCall(context, MathF.Truncate, Math.Truncate, op1);
+ return EmitUnaryMathCall(context, nameof(Math.Truncate), op1);
});
}
}
@@ -1486,7 +1478,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorUnaryOpF(context, (op1) =>
{
- return EmitUnaryMathCall(context, MathF.Truncate, Math.Truncate, op1);
+ return EmitUnaryMathCall(context, nameof(Math.Truncate), op1);
});
}
}
@@ -1505,7 +1497,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarUnaryOpF(context, (op1) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPRSqrtEstimate, SoftFloat64.FPRSqrtEstimate, op1);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPRSqrtEstimate), op1);
});
}
}
@@ -1524,7 +1516,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorUnaryOpF(context, (op1) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPRSqrtEstimate, SoftFloat64.FPRSqrtEstimate, op1);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPRSqrtEstimate), op1);
});
}
}
@@ -1566,7 +1558,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPRSqrtStepFused, SoftFloat64.FPRSqrtStepFused, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPRSqrtStepFused), op1, op2);
});
}
}
@@ -1613,7 +1605,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPRSqrtStepFused, SoftFloat64.FPRSqrtStepFused, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPRSqrtStepFused), op1, op2);
});
}
}
@@ -1628,7 +1620,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarUnaryOpF(context, (op1) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPSqrt, SoftFloat64.FPSqrt, op1);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPSqrt), op1);
});
}
}
@@ -1643,7 +1635,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorUnaryOpF(context, (op1) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPSqrt, SoftFloat64.FPSqrt, op1);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPSqrt), op1);
});
}
}
@@ -1662,7 +1654,7 @@ namespace ARMeilleure.Instructions
{
EmitScalarBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPSub, SoftFloat64.FPSub, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPSub), op1, op2);
});
}
}
@@ -1681,7 +1673,7 @@ namespace ARMeilleure.Instructions
{
EmitVectorBinaryOpF(context, (op1, op2) =>
{
- return EmitSoftFloatCall(context, SoftFloat32.FPSub, SoftFloat64.FPSub, op1, op2);
+ return EmitSoftFloatCall(context, nameof(SoftFloat32.FPSub), op1, op2);
});
}
}
@@ -1690,7 +1682,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.UseSse41)
{
- EmitSse41Mul_AddSub(context, AddSub.Add);
+ EmitSse41VectorMul_AddSub(context, AddSub.Add);
}
else
{
@@ -1713,7 +1705,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.UseSse41)
{
- EmitSse41Mul_AddSub(context, AddSub.Subtract);
+ EmitSse41VectorMul_AddSub(context, AddSub.Subtract);
}
else
{
@@ -1736,7 +1728,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.UseSse41)
{
- EmitSse41Mul_AddSub(context, AddSub.None);
+ EmitSse41VectorMul_AddSub(context, AddSub.None);
}
else
{
@@ -1805,14 +1797,14 @@ namespace ARMeilleure.Instructions
public static void Sabd_V(ArmEmitterContext context)
{
- if (Optimizations.UseSse2)
+ if (Optimizations.UseSse41)
{
OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
Operand n = GetVec(op.Rn);
Operand m = GetVec(op.Rm);
- EmitSse41Sabd(context, op, n, m, isLong: false);
+ EmitSse41VectorSabdOp(context, op, n, m, isLong: false);
}
else
{
@@ -1845,7 +1837,7 @@ namespace ARMeilleure.Instructions
n = context.AddIntrinsic(movInst, n);
m = context.AddIntrinsic(movInst, m);
- EmitSse41Sabd(context, op, n, m, isLong: true);
+ EmitSse41VectorSabdOp(context, op, n, m, isLong: true);
}
else
{
@@ -2027,9 +2019,7 @@ namespace ARMeilleure.Instructions
}
else
{
- Delegate dlg = new _S64_S64_S64(Math.Max);
-
- EmitVectorBinaryOpSx(context, (op1, op2) => context.Call(dlg, op1, op2));
+ EmitVectorBinaryOpSx(context, (op1, op2) => EmitMax64Op(context, op1, op2, signed: true));
}
}
@@ -2041,17 +2031,13 @@ namespace ARMeilleure.Instructions
}
else
{
- Delegate dlg = new _S64_S64_S64(Math.Max);
-
- EmitVectorPairwiseOpSx(context, (op1, op2) => context.Call(dlg, op1, op2));
+ EmitVectorPairwiseOpSx(context, (op1, op2) => EmitMax64Op(context, op1, op2, signed: true));
}
}
public static void Smaxv_V(ArmEmitterContext context)
{
- Delegate dlg = new _S64_S64_S64(Math.Max);
-
- EmitVectorAcrossVectorOpSx(context, (op1, op2) => context.Call(dlg, op1, op2));
+ EmitVectorAcrossVectorOpSx(context, (op1, op2) => EmitMax64Op(context, op1, op2, signed: true));
}
public static void Smin_V(ArmEmitterContext context)
@@ -2076,9 +2062,7 @@ namespace ARMeilleure.Instructions
}
else
{
- Delegate dlg = new _S64_S64_S64(Math.Min);
-
- EmitVectorBinaryOpSx(context, (op1, op2) => context.Call(dlg, op1, op2));
+ EmitVectorBinaryOpSx(context, (op1, op2) => EmitMin64Op(context, op1, op2, signed: true));
}
}
@@ -2090,17 +2074,13 @@ namespace ARMeilleure.Instructions
}
else
{
- Delegate dlg = new _S64_S64_S64(Math.Min);
-
- EmitVectorPairwiseOpSx(context, (op1, op2) => context.Call(dlg, op1, op2));
+ EmitVectorPairwiseOpSx(context, (op1, op2) => EmitMin64Op(context, op1, op2, signed: true));
}
}
public static void Sminv_V(ArmEmitterContext context)
{
- Delegate dlg = new _S64_S64_S64(Math.Min);
-
- EmitVectorAcrossVectorOpSx(context, (op1, op2) => context.Call(dlg, op1, op2));
+ EmitVectorAcrossVectorOpSx(context, (op1, op2) => EmitMin64Op(context, op1, op2, signed: true));
}
public static void Smlal_V(ArmEmitterContext context)
@@ -2458,7 +2438,7 @@ namespace ARMeilleure.Instructions
Operand n = GetVec(op.Rn);
Operand m = GetVec(op.Rm);
- EmitSse41Uabd(context, op, n, m, isLong: false);
+ EmitSse41VectorUabdOp(context, op, n, m, isLong: false);
}
else
{
@@ -2491,7 +2471,7 @@ namespace ARMeilleure.Instructions
n = context.AddIntrinsic(movInst, n);
m = context.AddIntrinsic(movInst, m);
- EmitSse41Uabd(context, op, n, m, isLong: true);
+ EmitSse41VectorUabdOp(context, op, n, m, isLong: true);
}
else
{
@@ -2666,9 +2646,7 @@ namespace ARMeilleure.Instructions
}
else
{
- Delegate dlg = new _U64_U64_U64(Math.Max);
-
- EmitVectorBinaryOpZx(context, (op1, op2) => context.Call(dlg, op1, op2));
+ EmitVectorBinaryOpZx(context, (op1, op2) => EmitMax64Op(context, op1, op2, signed: false));
}
}
@@ -2680,17 +2658,13 @@ namespace ARMeilleure.Instructions
}
else
{
- Delegate dlg = new _U64_U64_U64(Math.Max);
-
- EmitVectorPairwiseOpZx(context, (op1, op2) => context.Call(dlg, op1, op2));
+ EmitVectorPairwiseOpZx(context, (op1, op2) => EmitMax64Op(context, op1, op2, signed: false));
}
}
public static void Umaxv_V(ArmEmitterContext context)
{
- Delegate dlg = new _U64_U64_U64(Math.Max);
-
- EmitVectorAcrossVectorOpZx(context, (op1, op2) => context.Call(dlg, op1, op2));
+ EmitVectorAcrossVectorOpZx(context, (op1, op2) => EmitMax64Op(context, op1, op2, signed: false));
}
public static void Umin_V(ArmEmitterContext context)
@@ -2715,9 +2689,7 @@ namespace ARMeilleure.Instructions
}
else
{
- Delegate dlg = new _U64_U64_U64(Math.Min);
-
- EmitVectorBinaryOpZx(context, (op1, op2) => context.Call(dlg, op1, op2));
+ EmitVectorBinaryOpZx(context, (op1, op2) => EmitMin64Op(context, op1, op2, signed: false));
}
}
@@ -2729,17 +2701,13 @@ namespace ARMeilleure.Instructions
}
else
{
- Delegate dlg = new _U64_U64_U64(Math.Min);
-
- EmitVectorPairwiseOpZx(context, (op1, op2) => context.Call(dlg, op1, op2));
+ EmitVectorPairwiseOpZx(context, (op1, op2) => EmitMin64Op(context, op1, op2, signed: false));
}
}
public static void Uminv_V(ArmEmitterContext context)
{
- Delegate dlg = new _U64_U64_U64(Math.Min);
-
- EmitVectorAcrossVectorOpZx(context, (op1, op2) => context.Call(dlg, op1, op2));
+ EmitVectorAcrossVectorOpZx(context, (op1, op2) => EmitMin64Op(context, op1, op2, signed: false));
}
public static void Umlal_V(ArmEmitterContext context)
@@ -3081,7 +3049,29 @@ namespace ARMeilleure.Instructions
context.Copy(d, res);
}
- public static void EmitScalarRoundOpF(ArmEmitterContext context, FPRoundingMode roundMode)
+ private static Operand EmitMax64Op(ArmEmitterContext context, Operand op1, Operand op2, bool signed)
+ {
+ Debug.Assert(op1.Type == OperandType.I64 && op2.Type == OperandType.I64);
+
+ Operand cmp = signed
+ ? context.ICompareGreaterOrEqual (op1, op2)
+ : context.ICompareGreaterOrEqualUI(op1, op2);
+
+ return context.ConditionalSelect(cmp, op1, op2);
+ }
+
+ private static Operand EmitMin64Op(ArmEmitterContext context, Operand op1, Operand op2, bool signed)
+ {
+ Debug.Assert(op1.Type == OperandType.I64 && op2.Type == OperandType.I64);
+
+ Operand cmp = signed
+ ? context.ICompareLessOrEqual (op1, op2)
+ : context.ICompareLessOrEqualUI(op1, op2);
+
+ return context.ConditionalSelect(cmp, op1, op2);
+ }
+
+ private static void EmitScalarRoundOpF(ArmEmitterContext context, FPRoundingMode roundMode)
{
OpCodeSimd op = (OpCodeSimd)context.CurrOp;
@@ -3103,7 +3093,7 @@ namespace ARMeilleure.Instructions
context.Copy(GetVec(op.Rd), res);
}
- public static void EmitVectorRoundOpF(ArmEmitterContext context, FPRoundingMode roundMode)
+ private static void EmitVectorRoundOpF(ArmEmitterContext context, FPRoundingMode roundMode)
{
OpCodeSimd op = (OpCodeSimd)context.CurrOp;
@@ -3220,14 +3210,14 @@ namespace ARMeilleure.Instructions
Subtract
}
- private static void EmitSse41Mul_AddSub(ArmEmitterContext context, AddSub addSub)
+ private static void EmitSse41VectorMul_AddSub(ArmEmitterContext context, AddSub addSub)
{
OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
Operand n = GetVec(op.Rn);
Operand m = GetVec(op.Rm);
- Operand res = null;
+ Operand res;
if (op.Size == 0)
{
@@ -3257,23 +3247,15 @@ namespace ARMeilleure.Instructions
if (addSub == AddSub.Add)
{
- switch (op.Size)
- {
- case 0: res = context.AddIntrinsic(Intrinsic.X86Paddb, d, res); break;
- case 1: res = context.AddIntrinsic(Intrinsic.X86Paddw, d, res); break;
- case 2: res = context.AddIntrinsic(Intrinsic.X86Paddd, d, res); break;
- case 3: res = context.AddIntrinsic(Intrinsic.X86Paddq, d, res); break;
- }
+ Intrinsic addInst = X86PaddInstruction[op.Size];
+
+ res = context.AddIntrinsic(addInst, d, res);
}
else if (addSub == AddSub.Subtract)
{
- switch (op.Size)
- {
- case 0: res = context.AddIntrinsic(Intrinsic.X86Psubb, d, res); break;
- case 1: res = context.AddIntrinsic(Intrinsic.X86Psubw, d, res); break;
- case 2: res = context.AddIntrinsic(Intrinsic.X86Psubd, d, res); break;
- case 3: res = context.AddIntrinsic(Intrinsic.X86Psubq, d, res); break;
- }
+ Intrinsic subInst = X86PsubInstruction[op.Size];
+
+ res = context.AddIntrinsic(subInst, d, res);
}
if (op.RegisterSize == RegisterSize.Simd64)
@@ -3284,7 +3266,7 @@ namespace ARMeilleure.Instructions
context.Copy(d, res);
}
- private static void EmitSse41Sabd(
+ private static void EmitSse41VectorSabdOp(
ArmEmitterContext context,
OpCodeSimdReg op,
Operand n,
@@ -3317,7 +3299,7 @@ namespace ARMeilleure.Instructions
context.Copy(GetVec(op.Rd), res);
}
- private static void EmitSse41Uabd(
+ private static void EmitSse41VectorUabdOp(
ArmEmitterContext context,
OpCodeSimdReg op,
Operand n,