 ARMeilleure/CodeGen/X86/Assembler.cs               |   6
 ARMeilleure/CodeGen/X86/CodeGenerator.cs           |  12
 ARMeilleure/CodeGen/X86/IntrinsicTable.cs          |   4
 ARMeilleure/CodeGen/X86/PreAllocator.cs            |   7
 ARMeilleure/CodeGen/X86/X86Instruction.cs          |   6
 ARMeilleure/Common/BitUtils.cs                     |  62
 ARMeilleure/Decoders/DecoderHelper.cs              |  97
 ARMeilleure/Decoders/OpCodeSimdFmov.cs             |  17
 ARMeilleure/Decoders/OpCodeSimdImm.cs              |  17
 ARMeilleure/Instructions/InstEmitAluHelper.cs      |   2
 ARMeilleure/Instructions/InstEmitSimdArithmetic.cs | 261
 ARMeilleure/Instructions/InstEmitSimdCmp.cs        |  22
 ARMeilleure/Instructions/InstEmitSimdHelper.cs     |   5
 ARMeilleure/Instructions/InstEmitSimdMove.cs       |  57
 ARMeilleure/Instructions/SoftFloat.cs              |  32
 ARMeilleure/IntermediateRepresentation/Intrinsic.cs |  4
 16 files changed, 449 insertions(+), 162 deletions(-)
diff --git a/ARMeilleure/CodeGen/X86/Assembler.cs b/ARMeilleure/CodeGen/X86/Assembler.cs
index ee80d892..24a122c3 100644
--- a/ARMeilleure/CodeGen/X86/Assembler.cs
+++ b/ARMeilleure/CodeGen/X86/Assembler.cs
@@ -75,6 +75,10 @@ namespace ARMeilleure.CodeGen.X86
Add(X86Instruction.And, new InstructionInfo(0x00000021, 0x04000083, 0x04000081, BadOp, 0x00000023, InstructionFlags.None));
Add(X86Instruction.Andnpd, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f55, InstructionFlags.Vex | InstructionFlags.Prefix66));
Add(X86Instruction.Andnps, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f55, InstructionFlags.Vex));
+ Add(X86Instruction.Andpd, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f54, InstructionFlags.Vex | InstructionFlags.Prefix66));
+ Add(X86Instruction.Andps, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f54, InstructionFlags.Vex));
+ Add(X86Instruction.Blendvpd, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x000f3815, InstructionFlags.Prefix66));
+ Add(X86Instruction.Blendvps, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x000f3814, InstructionFlags.Prefix66));
Add(X86Instruction.Bsr, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000fbd, InstructionFlags.None));
Add(X86Instruction.Bswap, new InstructionInfo(0x00000fc8, BadOp, BadOp, BadOp, BadOp, InstructionFlags.RegOnly));
Add(X86Instruction.Call, new InstructionInfo(0x020000ff, BadOp, BadOp, BadOp, BadOp, InstructionFlags.None));
@@ -245,6 +249,8 @@ namespace ARMeilleure.CodeGen.X86
Add(X86Instruction.Unpckhps, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f15, InstructionFlags.Vex));
Add(X86Instruction.Unpcklpd, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f14, InstructionFlags.Vex | InstructionFlags.Prefix66));
Add(X86Instruction.Unpcklps, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f14, InstructionFlags.Vex));
+ Add(X86Instruction.Vblendvpd, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x000f3a4b, InstructionFlags.Vex | InstructionFlags.Prefix66));
+ Add(X86Instruction.Vblendvps, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x000f3a4a, InstructionFlags.Vex | InstructionFlags.Prefix66));
Add(X86Instruction.Vpblendvb, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x000f3a4c, InstructionFlags.Vex | InstructionFlags.Prefix66));
Add(X86Instruction.Xor, new InstructionInfo(0x00000031, 0x06000083, 0x06000081, BadOp, 0x00000033, InstructionFlags.None));
Add(X86Instruction.Xorpd, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f57, InstructionFlags.Vex | InstructionFlags.Prefix66));
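Note on the new table entries: BLENDVPS/BLENDVPD (SSE4.1, opcodes 0x0f3814/0x0f3815) select each lane from one of two sources according to the sign bit of a mask, and in their legacy encoding the mask is implicitly XMM0, which is why they are added without the Vex flag; the VBLENDVPS/VBLENDVPD entries further down (0x0f3a4a/0x0f3a4b) are the VEX forms that encode the mask as an explicit fourth operand. A rough managed equivalent of the lane-select semantics, using the .NET hardware intrinsics rather than this project's API, as a sketch:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    // BLENDVPS semantics: lane = (mask lane MSB set) ? right : left.
    // The .NET JIT picks blendvps (mask pinned to xmm0) or vblendvps as available.
    static Vector128<float> BlendLanes(Vector128<float> left, Vector128<float> right, Vector128<float> mask)
    {
        return Sse41.BlendVariable(left, right, mask);
    }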
diff --git a/ARMeilleure/CodeGen/X86/CodeGenerator.cs b/ARMeilleure/CodeGen/X86/CodeGenerator.cs
index 33fc2aee..d1224363 100644
--- a/ARMeilleure/CodeGen/X86/CodeGenerator.cs
+++ b/ARMeilleure/CodeGen/X86/CodeGenerator.cs
@@ -336,7 +336,15 @@ namespace ARMeilleure.CodeGen.X86
Debug.Assert(!dest.Type.IsInteger());
- if (info.Inst == X86Instruction.Pblendvb && HardwareCapabilities.SupportsVexEncoding)
+ if (info.Inst == X86Instruction.Blendvpd && HardwareCapabilities.SupportsVexEncoding)
+ {
+ context.Assembler.WriteInstruction(X86Instruction.Vblendvpd, dest, src1, src2, src3);
+ }
+ else if (info.Inst == X86Instruction.Blendvps && HardwareCapabilities.SupportsVexEncoding)
+ {
+ context.Assembler.WriteInstruction(X86Instruction.Vblendvps, dest, src1, src2, src3);
+ }
+ else if (info.Inst == X86Instruction.Pblendvb && HardwareCapabilities.SupportsVexEncoding)
{
context.Assembler.WriteInstruction(X86Instruction.Vpblendvb, dest, src1, src2, src3);
}
@@ -1646,7 +1654,7 @@ namespace ARMeilleure.CodeGen.X86
for (int offset = PageSize; offset < size; offset += PageSize)
{
- Operand memOp = new MemoryOperand(OperandType.I32, rsp, null, Multiplier.x1, -offset);;
+ Operand memOp = new MemoryOperand(OperandType.I32, rsp, null, Multiplier.x1, -offset);
context.Assembler.Mov(temp, memOp, OperandType.I32);
}
diff --git a/ARMeilleure/CodeGen/X86/IntrinsicTable.cs b/ARMeilleure/CodeGen/X86/IntrinsicTable.cs
index 73fb5fd1..e87de035 100644
--- a/ARMeilleure/CodeGen/X86/IntrinsicTable.cs
+++ b/ARMeilleure/CodeGen/X86/IntrinsicTable.cs
@@ -19,6 +19,10 @@ namespace ARMeilleure.CodeGen.X86
Add(Intrinsic.X86Addss, new IntrinsicInfo(X86Instruction.Addss, IntrinsicType.Binary));
Add(Intrinsic.X86Andnpd, new IntrinsicInfo(X86Instruction.Andnpd, IntrinsicType.Binary));
Add(Intrinsic.X86Andnps, new IntrinsicInfo(X86Instruction.Andnps, IntrinsicType.Binary));
+ Add(Intrinsic.X86Andpd, new IntrinsicInfo(X86Instruction.Andpd, IntrinsicType.Binary));
+ Add(Intrinsic.X86Andps, new IntrinsicInfo(X86Instruction.Andps, IntrinsicType.Binary));
+ Add(Intrinsic.X86Blendvpd, new IntrinsicInfo(X86Instruction.Blendvpd, IntrinsicType.Ternary));
+ Add(Intrinsic.X86Blendvps, new IntrinsicInfo(X86Instruction.Blendvps, IntrinsicType.Ternary));
Add(Intrinsic.X86Cmppd, new IntrinsicInfo(X86Instruction.Cmppd, IntrinsicType.TernaryImm));
Add(Intrinsic.X86Cmpps, new IntrinsicInfo(X86Instruction.Cmpps, IntrinsicType.TernaryImm));
Add(Intrinsic.X86Cmpsd, new IntrinsicInfo(X86Instruction.Cmpsd, IntrinsicType.TernaryImm));
diff --git a/ARMeilleure/CodeGen/X86/PreAllocator.cs b/ARMeilleure/CodeGen/X86/PreAllocator.cs
index a1490131..034a87ac 100644
--- a/ARMeilleure/CodeGen/X86/PreAllocator.cs
+++ b/ARMeilleure/CodeGen/X86/PreAllocator.cs
@@ -298,8 +298,11 @@ namespace ARMeilleure.CodeGen.X86
{
IntrinsicOperation intrinOp = (IntrinsicOperation)operation;
- // PBLENDVB last operand is always implied to be XMM0 when VEX is not supported.
- if (intrinOp.Intrinsic == Intrinsic.X86Pblendvb && !HardwareCapabilities.SupportsVexEncoding)
+ // BLENDVPD, BLENDVPS, PBLENDVB last operand is always implied to be XMM0 when VEX is not supported.
+ if ((intrinOp.Intrinsic == Intrinsic.X86Blendvpd ||
+ intrinOp.Intrinsic == Intrinsic.X86Blendvps ||
+ intrinOp.Intrinsic == Intrinsic.X86Pblendvb) &&
+ !HardwareCapabilities.SupportsVexEncoding)
{
Operand xmm0 = Xmm(X86Register.Xmm0, OperandType.V128);
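When VEX is unavailable there is no encoding slot for the mask, so the pre-allocator must pin the third source to the architectural register XMM0 before register allocation runs. A minimal sketch of that fix-up, assuming list/operation helpers in the shape of nodes.AddBefore and intrinOp.GetSource/SetSource (the actual statements are elided from this hunk):

    // Sketch only: copy the mask into xmm0 and retarget the operand, so the
    // register allocator sees the fixed-register constraint of blendv*.
    nodes.AddBefore(node, new Operation(Instruction.Copy, xmm0, intrinOp.GetSource(2)));
    intrinOp.SetSource(2, xmm0);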
diff --git a/ARMeilleure/CodeGen/X86/X86Instruction.cs b/ARMeilleure/CodeGen/X86/X86Instruction.cs
index 10ba891a..a29e68fb 100644
--- a/ARMeilleure/CodeGen/X86/X86Instruction.cs
+++ b/ARMeilleure/CodeGen/X86/X86Instruction.cs
@@ -10,6 +10,10 @@ namespace ARMeilleure.CodeGen.X86
And,
Andnpd,
Andnps,
+ Andpd,
+ Andps,
+ Blendvpd,
+ Blendvps,
Bsr,
Bswap,
Call,
@@ -180,6 +184,8 @@ namespace ARMeilleure.CodeGen.X86
Unpckhps,
Unpcklpd,
Unpcklps,
+ Vblendvpd,
+ Vblendvps,
Vpblendvb,
Xor,
Xorpd,
diff --git a/ARMeilleure/Common/BitUtils.cs b/ARMeilleure/Common/BitUtils.cs
index 55344608..7a29dcff 100644
--- a/ARMeilleure/Common/BitUtils.cs
+++ b/ARMeilleure/Common/BitUtils.cs
@@ -1,12 +1,12 @@
-using System.Runtime.CompilerServices;
-
namespace ARMeilleure.Common
{
static class BitUtils
{
private const int DeBrujinSequence = 0x77cb531;
- private static int[] DeBrujinLbsLut;
+ private static readonly int[] DeBrujinLbsLut;
+
+ private static readonly sbyte[] HbsNibbleLut;
static BitUtils()
{
@@ -18,19 +18,27 @@ namespace ARMeilleure.Common
DeBrujinLbsLut[lutIndex] = index;
}
+
+ HbsNibbleLut = new sbyte[] { -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 };
}
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- public static int LowestBitSet(int value)
+ public static int CountBits(int value)
{
- if (value == 0)
+ int count = 0;
+
+ while (value != 0)
{
- return -1;
+ value &= ~(value & -value);
+
+ count++;
}
- int lsb = value & -value;
+ return count;
+ }
- return DeBrujinLbsLut[(uint)(DeBrujinSequence * lsb) >> 27];
+ public static long FillWithOnes(int bits)
+ {
+ return bits == 64 ? -1L : (1L << bits) - 1;
}
public static int HighestBitSet(int value)
@@ -51,39 +59,33 @@ namespace ARMeilleure.Common
return -1;
}
- private static readonly sbyte[] HbsNibbleLut = { -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 };
-
- public static int HighestBitSetNibble(int value) => HbsNibbleLut[value & 0b1111];
-
- public static long Replicate(long bits, int size)
+ public static int HighestBitSetNibble(int value)
{
- long output = 0;
+ return HbsNibbleLut[value];
+ }
- for (int bit = 0; bit < 64; bit += size)
+ public static int LowestBitSet(int value)
+ {
+ if (value == 0)
{
- output |= bits << bit;
+ return -1;
}
- return output;
+ int lsb = value & -value;
+
+ return DeBrujinLbsLut[(uint)(DeBrujinSequence * lsb) >> 27];
}
- public static int CountBits(int value)
+ public static long Replicate(long bits, int size)
{
- int count = 0;
+ long output = 0;
- while (value != 0)
+ for (int bit = 0; bit < 64; bit += size)
{
- value &= ~(value & -value);
-
- count++;
+ output |= bits << bit;
}
- return count;
- }
-
- public static long FillWithOnes(int bits)
- {
- return bits == 64 ? -1L : (1L << bits) - 1;
+ return output;
}
public static int RotateRight(int bits, int shift, int size)
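LowestBitSet above relies on a De Bruijn multiply: value & -value isolates the lowest set bit, multiplying by the constant 0x077cb531 places a unique 5-bit pattern in the top bits, and the 32-entry LUT built in the static constructor maps that pattern back to a bit index. A self-contained version of the same trick:

    static class DeBruijnDemo
    {
        private static readonly int[] Lut = BuildLut();

        public static int LowestBitSet(int value)
        {
            if (value == 0) return -1;

            int lsb = value & -value; // isolate lowest set bit

            return Lut[(uint)(0x077cb531 * lsb) >> 27];
        }

        private static int[] BuildLut()
        {
            int[] lut = new int[32];

            for (int index = 0; index < 32; index++)
            {
                lut[(0x077cb531u * (1u << index)) >> 27] = index;
            }

            return lut;
        }
    }

For example, LowestBitSet(0b10100) returns 2.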
diff --git a/ARMeilleure/Decoders/DecoderHelper.cs b/ARMeilleure/Decoders/DecoderHelper.cs
index 3cbd4912..bc41c61c 100644
--- a/ARMeilleure/Decoders/DecoderHelper.cs
+++ b/ARMeilleure/Decoders/DecoderHelper.cs
@@ -1,10 +1,77 @@
using ARMeilleure.Common;
-using System;
namespace ARMeilleure.Decoders
{
static class DecoderHelper
{
+ static DecoderHelper()
+ {
+ Imm8ToFP32Table = BuildImm8ToFP32Table();
+ Imm8ToFP64Table = BuildImm8ToFP64Table();
+ }
+
+ public static readonly uint[] Imm8ToFP32Table;
+ public static readonly ulong[] Imm8ToFP64Table;
+
+ private static uint[] BuildImm8ToFP32Table()
+ {
+ uint[] tbl = new uint[256];
+
+ for (int idx = 0; idx < 256; idx++)
+ {
+ tbl[idx] = ExpandImm8ToFP32((uint)idx);
+ }
+
+ return tbl;
+ }
+
+ private static ulong[] BuildImm8ToFP64Table()
+ {
+ ulong[] tbl = new ulong[256];
+
+ for (int idx = 0; idx < 256; idx++)
+ {
+ tbl[idx] = ExpandImm8ToFP64((ulong)idx);
+ }
+
+ return tbl;
+ }
+
+ // abcdefgh -> aBbbbbbc defgh000 00000000 00000000 (B = ~b)
+ private static uint ExpandImm8ToFP32(uint imm)
+ {
+ uint MoveBit(uint bits, int from, int to)
+ {
+ return ((bits >> from) & 1U) << to;
+ }
+
+ return MoveBit(imm, 7, 31) | MoveBit(~imm, 6, 30) |
+ MoveBit(imm, 6, 29) | MoveBit( imm, 6, 28) |
+ MoveBit(imm, 6, 27) | MoveBit( imm, 6, 26) |
+ MoveBit(imm, 6, 25) | MoveBit( imm, 5, 24) |
+ MoveBit(imm, 4, 23) | MoveBit( imm, 3, 22) |
+ MoveBit(imm, 2, 21) | MoveBit( imm, 1, 20) |
+ MoveBit(imm, 0, 19);
+ }
+
+ // abcdefgh -> aBbbbbbb bbcdefgh 00000000 00000000 00000000 00000000 00000000 00000000 (B = ~b)
+ private static ulong ExpandImm8ToFP64(ulong imm)
+ {
+ ulong MoveBit(ulong bits, int from, int to)
+ {
+ return ((bits >> from) & 1UL) << to;
+ }
+
+ return MoveBit(imm, 7, 63) | MoveBit(~imm, 6, 62) |
+ MoveBit(imm, 6, 61) | MoveBit( imm, 6, 60) |
+ MoveBit(imm, 6, 59) | MoveBit( imm, 6, 58) |
+ MoveBit(imm, 6, 57) | MoveBit( imm, 6, 56) |
+ MoveBit(imm, 6, 55) | MoveBit( imm, 6, 54) |
+ MoveBit(imm, 5, 53) | MoveBit( imm, 4, 52) |
+ MoveBit(imm, 3, 51) | MoveBit( imm, 2, 50) |
+ MoveBit(imm, 1, 49) | MoveBit( imm, 0, 48);
+ }
+
public struct BitMask
{
public long WMask;
@@ -62,34 +129,6 @@ namespace ARMeilleure.Decoders
};
}
- public static long DecodeImm8Float(long imm, int size)
- {
- int e = 0, f = 0;
-
- switch (size)
- {
- case 0: e = 8; f = 23; break;
- case 1: e = 11; f = 52; break;
-
- default: throw new ArgumentOutOfRangeException(nameof(size));
- }
-
- long value = (imm & 0x3f) << f - 4;
-
- long eBit = (imm >> 6) & 1;
- long sBit = (imm >> 7) & 1;
-
- if (eBit != 0)
- {
- value |= (1L << e - 3) - 1 << f + 2;
- }
-
- value |= (eBit ^ 1) << f + e - 1;
- value |= sBit << f + e;
-
- return value;
- }
-
public static long DecodeImm24_2(int opCode)
{
return ((long)opCode << 40) >> 38;
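The two tables precompute the A64 FP8 immediate expansion for all 256 encodings, replacing the removed DecodeImm8Float. As a sanity check of the bit layout (an illustrative snippet, not code from the diff):

    // imm8 = 0x70 is abcdefgh = 0111 0000: sign 0, exponent NOT(b):bbbbb:cd = 0111 1111,
    // mantissa efgh0... = 0, giving 0x3f800000 = 1.0f.
    uint bits = DecoderHelper.Imm8ToFP32Table[0x70];
    float one = BitConverter.Int32BitsToSingle((int)bits); // 1.0f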
diff --git a/ARMeilleure/Decoders/OpCodeSimdFmov.cs b/ARMeilleure/Decoders/OpCodeSimdFmov.cs
index 61a3f077..f0da0396 100644
--- a/ARMeilleure/Decoders/OpCodeSimdFmov.cs
+++ b/ARMeilleure/Decoders/OpCodeSimdFmov.cs
@@ -8,16 +8,8 @@ namespace ARMeilleure.Decoders
public OpCodeSimdFmov(InstDescriptor inst, ulong address, int opCode) : base(inst, address, opCode)
{
- int imm5 = (opCode >> 5) & 0x1f;
int type = (opCode >> 22) & 0x3;
- if (imm5 != 0b00000 || type > 1)
- {
- Instruction = InstDescriptor.Undefined;
-
- return;
- }
-
Size = type;
long imm;
@@ -25,7 +17,14 @@ namespace ARMeilleure.Decoders
Rd = (opCode >> 0) & 0x1f;
imm = (opCode >> 13) & 0xff;
- Immediate = DecoderHelper.DecodeImm8Float(imm, type);
+ if (type == 0)
+ {
+ Immediate = (long)DecoderHelper.Imm8ToFP32Table[(int)imm];
+ }
+ else /* if (type == 1) */
+ {
+ Immediate = (long)DecoderHelper.Imm8ToFP64Table[(int)imm];
+ }
}
}
}
\ No newline at end of file
diff --git a/ARMeilleure/Decoders/OpCodeSimdImm.cs b/ARMeilleure/Decoders/OpCodeSimdImm.cs
index ecad906d..a88e360e 100644
--- a/ARMeilleure/Decoders/OpCodeSimdImm.cs
+++ b/ARMeilleure/Decoders/OpCodeSimdImm.cs
@@ -23,19 +23,19 @@ namespace ARMeilleure.Decoders
if (modeHigh == 0b111)
{
- Size = modeLow != 0 ? op : 3;
-
switch (op | (modeLow << 1))
{
case 0:
// 64-bits Immediate.
// Transform abcd efgh into abcd efgh abcd efgh ...
+ Size = 3;
imm = (long)((ulong)imm * 0x0101010101010101);
break;
case 1:
// 64-bits Immediate.
// Transform abcd efgh into aaaa aaaa bbbb bbbb ...
+ Size = 3;
imm = (imm & 0xf0) >> 4 | (imm & 0x0f) << 4;
imm = (imm & 0xcc) >> 2 | (imm & 0x33) << 2;
imm = (imm & 0xaa) >> 1 | (imm & 0x55) << 1;
@@ -49,9 +49,16 @@ namespace ARMeilleure.Decoders
break;
case 2:
+ // 2 x 32-bits floating point Immediate.
+ Size = 0;
+ imm = (long)DecoderHelper.Imm8ToFP32Table[(int)imm];
+ imm |= imm << 32;
+ break;
+
case 3:
- // Floating point Immediate.
- imm = DecoderHelper.DecodeImm8Float(imm, Size);
+ // 64-bits floating point Immediate.
+ Size = 1;
+ imm = (long)DecoderHelper.Imm8ToFP64Table[(int)imm];
break;
}
}
@@ -72,7 +79,7 @@ namespace ARMeilleure.Decoders
}
else
{
- // 8 bits without shift.
+ // 8-bits without shift.
Size = 0;
}
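Case 1 above first bit-reverses the immediate byte with three swap steps (nibbles, then pairs, then single bits); the elided lines presumably spread each bit across a full byte. A straightforward reference version of the "abcd efgh -> aaaa aaaa bbbb bbbb ..." expansion, for comparison with the bit-twiddled one:

    // Widen each bit of the immediate into a full byte; bit h lands in the low byte.
    static ulong ExpandBitsToBytes(byte imm)
    {
        ulong result = 0;

        for (int bit = 0; bit < 8; bit++)
        {
            if (((imm >> bit) & 1) != 0)
            {
                result |= 0xFFUL << (bit * 8);
            }
        }

        return result;
    }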
diff --git a/ARMeilleure/Instructions/InstEmitAluHelper.cs b/ARMeilleure/Instructions/InstEmitAluHelper.cs
index 81d5c9eb..d032b32e 100644
--- a/ARMeilleure/Instructions/InstEmitAluHelper.cs
+++ b/ARMeilleure/Instructions/InstEmitAluHelper.cs
@@ -268,7 +268,7 @@ namespace ARMeilleure.Instructions
{
if (setCarry)
{
- SetFlag(context, PState.CFlag, Const(0));;
+ SetFlag(context, PState.CFlag, Const(0));
}
return Const(0);
diff --git a/ARMeilleure/Instructions/InstEmitSimdArithmetic.cs b/ARMeilleure/Instructions/InstEmitSimdArithmetic.cs
index c411a6d3..1a9e01c8 100644
--- a/ARMeilleure/Instructions/InstEmitSimdArithmetic.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdArithmetic.cs
@@ -384,8 +384,7 @@ namespace ARMeilleure.Instructions
}
else
{
- OperandType type = sizeF != 0 ? OperandType.FP64
- : OperandType.FP32;
+ OperandType type = sizeF != 0 ? OperandType.FP64 : OperandType.FP32;
Operand ne0 = context.VectorExtract(type, GetVec(op.Rn), 0);
Operand ne1 = context.VectorExtract(type, GetVec(op.Rn), 1);
@@ -455,6 +454,7 @@ namespace ARMeilleure.Instructions
{
OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
+ Operand d = GetVec(op.Rd);
Operand a = GetVec(op.Ra);
Operand n = GetVec(op.Rn);
Operand m = GetVec(op.Rm);
@@ -462,18 +462,16 @@ namespace ARMeilleure.Instructions
if (op.Size == 0)
{
Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
+ res = context.AddIntrinsic(Intrinsic.X86Addss, a, res);
- res = context.AddIntrinsic(Intrinsic.X86Addss, a, res);
-
- context.Copy(GetVec(op.Rd), context.VectorZeroUpper96(res));
+ context.Copy(d, context.VectorZeroUpper96(res));
}
else /* if (op.Size == 1) */
{
Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
+ res = context.AddIntrinsic(Intrinsic.X86Addsd, a, res);
- res = context.AddIntrinsic(Intrinsic.X86Addsd, a, res);
-
- context.Copy(GetVec(op.Rd), context.VectorZeroUpper64(res));
+ context.Copy(d, context.VectorZeroUpper64(res));
}
}
else
@@ -517,18 +515,32 @@ namespace ARMeilleure.Instructions
public static void Fmaxnm_S(ArmEmitterContext context)
{
- EmitScalarBinaryOpF(context, (op1, op2) =>
+ if (Optimizations.FastFP && Optimizations.UseSse41)
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMaxNum, SoftFloat64.FPMaxNum, op1, op2);
- });
+ EmitSse41MaxMinNumOpF(context, isMaxNum: true, scalar: true);
+ }
+ else
+ {
+ EmitScalarBinaryOpF(context, (op1, op2) =>
+ {
+ return EmitSoftFloatCall(context, SoftFloat32.FPMaxNum, SoftFloat64.FPMaxNum, op1, op2);
+ });
+ }
}
public static void Fmaxnm_V(ArmEmitterContext context)
{
- EmitVectorBinaryOpF(context, (op1, op2) =>
+ if (Optimizations.FastFP && Optimizations.UseSse41)
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMaxNum, SoftFloat64.FPMaxNum, op1, op2);
- });
+ EmitSse41MaxMinNumOpF(context, isMaxNum: true, scalar: false);
+ }
+ else
+ {
+ EmitVectorBinaryOpF(context, (op1, op2) =>
+ {
+ return EmitSoftFloatCall(context, SoftFloat32.FPMaxNum, SoftFloat64.FPMaxNum, op1, op2);
+ });
+ }
}
public static void Fmaxp_V(ArmEmitterContext context)
@@ -578,18 +590,32 @@ namespace ARMeilleure.Instructions
public static void Fminnm_S(ArmEmitterContext context)
{
- EmitScalarBinaryOpF(context, (op1, op2) =>
+ if (Optimizations.FastFP && Optimizations.UseSse41)
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMinNum, SoftFloat64.FPMinNum, op1, op2);
- });
+ EmitSse41MaxMinNumOpF(context, isMaxNum: false, scalar: true);
+ }
+ else
+ {
+ EmitScalarBinaryOpF(context, (op1, op2) =>
+ {
+ return EmitSoftFloatCall(context, SoftFloat32.FPMinNum, SoftFloat64.FPMinNum, op1, op2);
+ });
+ }
}
public static void Fminnm_V(ArmEmitterContext context)
{
- EmitVectorBinaryOpF(context, (op1, op2) =>
+ if (Optimizations.FastFP && Optimizations.UseSse41)
{
- return EmitSoftFloatCall(context, SoftFloat32.FPMinNum, SoftFloat64.FPMinNum, op1, op2);
- });
+ EmitSse41MaxMinNumOpF(context, isMaxNum: false, scalar: false);
+ }
+ else
+ {
+ EmitVectorBinaryOpF(context, (op1, op2) =>
+ {
+ return EmitSoftFloatCall(context, SoftFloat32.FPMinNum, SoftFloat64.FPMinNum, op1, op2);
+ });
+ }
}
public static void Fminp_V(ArmEmitterContext context)
@@ -813,6 +839,7 @@ namespace ARMeilleure.Instructions
{
OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
+ Operand d = GetVec(op.Rd);
Operand a = GetVec(op.Ra);
Operand n = GetVec(op.Rn);
Operand m = GetVec(op.Rm);
@@ -820,18 +847,16 @@ namespace ARMeilleure.Instructions
if (op.Size == 0)
{
Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
+ res = context.AddIntrinsic(Intrinsic.X86Subss, a, res);
- res = context.AddIntrinsic(Intrinsic.X86Subss, a, res);
-
- context.Copy(GetVec(op.Rd), context.VectorZeroUpper96(res));
+ context.Copy(d, context.VectorZeroUpper96(res));
}
else /* if (op.Size == 1) */
{
Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
+ res = context.AddIntrinsic(Intrinsic.X86Subsd, a, res);
- res = context.AddIntrinsic(Intrinsic.X86Subsd, a, res);
-
- context.Copy(GetVec(op.Rd), context.VectorZeroUpper64(res));
+ context.Copy(d, context.VectorZeroUpper64(res));
}
}
else
@@ -1035,36 +1060,88 @@ namespace ARMeilleure.Instructions
public static void Fnmadd_S(ArmEmitterContext context) // Fused.
{
- OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
+ if (Optimizations.FastFP && Optimizations.UseSse2)
+ {
+ OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
- int sizeF = op.Size & 1;
+ Operand d = GetVec(op.Rd);
+ Operand a = GetVec(op.Ra);
+ Operand n = GetVec(op.Rn);
+ Operand m = GetVec(op.Rm);
+
+ if (op.Size == 0)
+ {
+ Operand mask = X86GetScalar(context, -0f);
+
+ Operand aNeg = context.AddIntrinsic(Intrinsic.X86Xorps, mask, a);
+
+ Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
+ res = context.AddIntrinsic(Intrinsic.X86Subss, aNeg, res);
- OperandType type = sizeF != 0 ? OperandType.FP64 : OperandType.FP32;
+ context.Copy(d, context.VectorZeroUpper96(res));
+ }
+ else /* if (op.Size == 1) */
+ {
+ Operand mask = X86GetScalar(context, -0d);
- Operand ne = context.VectorExtract(type, GetVec(op.Rn), 0);
- Operand me = context.VectorExtract(type, GetVec(op.Rm), 0);
- Operand ae = context.VectorExtract(type, GetVec(op.Ra), 0);
+ Operand aNeg = context.AddIntrinsic(Intrinsic.X86Xorpd, mask, a);
- Operand res = context.Subtract(context.Multiply(context.Negate(ne), me), ae);
+ Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
+ res = context.AddIntrinsic(Intrinsic.X86Subsd, aNeg, res);
- context.Copy(GetVec(op.Rd), context.VectorInsert(context.VectorZero(), res, 0));
+ context.Copy(d, context.VectorZeroUpper64(res));
+ }
+ }
+ else
+ {
+ EmitScalarTernaryRaOpF(context, (op1, op2, op3) =>
+ {
+ return EmitSoftFloatCall(context, SoftFloat32.FPNegMulAdd, SoftFloat64.FPNegMulAdd, op1, op2, op3);
+ });
+ }
}
public static void Fnmsub_S(ArmEmitterContext context) // Fused.
{
- OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
+ if (Optimizations.FastFP && Optimizations.UseSse2)
+ {
+ OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
- int sizeF = op.Size & 1;
+ Operand d = GetVec(op.Rd);
+ Operand a = GetVec(op.Ra);
+ Operand n = GetVec(op.Rn);
+ Operand m = GetVec(op.Rm);
+
+ if (op.Size == 0)
+ {
+ Operand mask = X86GetScalar(context, -0f);
+
+ Operand aNeg = context.AddIntrinsic(Intrinsic.X86Xorps, mask, a);
+
+ Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
+ res = context.AddIntrinsic(Intrinsic.X86Addss, aNeg, res);
- OperandType type = sizeF != 0 ? OperandType.FP64 : OperandType.FP32;
+ context.Copy(d, context.VectorZeroUpper96(res));
+ }
+ else /* if (op.Size == 1) */
+ {
+ Operand mask = X86GetScalar(context, -0d);
- Operand ne = context.VectorExtract(type, GetVec(op.Rn), 0);
- Operand me = context.VectorExtract(type, GetVec(op.Rm), 0);
- Operand ae = context.VectorExtract(type, GetVec(op.Ra), 0);
+ Operand aNeg = context.AddIntrinsic(Intrinsic.X86Xorpd, mask, a);
- Operand res = context.Subtract(context.Multiply(ne, me), ae);
+ Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
+ res = context.AddIntrinsic(Intrinsic.X86Addsd, aNeg, res);
- context.Copy(GetVec(op.Rd), context.VectorInsert(context.VectorZero(), res, 0));
+ context.Copy(d, context.VectorZeroUpper64(res));
+ }
+ }
+ else
+ {
+ EmitScalarTernaryRaOpF(context, (op1, op2, op3) =>
+ {
+ return EmitSoftFloatCall(context, SoftFloat32.FPNegMulSub, SoftFloat64.FPNegMulSub, op1, op2, op3);
+ });
+ }
}
public static void Fnmul_S(ArmEmitterContext context)
@@ -2067,9 +2144,7 @@ namespace ARMeilleure.Instructions
m = context.AddIntrinsic(Intrinsic.X86Psrldq, m, Const(8));
}
- Intrinsic movInst = op.Size == 0
- ? Intrinsic.X86Pmovsxbw
- : Intrinsic.X86Pmovsxwd;
+ Intrinsic movInst = op.Size == 0 ? Intrinsic.X86Pmovsxbw : Intrinsic.X86Pmovsxwd;
n = context.AddIntrinsic(movInst, n);
m = context.AddIntrinsic(movInst, m);
@@ -2694,9 +2769,7 @@ namespace ARMeilleure.Instructions
m = context.AddIntrinsic(Intrinsic.X86Psrldq, m, Const(8));
}
- Intrinsic movInst = op.Size == 0
- ? Intrinsic.X86Pmovzxbw
- : Intrinsic.X86Pmovzxwd;
+ Intrinsic movInst = op.Size == 0 ? Intrinsic.X86Pmovzxbw : Intrinsic.X86Pmovzxwd;
n = context.AddIntrinsic(movInst, n);
m = context.AddIntrinsic(movInst, m);
@@ -3011,6 +3084,98 @@ namespace ARMeilleure.Instructions
context.Copy(GetVec(op.Rd), res);
}
+ private static Operand EmitSse2VectorIsQNaNOpF(ArmEmitterContext context, Operand opF)
+ {
+ IOpCodeSimd op = (IOpCodeSimd)context.CurrOp;
+
+ if ((op.Size & 1) == 0)
+ {
+ const int QBit = 22;
+
+ Operand qMask = X86GetAllElements(context, 1 << QBit);
+
+ Operand mask1 = context.AddIntrinsic(Intrinsic.X86Cmpps, opF, opF, Const((int)CmpCondition.UnorderedQ));
+
+ Operand mask2 = context.AddIntrinsic(Intrinsic.X86Pand, opF, qMask);
+ mask2 = context.AddIntrinsic(Intrinsic.X86Cmpps, mask2, qMask, Const((int)CmpCondition.Equal));
+
+ return context.AddIntrinsic(Intrinsic.X86Andps, mask1, mask2);
+ }
+ else /* if ((op.Size & 1) == 1) */
+ {
+ const int QBit = 51;
+
+ Operand qMask = X86GetAllElements(context, 1L << QBit);
+
+ Operand mask1 = context.AddIntrinsic(Intrinsic.X86Cmppd, opF, opF, Const((int)CmpCondition.UnorderedQ));
+
+ Operand mask2 = context.AddIntrinsic(Intrinsic.X86Pand, opF, qMask);
+ mask2 = context.AddIntrinsic(Intrinsic.X86Cmppd, mask2, qMask, Const((int)CmpCondition.Equal));
+
+ return context.AddIntrinsic(Intrinsic.X86Andpd, mask1, mask2);
+ }
+ }
+
+ private static void EmitSse41MaxMinNumOpF(ArmEmitterContext context, bool isMaxNum, bool scalar)
+ {
+ OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
+
+ Operand d = GetVec(op.Rd);
+ Operand n = GetVec(op.Rn);
+ Operand m = GetVec(op.Rm);
+
+ Operand nQNaNMask = EmitSse2VectorIsQNaNOpF(context, n);
+ Operand mQNaNMask = EmitSse2VectorIsQNaNOpF(context, m);
+
+ Operand nNum = context.Copy(n);
+ Operand mNum = context.Copy(m);
+
+ int sizeF = op.Size & 1;
+
+ if (sizeF == 0)
+ {
+ Operand negInfMask = X86GetAllElements(context, isMaxNum ? float.NegativeInfinity : float.PositiveInfinity);
+
+ Operand nMask = context.AddIntrinsic(Intrinsic.X86Andnps, mQNaNMask, nQNaNMask);
+ Operand mMask = context.AddIntrinsic(Intrinsic.X86Andnps, nQNaNMask, mQNaNMask);
+
+ nNum = context.AddIntrinsic(Intrinsic.X86Blendvps, nNum, negInfMask, nMask);
+ mNum = context.AddIntrinsic(Intrinsic.X86Blendvps, mNum, negInfMask, mMask);
+
+ Operand res = context.AddIntrinsic(isMaxNum ? Intrinsic.X86Maxps : Intrinsic.X86Minps, nNum, mNum);
+
+ if (scalar)
+ {
+ res = context.VectorZeroUpper96(res);
+ }
+ else if (op.RegisterSize == RegisterSize.Simd64)
+ {
+ res = context.VectorZeroUpper64(res);
+ }
+
+ context.Copy(d, res);
+ }
+ else /* if (sizeF == 1) */
+ {
+ Operand negInfMask = X86GetAllElements(context, isMaxNum ? double.NegativeInfinity : double.PositiveInfinity);
+
+ Operand nMask = context.AddIntrinsic(Intrinsic.X86Andnpd, mQNaNMask, nQNaNMask);
+ Operand mMask = context.AddIntrinsic(Intrinsic.X86Andnpd, nQNaNMask, mQNaNMask);
+
+ nNum = context.AddIntrinsic(Intrinsic.X86Blendvpd, nNum, negInfMask, nMask);
+ mNum = context.AddIntrinsic(Intrinsic.X86Blendvpd, mNum, negInfMask, mMask);
+
+ Operand res = context.AddIntrinsic(isMaxNum ? Intrinsic.X86Maxpd : Intrinsic.X86Minpd, nNum, mNum);
+
+ if (scalar)
+ {
+ res = context.VectorZeroUpper64(res);
+ }
+
+ context.Copy(d, res);
+ }
+ }
+
private enum AddSub
{
None,
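EmitSse41MaxMinNumOpF implements the FMAXNM/FMINNM rule that a single quiet-NaN operand loses to a number: the lone qNaN lane is overwritten with the identity element (-Inf for max, +Inf for min) before MAXPS/MINPS, while lanes where both operands are NaN are left alone so the NaN still propagates. A scalar reference of the same idea (a FastFP approximation: it tests any NaN rather than quiet NaNs only, and ignores flags):

    static float FpMaxNumFast(float n, float m)
    {
        bool nNaN = float.IsNaN(n), mNaN = float.IsNaN(m);

        if (nNaN && !mNaN) n = float.NegativeInfinity;
        if (mNaN && !nNaN) m = float.NegativeInfinity;

        return n > m ? n : m; // mirrors MAXPS: on ties or NaN, the second source wins
    }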
diff --git a/ARMeilleure/Instructions/InstEmitSimdCmp.cs b/ARMeilleure/Instructions/InstEmitSimdCmp.cs
index ac1bffcb..e70f56a0 100644
--- a/ARMeilleure/Instructions/InstEmitSimdCmp.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdCmp.cs
@@ -300,7 +300,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.FastFP && Optimizations.UseSse2)
{
- EmitCmpSseOrSse2OpF(context, CmpCondition.Equal, scalar: true);
+ EmitSse2CmpOpF(context, CmpCondition.Equal, scalar: true);
}
else
{
@@ -312,7 +312,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.FastFP && Optimizations.UseSse2)
{
- EmitCmpSseOrSse2OpF(context, CmpCondition.Equal, scalar: false);
+ EmitSse2CmpOpF(context, CmpCondition.Equal, scalar: false);
}
else
{
@@ -324,7 +324,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.FastFP && Optimizations.UseAvx)
{
- EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThanOrEqual, scalar: true);
+ EmitSse2CmpOpF(context, CmpCondition.GreaterThanOrEqual, scalar: true);
}
else
{
@@ -336,7 +336,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.FastFP && Optimizations.UseAvx)
{
- EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThanOrEqual, scalar: false);
+ EmitSse2CmpOpF(context, CmpCondition.GreaterThanOrEqual, scalar: false);
}
else
{
@@ -348,7 +348,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.FastFP && Optimizations.UseAvx)
{
- EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThan, scalar: true);
+ EmitSse2CmpOpF(context, CmpCondition.GreaterThan, scalar: true);
}
else
{
@@ -360,7 +360,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.FastFP && Optimizations.UseAvx)
{
- EmitCmpSseOrSse2OpF(context, CmpCondition.GreaterThan, scalar: false);
+ EmitSse2CmpOpF(context, CmpCondition.GreaterThan, scalar: false);
}
else
{
@@ -372,7 +372,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.FastFP && Optimizations.UseSse2)
{
- EmitCmpSseOrSse2OpF(context, CmpCondition.LessThanOrEqual, scalar: true);
+ EmitSse2CmpOpF(context, CmpCondition.LessThanOrEqual, scalar: true);
}
else
{
@@ -384,7 +384,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.FastFP && Optimizations.UseSse2)
{
- EmitCmpSseOrSse2OpF(context, CmpCondition.LessThanOrEqual, scalar: false);
+ EmitSse2CmpOpF(context, CmpCondition.LessThanOrEqual, scalar: false);
}
else
{
@@ -396,7 +396,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.FastFP && Optimizations.UseSse2)
{
- EmitCmpSseOrSse2OpF(context, CmpCondition.LessThan, scalar: true);
+ EmitSse2CmpOpF(context, CmpCondition.LessThan, scalar: true);
}
else
{
@@ -408,7 +408,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.FastFP && Optimizations.UseSse2)
{
- EmitCmpSseOrSse2OpF(context, CmpCondition.LessThan, scalar: false);
+ EmitSse2CmpOpF(context, CmpCondition.LessThan, scalar: false);
}
else
{
@@ -673,7 +673,7 @@ namespace ARMeilleure.Instructions
context.Copy(GetVec(op.Rd), res);
}
- private static void EmitCmpSseOrSse2OpF(ArmEmitterContext context, CmpCondition cond, bool scalar)
+ private static void EmitSse2CmpOpF(ArmEmitterContext context, CmpCondition cond, bool scalar)
{
OpCodeSimd op = (OpCodeSimd)context.CurrOp;
diff --git a/ARMeilleure/Instructions/InstEmitSimdHelper.cs b/ARMeilleure/Instructions/InstEmitSimdHelper.cs
index f0880079..28d075dd 100644
--- a/ARMeilleure/Instructions/InstEmitSimdHelper.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdHelper.cs
@@ -907,7 +907,7 @@ namespace ARMeilleure.Instructions
Operand res = context.VectorZero();
- Operand me = EmitVectorExtract(context, op.Rm, op.Index, op.Size, signed);;
+ Operand me = EmitVectorExtract(context, op.Rm, op.Index, op.Size, signed);
int elems = 8 >> op.Size;
@@ -939,7 +939,7 @@ namespace ARMeilleure.Instructions
Operand res = context.VectorZero();
- Operand me = EmitVectorExtract(context, op.Rm, op.Index, op.Size, signed);;
+ Operand me = EmitVectorExtract(context, op.Rm, op.Index, op.Size, signed);
int elems = 8 >> op.Size;
@@ -1114,6 +1114,7 @@ namespace ARMeilleure.Instructions
Equal = 0, // Ordered, non-signaling.
LessThan = 1, // Ordered, signaling.
LessThanOrEqual = 2, // Ordered, signaling.
+ UnorderedQ = 3, // Non-signaling.
NotLessThan = 5, // Unordered, signaling.
NotLessThanOrEqual = 6, // Unordered, signaling.
OrderedQ = 7, // Non-signaling.
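UnorderedQ (predicate 3 of CMPPS/CMPPD) is what lets EmitSse2VectorIsQNaNOpF detect NaNs without raising exceptions on quiet NaNs: a lane compares unordered with itself exactly when it is NaN, and ANDing with the quiet-bit test narrows that to quiet NaNs. A managed sketch of the same mask, using the .NET intrinsics rather than the emitter:

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    static Vector128<float> IsQNaNPerLane(Vector128<float> v)
    {
        Vector128<float> qBit = Vector128.Create(1 << 22).AsSingle(); // quiet bit of FP32
        Vector128<float> anyNaN = Sse.CompareUnordered(v, v);         // cmpps imm8 = 3
        Vector128<float> quiet = Sse.CompareEqual(Sse.And(v, qBit), qBit);

        return Sse.And(anyNaN, quiet);
    }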
diff --git a/ARMeilleure/Instructions/InstEmitSimdMove.cs b/ARMeilleure/Instructions/InstEmitSimdMove.cs
index 9d2aeb3b..789c8c87 100644
--- a/ARMeilleure/Instructions/InstEmitSimdMove.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdMove.cs
@@ -177,7 +177,7 @@ namespace ARMeilleure.Instructions
if (op.RegisterSize == RegisterSize.Simd64)
{
- nShifted = context.AddIntrinsic(Intrinsic.X86Movlhps, nShifted, context.VectorZero());
+ nShifted = context.VectorZeroUpper64(nShifted);
}
nShifted = context.AddIntrinsic(Intrinsic.X86Psrldq, nShifted, Const(op.Imm4));
@@ -188,7 +188,7 @@ namespace ARMeilleure.Instructions
if (op.RegisterSize == RegisterSize.Simd64)
{
- mShifted = context.AddIntrinsic(Intrinsic.X86Movlhps, mShifted, context.VectorZero());
+ mShifted = context.VectorZeroUpper64(mShifted);
}
Operand res = context.AddIntrinsic(Intrinsic.X86Por, nShifted, mShifted);
@@ -277,9 +277,10 @@ namespace ARMeilleure.Instructions
{
OpCodeSimd op = (OpCodeSimd)context.CurrOp;
+ Operand d = GetVec(op.Rd);
Operand n = GetIntOrZR(context, op.Rn);
- context.Copy(GetVec(op.Rd), EmitVectorInsert(context, GetVec(op.Rd), n, 1, 3));
+ context.Copy(d, EmitVectorInsert(context, d, n, 1, 3));
}
public static void Fmov_S(ArmEmitterContext context)
@@ -311,18 +312,32 @@ namespace ARMeilleure.Instructions
{
OpCodeSimdImm op = (OpCodeSimdImm)context.CurrOp;
- Operand e = Const(op.Immediate);
+ if (Optimizations.UseSse2)
+ {
+ if (op.RegisterSize == RegisterSize.Simd128)
+ {
+ context.Copy(GetVec(op.Rd), X86GetAllElements(context, op.Immediate));
+ }
+ else
+ {
+ context.Copy(GetVec(op.Rd), X86GetScalar(context, op.Immediate));
+ }
+ }
+ else
+ {
+ Operand e = Const(op.Immediate);
- Operand res = context.VectorZero();
+ Operand res = context.VectorZero();
- int elems = op.RegisterSize == RegisterSize.Simd128 ? 4 : 2;
+ int elems = op.RegisterSize == RegisterSize.Simd128 ? 2 : 1;
- for (int index = 0; index < (elems >> op.Size); index++)
- {
- res = EmitVectorInsert(context, res, e, index, op.Size + 2);
- }
+ for (int index = 0; index < elems; index++)
+ {
+ res = EmitVectorInsert(context, res, e, index, 3);
+ }
- context.Copy(GetVec(op.Rd), res);
+ context.Copy(GetVec(op.Rd), res);
+ }
}
public static void Ins_Gp(ArmEmitterContext context)
@@ -349,7 +364,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.UseSse2)
{
- EmitMoviMvni(context, not: false);
+ EmitSse2MoviMvni(context, not: false);
}
else
{
@@ -361,7 +376,7 @@ namespace ARMeilleure.Instructions
{
if (Optimizations.UseSse2)
{
- EmitMoviMvni(context, not: true);
+ EmitSse2MoviMvni(context, not: true);
}
else
{
@@ -430,13 +445,11 @@ namespace ARMeilleure.Instructions
{
Operand d = GetVec(op.Rd);
- Operand res = context.AddIntrinsic(Intrinsic.X86Movlhps, d, context.VectorZero());
-
- Operand n = GetVec(op.Rn);
+ Operand res = context.VectorZeroUpper64(d);
Operand mask = X86GetAllElements(context, _masksE0_TrnUzpXtn[op.Size]);
- Operand res2 = context.AddIntrinsic(Intrinsic.X86Pshufb, n, mask);
+ Operand res2 = context.AddIntrinsic(Intrinsic.X86Pshufb, GetVec(op.Rn), mask);
Intrinsic movInst = op.RegisterSize == RegisterSize.Simd128
? Intrinsic.X86Movlhps
@@ -444,7 +457,7 @@ namespace ARMeilleure.Instructions
res = context.AddIntrinsic(movInst, res, res2);
- context.Copy(GetVec(op.Rd), res);
+ context.Copy(d, res);
}
else
{
@@ -452,7 +465,9 @@ namespace ARMeilleure.Instructions
int part = op.RegisterSize == RegisterSize.Simd128 ? elems : 0;
- Operand res = part == 0 ? context.VectorZero() : context.Copy(GetVec(op.Rd));
+ Operand d = GetVec(op.Rd);
+
+ Operand res = part == 0 ? context.VectorZero() : context.Copy(d);
for (int index = 0; index < elems; index++)
{
@@ -461,7 +476,7 @@ namespace ARMeilleure.Instructions
res = EmitVectorInsert(context, res, ne, part + index, op.Size);
}
- context.Copy(GetVec(op.Rd), res);
+ context.Copy(d, res);
}
}
@@ -475,7 +490,7 @@ namespace ARMeilleure.Instructions
EmitVectorZip(context, part: 1);
}
- private static void EmitMoviMvni(ArmEmitterContext context, bool not)
+ private static void EmitSse2MoviMvni(ArmEmitterContext context, bool not)
{
OpCodeSimdImm op = (OpCodeSimdImm)context.CurrOp;
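Fmov_V now takes an SSE2 broadcast fast path; the rewritten fallback likewise inserts whole 64-bit elements (the decoder already expanded the immediate to 64 bits) instead of re-slicing it by op.Size. The intended result, expressed with the portable vector API as an illustration:

    using System.Runtime.Intrinsics;

    // Simd128: the expanded immediate fills both 64-bit lanes.
    // Simd64:  only the low lane is written; the upper lane is zero.
    static Vector128<long> FmovVector(long imm, bool simd128) =>
        simd128 ? Vector128.Create(imm) : Vector128.Create(imm, 0L);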
diff --git a/ARMeilleure/Instructions/SoftFloat.cs b/ARMeilleure/Instructions/SoftFloat.cs
index af22c85d..256bc5b9 100644
--- a/ARMeilleure/Instructions/SoftFloat.cs
+++ b/ARMeilleure/Instructions/SoftFloat.cs
@@ -1089,8 +1089,6 @@ namespace ARMeilleure.Instructions
public static float FPMulSub(float valueA, float value1, float value2)
{
- ExecutionContext context = NativeInterface.GetContext();
-
value1 = value1.FPNeg();
return FPMulAdd(valueA, value1, value2);
@@ -1138,6 +1136,21 @@ namespace ARMeilleure.Instructions
return result;
}
+ public static float FPNegMulAdd(float valueA, float value1, float value2)
+ {
+ valueA = valueA.FPNeg();
+ value1 = value1.FPNeg();
+
+ return FPMulAdd(valueA, value1, value2);
+ }
+
+ public static float FPNegMulSub(float valueA, float value1, float value2)
+ {
+ valueA = valueA.FPNeg();
+
+ return FPMulAdd(valueA, value1, value2);
+ }
+
public static float FPRecipEstimate(float value)
{
ExecutionContext context = NativeInterface.GetContext();
@@ -2196,6 +2209,21 @@ namespace ARMeilleure.Instructions
return result;
}
+ public static double FPNegMulAdd(double valueA, double value1, double value2)
+ {
+ valueA = valueA.FPNeg();
+ value1 = value1.FPNeg();
+
+ return FPMulAdd(valueA, value1, value2);
+ }
+
+ public static double FPNegMulSub(double valueA, double value1, double value2)
+ {
+ valueA = valueA.FPNeg();
+
+ return FPMulAdd(valueA, value1, value2);
+ }
+
public static double FPRecipEstimate(double value)
{
ExecutionContext context = NativeInterface.GetContext();
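The new FPNegMulAdd/FPNegMulSub helpers express the A64 negated fused ops through the existing FPMulAdd: FNMADD computes -a - n*m (both the addend and the product negated) and FNMSUB computes -a + n*m (only the addend negated). A quick cross-check against the runtime's fused multiply-add (illustrative, not part of the diff):

    // Math.FusedMultiplyAdd(x, y, z) = x*y + z with a single rounding.
    static double Fnmadd(double a, double n, double m) => Math.FusedMultiplyAdd(-n, m, -a); // -a - n*m
    static double Fnmsub(double a, double n, double m) => Math.FusedMultiplyAdd(n, m, -a);  // -a + n*m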
diff --git a/ARMeilleure/IntermediateRepresentation/Intrinsic.cs b/ARMeilleure/IntermediateRepresentation/Intrinsic.cs
index e2d3c6db..57c8914d 100644
--- a/ARMeilleure/IntermediateRepresentation/Intrinsic.cs
+++ b/ARMeilleure/IntermediateRepresentation/Intrinsic.cs
@@ -8,6 +8,10 @@ namespace ARMeilleure.IntermediateRepresentation
X86Addss,
X86Andnpd,
X86Andnps,
+ X86Andpd,
+ X86Andps,
+ X86Blendvpd,
+ X86Blendvps,
X86Cmppd,
X86Cmpps,
X86Cmpsd,