Diffstat (limited to 'ARMeilleure/Instructions')
-rw-r--r--  ARMeilleure/Instructions/InstEmitSimdHelper.cs    15
-rw-r--r--  ARMeilleure/Instructions/InstEmitSimdLogical.cs   41
-rw-r--r--  ARMeilleure/Instructions/InstEmitSimdShift.cs    134
3 files changed, 177 insertions, 13 deletions
diff --git a/ARMeilleure/Instructions/InstEmitSimdHelper.cs b/ARMeilleure/Instructions/InstEmitSimdHelper.cs
index 49c9e687..49c17560 100644
--- a/ARMeilleure/Instructions/InstEmitSimdHelper.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdHelper.cs
@@ -243,6 +243,21 @@ namespace ARMeilleure.Instructions
throw new ArgumentException($"Invalid rounding mode \"{roundMode}\".");
}
+ public static ulong X86GetGf2p8LogicalShiftLeft(int shift)
+ {
+ ulong identity =
+ (0b00000001UL << 56) |
+ (0b00000010UL << 48) |
+ (0b00000100UL << 40) |
+ (0b00001000UL << 32) |
+ (0b00010000UL << 24) |
+ (0b00100000UL << 16) |
+ (0b01000000UL << 8) |
+ (0b10000000UL << 0);
+
+ return shift >= 0 ? identity >> (shift * 8) : identity << (-shift * 8);
+ }
+
public static Operand EmitCountSetBits8(ArmEmitterContext context, Operand op) // "size" is 8 (SIMD&FP Inst.).
{
Debug.Assert(op.Type == OperandType.I32 || op.Type == OperandType.I64);
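
An aside for readers unfamiliar with GFNI: GF2P8AFFINEQB multiplies each source byte, viewed as an 8-bit vector over GF(2), by an 8x8 bit matrix packed into the matching 64-bit lane of the second operand, then XORs in the immediate. Per the Intel SDM, the row producing result bit i is stored in matrix byte 7 - i, which is why the identity above places 0b00000001 in the top byte. A minimal software model of one byte lane (the name Gf2p8AffineByte is ours, and imm8 is assumed to be zero):

    using System.Numerics;

    // Software model of one byte lane of GF2P8AFFINEQB with imm8 = 0.
    // Result bit i = parity(matrix byte [7 - i] AND x), per the Intel SDM.
    static byte Gf2p8AffineByte(ulong matrix, byte x)
    {
        byte result = 0;

        for (int i = 0; i < 8; i++)
        {
            byte row = (byte)(matrix >> ((7 - i) * 8));

            if ((BitOperations.PopCount((uint)(row & x)) & 1) != 0)
            {
                result |= (byte)(1 << i);
            }
        }

        return result;
    }

With the identity matrix the model returns x unchanged; shifting the matrix right by one byte per step turns it into a per-byte left shift, which is exactly what X86GetGf2p8LogicalShiftLeft exploits, and a negative argument shifts the matrix the other way for a logical right shift.
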
diff --git a/ARMeilleure/Instructions/InstEmitSimdLogical.cs b/ARMeilleure/Instructions/InstEmitSimdLogical.cs
index dbd1a1a0..624ae841 100644
--- a/ARMeilleure/Instructions/InstEmitSimdLogical.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdLogical.cs
@@ -336,20 +336,45 @@ namespace ARMeilleure.Instructions
{
OpCodeSimd op = (OpCodeSimd)context.CurrOp;
- Operand res = context.VectorZero();
+ if (Optimizations.UseGfni)
+ {
+ const long bitMatrix =
+ (0b10000000L << 56) |
+ (0b01000000L << 48) |
+ (0b00100000L << 40) |
+ (0b00010000L << 32) |
+ (0b00001000L << 24) |
+ (0b00000100L << 16) |
+ (0b00000010L << 8) |
+ (0b00000001L << 0);
- int elems = op.RegisterSize == RegisterSize.Simd128 ? 16 : 8;
+ Operand vBitMatrix = X86GetAllElements(context, bitMatrix);
- for (int index = 0; index < elems; index++)
- {
- Operand ne = EmitVectorExtractZx(context, op.Rn, index, 0);
+ Operand res = context.AddIntrinsic(Intrinsic.X86Gf2p8affineqb, GetVec(op.Rn), vBitMatrix, Const(0));
- Operand de = EmitReverseBits8Op(context, ne);
+ if (op.RegisterSize == RegisterSize.Simd64)
+ {
+ res = context.VectorZeroUpper64(res);
+ }
- res = EmitVectorInsert(context, res, de, index, 0);
+ context.Copy(GetVec(op.Rd), res);
}
+ else
+ {
+ Operand res = context.VectorZero();
+ int elems = op.RegisterSize == RegisterSize.Simd128 ? 16 : 8;
- context.Copy(GetVec(op.Rd), res);
+ for (int index = 0; index < elems; index++)
+ {
+ Operand ne = EmitVectorExtractZx(context, op.Rn, index, 0);
+
+ Operand de = EmitReverseBits8Op(context, ne);
+
+ res = EmitVectorInsert(context, res, de, index, 0);
+ }
+
+ context.Copy(GetVec(op.Rd), res);
+ }
}
private static Operand EmitReverseBits8Op(ArmEmitterContext context, Operand op)
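
The GFNI path above replaces the per-element loop with a single affine transform: the anti-diagonal matrix (byte k holds 1 << k) maps result bit i to source bit 7 - i, reversing the bits of every byte in the vector at once. A quick check against the software model sketched earlier (Gf2p8AffineByte is our model, not part of the patch):

    const ulong reverseMatrix = 0x8040201008040201UL; // the bitMatrix constant above

    for (int x = 0; x < 256; x++)
    {
        byte expected = 0;

        for (int bit = 0; bit < 8; bit++)
        {
            expected |= (byte)(((x >> bit) & 1) << (7 - bit));
        }

        Debug.Assert(Gf2p8AffineByte(reverseMatrix, (byte)x) == expected);
    }
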
diff --git a/ARMeilleure/Instructions/InstEmitSimdShift.cs b/ARMeilleure/Instructions/InstEmitSimdShift.cs
index 146aeafa..cf3b51bd 100644
--- a/ARMeilleure/Instructions/InstEmitSimdShift.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdShift.cs
@@ -88,8 +88,35 @@ namespace ARMeilleure.Instructions
OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;
int shift = GetImmShl(op);
+ int eSize = 8 << op.Size;
- if (Optimizations.UseSse2 && op.Size > 0)
+ if (shift >= eSize)
+ {
+ if (op.RegisterSize == RegisterSize.Simd64)
+ {
+ Operand res = context.VectorZeroUpper64(GetVec(op.Rd));
+
+ context.Copy(GetVec(op.Rd), res);
+ }
+ }
+ else if (Optimizations.UseGfni && op.Size == 0)
+ {
+ Operand n = GetVec(op.Rn);
+
+ ulong bitMatrix = X86GetGf2p8LogicalShiftLeft(shift);
+
+ Operand vBitMatrix = X86GetElements(context, bitMatrix, bitMatrix);
+
+ Operand res = context.AddIntrinsic(Intrinsic.X86Gf2p8affineqb, n, vBitMatrix, Const(0));
+
+ if (op.RegisterSize == RegisterSize.Simd64)
+ {
+ res = context.VectorZeroUpper64(res);
+ }
+
+ context.Copy(GetVec(op.Rd), res);
+ }
+ else if (Optimizations.UseSse2 && op.Size > 0)
{
Operand n = GetVec(op.Rn);
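
Two things change in this path: when the immediate shift covers the whole element, nothing of Rn survives, so the code only re-zeroes the upper half of the destination where the register size demands it; otherwise, byte-sized shifts (op.Size == 0) now go through GFNI, which SSE2 cannot accelerate because x86 has no byte-granular PSLLB. A concrete value, checked with our software model (a sketch, not patch code):

    // Byte-wise SHL by 1: identity >> 8 = 0x0001020408102040.
    ulong shlBy1 = X86GetGf2p8LogicalShiftLeft(1);

    Debug.Assert(Gf2p8AffineByte(shlBy1, 0xFF) == 0xFE);
    Debug.Assert(Gf2p8AffineByte(shlBy1, 0b0101_0101) == 0b1010_1010);
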
@@ -396,10 +423,40 @@ namespace ARMeilleure.Instructions
{
OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;
- if (Optimizations.UseSse2 && op.Size > 0 && op.Size < 3)
+ int shift = GetImmShr(op);
+
+ if (Optimizations.UseGfni && op.Size == 0)
{
- int shift = GetImmShr(op);
+ Operand n = GetVec(op.Rn);
+ ulong bitMatrix;
+
+ if (shift < 8)
+ {
+ bitMatrix = X86GetGf2p8LogicalShiftLeft(-shift);
+
+ // Extend the sign bit into the upper result bits
+ bitMatrix |= 0x8080808080808080UL >> (64 - shift * 8);
+ }
+ else
+ {
+ // Replicate the sign bit into all bits
+ bitMatrix = 0x8080808080808080UL;
+ }
+
+ Operand vBitMatrix = X86GetElements(context, bitMatrix, bitMatrix);
+
+ Operand res = context.AddIntrinsic(Intrinsic.X86Gf2p8affineqb, n, vBitMatrix, Const(0));
+
+ if (op.RegisterSize == RegisterSize.Simd64)
+ {
+ res = context.VectorZeroUpper64(res);
+ }
+
+ context.Copy(GetVec(op.Rd), res);
+ }
+ else if (Optimizations.UseSse2 && op.Size > 0 && op.Size < 3)
+ {
Operand n = GetVec(op.Rn);
Intrinsic sraInst = X86PsraInstruction[op.Size];
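
For SSHR the logical right-shift matrix alone is not enough: the top shift bits of each result byte must copy the sign bit. ORing 0x80 into the low shift bytes of the matrix qword routes source bit 7 into each of those result bits, and a shift of eight degenerates to a matrix of all 0x80 rows, broadcasting the sign everywhere. A worked byte lane, checked against our software model (a sketch, not patch code):

    // Arithmetic shift right by 3: 0x90 is -112 as a signed byte; -112 >> 3 = -14 = 0xF2.
    ulong m = X86GetGf2p8LogicalShiftLeft(-3);
    m |= 0x8080808080808080UL >> (64 - 3 * 8); // sign rows for the top three result bits

    Debug.Assert(Gf2p8AffineByte(m, 0x90) == 0xF2);
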
@@ -929,10 +986,44 @@ namespace ARMeilleure.Instructions
OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;
int shift = GetImmShl(op);
+ int eSize = 8 << op.Size;
ulong mask = shift != 0 ? ulong.MaxValue >> (64 - shift) : 0UL;
- if (Optimizations.UseSse2 && op.Size > 0)
+ if (shift >= eSize)
+ {
+ if ((op.RegisterSize == RegisterSize.Simd64) || scalar)
+ {
+ Operand res = context.VectorZeroUpper64(GetVec(op.Rd));
+
+ context.Copy(GetVec(op.Rd), res);
+ }
+ }
+ else if (Optimizations.UseGfni && op.Size == 0)
+ {
+ Operand d = GetVec(op.Rd);
+ Operand n = GetVec(op.Rn);
+
+ ulong bitMatrix = X86GetGf2p8LogicalShiftLeft(shift);
+
+ Operand vBitMatrix = X86GetElements(context, bitMatrix, bitMatrix);
+
+ Operand nShifted = context.AddIntrinsic(Intrinsic.X86Gf2p8affineqb, n, vBitMatrix, Const(0));
+
+ Operand dMask = X86GetAllElements(context, (long)mask * _masks_SliSri[op.Size]);
+
+ Operand dMasked = context.AddIntrinsic(Intrinsic.X86Pand, d, dMask);
+
+ Operand res = context.AddIntrinsic(Intrinsic.X86Por, nShifted, dMasked);
+
+ if ((op.RegisterSize == RegisterSize.Simd64) || scalar)
+ {
+ res = context.VectorZeroUpper64(res);
+ }
+
+ context.Copy(d, res);
+ }
+ else if (Optimizations.UseSse2 && op.Size > 0)
{
Operand d = GetVec(op.Rd);
Operand n = GetVec(op.Rn);
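
SLI keeps the low shift bits of each destination element and fills the rest from the shifted source, hence the PAND with a broadcast mask followed by the POR with the GFNI result (the multiplication by _masks_SliSri[op.Size] appears to broadcast the element mask across the 64-bit lane; for byte elements that constant would be 0x0101010101010101). One byte lane worked through with our model (a sketch, not patch code):

    // SLI by 3: n = 0xAB shifts to 0x58; d = 0xC5 keeps its low three bits (0x05).
    byte shifted = Gf2p8AffineByte(X86GetGf2p8LogicalShiftLeft(3), 0xAB);
    byte kept    = (byte)(0xC5 & 0b0000_0111);

    Debug.Assert((byte)(shifted | kept) == 0x5D);
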
@@ -988,7 +1079,40 @@ namespace ARMeilleure.Instructions
ulong mask = (ulong.MaxValue << (eSize - shift)) & (ulong.MaxValue >> (64 - eSize));
- if (Optimizations.UseSse2 && op.Size > 0)
+ if (shift >= eSize)
+ {
+ if ((op.RegisterSize == RegisterSize.Simd64) || scalar)
+ {
+ Operand res = context.VectorZeroUpper64(GetVec(op.Rd));
+
+ context.Copy(GetVec(op.Rd), res);
+ }
+ }
+ else if (Optimizations.UseGfni && op.Size == 0)
+ {
+ Operand d = GetVec(op.Rd);
+ Operand n = GetVec(op.Rn);
+
+ ulong bitMatrix = X86GetGf2p8LogicalShiftLeft(-shift);
+
+ Operand vBitMatrix = X86GetElements(context, bitMatrix, bitMatrix);
+
+ Operand nShifted = context.AddIntrinsic(Intrinsic.X86Gf2p8affineqb, n, vBitMatrix, Const(0));
+
+ Operand dMask = X86GetAllElements(context, (long)mask * _masks_SliSri[op.Size]);
+
+ Operand dMasked = context.AddIntrinsic(Intrinsic.X86Pand, d, dMask);
+
+ Operand res = context.AddIntrinsic(Intrinsic.X86Por, nShifted, dMasked);
+
+ if ((op.RegisterSize == RegisterSize.Simd64) || scalar)
+ {
+ res = context.VectorZeroUpper64(res);
+ }
+
+ context.Copy(d, res);
+ }
+ else if (Optimizations.UseSse2 && op.Size > 0)
{
Operand d = GetVec(op.Rd);
Operand n = GetVec(op.Rn);
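
SRI mirrors SLI on the other end: the source is shifted right (a negative argument to the matrix helper), and the mask preserves the top shift bits of the destination instead of the bottom ones. The same style of check with our model (a sketch, not patch code):

    // SRI by 3: n = 0xAB shifts to 0x15; d = 0xC5 keeps its top three bits (0xC0).
    byte shifted = Gf2p8AffineByte(X86GetGf2p8LogicalShiftLeft(-3), 0xAB);
    byte kept    = (byte)(0xC5 & 0b1110_0000);

    Debug.Assert((byte)(shifted | kept) == 0xD5);
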