author | Wunk <wunkolo@gmail.com> | 2022-10-02 02:17:19 -0700
---|---|---
committer | GitHub <noreply@github.com> | 2022-10-02 11:17:19 +0200
commit | 45ce540b9b756f372840e923b73cfd7e3edd85f8 |
tree | 5908b97b09330f91c893c6c25a3a76519e8651de /ARMeilleure/Instructions/InstEmitSimdLogical.cs |
parent | 96bf7f8522e38c36d792a6ac2173497c3674e920 |
ARMeilleure: Add `gfni` acceleration (#3669) (tag: 1.1.286)
* ARMeilleure: Add `GFNI` detection
This is intended for utilizing the `gf2p8affineqb` instruction (see the
detection sketch after this list).
* ARMeilleure: Add `gf2p8affineqb`
Not using the VEX or EVEX form of this instruction is intentional. There
are GFNI-capable chips that do not support AVX (so no VEX encoding), such
as the Tremont-based Lakefield and Jasper Lake parts:
https://github.com/InstLatx64/InstLatx64/blob/13df339fe7150b114929f71b19a6b2fe72fc751e/GenuineIntel/GenuineIntel00806A1_Lakefield_LC_InstLatX64.txt#L1297-L1299
https://github.com/InstLatx64/InstLatx64/blob/13df339fe7150b114929f71b19a6b2fe72fc751e/GenuineIntel/GenuineIntel00906C0_JasperLake_InstLatX64.txt#L1252-L1254
* ARMeilleure: Add `gfni` acceleration of `Rbit_V`
Passes all `Rbit_V*` unit tests on my `i9-11900K`. A scalar model of the
byte transform is sketched just before the diff below.
* ARMeilleure: Add `gfni` acceleration of `S{l,r}i_V`
Also added a fast path for when the shift amount is greater than the
size of the element.
* ARMeilleure: Add `gfni` acceleration of `Shl_V` and `Sshr_V`
* ARMeilleure: Increment InternalVersion
* ARMeilleure: Fix Intrinsic and Assembler Table alignment
`gf2p8affineqb` is the longest instruction name I know of. It shouldn't
get any wider than this.
* ARMeilleure: Remove SSE2+SHA requirement for GFNI
* ARMeilleure: Add `X86GetGf2p8LogicalShiftLeft`
Used to generate the GF(2^8) 8x8 bit matrices that implement bit shifts
via the `gf2p8affineqb` instruction (derivation sketched after the diff below).
* ARMeilleure: Append `FeatureInfo7Ecx` to `FeatureInfo`
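
As a rough sketch of what the `GFNI` detection amounts to (illustrative only, not ARMeilleure's actual `HardwareCapabilities` code): GFNI support is reported in CPUID leaf 7, sub-leaf 0, ECX bit 8, which is the register the `FeatureInfo7Ecx` addition captures. .NET's `X86Base.CpuId` can query it directly:

```csharp
using System.Runtime.Intrinsics.X86;

static class GfniDetection
{
    // GFNI support is advertised in CPUID.(EAX=07H, ECX=0):ECX[bit 8].
    public static bool IsGfniSupported()
    {
        if (!X86Base.IsSupported)
        {
            return false; // Not running on an x86 host.
        }

        (_, _, int ecx, _) = X86Base.CpuId(7, 0);

        return (ecx & (1 << 8)) != 0;
    }
}
```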
Diffstat (limited to 'ARMeilleure/Instructions/InstEmitSimdLogical.cs')
-rw-r--r-- | ARMeilleure/Instructions/InstEmitSimdLogical.cs | 41 |
1 file changed, 33 insertions, 8 deletions
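
The hunk below replaces the per-byte `Rbit_V` loop with a single `gf2p8affineqb` against the bit-reversal matrix `0x8040201008040201`. For intuition, here is a hedged scalar model of the instruction's per-byte transform (following the Intel SDM definition; this helper is illustrative, not part of the patch): destination bit `i` is the parity of (matrix byte `7 - i` AND the source byte), so that matrix maps source bit `7 - i` to destination bit `i`, reversing each byte.

```csharp
using System.Numerics;

static class Gf2p8Model
{
    // Scalar model of one byte of gf2p8affineqb(src, matrix, imm8 = 0):
    // dst bit i = parity(matrix.byte[7 - i] AND src).
    public static byte AffineByte(byte src, ulong matrix)
    {
        byte result = 0;

        for (int i = 0; i < 8; i++)
        {
            ulong row = (matrix >> ((7 - i) * 8)) & 0xFF;

            if ((BitOperations.PopCount(row & src) & 1) != 0)
            {
                result |= (byte)(1 << i);
            }
        }

        return result;
    }
}

// Example: AffineByte(0b0000_0001, 0x8040201008040201UL) == 0b1000_0000.
```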
```diff
diff --git a/ARMeilleure/Instructions/InstEmitSimdLogical.cs b/ARMeilleure/Instructions/InstEmitSimdLogical.cs
index dbd1a1a0..624ae841 100644
--- a/ARMeilleure/Instructions/InstEmitSimdLogical.cs
+++ b/ARMeilleure/Instructions/InstEmitSimdLogical.cs
@@ -336,20 +336,45 @@ namespace ARMeilleure.Instructions
         {
             OpCodeSimd op = (OpCodeSimd)context.CurrOp;
 
-            Operand res = context.VectorZero();
+            if (Optimizations.UseGfni)
+            {
+                const long bitMatrix =
+                    (0b10000000L << 56) |
+                    (0b01000000L << 48) |
+                    (0b00100000L << 40) |
+                    (0b00010000L << 32) |
+                    (0b00001000L << 24) |
+                    (0b00000100L << 16) |
+                    (0b00000010L << 8) |
+                    (0b00000001L << 0);
 
-            int elems = op.RegisterSize == RegisterSize.Simd128 ? 16 : 8;
+                Operand vBitMatrix = X86GetAllElements(context, bitMatrix);
 
-            for (int index = 0; index < elems; index++)
-            {
-                Operand ne = EmitVectorExtractZx(context, op.Rn, index, 0);
+                Operand res = context.AddIntrinsic(Intrinsic.X86Gf2p8affineqb, GetVec(op.Rn), vBitMatrix, Const(0));
 
-                Operand de = EmitReverseBits8Op(context, ne);
+                if (op.RegisterSize == RegisterSize.Simd64)
+                {
+                    res = context.VectorZeroUpper64(res);
+                }
 
-                res = EmitVectorInsert(context, res, de, index, 0);
+                context.Copy(GetVec(op.Rd), res);
             }
+            else
+            {
+                Operand res = context.VectorZero();
+                int elems = op.RegisterSize == RegisterSize.Simd128 ? 16 : 8;
 
-            context.Copy(GetVec(op.Rd), res);
+                for (int index = 0; index < elems; index++)
+                {
+                    Operand ne = EmitVectorExtractZx(context, op.Rn, index, 0);
+
+                    Operand de = EmitReverseBits8Op(context, ne);
+
+                    res = EmitVectorInsert(context, res, de, index, 0);
+                }
+
+                context.Copy(GetVec(op.Rd), res);
+            }
         }
 
         private static Operand EmitReverseBits8Op(ArmEmitterContext context, Operand op)
```
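
For the shift emitters, the same instruction works by swapping in a different matrix. A hedged sketch of how `X86GetGf2p8LogicalShiftLeft`-style matrices can be derived (the class name, signature, and sign convention here are assumptions for illustration; the real helper returns a broadcast `Operand`): start from the identity matrix in `gf2p8affineqb`'s row ordering and displace its rows by whole bytes. Each byte of displacement moves the transform by one bit position, and displacing every row out yields the zero matrix, which is how an oversized shift falls out naturally.

```csharp
static class Gf2p8Matrices
{
    // Identity in gf2p8affineqb row order: destination bit i's row lives in
    // matrix byte 7 - i, so byte 7 = 0x01, byte 6 = 0x02, ..., byte 0 = 0x80.
    private const ulong Identity =
        (0b00000001UL << 56) |
        (0b00000010UL << 48) |
        (0b00000100UL << 40) |
        (0b00001000UL << 32) |
        (0b00010000UL << 24) |
        (0b00100000UL << 16) |
        (0b01000000UL << 8) |
        (0b10000000UL << 0);

    // Positive shift = logical left shift of every byte; negative = right.
    public static ulong LogicalShiftMatrix(int shift)
    {
        if (shift >= 8 || shift <= -8)
        {
            return 0; // All rows displaced out: every byte maps to zero.
        }

        return shift >= 0
            ? Identity >> (shift * 8)   // Left shift: rows move toward byte 0.
            : Identity << (-shift * 8); // Right shift: rows move toward byte 7.
    }
}
```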