path: root/ARMeilleure/CodeGen
author    riperiperi <rhy3756547@hotmail.com> 2020-03-12 03:20:55 +0000
committer GitHub <noreply@github.com> 2020-03-12 14:20:55 +1100
commit    d904706fc0a14d17072f7235d73c80c4f01b1041 (patch)
tree      b0f5d56bcaaf711de2ba05d1510c72af8fa099f8 /ARMeilleure/CodeGen
parent    c26f3774bdbf3982149a3ea4c0f7abb4de869db7 (diff)
Use a Jump Table for direct and indirect calls/jumps, removing transitions to managed (#975)
* Implement Jump Table for Native Calls

  NOTE: this slows down rejit considerably! Not recommended without codegen optimisation or AOT.
  - Does not work on Linux
  - A32 needs an additional commit.

* A32 Support (WIP)

* Actually write Direct Call pointers to the table

  That would help.

* Direct Calls: rather than returning to the translator, attempt to stay within the native stack frame. A return to the translator can still happen, but only by exceptionally bubbling up to it.

  Also:
  - Always translate lowCq as a function. Faster interop with the direct jumps, and this will be useful in future if we want to do speculative translation.
  - Tail Call Detection: after the decoding stage, detect if we do a tail call, and avoid translating into it. Detected if a jump is made to an address outside the contiguous sequence of blocks surrounding the entry point. The goal is to reduce the code touched by jit and rejit.

* A32 Support

* Use a smaller max function size for lowCq; fix exceptional returns. When a return has an unexpected value and there is no code block following this one, we now return the value rather than continuing.

* CompareAndSwap (buggy)

* Ensure CompareAndSwap does not get optimized away.

* Use CompareAndSwap to make the dynamic table thread safe.

* Tail call for Linux; throw on too many arguments.

* Combine CompareAndSwap 128 and 32/64. They emit different IR instructions since their PreAllocator behaviour is different, but now they share a single function on EmitterContext.

* Fix issues separating from optimisations.

* Use a stub to find and execute missing functions. This allows us to skip doing many runtime comparisons and branches, and reduces the amount of code we need to emit significantly. For the indirect call table, this stub also does the work of moving the highCq address into the table when one is found.

* Make Jump Tables and Jit Cache dynamically resize. Reserve virtual memory, commit as needed.

* Move TailCallRemover to its own class.

* Multithreaded Translation (based on a heuristic). A poor one, at that. Need to get the core count for a better one, which means a lot of OS specific garbage.

* Better priority management for background threads.

* Bound the core limit a bit more. Past a certain point the load is not parallelizable and starts stealing from the main thread, likely due to GC, memory, and heap allocation thread contention. Reduce by one core until optimisations come to improve the situation.

* Fix memory management on Linux.

* Temporary solution to some sync problems. This will make sure threads exit correctly, most of the time. There is a potential race where setting the sync counter to 0 does nothing (the counter stays at what it was before, and the thread could take too long to exit), but we need to find a better way to do this anyway.

  Synchronization frequency has been tightened, as we never enter blockwise segments of code. Essentially this means we check every x functions or loop iterations; before, lowCq blocks existed and were worth just as much. Ideally it should be done in a better way, since functions can be anywhere from 1 to 5000 instructions (maybe based on a host timer, or an interrupt flag from a scheduler thread).

* Address feedback minus the CompareAndSwap change.

* Use default ReservedRegion granularity.

* Merge CompareAndSwap with its V128 variant.

* We already got the source, no need to do it again.

* Make sure all background translation threads exit.

* Fix CompareAndSwap128. The detection criteria were a bit scuffed.

* Address comments.
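The jump-table scheme above is easiest to see as a sketch. Below is a minimal C# model of the two tables and the miss stub; every name in it is hypothetical (this is not ARMeilleure's actual JumpTable type), and it only illustrates the dispatch strategy: direct call sites read a patched slot, while indirect calls go through a dynamic table whose misses fall back to a translate stub.

using System;
using System.Collections.Concurrent;

class JumpTableSketch
{
    // Direct call sites each own a slot, patched once the target is translated.
    private readonly IntPtr[] _directTable = new IntPtr[4096];

    // Indirect calls share a dynamic table keyed by guest address.
    private readonly ConcurrentDictionary<ulong, IntPtr> _dynamicTable =
        new ConcurrentDictionary<ulong, IntPtr>();

    public void PatchDirect(int slot, IntPtr hostFunction)
    {
        // Publishing the pointer redirects callers without re-translating them.
        _directTable[slot] = hostFunction;
    }

    public IntPtr ResolveIndirect(ulong guestAddress, Func<ulong, IntPtr> translate)
    {
        // The "missing function" stub path: translate on a miss, then cache
        // the result so later calls are a plain table load and jump, with no
        // transition back to managed code.
        return _dynamicTable.GetOrAdd(guestAddress, translate);
    }
}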
Diffstat (limited to 'ARMeilleure/CodeGen')
-rw-r--r--  ARMeilleure/CodeGen/Optimizations/Optimizer.cs |   4
-rw-r--r--  ARMeilleure/CodeGen/X86/Assembler.cs           |  14
-rw-r--r--  ARMeilleure/CodeGen/X86/CodeGenerator.cs       |  32
-rw-r--r--  ARMeilleure/CodeGen/X86/PreAllocator.cs        | 188
-rw-r--r--  ARMeilleure/CodeGen/X86/X86Instruction.cs      |   2
5 files changed, 216 insertions, 24 deletions
diff --git a/ARMeilleure/CodeGen/Optimizations/Optimizer.cs b/ARMeilleure/CodeGen/Optimizations/Optimizer.cs
index d3ffd185..8b0c75fd 100644
--- a/ARMeilleure/CodeGen/Optimizations/Optimizer.cs
+++ b/ARMeilleure/CodeGen/Optimizations/Optimizer.cs
@@ -136,7 +136,9 @@ namespace ARMeilleure.CodeGen.Optimizations
private static bool HasSideEffects(Node node)
{
- return (node is Operation operation) && operation.Instruction == Instruction.Call;
+ return (node is Operation operation) && (operation.Instruction == Instruction.Call
+ || operation.Instruction == Instruction.Tailcall
+ || operation.Instruction == Instruction.CompareAndSwap);
}
private static bool IsPropagableCopy(Operation operation)
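For context, the HasSideEffects change above matters to dead-code elimination: a CompareAndSwap whose result is never read still writes memory, so it must survive the pass. A simplified model of the rule, assuming stand-in types (Inst and Op here are not the real IR types):

enum Inst { Copy, Call, Tailcall, CompareAndSwap }

class Op
{
    public Inst Inst;
    public bool DestinationUsed;
}

static class DceSketch
{
    static bool HasSideEffects(Op op) =>
        op.Inst == Inst.Call || op.Inst == Inst.Tailcall || op.Inst == Inst.CompareAndSwap;

    // A node may only be dropped when nothing reads its result AND executing
    // it has no observable effect; CompareAndSwap writes memory, so it fails
    // the second test and is kept even with an unused destination.
    static bool CanRemove(Op op) => !op.DestinationUsed && !HasSideEffects(op);
}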
diff --git a/ARMeilleure/CodeGen/X86/Assembler.cs b/ARMeilleure/CodeGen/X86/Assembler.cs
index 70130d90..5088e6f0 100644
--- a/ARMeilleure/CodeGen/X86/Assembler.cs
+++ b/ARMeilleure/CodeGen/X86/Assembler.cs
@@ -90,6 +90,7 @@ namespace ARMeilleure.CodeGen.X86
Add(X86Instruction.Cmpps, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000fc2, InstructionFlags.Vex));
Add(X86Instruction.Cmpsd, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000fc2, InstructionFlags.Vex | InstructionFlags.PrefixF2));
Add(X86Instruction.Cmpss, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000fc2, InstructionFlags.Vex | InstructionFlags.PrefixF3));
+ Add(X86Instruction.Cmpxchg, new InstructionInfo(0x00000fb1, BadOp, BadOp, BadOp, BadOp, InstructionFlags.None));
Add(X86Instruction.Cmpxchg16b, new InstructionInfo(0x01000fc7, BadOp, BadOp, BadOp, BadOp, InstructionFlags.RexW));
Add(X86Instruction.Comisd, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f2f, InstructionFlags.Vex | InstructionFlags.Prefix66));
Add(X86Instruction.Comiss, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f2f, InstructionFlags.Vex));
@@ -117,6 +118,7 @@ namespace ARMeilleure.CodeGen.X86
Add(X86Instruction.Imul, new InstructionInfo(BadOp, 0x0000006b, 0x00000069, BadOp, 0x00000faf, InstructionFlags.None));
Add(X86Instruction.Imul128, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x050000f7, InstructionFlags.None));
Add(X86Instruction.Insertps, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x000f3a21, InstructionFlags.Vex | InstructionFlags.Prefix66));
+ Add(X86Instruction.Jmp, new InstructionInfo(0x040000ff, BadOp, BadOp, BadOp, BadOp, InstructionFlags.None));
Add(X86Instruction.Lea, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x0000008d, InstructionFlags.None));
Add(X86Instruction.Maxpd, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f5f, InstructionFlags.Vex | InstructionFlags.Prefix66));
Add(X86Instruction.Maxps, new InstructionInfo(BadOp, BadOp, BadOp, BadOp, 0x00000f5f, InstructionFlags.Vex));
@@ -328,6 +330,13 @@ namespace ARMeilleure.CodeGen.X86
WriteByte(0x99);
}
+ public void Cmpxchg(MemoryOperand memOp, Operand src)
+ {
+ WriteByte(LockPrefix);
+
+ WriteInstruction(memOp, src, src.Type, X86Instruction.Cmpxchg);
+ }
+
public void Cmpxchg16b(MemoryOperand memOp)
{
WriteByte(LockPrefix);
@@ -480,6 +489,11 @@ namespace ARMeilleure.CodeGen.X86
}
}
+ public void Jmp(Operand dest)
+ {
+ WriteInstruction(dest, null, OperandType.None, X86Instruction.Jmp);
+ }
+
public void Lea(Operand dest, Operand source, OperandType type)
{
WriteInstruction(dest, source, type, X86Instruction.Lea);
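The new Cmpxchg helper emits a lock-prefixed cmpxchg, which is the same compare-and-swap primitive the CLR exposes as Interlocked.CompareExchange. A minimal model of the instruction's semantics (of the semantics only, not of the emitter):

using System.Threading;

static class CmpxchgModel
{
    // lock cmpxchg [mem], src: compare [mem] against the accumulator
    // ((E/R)AX); on a match, store src into [mem]. Either way the old value
    // of [mem] ends up in the accumulator, which is what this returns.
    static long Cmpxchg(ref long memory, long desired, long expected)
    {
        return Interlocked.CompareExchange(ref memory, desired, expected);
    }
}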
diff --git a/ARMeilleure/CodeGen/X86/CodeGenerator.cs b/ARMeilleure/CodeGen/X86/CodeGenerator.cs
index 32ca6a78..1d0a4c12 100644
--- a/ARMeilleure/CodeGen/X86/CodeGenerator.cs
+++ b/ARMeilleure/CodeGen/X86/CodeGenerator.cs
@@ -34,7 +34,7 @@ namespace ARMeilleure.CodeGen.X86
Add(Instruction.ByteSwap, GenerateByteSwap);
Add(Instruction.Call, GenerateCall);
Add(Instruction.Clobber, GenerateClobber);
- Add(Instruction.CompareAndSwap128, GenerateCompareAndSwap128);
+ Add(Instruction.CompareAndSwap, GenerateCompareAndSwap);
Add(Instruction.CompareEqual, GenerateCompareEqual);
Add(Instruction.CompareGreater, GenerateCompareGreater);
Add(Instruction.CompareGreaterOrEqual, GenerateCompareGreaterOrEqual);
@@ -76,6 +76,7 @@ namespace ARMeilleure.CodeGen.X86
Add(Instruction.Store16, GenerateStore16);
Add(Instruction.Store8, GenerateStore8);
Add(Instruction.Subtract, GenerateSubtract);
+ Add(Instruction.Tailcall, GenerateTailcall);
Add(Instruction.VectorCreateScalar, GenerateVectorCreateScalar);
Add(Instruction.VectorExtract, GenerateVectorExtract);
Add(Instruction.VectorExtract16, GenerateVectorExtract16);
@@ -543,13 +544,27 @@ namespace ARMeilleure.CodeGen.X86
// register allocator, we don't need to produce any code.
}
- private static void GenerateCompareAndSwap128(CodeGenContext context, Operation operation)
+ private static void GenerateCompareAndSwap(CodeGenContext context, Operation operation)
{
- Operand source = operation.GetSource(0);
+ Operand src1 = operation.GetSource(0);
+
+ if (operation.SourcesCount == 5) // CompareAndSwap128 has 5 sources, compared to CompareAndSwap64/32's 3.
+ {
+ MemoryOperand memOp = new MemoryOperand(OperandType.I64, src1);
+
+ context.Assembler.Cmpxchg16b(memOp);
+ }
+ else
+ {
+ Operand src2 = operation.GetSource(1);
+ Operand src3 = operation.GetSource(2);
- MemoryOperand memOp = new MemoryOperand(OperandType.I64, source);
+ EnsureSameType(src2, src3);
- context.Assembler.Cmpxchg16b(memOp);
+ MemoryOperand memOp = new MemoryOperand(src3.Type, src1);
+
+ context.Assembler.Cmpxchg(memOp, src3);
+ }
}
private static void GenerateCompareEqual(CodeGenContext context, Operation operation)
@@ -1083,6 +1098,13 @@ namespace ARMeilleure.CodeGen.X86
}
}
+ private static void GenerateTailcall(CodeGenContext context, Operation operation)
+ {
+ WriteEpilogue(context);
+
+ context.Assembler.Jmp(operation.GetSource(0));
+ }
+
private static void GenerateVectorCreateScalar(CodeGenContext context, Operation operation)
{
Operand dest = operation.Destination;
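GenerateTailcall above tears the frame down first (WriteEpilogue) and only then jumps, so the target returns directly to our caller and the native stack never grows. A rough C# analogy of the control flow only — C# does not guarantee tail calls, and the shapes in the comments paraphrase the emitted code rather than quote it:

using System;

static class TailcallShape
{
    // Plain call:  prologue; ...; call target; ...; epilogue; ret
    // Tail call:   prologue; ...; epilogue; jmp target
    // With jmp, 'target' pops our caller's return address when it returns,
    // which is why the jump register must not be callee-saved: the epilogue
    // has just restored those.
    static long Plain(Func<long> target)
    {
        long result = target(); // control comes back here first
        return result;
    }

    static long Tail(Func<long> target)
    {
        return target(); // logically returns straight to our caller
    }
}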
diff --git a/ARMeilleure/CodeGen/X86/PreAllocator.cs b/ARMeilleure/CodeGen/X86/PreAllocator.cs
index 75844b09..e20fca9d 100644
--- a/ARMeilleure/CodeGen/X86/PreAllocator.cs
+++ b/ARMeilleure/CodeGen/X86/PreAllocator.cs
@@ -1,6 +1,7 @@
using ARMeilleure.CodeGen.RegisterAllocators;
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.Translation;
+using System;
using System.Collections.Generic;
using System.Diagnostics;
@@ -101,6 +102,17 @@ namespace ARMeilleure.CodeGen.X86
}
break;
+ case Instruction.Tailcall:
+ if (callConv == CallConvName.Windows)
+ {
+ HandleTailcallWindowsAbi(block.Operations, stackAlloc, node, operation);
+ }
+ else
+ {
+ HandleTailcallSystemVAbi(block.Operations, stackAlloc, node, operation);
+ }
+ break;
+
case Instruction.VectorInsert8:
if (!HardwareCapabilities.SupportsSse41)
{
@@ -199,32 +211,55 @@ namespace ARMeilleure.CodeGen.X86
switch (operation.Instruction)
{
- case Instruction.CompareAndSwap128:
+ case Instruction.CompareAndSwap:
{
- // Handle the many restrictions of the compare and exchange (16 bytes) instruction:
- // - The expected value should be in RDX:RAX.
- // - The new value to be written should be in RCX:RBX.
- // - The value at the memory location is loaded to RDX:RAX.
- void SplitOperand(Operand source, Operand lr, Operand hr)
+ OperandType type = operation.GetSource(1).Type;
+
+ if (type == OperandType.V128)
{
- nodes.AddBefore(node, new Operation(Instruction.VectorExtract, lr, source, Const(0)));
- nodes.AddBefore(node, new Operation(Instruction.VectorExtract, hr, source, Const(1)));
+ // Handle the many restrictions of the compare and exchange (16 bytes) instruction:
+ // - The expected value should be in RDX:RAX.
+ // - The new value to be written should be in RCX:RBX.
+ // - The value at the memory location is loaded to RDX:RAX.
+ void SplitOperand(Operand source, Operand lr, Operand hr)
+ {
+ nodes.AddBefore(node, new Operation(Instruction.VectorExtract, lr, source, Const(0)));
+ nodes.AddBefore(node, new Operation(Instruction.VectorExtract, hr, source, Const(1)));
+ }
+
+ Operand rax = Gpr(X86Register.Rax, OperandType.I64);
+ Operand rbx = Gpr(X86Register.Rbx, OperandType.I64);
+ Operand rcx = Gpr(X86Register.Rcx, OperandType.I64);
+ Operand rdx = Gpr(X86Register.Rdx, OperandType.I64);
+
+ SplitOperand(operation.GetSource(1), rax, rdx);
+ SplitOperand(operation.GetSource(2), rbx, rcx);
+
+ node = nodes.AddAfter(node, new Operation(Instruction.VectorCreateScalar, dest, rax));
+ node = nodes.AddAfter(node, new Operation(Instruction.VectorInsert, dest, dest, rdx, Const(1)));
+
+ operation.SetDestinations(new Operand[] { rdx, rax });
+
+ operation.SetSources(new Operand[] { operation.GetSource(0), rdx, rax, rcx, rbx });
}
+ else
+ {
+ // Handle the many restrictions of the compare and exchange (32/64) instruction:
+ // - The expected value should be in (E/R)AX.
+ // - The value at the memory location is loaded to (E/R)AX.
- Operand rax = Gpr(X86Register.Rax, OperandType.I64);
- Operand rbx = Gpr(X86Register.Rbx, OperandType.I64);
- Operand rcx = Gpr(X86Register.Rcx, OperandType.I64);
- Operand rdx = Gpr(X86Register.Rdx, OperandType.I64);
+ Operand expected = operation.GetSource(1);
- SplitOperand(operation.GetSource(1), rax, rdx);
- SplitOperand(operation.GetSource(2), rbx, rcx);
+ Operand rax = Gpr(X86Register.Rax, expected.Type);
- node = nodes.AddAfter(node, new Operation(Instruction.VectorCreateScalar, dest, rax));
- node = nodes.AddAfter(node, new Operation(Instruction.VectorInsert, dest, dest, rdx, Const(1)));
+ nodes.AddBefore(node, new Operation(Instruction.Copy, rax, expected));
- operation.SetDestinations(new Operand[] { rdx, rax });
+ operation.SetSources(new Operand[] { operation.GetSource(0), rax, operation.GetSource(2) });
- operation.SetSources(new Operand[] { operation.GetSource(0), rdx, rax, rcx, rbx });
+ node = nodes.AddAfter(node, new Operation(Instruction.Copy, dest, rax));
+
+ operation.Destination = rax;
+ }
break;
}
@@ -829,6 +864,123 @@ namespace ARMeilleure.CodeGen.X86
return node;
}
+ private static void HandleTailcallSystemVAbi(IntrusiveList<Node> nodes, StackAllocator stackAlloc, Node node, Operation operation)
+ {
+ List<Operand> sources = new List<Operand>();
+
+ sources.Add(operation.GetSource(0));
+
+ int argsCount = operation.SourcesCount - 1;
+
+ int intMax = CallingConvention.GetIntArgumentsOnRegsCount();
+ int vecMax = CallingConvention.GetVecArgumentsOnRegsCount();
+
+ int intCount = 0;
+ int vecCount = 0;
+
+ // Handle arguments passed on registers.
+ for (int index = 0; index < argsCount; index++)
+ {
+ Operand source = operation.GetSource(1 + index);
+
+ bool passOnReg;
+
+ if (source.Type.IsInteger())
+ {
+ passOnReg = intCount + 1 < intMax;
+ }
+ else
+ {
+ passOnReg = vecCount < vecMax;
+ }
+
+ if (source.Type == OperandType.V128 && passOnReg)
+ {
+ // V128 is a struct, we pass each half on a GPR if possible.
+ Operand argReg = Gpr(CallingConvention.GetIntArgumentRegister(intCount++), OperandType.I64);
+ Operand argReg2 = Gpr(CallingConvention.GetIntArgumentRegister(intCount++), OperandType.I64);
+
+ nodes.AddBefore(node, new Operation(Instruction.VectorExtract, argReg, source, Const(0)));
+ nodes.AddBefore(node, new Operation(Instruction.VectorExtract, argReg2, source, Const(1)));
+
+ continue;
+ }
+
+ if (passOnReg)
+ {
+ Operand argReg = source.Type.IsInteger()
+ ? Gpr(CallingConvention.GetIntArgumentRegister(intCount++), source.Type)
+ : Xmm(CallingConvention.GetVecArgumentRegister(vecCount++), source.Type);
+
+ Operation copyOp = new Operation(Instruction.Copy, argReg, source);
+
+ HandleConstantCopy(nodes, nodes.AddBefore(node, copyOp), copyOp);
+
+ sources.Add(argReg);
+ }
+ else
+ {
+ throw new NotImplementedException("Spilling is not currently supported for tail calls. (too many arguments)");
+ }
+ }
+
+ // The target address must be on the return registers, since we
+ // don't return anything and it is guaranteed to not be a
+ // callee saved register (which would be trashed on the epilogue).
+ Operand retReg = Gpr(CallingConvention.GetIntReturnRegister(), OperandType.I64);
+
+ Operation addrCopyOp = new Operation(Instruction.Copy, retReg, operation.GetSource(0));
+
+ nodes.AddBefore(node, addrCopyOp);
+
+ sources[0] = retReg;
+
+ operation.SetSources(sources.ToArray());
+ }
+
+ private static void HandleTailcallWindowsAbi(IntrusiveList<Node> nodes, StackAllocator stackAlloc, Node node, Operation operation)
+ {
+ int argsCount = operation.SourcesCount - 1;
+
+ int maxArgs = CallingConvention.GetArgumentsOnRegsCount();
+
+ if (argsCount > maxArgs)
+ {
+ throw new NotImplementedException("Spilling is not currently supported for tail calls. (too many arguments)");
+ }
+
+ Operand[] sources = new Operand[1 + argsCount];
+
+ // Handle arguments passed on registers.
+ for (int index = 0; index < argsCount; index++)
+ {
+ Operand source = operation.GetSource(1 + index);
+
+ Operand argReg = source.Type.IsInteger()
+ ? Gpr(CallingConvention.GetIntArgumentRegister(index), source.Type)
+ : Xmm(CallingConvention.GetVecArgumentRegister(index), source.Type);
+
+ Operation copyOp = new Operation(Instruction.Copy, argReg, source);
+
+ HandleConstantCopy(nodes, nodes.AddBefore(node, copyOp), copyOp);
+
+ sources[1 + index] = argReg;
+ }
+
+ // The target address must be on the return registers, since we
+ // don't return anything and it is guaranteed to not be a
+ // callee saved register (which would be trashed on the epilogue).
+ Operand retReg = Gpr(CallingConvention.GetIntReturnRegister(), OperandType.I64);
+
+ Operation addrCopyOp = new Operation(Instruction.Copy, retReg, operation.GetSource(0));
+
+ nodes.AddBefore(node, addrCopyOp);
+
+ sources[0] = retReg;
+
+ operation.SetSources(sources);
+ }
+
private static void HandleLoadArgumentWindowsAbi(
CompilerContext cctx,
IntrusiveList<Node> nodes,
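Both ABI handlers above pin the jump target to the integer return register because the epilogue restores callee-saved registers, and the return register is guaranteed not to be one of them. The spill check they gate on differs per ABI; a sketch of the limits, with the constants taken from the x86-64 calling conventions (the helper names here are hypothetical):

static class TailcallLimitsSketch
{
    // Windows x64: the first four arguments share positions across
    // RCX/RDX/R8/R9 and XMM0-XMM3. System V: up to six integer registers
    // (RDI, RSI, RDX, RCX, R8, R9) and eight vector registers (XMM0-XMM7),
    // counted independently. Anything beyond these would need stack
    // spilling, which HandleTailcall*Abi does not implement (it throws).
    const int WindowsRegArgs = 4;
    const int SysVIntRegArgs = 6;
    const int SysVVecRegArgs = 8;

    static bool FitsWindowsRegs(int argCount) => argCount <= WindowsRegArgs;

    static bool FitsSysVRegs(int intArgs, int vecArgs) =>
        intArgs <= SysVIntRegArgs && vecArgs <= SysVVecRegArgs;
}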
diff --git a/ARMeilleure/CodeGen/X86/X86Instruction.cs b/ARMeilleure/CodeGen/X86/X86Instruction.cs
index 813730f2..a6dbf1a5 100644
--- a/ARMeilleure/CodeGen/X86/X86Instruction.cs
+++ b/ARMeilleure/CodeGen/X86/X86Instruction.cs
@@ -23,6 +23,7 @@ namespace ARMeilleure.CodeGen.X86
Cmpps,
Cmpsd,
Cmpss,
+ Cmpxchg,
Cmpxchg16b,
Comisd,
Comiss,
@@ -50,6 +51,7 @@ namespace ARMeilleure.CodeGen.X86
Imul,
Imul128,
Insertps,
+ Jmp,
Lea,
Maxpd,
Maxps,