Mirror of https://github.com/GreemDev/Ryujinx
Synced 2024-11-22 17:56:59 +01:00
Commit 22b2cb39af
* Turn `MemoryOperand` into a struct
* Remove `IntrinsicOperation`
* Remove `PhiNode`
* Remove `Node`
* Turn `Operand` into a struct
* Turn `Operation` into a struct
* Clean up pool management methods
* Add `Arena` allocator
* Move `OperationHelper` to `Operation.Factory`
* Move `OperandHelper` to `Operand.Factory`
* Optimize `Operation` a bit
* Fix `Arena` initialization
* Rename `NativeList<T>` to `ArenaList<T>`
* Reduce `Operand` size from 88 to 56 bytes
* Reduce `Operation` size from 56 to 40 bytes
* Add optimistic interning of Register & Constant operands
* Optimize `RegisterUsage` pass a bit
* Optimize `RemoveUnusedNodes` pass a bit

  Iterating in reverse order allows killing dependency chains in a single pass.

* Fix PPTC symbols
* Optimize `BasicBlock` a bit

  Reduce allocations from `_successor` & `DominanceFrontiers`.

* Fix `Operation` resize
* Make `Arena` expandable

  Change the arena allocator to be expandable by allocating in pages, with some of them being pooled. Currently 32 pages are pooled. An LRU removal mechanism should probably be added to it.

  Apparently MHR can allocate bitmaps large enough to exceed the 16MB limit for the type.

* Move `Arena` & `ArenaList` to `Common`
* Remove `ThreadStaticPool` & co
* Add `PhiOperation`
* Reduce `Operand` size from 56 to 48 bytes
* Add linear probing to the `Operand` intern table
* Optimize `HybridAllocator` a bit
* Add `Allocators` class
* Tune `ArenaAllocator` sizes
* Add page removal mechanism to `ArenaAllocator`

  Remove pages which have not been used for more than 5s after each reset. I am on the fence about whether this would be better done with a Gen2 callback object like the one in System.Buffers.ArrayPool<T> to trim the pool, because right now if a large translation happens, the pages will be freed only after a reset. This reset may not happen for a while if no new translation is hit, but the arena base sizes are rather small.

* Fix OOM when allocating larger than page size in `ArenaAllocator`

  Tweak the resizing mechanism for `Operand.Uses` and `Assignments`.

* Optimize `Optimizer` a bit
* Optimize `Operand.Add<T>/Remove<T>` a bit
* Clean up `PreAllocator`
* Fix phi insertion order

  Reduce codegen diffs.

* Fix code alignment
* Use new heuristics for degree of parallelism
* Suppress warnings
* Address gdkchan's feedback

  Renamed `GetValue()` to `GetValueUnsafe()` to make it clearer that `Operand.Value` should usually not be modified directly.

* Add fast path to `ArenaAllocator`

  Assembly for `ArenaAllocator.Allocate(ulong)`:

      .L0:
          mov rax, [rcx+0x18]
          lea r8, [rax+rdx]
          cmp r8, [rcx+0x10]
          ja short .L2
      .L1:
          mov rdx, [rcx+8]
          add rax, [rdx+8]
          mov [rcx+0x18], r8
          ret
      .L2:
          jmp ArenaAllocator.AllocateSlow(UInt64)

  A few variables/fields had to be changed to ulong so that RyuJIT avoids emitting zero-extends.

* Implement a new heuristic to free pooled pages

  If an arena is used often, it is more likely that its pages will be needed, so the pages are kept for longer (e.g. during a PPTC rebuild or bursts of compilations). If it is not used often, then it is more likely that its pages will not be needed (e.g. after a PPTC rebuild or bursts of compilations).

* Address riperiperi's feedback
* Use `EqualityComparer<T>` in `IntrusiveList<T>`

  Avoids a potential GC hole in `Equals(T, T)`.
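To make the arena scheme described above concrete, here is a minimal sketch of a page-based bump allocator with pooled pages, in the spirit of the `ArenaAllocator` mentioned in the log. It is illustrative only: `PageArena`, its 64 KiB page size, and all member names are invented for this example and do not match the real implementation.

    using System;
    using System.Collections.Generic;

    sealed class PageArena
    {
        private const int PageSize = 64 * 1024; // Assumed page size for the sketch.

        private readonly List<byte[]> _pages = new();
        private int _pageIndex = -1;   // Page currently being bump-allocated from.
        private int _offset;           // Next free byte within the current page.

        public Span<byte> Allocate(int size)
        {
            // Requests larger than a page get a dedicated, non-pooled allocation,
            // mirroring the "larger than page size" fix mentioned in the log.
            if (size > PageSize)
            {
                return new byte[size];
            }

            if (_pageIndex < 0 || _offset + size > PageSize)
            {
                // Reuse the next pooled page, or grow by one page if none is free.
                if (++_pageIndex == _pages.Count)
                {
                    _pages.Add(new byte[PageSize]);
                }

                _offset = 0;
            }

            Span<byte> result = _pages[_pageIndex].AsSpan(_offset, size);
            _offset += size;

            return result;
        }

        public void Reset()
        {
            // Everything allocated from the arena dies at once; pages are kept
            // for reuse. A trimming heuristic like the one described above would
            // additionally drop pages that have not been touched for a while.
            _pageIndex = -1;
            _offset = 0;
        }
    }

Everything allocated between two calls to `Reset()` shares the pages' lifetime, which is what makes per-translation IR allocation cheap: freeing is a couple of field writes, and warm pages are reused by the next translation.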
184 lines · No EOL · 5.4 KiB · C#
using ARMeilleure.Decoders;
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.Translation;

using static ARMeilleure.Instructions.InstEmitHelper;
using static ARMeilleure.Instructions.InstEmitMemoryHelper;
using static ARMeilleure.IntermediateRepresentation.Operand.Factory;

namespace ARMeilleure.Instructions
{
    static partial class InstEmit
    {
        public static void Adr(ArmEmitterContext context)
        {
            OpCodeAdr op = (OpCodeAdr)context.CurrOp;

            SetIntOrZR(context, op.Rd, Const(op.Address + (ulong)op.Immediate));
        }
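
        // ADRP computes a 4 KiB page-aligned, PC-relative address: the low 12
        // bits of the instruction's address are cleared and the immediate is
        // shifted left by 12 before being added.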
        public static void Adrp(ArmEmitterContext context)
        {
            OpCodeAdr op = (OpCodeAdr)context.CurrOp;

            ulong address = (op.Address & ~0xfffUL) + ((ulong)op.Immediate << 12);

            SetIntOrZR(context, op.Rd, Const(address));
        }

        public static void Ldr(ArmEmitterContext context) => EmitLdr(context, signed: false);
        public static void Ldrs(ArmEmitterContext context) => EmitLdr(context, signed: true);
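
        // Shared LDR implementation. When Extend64 is set, a signed load is
        // sign-extended all the way to 64 bits (e.g. LDRSW); otherwise signed
        // loads sign-extend to 32 bits and unsigned loads zero-extend.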
        private static void EmitLdr(ArmEmitterContext context, bool signed)
        {
            OpCodeMem op = (OpCodeMem)context.CurrOp;

            Operand address = GetAddress(context);

            if (signed && op.Extend64)
            {
                EmitLoadSx64(context, address, op.Rt, op.Size);
            }
            else if (signed)
            {
                EmitLoadSx32(context, address, op.Rt, op.Size);
            }
            else
            {
                EmitLoadZx(context, address, op.Rt, op.Size);
            }

            EmitWBackIfNeeded(context, address);
        }
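
        // PC-relative literal load. PRFM (prefetch) shares this encoding;
        // prefetch hints are treated as no-ops here, so nothing is emitted.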
        public static void Ldr_Literal(ArmEmitterContext context)
        {
            IOpCodeLit op = (IOpCodeLit)context.CurrOp;

            if (op.Prefetch)
            {
                return;
            }

            if (op.Signed)
            {
                EmitLoadSx64(context, Const(op.Immediate), op.Rt, op.Size);
            }
            else
            {
                EmitLoadZx(context, Const(op.Immediate), op.Rt, op.Size);
            }
        }
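
        // LDP loads two registers from consecutive memory slots; the second
        // address is offset from the first by the access size (1 << op.Size
        // bytes).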
        public static void Ldp(ArmEmitterContext context)
        {
            OpCodeMemPair op = (OpCodeMemPair)context.CurrOp;

            void EmitLoad(int rt, Operand ldAddr)
            {
                if (op.Extend64)
                {
                    EmitLoadSx64(context, ldAddr, rt, op.Size);
                }
                else
                {
                    EmitLoadZx(context, ldAddr, rt, op.Size);
                }
            }

            Operand address = GetAddress(context);
            Operand address2 = GetAddress(context, 1L << op.Size);

            EmitLoad(op.Rt, address);
            EmitLoad(op.Rt2, address2);

            EmitWBackIfNeeded(context, address);
        }
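
        // Stores need no sign or zero extension: the source register value is
        // simply truncated to the access size.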
        public static void Str(ArmEmitterContext context)
        {
            OpCodeMem op = (OpCodeMem)context.CurrOp;

            Operand address = GetAddress(context);

            InstEmitMemoryHelper.EmitStore(context, address, op.Rt, op.Size);

            EmitWBackIfNeeded(context, address);
        }

        public static void Stp(ArmEmitterContext context)
        {
            OpCodeMemPair op = (OpCodeMemPair)context.CurrOp;

            Operand address = GetAddress(context);
            Operand address2 = GetAddress(context, 1L << op.Size);

            InstEmitMemoryHelper.EmitStore(context, address, op.Rt, op.Size);
            InstEmitMemoryHelper.EmitStore(context, address2, op.Rt2, op.Size);

            EmitWBackIfNeeded(context, address);
        }
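
        // Computes the effective address for the current memory opcode,
        // covering the immediate form (with pre- or post-indexing) and the
        // register form (optionally extended and shifted). The addend is used
        // by the pair instructions (LDP/STP) to address the second slot.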
        private static Operand GetAddress(ArmEmitterContext context, long addend = 0)
        {
            Operand address = default;

            switch (context.CurrOp)
            {
                case OpCodeMemImm op:
                {
                    address = context.Copy(GetIntOrSP(context, op.Rn));

                    // Pre-indexing.
                    if (!op.PostIdx)
                    {
                        address = context.Add(address, Const(op.Immediate + addend));
                    }
                    else if (addend != 0)
                    {
                        address = context.Add(address, Const(addend));
                    }

                    break;
                }

                case OpCodeMemReg op:
                {
                    Operand n = GetIntOrSP(context, op.Rn);

                    Operand m = GetExtendedM(context, op.Rm, op.IntType);

                    if (op.Shift)
                    {
                        m = context.ShiftLeft(m, Const(op.Size));
                    }

                    address = context.Add(n, m);

                    if (addend != 0)
                    {
                        address = context.Add(address, Const(addend));
                    }

                    break;
                }
            }

            return address;
        }

        private static void EmitWBackIfNeeded(ArmEmitterContext context, Operand address)
        {
            // If the current opcode has write-back, update the base register.
            // For post-indexed forms the immediate is added here, after the
            // memory access, rather than as part of the address computation.
            if (context.CurrOp is OpCodeMemImm op && op.WBack)
            {
                if (op.PostIdx)
                {
                    address = context.Add(address, Const(op.Immediate));
                }

                SetIntOrSP(context, op.Rn, address);
            }
        }
    }
}