using ARMeilleure.Memory;
using Ryujinx.Common;
using Ryujinx.Cpu.Signal;
using Ryujinx.Memory;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
using System.Threading;
using static Ryujinx.Cpu.MemoryEhMeilleure;
namespace ARMeilleure.Common
{
/// <summary>
/// Represents a table mapping a guest address to a value.
/// </summary>
/// <typeparam name="TEntry">Type of the value</typeparam>
public unsafe class AddressTable<TEntry> : IAddressTable<TEntry> where TEntry : unmanaged
{
/// <summary>
/// Represents a page of the address table.
/// </summary>
private readonly struct AddressTablePage
{
/// <summary>
/// True if the allocation belongs to a sparse block, false otherwise.
/// </summary>
public readonly bool IsSparse;
/// <summary>
/// Base address for the page.
/// </summary>
public readonly IntPtr Address;
public AddressTablePage(bool isSparse, IntPtr address)
{
IsSparse = isSparse;
Address = address;
}
}
/// <summary>
/// A sparsely mapped block of memory with a signal handler to map pages as they're accessed.
/// </summary>
private readonly struct TableSparseBlock : IDisposable
{
public readonly SparseMemoryBlock Block;
private readonly TrackingEventDelegate _trackingEvent;
public TableSparseBlock(ulong size, Action<IntPtr> ensureMapped, PageInitDelegate pageInit)
{
var block = new SparseMemoryBlock(size, pageInit, null);
// On an access violation inside the block, commit the faulting page and
// return the host pointer so the access can be retried.
_trackingEvent = (ulong address, ulong accessSize, bool write) =>
{
ulong pointer = (ulong)block.Block.Pointer + address;
ensureMapped((IntPtr)pointer);
return pointer;
};
// Register the tracking handler over the block's entire host address range.
bool added = NativeSignalHandler.AddTrackedRegion(
(nuint)block.Block.Pointer,
(nuint)(block.Block.Pointer + (IntPtr)block.Block.Size),
Marshal.GetFunctionPointerForDelegate(_trackingEvent));
if (!added)
{
throw new InvalidOperationException("Number of allowed tracked regions exceeded.");
}
Block = block;
}
public void Dispose()
{
NativeSignalHandler.RemoveTrackedRegion((nuint)Block.Block.Pointer);
Block.Dispose();
}
}
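// How a sparse block fills in, roughly: leaf pages are reserved up front but
// left uncommitted, and the first access to an uncommitted page raises an
// access violation. The tracking event above commits the touched page (via
// ensureMapped, which ends up in SparseMemoryBlock.EnsureMapped) and returns
// the faulting host pointer so the access can be retried. A hypothetical
// first touch, for illustration only:
//
//   var sparse = new TableSparseBlock(1ul << 24, EnsureMapped, InitLeafPage);
//   byte* first = (byte*)sparse.Block.Block.Pointer;
//   *first = 0; // faults once, InitLeafPage fills the page, access retries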
private bool _disposed;
private TEntry** _table;
private readonly List<AddressTablePage> _pages;
private TEntry _fill;
private readonly MemoryBlock _sparseFill;
private readonly SparseMemoryBlock _fillBottomLevel;
private readonly TEntry* _fillBottomLevelPtr;
private readonly List<TableSparseBlock> _sparseReserved;
private readonly ReaderWriterLockSlim _sparseLock;
private ulong _sparseBlockSize;
private ulong _sparseReservedOffset;
public bool Sparse { get; }
/// <inheritdoc/>
public ulong Mask { get; }
/// <inheritdoc/>
public AddressTableLevel[] Levels { get; }
/// <inheritdoc/>
public TEntry Fill
{
get
{
return _fill;
}
set
{
UpdateFill(value);
}
}
/// <inheritdoc/>
public IntPtr Base
{
get
{
ObjectDisposedException.ThrowIf(_disposed, this);
lock (_pages)
{
return (IntPtr)GetRootPage();
}
}
}
/// <summary>
/// Constructs a new instance of the <see cref="AddressTable{TEntry}"/> class with the specified list of
/// <see cref="AddressTableLevel"/>.
/// </summary>
/// <param name="levels">Levels for the address table</param>
/// <param name="sparse">True if the bottom page should be sparsely mapped</param>
/// <exception cref="ArgumentNullException"><paramref name="levels"/> is null</exception>
/// <exception cref="ArgumentException">Length of <paramref name="levels"/> is less than 2</exception>
public AddressTable(AddressTableLevel[] levels, bool sparse)
{
ArgumentNullException.ThrowIfNull(levels);
_pages = new List<AddressTablePage>(capacity: 16);
Levels = levels;
Mask = 0;
foreach (var level in Levels)
{
Mask |= level.Mask;
}
Sparse = sparse;
if (sparse)
{
// If the address table is sparse, allocate a fill block
_sparseFill = new MemoryBlock(268435456ul, MemoryAllocationFlags.Mirrorable); // 256 MiB; low power TC uses size: 65536ul
ulong bottomLevelSize = (1ul << levels.Last().Length) * (ulong)sizeof(TEntry);
_fillBottomLevel = new SparseMemoryBlock(bottomLevelSize, null, _sparseFill);
_fillBottomLevelPtr = (TEntry*)_fillBottomLevel.Block.Pointer;
_sparseReserved = new List<TableSparseBlock>();
_sparseLock = new ReaderWriterLockSlim();
_sparseBlockSize = bottomLevelSize;
}
}
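// A minimal construction sketch. The levels here are illustrative, not one of
// the AddressTablePresets: a two-level table over a 39-bit space of 4-byte
// aligned addresses, with sparsely mapped leaf pages.
//
//   var table = new AddressTable<ulong>(new[]
//   {
//       new AddressTableLevel(23, 16), // bits [38:23] select the leaf page
//       new AddressTableLevel(2, 21),  // bits [22:2] select the leaf entry
//   }, sparse: true);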
/// <summary>
/// Create an <see cref="AddressTable{TEntry}"/> instance for an ARM function table.
/// Selects the best table structure for A32/A64, taking into account the selected memory manager type.
/// </summary>
/// <param name="for64Bits">True if the guest is A64, false otherwise</param>
/// <param name="type">Memory manager type</param>
/// <returns>An <see cref="AddressTable{TEntry}"/> for ARM function lookup</returns>
public static AddressTable<TEntry> CreateForArm(bool for64Bits, MemoryManagerType type)
{
// Assume software memory means that we don't want to use any signal handlers.
bool sparse = type != MemoryManagerType.SoftwareMmu && type != MemoryManagerType.SoftwarePageTable;
return new AddressTable<TEntry>(AddressTablePresets.GetArmPreset(for64Bits, sparse), sparse);
}
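// Typical use, as a sketch (the stub pointer and lookup address are
// assumptions for illustration): create the function table, point unfilled
// entries at a "call the translator" stub via Fill, then resolve entries by
// guest address.
//
//   var funcTable = AddressTable<ulong>.CreateForArm(for64Bits: true, type);
//   funcTable.Fill = (ulong)dispatchStubPointer;
//   ref ulong entry = ref funcTable.GetValue(guestAddress);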
/// <summary>
/// Update the fill value for the bottom level of the table.
/// </summary>
/// <param name="fillValue">New fill value</param>
private void UpdateFill(TEntry fillValue)
{
if (_sparseFill != null)
{
Span<byte> span = _sparseFill.GetSpan(0, (int)_sparseFill.Size);
MemoryMarshal.Cast<byte, TEntry>(span).Fill(fillValue);
}
_fill = fillValue;
}
/// <summary>
/// Signal that the given code range exists.
/// </summary>
/// <param name="address">Start address of the code range</param>
/// <param name="size">Size of the code range in bytes</param>
public void SignalCodeRange(ulong address, ulong size)
{
AddressTableLevel bottom = Levels.Last();
ulong bottomLevelEntries = 1ul << bottom.Length;
ulong entryIndex = address >> bottom.Index;
ulong entries = size >> bottom.Index;
entries += entryIndex - BitUtils.AlignDown(entryIndex, bottomLevelEntries);
_sparseBlockSize = Math.Max(_sparseBlockSize, BitUtils.AlignUp(entries, bottomLevelEntries) * (ulong)sizeof(TEntry));
}
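// Worked example, assuming a bottom level with Index = 2 and Length = 21
// (2M entries per leaf): for address = 0x80001000 and size = 0x2000,
// entryIndex = 0x20000400 and entries = 0x800 + 0x400 misalignment = 0xC00.
// Aligned up to 0x200000 entries, _sparseBlockSize grows to at least one full
// leaf of entries, so later sparse reservations span the signalled range.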
/// <inheritdoc/>
public bool IsValid(ulong address)
{
return (address & ~Mask) == 0;
}
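// For example, with levels covering bits [38:2] as in the constructor sketch
// above, IsValid(0x1234) is true, while IsValid(1ul << 62) is false because
// bit 62 falls outside every level's mask.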
/// <inheritdoc/>
public ref TEntry GetValue(ulong address)
{
ObjectDisposedException.ThrowIf(_disposed, this);
if (!IsValid(address))
{
throw new ArgumentException($"Address 0x{address:X} is not mapped onto the table.", nameof(address));
}
lock (_pages)
{
TEntry* page = GetPage(address);
long index = Levels[^1].GetValue(address);
EnsureMapped((IntPtr)(page + index));
return ref page[index];
}
}
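// Usage sketch: the returned ref writes straight into the table, so a caller
// can publish a translated function with a single store (names illustrative):
//
//   ref ulong slot = ref funcTable.GetValue(guestAddress);
//   slot = (ulong)translatedFunctionPointer;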
/// <summary>
/// Gets the leaf page for the specified guest <paramref name="address"/>.
/// </summary>
/// <param name="address">Guest address</param>
/// <returns>Leaf page for the specified guest <paramref name="address"/></returns>
private TEntry* GetPage(ulong address)
{
TEntry** page = GetRootPage();
for (int i = 0; i < Levels.Length - 1; i++)
{
ref AddressTableLevel level = ref Levels[i];
ref TEntry* nextPage = ref page[level.GetValue(address)];
if (nextPage == null || nextPage == _fillBottomLevelPtr)
{
ref AddressTableLevel nextLevel = ref Levels[i + 1];
if (i == Levels.Length - 2)
{
nextPage = (TEntry*)Allocate(1 << nextLevel.Length, Fill, leaf: true);
}
else
{
nextPage = (TEntry*)Allocate(1 << nextLevel.Length, GetFillValue(i), leaf: false);
}
}
page = (TEntry**)nextPage;
}
return (TEntry*)page;
}
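// Decomposition sketch, using the two-level layout from the constructor
// example above: for address = 0x80001000,
//   Levels[0].GetValue(address) = (address >> 23) & 0xFFFF  = 0x100
//   Levels[1].GetValue(address) = (address >> 2) & 0x1FFFFF = 0x400
// so the walk reads root[0x100] to find (or allocate) the leaf page, and the
// caller then indexes entry 0x400 within it.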
/// <summary>
/// Ensure the given pointer is mapped in any overlapping sparse reservations.
/// </summary>
/// <param name="ptr">Pointer to be mapped</param>
private void EnsureMapped(IntPtr ptr)
{
if (Sparse)
{
// Check sparse allocations to see if the pointer is in any of them.
// Ensure the page is committed if there's a match.
_sparseLock.EnterReadLock();
try
{
foreach (TableSparseBlock reserved in _sparseReserved)
{
SparseMemoryBlock sparse = reserved.Block;
if (ptr >= sparse.Block.Pointer && ptr < sparse.Block.Pointer + (IntPtr)sparse.Block.Size)
{
sparse.EnsureMapped((ulong)(ptr - sparse.Block.Pointer));
break;
}
}
}
finally
{
_sparseLock.ExitReadLock();
}
}
}
/// <summary>
/// Get the fill value for a non-leaf level of the table.
/// </summary>
/// <param name="level">Level to get the fill value for</param>
/// <returns>The fill value</returns>
private IntPtr GetFillValue(int level)
{
if (_fillBottomLevel != null && level == Levels.Length - 2)
{
return (IntPtr)_fillBottomLevelPtr;
}
else
{
return IntPtr.Zero;
}
}
/// <summary>
/// Lazily initialize and get the root page of the <see cref="AddressTable{TEntry}"/>.
/// </summary>
/// <returns>Root page of the <see cref="AddressTable{TEntry}"/></returns>
private TEntry** GetRootPage()
{
if (_table == null)
{
if (Levels.Length == 1)
_table = (TEntry**)Allocate(1 << Levels[0].Length, Fill, leaf: true);
else
_table = (TEntry**)Allocate(1 << Levels[0].Length, GetFillValue(0), leaf: false);
}
return _table;
}
/// <summary>
/// Initialize a leaf page with the fill value.
/// </summary>
/// <param name="page">Page to initialize</param>
private void InitLeafPage(Span<byte> page)
{
MemoryMarshal.Cast<byte, TEntry>(page).Fill(_fill);
}
/// <summary>
/// Reserve a new sparse block, and add it to the list.
/// </summary>
/// <returns>The new sparse block that was added</returns>
private TableSparseBlock ReserveNewSparseBlock()
{
var block = new TableSparseBlock(_sparseBlockSize, EnsureMapped, InitLeafPage);
_sparseReserved.Add(block);
_sparseReservedOffset = 0;
return block;
}
/// <summary>
/// Allocates a block of memory of the specified type and length.
/// </summary>
/// <typeparam name="T">Type of elements</typeparam>
/// <param name="length">Number of elements</param>
/// <param name="fill">Fill value</param>
/// <param name="leaf"><see langword="true"/> if leaf; otherwise <see langword="false"/></param>
/// <returns>Allocated block</returns>
private IntPtr Allocate<T>(int length, T fill, bool leaf) where T : unmanaged
{
var size = sizeof(T) * length;
AddressTablePage page;
if (Sparse && leaf)
{
_sparseLock.EnterWriteLock();
SparseMemoryBlock block;
if (_sparseReserved.Count == 0)
{
block = ReserveNewSparseBlock().Block;
}
else
{
block = _sparseReserved.Last().Block;
if (_sparseReservedOffset == block.Block.Size)
{
block = ReserveNewSparseBlock().Block;
}
}
page = new AddressTablePage(true, block.Block.Pointer + (IntPtr)_sparseReservedOffset);
_sparseReservedOffset += (ulong)size;
_sparseLock.ExitWriteLock();
}
else
{
var address = (IntPtr)NativeAllocator.Instance.Allocate((uint)size);
page = new AddressTablePage(false, address);
var span = new Span<T>((void*)page.Address, length);
span.Fill(fill);
}
_pages.Add(page);
//TranslatorEventSource.Log.AddressTableAllocated(size, leaf);
return page.Address;
}
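// Design note: sparse leaf pages are bump-allocated out of the current
// reserved block (_sparseReservedOffset advances by the page size), so
// consecutive leaves are contiguous in host memory and stay uncommitted until
// first touched. Non-leaf pages are small and always resident, so they come
// from the native allocator and are filled eagerly instead.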
/// <summary>
/// Releases all resources used by the <see cref="AddressTable{TEntry}"/> instance.
/// </summary>
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
/// <summary>
/// Releases all unmanaged and optionally managed resources used by the
/// <see cref="AddressTable{TEntry}"/> instance.
/// </summary>
/// <param name="disposing"><see langword="true"/> to dispose managed resources also; otherwise just unmanaged resources</param>
protected virtual void Dispose(bool disposing)
{
if (!_disposed)
{
foreach (var page in _pages)
{
if (!page.IsSparse)
{
Marshal.FreeHGlobal(page.Address);
}
}
if (Sparse)
{
foreach (TableSparseBlock block in _sparseReserved)
{
block.Dispose();
}
_sparseReserved.Clear();
_fillBottomLevel.Dispose();
_sparseFill.Dispose();
_sparseLock.Dispose();
}
_disposed = true;
}
}
/// <summary>
/// Frees resources used by the <see cref="AddressTable{TEntry}"/> instance.
/// </summary>
~AddressTable()
{
Dispose(false);
}
}
}