mirror of
https://github.com/GreemDev/Ryujinx
synced 2024-12-05 07:52:10 +01:00
e20abbf9cc
* Vulkan: Don't flush commands when creating most sync When the WaitForIdle method is called, we create sync as some internal GPU method may read back written buffer data. Some games randomly intersperse compute dispatch into their render passes, which results in this happening an unbounded number of times depending on how many times they run compute. Creating sync in Vulkan is expensive, as we need to flush the current command buffer so that it can be waited on. We have a limited number of active command buffers due to how we track resource usage, so submitting too many command buffers will force us to wait for them to return to the pool. This PR allows less "important" sync (things which are less likely to be waited on) to wait on a command buffer's result without submitting it, instead relying on AutoFlush or another, more important sync to flush it later on. Because of the possibility of us waiting for a command buffer that hasn't submitted yet, any thread needs to be able to force the active command buffer to submit. The ability to do this has been added to the backend multithreading via an "Interrupt", though it is not supported without multithreading. OpenGL drivers should already be doing something similar so they don't blow up when creating lots of sync, which is why this hasn't been a problem for these games over there. Improves Vulkan performance on Xenoblade DE, Pokemon Scarlet/Violet, and Zelda BOTW (still another large issue here) * Add strict argument This is technically a separate concern from whether the sync is a host syncpoint. * Remove _interrupted variable * Actually wait for the invoke This is required by AMD GPUs, and also may have caused some issues on other GPUs. * Remove unused using. * I don't know why it added these ones. * Address Feedback * Fix typo
368 lines
12 KiB
C#
368 lines
12 KiB
C#
using Silk.NET.Vulkan;
|
|
using System;
|
|
using System.Collections.Generic;
|
|
using System.Diagnostics;
|
|
using Thread = System.Threading.Thread;
|
|
|
|
namespace Ryujinx.Graphics.Vulkan
{
    /// <summary>
    /// Pools a fixed set of primary Vulkan command buffers, tracking each one through its
    /// lifecycle: free -> rented (InUse) -> submitted (InConsumption) -> free again once
    /// the fence associated with its submission signals.
    /// </summary>
    class CommandBufferPool : IDisposable
    {
        public const int MaxCommandBuffers = 16;

        // Assigned only in the constructor, so marked readonly to match the other
        // constructor-initialized fields. The mask trick in Rent() (cursor + 1) &
        // _totalCommandBuffersMask is only valid because the total (2 or 16) is a
        // power of two.
        private readonly int _totalCommandBuffers;
        private readonly int _totalCommandBuffersMask;

        private readonly Vk _api;
        private readonly Device _device;
        private readonly Queue _queue;
        private readonly object _queueLock;
        private readonly CommandPool _pool;
        private readonly Thread _owner;

        /// <summary>
        /// True when the calling thread is the one that created this pool.
        /// </summary>
        public bool OwnedByCurrentThread => _owner == Thread.CurrentThread;

        /// <summary>
        /// Per-slot state for one pooled command buffer.
        /// </summary>
        private struct ReservedCommandBuffer
        {
            public bool InUse;         // Rented out via Rent(), not yet returned.
            public bool InConsumption; // Returned and submitted; waiting on its fence.
            public CommandBuffer CommandBuffer;
            public FenceHolder Fence;
            // NOTE(review): Semaphore is read by AddDependency but never assigned in
            // this file — presumably populated elsewhere; confirm against callers.
            public SemaphoreHolder Semaphore;

            public List<IAuto> Dependants;            // Resources ref-counted until this buffer completes.
            public HashSet<MultiFenceHolder> Waitables;   // Waitables that hold this buffer's fence.
            public HashSet<SemaphoreHolder> Dependencies; // Semaphores released (Put) on completion.

            /// <summary>
            /// Allocates the underlying primary command buffer and the tracking collections.
            /// The fence is created separately (see WaitAndDecrementRef).
            /// </summary>
            public void Initialize(Vk api, Device device, CommandPool pool)
            {
                var allocateInfo = new CommandBufferAllocateInfo()
                {
                    SType = StructureType.CommandBufferAllocateInfo,
                    CommandBufferCount = 1,
                    CommandPool = pool,
                    Level = CommandBufferLevel.Primary
                };

                api.AllocateCommandBuffers(device, allocateInfo, out CommandBuffer);

                Dependants = new List<IAuto>();
                Waitables = new HashSet<MultiFenceHolder>();
                Dependencies = new HashSet<SemaphoreHolder>();
            }
        }

        // Also used as the lock object guarding all slot state.
        private readonly ReservedCommandBuffer[] _commandBuffers;

        // Circular queue of slot indexes in submission order, consumed by FreeConsumed.
        private readonly int[] _queuedIndexes;
        private int _queuedIndexesPtr; // Head of the circular queue.
        private int _queuedCount;      // Number of queued (InConsumption) entries.
        private int _inUseCount;       // Number of rented (InUse) entries.

        /// <summary>
        /// Creates a pool bound to the given queue and queue family.
        /// </summary>
        /// <param name="queueLock">Lock serializing submissions to <paramref name="queue"/>.</param>
        /// <param name="isLight">When true, only 2 command buffers are allocated instead of 16.</param>
        public unsafe CommandBufferPool(Vk api, Device device, Queue queue, object queueLock, uint queueFamilyIndex, bool isLight = false)
        {
            _api = api;
            _device = device;
            _queue = queue;
            _queueLock = queueLock;
            _owner = Thread.CurrentThread;

            var commandPoolCreateInfo = new CommandPoolCreateInfo()
            {
                SType = StructureType.CommandPoolCreateInfo,
                QueueFamilyIndex = queueFamilyIndex,
                Flags = CommandPoolCreateFlags.TransientBit |
                        CommandPoolCreateFlags.ResetCommandBufferBit
            };

            api.CreateCommandPool(device, commandPoolCreateInfo, null, out _pool).ThrowOnError();

            // We need at least 2 command buffers to get texture data in some cases.
            _totalCommandBuffers = isLight ? 2 : MaxCommandBuffers;
            _totalCommandBuffersMask = _totalCommandBuffers - 1;

            _commandBuffers = new ReservedCommandBuffer[_totalCommandBuffers];

            _queuedIndexes = new int[_totalCommandBuffers];
            _queuedIndexesPtr = 0;
            _queuedCount = 0;

            for (int i = 0; i < _totalCommandBuffers; i++)
            {
                _commandBuffers[i].Initialize(api, device, _pool);

                // Fresh slots are not InConsumption, so this just allocates the
                // slot's initial FenceHolder.
                WaitAndDecrementRef(i);
            }
        }

        /// <summary>
        /// Holds a reference on <paramref name="dependant"/> until the given command
        /// buffer finishes execution.
        /// </summary>
        public void AddDependant(int cbIndex, IAuto dependant)
        {
            dependant.IncrementReferenceCount();
            _commandBuffers[cbIndex].Dependants.Add(dependant);
        }

        /// <summary>
        /// Registers the waitable with every command buffer that has been submitted
        /// but has not yet signaled its fence.
        /// </summary>
        public void AddWaitable(MultiFenceHolder waitable)
        {
            lock (_commandBuffers)
            {
                for (int i = 0; i < _totalCommandBuffers; i++)
                {
                    ref var entry = ref _commandBuffers[i];

                    if (entry.InConsumption)
                    {
                        AddWaitable(i, waitable);
                    }
                }
            }
        }

        /// <summary>
        /// Registers the waitable with every command buffer that is currently rented
        /// (still being recorded).
        /// </summary>
        public void AddInUseWaitable(MultiFenceHolder waitable)
        {
            lock (_commandBuffers)
            {
                for (int i = 0; i < _totalCommandBuffers; i++)
                {
                    ref var entry = ref _commandBuffers[i];

                    if (entry.InUse)
                    {
                        AddWaitable(i, waitable);
                    }
                }
            }
        }

        /// <summary>
        /// Makes the command buffer at <paramref name="cbIndex"/> hold a reference to
        /// the semaphore of <paramref name="dependencyCbs"/>; it is released (Put)
        /// when this buffer completes.
        /// </summary>
        public void AddDependency(int cbIndex, CommandBufferScoped dependencyCbs)
        {
            Debug.Assert(_commandBuffers[cbIndex].InUse);
            var semaphoreHolder = _commandBuffers[dependencyCbs.CommandBufferIndex].Semaphore;
            semaphoreHolder.Get();
            _commandBuffers[cbIndex].Dependencies.Add(semaphoreHolder);
        }

        /// <summary>
        /// Attaches this slot's fence to the waitable and remembers the waitable so the
        /// fence can be removed again when the buffer completes.
        /// </summary>
        public void AddWaitable(int cbIndex, MultiFenceHolder waitable)
        {
            ref var entry = ref _commandBuffers[cbIndex];
            waitable.AddFence(cbIndex, entry.Fence);
            entry.Waitables.Add(waitable);
        }

        /// <summary>
        /// Returns true if any rented command buffer carries the waitable and the given
        /// buffer range is in use on it.
        /// </summary>
        public bool HasWaitableOnRentedCommandBuffer(MultiFenceHolder waitable, int offset, int size)
        {
            lock (_commandBuffers)
            {
                for (int i = 0; i < _totalCommandBuffers; i++)
                {
                    ref var entry = ref _commandBuffers[i];

                    if (entry.InUse &&
                        entry.Waitables.Contains(waitable) &&
                        waitable.IsBufferRangeInUse(i, offset, size))
                    {
                        return true;
                    }
                }
            }

            return false;
        }

        /// <summary>
        /// Returns true if <paramref name="fence"/> belongs to a command buffer that is
        /// currently rented.
        /// </summary>
        public bool IsFenceOnRentedCommandBuffer(FenceHolder fence)
        {
            lock (_commandBuffers)
            {
                for (int i = 0; i < _totalCommandBuffers; i++)
                {
                    ref var entry = ref _commandBuffers[i];

                    if (entry.InUse && entry.Fence == fence)
                    {
                        return true;
                    }
                }
            }

            return false;
        }

        /// <summary>
        /// Gets the fence currently associated with the given command buffer slot.
        /// </summary>
        public FenceHolder GetFence(int cbIndex)
        {
            return _commandBuffers[cbIndex].Fence;
        }

        /// <summary>
        /// Reclaims submitted command buffers whose fences have signaled, in submission
        /// order. When <paramref name="wait"/> is true, blocks on the oldest queued
        /// buffer's fence so at least one slot is freed. Must be called with the
        /// _commandBuffers lock held. Returns the index of the last freed slot, or 0.
        /// </summary>
        private int FreeConsumed(bool wait)
        {
            int freeEntry = 0;

            while (_queuedCount > 0)
            {
                int index = _queuedIndexes[_queuedIndexesPtr];

                ref var entry = ref _commandBuffers[index];

                if (wait || !entry.InConsumption || entry.Fence.IsSignaled())
                {
                    WaitAndDecrementRef(index);

                    // Only force a blocking wait for the first reclaimed entry.
                    wait = false;
                    freeEntry = index;

                    _queuedCount--;
                    _queuedIndexesPtr = (_queuedIndexesPtr + 1) % _totalCommandBuffers;
                }
                else
                {
                    break;
                }
            }

            return freeEntry;
        }

        /// <summary>
        /// Submits <paramref name="cbs"/> and rents a fresh command buffer.
        /// </summary>
        public CommandBufferScoped ReturnAndRent(CommandBufferScoped cbs)
        {
            Return(cbs);
            return Rent();
        }

        /// <summary>
        /// Rents a free command buffer, beginning recording on it. Blocks on the oldest
        /// submission if every slot is busy.
        /// </summary>
        /// <exception cref="InvalidOperationException">No free command buffer could be found.</exception>
        public CommandBufferScoped Rent()
        {
            lock (_commandBuffers)
            {
                // If the pool is exhausted, wait for the oldest submission to finish;
                // start scanning at the slot it freed.
                int cursor = FreeConsumed(_inUseCount + _queuedCount == _totalCommandBuffers);

                for (int i = 0; i < _totalCommandBuffers; i++)
                {
                    ref var entry = ref _commandBuffers[cursor];

                    if (!entry.InUse && !entry.InConsumption)
                    {
                        entry.InUse = true;

                        _inUseCount++;

                        var commandBufferBeginInfo = new CommandBufferBeginInfo()
                        {
                            SType = StructureType.CommandBufferBeginInfo
                        };

                        _api.BeginCommandBuffer(entry.CommandBuffer, commandBufferBeginInfo).ThrowOnError();

                        return new CommandBufferScoped(this, entry.CommandBuffer, cursor);
                    }

                    cursor = (cursor + 1) & _totalCommandBuffersMask;
                }
            }

            throw new InvalidOperationException($"Out of command buffers (In use: {_inUseCount}, queued: {_queuedCount}, total: {_totalCommandBuffers})");
        }

        /// <summary>
        /// Returns and submits a command buffer with no semaphores.
        /// </summary>
        public void Return(CommandBufferScoped cbs)
        {
            Return(cbs, null, null, null);
        }

        /// <summary>
        /// Ends recording on the returned command buffer, submits it to the queue with
        /// the given wait/signal semaphores, and enqueues it for later reclamation.
        /// </summary>
        public unsafe void Return(
            CommandBufferScoped cbs,
            ReadOnlySpan<Semaphore> waitSemaphores,
            ReadOnlySpan<PipelineStageFlags> waitDstStageMask,
            ReadOnlySpan<Semaphore> signalSemaphores)
        {
            lock (_commandBuffers)
            {
                int cbIndex = cbs.CommandBufferIndex;

                ref var entry = ref _commandBuffers[cbIndex];

                Debug.Assert(entry.InUse);
                Debug.Assert(entry.CommandBuffer.Handle == cbs.CommandBuffer.Handle);
                entry.InUse = false;
                entry.InConsumption = true;
                _inUseCount--;

                var commandBuffer = entry.CommandBuffer;

                _api.EndCommandBuffer(commandBuffer).ThrowOnError();

                fixed (Semaphore* pWaitSemaphores = waitSemaphores, pSignalSemaphores = signalSemaphores)
                {
                    fixed (PipelineStageFlags* pWaitDstStageMask = waitDstStageMask)
                    {
                        SubmitInfo sInfo = new SubmitInfo()
                        {
                            SType = StructureType.SubmitInfo,
                            WaitSemaphoreCount = waitSemaphores != null ? (uint)waitSemaphores.Length : 0,
                            PWaitSemaphores = pWaitSemaphores,
                            PWaitDstStageMask = pWaitDstStageMask,
                            CommandBufferCount = 1,
                            PCommandBuffers = &commandBuffer,
                            SignalSemaphoreCount = signalSemaphores != null ? (uint)signalSemaphores.Length : 0,
                            PSignalSemaphores = pSignalSemaphores
                        };

                        // Nested lock: _commandBuffers is always taken before _queueLock.
                        lock (_queueLock)
                        {
                            _api.QueueSubmit(_queue, 1, sInfo, entry.Fence.GetUnsafe()).ThrowOnError();
                        }
                    }
                }

                // Append to the tail of the circular reclamation queue.
                int ptr = (_queuedIndexesPtr + _queuedCount) % _totalCommandBuffers;
                _queuedIndexes[ptr] = cbIndex;
                _queuedCount++;
            }
        }

        /// <summary>
        /// Waits for the slot's submission (if any) to complete, releases everything it
        /// held (dependant refs, waitable fences, dependency semaphores), disposes its
        /// fence and, unless <paramref name="refreshFence"/> is false, installs a new one.
        /// </summary>
        private void WaitAndDecrementRef(int cbIndex, bool refreshFence = true)
        {
            ref var entry = ref _commandBuffers[cbIndex];

            if (entry.InConsumption)
            {
                entry.Fence.Wait();
                entry.InConsumption = false;
            }

            foreach (var dependant in entry.Dependants)
            {
                dependant.DecrementReferenceCount(cbIndex);
            }

            foreach (var waitable in entry.Waitables)
            {
                waitable.RemoveFence(cbIndex, entry.Fence);
                waitable.RemoveBufferUses(cbIndex);
            }

            foreach (var dependency in entry.Dependencies)
            {
                dependency.Put();
            }

            entry.Dependants.Clear();
            entry.Waitables.Clear();
            entry.Dependencies.Clear();
            entry.Fence?.Dispose();

            if (refreshFence)
            {
                entry.Fence = new FenceHolder(_api, _device);
            }
            else
            {
                entry.Fence = null;
            }
        }

        /// <summary>
        /// Waits for all outstanding submissions, releases all tracked resources and
        /// destroys the command pool.
        /// </summary>
        public unsafe void Dispose()
        {
            for (int i = 0; i < _totalCommandBuffers; i++)
            {
                WaitAndDecrementRef(i, refreshFence: false);
            }

            _api.DestroyCommandPool(_device, _pool, null);
        }
    }
}
|