Diffstat (limited to 'Ryujinx.Graphics.Gpu/Shader/HashTable')
-rw-r--r--  Ryujinx.Graphics.Gpu/Shader/HashTable/HashState.cs            113
-rw-r--r--  Ryujinx.Graphics.Gpu/Shader/HashTable/IDataAccessor.cs         27
-rw-r--r--  Ryujinx.Graphics.Gpu/Shader/HashTable/PartitionHashTable.cs   452
-rw-r--r--  Ryujinx.Graphics.Gpu/Shader/HashTable/PartitionedHashTable.cs 244
-rw-r--r--  Ryujinx.Graphics.Gpu/Shader/HashTable/SmartDataAccessor.cs     96
5 files changed, 932 insertions, 0 deletions
diff --git a/Ryujinx.Graphics.Gpu/Shader/HashTable/HashState.cs b/Ryujinx.Graphics.Gpu/Shader/HashTable/HashState.cs
new file mode 100644
index 00000000..584eefdc
--- /dev/null
+++ b/Ryujinx.Graphics.Gpu/Shader/HashTable/HashState.cs
@@ -0,0 +1,113 @@
+using System;
+using System.Runtime.InteropServices;
+
+namespace Ryujinx.Graphics.Gpu.Shader.HashTable
+{
+ /// <summary>
+ /// State of a hash calculation.
+ /// </summary>
+ struct HashState
+ {
+ // This is using a slightly modified implementation of FastHash64.
+ // Reference: https://github.com/ztanml/fast-hash/blob/master/fasthash.c
+ private const ulong M = 0x880355f21e6d1965UL;
+ private ulong _hash;
+ private int _start;
+
+ /// <summary>
+ /// One-shot hash calculation for the given data.
+ /// </summary>
+ /// <param name="data">Data to be hashed</param>
+ /// <returns>Hash of the given data</returns>
+ public static uint CalcHash(ReadOnlySpan<byte> data)
+ {
+ HashState state = new HashState();
+
+ state.Initialize();
+ state.Continue(data);
+ return state.Finalize(data);
+ }
+
+ /// <summary>
+ /// Initializes the hash state.
+ /// </summary>
+ public void Initialize()
+ {
+ _hash = 23;
+ }
+
+ /// <summary>
+ /// Calculates the hash of the given data.
+ /// </summary>
+ /// <remarks>
+ /// The full data must be passed on <paramref name="data"/>.
+ /// If this is not the first time the method is called, then <paramref name="data"/> must start with the data passed on the last call.
+ /// If a smaller slice of the data was already hashed before, only the additional data will be hashed.
+ /// This can be used for additive hashing of data in chunks.
+ /// </remarks>
+ /// <param name="data">Data to be hashed</param>
+ public void Continue(ReadOnlySpan<byte> data)
+ {
+ ulong h = _hash;
+
+ ReadOnlySpan<ulong> dataAsUlong = MemoryMarshal.Cast<byte, ulong>(data.Slice(_start));
+
+ for (int i = 0; i < dataAsUlong.Length; i++)
+ {
+ ulong value = dataAsUlong[i];
+
+ h ^= Mix(value);
+ h *= M;
+ }
+
+ _hash = h;
+ _start = data.Length & ~7;
+ }
+
+ /// <summary>
+ /// Performs the hash finalization step, and returns the calculated hash.
+ /// </summary>
+ /// <remarks>
+ /// The full data must be passed on <paramref name="data"/>.
+ /// <paramref name="data"/> must start with the data passed on the last call to <see cref="Continue"/>.
+ /// No internal state is changed, so one can still continue hashing data with <see cref="Continue"/>
+ /// after calling this method.
+ /// </remarks>
+ /// <param name="data">Data to be hashed</param>
+ /// <returns>Hash of all the data hashed with this <see cref="HashState"/></returns>
+ public uint Finalize(ReadOnlySpan<byte> data)
+ {
+ ulong h = _hash;
+
+ int remainder = data.Length & 7;
+ if (remainder != 0)
+ {
+ ulong v = 0;
+
+ for (int i = data.Length - remainder; i < data.Length; i++)
+ {
+ v |= (ulong)data[i] << ((i - remainder) * 8);
+ }
+
+ h ^= Mix(v);
+ h *= M;
+ }
+
+ h = Mix(h);
+ return (uint)(h - (h >> 32));
+ }
+
+ /// <summary>
+ /// Hash mix function.
+ /// </summary>
+ /// <param name="h">Hash to mix</param>
+ /// <returns>Mixed hash</returns>
+ private static ulong Mix(ulong h)
+ {
+ h ^= h >> 23;
+ h *= 0x2127599bf4325c37UL;
+ h ^= h >> 47;
+ return h;
+ }
+ }
+}
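As a quick illustration of the Continue/Finalize contract documented above, here is a sketch (not part of the patch; it assumes same-assembly access, since HashState is internal) that hashes the same buffer one-shot and in two additive steps and expects both results to match:

using System;
using Ryujinx.Graphics.Gpu.Shader.HashTable;

class HashStateExample
{
    static void Main()
    {
        byte[] data = new byte[20];
        new Random(0).NextBytes(data);

        // One-shot hash of the whole buffer.
        uint oneShot = HashState.CalcHash(data);

        // Additive hash: each Continue call receives the full data seen so far,
        // starting with what was passed on the previous call; only the new tail
        // bytes are actually hashed.
        HashState state = new HashState();
        state.Initialize();
        state.Continue(data.AsSpan(0, 8)); // first 8 bytes
        state.Continue(data);              // full 20 bytes, starts with the first 8
        uint additive = state.Finalize(data);

        Console.WriteLine(oneShot == additive); // expected: True
    }
}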
diff --git a/Ryujinx.Graphics.Gpu/Shader/HashTable/IDataAccessor.cs b/Ryujinx.Graphics.Gpu/Shader/HashTable/IDataAccessor.cs
new file mode 100644
index 00000000..c982cd9f
--- /dev/null
+++ b/Ryujinx.Graphics.Gpu/Shader/HashTable/IDataAccessor.cs
@@ -0,0 +1,27 @@
+using System;
+
+namespace Ryujinx.Graphics.Gpu.Shader.HashTable
+{
+ /// <summary>
+ /// Data accessor, used by <see cref="PartitionedHashTable{T}"/> to access data of unknown length.
+ /// </summary>
+ /// <remarks>
+ /// This will be used to access chunks of data and try finding a match on the table.
+ /// This is necessary because the data size is assumed to be unknown, and so the
+ /// hash table must try to "guess" the size of the data based on the entries on the table.
+ /// </remarks>
+ public interface IDataAccessor
+ {
+ /// <summary>
+ /// Gets a span of shader code at the specified offset, with at most the specified size.
+ /// </summary>
+ /// <remarks>
+ /// This might return a span smaller than the requested <paramref name="length"/> if there's
+ /// no more code available.
+ /// </remarks>
+ /// <param name="offset">Offset in shader code</param>
+ /// <param name="length">Size in bytes</param>
+ /// <returns>Shader code span</returns>
+ ReadOnlySpan<byte> GetSpan(int offset, int length);
+ }
+}
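A minimal in-memory implementation of this interface might look like the sketch below; the ArrayDataAccessor name is hypothetical and only serves to illustrate the "may return less than requested" contract:

using System;

namespace Ryujinx.Graphics.Gpu.Shader.HashTable
{
    // Hypothetical example: serves shader code from a byte array, clamping the
    // requested length so the returned span may be shorter than asked for.
    class ArrayDataAccessor : IDataAccessor
    {
        private readonly byte[] _code;

        public ArrayDataAccessor(byte[] code)
        {
            _code = code;
        }

        public ReadOnlySpan<byte> GetSpan(int offset, int length)
        {
            if (offset >= _code.Length)
            {
                return ReadOnlySpan<byte>.Empty;
            }

            int available = Math.Min(length, _code.Length - offset);

            return new ReadOnlySpan<byte>(_code, offset, available);
        }
    }
}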
diff --git a/Ryujinx.Graphics.Gpu/Shader/HashTable/PartitionHashTable.cs b/Ryujinx.Graphics.Gpu/Shader/HashTable/PartitionHashTable.cs
new file mode 100644
index 00000000..6a563c16
--- /dev/null
+++ b/Ryujinx.Graphics.Gpu/Shader/HashTable/PartitionHashTable.cs
@@ -0,0 +1,452 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Numerics;
+
+namespace Ryujinx.Graphics.Gpu.Shader.HashTable
+{
+ /// <summary>
+ /// Hash table for entries of a single data size, used as one partition of a <see cref="PartitionedHashTable{T}"/>.
+ /// </summary>
+ /// <typeparam name="T">Hash table entry type</typeparam>
+ class PartitionHashTable<T>
+ {
+ /// <summary>
+ /// Hash table entry.
+ /// </summary>
+ private struct Entry
+ {
+ /// <summary>
+ /// Hash of the data used by this entry: the first <see cref="OwnSize"/> bytes of <see cref="Data"/> for partial entries, or the full data otherwise.
+ /// </summary>
+ public readonly uint Hash;
+
+ /// <summary>
+ /// If this entry is only a sub-region of <see cref="Data"/>, this indicates the size in bytes
+ /// of that region. Otherwise, it should be zero.
+ /// </summary>
+ public readonly int OwnSize;
+
+ /// <summary>
+ /// Data used to compute the hash for this entry.
+ /// </summary>
+ /// <remarks>
+ /// To avoid additional allocations, this might be an instance of the full entry data,
+ /// and only a sub-region of it might be actually used by this entry. Such sub-region
+ /// has its size indicated by <see cref="OwnSize"/> in this case.
+ /// </remarks>
+ public readonly byte[] Data;
+
+ /// <summary>
+ /// Item associated with this entry.
+ /// </summary>
+ public T Item;
+
+ /// <summary>
+ /// Indicates if the entry is partial, which means that this entry is only for a sub-region of the data.
+ /// </summary>
+ /// <remarks>
+ /// Partial entries have no items associated with them. They just indicate that the data might be present on
+ /// the table, and one must keep looking for the full entry on other tables of larger data size.
+ /// </remarks>
+ public bool IsPartial => OwnSize != 0;
+
+ /// <summary>
+ /// Creates a new partial hash table entry.
+ /// </summary>
+ /// <param name="hash">Hash of the data</param>
+ /// <param name="ownerData">Full data</param>
+ /// <param name="ownSize">Size of the sub-region of data that belongs to this entry</param>
+ public Entry(uint hash, byte[] ownerData, int ownSize)
+ {
+ Hash = hash;
+ OwnSize = ownSize;
+ Data = ownerData;
+ Item = default;
+ }
+
+ /// <summary>
+ /// Creates a new full hash table entry.
+ /// </summary>
+ /// <param name="hash">Hash of the data</param>
+ /// <param name="data">Data</param>
+ /// <param name="item">Item associated with this entry</param>
+ public Entry(uint hash, byte[] data, T item)
+ {
+ Hash = hash;
+ OwnSize = 0;
+ Data = data;
+ Item = item;
+ }
+
+ /// <summary>
+ /// Gets the data for this entry, either full or partial.
+ /// </summary>
+ /// <returns>Data sub-region</returns>
+ public ReadOnlySpan<byte> GetData()
+ {
+ if (OwnSize != 0)
+ {
+ return new ReadOnlySpan<byte>(Data).Slice(0, OwnSize);
+ }
+
+ return Data;
+ }
+ }
+
+ /// <summary>
+ /// Hash table bucket.
+ /// </summary>
+ private struct Bucket
+ {
+ /// <summary>
+ /// Inline entry, to avoid allocations for the common single entry case.
+ /// </summary>
+ public Entry InlineEntry;
+
+ /// <summary>
+ /// List of additional entries for the not-so-common multiple entries case.
+ /// </summary>
+ public List<Entry> MoreEntries;
+ }
+
+ private Bucket[] _buckets;
+ private int _count;
+
+ /// <summary>
+ /// Total amount of entries on the hash table.
+ /// </summary>
+ public int Count => _count;
+
+ /// <summary>
+ /// Creates a new instance of the partitioned hash table.
+ /// </summary>
+ public PartitionHashTable()
+ {
+ _buckets = Array.Empty<Bucket>();
+ }
+
+ /// <summary>
+ /// Gets an item on the table, or adds a new one if not present.
+ /// </summary>
+ /// <param name="data">Data</param>
+ /// <param name="dataHash">Hash of the data</param>
+ /// <param name="item">Item to be added if not found</param>
+ /// <returns>Existing item if found, or <paramref name="item"/> if not found</returns>
+ public T GetOrAdd(byte[] data, uint dataHash, T item)
+ {
+ if (TryFindItem(dataHash, data, out T existingItem))
+ {
+ return existingItem;
+ }
+
+ Entry entry = new Entry(dataHash, data, item);
+
+ AddToBucket(dataHash, ref entry);
+
+ return item;
+ }
+
+ /// <summary>
+ /// Adds an item to the hash table.
+ /// </summary>
+ /// <param name="data">Data</param>
+ /// <param name="dataHash">Hash of the data</param>
+ /// <param name="item">Item to be added</param>
+ /// <returns>True if the item was added, false if an item associated with the data was already on the table</returns>
+ public bool Add(byte[] data, uint dataHash, T item)
+ {
+ if (TryFindItem(dataHash, data, out _))
+ {
+ return false;
+ }
+
+ Entry entry = new Entry(dataHash, data, item);
+
+ AddToBucket(dataHash, ref entry);
+
+ return true;
+ }
+
+ /// <summary>
+ /// Adds a partial entry to the hash table.
+ /// </summary>
+ /// <param name="ownerData">Full data</param>
+ /// <param name="ownSize">Size of the sub-region of <paramref name="ownerData"/> used by the partial entry</param>
+ /// <returns>True if added, false otherwise</returns>
+ public bool AddPartial(byte[] ownerData, int ownSize)
+ {
+ ReadOnlySpan<byte> data = new ReadOnlySpan<byte>(ownerData).Slice(0, ownSize);
+
+ return AddPartial(ownerData, HashState.CalcHash(data), ownSize);
+ }
+
+ /// <summary>
+ /// Adds a partial entry to the hash table.
+ /// </summary>
+ /// <param name="ownerData">Full data</param>
+ /// <param name="dataHash">Hash of the data sub-region</param>
+ /// <param name="ownSize">Size of the sub-region of <paramref name="ownerData"/> used by the partial entry</param>
+ /// <returns>True if added, false otherwise</returns>
+ public bool AddPartial(byte[] ownerData, uint dataHash, int ownSize)
+ {
+ ReadOnlySpan<byte> data = new ReadOnlySpan<byte>(ownerData).Slice(0, ownSize);
+
+ if (TryFindItem(dataHash, data, out _))
+ {
+ return false;
+ }
+
+ Entry entry = new Entry(dataHash, ownerData, ownSize);
+
+ AddToBucket(dataHash, ref entry);
+
+ return true;
+ }
+
+ /// <summary>
+ /// Adds an entry with a given hash to the table.
+ /// </summary>
+ /// <param name="dataHash">Hash of the entry</param>
+ /// <param name="entry">Entry</param>
+ private void AddToBucket(uint dataHash, ref Entry entry)
+ {
+ int pow2Count = GetPow2Count(++_count);
+ if (pow2Count != _buckets.Length)
+ {
+ Rebuild(pow2Count);
+ }
+
+ ref Bucket bucket = ref GetBucketForHash(dataHash);
+
+ AddToBucket(ref bucket, ref entry);
+ }
+
+ /// <summary>
+ /// Adds an entry to a bucket.
+ /// </summary>
+ /// <param name="bucket">Bucket to add the entry into</param>
+ /// <param name="entry">Entry to be added</param>
+ private void AddToBucket(ref Bucket bucket, ref Entry entry)
+ {
+ if (bucket.InlineEntry.Data == null)
+ {
+ bucket.InlineEntry = entry;
+ }
+ else
+ {
+ (bucket.MoreEntries ??= new List<Entry>()).Add(entry);
+ }
+ }
+
+ /// <summary>
+ /// Creates partial entries on a new hash table for all existing full entries.
+ /// </summary>
+ /// <remarks>
+ /// This should be called on every existing hash table whose data size is larger than
+ /// that of a newly created table.
+ /// It fills the new hash table with "partial" entries derived from the full entries
+ /// of this larger table.
+ /// </remarks>
+ /// <param name="newTable">New hash table</param>
+ /// <param name="newEntrySize">Size of the data on the new hash table</param>
+ public void FillPartials(PartitionHashTable<T> newTable, int newEntrySize)
+ {
+ for (int i = 0; i < _buckets.Length; i++)
+ {
+ ref Bucket bucket = ref _buckets[i];
+ ref Entry inlineEntry = ref bucket.InlineEntry;
+
+ if (inlineEntry.Data != null)
+ {
+ if (!inlineEntry.IsPartial)
+ {
+ newTable.AddPartial(inlineEntry.Data, newEntrySize);
+ }
+
+ if (bucket.MoreEntries != null)
+ {
+ foreach (Entry entry in bucket.MoreEntries)
+ {
+ if (entry.IsPartial)
+ {
+ continue;
+ }
+
+ newTable.AddPartial(entry.Data, newEntrySize);
+ }
+ }
+ }
+ }
+ }
+
+ /// <summary>
+ /// Tries to find an item on the table.
+ /// </summary>
+ /// <param name="dataHash">Hash of <paramref name="data"/></param>
+ /// <param name="data">Data to find</param>
+ /// <param name="item">Item associated with the data</param>
+ /// <returns>True if an item was found, false otherwise</returns>
+ private bool TryFindItem(uint dataHash, ReadOnlySpan<byte> data, out T item)
+ {
+ if (_count == 0)
+ {
+ item = default;
+ return false;
+ }
+
+ ref Bucket bucket = ref GetBucketForHash(dataHash);
+
+ if (bucket.InlineEntry.Data != null)
+ {
+ if (bucket.InlineEntry.Hash == dataHash && bucket.InlineEntry.GetData().SequenceEqual(data))
+ {
+ item = bucket.InlineEntry.Item;
+ return true;
+ }
+
+ if (bucket.MoreEntries != null)
+ {
+ foreach (Entry entry in bucket.MoreEntries)
+ {
+ if (entry.Hash == dataHash && entry.GetData().SequenceEqual(data))
+ {
+ item = entry.Item;
+ return true;
+ }
+ }
+ }
+ }
+
+ item = default;
+ return false;
+ }
+
+ /// <summary>
+ /// Indicates the result of a hash table lookup.
+ /// </summary>
+ public enum SearchResult
+ {
+ /// <summary>
+ /// No entry was found, the search must continue on hash tables of lower size.
+ /// </summary>
+ NotFound,
+
+ /// <summary>
+ /// A partial entry was found, the search must continue on hash tables of higher size.
+ /// </summary>
+ FoundPartial,
+
+ /// <summary>
+ /// A full entry was found, the search was concluded and the item can be retrieved.
+ /// </summary>
+ FoundFull
+ }
+
+ /// <summary>
+ /// Tries to find an item on the table.
+ /// </summary>
+ /// <param name="dataAccessor">Data accessor</param>
+ /// <param name="size">Size of the hash table data</param>
+ /// <param name="item">The item on the table, if found, otherwise unmodified</param>
+ /// <param name="data">The data on the table, if found, otherwise unmodified</param>
+ /// <returns>Table lookup result</returns>
+ public SearchResult TryFindItem(ref SmartDataAccessor dataAccessor, int size, ref T item, ref byte[] data)
+ {
+ if (_count == 0)
+ {
+ return SearchResult.NotFound;
+ }
+
+ ReadOnlySpan<byte> dataSpan = dataAccessor.GetSpanAndHash(size, out uint dataHash);
+
+ if (dataSpan.Length != size)
+ {
+ return SearchResult.NotFound;
+ }
+
+ ref Bucket bucket = ref GetBucketForHash(dataHash);
+
+ if (bucket.InlineEntry.Data != null)
+ {
+ if (bucket.InlineEntry.Hash == dataHash && bucket.InlineEntry.GetData().SequenceEqual(dataSpan))
+ {
+ item = bucket.InlineEntry.Item;
+ data = bucket.InlineEntry.Data;
+ return bucket.InlineEntry.IsPartial ? SearchResult.FoundPartial : SearchResult.FoundFull;
+ }
+
+ if (bucket.MoreEntries != null)
+ {
+ foreach (Entry entry in bucket.MoreEntries)
+ {
+ if (entry.Hash == dataHash && entry.GetData().SequenceEqual(dataSpan))
+ {
+ item = entry.Item;
+ data = entry.Data;
+ return entry.IsPartial ? SearchResult.FoundPartial : SearchResult.FoundFull;
+ }
+ }
+ }
+ }
+
+ return SearchResult.NotFound;
+ }
+
+ /// <summary>
+ /// Rebuilds the table for a new count.
+ /// </summary>
+ /// <param name="newPow2Count">New power of two count of the table</param>
+ private void Rebuild(int newPow2Count)
+ {
+ Bucket[] newBuckets = new Bucket[newPow2Count];
+
+ uint mask = (uint)newPow2Count - 1;
+
+ for (int i = 0; i < _buckets.Length; i++)
+ {
+ ref Bucket bucket = ref _buckets[i];
+
+ if (bucket.InlineEntry.Data != null)
+ {
+ AddToBucket(ref newBuckets[(int)(bucket.InlineEntry.Hash & mask)], ref bucket.InlineEntry);
+
+ if (bucket.MoreEntries != null)
+ {
+ foreach (Entry entry in bucket.MoreEntries)
+ {
+ Entry entryCopy = entry;
+ AddToBucket(ref newBuckets[(int)(entry.Hash & mask)], ref entryCopy);
+ }
+ }
+ }
+ }
+
+ _buckets = newBuckets;
+ }
+
+ /// <summary>
+ /// Gets the bucket for a given hash.
+ /// </summary>
+ /// <param name="hash">Data hash</param>
+ /// <returns>Bucket for the hash</returns>
+ private ref Bucket GetBucketForHash(uint hash)
+ {
+ int index = (int)(hash & (_buckets.Length - 1));
+
+ return ref _buckets[index];
+ }
+
+ /// <summary>
+ /// Gets a power of two count from a regular count.
+ /// </summary>
+ /// <param name="count">Count</param>
+ /// <returns>Power of two count</returns>
+ private static int GetPow2Count(int count)
+ {
+ // This returns the largest power of two that is lower than or equal to count.
+ // This was done to optimize memory usage rather than performance.
+ return 1 << BitOperations.Log2((uint)count);
+ }
+ }
+}
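To make the partial-entry mechanism concrete, here is a small sketch (illustrative only, same-assembly access assumed since the class is internal): a full entry is added to a 32-byte table, and a newly created 16-byte table is then seeded via FillPartials, so that a 16-byte prefix match can redirect the search to the larger table:

using System;
using Ryujinx.Graphics.Gpu.Shader.HashTable;

class PartitionHashTableExample
{
    static void Main()
    {
        byte[] shaderA = new byte[32];
        new Random(1).NextBytes(shaderA);

        // Full entry on the 32-byte table.
        var table32 = new PartitionHashTable<string>();
        table32.Add(shaderA, HashState.CalcHash(shaderA), "shader A");

        // A smaller table created later is seeded with partial entries, so a
        // 16-byte prefix match reports FoundPartial instead of a miss.
        var table16 = new PartitionHashTable<string>();
        table32.FillPartials(table16, 16);

        Console.WriteLine(table16.Count); // 1 partial entry, carrying no item
    }
}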
diff --git a/Ryujinx.Graphics.Gpu/Shader/HashTable/PartitionedHashTable.cs b/Ryujinx.Graphics.Gpu/Shader/HashTable/PartitionedHashTable.cs
new file mode 100644
index 00000000..4c9cc4d4
--- /dev/null
+++ b/Ryujinx.Graphics.Gpu/Shader/HashTable/PartitionedHashTable.cs
@@ -0,0 +1,244 @@
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+
+namespace Ryujinx.Graphics.Gpu.Shader.HashTable
+{
+ /// <summary>
+ /// Partitioned hash table.
+ /// </summary>
+ /// <typeparam name="T">Hash table entry type</typeparam>
+ public class PartitionedHashTable<T>
+ {
+ /// <summary>
+ /// Entry for a given data size.
+ /// </summary>
+ private struct SizeEntry
+ {
+ /// <summary>
+ /// Size of the data that will be stored on the hash table of this entry.
+ /// </summary>
+ public int Size { get; }
+
+ /// <summary>
+ /// Number of entries on the hash table.
+ /// </summary>
+ public int TableCount => _table.Count;
+
+ private readonly PartitionHashTable<T> _table;
+
+ /// <summary>
+ /// Creates an entry for a given size.
+ /// </summary>
+ /// <param name="size">Size of the data to be stored on this entry</param>
+ public SizeEntry(int size)
+ {
+ Size = size;
+ _table = new PartitionHashTable<T>();
+ }
+
+ /// <summary>
+ /// Gets an item for existing data, or adds a new one.
+ /// </summary>
+ /// <param name="data">Data associated with the item</param>
+ /// <param name="dataHash">Hash of <paramref name="data"/></param>
+ /// <param name="item">Item to be added</param>
+ /// <returns>Existing item, or <paramref name="item"/> if not present</returns>
+ public T GetOrAdd(byte[] data, uint dataHash, T item)
+ {
+ Debug.Assert(data.Length == Size);
+ return _table.GetOrAdd(data, dataHash, item);
+ }
+
+ /// <summary>
+ /// Adds a new item.
+ /// </summary>
+ /// <param name="data">Data associated with the item</param>
+ /// <param name="dataHash">Hash of <paramref name="data"/></param>
+ /// <param name="item">Item to be added</param>
+ /// <returns>True if added, false otherwise</returns>
+ public bool Add(byte[] data, uint dataHash, T item)
+ {
+ Debug.Assert(data.Length == Size);
+ return _table.Add(data, dataHash, item);
+ }
+
+ /// <summary>
+ /// Adds a partial entry.
+ /// </summary>
+ /// <param name="ownerData">Full entry data</param>
+ /// <param name="dataHash">Hash of the sub-region of the data that belongs to this entry</param>
+ /// <returns>True if added, false otherwise</returns>
+ public bool AddPartial(byte[] ownerData, uint dataHash)
+ {
+ return _table.AddPartial(ownerData, dataHash, Size);
+ }
+
+ /// <summary>
+ /// Fills a new hash table with "partials" of existing full entries of higher size.
+ /// </summary>
+ /// <param name="newEntry">Entry with the new hash table</param>
+ public void FillPartials(SizeEntry newEntry)
+ {
+ Debug.Assert(newEntry.Size < Size);
+ _table.FillPartials(newEntry._table, newEntry.Size);
+ }
+
+ /// <summary>
+ /// Tries to find an item on the hash table.
+ /// </summary>
+ /// <param name="dataAccessor">Data accessor</param>
+ /// <param name="item">The item on the table, if found, otherwise unmodified</param>
+ /// <param name="data">The data on the table, if found, otherwise unmodified</param>
+ /// <returns>Table lookup result</returns>
+ public PartitionHashTable<T>.SearchResult TryFindItem(ref SmartDataAccessor dataAccessor, ref T item, ref byte[] data)
+ {
+ return _table.TryFindItem(ref dataAccessor, Size, ref item, ref data);
+ }
+ }
+
+ private readonly List<SizeEntry> _sizeTable;
+
+ /// <summary>
+ /// Creates a new partitioned hash table.
+ /// </summary>
+ public PartitionedHashTable()
+ {
+ _sizeTable = new List<SizeEntry>();
+ }
+
+ /// <summary>
+ /// Adds a new item to the table.
+ /// </summary>
+ /// <param name="data">Data</param>
+ /// <param name="item">Item associated with the data</param>
+ public void Add(byte[] data, T item)
+ {
+ GetOrAdd(data, item);
+ }
+
+ /// <summary>
+ /// Gets an existing item from the table, or adds a new one if not present.
+ /// </summary>
+ /// <param name="data">Data</param>
+ /// <param name="item">Item associated with the data</param>
+ /// <returns>Existing item, or <paramref name="item"/> if not present</returns>
+ public T GetOrAdd(byte[] data, T item)
+ {
+ SizeEntry sizeEntry;
+
+ int index = BinarySearch(_sizeTable, data.Length);
+ if (index < _sizeTable.Count && _sizeTable[index].Size == data.Length)
+ {
+ sizeEntry = _sizeTable[index];
+ }
+ else
+ {
+ if (index < _sizeTable.Count && _sizeTable[index].Size < data.Length)
+ {
+ index++;
+ }
+
+ sizeEntry = new SizeEntry(data.Length);
+
+ _sizeTable.Insert(index, sizeEntry);
+
+ for (int i = index + 1; i < _sizeTable.Count; i++)
+ {
+ _sizeTable[i].FillPartials(sizeEntry);
+ }
+ }
+
+ HashState hashState = new HashState();
+ hashState.Initialize();
+
+ for (int i = 0; i < index; i++)
+ {
+ ReadOnlySpan<byte> dataSlice = new ReadOnlySpan<byte>(data).Slice(0, _sizeTable[i].Size);
+ hashState.Continue(dataSlice);
+ _sizeTable[i].AddPartial(data, hashState.Finalize(dataSlice));
+ }
+
+ hashState.Continue(data);
+ return sizeEntry.GetOrAdd(data, hashState.Finalize(data), item);
+ }
+
+ /// <summary>
+ /// Performs binary search on a list of hash tables, each one with a fixed data size.
+ /// </summary>
+ /// <param name="entries">List of hash tables</param>
+ /// <param name="size">Size to search for</param>
+ /// <returns>Index of the hash table with the given size, or of the nearest one otherwise</returns>
+ private static int BinarySearch(List<SizeEntry> entries, int size)
+ {
+ int left = 0;
+ int middle = 0;
+ int right = entries.Count - 1;
+
+ while (left <= right)
+ {
+ middle = left + ((right - left) >> 1);
+
+ SizeEntry entry = entries[middle];
+
+ if (size == entry.Size)
+ {
+ break;
+ }
+
+ if (size < entry.Size)
+ {
+ right = middle - 1;
+ }
+ else
+ {
+ left = middle + 1;
+ }
+ }
+
+ return middle;
+ }
+
+ /// <summary>
+ /// Tries to find an item on the table.
+ /// </summary>
+ /// <param name="dataAccessor">Data accessor</param>
+ /// <param name="item">Item, if found</param>
+ /// <param name="data">Data, if found</param>
+ /// <returns>True if the item was found on the table, false otherwise</returns>
+ public bool TryFindItem(IDataAccessor dataAccessor, out T item, out byte[] data)
+ {
+ SmartDataAccessor sda = new SmartDataAccessor(dataAccessor);
+
+ item = default;
+ data = null;
+
+ int left = 0;
+ int right = _sizeTable.Count;
+
+ while (left != right)
+ {
+ int index = left + ((right - left) >> 1);
+
+ PartitionHashTable<T>.SearchResult result = _sizeTable[index].TryFindItem(ref sda, ref item, ref data);
+
+ if (result == PartitionHashTable<T>.SearchResult.FoundFull)
+ {
+ return true;
+ }
+
+ if (result == PartitionHashTable<T>.SearchResult.NotFound)
+ {
+ right = index;
+ }
+ else /* if (result == PartitionHashTable<T>.SearchResult.FoundPartial) */
+ {
+ left = index + 1;
+ }
+ }
+
+ data = null;
+ return false;
+ }
+ }
+}
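Putting the pieces together, a rough usage sketch of the public API (reusing the hypothetical ArrayDataAccessor from the IDataAccessor example, with same-assembly access assumed): entries of different sizes are registered, and a lookup through an accessor of unknown length narrows down the size tables, following FoundPartial/NotFound results until a full match is found:

using System;
using Ryujinx.Graphics.Gpu.Shader.HashTable;

class PartitionedHashTableExample
{
    static void Main()
    {
        var cache = new PartitionedHashTable<string>();

        byte[] shaderA = new byte[64];
        byte[] shaderB = new byte[128];
        new Random(2).NextBytes(shaderA);
        new Random(3).NextBytes(shaderB);

        cache.Add(shaderA, "compiled A");
        cache.Add(shaderB, "compiled B");

        // Lookup with unknown size: the accessor exposes the code in memory, and
        // the table guesses candidate sizes from the entries it already holds.
        IDataAccessor accessor = new ArrayDataAccessor(shaderA);

        if (cache.TryFindItem(accessor, out string item, out byte[] data))
        {
            Console.WriteLine($"{item}, {data.Length} bytes"); // expected: compiled A, 64 bytes
        }
    }
}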
diff --git a/Ryujinx.Graphics.Gpu/Shader/HashTable/SmartDataAccessor.cs b/Ryujinx.Graphics.Gpu/Shader/HashTable/SmartDataAccessor.cs
new file mode 100644
index 00000000..0632add6
--- /dev/null
+++ b/Ryujinx.Graphics.Gpu/Shader/HashTable/SmartDataAccessor.cs
@@ -0,0 +1,96 @@
+using System;
+using System.Collections.Generic;
+
+namespace Ryujinx.Graphics.Gpu.Shader.HashTable
+{
+ /// <summary>
+ /// Smart data accessor that can cache data and hashes to avoid reading and re-hashing the same memory regions.
+ /// </summary>
+ ref struct SmartDataAccessor
+ {
+ private readonly IDataAccessor _dataAccessor;
+ private ReadOnlySpan<byte> _data;
+ private readonly SortedList<int, HashState> _cachedHashes;
+
+ /// <summary>
+ /// Creates a new smart data accessor.
+ /// </summary>
+ /// <param name="dataAccessor">Data accessor</param>
+ public SmartDataAccessor(IDataAccessor dataAccessor)
+ {
+ _dataAccessor = dataAccessor;
+ _data = ReadOnlySpan<byte>.Empty;
+ _cachedHashes = new SortedList<int, HashState>();
+ }
+
+ /// <summary>
+ /// Gets a span of a given size.
+ /// </summary>
+ /// <remarks>
+ /// The actual length of the span returned depends on the <see cref="IDataAccessor"/>
+ /// and might be less than requested.
+ /// </remarks>
+ /// <param name="length">Size in bytes</param>
+ /// <returns>Span with the requested size</returns>
+ public ReadOnlySpan<byte> GetSpan(int length)
+ {
+ if (_data.Length < length)
+ {
+ _data = _dataAccessor.GetSpan(0, length);
+ }
+ else if (_data.Length > length)
+ {
+ return _data.Slice(0, length);
+ }
+
+ return _data;
+ }
+
+ /// <summary>
+ /// Gets a span of the requested size, and a hash of its data.
+ /// </summary>
+ /// <param name="length">Length of the span</param>
+ /// <param name="hash">Hash of the span data</param>
+ /// <returns>Span of data</returns>
+ public ReadOnlySpan<byte> GetSpanAndHash(int length, out uint hash)
+ {
+ ReadOnlySpan<byte> data = GetSpan(length);
+ hash = data.Length == length ? CalcHashCached(data) : 0;
+ return data;
+ }
+
+ /// <summary>
+ /// Calculates the hash for a requested span.
+ /// This will try to use a cached hash if the data was already accessed before, to avoid re-hashing.
+ /// </summary>
+ /// <param name="data">Data to be hashed</param>
+ /// <returns>Hash of the data</returns>
+ private uint CalcHashCached(ReadOnlySpan<byte> data)
+ {
+ HashState state = default;
+ bool found = false;
+
+ for (int i = _cachedHashes.Count - 1; i >= 0; i--)
+ {
+ int cachedHashSize = _cachedHashes.Keys[i];
+
+ if (cachedHashSize < data.Length)
+ {
+ state = _cachedHashes.Values[i];
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ {
+ state = new HashState();
+ state.Initialize();
+ }
+
+ state.Continue(data);
+ _cachedHashes[data.Length & ~7] = state;
+ return state.Finalize(data);
+ }
+ }
+}
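Finally, a sketch of the hash caching behavior (same assumptions as before: same-assembly access and the hypothetical ArrayDataAccessor): the second GetSpanAndHash call reuses the state cached for the first 64 bytes, and both hashes still match a from-scratch CalcHash:

using System;
using Ryujinx.Graphics.Gpu.Shader.HashTable;

class SmartDataAccessorExample
{
    static void Main()
    {
        byte[] code = new byte[256];
        new Random(4).NextBytes(code);

        SmartDataAccessor sda = new SmartDataAccessor(new ArrayDataAccessor(code));

        // The second call picks up the hash state cached for the first 64 bytes,
        // so only bytes 64..127 are hashed again.
        sda.GetSpanAndHash(64, out uint hash64);
        sda.GetSpanAndHash(128, out uint hash128);

        Console.WriteLine(hash64 == HashState.CalcHash(code.AsSpan(0, 64)));   // expected: True
        Console.WriteLine(hash128 == HashState.CalcHash(code.AsSpan(0, 128))); // expected: True
    }
}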