-rw-r--r--  ARMeilleure/Memory/IJitMemoryAllocator.cs | 2
-rw-r--r--  ARMeilleure/Signal/NativeSignalHandler.cs | 24
-rw-r--r--  ARMeilleure/Translation/Translator.cs | 2
-rw-r--r--  Ryujinx.Cpu/AddressSpace.cs | 470
-rw-r--r--  Ryujinx.Cpu/Jit/JitMemoryAllocator.cs | 2
-rw-r--r--  Ryujinx.Cpu/Jit/MemoryManager.cs | 43
-rw-r--r--  Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs | 63
-rw-r--r--  Ryujinx.Cpu/PrivateMemoryAllocation.cs | 41
-rw-r--r--  Ryujinx.Cpu/PrivateMemoryAllocator.cs | 268
-rw-r--r--  Ryujinx.Graphics.Gpu/Image/TextureGroup.cs | 8
-rw-r--r--  Ryujinx.Graphics.Gpu/Memory/Buffer.cs | 15
-rw-r--r--  Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs | 5
-rw-r--r--  Ryujinx.HLE/HOS/Kernel/KernelContext.cs | 13
-rw-r--r--  Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs | 2
-rw-r--r--  Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs | 56
-rw-r--r--  Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs | 70
-rw-r--r--  Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs | 13
-rw-r--r--  Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs | 2
-rw-r--r--  Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs | 14
-rw-r--r--  Ryujinx.Memory/AddressSpaceManager.cs | 137
-rw-r--r--  Ryujinx.Memory/IVirtualMemoryManager.cs | 30
-rw-r--r--  Ryujinx.Memory/MemoryBlock.cs | 2
-rw-r--r--  Ryujinx.Memory/MemoryMapFlags.cs | 23
-rw-r--r--  Ryujinx.Memory/Range/HostMemoryRange.cs | 71
-rw-r--r--  Ryujinx.Memory/Tracking/MemoryTracking.cs | 10
-rw-r--r--  Ryujinx.Memory/Tracking/MultiRegionHandle.cs | 21
-rw-r--r--  Ryujinx.Memory/Tracking/RegionHandle.cs | 27
-rw-r--r--  Ryujinx.Tests/Cpu/CpuTest.cs | 2
-rw-r--r--  Ryujinx.Tests/Cpu/CpuTest32.cs | 2
29 files changed, 1293 insertions, 145 deletions
diff --git a/ARMeilleure/Memory/IJitMemoryAllocator.cs b/ARMeilleure/Memory/IJitMemoryAllocator.cs
index 5745a4bf..19b696b0 100644
--- a/ARMeilleure/Memory/IJitMemoryAllocator.cs
+++ b/ARMeilleure/Memory/IJitMemoryAllocator.cs
@@ -4,5 +4,7 @@
{
IJitMemoryBlock Allocate(ulong size);
IJitMemoryBlock Reserve(ulong size);
+
+ ulong GetPageSize();
}
}
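The new GetPageSize member lets the signal handler derive the host page size from the JIT allocator instead of hardcoding it (see NativeSignalHandler below). A minimal sketch of how an implementation could satisfy it, assuming the runtime-reported page size is the right value; the actual JitMemoryAllocator further down delegates to MemoryBlock.GetPageSize instead:

    using System;

    class HostPageSizeSketch
    {
        // Environment.SystemPageSize reports the host page size in bytes:
        // 4096 on most platforms, 16384 on Apple Silicon macOS.
        public static ulong GetPageSize() => (ulong)Environment.SystemPageSize;

        static void Main()
        {
            ulong pageSize = GetPageSize();
            ulong pageMask = pageSize - 1;

            // The signal handler derives its page mask the same way (see below).
            Console.WriteLine($"page size: 0x{pageSize:X}, page mask: 0x{pageMask:X}");
        }
    }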
diff --git a/ARMeilleure/Signal/NativeSignalHandler.cs b/ARMeilleure/Signal/NativeSignalHandler.cs
index da02f76a..e8dc6dda 100644
--- a/ARMeilleure/Signal/NativeSignalHandler.cs
+++ b/ARMeilleure/Signal/NativeSignalHandler.cs
@@ -71,8 +71,8 @@ namespace ARMeilleure.Signal
private const uint EXCEPTION_ACCESS_VIOLATION = 0xc0000005;
- private static ulong _pageSize = GetPageSize();
- private static ulong _pageMask = _pageSize - 1;
+ private static ulong _pageSize;
+ private static ulong _pageMask;
private static IntPtr _handlerConfig;
private static IntPtr _signalHandlerPtr;
@@ -81,19 +81,6 @@ namespace ARMeilleure.Signal
private static readonly object _lock = new object();
private static bool _initialized;
- private static ulong GetPageSize()
- {
- // TODO: This needs to be based on the current memory manager configuration.
- if (OperatingSystem.IsMacOS() && RuntimeInformation.ProcessArchitecture == Architecture.Arm64)
- {
- return 1UL << 14;
- }
- else
- {
- return 1UL << 12;
- }
- }
-
static NativeSignalHandler()
{
_handlerConfig = Marshal.AllocHGlobal(Unsafe.SizeOf<SignalHandlerConfig>());
@@ -102,12 +89,12 @@ namespace ARMeilleure.Signal
config = new SignalHandlerConfig();
}
- public static void InitializeJitCache(IJitMemoryAllocator allocator)
+ public static void Initialize(IJitMemoryAllocator allocator)
{
JitCache.Initialize(allocator);
}
- public static void InitializeSignalHandler(Func<IntPtr, IntPtr, IntPtr> customSignalHandlerFactory = null)
+ public static void InitializeSignalHandler(ulong pageSize, Func<IntPtr, IntPtr, IntPtr> customSignalHandlerFactory = null)
{
if (_initialized) return;
@@ -115,6 +102,9 @@ namespace ARMeilleure.Signal
{
if (_initialized) return;
+ _pageSize = pageSize;
+ _pageMask = pageSize - 1;
+
ref SignalHandlerConfig config = ref GetConfigRef();
if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
diff --git a/ARMeilleure/Translation/Translator.cs b/ARMeilleure/Translation/Translator.cs
index 75c4df23..cbf6baa0 100644
--- a/ARMeilleure/Translation/Translator.cs
+++ b/ARMeilleure/Translation/Translator.cs
@@ -81,7 +81,7 @@ namespace ARMeilleure.Translation
if (memory.Type.IsHostMapped())
{
- NativeSignalHandler.InitializeSignalHandler();
+ NativeSignalHandler.InitializeSignalHandler(allocator.GetPageSize());
}
}
diff --git a/Ryujinx.Cpu/AddressSpace.cs b/Ryujinx.Cpu/AddressSpace.cs
new file mode 100644
index 00000000..cea3b56d
--- /dev/null
+++ b/Ryujinx.Cpu/AddressSpace.cs
@@ -0,0 +1,470 @@
+using Ryujinx.Common;
+using Ryujinx.Common.Collections;
+using Ryujinx.Memory;
+using System;
+
+namespace Ryujinx.Cpu
+{
+ class AddressSpace : IDisposable
+ {
+ private const ulong PageSize = 0x1000;
+
+ private const int DefaultBlockAlignment = 1 << 20;
+
+ private enum MappingType : byte
+ {
+ None,
+ Private,
+ Shared
+ }
+
+ private class Mapping : IntrusiveRedBlackTreeNode<Mapping>, IComparable<Mapping>
+ {
+ public ulong Address { get; private set; }
+ public ulong Size { get; private set; }
+ public ulong EndAddress => Address + Size;
+ public MappingType Type { get; private set; }
+
+ public Mapping(ulong address, ulong size, MappingType type)
+ {
+ Address = address;
+ Size = size;
+ Type = type;
+ }
+
+ public Mapping Split(ulong splitAddress)
+ {
+ ulong leftSize = splitAddress - Address;
+ ulong rightSize = EndAddress - splitAddress;
+
+ Mapping left = new Mapping(Address, leftSize, Type);
+
+ Address = splitAddress;
+ Size = rightSize;
+
+ return left;
+ }
+
+ public void UpdateState(MappingType newType)
+ {
+ Type = newType;
+ }
+
+ public void Extend(ulong sizeDelta)
+ {
+ Size += sizeDelta;
+ }
+
+ public int CompareTo(Mapping other)
+ {
+ if (Address < other.Address)
+ {
+ return -1;
+ }
+ else if (Address <= other.EndAddress - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+ }
+ }
+
+ private class PrivateMapping : IntrusiveRedBlackTreeNode<PrivateMapping>, IComparable<PrivateMapping>
+ {
+ public ulong Address { get; private set; }
+ public ulong Size { get; private set; }
+ public ulong EndAddress => Address + Size;
+ public PrivateMemoryAllocation PrivateAllocation { get; private set; }
+
+ public PrivateMapping(ulong address, ulong size, PrivateMemoryAllocation privateAllocation)
+ {
+ Address = address;
+ Size = size;
+ PrivateAllocation = privateAllocation;
+ }
+
+ public PrivateMapping Split(ulong splitAddress)
+ {
+ ulong leftSize = splitAddress - Address;
+ ulong rightSize = EndAddress - splitAddress;
+
+ (var leftAllocation, PrivateAllocation) = PrivateAllocation.Split(leftSize);
+
+ PrivateMapping left = new PrivateMapping(Address, leftSize, leftAllocation);
+
+ Address = splitAddress;
+ Size = rightSize;
+
+ return left;
+ }
+
+ public void Map(MemoryBlock baseBlock, MemoryBlock mirrorBlock, PrivateMemoryAllocation newAllocation)
+ {
+ baseBlock.MapView(newAllocation.Memory, newAllocation.Offset, Address, Size);
+ mirrorBlock.MapView(newAllocation.Memory, newAllocation.Offset, Address, Size);
+ PrivateAllocation = newAllocation;
+ }
+
+ public void Unmap(MemoryBlock baseBlock, MemoryBlock mirrorBlock)
+ {
+ if (PrivateAllocation.IsValid)
+ {
+ baseBlock.UnmapView(PrivateAllocation.Memory, Address, Size);
+ mirrorBlock.UnmapView(PrivateAllocation.Memory, Address, Size);
+ PrivateAllocation.Dispose();
+ }
+
+ PrivateAllocation = default;
+ }
+
+ public void Extend(ulong sizeDelta)
+ {
+ Size += sizeDelta;
+ }
+
+ public int CompareTo(PrivateMapping other)
+ {
+ if (Address < other.Address)
+ {
+ return -1;
+ }
+ else if (Address <= other.EndAddress - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+ }
+ }
+
+ private readonly MemoryBlock _backingMemory;
+ private readonly PrivateMemoryAllocator _privateMemoryAllocator;
+ private readonly IntrusiveRedBlackTree<Mapping> _mappingTree;
+ private readonly IntrusiveRedBlackTree<PrivateMapping> _privateTree;
+
+ private readonly object _treeLock;
+
+ private readonly bool _supports4KBPages;
+
+ public MemoryBlock Base { get; }
+ public MemoryBlock Mirror { get; }
+
+ public AddressSpace(MemoryBlock backingMemory, ulong asSize, bool supports4KBPages)
+ {
+ if (!supports4KBPages)
+ {
+ _privateMemoryAllocator = new PrivateMemoryAllocator(DefaultBlockAlignment, MemoryAllocationFlags.Mirrorable | MemoryAllocationFlags.NoMap);
+ _mappingTree = new IntrusiveRedBlackTree<Mapping>();
+ _privateTree = new IntrusiveRedBlackTree<PrivateMapping>();
+ _treeLock = new object();
+
+ _mappingTree.Add(new Mapping(0UL, asSize, MappingType.None));
+ _privateTree.Add(new PrivateMapping(0UL, asSize, default));
+ }
+
+ _backingMemory = backingMemory;
+ _supports4KBPages = supports4KBPages;
+
+ MemoryAllocationFlags asFlags = MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible;
+
+ Base = new MemoryBlock(asSize, asFlags);
+ Mirror = new MemoryBlock(asSize, asFlags);
+ }
+
+ public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
+ {
+ if (_supports4KBPages)
+ {
+ Base.MapView(_backingMemory, pa, va, size);
+ Mirror.MapView(_backingMemory, pa, va, size);
+
+ return;
+ }
+
+ lock (_treeLock)
+ {
+ ulong alignment = MemoryBlock.GetPageSize();
+ bool isAligned = ((va | pa | size) & (alignment - 1)) == 0;
+
+ if (flags.HasFlag(MemoryMapFlags.Private) && !isAligned)
+ {
+ Update(va, pa, size, MappingType.Private);
+ }
+ else
+ {
+ // The update method assumes that shared mappings are already aligned.
+
+ if (!flags.HasFlag(MemoryMapFlags.Private))
+ {
+ if ((va & (alignment - 1)) != (pa & (alignment - 1)))
+ {
+ throw new InvalidMemoryRegionException($"Virtual address 0x{va:X} and physical address 0x{pa:X} are misaligned and can't be aligned.");
+ }
+
+ ulong endAddress = va + size;
+ va = BitUtils.AlignDown(va, alignment);
+ pa = BitUtils.AlignDown(pa, alignment);
+ size = BitUtils.AlignUp(endAddress, alignment) - va;
+ }
+
+ Update(va, pa, size, MappingType.Shared);
+ }
+ }
+ }
+
+ public void Unmap(ulong va, ulong size)
+ {
+ if (_supports4KBPages)
+ {
+ Base.UnmapView(_backingMemory, va, size);
+ Mirror.UnmapView(_backingMemory, va, size);
+
+ return;
+ }
+
+ lock (_treeLock)
+ {
+ Update(va, 0UL, size, MappingType.None);
+ }
+ }
+
+ private void Update(ulong va, ulong pa, ulong size, MappingType type)
+ {
+ Mapping map = _mappingTree.GetNode(new Mapping(va, 1UL, MappingType.None));
+
+ Update(map, va, pa, size, type);
+ }
+
+ private Mapping Update(Mapping map, ulong va, ulong pa, ulong size, MappingType type)
+ {
+ ulong endAddress = va + size;
+
+ for (; map != null; map = map.Successor)
+ {
+ if (map.Address < va)
+ {
+ _mappingTree.Add(map.Split(va));
+ }
+
+ if (map.EndAddress > endAddress)
+ {
+ Mapping newMap = map.Split(endAddress);
+ _mappingTree.Add(newMap);
+ map = newMap;
+ }
+
+ switch (type)
+ {
+ case MappingType.None:
+ if (map.Type == MappingType.Shared)
+ {
+ ulong startOffset = map.Address - va;
+ ulong mapVa = va + startOffset;
+ ulong mapSize = Math.Min(size - startOffset, map.Size);
+ ulong mapEndAddress = mapVa + mapSize;
+ ulong alignment = MemoryBlock.GetPageSize();
+
+ mapVa = BitUtils.AlignDown(mapVa, alignment);
+ mapEndAddress = BitUtils.AlignUp(mapEndAddress, alignment);
+
+ mapSize = mapEndAddress - mapVa;
+
+ Base.UnmapView(_backingMemory, mapVa, mapSize);
+ Mirror.UnmapView(_backingMemory, mapVa, mapSize);
+ }
+ else
+ {
+ UnmapPrivate(va, size);
+ }
+ break;
+ case MappingType.Private:
+ if (map.Type == MappingType.Shared)
+ {
+ throw new InvalidMemoryRegionException($"Private mapping request at 0x{va:X} with size 0x{size:X} overlaps shared mapping at 0x{map.Address:X} with size 0x{map.Size:X}.");
+ }
+ else
+ {
+ MapPrivate(va, size);
+ }
+ break;
+ case MappingType.Shared:
+ if (map.Type != MappingType.None)
+ {
+ throw new InvalidMemoryRegionException($"Shared mapping request at 0x{va:X} with size 0x{size:X} overlaps mapping at 0x{map.Address:X} with size 0x{map.Size:X}.");
+ }
+ else
+ {
+ ulong startOffset = map.Address - va;
+ ulong mapPa = pa + startOffset;
+ ulong mapVa = va + startOffset;
+ ulong mapSize = Math.Min(size - startOffset, map.Size);
+
+ Base.MapView(_backingMemory, mapPa, mapVa, mapSize);
+ Mirror.MapView(_backingMemory, mapPa, mapVa, mapSize);
+ }
+ break;
+ }
+
+ map.UpdateState(type);
+ map = TryCoalesce(map);
+
+ if (map.EndAddress >= endAddress)
+ {
+ break;
+ }
+ }
+
+ return map;
+ }
+
+ private Mapping TryCoalesce(Mapping map)
+ {
+ Mapping previousMap = map.Predecessor;
+ Mapping nextMap = map.Successor;
+
+ if (previousMap != null && CanCoalesce(previousMap, map))
+ {
+ previousMap.Extend(map.Size);
+ _mappingTree.Remove(map);
+ map = previousMap;
+ }
+
+ if (nextMap != null && CanCoalesce(map, nextMap))
+ {
+ map.Extend(nextMap.Size);
+ _mappingTree.Remove(nextMap);
+ }
+
+ return map;
+ }
+
+ private static bool CanCoalesce(Mapping left, Mapping right)
+ {
+ return left.Type == right.Type;
+ }
+
+ private void MapPrivate(ulong va, ulong size)
+ {
+ ulong endAddress = va + size;
+
+ ulong alignment = MemoryBlock.GetPageSize();
+
+ // Expand the range outwards based on page size to ensure that at least the requested region is mapped.
+ ulong vaAligned = BitUtils.AlignDown(va, alignment);
+ ulong endAddressAligned = BitUtils.AlignUp(endAddress, alignment);
+
+ ulong sizeAligned = endAddressAligned - vaAligned;
+
+ PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, 1UL, default));
+
+ for (; map != null; map = map.Successor)
+ {
+ if (!map.PrivateAllocation.IsValid)
+ {
+ if (map.Address < vaAligned)
+ {
+ _privateTree.Add(map.Split(vaAligned));
+ }
+
+ if (map.EndAddress > endAddressAligned)
+ {
+ PrivateMapping newMap = map.Split(endAddressAligned);
+ _privateTree.Add(newMap);
+ map = newMap;
+ }
+
+ map.Map(Base, Mirror, _privateMemoryAllocator.Allocate(map.Size, MemoryBlock.GetPageSize()));
+ }
+
+ if (map.EndAddress >= endAddressAligned)
+ {
+ break;
+ }
+ }
+ }
+
+ private void UnmapPrivate(ulong va, ulong size)
+ {
+ ulong endAddress = va + size;
+
+ ulong alignment = MemoryBlock.GetPageSize();
+
+ // Shrink the range inwards based on page size to ensure we won't unmap memory that might be still in use.
+ ulong vaAligned = BitUtils.AlignUp(va, alignment);
+ ulong endAddressAligned = BitUtils.AlignDown(endAddress, alignment);
+
+ if (endAddressAligned <= vaAligned)
+ {
+ return;
+ }
+
+ ulong alignedSize = endAddressAligned - vaAligned;
+
+ PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, 1UL, default));
+
+ for (; map != null; map = map.Successor)
+ {
+ if (map.PrivateAllocation.IsValid)
+ {
+ if (map.Address < vaAligned)
+ {
+ _privateTree.Add(map.Split(vaAligned));
+ }
+
+ if (map.EndAddress > endAddressAligned)
+ {
+ PrivateMapping newMap = map.Split(endAddressAligned);
+ _privateTree.Add(newMap);
+ map = newMap;
+ }
+
+ map.Unmap(Base, Mirror);
+ map = TryCoalesce(map);
+ }
+
+ if (map.EndAddress >= endAddressAligned)
+ {
+ break;
+ }
+ }
+ }
+
+ private PrivateMapping TryCoalesce(PrivateMapping map)
+ {
+ PrivateMapping previousMap = map.Predecessor;
+ PrivateMapping nextMap = map.Successor;
+
+ if (previousMap != null && CanCoalesce(previousMap, map))
+ {
+ previousMap.Extend(map.Size);
+ _privateTree.Remove(map);
+ map = previousMap;
+ }
+
+ if (nextMap != null && CanCoalesce(map, nextMap))
+ {
+ map.Extend(nextMap.Size);
+ _privateTree.Remove(nextMap);
+ }
+
+ return map;
+ }
+
+ private static bool CanCoalesce(PrivateMapping left, PrivateMapping right)
+ {
+ return !left.PrivateAllocation.IsValid && !right.PrivateAllocation.IsValid;
+ }
+
+ public void Dispose()
+ {
+ _privateMemoryAllocator.Dispose();
+ Base.Dispose();
+ Mirror.Dispose();
+ }
+ }
+}
\ No newline at end of file
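The Update methods above follow a split-retype-coalesce pattern: any node straddling a boundary of the updated range is split so the range is covered by whole nodes, the covered nodes are retyped (and mapped or unmapped, with MapPrivate expanding outwards and UnmapPrivate shrinking inwards to page boundaries), and equal-typed neighbors are merged back. A standalone sketch of the split arithmetic, using a plain list in place of the intrusive red-black tree:

    using System;
    using System.Collections.Generic;
    using System.Linq;

    enum MappingType { None, Private, Shared }

    class Node
    {
        public ulong Address;
        public ulong Size;
        public MappingType Type;
        public ulong EndAddress => Address + Size;

        public Node(ulong address, ulong size, MappingType type)
        {
            Address = address; Size = size; Type = type;
        }

        // Split off [Address, splitAddress) as a new node; keep the right half.
        public Node Split(ulong splitAddress)
        {
            var left = new Node(Address, splitAddress - Address, Type);
            Size = EndAddress - splitAddress;
            Address = splitAddress;
            return left;
        }
    }

    class Demo
    {
        static void Main()
        {
            // One 64KB "None" node; mark [0x4000, 0x8000) as Shared.
            var nodes = new List<Node> { new Node(0, 0x10000, MappingType.None) };
            var node = nodes[0];

            nodes.Insert(0, node.Split(0x4000)); // split at the left boundary
            var middle = node.Split(0x8000);     // split at the right boundary
            nodes.Insert(1, middle);
            middle.Type = MappingType.Shared;

            foreach (var n in nodes.OrderBy(n => n.Address))
            {
                Console.WriteLine($"[0x{n.Address:X}, 0x{n.EndAddress:X}) {n.Type}");
            }
            // [0x0, 0x4000) None / [0x4000, 0x8000) Shared / [0x8000, 0x10000) None
        }
    }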
diff --git a/Ryujinx.Cpu/Jit/JitMemoryAllocator.cs b/Ryujinx.Cpu/Jit/JitMemoryAllocator.cs
index 0cf35c17..4aa78d06 100644
--- a/Ryujinx.Cpu/Jit/JitMemoryAllocator.cs
+++ b/Ryujinx.Cpu/Jit/JitMemoryAllocator.cs
@@ -7,5 +7,7 @@ namespace Ryujinx.Cpu.Jit
{
public IJitMemoryBlock Allocate(ulong size) => new JitMemoryBlock(size, MemoryAllocationFlags.None);
public IJitMemoryBlock Reserve(ulong size) => new JitMemoryBlock(size, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.Jit);
+
+ public ulong GetPageSize() => MemoryBlock.GetPageSize();
}
}
diff --git a/Ryujinx.Cpu/Jit/MemoryManager.cs b/Ryujinx.Cpu/Jit/MemoryManager.cs
index 21c50d51..014d843b 100644
--- a/Ryujinx.Cpu/Jit/MemoryManager.cs
+++ b/Ryujinx.Cpu/Jit/MemoryManager.cs
@@ -28,6 +28,9 @@ namespace Ryujinx.Cpu.Jit
private readonly MemoryBlock _backingMemory;
private readonly InvalidAccessHandler _invalidAccessHandler;
+ /// <inheritdoc/>
+ public bool Supports4KBPages => true;
+
/// <summary>
/// Address space width in bits.
/// </summary>
@@ -76,7 +79,7 @@ namespace Ryujinx.Cpu.Jit
}
/// <inheritdoc/>
- public void Map(ulong va, ulong pa, ulong size)
+ public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
{
AssertValidAddressAndSize(va, size);
@@ -91,10 +94,17 @@ namespace Ryujinx.Cpu.Jit
pa += PageSize;
remainingSize -= PageSize;
}
+
Tracking.Map(oVa, size);
}
/// <inheritdoc/>
+ public void MapForeign(ulong va, nuint hostPointer, ulong size)
+ {
+ throw new NotSupportedException();
+ }
+
+ /// <inheritdoc/>
public void Unmap(ulong va, ulong size)
{
// If size is 0, there's nothing to unmap, just exit early.
@@ -379,6 +389,32 @@ namespace Ryujinx.Cpu.Jit
}
/// <inheritdoc/>
+ public IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
+ {
+ if (size == 0)
+ {
+ return Enumerable.Empty<HostMemoryRange>();
+ }
+
+ var guestRegions = GetPhysicalRegionsImpl(va, size);
+ if (guestRegions == null)
+ {
+ return null;
+ }
+
+ var regions = new HostMemoryRange[guestRegions.Count];
+
+ for (int i = 0; i < regions.Length; i++)
+ {
+ var guestRegion = guestRegions[i];
+ IntPtr pointer = _backingMemory.GetPointer(guestRegion.Address, guestRegion.Size);
+ regions[i] = new HostMemoryRange((nuint)(ulong)pointer, guestRegion.Size);
+ }
+
+ return regions;
+ }
+
+ /// <inheritdoc/>
public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
{
if (size == 0)
@@ -386,6 +422,11 @@ namespace Ryujinx.Cpu.Jit
return Enumerable.Empty<MemoryRange>();
}
+ return GetPhysicalRegionsImpl(va, size);
+ }
+
+ private List<MemoryRange> GetPhysicalRegionsImpl(ulong va, ulong size)
+ {
if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
{
return null;
diff --git a/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs b/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs
index c4e59db9..856b6b9b 100644
--- a/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs
+++ b/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs
@@ -5,6 +5,7 @@ using Ryujinx.Memory.Range;
using Ryujinx.Memory.Tracking;
using System;
using System.Collections.Generic;
+using System.Linq;
using System.Runtime.CompilerServices;
using System.Threading;
@@ -37,20 +38,21 @@ namespace Ryujinx.Cpu.Jit
private readonly InvalidAccessHandler _invalidAccessHandler;
private readonly bool _unsafeMode;
- private readonly MemoryBlock _addressSpace;
- private readonly MemoryBlock _addressSpaceMirror;
+ private readonly AddressSpace _addressSpace;
private readonly ulong _addressSpaceSize;
- private readonly MemoryBlock _backingMemory;
private readonly PageTable<ulong> _pageTable;
private readonly MemoryEhMeilleure _memoryEh;
private readonly ulong[] _pageBitmap;
+ /// <inheritdoc/>
+ public bool Supports4KBPages => MemoryBlock.GetPageSize() == PageSize;
+
public int AddressSpaceBits { get; }
- public IntPtr PageTablePointer => _addressSpace.Pointer;
+ public IntPtr PageTablePointer => _addressSpace.Base.Pointer;
public MemoryManagerType Type => _unsafeMode ? MemoryManagerType.HostMappedUnsafe : MemoryManagerType.HostMapped;
@@ -67,7 +69,6 @@ namespace Ryujinx.Cpu.Jit
/// <param name="invalidAccessHandler">Optional function to handle invalid memory accesses</param>
public MemoryManagerHostMapped(MemoryBlock backingMemory, ulong addressSpaceSize, bool unsafeMode, InvalidAccessHandler invalidAccessHandler = null)
{
- _backingMemory = backingMemory;
_pageTable = new PageTable<ulong>();
_invalidAccessHandler = invalidAccessHandler;
_unsafeMode = unsafeMode;
@@ -86,13 +87,10 @@ namespace Ryujinx.Cpu.Jit
_pageBitmap = new ulong[1 << (AddressSpaceBits - (PageBits + PageToPteShift))];
- MemoryAllocationFlags asFlags = MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible;
+ _addressSpace = new AddressSpace(backingMemory, asSize, Supports4KBPages);
- _addressSpace = new MemoryBlock(asSize, asFlags);
- _addressSpaceMirror = new MemoryBlock(asSize, asFlags);
-
- Tracking = new MemoryTracking(this, PageSize, invalidAccessHandler);
- _memoryEh = new MemoryEhMeilleure(_addressSpace, _addressSpaceMirror, Tracking);
+ Tracking = new MemoryTracking(this, (int)MemoryBlock.GetPageSize(), invalidAccessHandler);
+ _memoryEh = new MemoryEhMeilleure(_addressSpace.Base, _addressSpace.Mirror, Tracking);
}
/// <summary>
@@ -145,12 +143,11 @@ namespace Ryujinx.Cpu.Jit
}
/// <inheritdoc/>
- public void Map(ulong va, ulong pa, ulong size)
+ public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
{
AssertValidAddressAndSize(va, size);
- _addressSpace.MapView(_backingMemory, pa, va, size);
- _addressSpaceMirror.MapView(_backingMemory, pa, va, size);
+ _addressSpace.Map(va, pa, size, flags);
AddMapping(va, size);
PtMap(va, pa, size);
@@ -158,6 +155,12 @@ namespace Ryujinx.Cpu.Jit
}
/// <inheritdoc/>
+ public void MapForeign(ulong va, nuint hostPointer, ulong size)
+ {
+ throw new NotSupportedException();
+ }
+
+ /// <inheritdoc/>
public void Unmap(ulong va, ulong size)
{
AssertValidAddressAndSize(va, size);
@@ -167,8 +170,7 @@ namespace Ryujinx.Cpu.Jit
RemoveMapping(va, size);
PtUnmap(va, size);
- _addressSpace.UnmapView(_backingMemory, va, size);
- _addressSpaceMirror.UnmapView(_backingMemory, va, size);
+ _addressSpace.Unmap(va, size);
}
private void PtMap(ulong va, ulong pa, ulong size)
@@ -201,7 +203,7 @@ namespace Ryujinx.Cpu.Jit
{
AssertMapped(va, (ulong)Unsafe.SizeOf<T>());
- return _addressSpaceMirror.Read<T>(va);
+ return _addressSpace.Mirror.Read<T>(va);
}
catch (InvalidMemoryRegionException)
{
@@ -241,7 +243,7 @@ namespace Ryujinx.Cpu.Jit
{
AssertMapped(va, (ulong)data.Length);
- _addressSpaceMirror.Read(va, data);
+ _addressSpace.Mirror.Read(va, data);
}
catch (InvalidMemoryRegionException)
{
@@ -260,7 +262,7 @@ namespace Ryujinx.Cpu.Jit
{
SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), write: true);
- _addressSpaceMirror.Write(va, value);
+ _addressSpace.Mirror.Write(va, value);
}
catch (InvalidMemoryRegionException)
{
@@ -278,7 +280,7 @@ namespace Ryujinx.Cpu.Jit
{
SignalMemoryTracking(va, (ulong)data.Length, write: true);
- _addressSpaceMirror.Write(va, data);
+ _addressSpace.Mirror.Write(va, data);
}
catch (InvalidMemoryRegionException)
{
@@ -296,7 +298,7 @@ namespace Ryujinx.Cpu.Jit
{
AssertMapped(va, (ulong)data.Length);
- _addressSpaceMirror.Write(va, data);
+ _addressSpace.Mirror.Write(va, data);
}
catch (InvalidMemoryRegionException)
{
@@ -314,7 +316,7 @@ namespace Ryujinx.Cpu.Jit
{
SignalMemoryTracking(va, (ulong)data.Length, false);
- Span<byte> target = _addressSpaceMirror.GetSpan(va, data.Length);
+ Span<byte> target = _addressSpace.Mirror.GetSpan(va, data.Length);
bool changed = !data.SequenceEqual(target);
if (changed)
@@ -347,7 +349,7 @@ namespace Ryujinx.Cpu.Jit
AssertMapped(va, (ulong)size);
}
- return _addressSpaceMirror.GetSpan(va, size);
+ return _addressSpace.Mirror.GetSpan(va, size);
}
/// <inheritdoc/>
@@ -362,7 +364,7 @@ namespace Ryujinx.Cpu.Jit
AssertMapped(va, (ulong)size);
}
- return _addressSpaceMirror.GetWritableRegion(va, size);
+ return _addressSpace.Mirror.GetWritableRegion(va, size);
}
/// <inheritdoc/>
@@ -370,7 +372,7 @@ namespace Ryujinx.Cpu.Jit
{
SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), true);
- return ref _addressSpaceMirror.GetRef<T>(va);
+ return ref _addressSpace.Mirror.GetRef<T>(va);
}
/// <inheritdoc/>
@@ -455,6 +457,14 @@ namespace Ryujinx.Cpu.Jit
}
/// <inheritdoc/>
+ public IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ return Enumerable.Repeat(new HostMemoryRange((nuint)(ulong)_addressSpace.Mirror.GetPointer(va, size), size), 1);
+ }
+
+ /// <inheritdoc/>
public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
{
int pages = GetPagesCount(va, (uint)size, out va);
@@ -692,7 +702,7 @@ namespace Ryujinx.Cpu.Jit
_ => MemoryPermission.None
};
- _addressSpace.Reprotect(va, size, protection, false);
+ _addressSpace.Base.Reprotect(va, size, protection, false);
}
/// <inheritdoc/>
@@ -799,7 +809,6 @@ namespace Ryujinx.Cpu.Jit
protected override void Destroy()
{
_addressSpace.Dispose();
- _addressSpaceMirror.Dispose();
_memoryEh.Dispose();
}
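Supports4KBPages on the host-mapped manager reduces to comparing the host page size against the guest's fixed 4KB pages; when they differ, the AddressSpace above takes the mapping-tree path and the tracking granularity follows the host page size. A tiny illustration, using Environment.SystemPageSize as a stand-in for MemoryBlock.GetPageSize:

    using System;

    const ulong GuestPageSize = 0x1000; // guest pages are always 4KB

    ulong hostPageSize = (ulong)Environment.SystemPageSize; // stand-in for MemoryBlock.GetPageSize()
    bool supports4KBPages = hostPageSize == GuestPageSize;

    Console.WriteLine(supports4KBPages
        ? "Direct 4KB view mapping is possible."
        : $"Host pages are 0x{hostPageSize:X}; falling back to private/shared mapping trees.");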
diff --git a/Ryujinx.Cpu/PrivateMemoryAllocation.cs b/Ryujinx.Cpu/PrivateMemoryAllocation.cs
new file mode 100644
index 00000000..1327880e
--- /dev/null
+++ b/Ryujinx.Cpu/PrivateMemoryAllocation.cs
@@ -0,0 +1,41 @@
+using Ryujinx.Memory;
+using System;
+
+namespace Ryujinx.Cpu
+{
+ struct PrivateMemoryAllocation : IDisposable
+ {
+ private readonly PrivateMemoryAllocator _owner;
+ private readonly PrivateMemoryAllocator.Block _block;
+
+ public bool IsValid => _owner != null;
+ public MemoryBlock Memory => _block?.Memory;
+ public ulong Offset { get; }
+ public ulong Size { get; }
+
+ public PrivateMemoryAllocation(
+ PrivateMemoryAllocator owner,
+ PrivateMemoryAllocator.Block block,
+ ulong offset,
+ ulong size)
+ {
+ _owner = owner;
+ _block = block;
+ Offset = offset;
+ Size = size;
+ }
+
+ public (PrivateMemoryAllocation, PrivateMemoryAllocation) Split(ulong splitOffset)
+ {
+ PrivateMemoryAllocation left = new PrivateMemoryAllocation(_owner, _block, Offset, splitOffset);
+ PrivateMemoryAllocation right = new PrivateMemoryAllocation(_owner, _block, Offset + splitOffset, Size - splitOffset);
+
+ return (left, right);
+ }
+
+ public void Dispose()
+ {
+ _owner.Free(_block, Offset, Size);
+ }
+ }
+}
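Split produces two halves that keep referencing the same owner and backing block; only Offset and Size change, with splitOffset taken relative to the allocation's start. A minimal value-type sketch of that behavior:

    using System;

    readonly struct Alloc
    {
        public ulong Offset { get; }
        public ulong Size { get; }

        public Alloc(ulong offset, ulong size) { Offset = offset; Size = size; }

        // Both halves would share the same backing block in the real type.
        public (Alloc Left, Alloc Right) Split(ulong splitOffset) =>
            (new Alloc(Offset, splitOffset), new Alloc(Offset + splitOffset, Size - splitOffset));
    }

    class Demo
    {
        static void Main()
        {
            var (left, right) = new Alloc(0x1000, 0x3000).Split(0x1000);

            Console.WriteLine($"left:  offset 0x{left.Offset:X}, size 0x{left.Size:X}");  // 0x1000, 0x1000
            Console.WriteLine($"right: offset 0x{right.Offset:X}, size 0x{right.Size:X}"); // 0x2000, 0x2000
        }
    }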
diff --git a/Ryujinx.Cpu/PrivateMemoryAllocator.cs b/Ryujinx.Cpu/PrivateMemoryAllocator.cs
new file mode 100644
index 00000000..cbf1f1d9
--- /dev/null
+++ b/Ryujinx.Cpu/PrivateMemoryAllocator.cs
@@ -0,0 +1,268 @@
+using Ryujinx.Common;
+using Ryujinx.Memory;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+
+namespace Ryujinx.Cpu
+{
+ class PrivateMemoryAllocator : PrivateMemoryAllocatorImpl<PrivateMemoryAllocator.Block>
+ {
+ public const ulong InvalidOffset = ulong.MaxValue;
+
+ public class Block : IComparable<Block>
+ {
+ public MemoryBlock Memory { get; private set; }
+ public ulong Size { get; }
+
+ private struct Range : IComparable<Range>
+ {
+ public ulong Offset { get; }
+ public ulong Size { get; }
+
+ public Range(ulong offset, ulong size)
+ {
+ Offset = offset;
+ Size = size;
+ }
+
+ public int CompareTo(Range other)
+ {
+ return Offset.CompareTo(other.Offset);
+ }
+ }
+
+ private readonly List<Range> _freeRanges;
+
+ public Block(MemoryBlock memory, ulong size)
+ {
+ Memory = memory;
+ Size = size;
+ _freeRanges = new List<Range>
+ {
+ new Range(0, size)
+ };
+ }
+
+ public ulong Allocate(ulong size, ulong alignment)
+ {
+ for (int i = 0; i < _freeRanges.Count; i++)
+ {
+ var range = _freeRanges[i];
+
+ ulong alignedOffset = BitUtils.AlignUp(range.Offset, alignment);
+ ulong sizeDelta = alignedOffset - range.Offset;
+ ulong usableSize = range.Size - sizeDelta;
+
+ if (sizeDelta < range.Size && usableSize >= size)
+ {
+ _freeRanges.RemoveAt(i);
+
+ if (sizeDelta != 0)
+ {
+ InsertFreeRange(range.Offset, sizeDelta);
+ }
+
+ ulong endOffset = range.Offset + range.Size;
+ ulong remainingSize = endOffset - (alignedOffset + size);
+ if (remainingSize != 0)
+ {
+ InsertFreeRange(endOffset - remainingSize, remainingSize);
+ }
+
+ return alignedOffset;
+ }
+ }
+
+ return InvalidOffset;
+ }
+
+ public void Free(ulong offset, ulong size)
+ {
+ InsertFreeRangeComingled(offset, size);
+ }
+
+ private void InsertFreeRange(ulong offset, ulong size)
+ {
+ var range = new Range(offset, size);
+ int index = _freeRanges.BinarySearch(range);
+ if (index < 0)
+ {
+ index = ~index;
+ }
+
+ _freeRanges.Insert(index, range);
+ }
+
+ private void InsertFreeRangeComingled(ulong offset, ulong size)
+ {
+ ulong endOffset = offset + size;
+ var range = new Range(offset, size);
+ int index = _freeRanges.BinarySearch(range);
+ if (index < 0)
+ {
+ index = ~index;
+ }
+
+ if (index < _freeRanges.Count && _freeRanges[index].Offset == endOffset)
+ {
+ endOffset = _freeRanges[index].Offset + _freeRanges[index].Size;
+ _freeRanges.RemoveAt(index);
+ }
+
+ if (index > 0 && _freeRanges[index - 1].Offset + _freeRanges[index - 1].Size == offset)
+ {
+ offset = _freeRanges[index - 1].Offset;
+ _freeRanges.RemoveAt(--index);
+ }
+
+ range = new Range(offset, endOffset - offset);
+
+ _freeRanges.Insert(index, range);
+ }
+
+ public bool IsTotallyFree()
+ {
+ if (_freeRanges.Count == 1 && _freeRanges[0].Size == Size)
+ {
+ Debug.Assert(_freeRanges[0].Offset == 0);
+ return true;
+ }
+
+ return false;
+ }
+
+ public int CompareTo(Block other)
+ {
+ return Size.CompareTo(other.Size);
+ }
+
+ public virtual void Destroy()
+ {
+ Memory.Dispose();
+ }
+ }
+
+ public PrivateMemoryAllocator(int blockAlignment, MemoryAllocationFlags allocationFlags) : base(blockAlignment, allocationFlags)
+ {
+ }
+
+ public PrivateMemoryAllocation Allocate(ulong size, ulong alignment)
+ {
+ var allocation = Allocate(size, alignment, CreateBlock);
+
+ return new PrivateMemoryAllocation(this, allocation.Block, allocation.Offset, allocation.Size);
+ }
+
+ private Block CreateBlock(MemoryBlock memory, ulong size)
+ {
+ return new Block(memory, size);
+ }
+ }
+
+ class PrivateMemoryAllocatorImpl<T> : IDisposable where T : PrivateMemoryAllocator.Block
+ {
+ private const ulong InvalidOffset = ulong.MaxValue;
+
+ public struct Allocation
+ {
+ public T Block { get; }
+ public ulong Offset { get; }
+ public ulong Size { get; }
+
+ public Allocation(T block, ulong offset, ulong size)
+ {
+ Block = block;
+ Offset = offset;
+ Size = size;
+ }
+ }
+
+ private readonly List<T> _blocks;
+
+ private readonly int _blockAlignment;
+ private readonly MemoryAllocationFlags _allocationFlags;
+
+ public PrivateMemoryAllocatorImpl(int blockAlignment, MemoryAllocationFlags allocationFlags)
+ {
+ _blocks = new List<T>();
+ _blockAlignment = blockAlignment;
+ _allocationFlags = allocationFlags;
+ }
+
+ protected Allocation Allocate(ulong size, ulong alignment, Func<MemoryBlock, ulong, T> createBlock)
+ {
+ // Ensure we have a sane alignment value.
+ if ((ulong)(int)alignment != alignment || (int)alignment <= 0)
+ {
+ throw new ArgumentOutOfRangeException(nameof(alignment), $"Invalid alignment 0x{alignment:X}.");
+ }
+
+ for (int i = 0; i < _blocks.Count; i++)
+ {
+ var block = _blocks[i];
+
+ if (block.Size >= size)
+ {
+ ulong offset = block.Allocate(size, alignment);
+ if (offset != InvalidOffset)
+ {
+ return new Allocation(block, offset, size);
+ }
+ }
+ }
+
+ ulong blockAlignedSize = BitUtils.AlignUp(size, (ulong)_blockAlignment);
+
+ var memory = new MemoryBlock(blockAlignedSize, _allocationFlags);
+ var newBlock = createBlock(memory, blockAlignedSize);
+
+ InsertBlock(newBlock);
+
+ ulong newBlockOffset = newBlock.Allocate(size, alignment);
+ Debug.Assert(newBlockOffset != InvalidOffset);
+
+ return new Allocation(newBlock, newBlockOffset, size);
+ }
+
+ public void Free(PrivateMemoryAllocator.Block block, ulong offset, ulong size)
+ {
+ block.Free(offset, size);
+
+ if (block.IsTotallyFree())
+ {
+ for (int i = 0; i < _blocks.Count; i++)
+ {
+ if (_blocks[i] == block)
+ {
+ _blocks.RemoveAt(i);
+ break;
+ }
+ }
+
+ block.Destroy();
+ }
+ }
+
+ private void InsertBlock(T block)
+ {
+ int index = _blocks.BinarySearch(block);
+ if (index < 0)
+ {
+ index = ~index;
+ }
+
+ _blocks.Insert(index, block);
+ }
+
+ public void Dispose()
+ {
+ for (int i = 0; i < _blocks.Count; i++)
+ {
+ _blocks[i].Destroy();
+ }
+
+ _blocks.Clear();
+ }
+ }
+}
\ No newline at end of file
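InsertFreeRangeComingled keeps the free list sorted by offset and merges a freed range with any neighbor it touches, so fragmented frees collapse back into a single range (which is what lets IsTotallyFree detect an empty block). A standalone sketch of the coalescing, with tuples in place of the Range struct:

    using System;
    using System.Collections.Generic;
    using System.Linq;

    class FreeList
    {
        private static readonly Comparer<(ulong Offset, ulong Size)> ByOffset =
            Comparer<(ulong Offset, ulong Size)>.Create((a, b) => a.Offset.CompareTo(b.Offset));

        private readonly List<(ulong Offset, ulong Size)> _free = new();

        public void Free(ulong offset, ulong size)
        {
            ulong end = offset + size;
            int index = _free.BinarySearch((offset, size), ByOffset);
            if (index < 0) index = ~index;

            // Merge with the following range if it starts exactly at our end.
            if (index < _free.Count && _free[index].Offset == end)
            {
                end = _free[index].Offset + _free[index].Size;
                _free.RemoveAt(index);
            }

            // Merge with the preceding range if it ends exactly at our start.
            if (index > 0 && _free[index - 1].Offset + _free[index - 1].Size == offset)
            {
                offset = _free[index - 1].Offset;
                _free.RemoveAt(--index);
            }

            _free.Insert(index, (offset, end - offset));
        }

        public override string ToString() =>
            string.Join(", ", _free.Select(r => $"0x{r.Offset:X}+0x{r.Size:X}"));
    }

    class Demo
    {
        static void Main()
        {
            var list = new FreeList();
            list.Free(0x0000, 0x1000);
            list.Free(0x2000, 0x1000);
            list.Free(0x1000, 0x1000); // bridges the gap: all three coalesce

            Console.WriteLine(list);   // 0x0+0x3000
        }
    }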
diff --git a/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs b/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs
index c167dc0d..896e11a5 100644
--- a/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs
+++ b/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs
@@ -1420,6 +1420,14 @@ namespace Ryujinx.Graphics.Gpu.Image
/// <param name="size">The size of the flushing memory access</param>
public void FlushAction(TextureGroupHandle handle, ulong address, ulong size)
{
+ // If the page size is larger than 4KB, we will have a lot of false positives for flushing.
+ // Let's avoid flushing textures that are unlikely to be read from CPU to improve performance
+ // on those platforms.
+ if (!_physicalMemory.Supports4KBPages && !Storage.Info.IsLinear && !_context.IsGpuThread())
+ {
+ return;
+ }
+
// There is a small gap here where the action is removed but _actionRegistered is still 1.
// In this case it will skip registering the action, but here we are already handling it,
// so there shouldn't be any issue as it's the same handler for all actions.
diff --git a/Ryujinx.Graphics.Gpu/Memory/Buffer.cs b/Ryujinx.Graphics.Gpu/Memory/Buffer.cs
index 842249f3..a624386e 100644
--- a/Ryujinx.Graphics.Gpu/Memory/Buffer.cs
+++ b/Ryujinx.Graphics.Gpu/Memory/Buffer.cs
@@ -470,19 +470,16 @@ namespace Ryujinx.Graphics.Gpu.Memory
return false;
}
- if (address < Address)
- {
- address = Address;
- }
+ ulong maxAddress = Math.Max(address, Address);
+ ulong minEndAddress = Math.Min(address + size, Address + Size);
- ulong maxSize = Address + Size - address;
-
- if (size > maxSize)
+ if (maxAddress >= minEndAddress)
{
- size = maxSize;
+ // Access doesn't overlap.
+ return false;
}
- ForceDirty(address, size);
+ ForceDirty(maxAddress, minEndAddress - maxAddress);
return true;
}
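The rewritten ForceDirty path computes the intersection of the access range with the buffer's own range and returns false when it is empty, rather than clamping start and size in two separate steps. The core arithmetic as a standalone helper:

    using System;

    class RangeMath
    {
        // Intersect [aStart, aStart + aSize) with [bStart, bStart + bSize).
        static bool TryIntersect(ulong aStart, ulong aSize, ulong bStart, ulong bSize,
                                 out ulong start, out ulong size)
        {
            ulong maxStart = Math.Max(aStart, bStart);
            ulong minEnd = Math.Min(aStart + aSize, bStart + bSize);

            if (maxStart >= minEnd)
            {
                start = 0; size = 0;
                return false; // no overlap, nothing to mark dirty
            }

            start = maxStart;
            size = minEnd - maxStart;
            return true;
        }

        static void Main()
        {
            // Access [0x900, 0x1100) against a buffer at [0x1000, 0x2000):
            // only [0x1000, 0x1100) overlaps.
            if (TryIntersect(0x900, 0x800, 0x1000, 0x1000, out ulong start, out ulong size))
            {
                Console.WriteLine($"dirty range: 0x{start:X}+0x{size:X}"); // 0x1000+0x100
            }
        }
    }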
diff --git a/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs b/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs
index 051838f1..c1fc0c5c 100644
--- a/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs
+++ b/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs
@@ -22,6 +22,11 @@ namespace Ryujinx.Graphics.Gpu.Memory
private int _referenceCount;
/// <summary>
+ /// Indicates whether the memory manager supports 4KB pages.
+ /// </summary>
+ public bool Supports4KBPages => _cpuMemory.Supports4KBPages;
+
+ /// <summary>
/// In-memory shader cache.
/// </summary>
public ShaderCache ShaderCache { get; }
diff --git a/Ryujinx.HLE/HOS/Kernel/KernelContext.cs b/Ryujinx.HLE/HOS/Kernel/KernelContext.cs
index 6c58e197..ccc5c0f0 100644
--- a/Ryujinx.HLE/HOS/Kernel/KernelContext.cs
+++ b/Ryujinx.HLE/HOS/Kernel/KernelContext.cs
@@ -84,7 +84,7 @@ namespace Ryujinx.HLE.HOS.Kernel
KernelConstants.UserSlabHeapItemSize,
KernelConstants.UserSlabHeapSize);
- memory.Commit(KernelConstants.UserSlabHeapBase - DramMemoryMap.DramBase, KernelConstants.UserSlabHeapSize);
+ CommitMemory(KernelConstants.UserSlabHeapBase - DramMemoryMap.DramBase, KernelConstants.UserSlabHeapSize);
CriticalSection = new KCriticalSection(this);
Schedulers = new KScheduler[KScheduler.CpuCoresCount];
@@ -119,6 +119,17 @@ namespace Ryujinx.HLE.HOS.Kernel
new Thread(PreemptionThreadStart) { Name = "HLE.PreemptionThread" }.Start();
}
+ public void CommitMemory(ulong address, ulong size)
+ {
+ ulong alignment = MemoryBlock.GetPageSize();
+ ulong endAddress = address + size;
+
+ address &= ~(alignment - 1);
+ endAddress = (endAddress + (alignment - 1)) & ~(alignment - 1);
+
+ Memory.Commit(address, endAddress - address);
+ }
+
public ulong NewThreadUid()
{
return Interlocked.Increment(ref _threadUid) - 1;
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
index 5e6273b8..4596b15d 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
@@ -64,7 +64,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
if (address != 0)
{
IncrementPagesReferenceCount(address, pagesCount);
- context.Memory.Commit(address - DramMemoryMap.DramBase, pagesCount * KPageTableBase.PageSize);
+ context.CommitMemory(address - DramMemoryMap.DramBase, pagesCount * KPageTableBase.PageSize);
}
return address;
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
index 9b7c99ba..28e9f90a 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
@@ -1,6 +1,8 @@
using Ryujinx.Horizon.Common;
using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
using System;
+using System.Collections.Generic;
using System.Diagnostics;
namespace Ryujinx.HLE.HOS.Kernel.Memory
@@ -9,12 +11,20 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
private readonly IVirtualMemoryManager _cpuMemory;
+ protected override bool Supports4KBPages => _cpuMemory.Supports4KBPages;
+
public KPageTable(KernelContext context, IVirtualMemoryManager cpuMemory) : base(context)
{
_cpuMemory = cpuMemory;
}
/// <inheritdoc/>
+ protected override IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
+ {
+ return _cpuMemory.GetHostRegions(va, size);
+ }
+
+ /// <inheritdoc/>
protected override void GetPhysicalRegions(ulong va, ulong size, KPageList pageList)
{
var ranges = _cpuMemory.GetPhysicalRegions(va, size);
@@ -43,7 +53,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return result;
}
- result = MapPages(dst, pageList, newDstPermission, false, 0);
+ result = MapPages(dst, pageList, newDstPermission, MemoryMapFlags.Private, false, 0);
if (result != Result.Success)
{
@@ -81,7 +91,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
if (result != Result.Success)
{
- Result mapResult = MapPages(dst, dstPageList, oldDstPermission, false, 0);
+ Result mapResult = MapPages(dst, dstPageList, oldDstPermission, MemoryMapFlags.Private, false, 0);
Debug.Assert(mapResult == Result.Success);
}
@@ -89,13 +99,20 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
}
/// <inheritdoc/>
- protected override Result MapPages(ulong dstVa, ulong pagesCount, ulong srcPa, KMemoryPermission permission, bool shouldFillPages, byte fillValue)
+ protected override Result MapPages(
+ ulong dstVa,
+ ulong pagesCount,
+ ulong srcPa,
+ KMemoryPermission permission,
+ MemoryMapFlags flags,
+ bool shouldFillPages,
+ byte fillValue)
{
ulong size = pagesCount * PageSize;
- Context.Memory.Commit(srcPa - DramMemoryMap.DramBase, size);
+ Context.CommitMemory(srcPa - DramMemoryMap.DramBase, size);
- _cpuMemory.Map(dstVa, srcPa - DramMemoryMap.DramBase, size);
+ _cpuMemory.Map(dstVa, srcPa - DramMemoryMap.DramBase, size, flags);
if (DramMemoryMap.IsHeapPhysicalAddress(srcPa))
{
@@ -111,7 +128,13 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
}
/// <inheritdoc/>
- protected override Result MapPages(ulong address, KPageList pageList, KMemoryPermission permission, bool shouldFillPages, byte fillValue)
+ protected override Result MapPages(
+ ulong address,
+ KPageList pageList,
+ KMemoryPermission permission,
+ MemoryMapFlags flags,
+ bool shouldFillPages,
+ byte fillValue)
{
using var scopedPageList = new KScopedPageList(Context.MemoryManager, pageList);
@@ -122,9 +145,9 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
ulong addr = pageNode.Address - DramMemoryMap.DramBase;
ulong size = pageNode.PagesCount * PageSize;
- Context.Memory.Commit(addr, size);
+ Context.CommitMemory(addr, size);
- _cpuMemory.Map(currentVa, addr, size);
+ _cpuMemory.Map(currentVa, addr, size, flags);
if (shouldFillPages)
{
@@ -140,6 +163,21 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
}
/// <inheritdoc/>
+ protected override Result MapForeign(IEnumerable<HostMemoryRange> regions, ulong va, ulong size)
+ {
+ ulong offset = 0;
+
+ foreach (var region in regions)
+ {
+ _cpuMemory.MapForeign(va + offset, region.Address, region.Size);
+
+ offset += region.Size;
+ }
+
+ return Result.Success;
+ }
+
+ /// <inheritdoc/>
protected override Result Unmap(ulong address, ulong pagesCount)
{
KPageList pagesToClose = new KPageList();
@@ -188,4 +226,4 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
_cpuMemory.Write(va, data);
}
}
-}
+}
\ No newline at end of file
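MapForeign lays the source's host regions out back-to-back in the destination address space, accumulating an offset as it walks them. A self-contained sketch of that walk; the region addresses below are made up for illustration:

    using System;
    using System.Collections.Generic;

    record HostRegion(nuint Address, ulong Size);

    class Demo
    {
        static void MapForeign(IEnumerable<HostRegion> regions, ulong va, Action<ulong, nuint, ulong> mapOne)
        {
            ulong offset = 0;

            foreach (var region in regions)
            {
                mapOne(va + offset, region.Address, region.Size);
                offset += region.Size;
            }
        }

        static void Main()
        {
            var regions = new[]
            {
                new HostRegion((nuint)0x7F00_0000_0000, 0x4000),
                new HostRegion((nuint)0x7F00_0001_0000, 0x2000),
            };

            MapForeign(regions, 0x10_0000,
                (va, host, size) => Console.WriteLine($"VA 0x{va:X} -> host 0x{host:X}, 0x{size:X} bytes"));
        }
    }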
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs
index e19e22c8..bd7d5725 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs
@@ -1,6 +1,8 @@
using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
using Ryujinx.Horizon.Common;
using System;
using System.Collections.Generic;
@@ -29,6 +31,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
private const int MaxBlocksNeededForInsertion = 2;
protected readonly KernelContext Context;
+ protected virtual bool Supports4KBPages => true;
public ulong AddrSpaceStart { get; private set; }
public ulong AddrSpaceEnd { get; private set; }
@@ -366,7 +369,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return KernelResult.OutOfResource;
}
- Result result = MapPages(address, pageList, permission);
+ Result result = MapPages(address, pageList, permission, MemoryMapFlags.None);
if (result == Result.Success)
{
@@ -502,7 +505,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
if (paIsValid)
{
- result = MapPages(address, pagesCount, srcPa, permission);
+ result = MapPages(address, pagesCount, srcPa, permission, MemoryMapFlags.Private);
}
else
{
@@ -565,7 +568,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
using var _ = new OnScopeExit(() => pageList.DecrementPagesReferenceCount(Context.MemoryManager));
- return MapPages(address, pageList, permission);
+ return MapPages(address, pageList, permission, MemoryMapFlags.Private);
}
public Result MapProcessCodeMemory(ulong dst, ulong src, ulong size)
@@ -746,7 +749,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return KernelResult.InvalidMemState;
}
- result = MapPages(_currentHeapAddr, pageList, KMemoryPermission.ReadAndWrite, true, (byte)_heapFillValue);
+ result = MapPages(_currentHeapAddr, pageList, KMemoryPermission.ReadAndWrite, MemoryMapFlags.Private, true, (byte)_heapFillValue);
if (result != Result.Success)
{
@@ -1334,7 +1337,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
ulong currentPagesCount = Math.Min(srcPaPages, dstVaPages);
- MapPages(dstVa, currentPagesCount, srcPa, KMemoryPermission.ReadAndWrite);
+ MapPages(dstVa, currentPagesCount, srcPa, KMemoryPermission.ReadAndWrite, MemoryMapFlags.Private);
dstVa += currentPagesCount * PageSize;
srcPa += currentPagesCount * PageSize;
@@ -1878,7 +1881,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
Context.Memory.Fill(GetDramAddressFromPa(firstPageFillAddress), unusedSizeAfter, (byte)_ipcFillValue);
}
- Result result = MapPages(currentVa, 1, dstFirstPagePa, permission);
+ Result result = MapPages(currentVa, 1, dstFirstPagePa, permission, MemoryMapFlags.Private);
if (result != Result.Success)
{
@@ -1894,10 +1897,19 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
ulong alignedSize = endAddrTruncated - addressRounded;
- KPageList pageList = new KPageList();
- srcPageTable.GetPhysicalRegions(addressRounded, alignedSize, pageList);
+ Result result;
+
+ if (srcPageTable.Supports4KBPages)
+ {
+ KPageList pageList = new KPageList();
+ srcPageTable.GetPhysicalRegions(addressRounded, alignedSize, pageList);
- Result result = MapPages(currentVa, pageList, permission);
+ result = MapPages(currentVa, pageList, permission, MemoryMapFlags.None);
+ }
+ else
+ {
+ result = MapForeign(srcPageTable.GetHostRegions(addressRounded, alignedSize), currentVa, alignedSize);
+ }
if (result != Result.Success)
{
@@ -1932,7 +1944,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
Context.Memory.Fill(GetDramAddressFromPa(lastPageFillAddr), unusedSizeAfter, (byte)_ipcFillValue);
- Result result = MapPages(currentVa, 1, dstLastPagePa, permission);
+ Result result = MapPages(currentVa, 1, dstLastPagePa, permission, MemoryMapFlags.Private);
if (result != Result.Success)
{
@@ -2885,6 +2897,16 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
}
/// <summary>
+ /// Gets the host regions that make up the given virtual address region.
+ /// If any part of the virtual region is unmapped, null is returned.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range</param>
+ /// <returns>The host regions</returns>
+ /// <exception cref="Ryujinx.Memory.InvalidMemoryRegionException">Thrown for unhandled invalid or unmapped memory accesses</exception>
+ protected abstract IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size);
+
+ /// <summary>
/// Gets the physical regions that make up the given virtual address region.
/// If any part of the virtual region is unmapped, null is returned.
/// </summary>
@@ -2936,10 +2958,18 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
/// <param name="pagesCount">Number of pages to map</param>
/// <param name="srcPa">Physical address where the pages should be mapped. May be ignored if aliasing is not supported</param>
/// <param name="permission">Permission of the region to be mapped</param>
+ /// <param name="flags">Flags controlling the memory map operation</param>
/// <param name="shouldFillPages">Indicate if the pages should be filled with the <paramref name="fillValue"/> value</param>
/// <param name="fillValue">The value used to fill pages when <paramref name="shouldFillPages"/> is set to true</param>
/// <returns>Result of the mapping operation</returns>
- protected abstract Result MapPages(ulong dstVa, ulong pagesCount, ulong srcPa, KMemoryPermission permission, bool shouldFillPages = false, byte fillValue = 0);
+ protected abstract Result MapPages(
+ ulong dstVa,
+ ulong pagesCount,
+ ulong srcPa,
+ KMemoryPermission permission,
+ MemoryMapFlags flags,
+ bool shouldFillPages = false,
+ byte fillValue = 0);
/// <summary>
/// Maps a region of memory into the specified physical memory region.
@@ -2947,10 +2977,26 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
/// <param name="address">Destination virtual address that should be mapped</param>
/// <param name="pageList">List of physical memory pages where the pages should be mapped. May be ignored if aliasing is not supported</param>
/// <param name="permission">Permission of the region to be mapped</param>
+ /// <param name="flags">Flags controlling the memory map operation</param>
/// <param name="shouldFillPages">Indicate if the pages should be filled with the <paramref name="fillValue"/> value</param>
/// <param name="fillValue">The value used to fill pages when <paramref name="shouldFillPages"/> is set to true</param>
/// <returns>Result of the mapping operation</returns>
- protected abstract Result MapPages(ulong address, KPageList pageList, KMemoryPermission permission, bool shouldFillPages = false, byte fillValue = 0);
+ protected abstract Result MapPages(
+ ulong address,
+ KPageList pageList,
+ KMemoryPermission permission,
+ MemoryMapFlags flags,
+ bool shouldFillPages = false,
+ byte fillValue = 0);
+
+ /// <summary>
+ /// Maps pages into an arbitrary host memory location.
+ /// </summary>
+ /// <param name="regions">Host regions to be mapped into the specified virtual memory region</param>
+ /// <param name="va">Destination virtual address of the range on this page table</param>
+ /// <param name="size">Size of the range</param>
+ /// <returns>Result of the mapping operation</returns>
+ protected abstract Result MapForeign(IEnumerable<HostMemoryRange> regions, ulong va, ulong size);
/// <summary>
/// Unmaps a region of memory that was previously mapped with one of the page mapping methods.
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs
index 2dbaf3cd..5ec3cd72 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs
@@ -2,6 +2,7 @@ using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.Horizon.Common;
+using Ryujinx.Memory;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
@@ -48,7 +49,17 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return KernelResult.InvalidPermission;
}
- return memoryManager.MapPages(address, _pageList, MemoryState.SharedMemory, permission);
+ // On platforms with a page size greater than 4KB, this can fail due to the address not being
+ // page aligned, so we return an error to force the application to retry with a different address.
+
+ try
+ {
+ return memoryManager.MapPages(address, _pageList, MemoryState.SharedMemory, permission);
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ return KernelResult.InvalidMemState;
+ }
}
public Result UnmapFromProcess(KPageTableBase memoryManager, ulong address, ulong size, KProcess process)
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs b/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs
index 167e0aa9..c68b7369 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs
@@ -18,7 +18,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
ulong address = pageNode.Address - DramMemoryMap.DramBase;
ulong size = pageNode.PagesCount * KPageTableBase.PageSize;
- context.Memory.Commit(address, size);
+ context.CommitMemory(address, size);
}
}
diff --git a/Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs b/Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs
index 6c442282..06eb4729 100644
--- a/Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs
+++ b/Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs
@@ -6,6 +6,8 @@ namespace Ryujinx.Memory.Tests
{
public class MockVirtualMemoryManager : IVirtualMemoryManager
{
+ public bool Supports4KBPages => true;
+
public bool NoMappings = false;
public event Action<ulong, ulong, MemoryPermission> OnProtect;
@@ -14,7 +16,12 @@ namespace Ryujinx.Memory.Tests
{
}
- public void Map(ulong va, ulong pa, ulong size)
+ public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
+ {
+ throw new NotImplementedException();
+ }
+
+ public void MapForeign(ulong va, nuint hostAddress, ulong size)
{
throw new NotImplementedException();
}
@@ -64,6 +71,11 @@ namespace Ryujinx.Memory.Tests
throw new NotImplementedException();
}
+ IEnumerable<HostMemoryRange> IVirtualMemoryManager.GetHostRegions(ulong va, ulong size)
+ {
+ throw new NotImplementedException();
+ }
+
IEnumerable<MemoryRange> IVirtualMemoryManager.GetPhysicalRegions(ulong va, ulong size)
{
return NoMappings ? new MemoryRange[0] : new MemoryRange[] { new MemoryRange(va, size) };
diff --git a/Ryujinx.Memory/AddressSpaceManager.cs b/Ryujinx.Memory/AddressSpaceManager.cs
index ffe880bf..b532ce5e 100644
--- a/Ryujinx.Memory/AddressSpaceManager.cs
+++ b/Ryujinx.Memory/AddressSpaceManager.cs
@@ -13,9 +13,12 @@ namespace Ryujinx.Memory
/// </summary>
public sealed class AddressSpaceManager : IVirtualMemoryManager, IWritableBlock
{
- public const int PageBits = PageTable<ulong>.PageBits;
- public const int PageSize = PageTable<ulong>.PageSize;
- public const int PageMask = PageTable<ulong>.PageMask;
+ public const int PageBits = PageTable<nuint>.PageBits;
+ public const int PageSize = PageTable<nuint>.PageSize;
+ public const int PageMask = PageTable<nuint>.PageMask;
+
+ /// <inheritdoc/>
+ public bool Supports4KBPages => true;
/// <summary>
/// Address space width in bits.
@@ -25,7 +28,7 @@ namespace Ryujinx.Memory
private readonly ulong _addressSpaceSize;
private readonly MemoryBlock _backingMemory;
- private readonly PageTable<ulong> _pageTable;
+ private readonly PageTable<nuint> _pageTable;
/// <summary>
/// Creates a new instance of the memory manager.
@@ -46,17 +49,17 @@ namespace Ryujinx.Memory
AddressSpaceBits = asBits;
_addressSpaceSize = asSize;
_backingMemory = backingMemory;
- _pageTable = new PageTable<ulong>();
+ _pageTable = new PageTable<nuint>();
}
/// <inheritdoc/>
- public void Map(ulong va, ulong pa, ulong size)
+ public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
{
AssertValidAddressAndSize(va, size);
while (size != 0)
{
- _pageTable.Map(va, pa);
+ _pageTable.Map(va, (nuint)(ulong)_backingMemory.GetPointer(pa, PageSize));
va += PageSize;
pa += PageSize;
@@ -65,6 +68,21 @@ namespace Ryujinx.Memory
}
/// <inheritdoc/>
+ public void MapForeign(ulong va, nuint hostPointer, ulong size)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ while (size != 0)
+ {
+ _pageTable.Map(va, hostPointer);
+
+ va += PageSize;
+ hostPointer += PageSize;
+ size -= PageSize;
+ }
+ }
+
+ /// <inheritdoc/>
public void Unmap(ulong va, ulong size)
{
AssertValidAddressAndSize(va, size);
@@ -108,7 +126,7 @@ namespace Ryujinx.Memory
if (IsContiguousAndMapped(va, data.Length))
{
- data.CopyTo(_backingMemory.GetSpan(GetPhysicalAddressInternal(va), data.Length));
+ data.CopyTo(GetHostSpanContiguous(va, data.Length));
}
else
{
@@ -116,22 +134,18 @@ namespace Ryujinx.Memory
if ((va & PageMask) != 0)
{
- ulong pa = GetPhysicalAddressInternal(va);
-
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
- data.Slice(0, size).CopyTo(_backingMemory.GetSpan(pa, size));
+ data.Slice(0, size).CopyTo(GetHostSpanContiguous(va, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
- ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
-
size = Math.Min(data.Length - offset, PageSize);
- data.Slice(offset, size).CopyTo(_backingMemory.GetSpan(pa, size));
+ data.Slice(offset, size).CopyTo(GetHostSpanContiguous(va + (ulong)offset, size));
}
}
}
@@ -154,7 +168,7 @@ namespace Ryujinx.Memory
if (IsContiguousAndMapped(va, size))
{
- return _backingMemory.GetSpan(GetPhysicalAddressInternal(va), size);
+ return GetHostSpanContiguous(va, size);
}
else
{
@@ -176,7 +190,7 @@ namespace Ryujinx.Memory
if (IsContiguousAndMapped(va, size))
{
- return new WritableRegion(null, va, _backingMemory.GetMemory(GetPhysicalAddressInternal(va), size));
+ return new WritableRegion(null, va, new NativeMemoryManager<byte>((byte*)GetHostAddress(va), size).Memory);
}
else
{
@@ -189,14 +203,14 @@ namespace Ryujinx.Memory
}
/// <inheritdoc/>
- public ref T GetRef<T>(ulong va) where T : unmanaged
+ public unsafe ref T GetRef<T>(ulong va) where T : unmanaged
{
if (!IsContiguous(va, Unsafe.SizeOf<T>()))
{
ThrowMemoryNotContiguous();
}
- return ref _backingMemory.GetRef<T>(GetPhysicalAddressInternal(va));
+ return ref *(T*)GetHostAddress(va);
}
/// <inheritdoc/>
@@ -210,7 +224,7 @@ namespace Ryujinx.Memory
return (int)(vaSpan / PageSize);
}
- private static void ThrowMemoryNotContiguous() => throw new MemoryNotContiguousException();
+ private void ThrowMemoryNotContiguous() => throw new MemoryNotContiguousException();
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private bool IsContiguousAndMapped(ulong va, int size) => IsContiguous(va, size) && IsMapped(va);
@@ -232,7 +246,7 @@ namespace Ryujinx.Memory
return false;
}
- if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
+ if (GetHostAddress(va) + PageSize != GetHostAddress(va + PageSize))
{
return false;
}
@@ -244,6 +258,17 @@ namespace Ryujinx.Memory
}
/// <inheritdoc/>
+ public IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
+ {
+ if (size == 0)
+ {
+ return Enumerable.Empty<HostMemoryRange>();
+ }
+
+ return GetHostRegionsImpl(va, size);
+ }
+
+ /// <inheritdoc/>
public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
{
if (size == 0)
@@ -251,6 +276,39 @@ namespace Ryujinx.Memory
return Enumerable.Empty<MemoryRange>();
}
+ var hostRegions = GetHostRegionsImpl(va, size);
+ if (hostRegions == null)
+ {
+ return null;
+ }
+
+ var regions = new MemoryRange[hostRegions.Count];
+
+ ulong backingStart = (ulong)_backingMemory.Pointer;
+ ulong backingEnd = backingStart + _backingMemory.Size;
+
+ int count = 0;
+
+ for (int i = 0; i < regions.Length; i++)
+ {
+ var hostRegion = hostRegions[i];
+
+ if ((ulong)hostRegion.Address >= backingStart && (ulong)hostRegion.Address < backingEnd)
+ {
+ regions[count++] = new MemoryRange((ulong)hostRegion.Address - backingStart, hostRegion.Size);
+ }
+ }
+
+ if (count != regions.Length)
+ {
+ return new ArraySegment<MemoryRange>(regions, 0, count);
+ }
+
+ return regions;
+ }
+
+ private List<HostMemoryRange> GetHostRegionsImpl(ulong va, ulong size)
+ {
if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
{
return null;
@@ -258,9 +316,9 @@ namespace Ryujinx.Memory
int pages = GetPagesCount(va, (uint)size, out va);
- var regions = new List<MemoryRange>();
+ var regions = new List<HostMemoryRange>();
- ulong regionStart = GetPhysicalAddressInternal(va);
+ nuint regionStart = GetHostAddress(va);
ulong regionSize = PageSize;
for (int page = 0; page < pages - 1; page++)
@@ -270,12 +328,12 @@ namespace Ryujinx.Memory
return null;
}
- ulong newPa = GetPhysicalAddressInternal(va + PageSize);
+ nuint newHostAddress = GetHostAddress(va + PageSize);
- if (GetPhysicalAddressInternal(va) + PageSize != newPa)
+ if (GetHostAddress(va) + PageSize != newHostAddress)
{
- regions.Add(new MemoryRange(regionStart, regionSize));
- regionStart = newPa;
+ regions.Add(new HostMemoryRange(regionStart, regionSize));
+ regionStart = newHostAddress;
regionSize = 0;
}
@@ -283,7 +341,7 @@ namespace Ryujinx.Memory
regionSize += PageSize;
}
- regions.Add(new MemoryRange(regionStart, regionSize));
+ regions.Add(new HostMemoryRange(regionStart, regionSize));
return regions;
}
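(Aside, not part of the patch: the loop above coalesces per-page host addresses into ranges, starting a new range whenever host contiguity breaks. A minimal sketch of the same merge logic, with made-up page addresses:)

using System.Collections.Generic;

// Minimal sketch of the coalescing done by GetHostRegionsImpl above.
// Page addresses are illustrative; a break in host contiguity flushes a range.
const ulong PageSize = 0x1000;
nuint[] hostPages = { 0x10000, 0x11000, 0x12000, 0x40000 };

var regions = new List<(nuint Start, ulong Size)>();
nuint regionStart = hostPages[0];
ulong regionSize = PageSize;

for (int i = 1; i < hostPages.Length; i++)
{
    if (regionStart + (nuint)regionSize != hostPages[i])
    {
        regions.Add((regionStart, regionSize)); // contiguity broken, flush
        regionStart = hostPages[i];
        regionSize = 0;
    }

    regionSize += PageSize;
}

regions.Add((regionStart, regionSize));
// regions is now [(0x10000, 0x3000), (0x40000, 0x1000)].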
@@ -301,22 +359,18 @@ namespace Ryujinx.Memory
if ((va & PageMask) != 0)
{
- ulong pa = GetPhysicalAddressInternal(va);
-
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
- _backingMemory.GetSpan(pa, size).CopyTo(data.Slice(0, size));
+ GetHostSpanContiguous(va, size).CopyTo(data.Slice(0, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
- ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
-
size = Math.Min(data.Length - offset, PageSize);
- _backingMemory.GetSpan(pa, size).CopyTo(data.Slice(offset, size));
+ GetHostSpanContiguous(va + (ulong)offset, size).CopyTo(data.Slice(offset, size));
}
}
@@ -391,22 +445,23 @@ namespace Ryujinx.Memory
}
}
- private ulong GetPhysicalAddressInternal(ulong va)
+ private unsafe Span<byte> GetHostSpanContiguous(ulong va, int size)
{
- return _pageTable.Read(va) + (va & PageMask);
+ return new Span<byte>((void*)GetHostAddress(va), size);
}
- /// <summary>
- /// Reprotect a region of virtual memory for tracking. Sets software protection bits.
- /// </summary>
- /// <param name="va">Virtual address base</param>
- /// <param name="size">Size of the region to protect</param>
- /// <param name="protection">Memory protection to set</param>
+ private nuint GetHostAddress(ulong va)
+ {
+ return _pageTable.Read(va) + (nuint)(va & PageMask);
+ }
+
+ /// <inheritdoc/>
public void TrackingReprotect(ulong va, ulong size, MemoryPermission protection)
{
throw new NotImplementedException();
}
+ /// <inheritdoc/>
public void SignalMemoryTracking(ulong va, ulong size, bool write, bool precise = false)
{
// Only the ARM Memory Manager has tracking for now.
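(With this change the AddressSpaceManager page table stores host pointers rather than offsets into the backing block, so translation is one table read plus the offset within the page. A minimal sketch of the idea, assuming a flat one-entry-per-page table; the field name and layout are hypothetical, the real class goes through _pageTable.Read(va):)

// Sketch only: a flat page table of host pointers, one entry per 4KB page.
const int PageBits = 12;
const ulong PageMask = (1UL << PageBits) - 1;
nuint[] pageTable = new nuint[1 << 20]; // hypothetical, covers a 4GB space

nuint GetHostAddress(ulong va)
{
    nuint pageBase = pageTable[va >> PageBits]; // host pointer of the page
    return pageBase + (nuint)(va & PageMask);   // plus the offset inside it
}

(GetHostSpanContiguous then just wraps the returned pointer in a Span<byte>, which is why contiguity is checked before it is used.)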
diff --git a/Ryujinx.Memory/IVirtualMemoryManager.cs b/Ryujinx.Memory/IVirtualMemoryManager.cs
index c8a74f66..390371ad 100644
--- a/Ryujinx.Memory/IVirtualMemoryManager.cs
+++ b/Ryujinx.Memory/IVirtualMemoryManager.cs
@@ -7,6 +7,12 @@ namespace Ryujinx.Memory
public interface IVirtualMemoryManager
{
/// <summary>
+ /// Indicates whether the memory manager supports aliasing pages at 4KB granularity.
+ /// </summary>
+ /// <returns>True if 4KB pages are supported by the memory manager, false otherwise</returns>
+ bool Supports4KBPages { get; }
+
+ /// <summary>
/// Maps a virtual memory range into a physical memory range.
/// </summary>
/// <remarks>
@@ -15,7 +21,20 @@ namespace Ryujinx.Memory
/// <param name="va">Virtual memory address</param>
/// <param name="pa">Physical memory address where the region should be mapped to</param>
/// <param name="size">Size to be mapped</param>
- void Map(ulong va, ulong pa, ulong size);
+ /// <param name="flags">Flags controlling memory mapping</param>
+ void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags);
+
+ /// <summary>
+ /// Maps a virtual memory range into an arbitrary host memory range.
+ /// </summary>
+ /// <remarks>
+ /// Addresses and size must be page aligned.
+ /// Not all memory managers support this feature.
+ /// </remarks>
+ /// <param name="va">Virtual memory address</param>
+ /// <param name="hostPointer">Host pointer where the virtual region should be mapped</param>
+ /// <param name="size">Size to be mapped</param>
+ void MapForeign(ulong va, nuint hostPointer, ulong size);
/// <summary>
/// Unmaps a previously mapped range of virtual memory.
@@ -116,6 +135,15 @@ namespace Ryujinx.Memory
ref T GetRef<T>(ulong va) where T : unmanaged;
/// <summary>
+ /// Gets the host regions that make up the given virtual address region.
+ /// If any part of the virtual region is unmapped, null is returned.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range</param>
+ /// <returns>Enumerable of host regions</returns>
+ IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size);
+
+ /// <summary>
/// Gets the physical regions that make up the given virtual address region.
/// If any part of the virtual region is unmapped, null is returned.
/// </summary>
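(A hedged usage sketch of the extended interface; the addresses, sizes, and hostBuffer pointer below are illustrative only:)

using Ryujinx.Memory;

static void MapExamples(IVirtualMemoryManager mm, nuint hostBuffer)
{
    // Map guest VA 0x10000000 to backing offset 0; Private allows the
    // implementation to substitute private storage where host page-size
    // restrictions would make direct aliasing fail.
    mm.Map(0x1000_0000, 0, 0x10000, MemoryMapFlags.Private);

    // Map an arbitrary host pointer; per the remarks above, not every
    // memory manager supports this.
    mm.MapForeign(0x2000_0000, hostBuffer, 0x4000);
}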
diff --git a/Ryujinx.Memory/MemoryBlock.cs b/Ryujinx.Memory/MemoryBlock.cs
index 2df7ea9b..885ef456 100644
--- a/Ryujinx.Memory/MemoryBlock.cs
+++ b/Ryujinx.Memory/MemoryBlock.cs
@@ -440,4 +440,4 @@ namespace Ryujinx.Memory
private static void ThrowInvalidMemoryRegionException() => throw new InvalidMemoryRegionException();
}
-}
+}
\ No newline at end of file
diff --git a/Ryujinx.Memory/MemoryMapFlags.cs b/Ryujinx.Memory/MemoryMapFlags.cs
new file mode 100644
index 00000000..b4c74c8c
--- /dev/null
+++ b/Ryujinx.Memory/MemoryMapFlags.cs
@@ -0,0 +1,23 @@
+using System;
+
+namespace Ryujinx.Memory
+{
+ /// <summary>
+ /// Flags that indicate how the host memory should be mapped.
+ /// </summary>
+ [Flags]
+ public enum MemoryMapFlags
+ {
+ /// <summary>
+ /// No mapping flags.
+ /// </summary>
+ None = 0,
+
+ /// <summary>
+ /// Indicates that the implementation is free to ignore the specified backing memory offset
+ /// and allocate its own private storage for the mapping.
+ /// This allows some mappings that would otherwise fail due to host platform restrictions to succeed.
+ /// </summary>
+ Private = 1 << 0
+ }
+}
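(How an implementation might honor the flag, sketched under assumed helper names; MapPrivateStorage and MapBacking are hypothetical, not part of this patch:)

// Hypothetical sketch: branch on MemoryMapFlags.Private inside a Map implementation.
public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
{
    if (flags.HasFlag(MemoryMapFlags.Private))
    {
        // Ignore pa and back the range with freshly allocated private
        // memory; host restrictions on sub-page aliasing no longer apply.
        MapPrivateStorage(va, size);
    }
    else
    {
        // Alias the shared backing block at offset pa as usual.
        MapBacking(va, pa, size);
    }
}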
diff --git a/Ryujinx.Memory/Range/HostMemoryRange.cs b/Ryujinx.Memory/Range/HostMemoryRange.cs
new file mode 100644
index 00000000..79c649d8
--- /dev/null
+++ b/Ryujinx.Memory/Range/HostMemoryRange.cs
@@ -0,0 +1,71 @@
+using System;
+
+namespace Ryujinx.Memory.Range
+{
+ /// <summary>
+ /// Range of memory composed of an address and size.
+ /// </summary>
+ public struct HostMemoryRange : IEquatable<HostMemoryRange>
+ {
+ /// <summary>
+ /// An empty memory range, with a null address and zero size.
+ /// </summary>
+ public static HostMemoryRange Empty => new HostMemoryRange(0, 0);
+
+ /// <summary>
+ /// Start address of the range.
+ /// </summary>
+ public nuint Address { get; }
+
+ /// <summary>
+ /// Size of the range in bytes.
+ /// </summary>
+ public ulong Size { get; }
+
+ /// <summary>
+ /// Address where the range ends (exclusive).
+ /// </summary>
+ public nuint EndAddress => Address + (nuint)Size;
+
+ /// <summary>
+ /// Creates a new memory range with the specified address and size.
+ /// </summary>
+ /// <param name="address">Start address</param>
+ /// <param name="size">Size in bytes</param>
+ public HostMemoryRange(nuint address, ulong size)
+ {
+ Address = address;
+ Size = size;
+ }
+
+ /// <summary>
+ /// Checks if the range overlaps with another.
+ /// </summary>
+ /// <param name="other">The other range to check for overlap</param>
+ /// <returns>True if the ranges overlap, false otherwise</returns>
+ public bool OverlapsWith(HostMemoryRange other)
+ {
+ nuint thisAddress = Address;
+ nuint thisEndAddress = EndAddress;
+ nuint otherAddress = other.Address;
+ nuint otherEndAddress = other.EndAddress;
+
+ return thisAddress < otherEndAddress && otherAddress < thisEndAddress;
+ }
+
+ public override bool Equals(object obj)
+ {
+ return obj is HostMemoryRange other && Equals(other);
+ }
+
+ public bool Equals(HostMemoryRange other)
+ {
+ return Address == other.Address && Size == other.Size;
+ }
+
+ public override int GetHashCode()
+ {
+ return HashCode.Combine(Address, Size);
+ }
+ }
+}
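(Example behavior of the new type, assuming using System and Ryujinx.Memory.Range; EndAddress is exclusive, so touching ranges do not overlap:)

var a = new HostMemoryRange((nuint)0x1000, 0x2000); // [0x1000, 0x3000)
var b = new HostMemoryRange((nuint)0x3000, 0x1000); // [0x3000, 0x4000)
var c = new HostMemoryRange((nuint)0x2000, 0x2000); // [0x2000, 0x4000)

Console.WriteLine(a.OverlapsWith(b)); // False: b starts exactly where a ends
Console.WriteLine(a.OverlapsWith(c)); // True:  they share [0x2000, 0x3000)
Console.WriteLine(a.Equals(new HostMemoryRange((nuint)0x1000, 0x2000))); // True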
diff --git a/Ryujinx.Memory/Tracking/MemoryTracking.cs b/Ryujinx.Memory/Tracking/MemoryTracking.cs
index 9aa7c7ff..9a35cfb6 100644
--- a/Ryujinx.Memory/Tracking/MemoryTracking.cs
+++ b/Ryujinx.Memory/Tracking/MemoryTracking.cs
@@ -139,8 +139,6 @@ namespace Ryujinx.Memory.Tracking
/// <returns>The memory tracking handle</returns>
public MultiRegionHandle BeginGranularTracking(ulong address, ulong size, IEnumerable<IRegionHandle> handles, ulong granularity)
{
- (address, size) = PageAlign(address, size);
-
return new MultiRegionHandle(this, address, size, handles, granularity);
}
@@ -166,11 +164,11 @@ namespace Ryujinx.Memory.Tracking
/// <returns>The memory tracking handle</returns>
public RegionHandle BeginTracking(ulong address, ulong size)
{
- (address, size) = PageAlign(address, size);
+ var (paAddress, paSize) = PageAlign(address, size);
lock (TrackingLock)
{
- RegionHandle handle = new RegionHandle(this, address, size, _memoryManager.IsRangeMapped(address, size));
+ RegionHandle handle = new RegionHandle(this, paAddress, paSize, address, size, _memoryManager.IsRangeMapped(address, size));
return handle;
}
@@ -186,11 +184,11 @@ namespace Ryujinx.Memory.Tracking
/// <returns>The memory tracking handle</returns>
internal RegionHandle BeginTrackingBitmap(ulong address, ulong size, ConcurrentBitmap bitmap, int bit)
{
- (address, size) = PageAlign(address, size);
+ var (paAddress, paSize) = PageAlign(address, size);
lock (TrackingLock)
{
- RegionHandle handle = new RegionHandle(this, address, size, bitmap, bit, _memoryManager.IsRangeMapped(address, size));
+ RegionHandle handle = new RegionHandle(this, paAddress, paSize, address, size, bitmap, bit, _memoryManager.IsRangeMapped(address, size));
return handle;
}
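(The change above splits each handle's range in two: the page-aligned range used for protection, and the caller's original range used for reporting. A worked example with an assumed 16KB tracking granule; the arithmetic mirrors the usual round-down/round-up alignment, the actual helper is PageAlign in MemoryTracking:)

ulong realAddress = 0x12345;  // what the caller asked to track
ulong realSize    = 0x100;
ulong pageSize    = 0x4000;   // assumed 16KB tracking granule

ulong paAddress = realAddress & ~(pageSize - 1);                             // 0x10000
ulong paEnd     = (realAddress + realSize + pageSize - 1) & ~(pageSize - 1); // 0x14000
ulong paSize    = paEnd - paAddress;                                         // 0x4000

// Protection applies to [0x10000, 0x14000); the handle still reports the
// caller's [0x12345, 0x12445) via RealAddress/RealSize.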
diff --git a/Ryujinx.Memory/Tracking/MultiRegionHandle.cs b/Ryujinx.Memory/Tracking/MultiRegionHandle.cs
index 6cbea7f3..6ea2b784 100644
--- a/Ryujinx.Memory/Tracking/MultiRegionHandle.cs
+++ b/Ryujinx.Memory/Tracking/MultiRegionHandle.cs
@@ -32,7 +32,7 @@ namespace Ryujinx.Memory.Tracking
internal MultiRegionHandle(MemoryTracking tracking, ulong address, ulong size, IEnumerable<IRegionHandle> handles, ulong granularity)
{
- _handles = new RegionHandle[size / granularity];
+ _handles = new RegionHandle[(size + granularity - 1) / granularity];
Granularity = granularity;
_dirtyBitmap = new ConcurrentBitmap(_handles.Length, true);
@@ -50,7 +50,7 @@ namespace Ryujinx.Memory.Tracking
foreach (RegionHandle handle in handles)
{
- int startIndex = (int)((handle.Address - address) / granularity);
+ int startIndex = (int)((handle.RealAddress - address) / granularity);
// Fill any gap left before this handle.
while (i < startIndex)
@@ -72,7 +72,7 @@ namespace Ryujinx.Memory.Tracking
}
else
{
- int endIndex = (int)((handle.EndAddress - address) / granularity);
+ int endIndex = (int)((handle.RealEndAddress - address) / granularity);
while (i < endIndex)
{
@@ -171,12 +171,13 @@ namespace Ryujinx.Memory.Tracking
modifiedAction(rgStart, rgSize);
rgSize = 0;
}
- rgStart = handle.Address;
+
+ rgStart = handle.RealAddress;
}
if (handle.Dirty)
{
- rgSize += handle.Size;
+ rgSize += handle.RealSize;
handle.Reprotect();
}
@@ -191,7 +192,7 @@ namespace Ryujinx.Memory.Tracking
int startHandle = (int)((address - Address) / Granularity);
int lastHandle = (int)((address + (size - 1) - Address) / Granularity);
- ulong rgStart = _handles[startHandle].Address;
+ ulong rgStart = Address + (ulong)startHandle * Granularity;
if (startHandle == lastHandle)
{
@@ -200,7 +201,7 @@ namespace Ryujinx.Memory.Tracking
if (handle.Dirty)
{
handle.Reprotect();
- modifiedAction(rgStart, handle.Size);
+ modifiedAction(rgStart, handle.RealSize);
}
return;
@@ -273,10 +274,10 @@ namespace Ryujinx.Memory.Tracking
modifiedAction(rgStart, rgSize);
rgSize = 0;
}
- rgStart = handle.Address;
+ rgStart = handle.RealAddress;
}
- rgSize += handle.Size;
+ rgSize += handle.RealSize;
handle.Reprotect(false, (checkMasks[index] & bitValue) == 0);
checkMasks[index] &= ~bitValue;
@@ -320,7 +321,7 @@ namespace Ryujinx.Memory.Tracking
{
handle.Reprotect();
- modifiedAction(rgStart, handle.Size);
+ modifiedAction(rgStart, handle.RealSize);
}
}
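(The handle-count fix above switches from truncating to ceiling division, so a size that is not a multiple of the granularity still gets a handle covering the tail:)

// Example: 0x9000 bytes at 0x4000 granularity.
ulong size = 0x9000, granularity = 0x4000;

ulong before = size / granularity;                     // 2: the last 0x1000 bytes had no handle
ulong after  = (size + granularity - 1) / granularity; // 3: the tail is covered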
diff --git a/Ryujinx.Memory/Tracking/RegionHandle.cs b/Ryujinx.Memory/Tracking/RegionHandle.cs
index 86c77abc..580f94a5 100644
--- a/Ryujinx.Memory/Tracking/RegionHandle.cs
+++ b/Ryujinx.Memory/Tracking/RegionHandle.cs
@@ -42,6 +42,10 @@ namespace Ryujinx.Memory.Tracking
public ulong Size { get; }
public ulong EndAddress { get; }
+ public ulong RealAddress { get; }
+ public ulong RealSize { get; }
+ public ulong RealEndAddress { get; }
+
internal IMultiRegionHandle Parent { get; set; }
private event Action _onDirty;
@@ -89,10 +93,12 @@ namespace Ryujinx.Memory.Tracking
/// <param name="tracking">Tracking object for the target memory block</param>
/// <param name="address">Virtual address of the region to track</param>
/// <param name="size">Size of the region to track</param>
+ /// <param name="realAddress">The real, unaligned address of the handle</param>
+ /// <param name="realSize">The real, unaligned size of the handle</param>
/// <param name="bitmap">The bitmap the dirty flag for this handle is stored in</param>
/// <param name="bit">The bit index representing the dirty flag for this handle</param>
/// <param name="mapped">True if the region handle starts mapped</param>
- internal RegionHandle(MemoryTracking tracking, ulong address, ulong size, ConcurrentBitmap bitmap, int bit, bool mapped = true)
+ internal RegionHandle(MemoryTracking tracking, ulong address, ulong size, ulong realAddress, ulong realSize, ConcurrentBitmap bitmap, int bit, bool mapped = true)
{
Bitmap = bitmap;
DirtyBit = bit;
@@ -104,6 +110,10 @@ namespace Ryujinx.Memory.Tracking
Size = size;
EndAddress = address + size;
+ RealAddress = realAddress;
+ RealSize = realSize;
+ RealEndAddress = realAddress + realSize;
+
_tracking = tracking;
_regions = tracking.GetVirtualRegionsForHandle(address, size);
foreach (var region in _regions)
@@ -119,16 +129,23 @@ namespace Ryujinx.Memory.Tracking
/// <param name="tracking">Tracking object for the target memory block</param>
/// <param name="address">Virtual address of the region to track</param>
/// <param name="size">Size of the region to track</param>
+ /// <param name="realAddress">The real, unaligned address of the handle</param>
+ /// <param name="realSize">The real, unaligned size of the handle</param>
/// <param name="mapped">True if the region handle starts mapped</param>
- internal RegionHandle(MemoryTracking tracking, ulong address, ulong size, bool mapped = true)
+ internal RegionHandle(MemoryTracking tracking, ulong address, ulong size, ulong realAddress, ulong realSize, bool mapped = true)
{
Bitmap = new ConcurrentBitmap(1, mapped);
Unmapped = !mapped;
+
Address = address;
Size = size;
EndAddress = address + size;
+ RealAddress = realAddress;
+ RealSize = realSize;
+ RealEndAddress = realAddress + realSize;
+
_tracking = tracking;
_regions = tracking.GetVirtualRegionsForHandle(address, size);
foreach (var region in _regions)
@@ -199,6 +216,10 @@ namespace Ryujinx.Memory.Tracking
if (_preAction != null)
{
+ // Limit the range to within this handle.
+ ulong maxAddress = Math.Max(address, RealAddress);
+ ulong minEndAddress = Math.Min(address + size, RealAddress + RealSize);
+
// Copy the handles list in case it changes when we're out of the lock.
if (handleIterable is List<RegionHandle>)
{
@@ -212,7 +233,7 @@ namespace Ryujinx.Memory.Tracking
{
lock (_preActionLock)
{
- _preAction?.Invoke(address, size);
+ _preAction?.Invoke(maxAddress, minEndAddress - maxAddress);
// The action is removed after it returns, to ensure that the null check above succeeds when
// it's still in progress rather than continuing and possibly missing a required data flush.
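(The clamp above intersects the page-aligned faulting range with the handle's real range, so the pre-action never sees alignment padding. With the numbers from the alignment example earlier, assuming using System:)

ulong realAddress = 0x12345, realSize = 0x100;  // handle's real range
ulong address = 0x10000;                        // page-aligned fault start
ulong size = 0x4000;                            // page-aligned fault size

ulong maxAddress    = Math.Max(address, realAddress);                   // 0x12345
ulong minEndAddress = Math.Min(address + size, realAddress + realSize); // 0x12445

// _preAction is invoked with (0x12345, 0x100) instead of (0x10000, 0x4000).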
diff --git a/Ryujinx.Tests/Cpu/CpuTest.cs b/Ryujinx.Tests/Cpu/CpuTest.cs
index cafed37d..b64f7466 100644
--- a/Ryujinx.Tests/Cpu/CpuTest.cs
+++ b/Ryujinx.Tests/Cpu/CpuTest.cs
@@ -53,7 +53,7 @@ namespace Ryujinx.Tests.Cpu
_ram = new MemoryBlock(Size * 2);
_memory = new MemoryManager(_ram, 1ul << 16);
_memory.IncrementReferenceCount();
- _memory.Map(CodeBaseAddress, 0, Size * 2);
+ _memory.Map(CodeBaseAddress, 0, Size * 2, MemoryMapFlags.Private);
_context = CpuContext.CreateExecutionContext();
Translator.IsReadyForTranslation.Set();
diff --git a/Ryujinx.Tests/Cpu/CpuTest32.cs b/Ryujinx.Tests/Cpu/CpuTest32.cs
index 53fea943..46ae3c77 100644
--- a/Ryujinx.Tests/Cpu/CpuTest32.cs
+++ b/Ryujinx.Tests/Cpu/CpuTest32.cs
@@ -48,7 +48,7 @@ namespace Ryujinx.Tests.Cpu
_ram = new MemoryBlock(Size * 2);
_memory = new MemoryManager(_ram, 1ul << 16);
_memory.IncrementReferenceCount();
- _memory.Map(CodeBaseAddress, 0, Size * 2);
+ _memory.Map(CodeBaseAddress, 0, Size * 2, MemoryMapFlags.Private);
_context = CpuContext.CreateExecutionContext();
_context.IsAarch32 = true;