2019-12-01 03:53:09 +01:00
|
|
|
using Ryujinx.Graphics.Shader.IntermediateRepresentation;
|
|
|
|
using System.Collections.Generic;
|
2019-12-11 07:54:18 +01:00
|
|
|
using System.Diagnostics;
|
2020-05-27 16:07:10 +02:00
|
|
|
using System.Linq;
|
2019-12-01 03:53:09 +01:00
|
|
|
|
|
|
|
using static Ryujinx.Graphics.Shader.IntermediateRepresentation.OperandHelper;
|
|
|
|
using static Ryujinx.Graphics.Shader.Translation.GlobalMemory;
|
|
|
|
|
|
|
|
namespace Ryujinx.Graphics.Shader.Translation
|
|
|
|
{
|
2020-11-09 23:35:04 +01:00
|
|
|
static class Rewriter
|
2019-12-01 03:53:09 +01:00
|
|
|
{
|
|
|
|
/// <summary>
/// Rewrites operations that the host GPU cannot execute directly.
/// Global memory accesses are converted to storage buffer accesses, and
/// texture sample operations are rewritten when offsets or coordinate
/// normalization require emulation.
/// </summary>
/// <param name="blocks">Basic blocks of the shader being translated</param>
/// <param name="config">Shader translation configuration</param>
public static void RunPass(BasicBlock[] blocks, ShaderConfig config)
{
    foreach (BasicBlock block in blocks)
    {
        LinkedListNode<INode> current = block.Operations.First;

        while (current != null)
        {
            // Only Operation nodes are candidates for rewriting.
            if (current.Value is Operation operation)
            {
                if (UsesGlobalMemory(operation.Inst))
                {
                    // The rewrite replaces the node; continue the walk from the replacement.
                    current = RewriteGlobalAccess(current, config);
                }

                if (operation is TextureOperation texOp && texOp.Inst == Instruction.TextureSample)
                {
                    current = RewriteTextureSample(current, config);

                    // Texture buffer reads may additionally need SNORM normalization.
                    if (texOp.Type == SamplerType.TextureBuffer)
                    {
                        current = InsertSnormNormalization(current, config);
                    }
                }
            }

            current = current.Next;
        }
    }
}
|
|
|
|
|
2019-12-11 07:54:18 +01:00
|
|
|
/// <summary>
/// Replaces a global memory access operation (load/store/atomic on a 64-bit
/// address) with an equivalent storage buffer access, by searching at runtime
/// which of the bound storage buffers contains the accessed address.
/// The search and address translation code is emitted as IR operations
/// inserted before the rewritten node.
/// </summary>
/// <param name="node">Linked list node holding the global access operation</param>
/// <param name="config">Shader translation configuration</param>
/// <returns>The replacement node (the new storage access operation)</returns>
private static LinkedListNode<INode> RewriteGlobalAccess(LinkedListNode<INode> node, ShaderConfig config)
{
    Operation operation = (Operation)node.Value;

    bool isAtomic = operation.Inst.IsAtomic();
    // Small stores keep a byte offset (not a word offset), see below.
    bool isStg16Or8 = operation.Inst == Instruction.StoreGlobal16 || operation.Inst == Instruction.StoreGlobal8;
    // Atomics count as writes for the purposes of storage buffer usage flags.
    bool isWrite = isAtomic || operation.Inst == Instruction.StoreGlobal || isStg16Or8;

    Operation storageOp;

    // Inserts a new operation before the current node and returns the local
    // operand holding its result. Used to build the slot-search code.
    Operand PrependOperation(Instruction inst, params Operand[] sources)
    {
        Operand local = Local();

        node.List.AddBefore(node, new Operation(inst, local, sources));

        return local;
    }

    // Sources 0 and 1 of a global access are the low/high words of the 64-bit address.
    Operand addrLow = operation.GetSource(0);
    Operand addrHigh = operation.GetSource(1);

    // Running "selected" base address and slot index; start at 0 and get
    // overwritten by the conditional selects emitted per slot below.
    Operand sbBaseAddrLow = Const(0);
    Operand sbSlot = Const(0);

    // Linear search over all possible storage buffer slots. For each slot,
    // emit a range check and conditionally select that slot's base/index.
    // Note: emission order matters; later slots win when ranges overlap.
    for (int slot = 0; slot < StorageMaxCount; slot++)
    {
        config.SetUsedStorageBuffer(slot, isWrite);

        int cbOffset = GetStorageCbOffset(config.Stage, slot);

        // The descriptor occupies 3 consecutive constant buffer words:
        // base address low, base address high, and size (as read at
        // cbOffset, cbOffset + 1 and cbOffset + 2).
        Operand baseAddrLow = config.CreateCbuf(0, cbOffset);
        Operand baseAddrHigh = config.CreateCbuf(0, cbOffset + 1);
        Operand size = config.CreateCbuf(0, cbOffset + 2);

        // offset = addrLow - baseAddrLow, with borrow tracked separately
        // for the 64-bit comparison below.
        Operand offset = PrependOperation(Instruction.Subtract, addrLow, baseAddrLow);
        Operand borrow = PrependOperation(Instruction.CompareLessU32, addrLow, baseAddrLow);

        Operand inRangeLow = PrependOperation(Instruction.CompareLessU32, offset, size);

        // Propagate the borrow into the high word before comparing.
        Operand addrHighBorrowed = PrependOperation(Instruction.Add, addrHigh, borrow);

        Operand inRangeHigh = PrependOperation(Instruction.CompareEqual, addrHighBorrowed, baseAddrHigh);

        Operand inRange = PrependOperation(Instruction.BitwiseAnd, inRangeLow, inRangeHigh);

        // If in range, this slot becomes the selected storage buffer.
        sbBaseAddrLow = PrependOperation(Instruction.ConditionalSelect, inRange, baseAddrLow, sbBaseAddrLow);
        sbSlot = PrependOperation(Instruction.ConditionalSelect, inRange, Const(slot), sbSlot);
    }

    // The host may bind the buffer at an aligned address below the guest base;
    // truncate the base to that alignment so the computed offset matches what
    // the host actually bound. (Negated power-of-two alignment forms the mask.)
    Operand alignMask = Const(-config.GpuAccessor.QueryHostStorageBufferOffsetAlignment());

    Operand baseAddrTrunc = PrependOperation(Instruction.BitwiseAnd, sbBaseAddrLow, alignMask);
    Operand byteOffset = PrependOperation(Instruction.Subtract, addrLow, baseAddrTrunc);

    Operand[] sources = new Operand[operation.SourcesCount];

    // Storage accesses take (slot, offset, values...) instead of (addrLow, addrHigh, values...).
    sources[0] = sbSlot;

    if (isStg16Or8)
    {
        // 8/16-bit stores address individual bytes, so keep the byte offset.
        sources[1] = byteOffset;
    }
    else
    {
        // Word-sized accesses use a 32-bit word offset (byte offset / 4).
        sources[1] = PrependOperation(Instruction.ShiftRightU32, byteOffset, Const(2));
    }

    // Remaining sources (store values, atomic operands) carry over unchanged.
    for (int index = 2; index < operation.SourcesCount; index++)
    {
        sources[index] = operation.GetSource(index);
    }

    if (isAtomic)
    {
        // Swap the memory region bits of the atomic instruction to "storage".
        Instruction inst = (operation.Inst & ~Instruction.MrMask) | Instruction.MrStorage;

        storageOp = new Operation(inst, operation.Dest, sources);
    }
    else if (operation.Inst == Instruction.LoadGlobal)
    {
        storageOp = new Operation(Instruction.LoadStorage, operation.Dest, sources);
    }
    else
    {
        // Stores: map each global store width to its storage counterpart.
        Instruction storeInst = operation.Inst switch
        {
            Instruction.StoreGlobal16 => Instruction.StoreStorage16,
            Instruction.StoreGlobal8 => Instruction.StoreStorage8,
            _ => Instruction.StoreStorage
        };

        storageOp = new Operation(storeInst, null, sources);
    }

    // Detach the old operation's sources (clears use lists before removal).
    for (int index = 0; index < operation.SourcesCount; index++)
    {
        operation.SetSource(index, null);
    }

    // Replace the old node with the new storage operation in place.
    LinkedListNode<INode> oldNode = node;

    node = node.List.AddBefore(node, storageOp);

    node.List.Remove(oldNode);

    return node;
}
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
private static LinkedListNode<INode> RewriteTextureSample(LinkedListNode<INode> node, ShaderConfig config)
|
2019-12-11 07:54:18 +01:00
|
|
|
{
|
|
|
|
TextureOperation texOp = (TextureOperation)node.Value;
|
|
|
|
|
|
|
|
bool hasOffset = (texOp.Flags & TextureFlags.Offset) != 0;
|
|
|
|
bool hasOffsets = (texOp.Flags & TextureFlags.Offsets) != 0;
|
|
|
|
|
2021-08-11 23:01:06 +02:00
|
|
|
bool hasInvalidOffset = (hasOffset || hasOffsets) && !config.GpuAccessor.QueryHostSupportsNonConstantTextureOffset();
|
2019-12-16 05:59:46 +01:00
|
|
|
|
2021-05-31 21:59:23 +02:00
|
|
|
bool isBindless = (texOp.Flags & TextureFlags.Bindless) != 0;
|
|
|
|
|
New shader cache implementation (#3194)
* New shader cache implementation
* Remove some debug code
* Take transform feedback varying count into account
* Create shader cache directory if it does not exist + fragment output map related fixes
* Remove debug code
* Only check texture descriptors if the constant buffer is bound
* Also check CPU VA on GetSpanMapped
* Remove more unused code and move cache related code
* XML docs + remove more unused methods
* Better codegen for TransformFeedbackDescriptor.AsSpan
* Support migration from old cache format, remove more unused code
Shader cache rebuild now also rewrites the shared toc and data files
* Fix migration error with BRX shaders
* Add a limit to the async translation queue
Avoid async translation threads not being able to keep up and the queue growing very large
* Re-create specialization state on recompile
This might be required if a new version of the shader translator requires more or less state, or if there is a bug related to the GPU state access
* Make shader cache more error resilient
* Add some missing XML docs and move GpuAccessor docs to the interface/use inheritdoc
* Address early PR feedback
* Fix rebase
* Remove IRenderer.CompileShader and IShader interface, replace with new ShaderSource struct passed to CreateProgram directly
* Handle some missing exceptions
* Make shader cache purge delete both old and new shader caches
* Register textures on new specialization state
* Translate and compile shaders in forward order (eliminates diffs due to different binding numbers)
* Limit in-flight shader compilation to the maximum number of compilation threads
* Replace ParallelDiskCacheLoader state changed event with a callback function
* Better handling for invalid constant buffer 1 data length
* Do not create the old cache directory structure if the old cache does not exist
* Constant buffer use should be per-stage. This change will invalidate existing new caches (file format version was incremented)
* Replace rectangle texture with just coordinate normalization
* Skip incompatible shaders that are missing texture information, instead of crashing
This is required if we, for example, support new texture instruction to the shader translator, and then they allow access to textures that were not accessed before. In this scenario, the old cache entry is no longer usable
* Fix coordinates normalization on cubemap textures
* Check if title ID is null before combining shader cache path
* More robust constant buffer address validation on spec state
* More robust constant buffer address validation on spec state (2)
* Regenerate shader cache with one stream, rather than one per shader.
* Only create shader cache directory during initialization
* Logging improvements
* Proper shader program disposal
* PR feedback, and add a comment on serialized structs
* XML docs for RegisterTexture
Co-authored-by: riperiperi <rhy3756547@hotmail.com>
2022-04-10 15:49:44 +02:00
|
|
|
bool isCoordNormalized = !isBindless && config.GpuAccessor.QueryTextureCoordNormalized(texOp.Handle, texOp.CbufSlot);
|
2019-12-16 05:59:46 +01:00
|
|
|
|
New shader cache implementation (#3194)
* New shader cache implementation
* Remove some debug code
* Take transform feedback varying count into account
* Create shader cache directory if it does not exist + fragment output map related fixes
* Remove debug code
* Only check texture descriptors if the constant buffer is bound
* Also check CPU VA on GetSpanMapped
* Remove more unused code and move cache related code
* XML docs + remove more unused methods
* Better codegen for TransformFeedbackDescriptor.AsSpan
* Support migration from old cache format, remove more unused code
Shader cache rebuild now also rewrites the shared toc and data files
* Fix migration error with BRX shaders
* Add a limit to the async translation queue
Avoid async translation threads not being able to keep up and the queue growing very large
* Re-create specialization state on recompile
This might be required if a new version of the shader translator requires more or less state, or if there is a bug related to the GPU state access
* Make shader cache more error resilient
* Add some missing XML docs and move GpuAccessor docs to the interface/use inheritdoc
* Address early PR feedback
* Fix rebase
* Remove IRenderer.CompileShader and IShader interface, replace with new ShaderSource struct passed to CreateProgram directly
* Handle some missing exceptions
* Make shader cache purge delete both old and new shader caches
* Register textures on new specialization state
* Translate and compile shaders in forward order (eliminates diffs due to different binding numbers)
* Limit in-flight shader compilation to the maximum number of compilation threads
* Replace ParallelDiskCacheLoader state changed event with a callback function
* Better handling for invalid constant buffer 1 data length
* Do not create the old cache directory structure if the old cache does not exist
* Constant buffer use should be per-stage. This change will invalidate existing new caches (file format version was incremented)
* Replace rectangle texture with just coordinate normalization
* Skip incompatible shaders that are missing texture information, instead of crashing
This is required if we, for example, support new texture instruction to the shader translator, and then they allow access to textures that were not accessed before. In this scenario, the old cache entry is no longer usable
* Fix coordinates normalization on cubemap textures
* Check if title ID is null before combining shader cache path
* More robust constant buffer address validation on spec state
* More robust constant buffer address validation on spec state (2)
* Regenerate shader cache with one stream, rather than one per shader.
* Only create shader cache directory during initialization
* Logging improvements
* Proper shader program disposal
* PR feedback, and add a comment on serialized structs
* XML docs for RegisterTexture
Co-authored-by: riperiperi <rhy3756547@hotmail.com>
2022-04-10 15:49:44 +02:00
|
|
|
if (!hasInvalidOffset && isCoordNormalized)
|
2019-12-11 07:54:18 +01:00
|
|
|
{
|
|
|
|
return node;
|
|
|
|
}
|
2021-06-23 23:31:14 +02:00
|
|
|
|
2019-12-11 07:54:18 +01:00
|
|
|
bool isGather = (texOp.Flags & TextureFlags.Gather) != 0;
|
|
|
|
bool hasDerivatives = (texOp.Flags & TextureFlags.Derivatives) != 0;
|
2019-12-11 18:41:07 +01:00
|
|
|
bool intCoords = (texOp.Flags & TextureFlags.IntCoords) != 0;
|
2019-12-11 07:54:18 +01:00
|
|
|
bool hasLodBias = (texOp.Flags & TextureFlags.LodBias) != 0;
|
|
|
|
bool hasLodLevel = (texOp.Flags & TextureFlags.LodLevel) != 0;
|
|
|
|
|
|
|
|
bool isArray = (texOp.Type & SamplerType.Array) != 0;
|
|
|
|
bool isIndexed = (texOp.Type & SamplerType.Indexed) != 0;
|
|
|
|
bool isMultisample = (texOp.Type & SamplerType.Multisample) != 0;
|
|
|
|
bool isShadow = (texOp.Type & SamplerType.Shadow) != 0;
|
|
|
|
|
|
|
|
int coordsCount = texOp.Type.GetDimensions();
|
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
int offsetsCount;
|
|
|
|
|
|
|
|
if (hasOffsets)
|
|
|
|
{
|
|
|
|
offsetsCount = coordsCount * 4;
|
|
|
|
}
|
|
|
|
else if (hasOffset)
|
|
|
|
{
|
|
|
|
offsetsCount = coordsCount;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
offsetsCount = 0;
|
|
|
|
}
|
2019-12-11 07:54:18 +01:00
|
|
|
|
|
|
|
Operand[] offsets = new Operand[offsetsCount];
|
|
|
|
Operand[] sources = new Operand[texOp.SourcesCount - offsetsCount];
|
|
|
|
|
|
|
|
int copyCount = 0;
|
|
|
|
|
|
|
|
if (isBindless || isIndexed)
|
|
|
|
{
|
|
|
|
copyCount++;
|
|
|
|
}
|
|
|
|
|
|
|
|
Operand[] lodSources = new Operand[copyCount + coordsCount];
|
|
|
|
|
|
|
|
for (int index = 0; index < lodSources.Length; index++)
|
|
|
|
{
|
|
|
|
lodSources[index] = texOp.GetSource(index);
|
|
|
|
}
|
|
|
|
|
|
|
|
copyCount += coordsCount;
|
|
|
|
|
|
|
|
if (isArray)
|
|
|
|
{
|
|
|
|
copyCount++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (isShadow)
|
|
|
|
{
|
|
|
|
copyCount++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (hasDerivatives)
|
|
|
|
{
|
|
|
|
copyCount += coordsCount * 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (isMultisample)
|
|
|
|
{
|
|
|
|
copyCount++;
|
|
|
|
}
|
|
|
|
else if (hasLodLevel)
|
|
|
|
{
|
|
|
|
copyCount++;
|
|
|
|
}
|
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
int srcIndex = 0;
|
|
|
|
int dstIndex = 0;
|
|
|
|
|
2019-12-11 07:54:18 +01:00
|
|
|
for (int index = 0; index < copyCount; index++)
|
|
|
|
{
|
|
|
|
sources[dstIndex++] = texOp.GetSource(srcIndex++);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool areAllOffsetsConstant = true;
|
|
|
|
|
|
|
|
for (int index = 0; index < offsetsCount; index++)
|
|
|
|
{
|
|
|
|
Operand offset = texOp.GetSource(srcIndex++);
|
|
|
|
|
|
|
|
areAllOffsetsConstant &= offset.Type == OperandType.Constant;
|
|
|
|
|
|
|
|
offsets[index] = offset;
|
|
|
|
}
|
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
hasInvalidOffset &= !areAllOffsetsConstant;
|
|
|
|
|
New shader cache implementation (#3194)
* New shader cache implementation
* Remove some debug code
* Take transform feedback varying count into account
* Create shader cache directory if it does not exist + fragment output map related fixes
* Remove debug code
* Only check texture descriptors if the constant buffer is bound
* Also check CPU VA on GetSpanMapped
* Remove more unused code and move cache related code
* XML docs + remove more unused methods
* Better codegen for TransformFeedbackDescriptor.AsSpan
* Support migration from old cache format, remove more unused code
Shader cache rebuild now also rewrites the shared toc and data files
* Fix migration error with BRX shaders
* Add a limit to the async translation queue
Avoid async translation threads not being able to keep up and the queue growing very large
* Re-create specialization state on recompile
This might be required if a new version of the shader translator requires more or less state, or if there is a bug related to the GPU state access
* Make shader cache more error resilient
* Add some missing XML docs and move GpuAccessor docs to the interface/use inheritdoc
* Address early PR feedback
* Fix rebase
* Remove IRenderer.CompileShader and IShader interface, replace with new ShaderSource struct passed to CreateProgram directly
* Handle some missing exceptions
* Make shader cache purge delete both old and new shader caches
* Register textures on new specialization state
* Translate and compile shaders in forward order (eliminates diffs due to different binding numbers)
* Limit in-flight shader compilation to the maximum number of compilation threads
* Replace ParallelDiskCacheLoader state changed event with a callback function
* Better handling for invalid constant buffer 1 data length
* Do not create the old cache directory structure if the old cache does not exist
* Constant buffer use should be per-stage. This change will invalidate existing new caches (file format version was incremented)
* Replace rectangle texture with just coordinate normalization
* Skip incompatible shaders that are missing texture information, instead of crashing
This is required if we, for example, support new texture instruction to the shader translator, and then they allow access to textures that were not accessed before. In this scenario, the old cache entry is no longer usable
* Fix coordinates normalization on cubemap textures
* Check if title ID is null before combining shader cache path
* More robust constant buffer address validation on spec state
* More robust constant buffer address validation on spec state (2)
* Regenerate shader cache with one stream, rather than one per shader.
* Only create shader cache directory during initialization
* Logging improvements
* Proper shader program disposal
* PR feedback, and add a comment on serialized structs
* XML docs for RegisterTexture
Co-authored-by: riperiperi <rhy3756547@hotmail.com>
2022-04-10 15:49:44 +02:00
|
|
|
if (!hasInvalidOffset && isCoordNormalized)
|
2019-12-11 07:54:18 +01:00
|
|
|
{
|
|
|
|
return node;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (hasLodBias)
|
|
|
|
{
|
|
|
|
sources[dstIndex++] = texOp.GetSource(srcIndex++);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (isGather && !isShadow)
|
|
|
|
{
|
|
|
|
sources[dstIndex++] = texOp.GetSource(srcIndex++);
|
|
|
|
}
|
|
|
|
|
2019-12-11 18:41:07 +01:00
|
|
|
int coordsIndex = isBindless || isIndexed ? 1 : 0;
|
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
int componentIndex = texOp.Index;
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
Operand Int(Operand value)
|
2019-12-11 07:54:18 +01:00
|
|
|
{
|
2019-12-16 05:59:46 +01:00
|
|
|
Operand res = Local();
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2021-11-15 01:37:07 +01:00
|
|
|
node.List.AddBefore(node, new Operation(Instruction.ConvertFP32ToS32, res, value));
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
return res;
|
|
|
|
}
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
Operand Float(Operand value)
|
|
|
|
{
|
|
|
|
Operand res = Local();
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2021-11-15 01:37:07 +01:00
|
|
|
node.List.AddBefore(node, new Operation(Instruction.ConvertS32ToFP32, res, value));
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
return res;
|
|
|
|
}
|
2019-12-11 18:41:07 +01:00
|
|
|
|
New shader cache implementation (#3194)
* New shader cache implementation
* Remove some debug code
* Take transform feedback varying count into account
* Create shader cache directory if it does not exist + fragment output map related fixes
* Remove debug code
* Only check texture descriptors if the constant buffer is bound
* Also check CPU VA on GetSpanMapped
* Remove more unused code and move cache related code
* XML docs + remove more unused methods
* Better codegen for TransformFeedbackDescriptor.AsSpan
* Support migration from old cache format, remove more unused code
Shader cache rebuild now also rewrites the shared toc and data files
* Fix migration error with BRX shaders
* Add a limit to the async translation queue
Avoid async translation threads not being able to keep up and the queue growing very large
* Re-create specialization state on recompile
This might be required if a new version of the shader translator requires more or less state, or if there is a bug related to the GPU state access
* Make shader cache more error resilient
* Add some missing XML docs and move GpuAccessor docs to the interface/use inheritdoc
* Address early PR feedback
* Fix rebase
* Remove IRenderer.CompileShader and IShader interface, replace with new ShaderSource struct passed to CreateProgram directly
* Handle some missing exceptions
* Make shader cache purge delete both old and new shader caches
* Register textures on new specialization state
* Translate and compile shaders in forward order (eliminates diffs due to different binding numbers)
* Limit in-flight shader compilation to the maximum number of compilation threads
* Replace ParallelDiskCacheLoader state changed event with a callback function
* Better handling for invalid constant buffer 1 data length
* Do not create the old cache directory structure if the old cache does not exist
* Constant buffer use should be per-stage. This change will invalidate existing new caches (file format version was incremented)
* Replace rectangle texture with just coordinate normalization
* Skip incompatible shaders that are missing texture information, instead of crashing
This is required if we, for example, support new texture instruction to the shader translator, and then they allow access to textures that were not accessed before. In this scenario, the old cache entry is no longer usable
* Fix coordinates normalization on cubemap textures
* Check if title ID is null before combining shader cache path
* More robust constant buffer address validation on spec state
* More robust constant buffer address validation on spec state (2)
* Regenerate shader cache with one stream, rather than one per shader.
* Only create shader cache directory during initialization
* Logging improvements
* Proper shader program disposal
* PR feedback, and add a comment on serialized structs
* XML docs for RegisterTexture
Co-authored-by: riperiperi <rhy3756547@hotmail.com>
2022-04-10 15:49:44 +02:00
|
|
|
// Emulate non-normalized coordinates by normalizing the coordinates on the shader.
|
|
|
|
// Without normalization, the coordinates are expected to the in the [0, W or H] range,
|
2019-12-16 05:59:46 +01:00
|
|
|
// and otherwise, it is expected to be in the [0, 1] range.
|
|
|
|
// We normalize by dividing the coords by the texture size.
|
New shader cache implementation (#3194)
* New shader cache implementation
* Remove some debug code
* Take transform feedback varying count into account
* Create shader cache directory if it does not exist + fragment output map related fixes
* Remove debug code
* Only check texture descriptors if the constant buffer is bound
* Also check CPU VA on GetSpanMapped
* Remove more unused code and move cache related code
* XML docs + remove more unused methods
* Better codegen for TransformFeedbackDescriptor.AsSpan
* Support migration from old cache format, remove more unused code
Shader cache rebuild now also rewrites the shared toc and data files
* Fix migration error with BRX shaders
* Add a limit to the async translation queue
Avoid async translation threads not being able to keep up and the queue growing very large
* Re-create specialization state on recompile
This might be required if a new version of the shader translator requires more or less state, or if there is a bug related to the GPU state access
* Make shader cache more error resilient
* Add some missing XML docs and move GpuAccessor docs to the interface/use inheritdoc
* Address early PR feedback
* Fix rebase
* Remove IRenderer.CompileShader and IShader interface, replace with new ShaderSource struct passed to CreateProgram directly
* Handle some missing exceptions
* Make shader cache purge delete both old and new shader caches
* Register textures on new specialization state
* Translate and compile shaders in forward order (eliminates diffs due to different binding numbers)
* Limit in-flight shader compilation to the maximum number of compilation threads
* Replace ParallelDiskCacheLoader state changed event with a callback function
* Better handling for invalid constant buffer 1 data length
* Do not create the old cache directory structure if the old cache does not exist
* Constant buffer use should be per-stage. This change will invalidate existing new caches (file format version was incremented)
* Replace rectangle texture with just coordinate normalization
* Skip incompatible shaders that are missing texture information, instead of crashing
This is required if we, for example, support new texture instruction to the shader translator, and then they allow access to textures that were not accessed before. In this scenario, the old cache entry is no longer usable
* Fix coordinates normalization on cubemap textures
* Check if title ID is null before combining shader cache path
* More robust constant buffer address validation on spec state
* More robust constant buffer address validation on spec state (2)
* Regenerate shader cache with one stream, rather than one per shader.
* Only create shader cache directory during initialization
* Logging improvements
* Proper shader program disposal
* PR feedback, and add a comment on serialized structs
* XML docs for RegisterTexture
Co-authored-by: riperiperi <rhy3756547@hotmail.com>
2022-04-10 15:49:44 +02:00
|
|
|
if (!isCoordNormalized && !intCoords)
|
2019-12-16 05:59:46 +01:00
|
|
|
{
|
2021-07-12 21:20:33 +02:00
|
|
|
config.SetUsedFeature(FeatureFlags.IntegerSampling);
|
|
|
|
|
New shader cache implementation (#3194)
* New shader cache implementation
* Remove some debug code
* Take transform feedback varying count into account
* Create shader cache directory if it does not exist + fragment output map related fixes
* Remove debug code
* Only check texture descriptors if the constant buffer is bound
* Also check CPU VA on GetSpanMapped
* Remove more unused code and move cache related code
* XML docs + remove more unused methods
* Better codegen for TransformFeedbackDescriptor.AsSpan
* Support migration from old cache format, remove more unused code
Shader cache rebuild now also rewrites the shared toc and data files
* Fix migration error with BRX shaders
* Add a limit to the async translation queue
Avoid async translation threads not being able to keep up and the queue growing very large
* Re-create specialization state on recompile
This might be required if a new version of the shader translator requires more or less state, or if there is a bug related to the GPU state access
* Make shader cache more error resilient
* Add some missing XML docs and move GpuAccessor docs to the interface/use inheritdoc
* Address early PR feedback
* Fix rebase
* Remove IRenderer.CompileShader and IShader interface, replace with new ShaderSource struct passed to CreateProgram directly
* Handle some missing exceptions
* Make shader cache purge delete both old and new shader caches
* Register textures on new specialization state
* Translate and compile shaders in forward order (eliminates diffs due to different binding numbers)
* Limit in-flight shader compilation to the maximum number of compilation threads
* Replace ParallelDiskCacheLoader state changed event with a callback function
* Better handling for invalid constant buffer 1 data length
* Do not create the old cache directory structure if the old cache does not exist
* Constant buffer use should be per-stage. This change will invalidate existing new caches (file format version was incremented)
* Replace rectangle texture with just coordinate normalization
* Skip incompatible shaders that are missing texture information, instead of crashing
This is required if we, for example, support new texture instruction to the shader translator, and then they allow access to textures that were not accessed before. In this scenario, the old cache entry is no longer usable
* Fix coordinates normalization on cubemap textures
* Check if title ID is null before combining shader cache path
* More robust constant buffer address validation on spec state
* More robust constant buffer address validation on spec state (2)
* Regenerate shader cache with one stream, rather than one per shader.
* Only create shader cache directory during initialization
* Logging improvements
* Proper shader program disposal
* PR feedback, and add a comment on serialized structs
* XML docs for RegisterTexture
Co-authored-by: riperiperi <rhy3756547@hotmail.com>
2022-04-10 15:49:44 +02:00
|
|
|
int normCoordsCount = (texOp.Type & SamplerType.Mask) == SamplerType.TextureCube ? 2 : coordsCount;
|
|
|
|
|
|
|
|
for (int index = 0; index < normCoordsCount; index++)
|
2019-12-11 07:54:18 +01:00
|
|
|
{
|
2019-12-11 18:41:07 +01:00
|
|
|
Operand coordSize = Local();
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2019-12-11 18:41:07 +01:00
|
|
|
Operand[] texSizeSources;
|
|
|
|
|
|
|
|
if (isBindless || isIndexed)
|
|
|
|
{
|
2019-12-16 05:59:46 +01:00
|
|
|
texSizeSources = new Operand[] { sources[0], Const(0) };
|
2019-12-11 18:41:07 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2019-12-16 05:59:46 +01:00
|
|
|
texSizeSources = new Operand[] { Const(0) };
|
2019-12-11 18:41:07 +01:00
|
|
|
}
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2019-12-11 18:41:07 +01:00
|
|
|
node.List.AddBefore(node, new TextureOperation(
|
|
|
|
Instruction.TextureSize,
|
|
|
|
texOp.Type,
|
2021-05-19 23:15:26 +02:00
|
|
|
texOp.Format,
|
2019-12-11 18:41:07 +01:00
|
|
|
texOp.Flags,
|
2021-07-12 21:20:33 +02:00
|
|
|
texOp.CbufSlot,
|
2019-12-11 18:41:07 +01:00
|
|
|
texOp.Handle,
|
|
|
|
index,
|
|
|
|
coordSize,
|
|
|
|
texSizeSources));
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2021-07-12 21:20:33 +02:00
|
|
|
config.SetUsedTexture(Instruction.TextureSize, texOp.Type, texOp.Format, texOp.Flags, texOp.CbufSlot, texOp.Handle);
|
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
Operand source = sources[coordsIndex + index];
|
|
|
|
|
|
|
|
Operand coordNormalized = Local();
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2020-03-03 15:02:08 +01:00
|
|
|
node.List.AddBefore(node, new Operation(Instruction.FP32 | Instruction.Divide, coordNormalized, source, Float(coordSize)));
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
sources[coordsIndex + index] = coordNormalized;
|
|
|
|
}
|
|
|
|
}
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
// Technically, non-constant texture offsets are not allowed (according to the spec),
|
|
|
|
// however some GPUs does support that.
|
|
|
|
// For GPUs where it is not supported, we can replace the instruction with the following:
|
|
|
|
// For texture*Offset, we replace it by texture*, and add the offset to the P coords.
|
|
|
|
// The offset can be calculated as offset / textureSize(lod), where lod = textureQueryLod(coords).
|
|
|
|
// For texelFetchOffset, we replace it by texelFetch and add the offset to the P coords directly.
|
|
|
|
// For textureGatherOffset, we take advantage of the fact that the operation is already broken down
|
|
|
|
// to read the 4 pixels separately, and just replace it with 4 textureGather with a different offset
|
|
|
|
// for each pixel.
|
|
|
|
if (hasInvalidOffset)
|
|
|
|
{
|
|
|
|
if (intCoords)
|
|
|
|
{
|
|
|
|
for (int index = 0; index < coordsCount; index++)
|
|
|
|
{
|
|
|
|
Operand source = sources[coordsIndex + index];
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
Operand coordPlusOffset = Local();
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
node.List.AddBefore(node, new Operation(Instruction.Add, coordPlusOffset, source, offsets[index]));
|
2019-12-11 18:41:07 +01:00
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
sources[coordsIndex + index] = coordPlusOffset;
|
|
|
|
}
|
2019-12-11 18:41:07 +01:00
|
|
|
}
|
2019-12-16 05:59:46 +01:00
|
|
|
else
|
|
|
|
{
|
2021-07-12 21:20:33 +02:00
|
|
|
config.SetUsedFeature(FeatureFlags.IntegerSampling);
|
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
Operand lod = Local();
|
|
|
|
|
|
|
|
node.List.AddBefore(node, new TextureOperation(
|
|
|
|
Instruction.Lod,
|
|
|
|
texOp.Type,
|
2021-05-19 23:15:26 +02:00
|
|
|
texOp.Format,
|
2019-12-16 05:59:46 +01:00
|
|
|
texOp.Flags,
|
2021-07-12 21:20:33 +02:00
|
|
|
texOp.CbufSlot,
|
2019-12-16 05:59:46 +01:00
|
|
|
texOp.Handle,
|
2021-06-23 23:31:14 +02:00
|
|
|
0,
|
2019-12-16 05:59:46 +01:00
|
|
|
lod,
|
|
|
|
lodSources));
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
for (int index = 0; index < coordsCount; index++)
|
|
|
|
{
|
|
|
|
Operand coordSize = Local();
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
Operand[] texSizeSources;
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
if (isBindless || isIndexed)
|
|
|
|
{
|
|
|
|
texSizeSources = new Operand[] { sources[0], Int(lod) };
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
texSizeSources = new Operand[] { Int(lod) };
|
|
|
|
}
|
2019-12-11 07:54:18 +01:00
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
node.List.AddBefore(node, new TextureOperation(
|
|
|
|
Instruction.TextureSize,
|
|
|
|
texOp.Type,
|
2021-05-19 23:15:26 +02:00
|
|
|
texOp.Format,
|
2019-12-16 05:59:46 +01:00
|
|
|
texOp.Flags,
|
2021-07-12 21:20:33 +02:00
|
|
|
texOp.CbufSlot,
|
2019-12-16 05:59:46 +01:00
|
|
|
texOp.Handle,
|
|
|
|
index,
|
|
|
|
coordSize,
|
|
|
|
texSizeSources));
|
|
|
|
|
2021-07-12 21:20:33 +02:00
|
|
|
config.SetUsedTexture(Instruction.TextureSize, texOp.Type, texOp.Format, texOp.Flags, texOp.CbufSlot, texOp.Handle);
|
|
|
|
|
2019-12-16 05:59:46 +01:00
|
|
|
Operand offset = Local();
|
|
|
|
|
|
|
|
Operand intOffset = offsets[index + (hasOffsets ? texOp.Index * coordsCount : 0)];
|
|
|
|
|
2020-03-03 15:02:08 +01:00
|
|
|
node.List.AddBefore(node, new Operation(Instruction.FP32 | Instruction.Divide, offset, Float(intOffset), Float(coordSize)));
|
2019-12-16 05:59:46 +01:00
|
|
|
|
|
|
|
Operand source = sources[coordsIndex + index];
|
|
|
|
|
|
|
|
Operand coordPlusOffset = Local();
|
|
|
|
|
2020-03-03 15:02:08 +01:00
|
|
|
node.List.AddBefore(node, new Operation(Instruction.FP32 | Instruction.Add, coordPlusOffset, source, offset));
|
2019-12-16 05:59:46 +01:00
|
|
|
|
|
|
|
sources[coordsIndex + index] = coordPlusOffset;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (isGather && !isShadow)
|
|
|
|
{
|
|
|
|
Operand gatherComponent = sources[dstIndex - 1];
|
|
|
|
|
|
|
|
Debug.Assert(gatherComponent.Type == OperandType.Constant);
|
|
|
|
|
|
|
|
componentIndex = gatherComponent.Value;
|
|
|
|
}
|
2019-12-11 07:54:18 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
TextureOperation newTexOp = new TextureOperation(
|
|
|
|
Instruction.TextureSample,
|
|
|
|
texOp.Type,
|
2021-05-19 23:15:26 +02:00
|
|
|
texOp.Format,
|
2019-12-11 07:54:18 +01:00
|
|
|
texOp.Flags & ~(TextureFlags.Offset | TextureFlags.Offsets),
|
2021-07-12 21:20:33 +02:00
|
|
|
texOp.CbufSlot,
|
2019-12-11 07:54:18 +01:00
|
|
|
texOp.Handle,
|
|
|
|
componentIndex,
|
|
|
|
texOp.Dest,
|
|
|
|
sources);
|
|
|
|
|
|
|
|
for (int index = 0; index < texOp.SourcesCount; index++)
|
|
|
|
{
|
|
|
|
texOp.SetSource(index, null);
|
|
|
|
}
|
|
|
|
|
|
|
|
LinkedListNode<INode> oldNode = node;
|
|
|
|
|
|
|
|
node = node.List.AddBefore(node, newTexOp);
|
|
|
|
|
|
|
|
node.List.Remove(oldNode);
|
|
|
|
|
|
|
|
return node;
|
|
|
|
}
|
2020-05-27 16:07:10 +02:00
|
|
|
|
|
|
|
private static LinkedListNode<INode> InsertSnormNormalization(LinkedListNode<INode> node, ShaderConfig config)
{
    TextureOperation texOp = (TextureOperation)node.Value;

    // A bindless texture has an unknown handle, so its format can't be
    // queried; it could be anything, leave the sampled value untouched.
    if (texOp.Flags.HasFlag(TextureFlags.Bindless))
    {
        return node;
    }

    TextureFormat format = config.GpuAccessor.QueryTextureFormat(texOp.Handle, texOp.CbufSlot);

    // Largest positive value representable by the signed component width,
    // used below as the normalization divisor for SNORM formats.
    int maxPositive = format switch
    {
        TextureFormat.R8Snorm or
        TextureFormat.R8G8Snorm or
        TextureFormat.R8G8B8A8Snorm => sbyte.MaxValue,
        TextureFormat.R16Snorm or
        TextureFormat.R16G16Snorm or
        TextureFormat.R16G16B16A16Snorm => short.MaxValue,
        _ => 0
    };

    // Zero means the texture does not use a SNORM format,
    // so there's nothing to normalize.
    if (maxPositive == 0)
    {
        return node;
    }

    // Do normalization. We assume SINT formats are being used as a
    // replacement for SNORM (which is not supported).
    // Snapshot the consumers before inserting the new operations, so that
    // the conversion below is not rewritten to consume its own result.
    INode[] consumers = texOp.Dest.UseOps.ToArray();

    Operation convOp = new Operation(Instruction.ConvertS32ToFP32, Local(), texOp.Dest);
    Operation normOp = new Operation(Instruction.FP32 | Instruction.Multiply, Local(), convOp.Dest, ConstF(1f / maxPositive));

    node = node.List.AddAfter(node, convOp);
    node = node.List.AddAfter(node, normOp);

    foreach (INode consumer in consumers)
    {
        if (consumer is Operation op)
        {
            // Replace all uses of the raw texture pixel value
            // with the normalized value.
            for (int srcIndex = 0; srcIndex < op.SourcesCount; srcIndex++)
            {
                if (op.GetSource(srcIndex) == texOp.Dest)
                {
                    op.SetSource(srcIndex, normOp.Dest);
                }
            }
        }
    }

    return node;
}
|
2019-12-01 03:53:09 +01:00
|
|
|
}
|
|
|
|
}
|