Implement descriptor indexing for texel buffers
This change allows shaders to use non-uniform indexes into texel buffer
descriptors. So we must also perform multiple image reads/writes to
accommodate the fact that each lane may be using its own texel source or
destination.
Bug: b/206633340
Tests: dEQP-VK.descriptor_indexing.*
Change-Id: I04646779f0b3ba01a48221fc6a3e855366f7f2e5
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/65571
Kokoro-Result: kokoro <noreply+kokoro@google.com>
Tested-by: Sean Risser <srisser@google.com>
Reviewed-by: Nicolas Capens <nicolascapens@google.com>
Reviewed-by: Sean Risser <srisser@google.com>
Commit-Queue: Sean Risser <srisser@google.com>
diff --git a/src/Pipeline/SpirvShader.cpp b/src/Pipeline/SpirvShader.cpp
index 22ebf75..6627b61 100644
--- a/src/Pipeline/SpirvShader.cpp
+++ b/src/Pipeline/SpirvShader.cpp
@@ -451,6 +451,7 @@
case spv::CapabilityShaderNonUniform: capabilities.ShaderNonUniform = true; break;
case spv::CapabilityRuntimeDescriptorArray: capabilities.RuntimeDescriptorArray = true; break;
case spv::CapabilityStorageBufferArrayNonUniformIndexing: capabilities.StorageBufferArrayNonUniformIndexing = true; break;
+ case spv::CapabilityStorageTexelBufferArrayNonUniformIndexing: capabilities.StorageTexelBufferArrayNonUniformIndexing = true; break;
default:
UNSUPPORTED("Unsupported capability %u", insn.word(1));
}
@@ -1363,7 +1364,7 @@
return ptr;
}
-SIMD::Pointer SpirvShader::WalkAccessChain(Object::ID baseId, Object::ID elementId, const Span &indexIds, EmitState const *state) const
+SIMD::Pointer SpirvShader::WalkAccessChain(Object::ID baseId, Object::ID elementId, const Span &indexIds, bool nonUniform, EmitState const *state) const
{
// TODO: avoid doing per-lane work in some cases if we can?
auto routine = state->routine;
@@ -1418,8 +1419,25 @@
}
else
{
- // Note: the value of indexIds[i] must be dynamically uniform.
- ptr += Int(descriptorSize * Extract(state->getIntermediate(indexIds[i]).Int(0), 0));
+ nonUniform |= GetDecorationsForId(indexIds[i]).NonUniform;
+ SIMD::Int intermediate = state->getIntermediate(indexIds[i]).Int(0);
+ if(nonUniform)
+ {
+ // NonUniform array data can deal with pointers not bound by a 32-bit address
+ // space, so we need to ensure we're using an array pointer, and not a base+offset
+ // pointer.
+ std::array<Pointer<Byte>, SIMD::Width> pointers;
+ for(int i = 0; i < SIMD::Width; i++)
+ {
+ pointers[i] = ptr.getPointerForLane(i);
+ }
+ ptr = SIMD::Pointer(pointers);
+ ptr += descriptorSize * intermediate;
+ }
+ else
+ {
+ ptr += descriptorSize * Extract(intermediate, 0);
+ }
}
}
else
@@ -2241,6 +2259,19 @@
Object::ID elementId = (insn.opcode() == spv::OpPtrAccessChain) ? insn.word(4) : 0;
int indexId = (insn.opcode() == spv::OpPtrAccessChain) ? 5 : 4;
+ // TODO(b/236280746): Eliminate lookahead by optimizing inside SIMD::Pointer.
+ for(auto it = insn; it != end(); it++)
+ {
+ if(it.opcode() == spv::OpLoad)
+ {
+ Object::ID pointerId = it.word(3);
+ if(pointerId.value() == resultId.value())
+ {
+ nonUniform |= GetDecorationsForId(it.word(2)).NonUniform;
+ break;
+ }
+ }
+ }
if(type.storageClass == spv::StorageClassPushConstant ||
type.storageClass == spv::StorageClassUniform ||
@@ -2251,7 +2282,7 @@
}
else
{
- auto ptr = WalkAccessChain(baseId, elementId, Span(insn, indexId, insn.wordCount() - indexId), state);
+ auto ptr = WalkAccessChain(baseId, elementId, Span(insn, indexId, insn.wordCount() - indexId), nonUniform, state);
state->createPointer(resultId, ptr);
}
diff --git a/src/Pipeline/SpirvShader.hpp b/src/Pipeline/SpirvShader.hpp
index 3c7612a..e454090 100644
--- a/src/Pipeline/SpirvShader.hpp
+++ b/src/Pipeline/SpirvShader.hpp
@@ -726,6 +726,7 @@
bool ShaderNonUniform : 1;
bool RuntimeDescriptorArray : 1;
bool StorageBufferArrayNonUniformIndexing : 1;
+ bool StorageTexelBufferArrayNonUniformIndexing : 1;
};
const Capabilities &getUsedCapabilities() const
@@ -1304,7 +1305,7 @@
OutOfBoundsBehavior getOutOfBoundsBehavior(Object::ID pointerId, EmitState const *state) const;
SIMD::Pointer WalkExplicitLayoutAccessChain(Object::ID id, Object::ID elementId, const Span &indexIds, bool nonUniform, const EmitState *state) const;
- SIMD::Pointer WalkAccessChain(Object::ID id, Object::ID elementId, const Span &indexIds, const EmitState *state) const;
+ SIMD::Pointer WalkAccessChain(Object::ID id, Object::ID elementId, const Span &indexIds, bool nonUniform, const EmitState *state) const;
// Returns the *component* offset in the literal for the given access chain.
uint32_t WalkLiteralAccessChain(Type::ID id, const Span &indexes) const;
@@ -1390,6 +1391,15 @@
void callSamplerFunction(Pointer<Byte> samplerFunction, Array<SIMD::Float> &out, Pointer<Byte> imageDescriptor, const ImageInstruction &instruction, EmitState *state) const;
void GetImageDimensions(EmitState const *state, Type const &resultTy, Object::ID imageId, Object::ID lodId, Intermediate &dst) const;
+ struct TexelAddressData
+ {
+ bool isArrayed;
+ spv::Dim dim;
+ int dims, texelSize;
+ SIMD::Int u, v, w, ptrOffset;
+ };
+ static TexelAddressData setupTexelAddressData(SIMD::Int rowPitch, SIMD::Int slicePitch, SIMD::Int samplePitch, ImageInstructionSignature instruction, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, const EmitState *state);
+ static SIMD::Pointer GetNonUniformTexelAddress(ImageInstructionSignature instruction, SIMD::Pointer descriptor, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, OutOfBoundsBehavior outOfBoundsBehavior, const EmitState *state);
static SIMD::Pointer GetTexelAddress(ImageInstructionSignature instruction, Pointer<Byte> descriptor, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, OutOfBoundsBehavior outOfBoundsBehavior, const EmitState *state);
static void WriteImage(ImageInstructionSignature instruction, Pointer<Byte> descriptor, const Pointer<SIMD::Int> &coord, const Pointer<SIMD::Int> &texelAndMask, vk::Format imageFormat);
uint32_t GetConstScalarInt(Object::ID id) const;
diff --git a/src/Pipeline/SpirvShaderImage.cpp b/src/Pipeline/SpirvShaderImage.cpp
index b8b8981..30bc61b 100644
--- a/src/Pipeline/SpirvShaderImage.cpp
+++ b/src/Pipeline/SpirvShaderImage.cpp
@@ -610,27 +610,130 @@
return EmitResult::Continue;
}
+SpirvShader::TexelAddressData SpirvShader::setupTexelAddressData(SIMD::Int rowPitch, SIMD::Int slicePitch, SIMD::Int samplePitch, ImageInstructionSignature instruction, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, const EmitState *state)
+{
+ TexelAddressData data;
+
+ data.isArrayed = instruction.arrayed;
+ data.dim = static_cast<spv::Dim>(instruction.dim);
+ data.texelSize = imageFormat.bytes();
+ data.dims = instruction.coordinates - (data.isArrayed ? 1 : 0);
+
+ data.u = coordinate[0];
+ data.v = SIMD::Int(0);
+
+ if(data.dims > 1)
+ {
+ data.v = coordinate[1];
+ }
+
+ if(data.dim == spv::DimSubpassData)
+ {
+ data.u += state->routine->windowSpacePosition[0];
+ data.v += state->routine->windowSpacePosition[1];
+ }
+
+ data.ptrOffset = data.u * SIMD::Int(data.texelSize);
+
+ if(data.dims > 1)
+ {
+ data.ptrOffset += data.v * rowPitch;
+ }
+
+ data.w = 0;
+ if((data.dims > 2) || data.isArrayed)
+ {
+ if(data.dims > 2)
+ {
+ data.w += coordinate[2];
+ }
+
+ if(data.isArrayed)
+ {
+ data.w += coordinate[data.dims];
+ }
+
+ data.ptrOffset += data.w * slicePitch;
+ }
+
+ if(data.dim == spv::DimSubpassData)
+ {
+ // Multiview input attachment access is to the layer corresponding to the current view
+ data.ptrOffset += SIMD::Int(state->routine->layer) * slicePitch;
+ }
+
+ if(instruction.sample)
+ {
+ data.ptrOffset += sample * samplePitch;
+ }
+
+ return data;
+}
+
+SIMD::Pointer SpirvShader::GetNonUniformTexelAddress(ImageInstructionSignature instruction, SIMD::Pointer descriptor, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, OutOfBoundsBehavior outOfBoundsBehavior, const EmitState *state)
+{
+ const bool useStencilAspect = (imageFormat == VK_FORMAT_S8_UINT);
+ auto rowPitch = (descriptor + (useStencilAspect
+ ? OFFSET(vk::StorageImageDescriptor, stencilRowPitchBytes)
+ : OFFSET(vk::StorageImageDescriptor, rowPitchBytes)))
+ .Load<SIMD::Int>(outOfBoundsBehavior, state->activeLaneMask());
+
+ auto slicePitch = (descriptor + (useStencilAspect
+ ? OFFSET(vk::StorageImageDescriptor, stencilSlicePitchBytes)
+ : OFFSET(vk::StorageImageDescriptor, slicePitchBytes)))
+ .Load<SIMD::Int>(outOfBoundsBehavior, state->activeLaneMask());
+ auto samplePitch = (descriptor + (useStencilAspect
+ ? OFFSET(vk::StorageImageDescriptor, stencilSamplePitchBytes)
+ : OFFSET(vk::StorageImageDescriptor, samplePitchBytes)))
+ .Load<SIMD::Int>(outOfBoundsBehavior, state->activeLaneMask());
+
+ auto texelData = setupTexelAddressData(rowPitch, slicePitch, samplePitch, instruction, coordinate, sample, imageFormat, state);
+
+ // If the out-of-bounds behavior is set to nullify, then each coordinate must be tested individually.
+ // Other out-of-bounds behaviors work properly by just comparing the offset against the total size.
+ if(outOfBoundsBehavior == OutOfBoundsBehavior::Nullify)
+ {
+ SIMD::UInt width = (descriptor + OFFSET(vk::StorageImageDescriptor, width)).Load<SIMD::Int>(outOfBoundsBehavior, state->activeLaneMask());
+ SIMD::Int oobMask = As<SIMD::Int>(CmpNLT(As<SIMD::UInt>(texelData.u), width));
+
+ if(texelData.dims > 1)
+ {
+ SIMD::UInt height = As<SIMD::UInt>((descriptor + OFFSET(vk::StorageImageDescriptor, height)).Load<SIMD::Int>(outOfBoundsBehavior, state->activeLaneMask()));
+ oobMask |= As<SIMD::Int>(CmpNLT(As<SIMD::UInt>(texelData.v), height));
+ }
+
+ if((texelData.dims > 2) || texelData.isArrayed)
+ {
+ SIMD::UInt depth = As<SIMD::UInt>((descriptor + OFFSET(vk::StorageImageDescriptor, depth)).Load<SIMD::Int>(outOfBoundsBehavior, state->activeLaneMask()));
+ if(texelData.dim == spv::DimCube) { depth *= 6; }
+ oobMask |= As<SIMD::Int>(CmpNLT(As<SIMD::UInt>(texelData.w), depth));
+ }
+
+ if(instruction.sample)
+ {
+ SIMD::UInt sampleCount = As<SIMD::UInt>((descriptor + OFFSET(vk::StorageImageDescriptor, sampleCount)).Load<SIMD::Int>(outOfBoundsBehavior, state->activeLaneMask()));
+ oobMask |= As<SIMD::Int>(CmpNLT(As<SIMD::UInt>(sample), sampleCount));
+ }
+
+ constexpr int32_t OOB_OFFSET = 0x7FFFFFFF - 16; // SIMD pointer offsets are signed 32-bit, so this is the largest offset (for 16-byte texels).
+ static_assert(OOB_OFFSET >= vk::MAX_MEMORY_ALLOCATION_SIZE, "the largest offset must be guaranteed to be out-of-bounds");
+
+ texelData.ptrOffset = (texelData.ptrOffset & ~oobMask) | (oobMask & SIMD::Int(OOB_OFFSET)); // oob ? OOB_OFFSET : ptrOffset // TODO: IfThenElse()
+ }
+
+ std::array<Pointer<Byte>, SIMD::Width> imageBase;
+ for(int i = 0; i < SIMD::Width; i++)
+ {
+ imageBase[i] = *Pointer<Pointer<Byte>>(descriptor.getPointerForLane(i) + (useStencilAspect
+ ? OFFSET(vk::StorageImageDescriptor, stencilPtr)
+ : OFFSET(vk::StorageImageDescriptor, ptr)));
+ }
+
+ return SIMD::Pointer(imageBase) + texelData.ptrOffset;
+}
+
SIMD::Pointer SpirvShader::GetTexelAddress(ImageInstructionSignature instruction, Pointer<Byte> descriptor, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, OutOfBoundsBehavior outOfBoundsBehavior, const EmitState *state)
{
- bool isArrayed = instruction.arrayed;
- spv::Dim dim = static_cast<spv::Dim>(instruction.dim);
- int dims = instruction.coordinates - (isArrayed ? 1 : 0);
-
- SIMD::Int u = coordinate[0];
- SIMD::Int v = SIMD::Int(0);
-
- if(dims > 1)
- {
- v = coordinate[1];
- }
-
- if(dim == spv::DimSubpassData)
- {
- u += state->routine->windowSpacePosition[0];
- v += state->routine->windowSpacePosition[1];
- }
-
- const int texelSize = imageFormat.bytes();
const bool useStencilAspect = (imageFormat == VK_FORMAT_S8_UINT);
auto rowPitch = SIMD::Int(*Pointer<Int>(descriptor + (useStencilAspect
? OFFSET(vk::StorageImageDescriptor, stencilRowPitchBytes)
@@ -644,58 +747,26 @@
? OFFSET(vk::StorageImageDescriptor, stencilSamplePitchBytes)
: OFFSET(vk::StorageImageDescriptor, samplePitchBytes))));
- SIMD::Int ptrOffset = u * SIMD::Int(texelSize);
-
- if(dims > 1)
- {
- ptrOffset += v * rowPitch;
- }
-
- SIMD::Int w = 0;
- if((dims > 2) || isArrayed)
- {
- if(dims > 2)
- {
- w += coordinate[2];
- }
-
- if(isArrayed)
- {
- w += coordinate[dims];
- }
-
- ptrOffset += w * slicePitch;
- }
-
- if(dim == spv::DimSubpassData)
- {
- // Multiview input attachment access is to the layer corresponding to the current view
- ptrOffset += SIMD::Int(state->routine->layer) * slicePitch;
- }
-
- if(instruction.sample)
- {
- ptrOffset += sample * samplePitch;
- }
+ auto texelData = setupTexelAddressData(rowPitch, slicePitch, samplePitch, instruction, coordinate, sample, imageFormat, state);
// If the out-of-bounds behavior is set to nullify, then each coordinate must be tested individually.
// Other out-of-bounds behaviors work properly by just comparing the offset against the total size.
if(outOfBoundsBehavior == OutOfBoundsBehavior::Nullify)
{
SIMD::UInt width = *Pointer<UInt>(descriptor + OFFSET(vk::StorageImageDescriptor, width));
- SIMD::Int oobMask = As<SIMD::Int>(CmpNLT(As<SIMD::UInt>(u), width));
+ SIMD::Int oobMask = As<SIMD::Int>(CmpNLT(As<SIMD::UInt>(texelData.u), width));
- if(dims > 1)
+ if(texelData.dims > 1)
{
SIMD::UInt height = *Pointer<UInt>(descriptor + OFFSET(vk::StorageImageDescriptor, height));
- oobMask |= As<SIMD::Int>(CmpNLT(As<SIMD::UInt>(v), height));
+ oobMask |= As<SIMD::Int>(CmpNLT(As<SIMD::UInt>(texelData.v), height));
}
- if((dims > 2) || isArrayed)
+ if((texelData.dims > 2) || texelData.isArrayed)
{
UInt depth = *Pointer<UInt>(descriptor + OFFSET(vk::StorageImageDescriptor, depth));
- if(dim == spv::DimCube) { depth *= 6; }
- oobMask |= As<SIMD::Int>(CmpNLT(As<SIMD::UInt>(w), SIMD::UInt(depth)));
+ if(texelData.dim == spv::DimCube) { depth *= 6; }
+ oobMask |= As<SIMD::Int>(CmpNLT(As<SIMD::UInt>(texelData.w), SIMD::UInt(depth)));
}
if(instruction.sample)
@@ -707,7 +778,7 @@
constexpr int32_t OOB_OFFSET = 0x7FFFFFFF - 16; // SIMD pointer offsets are signed 32-bit, so this is the largest offset (for 16-byte texels).
static_assert(OOB_OFFSET >= vk::MAX_MEMORY_ALLOCATION_SIZE, "the largest offset must be guaranteed to be out-of-bounds");
- ptrOffset = (ptrOffset & ~oobMask) | (oobMask & SIMD::Int(OOB_OFFSET)); // oob ? OOB_OFFSET : ptrOffset // TODO: IfThenElse()
+ texelData.ptrOffset = (texelData.ptrOffset & ~oobMask) | (oobMask & SIMD::Int(OOB_OFFSET)); // oob ? OOB_OFFSET : ptrOffset // TODO: IfThenElse()
}
Pointer<Byte> imageBase = *Pointer<Pointer<Byte>>(descriptor + (useStencilAspect
@@ -716,7 +787,7 @@
Int imageSizeInBytes = *Pointer<Int>(descriptor + OFFSET(vk::StorageImageDescriptor, sizeInBytes));
- return SIMD::Pointer(imageBase, imageSizeInBytes, ptrOffset);
+ return SIMD::Pointer(imageBase, imageSizeInBytes, texelData.ptrOffset);
}
SpirvShader::EmitResult SpirvShader::EmitImageRead(const ImageInstruction &instruction, EmitState *state) const
@@ -747,33 +818,32 @@
imageFormat = VK_FORMAT_S8_UINT;
}
- Pointer<Byte> descriptor = state->getPointer(instruction.imageId).getUniformPointer(); // vk::StorageImageDescriptor*
auto &dst = state->createIntermediate(instruction.resultId, resultType.componentCount);
-
- // VK_EXT_image_robustness requires replacing out-of-bounds access with zero.
- // TODO(b/162327166): Only perform bounds checks when VK_EXT_image_robustness is enabled.
- auto robustness = OutOfBoundsBehavior::Nullify;
+ SIMD::Pointer ptr = state->getPointer(instruction.imageId);
SIMD::Int uvwa[4];
SIMD::Int sample;
+ const int texelSize = imageFormat.bytes();
+ // VK_EXT_image_robustness requires replacing out-of-bounds access with zero.
+ // TODO(b/162327166): Only perform bounds checks when VK_EXT_image_robustness is enabled.
+ auto robustness = OutOfBoundsBehavior::Nullify;
for(uint32_t i = 0; i < instruction.coordinates; i++)
{
uvwa[i] = coordinate.Int(i);
}
-
if(instruction.sample)
{
sample = Operand(this, state, instruction.sampleId).Int(0);
}
- auto texelPtr = GetTexelAddress(instruction, descriptor, uvwa, sample, imageFormat, robustness, state);
-
- const int texelSize = imageFormat.bytes();
-
// Gather packed texel data. Texels larger than 4 bytes occupy multiple SIMD::Int elements.
// TODO(b/160531165): Provide gather abstractions for various element sizes.
SIMD::Int packed[4];
+
+ SIMD::Pointer texelPtr = ptr.isBasePlusOffset
+ ? GetTexelAddress(instruction, ptr.getUniformPointer(), uvwa, sample, imageFormat, robustness, state)
+ : GetNonUniformTexelAddress(instruction, ptr, uvwa, sample, imageFormat, robustness, state);
if(texelSize == 4 || texelSize == 8 || texelSize == 16)
{
for(auto i = 0; i < texelSize / 4; i++)
@@ -1168,19 +1238,43 @@
texelAndMask[3] = texel.Int(3);
texelAndMask[4] = state->activeStoresAndAtomicsMask();
- Pointer<Byte> descriptor = state->getPointer(instruction.imageId).getUniformPointer(); // vk::StorageImageDescriptor*
-
vk::Format imageFormat = SpirvFormatToVulkanFormat(static_cast<spv::ImageFormat>(instruction.imageFormat));
- if(imageFormat == VK_FORMAT_UNDEFINED) // spv::ImageFormatUnknown
+ SIMD::Pointer ptr = state->getPointer(instruction.imageId);
+ if(ptr.isBasePlusOffset)
{
- Pointer<Byte> samplerFunction = lookupSamplerFunction(descriptor, instruction, state);
+ Pointer<Byte> descriptor = ptr.getUniformPointer(); // vk::StorageImageDescriptor*
- Call<ImageSampler>(samplerFunction, descriptor, &coord, &texelAndMask, state->routine->constants);
+ if(imageFormat == VK_FORMAT_UNDEFINED) // spv::ImageFormatUnknown
+ {
+ Pointer<Byte> samplerFunction = lookupSamplerFunction(descriptor, instruction, state);
+
+ Call<ImageSampler>(samplerFunction, descriptor, &coord, &texelAndMask, state->routine->constants);
+ }
+ else
+ {
+ WriteImage(instruction, descriptor, &coord, &texelAndMask, imageFormat);
+ }
}
else
{
- WriteImage(instruction, descriptor, &coord, &texelAndMask, imageFormat);
+ for(int j = 0; j < SIMD::Width; j++)
+ {
+ SIMD::Int singleLaneMask = 0;
+ singleLaneMask = Insert(singleLaneMask, 0xffffffff, j);
+ texelAndMask[4] = state->activeStoresAndAtomicsMask() & singleLaneMask;
+ Pointer<Byte> descriptor = ptr.getPointerForLane(j);
+ if(imageFormat == VK_FORMAT_UNDEFINED) // spv::ImageFormatUnknown
+ {
+ Pointer<Byte> samplerFunction = lookupSamplerFunction(descriptor, instruction, state);
+
+ Call<ImageSampler>(samplerFunction, descriptor, &coord, &texelAndMask, state->routine->constants);
+ }
+ else
+ {
+ WriteImage(instruction, descriptor, &coord, &texelAndMask, imageFormat);
+ }
+ }
}
return EmitResult::Continue;
diff --git a/src/Vulkan/VkPhysicalDevice.cpp b/src/Vulkan/VkPhysicalDevice.cpp
index f2fe650..6b7351e 100644
--- a/src/Vulkan/VkPhysicalDevice.cpp
+++ b/src/Vulkan/VkPhysicalDevice.cpp
@@ -245,20 +245,20 @@
{
features->shaderInputAttachmentArrayDynamicIndexing = VK_FALSE;
features->shaderUniformTexelBufferArrayDynamicIndexing = VK_FALSE;
- features->shaderStorageTexelBufferArrayDynamicIndexing = VK_FALSE;
+ features->shaderStorageTexelBufferArrayDynamicIndexing = VK_TRUE;
features->shaderUniformBufferArrayNonUniformIndexing = VK_FALSE;
features->shaderSampledImageArrayNonUniformIndexing = VK_FALSE;
features->shaderStorageBufferArrayNonUniformIndexing = VK_TRUE;
features->shaderStorageImageArrayNonUniformIndexing = VK_FALSE;
features->shaderInputAttachmentArrayNonUniformIndexing = VK_FALSE;
features->shaderUniformTexelBufferArrayNonUniformIndexing = VK_FALSE;
- features->shaderStorageTexelBufferArrayNonUniformIndexing = VK_FALSE;
+ features->shaderStorageTexelBufferArrayNonUniformIndexing = VK_TRUE;
features->descriptorBindingUniformBufferUpdateAfterBind = VK_FALSE;
features->descriptorBindingSampledImageUpdateAfterBind = VK_FALSE;
features->descriptorBindingStorageImageUpdateAfterBind = VK_FALSE;
features->descriptorBindingStorageBufferUpdateAfterBind = VK_TRUE;
features->descriptorBindingUniformTexelBufferUpdateAfterBind = VK_FALSE;
- features->descriptorBindingStorageTexelBufferUpdateAfterBind = VK_FALSE;
+ features->descriptorBindingStorageTexelBufferUpdateAfterBind = VK_TRUE;
features->descriptorBindingUpdateUnusedWhilePending = VK_TRUE;
features->descriptorBindingPartiallyBound = VK_TRUE;
features->descriptorBindingVariableDescriptorCount = VK_FALSE;