VK_EXT_inline_uniform_block implementation

This extension provides a simpler way of specifying uniform block
memory. Rather than having to allocate a buffer to contain it, the
uniform block memory can be allocated dynamically, which reduces the
number of API calls required to use uniform blocks.

Tests: dEQP-VK.api.info.vulkan1p2_limits_validation.*
Tests: dEQP-VK.binding_model.descriptor_copy.*.inline_uniform_block*
Bug: b/204502117
Change-Id: I46c36158f0c66ea05cf1a762d3a2f720436533ac
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/62809
Presubmit-Ready: Alexis Hétu <sugoi@google.com>
Kokoro-Result: kokoro <noreply+kokoro@google.com>
Tested-by: Alexis Hétu <sugoi@google.com>
Reviewed-by: Nicolas Capens <nicolascapens@google.com>
Commit-Queue: Alexis Hétu <sugoi@google.com>
diff --git a/src/Pipeline/SpirvShaderMemory.cpp b/src/Pipeline/SpirvShaderMemory.cpp
index 8439011..f7337c6 100644
--- a/src/Pipeline/SpirvShaderMemory.cpp
+++ b/src/Pipeline/SpirvShaderMemory.cpp
@@ -56,7 +56,7 @@ auto ptr = GetPointerToData(pointerId, 0, state); bool interleavedByLane = IsStorageInterleavedByLane(pointerTy.storageClass); auto &dst = state->createIntermediate(resultId, resultTy.componentCount); - auto robustness = state->getOutOfBoundsBehavior(pointerTy.storageClass); + auto robustness = getOutOfBoundsBehavior(pointerId, state); VisitMemoryObject(pointerId, [&](const MemoryElement &el) { auto p = ptr + el.offset; @@ -100,7 +100,7 @@ auto ptr = GetPointerToData(pointerId, 0, state); bool interleavedByLane = IsStorageInterleavedByLane(pointerTy.storageClass); - auto robustness = state->getOutOfBoundsBehavior(pointerTy.storageClass); + auto robustness = getOutOfBoundsBehavior(pointerId, state); SIMD::Int mask = state->activeLaneMask(); if(!StoresInHelperInvocation(pointerTy.storageClass)) @@ -395,23 +395,35 @@ auto set = state->getPointer(id); Assert(set.base != Pointer<Byte>(nullptr)); - Pointer<Byte> descriptor = set.base + descriptorOffset; // BufferDescriptor* - Pointer<Byte> data = *Pointer<Pointer<Byte>>(descriptor + OFFSET(vk::BufferDescriptor, ptr)); // void* - Int size = *Pointer<Int>(descriptor + OFFSET(vk::BufferDescriptor, sizeInBytes)); + Pointer<Byte> descriptor = set.base + descriptorOffset; // BufferDescriptor* or inline uniform block - if(routine->pipelineLayout->isDescriptorDynamic(d.DescriptorSet, d.Binding)) + auto descriptorType = routine->pipelineLayout->getDescriptorType(d.DescriptorSet, d.Binding); + if(descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { - Int dynamicOffsetIndex = - routine->pipelineLayout->getDynamicOffsetIndex(d.DescriptorSet, d.Binding) + - arrayIndex; - Int offset = routine->descriptorDynamicOffsets[dynamicOffsetIndex]; - Int robustnessSize = *Pointer<Int>(descriptor + OFFSET(vk::BufferDescriptor, robustnessSize)); - - return SIMD::Pointer(data + offset, Min(size, robustnessSize - offset)); + // Note: there is no bounds checking for inline uniform blocks. 
+ // MAX_INLINE_UNIFORM_BLOCK_SIZE represents the maximum size of + // an inline uniform block, but this value should remain unused. + return SIMD::Pointer(descriptor, vk::MAX_INLINE_UNIFORM_BLOCK_SIZE); } else { - return SIMD::Pointer(data, size); + Pointer<Byte> data = *Pointer<Pointer<Byte>>(descriptor + OFFSET(vk::BufferDescriptor, ptr)); // void* + Int size = *Pointer<Int>(descriptor + OFFSET(vk::BufferDescriptor, sizeInBytes)); + + if(routine->pipelineLayout->isDescriptorDynamic(d.DescriptorSet, d.Binding)) + { + Int dynamicOffsetIndex = + routine->pipelineLayout->getDynamicOffsetIndex(d.DescriptorSet, d.Binding) + + arrayIndex; + Int offset = routine->descriptorDynamicOffsets[dynamicOffsetIndex]; + Int robustnessSize = *Pointer<Int>(descriptor + OFFSET(vk::BufferDescriptor, robustnessSize)); + + return SIMD::Pointer(data + offset, Min(size, robustnessSize - offset)); + } + else + { + return SIMD::Pointer(data, size); + } } }