Move Reactor code emission from SpirvShader to EmitState
This change avoids having to pass an EmitState pointer into each of
the methods that emit Reactor code. It also prepares for separating
EmitState from SpirvShader, so that the latter handles only SPIR-V
parsing.
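
The pattern, in brief: emit methods move off SpirvShader (where they
took an explicit EmitState pointer) onto EmitState itself, which keeps
a reference back to the shader for parse-time data. A simplified
sketch of the shape, with stand-in types and an assumed constructor,
not the actual declarations:

    // Simplified sketch; the real declarations live in SpirvShader.hpp.
    struct InsnIterator {};            // stand-in for the instruction iterator
    enum class EmitResult { Continue };

    class SpirvShader
    {
    public:
        class EmitState;
        // Before: emission lived here and threaded the state through:
        //   EmitResult EmitLoad(InsnIterator insn, EmitState *state) const;
    };

    class SpirvShader::EmitState
    {
    public:
        explicit EmitState(const SpirvShader &shader)  // ctor args assumed
            : shader(shader)
        {}

        // After: no EmitState parameter; parsed data is reached through
        // 'shader' (e.g. shader.getObject()), per-invocation emission
        // state through 'this' (e.g. getPointer(), activeLaneMask()).
        EmitResult EmitLoad(InsnIterator insn)
        {
            return EmitResult::Continue;
        }

    private:
        const SpirvShader &shader;     // parse-time data only
    };

Call sites inside EmitState drop the state-> prefix, and accesses to
parsed data gain a shader. prefix, as seen throughout the diff below.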
Bug: b/247020580
Change-Id: Idecf94bf4988029474f188c0fb94c8f059765b65
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/68148
Tested-by: Nicolas Capens <nicolascapens@google.com>
Kokoro-Result: kokoro <noreply+kokoro@google.com>
Reviewed-by: Alexis Hétu <sugoi@google.com>
diff --git a/src/Pipeline/SpirvShaderMemory.cpp b/src/Pipeline/SpirvShaderMemory.cpp
index 90818a4..5921e6e 100644
--- a/src/Pipeline/SpirvShaderMemory.cpp
+++ b/src/Pipeline/SpirvShaderMemory.cpp
@@ -23,65 +23,65 @@
namespace sw {
-SpirvShader::EmitResult SpirvShader::EmitLoad(InsnIterator insn, EmitState *state) const
+SpirvShader::EmitResult SpirvShader::EmitState::EmitLoad(InsnIterator insn)
{
bool atomic = (insn.opcode() == spv::OpAtomicLoad);
Object::ID resultId = insn.word(2);
Object::ID pointerId = insn.word(3);
- auto &result = getObject(resultId);
- auto &resultTy = getType(result);
- auto &pointer = getObject(pointerId);
- auto &pointerTy = getType(pointer);
+ auto &result = shader.getObject(resultId);
+ auto &resultTy = shader.getType(result);
+ auto &pointer = shader.getObject(pointerId);
+ auto &pointerTy = shader.getType(pointer);
std::memory_order memoryOrder = std::memory_order_relaxed;
- ASSERT(getType(pointer).element == result.typeId());
+ ASSERT(shader.getType(pointer).element == result.typeId());
ASSERT(Type::ID(insn.word(1)) == result.typeId());
- ASSERT(!atomic || getType(getType(pointer).element).opcode() == spv::OpTypeInt); // Vulkan 1.1: "Atomic instructions must declare a scalar 32-bit integer type, for the value pointed to by Pointer."
+ ASSERT(!atomic || shader.getType(shader.getType(pointer).element).opcode() == spv::OpTypeInt); // Vulkan 1.1: "Atomic instructions must declare a scalar 32-bit integer type, for the value pointed to by Pointer."
if(pointerTy.storageClass == spv::StorageClassUniformConstant)
{
// Just propagate the pointer.
- auto &ptr = state->getPointer(pointerId);
- state->createPointer(resultId, ptr);
+ auto &ptr = getPointer(pointerId);
+ createPointer(resultId, ptr);
return EmitResult::Continue;
}
if(atomic)
{
Object::ID semanticsId = insn.word(5);
- auto memorySemantics = static_cast<spv::MemorySemanticsMask>(getObject(semanticsId).constantValue[0]);
+ auto memorySemantics = static_cast<spv::MemorySemanticsMask>(shader.getObject(semanticsId).constantValue[0]);
memoryOrder = MemoryOrder(memorySemantics);
}
- auto ptr = GetPointerToData(pointerId, 0, false, state);
+ auto ptr = GetPointerToData(pointerId, 0, false);
bool interleavedByLane = IsStorageInterleavedByLane(pointerTy.storageClass);
- auto robustness = getOutOfBoundsBehavior(pointerId, state);
+ auto robustness = shader.getOutOfBoundsBehavior(pointerId, routine->pipelineLayout);
if(result.kind == Object::Kind::Pointer)
{
- VisitMemoryObject(pointerId, true, [&](const MemoryElement &el) {
+ shader.VisitMemoryObject(pointerId, true, [&](const MemoryElement &el) {
ASSERT(el.index == 0);
auto p = GetElementPointer(ptr, el.offset, interleavedByLane);
- state->createPointer(resultId, p.Load<SIMD::Pointer>(robustness, state->activeLaneMask(), atomic, memoryOrder, sizeof(void *)));
+ createPointer(resultId, p.Load<SIMD::Pointer>(robustness, activeLaneMask(), atomic, memoryOrder, sizeof(void *)));
});
- SPIRV_SHADER_DBG("Load(atomic: {0}, order: {1}, ptr: {2}, mask: {3})", atomic, int(memoryOrder), ptr, state->activeLaneMask());
+ SPIRV_SHADER_DBG("Load(atomic: {0}, order: {1}, ptr: {2}, mask: {3})", atomic, int(memoryOrder), ptr, activeLaneMask());
}
else
{
- auto &dst = state->createIntermediate(resultId, resultTy.componentCount);
- VisitMemoryObject(pointerId, false, [&](const MemoryElement &el) {
+ auto &dst = createIntermediate(resultId, resultTy.componentCount);
+ shader.VisitMemoryObject(pointerId, false, [&](const MemoryElement &el) {
auto p = GetElementPointer(ptr, el.offset, interleavedByLane);
- dst.move(el.index, p.Load<SIMD::Float>(robustness, state->activeLaneMask(), atomic, memoryOrder));
+ dst.move(el.index, p.Load<SIMD::Float>(robustness, activeLaneMask(), atomic, memoryOrder));
});
- SPIRV_SHADER_DBG("Load(atomic: {0}, order: {1}, ptr: {2}, val: {3}, mask: {4})", atomic, int(memoryOrder), ptr, dst, state->activeLaneMask());
+ SPIRV_SHADER_DBG("Load(atomic: {0}, order: {1}, ptr: {2}, val: {3}, mask: {4})", atomic, int(memoryOrder), ptr, dst, activeLaneMask());
}
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitStore(InsnIterator insn, EmitState *state) const
+SpirvShader::EmitResult SpirvShader::EmitState::EmitStore(InsnIterator insn)
{
bool atomic = (insn.opcode() == spv::OpAtomicStore);
Object::ID pointerId = insn.word(1);
@@ -91,40 +91,40 @@
if(atomic)
{
Object::ID semanticsId = insn.word(3);
- auto memorySemantics = static_cast<spv::MemorySemanticsMask>(getObject(semanticsId).constantValue[0]);
+ auto memorySemantics = static_cast<spv::MemorySemanticsMask>(shader.getObject(semanticsId).constantValue[0]);
memoryOrder = MemoryOrder(memorySemantics);
}
- const auto &value = Operand(this, state, objectId);
+ const auto &value = Operand(shader, *this, objectId);
- Store(pointerId, value, atomic, memoryOrder, state);
+ Store(pointerId, value, atomic, memoryOrder);
return EmitResult::Continue;
}
-void SpirvShader::Store(Object::ID pointerId, const Operand &value, bool atomic, std::memory_order memoryOrder, EmitState *state) const
+void SpirvShader::EmitState::Store(Object::ID pointerId, const Operand &value, bool atomic, std::memory_order memoryOrder) const
{
- auto &pointer = getObject(pointerId);
- auto &pointerTy = getType(pointer);
- auto &elementTy = getType(pointerTy.element);
+ auto &pointer = shader.getObject(pointerId);
+ auto &pointerTy = shader.getType(pointer);
+ auto &elementTy = shader.getType(pointerTy.element);
ASSERT(!atomic || elementTy.opcode() == spv::OpTypeInt); // Vulkan 1.1: "Atomic instructions must declare a scalar 32-bit integer type, for the value pointed to by Pointer."
- auto ptr = GetPointerToData(pointerId, 0, false, state);
+ auto ptr = GetPointerToData(pointerId, 0, false);
bool interleavedByLane = IsStorageInterleavedByLane(pointerTy.storageClass);
- auto robustness = getOutOfBoundsBehavior(pointerId, state);
+ auto robustness = shader.getOutOfBoundsBehavior(pointerId, routine->pipelineLayout);
- SIMD::Int mask = state->activeLaneMask();
+ SIMD::Int mask = activeLaneMask();
if(!StoresInHelperInvocation(pointerTy.storageClass))
{
- mask = mask & state->storesAndAtomicsMask();
+ mask = mask & storesAndAtomicsMask();
}
SPIRV_SHADER_DBG("Store(atomic: {0}, order: {1}, ptr: {2}, val: {3}, mask: {4}", atomic, int(memoryOrder), ptr, value, mask);
if(value.isPointer())
{
- VisitMemoryObject(pointerId, true, [&](const MemoryElement &el) {
+ shader.VisitMemoryObject(pointerId, true, [&](const MemoryElement &el) {
ASSERT(el.index == 0);
auto p = GetElementPointer(ptr, el.offset, interleavedByLane);
p.Store(value.Pointer(), robustness, mask, atomic, memoryOrder);
@@ -132,19 +132,18 @@
}
else
{
- VisitMemoryObject(pointerId, false, [&](const MemoryElement &el) {
+ shader.VisitMemoryObject(pointerId, false, [&](const MemoryElement &el) {
auto p = GetElementPointer(ptr, el.offset, interleavedByLane);
p.Store(value.Float(el.index), robustness, mask, atomic, memoryOrder);
});
}
}
-SpirvShader::EmitResult SpirvShader::EmitVariable(InsnIterator insn, EmitState *state) const
+SpirvShader::EmitResult SpirvShader::EmitState::EmitVariable(InsnIterator insn)
{
- auto routine = state->routine;
Object::ID resultId = insn.word(2);
- auto &object = getObject(resultId);
- auto &objectTy = getType(object);
+ auto &object = shader.getObject(resultId);
+ auto &objectTy = shader.getType(object);
switch(objectTy.storageClass)
{
@@ -154,17 +153,17 @@
{
ASSERT(objectTy.opcode() == spv::OpTypePointer);
auto base = &routine->getVariable(resultId)[0];
- auto elementTy = getType(objectTy.element);
+ auto elementTy = shader.getType(objectTy.element);
auto size = elementTy.componentCount * static_cast<uint32_t>(sizeof(float)) * SIMD::Width;
- state->createPointer(resultId, SIMD::Pointer(base, size));
+ createPointer(resultId, SIMD::Pointer(base, size));
}
break;
case spv::StorageClassWorkgroup:
{
ASSERT(objectTy.opcode() == spv::OpTypePointer);
auto base = &routine->workgroupMemory[0];
- auto size = workgroupMemory.size();
- state->createPointer(resultId, SIMD::Pointer(base, size, workgroupMemory.offsetOf(resultId)));
+ auto size = shader.workgroupMemory.size();
+ createPointer(resultId, SIMD::Pointer(base, size, shader.workgroupMemory.offsetOf(resultId)));
}
break;
case spv::StorageClassInput:
@@ -173,22 +172,22 @@
{
auto &dst = routine->getVariable(resultId);
int offset = 0;
- VisitInterface(resultId,
- [&](const Decorations &d, AttribType type) {
- auto scalarSlot = d.Location << 2 | d.Component;
- dst[offset++] = routine->inputs[scalarSlot];
- });
+ shader.VisitInterface(resultId,
+ [&](const Decorations &d, AttribType type) {
+ auto scalarSlot = d.Location << 2 | d.Component;
+ dst[offset++] = routine->inputs[scalarSlot];
+ });
}
ASSERT(objectTy.opcode() == spv::OpTypePointer);
auto base = &routine->getVariable(resultId)[0];
- auto elementTy = getType(objectTy.element);
+ auto elementTy = shader.getType(objectTy.element);
auto size = elementTy.componentCount * static_cast<uint32_t>(sizeof(float)) * SIMD::Width;
- state->createPointer(resultId, SIMD::Pointer(base, size));
+ createPointer(resultId, SIMD::Pointer(base, size));
}
break;
case spv::StorageClassUniformConstant:
{
- const auto &d = descriptorDecorations.at(resultId);
+ const auto &d = shader.descriptorDecorations.at(resultId);
ASSERT(d.DescriptorSet >= 0);
ASSERT(d.Binding >= 0);
@@ -196,14 +195,14 @@
Pointer<Byte> set = routine->descriptorSets[d.DescriptorSet]; // DescriptorSet*
Pointer<Byte> binding = Pointer<Byte>(set + bindingOffset); // vk::SampledImageDescriptor*
auto size = 0; // Not required as this pointer is not directly used by SIMD::Read or SIMD::Write.
- state->createPointer(resultId, SIMD::Pointer(binding, size));
+ createPointer(resultId, SIMD::Pointer(binding, size));
}
break;
case spv::StorageClassUniform:
case spv::StorageClassStorageBuffer:
case spv::StorageClassPhysicalStorageBuffer:
{
- const auto &d = descriptorDecorations.at(resultId);
+ const auto &d = shader.descriptorDecorations.at(resultId);
ASSERT(d.DescriptorSet >= 0);
auto size = 0; // Not required as this pointer is not directly used by SIMD::Read or SIMD::Write.
// Note: the module may contain descriptor set references that are not suitable for this implementation -- using a set index higher than the number
@@ -211,17 +210,17 @@
// is valid. In this case make the value nullptr to make it easier to diagnose an attempt to dereference it.
if(static_cast<uint32_t>(d.DescriptorSet) < vk::MAX_BOUND_DESCRIPTOR_SETS)
{
- state->createPointer(resultId, SIMD::Pointer(routine->descriptorSets[d.DescriptorSet], size));
+ createPointer(resultId, SIMD::Pointer(routine->descriptorSets[d.DescriptorSet], size));
}
else
{
- state->createPointer(resultId, SIMD::Pointer(nullptr, 0));
+ createPointer(resultId, SIMD::Pointer(nullptr, 0));
}
}
break;
case spv::StorageClassPushConstant:
{
- state->createPointer(resultId, SIMD::Pointer(routine->pushConstants, vk::MAX_PUSH_CONSTANT_SIZE));
+ createPointer(resultId, SIMD::Pointer(routine->pushConstants, vk::MAX_PUSH_CONSTANT_SIZE));
}
break;
default:
@@ -232,7 +231,7 @@
if(insn.wordCount() > 4)
{
Object::ID initializerId = insn.word(4);
- if(getObject(initializerId).kind != Object::Kind::Constant)
+ if(shader.getObject(initializerId).kind != Object::Kind::Constant)
{
UNIMPLEMENTED("b/148241854: Non-constant initializers not yet implemented"); // FIXME(b/148241854)
}
@@ -245,13 +244,15 @@
case spv::StorageClassWorkgroup:
{
bool interleavedByLane = IsStorageInterleavedByLane(objectTy.storageClass);
- auto ptr = GetPointerToData(resultId, 0, false, state);
- Operand initialValue(this, state, initializerId);
- VisitMemoryObject(resultId, false, [&](const MemoryElement &el) {
+ auto ptr = GetPointerToData(resultId, 0, false);
+ Operand initialValue(shader, *this, initializerId);
+
+ shader.VisitMemoryObject(resultId, false, [&](const MemoryElement &el) {
auto p = GetElementPointer(ptr, el.offset, interleavedByLane);
auto robustness = OutOfBoundsBehavior::UndefinedBehavior; // Local variables are always within bounds.
- p.Store(initialValue.Float(el.index), robustness, state->activeLaneMask());
+ p.Store(initialValue.Float(el.index), robustness, activeLaneMask());
});
+
if(objectTy.storageClass == spv::StorageClassWorkgroup)
{
// Initialization of workgroup memory is done by each subgroup and requires waiting on a barrier.
@@ -268,24 +269,24 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitCopyMemory(InsnIterator insn, EmitState *state) const
+SpirvShader::EmitResult SpirvShader::EmitState::EmitCopyMemory(InsnIterator insn)
{
Object::ID dstPtrId = insn.word(1);
Object::ID srcPtrId = insn.word(2);
- auto &dstPtrTy = getObjectType(dstPtrId);
- auto &srcPtrTy = getObjectType(srcPtrId);
+ auto &dstPtrTy = shader.getObjectType(dstPtrId);
+ auto &srcPtrTy = shader.getObjectType(srcPtrId);
ASSERT(dstPtrTy.element == srcPtrTy.element);
bool dstInterleavedByLane = IsStorageInterleavedByLane(dstPtrTy.storageClass);
bool srcInterleavedByLane = IsStorageInterleavedByLane(srcPtrTy.storageClass);
- auto dstPtr = GetPointerToData(dstPtrId, 0, false, state);
- auto srcPtr = GetPointerToData(srcPtrId, 0, false, state);
+ auto dstPtr = GetPointerToData(dstPtrId, 0, false);
+ auto srcPtr = GetPointerToData(srcPtrId, 0, false);
std::unordered_map<uint32_t, uint32_t> srcOffsets;
- VisitMemoryObject(srcPtrId, false, [&](const MemoryElement &el) { srcOffsets[el.index] = el.offset; });
+ shader.VisitMemoryObject(srcPtrId, false, [&](const MemoryElement &el) { srcOffsets[el.index] = el.offset; });
- VisitMemoryObject(dstPtrId, false, [&](const MemoryElement &el) {
+ shader.VisitMemoryObject(dstPtrId, false, [&](const MemoryElement &el) {
auto it = srcOffsets.find(el.index);
ASSERT(it != srcOffsets.end());
auto srcOffset = it->second;
@@ -297,15 +298,15 @@
// TODO(b/131224163): Optimize based on src/dst storage classes.
auto robustness = OutOfBoundsBehavior::RobustBufferAccess;
- auto value = src.Load<SIMD::Float>(robustness, state->activeLaneMask());
- dst.Store(value, robustness, state->activeLaneMask());
+ auto value = src.Load<SIMD::Float>(robustness, activeLaneMask());
+ dst.Store(value, robustness, activeLaneMask());
});
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitMemoryBarrier(InsnIterator insn, EmitState *state) const
+SpirvShader::EmitResult SpirvShader::EmitState::EmitMemoryBarrier(InsnIterator insn)
{
- auto semantics = spv::MemorySemanticsMask(GetConstScalarInt(insn.word(2)));
+ auto semantics = spv::MemorySemanticsMask(shader.GetConstScalarInt(insn.word(2)));
// TODO(b/176819536): We probably want to consider the memory scope here.
// For now, just always emit the full fence.
Fence(semantics);
@@ -406,19 +407,18 @@
}
}
-SIMD::Pointer SpirvShader::GetPointerToData(Object::ID id, SIMD::Int arrayIndices, bool nonUniform, const EmitState *state) const
+SIMD::Pointer SpirvShader::EmitState::GetPointerToData(Object::ID id, SIMD::Int arrayIndices, bool nonUniform) const
{
- auto routine = state->routine;
- auto &object = getObject(id);
+ auto &object = shader.getObject(id);
switch(object.kind)
{
case Object::Kind::Pointer:
case Object::Kind::InterfaceVariable:
- return state->getPointer(id);
+ return getPointer(id);
case Object::Kind::DescriptorSet:
{
- const auto &d = descriptorDecorations.at(id);
+ const auto &d = shader.descriptorDecorations.at(id);
ASSERT(d.DescriptorSet >= 0 && static_cast<uint32_t>(d.DescriptorSet) < vk::MAX_BOUND_DESCRIPTOR_SETS);
ASSERT(d.Binding >= 0);
ASSERT(routine->pipelineLayout->getDescriptorCount(d.DescriptorSet, d.Binding) != 0); // "If descriptorCount is zero this binding entry is reserved and the resource must not be accessed from any stage via this binding within any pipeline using the set layout."
@@ -426,11 +426,11 @@
uint32_t bindingOffset = routine->pipelineLayout->getBindingOffset(d.DescriptorSet, d.Binding);
uint32_t descriptorSize = routine->pipelineLayout->getDescriptorSize(d.DescriptorSet, d.Binding);
- auto set = state->getPointer(id);
+ auto set = getPointer(id);
if(nonUniform)
{
SIMD::Int descriptorOffset = bindingOffset + descriptorSize * arrayIndices;
- auto robustness = getOutOfBoundsBehavior(id, state);
+ auto robustness = shader.getOutOfBoundsBehavior(id, routine->pipelineLayout);
ASSERT(routine->pipelineLayout->getDescriptorType(d.DescriptorSet, d.Binding) != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT);
std::vector<Pointer<Byte>> pointers(SIMD::Width);
@@ -445,7 +445,7 @@
{
SIMD::Int dynamicOffsetIndex = SIMD::Int(routine->pipelineLayout->getDynamicOffsetIndex(d.DescriptorSet, d.Binding) + arrayIndices);
SIMD::Pointer routineDynamicOffsets = SIMD::Pointer(routine->descriptorDynamicOffsets, 0, sizeof(int) * dynamicOffsetIndex);
- SIMD::Int dynamicOffsets = routineDynamicOffsets.Load<SIMD::Int>(robustness, state->activeLaneMask());
+ SIMD::Int dynamicOffsets = routineDynamicOffsets.Load<SIMD::Int>(robustness, activeLaneMask());
ptr += dynamicOffsets;
}
return ptr;
@@ -493,24 +493,25 @@
}
}
-void SpirvShader::OffsetToElement(SIMD::Pointer &ptr, Object::ID elementId, int32_t arrayStride, const EmitState *state) const
+void SpirvShader::EmitState::OffsetToElement(SIMD::Pointer &ptr, Object::ID elementId, int32_t arrayStride) const
{
if(elementId != 0 && arrayStride != 0)
{
- auto &elementObject = getObject(elementId);
+ auto &elementObject = shader.getObject(elementId);
ASSERT(elementObject.kind == Object::Kind::Constant || elementObject.kind == Object::Kind::Intermediate);
+
if(elementObject.kind == Object::Kind::Constant)
{
- ptr += GetConstScalarInt(elementId) * arrayStride;
+ ptr += shader.GetConstScalarInt(elementId) * arrayStride;
}
else
{
- ptr += state->getIntermediate(elementId).Int(0) * arrayStride;
+ ptr += getIntermediate(elementId).Int(0) * arrayStride;
}
}
}
-void SpirvShader::Fence(spv::MemorySemanticsMask semantics) const
+void SpirvShader::EmitState::Fence(spv::MemorySemanticsMask semantics) const
{
if(semantics != spv::MemorySemanticsMaskNone)
{