Move EmitState out of SpirvShader
EmitState was a class nested inside SpirvShader. This refactoring
separates them so that SpirvShader only handles SPIR-V parsing, while
EmitState performs Reactor code emission.
Bug: b/247020580
Change-Id: I252c0317ffad0a72e48c3ae74119bcea40fcb752
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/68491
Kokoro-Result: kokoro <noreply+kokoro@google.com>
Tested-by: Nicolas Capens <nicolascapens@google.com>
Commit-Queue: Nicolas Capens <nicolascapens@google.com>
Reviewed-by: Alexis Hétu <sugoi@google.com>
diff --git a/src/Pipeline/ComputeProgram.cpp b/src/Pipeline/ComputeProgram.cpp
index b2f9c5e..bb05cee 100644
--- a/src/Pipeline/ComputeProgram.cpp
+++ b/src/Pipeline/ComputeProgram.cpp
@@ -244,7 +244,7 @@
auto groupX = baseGroupX + groupOffsetX;
MARL_SCOPED_EVENT("groupX: %d, groupY: %d, groupZ: %d", groupX, groupY, groupZ);
- using Coroutine = std::unique_ptr<rr::Stream<SpirvShader::YieldResult>>;
+ using Coroutine = std::unique_ptr<rr::Stream<EmitState::YieldResult>>;
std::queue<Coroutine> coroutines;
if(shader->getAnalysis().ContainsControlBarriers)
@@ -269,7 +269,7 @@
auto coroutine = std::move(coroutines.front());
coroutines.pop();
- SpirvShader::YieldResult result;
+ EmitState::YieldResult result;
if(coroutine->await(result))
{
// TODO: Consider result (when the enum is more than 1 entry).
diff --git a/src/Pipeline/ComputeProgram.hpp b/src/Pipeline/ComputeProgram.hpp
index 25e57c7..a4a85d2 100644
--- a/src/Pipeline/ComputeProgram.hpp
+++ b/src/Pipeline/ComputeProgram.hpp
@@ -36,7 +36,7 @@
struct Constants;
// ComputeProgram builds a SPIR-V compute shader.
-class ComputeProgram : public Coroutine<SpirvShader::YieldResult(
+class ComputeProgram : public Coroutine<EmitState::YieldResult(
const vk::Device *device,
void *data,
int32_t workgroupX,
diff --git a/src/Pipeline/SpirvShader.cpp b/src/Pipeline/SpirvShader.cpp
index 2973cdd..06fa405 100644
--- a/src/Pipeline/SpirvShader.cpp
+++ b/src/Pipeline/SpirvShader.cpp
@@ -1260,7 +1260,7 @@
}
}
-SIMD::Pointer SpirvShader::EmitState::WalkExplicitLayoutAccessChain(Object::ID baseId, Object::ID elementId, const Span &indexIds, bool nonUniform) const
+SIMD::Pointer EmitState::WalkExplicitLayoutAccessChain(Object::ID baseId, Object::ID elementId, const Span &indexIds, bool nonUniform) const
{
// Produce an offset into external memory in sizeof(float) units
@@ -1364,7 +1364,7 @@
}
break;
default:
- UNREACHABLE("%s", OpcodeName(type.definition.opcode()));
+ UNREACHABLE("%s", shader.OpcodeName(type.definition.opcode()));
}
}
@@ -1372,7 +1372,7 @@
return ptr;
}
-SIMD::Pointer SpirvShader::EmitState::WalkAccessChain(Object::ID baseId, Object::ID elementId, const Span &indexIds, bool nonUniform) const
+SIMD::Pointer EmitState::WalkAccessChain(Object::ID baseId, Object::ID elementId, const Span &indexIds, bool nonUniform) const
{
// TODO: avoid doing per-lane work in some cases if we can?
auto &baseObject = shader.getObject(baseId);
@@ -1470,7 +1470,7 @@
break;
default:
- UNREACHABLE("%s", OpcodeName(type.opcode()));
+ UNREACHABLE("%s", shader.OpcodeName(type.opcode()));
}
}
@@ -1592,7 +1592,7 @@
}
}
-void SpirvShader::Decorations::Apply(const sw::SpirvShader::Decorations &src)
+void SpirvShader::Decorations::Apply(const Decorations &src)
{
// Apply a decoration group to this set of decorations
if(src.HasBuiltIn)
@@ -1841,7 +1841,7 @@
state.EmitBlocks(getFunction(entryPoint).entry);
}
-void SpirvShader::EmitState::EmitInstructions(InsnIterator begin, InsnIterator end)
+void EmitState::EmitInstructions(InsnIterator begin, InsnIterator end)
{
for(auto insn = begin; insn != end; insn++)
{
@@ -1859,7 +1859,7 @@
}
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitInstruction(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitInstruction(InsnIterator insn)
{
auto opcode = insn.opcode();
@@ -2239,14 +2239,14 @@
return EmitArrayLength(insn);
default:
- UNREACHABLE("%s", OpcodeName(opcode));
+ UNREACHABLE("%s", shader.OpcodeName(opcode));
break;
}
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitAccessChain(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitAccessChain(InsnIterator insn)
{
Type::ID typeId = insn.word(1);
Object::ID resultId = insn.word(2);
@@ -2289,7 +2289,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitCompositeConstruct(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitCompositeConstruct(InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
auto &dst = createIntermediate(insn.resultId(), type.componentCount);
@@ -2311,7 +2311,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitCompositeInsert(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitCompositeInsert(InsnIterator insn)
{
Type::ID resultTypeId = insn.word(1);
auto &type = shader.getType(resultTypeId);
@@ -2342,7 +2342,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitCompositeExtract(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitCompositeExtract(InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
auto &dst = createIntermediate(insn.resultId(), type.componentCount);
@@ -2359,7 +2359,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitVectorShuffle(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitVectorShuffle(InsnIterator insn)
{
// Note: number of components in result, first vector, and second vector are all independent.
uint32_t resultSize = shader.getType(insn.resultTypeId()).componentCount;
@@ -2389,7 +2389,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitVectorExtractDynamic(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitVectorExtractDynamic(InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
auto &dst = createIntermediate(insn.resultId(), type.componentCount);
@@ -2409,7 +2409,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitVectorInsertDynamic(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitVectorInsertDynamic(InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
auto &dst = createIntermediate(insn.resultId(), type.componentCount);
@@ -2426,52 +2426,48 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitSelect(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitSelect(InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
auto result = shader.getObject(insn.resultId());
auto cond = Operand(shader, *this, insn.word(3));
auto condIsScalar = (cond.componentCount == 1);
- switch(result.kind)
+ if(result.kind == Object::Kind::Pointer)
{
- case Object::Kind::Pointer:
+ ASSERT(condIsScalar);
+ ASSERT(type.storageClass == spv::StorageClassPhysicalStorageBuffer);
+
+ auto &lhs = getPointer(insn.word(4));
+ auto &rhs = getPointer(insn.word(5));
+ createPointer(insn.resultId(), SIMD::Pointer::IfThenElse(cond.Int(0), lhs, rhs));
+
+ SPIRV_SHADER_DBG("{0}: {1}", insn.word(3), cond);
+ SPIRV_SHADER_DBG("{0}: {1}", insn.word(4), lhs);
+ SPIRV_SHADER_DBG("{0}: {1}", insn.word(5), rhs);
+ }
+ else
+ {
+ auto lhs = Operand(shader, *this, insn.word(4));
+ auto rhs = Operand(shader, *this, insn.word(5));
+ auto &dst = createIntermediate(insn.resultId(), type.componentCount);
+
+ for(auto i = 0u; i < type.componentCount; i++)
{
- ASSERT(condIsScalar);
- ASSERT(type.storageClass == spv::StorageClassPhysicalStorageBuffer);
-
- auto &lhs = getPointer(insn.word(4));
- auto &rhs = getPointer(insn.word(5));
- createPointer(insn.resultId(), SIMD::Pointer::IfThenElse(cond.Int(0), lhs, rhs));
-
- SPIRV_SHADER_DBG("{0}: {1}", insn.word(3), cond);
- SPIRV_SHADER_DBG("{0}: {1}", insn.word(4), lhs);
- SPIRV_SHADER_DBG("{0}: {1}", insn.word(5), rhs);
+ auto sel = cond.Int(condIsScalar ? 0 : i);
+ dst.move(i, (sel & lhs.Int(i)) | (~sel & rhs.Int(i))); // TODO: IfThenElse()
}
- break;
- default:
- {
- auto lhs = Operand(shader, *this, insn.word(4));
- auto rhs = Operand(shader, *this, insn.word(5));
- auto &dst = createIntermediate(insn.resultId(), type.componentCount);
- for(auto i = 0u; i < type.componentCount; i++)
- {
- auto sel = cond.Int(condIsScalar ? 0 : i);
- dst.move(i, (sel & lhs.Int(i)) | (~sel & rhs.Int(i))); // TODO: IfThenElse()
- }
- SPIRV_SHADER_DBG("{0}: {1}", insn.word(2), dst);
- SPIRV_SHADER_DBG("{0}: {1}", insn.word(3), cond);
- SPIRV_SHADER_DBG("{0}: {1}", insn.word(4), lhs);
- SPIRV_SHADER_DBG("{0}: {1}", insn.word(5), rhs);
- }
- break;
+ SPIRV_SHADER_DBG("{0}: {1}", insn.word(2), dst);
+ SPIRV_SHADER_DBG("{0}: {1}", insn.word(3), cond);
+ SPIRV_SHADER_DBG("{0}: {1}", insn.word(4), lhs);
+ SPIRV_SHADER_DBG("{0}: {1}", insn.word(5), rhs);
}
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitAny(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitAny(InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
ASSERT(type.componentCount == 1);
@@ -2490,7 +2486,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitAll(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitAll(InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
ASSERT(type.componentCount == 1);
@@ -2509,14 +2505,14 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitAtomicOp(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitAtomicOp(InsnIterator insn)
{
auto &resultType = shader.getType(Type::ID(insn.word(1)));
Object::ID resultId = insn.word(2);
Object::ID pointerId = insn.word(3);
Object::ID semanticsId = insn.word(5);
auto memorySemantics = static_cast<spv::MemorySemanticsMask>(shader.getObject(semanticsId).constantValue[0]);
- auto memoryOrder = MemoryOrder(memorySemantics);
+ auto memoryOrder = shader.MemoryOrder(memorySemantics);
// Where no value is provided (increment/decrement) use an implicit value of 1.
auto value = (insn.wordCount() == 7) ? Operand(shader, *this, insn.word(6)).UInt(0) : RValue<SIMD::UInt>(1);
auto &dst = createIntermediate(resultId, resultType.componentCount);
@@ -2571,7 +2567,7 @@
v = ExchangeAtomic(Pointer<UInt>(ptr.getPointerForLane(j)), laneValue, memoryOrder);
break;
default:
- UNREACHABLE("%s", OpcodeName(insn.opcode()));
+ UNREACHABLE("%s", shader.OpcodeName(insn.opcode()));
break;
}
result = Insert(result, v, j);
@@ -2582,16 +2578,16 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitAtomicCompareExchange(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitAtomicCompareExchange(InsnIterator insn)
{
// Separate from EmitAtomicOp due to different instruction encoding
auto &resultType = shader.getType(Type::ID(insn.word(1)));
Object::ID resultId = insn.word(2);
auto memorySemanticsEqual = static_cast<spv::MemorySemanticsMask>(shader.getObject(insn.word(5)).constantValue[0]);
- auto memoryOrderEqual = MemoryOrder(memorySemanticsEqual);
+ auto memoryOrderEqual = shader.MemoryOrder(memorySemanticsEqual);
auto memorySemanticsUnequal = static_cast<spv::MemorySemanticsMask>(shader.getObject(insn.word(6)).constantValue[0]);
- auto memoryOrderUnequal = MemoryOrder(memorySemanticsUnequal);
+ auto memoryOrderUnequal = shader.MemoryOrder(memorySemanticsUnequal);
auto value = Operand(shader, *this, insn.word(7));
auto comparator = Operand(shader, *this, insn.word(8));
@@ -2615,7 +2611,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitCopyObject(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitCopyObject(InsnIterator insn)
{
auto src = Operand(shader, *this, insn.word(3));
if(src.isPointer())
@@ -2638,7 +2634,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitArrayLength(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitArrayLength(InsnIterator insn)
{
auto structPtrId = Object::ID(insn.word(3));
auto arrayFieldIdx = insn.word(4);
@@ -2670,14 +2666,14 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitExtendedInstruction(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitExtendedInstruction(InsnIterator insn)
{
auto ext = shader.getExtension(insn.word(3));
switch(ext.name)
{
- case Extension::GLSLstd450:
+ case SpirvShader::Extension::GLSLstd450:
return EmitExtGLSLstd450(insn);
- case Extension::NonSemanticInfo:
+ case SpirvShader::Extension::NonSemanticInfo:
// An extended set name which is prefixed with "NonSemantic." is
// guaranteed to contain only non-semantic instructions and all
// OpExtInst instructions referencing this set can be ignored.
@@ -2754,21 +2750,21 @@
}
}
-SpirvShader::Operand::Operand(const SpirvShader &shader, const EmitState &state, SpirvShader::Object::ID objectId)
+EmitState::Operand::Operand(const SpirvShader &shader, const EmitState &state, Object::ID objectId)
: Operand(state, shader.getObject(objectId))
{}
-SpirvShader::Operand::Operand(const EmitState &state, const Object &object)
- : constant(object.kind == SpirvShader::Object::Kind::Constant ? object.constantValue.data() : nullptr)
- , intermediate(object.kind == SpirvShader::Object::Kind::Intermediate ? &state.getIntermediate(object.id()) : nullptr)
- , pointer(object.kind == SpirvShader::Object::Kind::Pointer ? &state.getPointer(object.id()) : nullptr)
- , sampledImage(object.kind == SpirvShader::Object::Kind::SampledImage ? &state.getSampledImage(object.id()) : nullptr)
+EmitState::Operand::Operand(const EmitState &state, const Object &object)
+ : constant(object.kind == Object::Kind::Constant ? object.constantValue.data() : nullptr)
+ , intermediate(object.kind == Object::Kind::Intermediate ? &state.getIntermediate(object.id()) : nullptr)
+ , pointer(object.kind == Object::Kind::Pointer ? &state.getPointer(object.id()) : nullptr)
+ , sampledImage(object.kind == Object::Kind::SampledImage ? &state.getSampledImage(object.id()) : nullptr)
, componentCount(intermediate ? intermediate->componentCount : object.constantValue.size())
{
ASSERT(intermediate || constant || pointer || sampledImage);
}
-SpirvShader::Operand::Operand(const Intermediate &value)
+EmitState::Operand::Operand(const Intermediate &value)
: intermediate(&value)
, componentCount(value.componentCount)
{
diff --git a/src/Pipeline/SpirvShader.hpp b/src/Pipeline/SpirvShader.hpp
index 51b9fe9..9a3ad64 100644
--- a/src/Pipeline/SpirvShader.hpp
+++ b/src/Pipeline/SpirvShader.hpp
@@ -149,18 +149,19 @@
class SpirvShader
{
public:
+ SpirvShader(VkShaderStageFlagBits stage,
+ const char *entryPointName,
+ const SpirvBinary &insns,
+ const vk::RenderPass *renderPass,
+ uint32_t subpassIndex,
+ bool robustBufferAccess);
+
+ ~SpirvShader();
+
SpirvBinary insns;
- using ImageSampler = void(void *texture, void *uvsIn, void *texelOut, void *constants);
-
- enum class YieldResult
- {
- ControlBarrier,
- };
-
class Type;
class Object;
- class EmitState;
// Pseudo-iterator over SPIR-V instructions, designed to support range-based-for.
class InsnIterator
@@ -387,16 +388,6 @@
Kind kind = Kind::Unknown;
};
- class SampledImagePointer : public SIMD::Pointer
- {
- public:
- SampledImagePointer(SIMD::Pointer image, Object::ID sampler)
- : SIMD::Pointer(image)
- , samplerId(sampler)
- {}
- Object::ID samplerId;
- };
-
// Block is an interval of SPIR-V instructions, starting with the
// opening OpLabel, and ending with a termination instruction.
class Block
@@ -536,108 +527,6 @@
inline operator Object::ID() const { return Object::ID(value()); }
};
- // OpImageSample variants
- enum Variant : uint32_t
- {
- None, // No Dref or Proj. Also used by OpImageFetch and OpImageQueryLod.
- Dref,
- Proj,
- ProjDref,
- VARIANT_LAST = ProjDref
- };
-
- // Compact representation of image instruction state that is passed to the
- // trampoline function for retrieving/generating the corresponding sampling routine.
- struct ImageInstructionSignature
- {
- ImageInstructionSignature(Variant variant, SamplerMethod samplerMethod)
- {
- this->variant = variant;
- this->samplerMethod = samplerMethod;
- }
-
- // Unmarshal from raw 32-bit data
- explicit ImageInstructionSignature(uint32_t signature)
- : signature(signature)
- {}
-
- SamplerFunction getSamplerFunction() const
- {
- return { samplerMethod, offset != 0, sample != 0 };
- }
-
- bool isDref() const
- {
- return (variant == Dref) || (variant == ProjDref);
- }
-
- bool isProj() const
- {
- return (variant == Proj) || (variant == ProjDref);
- }
-
- bool hasLod() const
- {
- return samplerMethod == Lod || samplerMethod == Fetch; // We always pass a Lod operand for Fetch operations.
- }
-
- bool hasGrad() const
- {
- return samplerMethod == Grad;
- }
-
- union
- {
- struct
- {
- Variant variant : BITS(VARIANT_LAST);
- SamplerMethod samplerMethod : BITS(SAMPLER_METHOD_LAST);
- uint32_t gatherComponent : 2;
- uint32_t dim : BITS(spv::DimSubpassData); // spv::Dim
- uint32_t arrayed : 1;
- uint32_t imageFormat : BITS(spv::ImageFormatR64i); // spv::ImageFormat
-
- // Parameters are passed to the sampling routine in this order:
- uint32_t coordinates : 3; // 1-4 (does not contain projection component)
- /* uint32_t dref : 1; */ // Indicated by Variant::ProjDref|Dref
- /* uint32_t lodOrBias : 1; */ // Indicated by SamplerMethod::Lod|Bias|Fetch
- uint32_t grad : 2; // 0-3 components (for each of dx / dy)
- uint32_t offset : 2; // 0-3 components
- uint32_t sample : 1; // 0-1 scalar integer
- };
-
- uint32_t signature = 0;
- };
- };
-
- // This gets stored as a literal in the generated code, so it should be compact.
- static_assert(sizeof(ImageInstructionSignature) == sizeof(uint32_t), "ImageInstructionSignature must be 32-bit");
-
- struct ImageInstruction : public ImageInstructionSignature
- {
- ImageInstruction(InsnIterator insn, const SpirvShader &spirv, const EmitState &state);
-
- const uint32_t position;
-
- Type::ID resultTypeId = 0;
- Object::ID resultId = 0;
- Object::ID imageId = 0;
- Object::ID samplerId = 0;
- Object::ID coordinateId = 0;
- Object::ID texelId = 0;
- Object::ID drefId = 0;
- Object::ID lodOrBiasId = 0;
- Object::ID gradDxId = 0;
- Object::ID gradDyId = 0;
- Object::ID offsetId = 0;
- Object::ID sampleId = 0;
-
- private:
- static ImageInstructionSignature parseVariantAndMethod(InsnIterator insn);
- static uint32_t getImageOperandsIndex(InsnIterator insn);
- static uint32_t getImageOperandsMask(InsnIterator insn);
- };
-
// This method is for retrieving an ID that uniquely identifies the
// shader entry point represented by this object.
uint64_t getIdentifier() const
@@ -645,15 +534,6 @@
return ((uint64_t)entryPoint.value() << 32) | insns.getIdentifier();
}
- SpirvShader(VkShaderStageFlagBits stage,
- const char *entryPointName,
- const SpirvBinary &insns,
- const vk::RenderPass *renderPass,
- uint32_t subpassIndex,
- bool robustBufferAccess);
-
- ~SpirvShader();
-
struct ExecutionModes
{
bool EarlyFragmentTests : 1;
@@ -965,6 +845,9 @@
HandleMap<Type> types;
HandleMap<Object> defs;
+
+ // TODO(b/247020580): Encapsulate
+public:
HandleMap<Function> functions;
std::unordered_map<StringID, String> strings;
@@ -983,53 +866,6 @@
// Creates an Object for the instruction's result in 'defs'.
void DefineResult(const InsnIterator &insn);
- // Returns true if data in the given storage class is word-interleaved
- // by each SIMD vector lane, otherwise data is stored linerally.
- //
- // Each lane addresses a single word, picked by a base pointer and an
- // integer offset.
- //
- // A word is currently 32 bits (single float, int32_t, uint32_t).
- // A lane is a single element of a SIMD vector register.
- //
- // Storage interleaved by lane - (IsStorageInterleavedByLane() == true):
- // ---------------------------------------------------------------------
- //
- // Address = PtrBase + sizeof(Word) * (SIMD::Width * LaneOffset + LaneIndex)
- //
- // Assuming SIMD::Width == 4:
- //
- // Lane[0] | Lane[1] | Lane[2] | Lane[3]
- // ===========+===========+===========+==========
- // LaneOffset=0: | Word[0] | Word[1] | Word[2] | Word[3]
- // ---------------+-----------+-----------+-----------+----------
- // LaneOffset=1: | Word[4] | Word[5] | Word[6] | Word[7]
- // ---------------+-----------+-----------+-----------+----------
- // LaneOffset=2: | Word[8] | Word[9] | Word[a] | Word[b]
- // ---------------+-----------+-----------+-----------+----------
- // LaneOffset=3: | Word[c] | Word[d] | Word[e] | Word[f]
- //
- //
- // Linear storage - (IsStorageInterleavedByLane() == false):
- // ---------------------------------------------------------
- //
- // Address = PtrBase + sizeof(Word) * LaneOffset
- //
- // Lane[0] | Lane[1] | Lane[2] | Lane[3]
- // ===========+===========+===========+==========
- // LaneOffset=0: | Word[0] | Word[0] | Word[0] | Word[0]
- // ---------------+-----------+-----------+-----------+----------
- // LaneOffset=1: | Word[1] | Word[1] | Word[1] | Word[1]
- // ---------------+-----------+-----------+-----------+----------
- // LaneOffset=2: | Word[2] | Word[2] | Word[2] | Word[2]
- // ---------------+-----------+-----------+-----------+----------
- // LaneOffset=3: | Word[3] | Word[3] | Word[3] | Word[3]
- //
- static bool IsStorageInterleavedByLane(spv::StorageClass storageClass);
- static bool IsExplicitLayout(spv::StorageClass storageClass);
-
- static SIMD::Pointer GetElementPointer(SIMD::Pointer structure, uint32_t offset, bool interleavedByLane);
-
// Output storage buffers and images should not be affected by helper invocations
static bool StoresInHelperInvocation(spv::StorageClass storageClass);
@@ -1062,7 +898,270 @@
void ProcessInterfaceVariable(Object &object);
+ const Type &getType(Type::ID id) const
+ {
+ auto it = types.find(id);
+ ASSERT_MSG(it != types.end(), "Unknown type %d", id.value());
+ return it->second;
+ }
+
+ const Type &getType(const Object &object) const
+ {
+ return getType(object.typeId());
+ }
+
+ const Object &getObject(Object::ID id) const
+ {
+ auto it = defs.find(id);
+ ASSERT_MSG(it != defs.end(), "Unknown object %d", id.value());
+ return it->second;
+ }
+
+ const Type &getObjectType(Object::ID id) const
+ {
+ return getType(getObject(id));
+ }
+
+ const Function &getFunction(Function::ID id) const
+ {
+ auto it = functions.find(id);
+ ASSERT_MSG(it != functions.end(), "Unknown function %d", id.value());
+ return it->second;
+ }
+
+ const String &getString(StringID id) const
+ {
+ auto it = strings.find(id);
+ ASSERT_MSG(it != strings.end(), "Unknown string %d", id.value());
+ return it->second;
+ }
+
+ const Extension &getExtension(Extension::ID id) const
+ {
+ auto it = extensionsByID.find(id);
+ ASSERT_MSG(it != extensionsByID.end(), "Unknown extension %d", id.value());
+ return it->second;
+ }
+
+ OutOfBoundsBehavior getOutOfBoundsBehavior(Object::ID pointerId, const vk::PipelineLayout *pipelineLayout) const;
+
+ // Returns the *component* offset in the literal for the given access chain.
+ uint32_t WalkLiteralAccessChain(Type::ID id, const Span &indexes) const;
+
+ uint32_t GetConstScalarInt(Object::ID id) const;
+ void EvalSpecConstantOp(InsnIterator insn);
+ void EvalSpecConstantUnaryOp(InsnIterator insn);
+ void EvalSpecConstantBinaryOp(InsnIterator insn);
+
+ // Fragment input interpolation functions
+ uint32_t GetNumInputComponents(int32_t location) const;
+ uint32_t GetPackedInterpolant(int32_t location) const;
+
+ // WriteCFGGraphVizDotFile() writes a graphviz dot file of the shader's
+ // control flow to the given file path.
+ void WriteCFGGraphVizDotFile(const char *path) const;
+
+ // OpcodeName() returns the name of the opcode op.
+ static const char *OpcodeName(spv::Op op);
+ static std::memory_order MemoryOrder(spv::MemorySemanticsMask memorySemantics);
+
+ // IsStatement() returns true if the given opcode actually performs
+ // work (as opposed to declaring a type, defining a function start / end,
+ // etc).
+ static bool IsStatement(spv::Op op);
+
+ // HasTypeAndResult() returns true if the given opcode's instruction
+ // has a result type ID and result ID, i.e. defines an Object.
+ static bool HasTypeAndResult(spv::Op op);
+
+ // Returns 0 when invalid.
+ static VkShaderStageFlagBits executionModelToStage(spv::ExecutionModel model);
+
+ static bool IsExplicitLayout(spv::StorageClass storageClass);
+};
+
+class EmitState
+{
+ using Type = SpirvShader::Type;
+ using Object = SpirvShader::Object;
+ using Block = SpirvShader::Block;
+ using InsnIterator = SpirvShader::InsnIterator;
+ using Decorations = SpirvShader::Decorations;
+ using Span = SpirvShader::Span;
+
public:
+ EmitState(const SpirvShader &shader,
+ SpirvRoutine *routine,
+ SpirvShader::Function::ID function,
+ RValue<SIMD::Int> activeLaneMask,
+ RValue<SIMD::Int> storesAndAtomicsMask,
+ const vk::DescriptorSet::Bindings &descriptorSets,
+ unsigned int multiSampleCount)
+ : shader(shader)
+ , routine(routine)
+ , function(function)
+ , activeLaneMaskValue(activeLaneMask.value())
+ , storesAndAtomicsMaskValue(storesAndAtomicsMask.value())
+ , descriptorSets(descriptorSets)
+ , multiSampleCount(multiSampleCount)
+ {
+ }
+
+ // Returns the mask describing the active lanes as updated by dynamic
+ // control flow. Active lanes include helper invocations, used for
+ // calculating fragment derivitives, which must not perform memory
+ // stores or atomic writes.
+ //
+ // Use activeStoresAndAtomicsMask() to consider both control flow and
+ // lanes which are permitted to perform memory stores and atomic
+ // operations
+ RValue<SIMD::Int> activeLaneMask() const
+ {
+ ASSERT(activeLaneMaskValue != nullptr);
+ return RValue<SIMD::Int>(activeLaneMaskValue);
+ }
+
+ // Returns the immutable lane mask that describes which lanes are
+ // permitted to perform memory stores and atomic operations.
+ // Note that unlike activeStoresAndAtomicsMask() this mask *does not*
+ // consider lanes that have been made inactive due to control flow.
+ RValue<SIMD::Int> storesAndAtomicsMask() const
+ {
+ ASSERT(storesAndAtomicsMaskValue != nullptr);
+ return RValue<SIMD::Int>(storesAndAtomicsMaskValue);
+ }
+
+ // Returns a lane mask that describes which lanes are permitted to
+ // perform memory stores and atomic operations, considering lanes that
+ // may have been made inactive due to control flow.
+ RValue<SIMD::Int> activeStoresAndAtomicsMask() const
+ {
+ return activeLaneMask() & storesAndAtomicsMask();
+ }
+
+ // Add a new active lane mask edge from the current block to out.
+ // The edge mask value will be (mask AND activeLaneMaskValue).
+ // If multiple active lane masks are added for the same edge, then
+ // they will be ORed together.
+ void addOutputActiveLaneMaskEdge(Block::ID out, RValue<SIMD::Int> mask);
+
+ // Add a new active lane mask for the edge from -> to.
+ // If multiple active lane masks are added for the same edge, then
+ // they will be ORed together.
+ void addActiveLaneMaskEdge(Block::ID from, Block::ID to, RValue<SIMD::Int> mask);
+
+ // OpImageSample variants
+ enum Variant : uint32_t
+ {
+ None, // No Dref or Proj. Also used by OpImageFetch and OpImageQueryLod.
+ Dref,
+ Proj,
+ ProjDref,
+ VARIANT_LAST = ProjDref
+ };
+
+ // Compact representation of image instruction state that is passed to the
+ // trampoline function for retrieving/generating the corresponding sampling routine.
+ struct ImageInstructionSignature
+ {
+ ImageInstructionSignature(Variant variant, SamplerMethod samplerMethod)
+ {
+ this->variant = variant;
+ this->samplerMethod = samplerMethod;
+ }
+
+ // Unmarshal from raw 32-bit data
+ explicit ImageInstructionSignature(uint32_t signature)
+ : signature(signature)
+ {}
+
+ SamplerFunction getSamplerFunction() const
+ {
+ return { samplerMethod, offset != 0, sample != 0 };
+ }
+
+ bool isDref() const
+ {
+ return (variant == Dref) || (variant == ProjDref);
+ }
+
+ bool isProj() const
+ {
+ return (variant == Proj) || (variant == ProjDref);
+ }
+
+ bool hasLod() const
+ {
+ return samplerMethod == Lod || samplerMethod == Fetch; // We always pass a Lod operand for Fetch operations.
+ }
+
+ bool hasGrad() const
+ {
+ return samplerMethod == Grad;
+ }
+
+ union
+ {
+ struct
+ {
+ Variant variant : BITS(VARIANT_LAST);
+ SamplerMethod samplerMethod : BITS(SAMPLER_METHOD_LAST);
+ uint32_t gatherComponent : 2;
+ uint32_t dim : BITS(spv::DimSubpassData); // spv::Dim
+ uint32_t arrayed : 1;
+ uint32_t imageFormat : BITS(spv::ImageFormatR64i); // spv::ImageFormat
+
+ // Parameters are passed to the sampling routine in this order:
+ uint32_t coordinates : 3; // 1-4 (does not contain projection component)
+ /* uint32_t dref : 1; */ // Indicated by Variant::ProjDref|Dref
+ /* uint32_t lodOrBias : 1; */ // Indicated by SamplerMethod::Lod|Bias|Fetch
+ uint32_t grad : 2; // 0-3 components (for each of dx / dy)
+ uint32_t offset : 2; // 0-3 components
+ uint32_t sample : 1; // 0-1 scalar integer
+ };
+
+ uint32_t signature = 0;
+ };
+ };
+
+ // This gets stored as a literal in the generated code, so it should be compact.
+ static_assert(sizeof(ImageInstructionSignature) == sizeof(uint32_t), "ImageInstructionSignature must be 32-bit");
+
+ struct ImageInstruction : public ImageInstructionSignature
+ {
+ ImageInstruction(InsnIterator insn, const SpirvShader &shader, const EmitState &state);
+
+ const uint32_t position;
+
+ Type::ID resultTypeId = 0;
+ Object::ID resultId = 0;
+ Object::ID imageId = 0;
+ Object::ID samplerId = 0;
+ Object::ID coordinateId = 0;
+ Object::ID texelId = 0;
+ Object::ID drefId = 0;
+ Object::ID lodOrBiasId = 0;
+ Object::ID gradDxId = 0;
+ Object::ID gradDyId = 0;
+ Object::ID offsetId = 0;
+ Object::ID sampleId = 0;
+
+ private:
+ static ImageInstructionSignature parseVariantAndMethod(InsnIterator insn);
+ static uint32_t getImageOperandsIndex(InsnIterator insn);
+ static uint32_t getImageOperandsMask(InsnIterator insn);
+ };
+
+ class SampledImagePointer : public SIMD::Pointer
+ {
+ public:
+ SampledImagePointer(SIMD::Pointer image, Object::ID sampler)
+ : SIMD::Pointer(image)
+ , samplerId(sampler)
+ {}
+ Object::ID samplerId;
+ };
+
// Generic wrapper over either per-lane intermediate value, or a constant.
// Constants are transparently widened to per-lane values in operator[].
// This is appropriate in most cases -- if we're not going to do something
@@ -1070,7 +1169,7 @@
class Operand
{
public:
- Operand(const SpirvShader &shader, const EmitState &state, SpirvShader::Object::ID objectId);
+ Operand(const SpirvShader &shader, const EmitState &state, Object::ID objectId);
Operand(const Intermediate &value);
RValue<SIMD::Float> Float(uint32_t i) const
@@ -1147,6 +1246,58 @@
RR_PRINT_ONLY(friend struct rr::PrintValue::Ty<Operand>;)
+ Intermediate &createIntermediate(Object::ID id, uint32_t componentCount)
+ {
+ auto it = intermediates.emplace(std::piecewise_construct,
+ std::forward_as_tuple(id),
+ std::forward_as_tuple(componentCount));
+ ASSERT_MSG(it.second, "Intermediate %d created twice", id.value());
+ return it.first->second;
+ }
+
+ const Intermediate &getIntermediate(Object::ID id) const
+ {
+ auto it = intermediates.find(id);
+ ASSERT_MSG(it != intermediates.end(), "Unknown intermediate %d", id.value());
+ return it->second;
+ }
+
+ void createPointer(Object::ID id, SIMD::Pointer ptr)
+ {
+ bool added = pointers.emplace(id, ptr).second;
+ ASSERT_MSG(added, "Pointer %d created twice", id.value());
+ }
+
+ const SIMD::Pointer &getPointer(Object::ID id) const
+ {
+ auto it = pointers.find(id);
+ ASSERT_MSG(it != pointers.end(), "Unknown pointer %d", id.value());
+ return it->second;
+ }
+
+ void createSampledImage(Object::ID id, SampledImagePointer ptr)
+ {
+ bool added = sampledImages.emplace(id, ptr).second;
+ ASSERT_MSG(added, "Sampled image %d created twice", id.value());
+ }
+
+ const SampledImagePointer &getSampledImage(Object::ID id) const
+ {
+ auto it = sampledImages.find(id);
+ ASSERT_MSG(it != sampledImages.end(), "Unknown sampled image %d", id.value());
+ return it->second;
+ }
+
+ bool isSampledImage(Object::ID id) const
+ {
+ return sampledImages.find(id) != sampledImages.end();
+ }
+
+ const SIMD::Pointer &getImage(Object::ID id) const
+ {
+ return isSampledImage(id) ? getSampledImage(id) : getPointer(id);
+ }
+
// EmitResult is an enumerator of result values from the Emit functions.
enum class EmitResult
{
@@ -1154,383 +1305,236 @@
Terminator, // Reached a termination instruction.
};
- // EmitState holds control-flow state for the emit() pass.
- class EmitState
+ EmitResult EmitVariable(InsnIterator insn);
+ EmitResult EmitLoad(InsnIterator insn);
+ EmitResult EmitStore(InsnIterator insn);
+ EmitResult EmitAccessChain(InsnIterator insn);
+ EmitResult EmitCompositeConstruct(InsnIterator insn);
+ EmitResult EmitCompositeInsert(InsnIterator insn);
+ EmitResult EmitCompositeExtract(InsnIterator insn);
+ EmitResult EmitVectorShuffle(InsnIterator insn);
+ EmitResult EmitVectorTimesScalar(InsnIterator insn);
+ EmitResult EmitMatrixTimesVector(InsnIterator insn);
+ EmitResult EmitVectorTimesMatrix(InsnIterator insn);
+ EmitResult EmitMatrixTimesMatrix(InsnIterator insn);
+ EmitResult EmitOuterProduct(InsnIterator insn);
+ EmitResult EmitTranspose(InsnIterator insn);
+ EmitResult EmitVectorExtractDynamic(InsnIterator insn);
+ EmitResult EmitVectorInsertDynamic(InsnIterator insn);
+ EmitResult EmitUnaryOp(InsnIterator insn);
+ EmitResult EmitBinaryOp(InsnIterator insn);
+ EmitResult EmitDot(InsnIterator insn);
+ EmitResult EmitSelect(InsnIterator insn);
+ EmitResult EmitExtendedInstruction(InsnIterator insn);
+ EmitResult EmitExtGLSLstd450(InsnIterator insn);
+ EmitResult EmitAny(InsnIterator insn);
+ EmitResult EmitAll(InsnIterator insn);
+ EmitResult EmitBranch(InsnIterator insn);
+ EmitResult EmitBranchConditional(InsnIterator insn);
+ EmitResult EmitSwitch(InsnIterator insn);
+ EmitResult EmitUnreachable(InsnIterator insn);
+ EmitResult EmitReturn(InsnIterator insn);
+ EmitResult EmitTerminateInvocation(InsnIterator insn);
+ EmitResult EmitDemoteToHelperInvocation(InsnIterator insn);
+ EmitResult EmitIsHelperInvocation(InsnIterator insn);
+ EmitResult EmitFunctionCall(InsnIterator insn);
+ EmitResult EmitPhi(InsnIterator insn);
+ EmitResult EmitImageSample(const ImageInstruction &instruction);
+ EmitResult EmitImageQuerySizeLod(InsnIterator insn);
+ EmitResult EmitImageQuerySize(InsnIterator insn);
+ EmitResult EmitImageQueryLevels(InsnIterator insn);
+ EmitResult EmitImageQuerySamples(InsnIterator insn);
+ EmitResult EmitImageRead(const ImageInstruction &instruction);
+ EmitResult EmitImageWrite(const ImageInstruction &instruction);
+ EmitResult EmitImageTexelPointer(const ImageInstruction &instruction);
+ EmitResult EmitAtomicOp(InsnIterator insn);
+ EmitResult EmitAtomicCompareExchange(InsnIterator insn);
+ EmitResult EmitSampledImage(InsnIterator insn);
+ EmitResult EmitImage(InsnIterator insn);
+ EmitResult EmitCopyObject(InsnIterator insn);
+ EmitResult EmitCopyMemory(InsnIterator insn);
+ EmitResult EmitControlBarrier(InsnIterator insn);
+ EmitResult EmitMemoryBarrier(InsnIterator insn);
+ EmitResult EmitGroupNonUniform(InsnIterator insn);
+ EmitResult EmitArrayLength(InsnIterator insn);
+ EmitResult EmitBitcastPointer(Object::ID resultID, Operand &src);
+
+ enum InterpolationType
{
- public:
- EmitState(const SpirvShader &shader,
- SpirvRoutine *routine,
- Function::ID function,
- RValue<SIMD::Int> activeLaneMask,
- RValue<SIMD::Int> storesAndAtomicsMask,
- const vk::DescriptorSet::Bindings &descriptorSets,
- unsigned int multiSampleCount)
- : shader(shader)
- , routine(routine)
- , function(function)
- , activeLaneMaskValue(activeLaneMask.value())
- , storesAndAtomicsMaskValue(storesAndAtomicsMask.value())
- , descriptorSets(descriptorSets)
- , multiSampleCount(multiSampleCount)
- {
- }
-
- // Returns the mask describing the active lanes as updated by dynamic
- // control flow. Active lanes include helper invocations, used for
- // calculating fragment derivitives, which must not perform memory
- // stores or atomic writes.
- //
- // Use activeStoresAndAtomicsMask() to consider both control flow and
- // lanes which are permitted to perform memory stores and atomic
- // operations
- RValue<SIMD::Int> activeLaneMask() const
- {
- ASSERT(activeLaneMaskValue != nullptr);
- return RValue<SIMD::Int>(activeLaneMaskValue);
- }
-
- // Returns the immutable lane mask that describes which lanes are
- // permitted to perform memory stores and atomic operations.
- // Note that unlike activeStoresAndAtomicsMask() this mask *does not*
- // consider lanes that have been made inactive due to control flow.
- RValue<SIMD::Int> storesAndAtomicsMask() const
- {
- ASSERT(storesAndAtomicsMaskValue != nullptr);
- return RValue<SIMD::Int>(storesAndAtomicsMaskValue);
- }
-
- // Returns a lane mask that describes which lanes are permitted to
- // perform memory stores and atomic operations, considering lanes that
- // may have been made inactive due to control flow.
- RValue<SIMD::Int> activeStoresAndAtomicsMask() const
- {
- return activeLaneMask() & storesAndAtomicsMask();
- }
-
- // Add a new active lane mask edge from the current block to out.
- // The edge mask value will be (mask AND activeLaneMaskValue).
- // If multiple active lane masks are added for the same edge, then
- // they will be ORed together.
- void addOutputActiveLaneMaskEdge(Block::ID out, RValue<SIMD::Int> mask);
-
- // Add a new active lane mask for the edge from -> to.
- // If multiple active lane masks are added for the same edge, then
- // they will be ORed together.
- void addActiveLaneMaskEdge(Block::ID from, Block::ID to, RValue<SIMD::Int> mask);
-
- unsigned int getMultiSampleCount() const { return multiSampleCount; }
-
- Intermediate &createIntermediate(Object::ID id, uint32_t componentCount)
- {
- auto it = intermediates.emplace(std::piecewise_construct,
- std::forward_as_tuple(id),
- std::forward_as_tuple(componentCount));
- ASSERT_MSG(it.second, "Intermediate %d created twice", id.value());
- return it.first->second;
- }
-
- const Intermediate &getIntermediate(Object::ID id) const
- {
- auto it = intermediates.find(id);
- ASSERT_MSG(it != intermediates.end(), "Unknown intermediate %d", id.value());
- return it->second;
- }
-
- void createPointer(Object::ID id, SIMD::Pointer ptr)
- {
- bool added = pointers.emplace(id, ptr).second;
- ASSERT_MSG(added, "Pointer %d created twice", id.value());
- }
-
- const SIMD::Pointer &getPointer(Object::ID id) const
- {
- auto it = pointers.find(id);
- ASSERT_MSG(it != pointers.end(), "Unknown pointer %d", id.value());
- return it->second;
- }
-
- void createSampledImage(Object::ID id, SampledImagePointer ptr)
- {
- bool added = sampledImages.emplace(id, ptr).second;
- ASSERT_MSG(added, "Sampled image %d created twice", id.value());
- }
-
- const SampledImagePointer &getSampledImage(Object::ID id) const
- {
- auto it = sampledImages.find(id);
- ASSERT_MSG(it != sampledImages.end(), "Unknown sampled image %d", id.value());
- return it->second;
- }
-
- bool isSampledImage(Object::ID id) const
- {
- return sampledImages.find(id) != sampledImages.end();
- }
-
- const SIMD::Pointer &getImage(Object::ID id) const
- {
- return isSampledImage(id) ? getSampledImage(id) : getPointer(id);
- }
-
- EmitResult EmitVariable(InsnIterator insn);
- EmitResult EmitLoad(InsnIterator insn);
- EmitResult EmitStore(InsnIterator insn);
- EmitResult EmitAccessChain(InsnIterator insn);
- EmitResult EmitCompositeConstruct(InsnIterator insn);
- EmitResult EmitCompositeInsert(InsnIterator insn);
- EmitResult EmitCompositeExtract(InsnIterator insn);
- EmitResult EmitVectorShuffle(InsnIterator insn);
- EmitResult EmitVectorTimesScalar(InsnIterator insn);
- EmitResult EmitMatrixTimesVector(InsnIterator insn);
- EmitResult EmitVectorTimesMatrix(InsnIterator insn);
- EmitResult EmitMatrixTimesMatrix(InsnIterator insn);
- EmitResult EmitOuterProduct(InsnIterator insn);
- EmitResult EmitTranspose(InsnIterator insn);
- EmitResult EmitVectorExtractDynamic(InsnIterator insn);
- EmitResult EmitVectorInsertDynamic(InsnIterator insn);
- EmitResult EmitUnaryOp(InsnIterator insn);
- EmitResult EmitBinaryOp(InsnIterator insn);
- EmitResult EmitDot(InsnIterator insn);
- EmitResult EmitSelect(InsnIterator insn);
- EmitResult EmitExtendedInstruction(InsnIterator insn);
- EmitResult EmitExtGLSLstd450(InsnIterator insn);
- EmitResult EmitAny(InsnIterator insn);
- EmitResult EmitAll(InsnIterator insn);
- EmitResult EmitBranch(InsnIterator insn);
- EmitResult EmitBranchConditional(InsnIterator insn);
- EmitResult EmitSwitch(InsnIterator insn);
- EmitResult EmitUnreachable(InsnIterator insn);
- EmitResult EmitReturn(InsnIterator insn);
- EmitResult EmitTerminateInvocation(InsnIterator insn);
- EmitResult EmitDemoteToHelperInvocation(InsnIterator insn);
- EmitResult EmitIsHelperInvocation(InsnIterator insn);
- EmitResult EmitFunctionCall(InsnIterator insn);
- EmitResult EmitPhi(InsnIterator insn);
- EmitResult EmitImageSample(const ImageInstruction &instruction);
- EmitResult EmitImageQuerySizeLod(InsnIterator insn);
- EmitResult EmitImageQuerySize(InsnIterator insn);
- EmitResult EmitImageQueryLevels(InsnIterator insn);
- EmitResult EmitImageQuerySamples(InsnIterator insn);
- EmitResult EmitImageRead(const ImageInstruction &instruction);
- EmitResult EmitImageWrite(const ImageInstruction &instruction) const;
- EmitResult EmitImageTexelPointer(const ImageInstruction &instruction);
- EmitResult EmitAtomicOp(InsnIterator insn);
- EmitResult EmitAtomicCompareExchange(InsnIterator insn);
- EmitResult EmitSampledImage(InsnIterator insn);
- EmitResult EmitImage(InsnIterator insn);
- EmitResult EmitCopyObject(InsnIterator insn);
- EmitResult EmitCopyMemory(InsnIterator insn);
- EmitResult EmitControlBarrier(InsnIterator insn) const;
- EmitResult EmitMemoryBarrier(InsnIterator insn);
- EmitResult EmitGroupNonUniform(InsnIterator insn);
- EmitResult EmitArrayLength(InsnIterator insn);
- EmitResult EmitBitcastPointer(Object::ID resultID, Operand &src);
-
- enum InterpolationType
- {
- Centroid,
- AtSample,
- AtOffset,
- };
- SIMD::Float EmitInterpolate(const SIMD::Pointer &ptr, int32_t location, Object::ID paramId,
- uint32_t component, InterpolationType type) const;
-
- SIMD::Pointer WalkExplicitLayoutAccessChain(Object::ID id, Object::ID elementId, const Span &indexIds, bool nonUniform) const;
- SIMD::Pointer WalkAccessChain(Object::ID id, Object::ID elementId, const Span &indexIds, bool nonUniform) const;
-
- // Returns a SIMD::Pointer to the underlying data for the given pointer
- // object.
- // Handles objects of the following kinds:
- // - DescriptorSet
- // - Pointer
- // - InterfaceVariable
- // Calling GetPointerToData with objects of any other kind will assert.
- SIMD::Pointer GetPointerToData(Object::ID id, SIMD::Int arrayIndex, bool nonUniform) const;
- void OffsetToElement(SIMD::Pointer &ptr, Object::ID elementId, int32_t arrayStride) const;
-
- /* image istructions */
-
- // Emits code to sample an image, regardless of whether any SIMD lanes are active.
- void EmitImageSampleUnconditional(Array<SIMD::Float> &out, const ImageInstruction &instruction) const;
-
- Pointer<Byte> getSamplerDescriptor(Pointer<Byte> imageDescriptor, const ImageInstruction &instruction) const;
- Pointer<Byte> getSamplerDescriptor(Pointer<Byte> imageDescriptor, const ImageInstruction &instruction, int laneIdx) const;
- Pointer<Byte> lookupSamplerFunction(Pointer<Byte> imageDescriptor, Pointer<Byte> samplerDescriptor, const ImageInstruction &instruction) const;
- void callSamplerFunction(Pointer<Byte> samplerFunction, Array<SIMD::Float> &out, Pointer<Byte> imageDescriptor, const ImageInstruction &instruction) const;
-
- void GetImageDimensions(const Type &resultTy, Object::ID imageId, Object::ID lodId, Intermediate &dst) const;
- struct TexelAddressData
- {
- bool isArrayed;
- spv::Dim dim;
- int dims, texelSize;
- SIMD::Int u, v, w, ptrOffset;
- };
- static TexelAddressData setupTexelAddressData(SIMD::Int rowPitch, SIMD::Int slicePitch, SIMD::Int samplePitch, ImageInstructionSignature instruction, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, const SpirvRoutine *routine);
- static SIMD::Pointer GetNonUniformTexelAddress(ImageInstructionSignature instruction, SIMD::Pointer descriptor, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, OutOfBoundsBehavior outOfBoundsBehavior, SIMD::Int activeLaneMask, const SpirvRoutine *routine);
- static SIMD::Pointer GetTexelAddress(ImageInstructionSignature instruction, Pointer<Byte> descriptor, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, OutOfBoundsBehavior outOfBoundsBehavior, const SpirvRoutine *routine);
- static void WriteImage(ImageInstructionSignature instruction, Pointer<Byte> descriptor, const Pointer<SIMD::Int> &coord, const Pointer<SIMD::Int> &texelAndMask, vk::Format imageFormat);
-
- /* control flow */
-
- // Lookup the active lane mask for the edge from -> to.
- // If from is unreachable, then a mask of all zeros is returned.
- // Asserts if from is reachable and the edge does not exist.
- RValue<SIMD::Int> GetActiveLaneMaskEdge(Block::ID from, Block::ID to) const;
-
- // Updates the current active lane mask.
- void SetActiveLaneMask(RValue<SIMD::Int> mask);
- void SetStoresAndAtomicsMask(RValue<SIMD::Int> mask);
-
- // Emit all the unvisited blocks (except for ignore) in DFS order,
- // starting with id.
- void EmitBlocks(Block::ID id, Block::ID ignore = 0);
- void EmitNonLoop();
- void EmitLoop();
-
- void EmitInstructions(InsnIterator begin, InsnIterator end);
- EmitResult EmitInstruction(InsnIterator insn);
-
- // Helper for implementing OpStore, which doesn't take an InsnIterator so it
- // can also store independent operands.
- void Store(Object::ID pointerId, const Operand &value, bool atomic, std::memory_order memoryOrder) const;
-
- // LoadPhi loads the phi values from the alloca storage and places the
- // load values into the intermediate with the phi's result id.
- void LoadPhi(InsnIterator insn);
-
- // StorePhi updates the phi's alloca storage value using the incoming
- // values from blocks that are both in the OpPhi instruction and in
- // filter.
- void StorePhi(Block::ID blockID, InsnIterator insn, const std::unordered_set<SpirvShader::Block::ID> &filter) const;
-
- // Emits a rr::Fence for the given MemorySemanticsMask.
- void Fence(spv::MemorySemanticsMask semantics) const;
-
- // Helper for calling rr::Yield with res cast to an rr::Int.
- void Yield(YieldResult res) const;
-
- // Helper as we often need to take dot products as part of doing other things.
- static SIMD::Float FDot(unsigned numComponents, const Operand &x, const Operand &y);
- static SIMD::Int SDot(unsigned numComponents, const Operand &x, const Operand &y, const Operand *accum);
- static SIMD::UInt UDot(unsigned numComponents, const Operand &x, const Operand &y, const Operand *accum);
- static SIMD::Int SUDot(unsigned numComponents, const Operand &x, const Operand &y, const Operand *accum);
- static SIMD::Int AddSat(RValue<SIMD::Int> a, RValue<SIMD::Int> b);
- static SIMD::UInt AddSat(RValue<SIMD::UInt> a, RValue<SIMD::UInt> b);
-
- static ImageSampler *getImageSampler(const vk::Device *device, uint32_t signature, uint32_t samplerId, uint32_t imageViewId);
- static std::shared_ptr<rr::Routine> emitSamplerRoutine(ImageInstructionSignature instruction, const Sampler &samplerState);
- static std::shared_ptr<rr::Routine> emitWriteRoutine(ImageInstructionSignature instruction, const Sampler &samplerState);
-
- // TODO(b/129523279): Eliminate conversion and use vk::Sampler members directly.
- static sw::FilterType convertFilterMode(const vk::SamplerState *samplerState, VkImageViewType imageViewType, SamplerMethod samplerMethod);
- static sw::MipmapType convertMipmapMode(const vk::SamplerState *samplerState);
- static sw::AddressingMode convertAddressingMode(int coordinateIndex, const vk::SamplerState *samplerState, VkImageViewType imageViewType);
-
- private:
- const SpirvShader &shader;
- SpirvRoutine *const routine; // The current routine being built.
- Function::ID function; // The current function being built.
- Block::ID block; // The current block being built.
- rr::Value *activeLaneMaskValue = nullptr; // The current active lane mask.
- rr::Value *storesAndAtomicsMaskValue = nullptr; // The current atomics mask.
- Block::Set visited; // Blocks already built.
- std::unordered_map<Block::Edge, RValue<SIMD::Int>, Block::Edge::Hash> edgeActiveLaneMasks;
- std::deque<Block::ID> *pending;
-
- const vk::DescriptorSet::Bindings &descriptorSets;
-
- std::unordered_map<Object::ID, Intermediate> intermediates;
- std::unordered_map<Object::ID, SIMD::Pointer> pointers;
- std::unordered_map<Object::ID, SampledImagePointer> sampledImages;
-
- const unsigned int multiSampleCount;
+ Centroid,
+ AtSample,
+ AtOffset,
};
+ SIMD::Float EmitInterpolate(const SIMD::Pointer &ptr, int32_t location, Object::ID paramId,
+ uint32_t component, InterpolationType type) const;
- const Type &getType(Type::ID id) const
+ SIMD::Pointer WalkExplicitLayoutAccessChain(Object::ID id, Object::ID elementId, const Span &indexIds, bool nonUniform) const;
+ SIMD::Pointer WalkAccessChain(Object::ID id, Object::ID elementId, const Span &indexIds, bool nonUniform) const;
+
+ // Returns true if data in the given storage class is word-interleaved
+	// by each SIMD vector lane, otherwise data is stored linearly.
+ //
+ // Each lane addresses a single word, picked by a base pointer and an
+ // integer offset.
+ //
+ // A word is currently 32 bits (single float, int32_t, uint32_t).
+ // A lane is a single element of a SIMD vector register.
+ //
+ // Storage interleaved by lane - (IsStorageInterleavedByLane() == true):
+ // ---------------------------------------------------------------------
+ //
+ // Address = PtrBase + sizeof(Word) * (SIMD::Width * LaneOffset + LaneIndex)
+ //
+ // Assuming SIMD::Width == 4:
+ //
+ // Lane[0] | Lane[1] | Lane[2] | Lane[3]
+ // ===========+===========+===========+==========
+ // LaneOffset=0: | Word[0] | Word[1] | Word[2] | Word[3]
+ // ---------------+-----------+-----------+-----------+----------
+ // LaneOffset=1: | Word[4] | Word[5] | Word[6] | Word[7]
+ // ---------------+-----------+-----------+-----------+----------
+ // LaneOffset=2: | Word[8] | Word[9] | Word[a] | Word[b]
+ // ---------------+-----------+-----------+-----------+----------
+ // LaneOffset=3: | Word[c] | Word[d] | Word[e] | Word[f]
+ //
+ //
+ // Linear storage - (IsStorageInterleavedByLane() == false):
+ // ---------------------------------------------------------
+ //
+ // Address = PtrBase + sizeof(Word) * LaneOffset
+ //
+ // Lane[0] | Lane[1] | Lane[2] | Lane[3]
+ // ===========+===========+===========+==========
+ // LaneOffset=0: | Word[0] | Word[0] | Word[0] | Word[0]
+ // ---------------+-----------+-----------+-----------+----------
+ // LaneOffset=1: | Word[1] | Word[1] | Word[1] | Word[1]
+ // ---------------+-----------+-----------+-----------+----------
+ // LaneOffset=2: | Word[2] | Word[2] | Word[2] | Word[2]
+ // ---------------+-----------+-----------+-----------+----------
+ // LaneOffset=3: | Word[3] | Word[3] | Word[3] | Word[3]
+ //
+
+ static bool IsStorageInterleavedByLane(spv::StorageClass storageClass);
+ static SIMD::Pointer GetElementPointer(SIMD::Pointer structure, uint32_t offset, spv::StorageClass storageClass);
+
+ // Returns a SIMD::Pointer to the underlying data for the given pointer
+ // object.
+ // Handles objects of the following kinds:
+ // - DescriptorSet
+ // - Pointer
+ // - InterfaceVariable
+ // Calling GetPointerToData with objects of any other kind will assert.
+ SIMD::Pointer GetPointerToData(Object::ID id, SIMD::Int arrayIndex, bool nonUniform) const;
+ void OffsetToElement(SIMD::Pointer &ptr, Object::ID elementId, int32_t arrayStride) const;
+
+	/* image instructions */
+
+ // Emits code to sample an image, regardless of whether any SIMD lanes are active.
+ void EmitImageSampleUnconditional(Array<SIMD::Float> &out, const ImageInstruction &instruction) const;
+
+ Pointer<Byte> getSamplerDescriptor(Pointer<Byte> imageDescriptor, const ImageInstruction &instruction) const;
+ Pointer<Byte> getSamplerDescriptor(Pointer<Byte> imageDescriptor, const ImageInstruction &instruction, int laneIdx) const;
+ Pointer<Byte> lookupSamplerFunction(Pointer<Byte> imageDescriptor, Pointer<Byte> samplerDescriptor, const ImageInstruction &instruction) const;
+ void callSamplerFunction(Pointer<Byte> samplerFunction, Array<SIMD::Float> &out, Pointer<Byte> imageDescriptor, const ImageInstruction &instruction) const;
+
+ void GetImageDimensions(const Type &resultTy, Object::ID imageId, Object::ID lodId, Intermediate &dst) const;
+ struct TexelAddressData
{
- auto it = types.find(id);
- ASSERT_MSG(it != types.end(), "Unknown type %d", id.value());
- return it->second;
- }
+ bool isArrayed;
+ spv::Dim dim;
+ int dims, texelSize;
+ SIMD::Int u, v, w, ptrOffset;
+ };
+ static TexelAddressData setupTexelAddressData(SIMD::Int rowPitch, SIMD::Int slicePitch, SIMD::Int samplePitch, ImageInstructionSignature instruction, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, const SpirvRoutine *routine);
+ static SIMD::Pointer GetNonUniformTexelAddress(ImageInstructionSignature instruction, SIMD::Pointer descriptor, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, OutOfBoundsBehavior outOfBoundsBehavior, SIMD::Int activeLaneMask, const SpirvRoutine *routine);
+ static SIMD::Pointer GetTexelAddress(ImageInstructionSignature instruction, Pointer<Byte> descriptor, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, OutOfBoundsBehavior outOfBoundsBehavior, const SpirvRoutine *routine);
+ static void WriteImage(ImageInstructionSignature instruction, Pointer<Byte> descriptor, const Pointer<SIMD::Int> &coord, const Pointer<SIMD::Int> &texelAndMask, vk::Format imageFormat);
+
+ /* control flow */
+
+ // Lookup the active lane mask for the edge from -> to.
+ // If from is unreachable, then a mask of all zeros is returned.
+ // Asserts if from is reachable and the edge does not exist.
+ RValue<SIMD::Int> GetActiveLaneMaskEdge(Block::ID from, Block::ID to) const;
+
+ // Updates the current active lane mask.
+ void SetActiveLaneMask(RValue<SIMD::Int> mask);
+ void SetStoresAndAtomicsMask(RValue<SIMD::Int> mask);
+
+ // Emit all the unvisited blocks (except for ignore) in DFS order,
+ // starting with id.
+ void EmitBlocks(Block::ID id, Block::ID ignore = 0);
+ void EmitNonLoop();
+ void EmitLoop();
+
+ void EmitInstructions(InsnIterator begin, InsnIterator end);
+ EmitResult EmitInstruction(InsnIterator insn);
+
+ // Helper for implementing OpStore, which doesn't take an InsnIterator so it
+ // can also store independent operands.
+ void Store(Object::ID pointerId, const Operand &value, bool atomic, std::memory_order memoryOrder) const;
+
+ // LoadPhi loads the phi values from the alloca storage and places the
+ // load values into the intermediate with the phi's result id.
+ void LoadPhi(InsnIterator insn);
+
+ // StorePhi updates the phi's alloca storage value using the incoming
+ // values from blocks that are both in the OpPhi instruction and in
+ // filter.
+ void StorePhi(Block::ID blockID, InsnIterator insn, const std::unordered_set<Block::ID> &filter) const;
+
+ // Emits a rr::Fence for the given MemorySemanticsMask.
+ void Fence(spv::MemorySemanticsMask semantics) const;
+
+ // Helper for calling rr::Yield with res cast to an rr::Int.
+ enum class YieldResult
+ {
+ ControlBarrier = 0,
+ };
+ void Yield(YieldResult res) const;
+
+ // Helper as we often need to take dot products as part of doing other things.
+ static SIMD::Float FDot(unsigned numComponents, const Operand &x, const Operand &y);
+ static SIMD::Int SDot(unsigned numComponents, const Operand &x, const Operand &y, const Operand *accum);
+ static SIMD::UInt UDot(unsigned numComponents, const Operand &x, const Operand &y, const Operand *accum);
+ static SIMD::Int SUDot(unsigned numComponents, const Operand &x, const Operand &y, const Operand *accum);
+ static SIMD::Int AddSat(RValue<SIMD::Int> a, RValue<SIMD::Int> b);
+ static SIMD::UInt AddSat(RValue<SIMD::UInt> a, RValue<SIMD::UInt> b);
+
+ using ImageSampler = void(void *texture, void *uvsIn, void *texelOut, void *constants);
+ static ImageSampler *getImageSampler(const vk::Device *device, uint32_t signature, uint32_t samplerId, uint32_t imageViewId);
+ static std::shared_ptr<rr::Routine> emitSamplerRoutine(ImageInstructionSignature instruction, const Sampler &samplerState);
+ static std::shared_ptr<rr::Routine> emitWriteRoutine(ImageInstructionSignature instruction, const Sampler &samplerState);
+
+ // TODO(b/129523279): Eliminate conversion and use vk::Sampler members directly.
+ static sw::FilterType convertFilterMode(const vk::SamplerState *samplerState, VkImageViewType imageViewType, SamplerMethod samplerMethod);
+ static sw::MipmapType convertMipmapMode(const vk::SamplerState *samplerState);
+ static sw::AddressingMode convertAddressingMode(int coordinateIndex, const vk::SamplerState *samplerState, VkImageViewType imageViewType);
private:
- const Type &getType(const Object &object) const
- {
- return getType(object.typeId());
- }
+ const SpirvShader &shader;
+ SpirvRoutine *const routine; // The current routine being built.
+ SpirvShader::Function::ID function; // The current function being built.
+ Block::ID block; // The current block being built.
+ rr::Value *activeLaneMaskValue = nullptr; // The current active lane mask.
+	rr::Value *storesAndAtomicsMaskValue = nullptr;  // The current stores and atomics mask.
+ SpirvShader::Block::Set visited; // Blocks already built.
+ std::unordered_map<Block::Edge, RValue<SIMD::Int>, Block::Edge::Hash> edgeActiveLaneMasks;
+ std::deque<Block::ID> *pending;
- const Object &getObject(Object::ID id) const
- {
- auto it = defs.find(id);
- ASSERT_MSG(it != defs.end(), "Unknown object %d", id.value());
- return it->second;
- }
+ const vk::DescriptorSet::Bindings &descriptorSets;
- const Type &getObjectType(Object::ID id) const
- {
- return getType(getObject(id));
- }
+ std::unordered_map<Object::ID, Intermediate> intermediates;
+ std::unordered_map<Object::ID, SIMD::Pointer> pointers;
+ std::unordered_map<Object::ID, SampledImagePointer> sampledImages;
- const Function &getFunction(Function::ID id) const
- {
- auto it = functions.find(id);
- ASSERT_MSG(it != functions.end(), "Unknown function %d", id.value());
- return it->second;
- }
-
- const String &getString(StringID id) const
- {
- auto it = strings.find(id);
- ASSERT_MSG(it != strings.end(), "Unknown string %d", id.value());
- return it->second;
- }
-
- const Extension &getExtension(Extension::ID id) const
- {
- auto it = extensionsByID.find(id);
- ASSERT_MSG(it != extensionsByID.end(), "Unknown extension %d", id.value());
- return it->second;
- }
-
- OutOfBoundsBehavior getOutOfBoundsBehavior(Object::ID pointerId, const vk::PipelineLayout *pipelineLayout) const;
-
- // Returns the *component* offset in the literal for the given access chain.
- uint32_t WalkLiteralAccessChain(Type::ID id, const Span &indexes) const;
-
- uint32_t GetConstScalarInt(Object::ID id) const;
- void EvalSpecConstantOp(InsnIterator insn);
- void EvalSpecConstantUnaryOp(InsnIterator insn);
- void EvalSpecConstantBinaryOp(InsnIterator insn);
-
- // Fragment input interpolation functions
- uint32_t GetNumInputComponents(int32_t location) const;
- uint32_t GetPackedInterpolant(int32_t location) const;
-
- // WriteCFGGraphVizDotFile() writes a graphviz dot file of the shader's
- // control flow to the given file path.
- void WriteCFGGraphVizDotFile(const char *path) const;
-
-public:
- // OpcodeName() returns the name of the opcode op.
- static const char *OpcodeName(spv::Op op);
- static std::memory_order MemoryOrder(spv::MemorySemanticsMask memorySemantics);
-
- // IsStatement() returns true if the given opcode actually performs
- // work (as opposed to declaring a type, defining a function start / end,
- // etc).
- static bool IsStatement(spv::Op op);
-
- // HasTypeAndResult() returns true if the given opcode's instruction
- // has a result type ID and result ID, i.e. defines an Object.
- static bool HasTypeAndResult(spv::Op op);
-
- // Returns 0 when invalid.
- static VkShaderStageFlagBits executionModelToStage(spv::ExecutionModel model);
+ const unsigned int multiSampleCount;
};
class SpirvRoutine
{
+ using Object = SpirvShader::Object;
+
public:
SpirvRoutine(const vk::PipelineLayout *pipelineLayout);
@@ -1565,7 +1569,7 @@
const vk::PipelineLayout *const pipelineLayout;
- std::unordered_map<SpirvShader::Object::ID, Variable> variables;
+ std::unordered_map<Object::ID, Variable> variables;
std::unordered_map<uint32_t, SamplerCache> samplerCache; // Indexed by the instruction position, in words.
SIMD::Float inputs[MAX_INTERFACE_COMPONENTS];
Interpolation inputsInterpolation[MAX_INTERFACE_COMPONENTS];
@@ -1602,13 +1606,13 @@
std::array<SIMD::Int, 3> localInvocationID; // TODO(b/236162233): SIMD::Int3
std::array<SIMD::Int, 3> globalInvocationID; // TODO(b/236162233): SIMD::Int3
- void createVariable(SpirvShader::Object::ID id, uint32_t componentCount)
+ void createVariable(Object::ID id, uint32_t componentCount)
{
bool added = variables.emplace(id, Variable(componentCount)).second;
ASSERT_MSG(added, "Variable %d created twice", id.value());
}
- Variable &getVariable(SpirvShader::Object::ID id)
+ Variable &getVariable(Object::ID id)
{
auto it = variables.find(id);
 	ASSERT_MSG(it != variables.end(), "Unknown variable %d", id.value());
@@ -1642,7 +1646,8 @@
// SpirvShader::emitProlog() and SpirvShader::emitEpilog().
friend class SpirvShader;
- std::unordered_map<SpirvShader::Object::ID, Variable> phis;
+public:
+ std::unordered_map<Object::ID, Variable> phis;
};
} // namespace sw
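
The header diff above captures the core of the refactoring: EmitState keeps only mutable emission state (lane masks, intermediates, pointers, descriptor bindings, sample count) and reads the parsed program through a const SpirvShader reference. A minimal, hypothetical sketch of that ownership pattern follows; Module and Emitter are illustrative names, not SwiftShader classes.

#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

// Parsed, immutable program description (stands in for SpirvShader after the split).
class Module
{
public:
	void addInstruction(std::string op) { instructions.push_back(std::move(op)); }
	const std::vector<std::string> &insns() const { return instructions; }

private:
	std::vector<std::string> instructions;
};

// Per-emission state (stands in for EmitState): owns the mutable emission data and
// only reads from the parsed module through a const reference.
class Emitter
{
public:
	explicit Emitter(const Module &module)
	    : module(module)
	{}

	void emitAll()
	{
		for(const auto &op : module.insns())
		{
			emittedCount[op]++;
			std::printf("emit %s\n", op.c_str());
		}
	}

private:
	const Module &module;                               // read-only parse results
	std::unordered_map<std::string, int> emittedCount;  // mutable emission state
};

int main()
{
	Module module;
	module.addInstruction("OpLoad");
	module.addInstruction("OpStore");

	Emitter emitter(module);  // one emitter per emission, sharing the parsed module
	emitter.emitAll();
	return 0;
}
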
diff --git a/src/Pipeline/SpirvShaderArithmetic.cpp b/src/Pipeline/SpirvShaderArithmetic.cpp
index 8eee57b..43d652f 100644
--- a/src/Pipeline/SpirvShaderArithmetic.cpp
+++ b/src/Pipeline/SpirvShaderArithmetic.cpp
@@ -23,7 +23,7 @@
namespace sw {
-SpirvShader::EmitResult SpirvShader::EmitState::EmitVectorTimesScalar(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitVectorTimesScalar(SpirvShader::InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
auto &dst = createIntermediate(insn.resultId(), type.componentCount);
@@ -38,7 +38,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitState::EmitMatrixTimesVector(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitMatrixTimesVector(SpirvShader::InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
auto &dst = createIntermediate(insn.resultId(), type.componentCount);
@@ -58,7 +58,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitVectorTimesMatrix(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitVectorTimesMatrix(SpirvShader::InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
auto &dst = createIntermediate(insn.resultId(), type.componentCount);
@@ -78,7 +78,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitMatrixTimesMatrix(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitMatrixTimesMatrix(SpirvShader::InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
auto &dst = createIntermediate(insn.resultId(), type.componentCount);
@@ -105,7 +105,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitOuterProduct(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitOuterProduct(SpirvShader::InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
auto &dst = createIntermediate(insn.resultId(), type.componentCount);
@@ -126,7 +126,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitState::EmitTranspose(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitTranspose(SpirvShader::InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
auto &dst = createIntermediate(insn.resultId(), type.componentCount);
@@ -146,7 +146,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitBitcastPointer(Object::ID resultID, Operand &src)
+EmitState::EmitResult EmitState::EmitBitcastPointer(SpirvShader::Object::ID resultID, Operand &src)
{
if(src.isPointer()) // Pointer -> Integer bits
{
@@ -188,12 +188,12 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitUnaryOp(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitUnaryOp(SpirvShader::InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
auto src = Operand(shader, *this, insn.word(3));
- bool dstIsPointer = shader.getObject(insn.resultId()).kind == Object::Kind::Pointer;
+ bool dstIsPointer = shader.getObject(insn.resultId()).kind == SpirvShader::Object::Kind::Pointer;
bool srcIsPointer = src.isPointer();
if(srcIsPointer || dstIsPointer)
{
@@ -354,14 +354,14 @@
}
break;
default:
- UNREACHABLE("%s", OpcodeName(insn.opcode()));
+ UNREACHABLE("%s", shader.OpcodeName(insn.opcode()));
}
}
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitBinaryOp(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitBinaryOp(SpirvShader::InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
auto &dst = createIntermediate(insn.resultId(), type.componentCount);
@@ -563,7 +563,7 @@
dst.move(i + lhsType.componentCount, CmpLT(lhs.UInt(i), rhs.UInt(i)) >> 31);
break;
default:
- UNREACHABLE("%s", OpcodeName(insn.opcode()));
+ UNREACHABLE("%s", shader.OpcodeName(insn.opcode()));
}
}
@@ -574,7 +574,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitDot(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitDot(SpirvShader::InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
ASSERT(type.componentCount == 1);
@@ -617,7 +617,7 @@
}
break;
default:
- UNREACHABLE("%s", OpcodeName(opcode));
+ UNREACHABLE("%s", shader.OpcodeName(opcode));
break;
}
@@ -628,7 +628,7 @@
return EmitResult::Continue;
}
-SIMD::Float SpirvShader::EmitState::FDot(unsigned numComponents, const Operand &x, const Operand &y)
+SIMD::Float EmitState::FDot(unsigned numComponents, const Operand &x, const Operand &y)
{
SIMD::Float d = x.Float(0) * y.Float(0);
@@ -640,7 +640,7 @@
return d;
}
-SIMD::Int SpirvShader::EmitState::SDot(unsigned numComponents, const Operand &x, const Operand &y, const Operand *accum)
+SIMD::Int EmitState::SDot(unsigned numComponents, const Operand &x, const Operand &y, const Operand *accum)
{
SIMD::Int d(0);
@@ -676,7 +676,7 @@
return d;
}
-SIMD::UInt SpirvShader::EmitState::UDot(unsigned numComponents, const Operand &x, const Operand &y, const Operand *accum)
+SIMD::UInt EmitState::UDot(unsigned numComponents, const Operand &x, const Operand &y, const Operand *accum)
{
SIMD::UInt d(0);
@@ -712,7 +712,7 @@
return d;
}
-SIMD::Int SpirvShader::EmitState::SUDot(unsigned numComponents, const Operand &x, const Operand &y, const Operand *accum)
+SIMD::Int EmitState::SUDot(unsigned numComponents, const Operand &x, const Operand &y, const Operand *accum)
{
SIMD::Int d(0);
@@ -748,7 +748,7 @@
return d;
}
-SIMD::Int SpirvShader::EmitState::AddSat(RValue<SIMD::Int> a, RValue<SIMD::Int> b)
+SIMD::Int EmitState::AddSat(RValue<SIMD::Int> a, RValue<SIMD::Int> b)
{
SIMD::Int sum = a + b;
SIMD::Int sSign = sum >> 31;
@@ -765,7 +765,7 @@
(~oob & sum);
}
-SIMD::UInt SpirvShader::EmitState::AddSat(RValue<SIMD::UInt> a, RValue<SIMD::UInt> b)
+SIMD::UInt EmitState::AddSat(RValue<SIMD::UInt> a, RValue<SIMD::UInt> b)
{
SIMD::UInt sum = a + b;
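
The AddSat overloads moved above use the usual wrap-then-detect approach: signed overflow has occurred only when both operands share a sign and the wrapped sum has the opposite sign, and the result then saturates toward that sign. A scalar sketch of the same logic, using plain integers rather than SwiftShader's SIMD types:

#include <cassert>
#include <cstdint>

// Scalar analogue of the SIMD saturating add: overflow occurs only when both
// operands agree in sign and the wrapped sum disagrees.
int32_t AddSatSigned(int32_t a, int32_t b)
{
	uint32_t ua = static_cast<uint32_t>(a);
	uint32_t ub = static_cast<uint32_t>(b);
	uint32_t sum = ua + ub;  // wraps, which is well-defined for unsigned arithmetic

	bool overflow = ((~(ua ^ ub) & (ua ^ sum)) >> 31) != 0;  // operands agree, sum disagrees
	if(!overflow)
	{
		return static_cast<int32_t>(sum);
	}
	return (a < 0) ? INT32_MIN : INT32_MAX;  // saturate toward the operands' sign
}

uint32_t AddSatUnsigned(uint32_t a, uint32_t b)
{
	uint32_t sum = a + b;
	return (sum < a) ? UINT32_MAX : sum;  // a carry out means saturation
}

int main()
{
	assert(AddSatSigned(INT32_MAX, 1) == INT32_MAX);
	assert(AddSatSigned(INT32_MIN, -1) == INT32_MIN);
	assert(AddSatSigned(3, 4) == 7);
	assert(AddSatUnsigned(UINT32_MAX, 5) == UINT32_MAX);
	return 0;
}
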
diff --git a/src/Pipeline/SpirvShaderControlFlow.cpp b/src/Pipeline/SpirvShaderControlFlow.cpp
index e19f1b3..e50d4c5 100644
--- a/src/Pipeline/SpirvShaderControlFlow.cpp
+++ b/src/Pipeline/SpirvShaderControlFlow.cpp
@@ -117,7 +117,7 @@
}
}
-void SpirvShader::Function::TraverseReachableBlocks(Block::ID id, SpirvShader::Block::Set &reachable) const
+void SpirvShader::Function::TraverseReachableBlocks(Block::ID id, Block::Set &reachable) const
{
if(reachable.count(id) == 0)
{
@@ -195,12 +195,12 @@
return false;
}
-void SpirvShader::EmitState::addOutputActiveLaneMaskEdge(Block::ID to, RValue<SIMD::Int> mask)
+void EmitState::addOutputActiveLaneMaskEdge(Block::ID to, RValue<SIMD::Int> mask)
{
addActiveLaneMaskEdge(block, to, mask & activeLaneMask());
}
-void SpirvShader::EmitState::addActiveLaneMaskEdge(Block::ID from, Block::ID to, RValue<SIMD::Int> mask)
+void EmitState::addActiveLaneMaskEdge(Block::ID from, Block::ID to, RValue<SIMD::Int> mask)
{
auto edge = Block::Edge{ from, to };
auto it = edgeActiveLaneMasks.find(edge);
@@ -216,7 +216,7 @@
}
}
-RValue<SIMD::Int> SpirvShader::EmitState::GetActiveLaneMaskEdge(Block::ID from, Block::ID to) const
+RValue<SIMD::Int> EmitState::GetActiveLaneMaskEdge(Block::ID from, Block::ID to) const
{
auto edge = Block::Edge{ from, to };
auto it = edgeActiveLaneMasks.find(edge);
@@ -224,7 +224,7 @@
return it->second;
}
-void SpirvShader::EmitState::EmitBlocks(Block::ID id, Block::ID ignore /* = 0 */)
+void EmitState::EmitBlocks(Block::ID id, Block::ID ignore /* = 0 */)
{
auto oldPending = this->pending;
auto &function = shader.getFunction(this->function);
@@ -284,7 +284,7 @@
this->pending = oldPending;
}
-void SpirvShader::EmitState::EmitNonLoop()
+void EmitState::EmitNonLoop()
{
auto &function = shader.getFunction(this->function);
auto blockId = block;
@@ -322,7 +322,7 @@
SPIRV_SHADER_DBG("Block {0} done", blockId);
}
-void SpirvShader::EmitState::EmitLoop()
+void EmitState::EmitLoop()
{
auto &function = shader.getFunction(this->function);
auto blockId = block;
@@ -492,14 +492,14 @@
}
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitBranch(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitBranch(InsnIterator insn)
{
auto target = Block::ID(insn.word(1));
addActiveLaneMaskEdge(block, target, activeLaneMask());
return EmitResult::Terminator;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitBranchConditional(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitBranchConditional(InsnIterator insn)
{
auto &function = shader.getFunction(this->function);
auto block = function.getBlock(this->block);
@@ -520,7 +520,7 @@
return EmitResult::Terminator;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitSwitch(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitSwitch(InsnIterator insn)
{
auto &function = shader.getFunction(this->function);
auto block = function.getBlock(this->block);
@@ -558,27 +558,27 @@
return EmitResult::Terminator;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitUnreachable(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitUnreachable(InsnIterator insn)
{
// TODO: Log something in this case?
SetActiveLaneMask(SIMD::Int(0));
return EmitResult::Terminator;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitReturn(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitReturn(InsnIterator insn)
{
SetActiveLaneMask(SIMD::Int(0));
return EmitResult::Terminator;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitTerminateInvocation(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitTerminateInvocation(InsnIterator insn)
{
routine->discardMask |= SignMask(activeLaneMask());
SetActiveLaneMask(SIMD::Int(0));
return EmitResult::Terminator;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitDemoteToHelperInvocation(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitDemoteToHelperInvocation(InsnIterator insn)
{
routine->helperInvocation |= activeLaneMask();
routine->discardMask |= SignMask(activeLaneMask());
@@ -586,7 +586,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitIsHelperInvocation(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitIsHelperInvocation(InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
auto &dst = createIntermediate(insn.resultId(), type.componentCount);
@@ -594,9 +594,9 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitFunctionCall(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitFunctionCall(InsnIterator insn)
{
- auto functionId = Function::ID(insn.word(3));
+ auto functionId = SpirvShader::Function::ID(insn.word(3));
const auto &functionIt = shader.functions.find(functionId);
ASSERT(functionIt != shader.functions.end());
auto &function = functionIt->second;
@@ -620,7 +620,7 @@
if(blockInsn.opcode() != wrapOpKill[insnNumber++])
{
- UNIMPLEMENTED("b/141246700: Function block instruction %d : %s", insnNumber - 1, OpcodeName(blockInsn.opcode())); // FIXME(b/141246700)
+ UNIMPLEMENTED("b/141246700: Function block instruction %d : %s", insnNumber - 1, shader.OpcodeName(blockInsn.opcode())); // FIXME(b/141246700)
return EmitResult::Continue;
}
@@ -634,7 +634,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitControlBarrier(InsnIterator insn) const
+EmitState::EmitResult EmitState::EmitControlBarrier(InsnIterator insn)
{
auto executionScope = spv::Scope(shader.GetConstScalarInt(insn.word(1)));
auto semantics = spv::MemorySemanticsMask(shader.GetConstScalarInt(insn.word(3)));
@@ -658,7 +658,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitPhi(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitPhi(InsnIterator insn)
{
auto &function = shader.getFunction(this->function);
auto currentBlock = function.getBlock(block);
@@ -673,7 +673,7 @@
return EmitResult::Continue;
}
-void SpirvShader::EmitState::LoadPhi(InsnIterator insn)
+void EmitState::LoadPhi(InsnIterator insn)
{
auto typeId = Type::ID(insn.word(1));
auto type = shader.getType(typeId);
@@ -691,7 +691,7 @@
}
}
-void SpirvShader::EmitState::StorePhi(Block::ID currentBlock, InsnIterator insn, const std::unordered_set<SpirvShader::Block::ID> &filter) const
+void EmitState::StorePhi(Block::ID currentBlock, InsnIterator insn, const std::unordered_set<Block::ID> &filter) const
{
auto typeId = Type::ID(insn.word(1));
auto type = shader.getType(typeId);
@@ -728,17 +728,17 @@
}
}
-void SpirvShader::EmitState::Yield(YieldResult res) const
+void EmitState::Yield(YieldResult res) const
{
rr::Yield(RValue<Int>(int(res)));
}
-void SpirvShader::EmitState::SetActiveLaneMask(RValue<SIMD::Int> mask)
+void EmitState::SetActiveLaneMask(RValue<SIMD::Int> mask)
{
activeLaneMaskValue = mask.value();
}
-void SpirvShader::EmitState::SetStoresAndAtomicsMask(RValue<SIMD::Int> mask)
+void EmitState::SetStoresAndAtomicsMask(RValue<SIMD::Int> mask)
{
storesAndAtomicsMaskValue = mask.value();
}
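
EmitState records one active lane mask per control flow edge: addActiveLaneMaskEdge merges the lanes that take an edge, and GetActiveLaneMaskEdge returns all zeros for an unreachable predecessor while asserting that a reachable one recorded the edge. A small, hypothetical scalar model of that bookkeeping (the merge-by-OR behaviour is assumed; the hunk does not show the full body):

#include <cassert>
#include <cstdint>
#include <map>
#include <utility>

// Hypothetical scalar model of per-edge active lane masks: each CFG edge
// (from -> to) accumulates a bitmask of the lanes that took it.
class EdgeMasks
{
public:
	void addEdge(int from, int to, uint32_t mask)
	{
		masks[{ from, to }] |= mask;  // merge lanes from repeated visits of the edge
	}

	uint32_t getEdge(int from, int to, bool fromReachable) const
	{
		if(!fromReachable)
		{
			return 0;  // an unreachable predecessor contributes no lanes
		}
		auto it = masks.find({ from, to });
		assert(it != masks.end());  // reachable predecessors must have recorded the edge
		return it->second;
	}

private:
	std::map<std::pair<int, int>, uint32_t> masks;
};

int main()
{
	EdgeMasks edges;
	edges.addEdge(1, 2, 0b0011);  // lanes 0 and 1 branch from block 1 to block 2
	edges.addEdge(1, 2, 0b0100);  // lane 2 joins the same edge later
	assert(edges.getEdge(1, 2, /*fromReachable=*/true) == 0b0111);
	assert(edges.getEdge(9, 2, /*fromReachable=*/false) == 0);
	return 0;
}
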
diff --git a/src/Pipeline/SpirvShaderDebug.hpp b/src/Pipeline/SpirvShaderDebug.hpp
index 5418a8b..78d4eaa 100644
--- a/src/Pipeline/SpirvShaderDebug.hpp
+++ b/src/Pipeline/SpirvShaderDebug.hpp
@@ -129,14 +129,14 @@
};
template<>
-struct PrintValue::Ty<sw::SpirvShader::Operand>
+struct PrintValue::Ty<sw::EmitState::Operand>
{
- static inline std::string fmt(const sw::SpirvShader::Operand &v)
+ static inline std::string fmt(const sw::EmitState::Operand &v)
{
return (v.intermediate != nullptr) ? PrintValue::Ty<sw::Intermediate>::fmt(*v.intermediate) : PrintValue::Ty<sw::SIMD::UInt>::fmt(v.UInt(0));
}
- static inline std::vector<Value *> val(const sw::SpirvShader::Operand &v)
+ static inline std::vector<Value *> val(const sw::EmitState::Operand &v)
{
return (v.intermediate != nullptr) ? PrintValue::Ty<sw::Intermediate>::val(*v.intermediate) : PrintValue::Ty<sw::SIMD::UInt>::val(v.UInt(0));
}
diff --git a/src/Pipeline/SpirvShaderGLSLstd450.cpp b/src/Pipeline/SpirvShaderGLSLstd450.cpp
index 672afd8..1205668 100644
--- a/src/Pipeline/SpirvShaderGLSLstd450.cpp
+++ b/src/Pipeline/SpirvShaderGLSLstd450.cpp
@@ -25,7 +25,7 @@
static constexpr float PI = 3.141592653589793f;
-SpirvShader::EmitResult SpirvShader::EmitState::EmitExtGLSLstd450(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitExtGLSLstd450(SpirvShader::InsnIterator insn)
{
auto &type = shader.getType(insn.resultTypeId());
auto &dst = createIntermediate(insn.resultId(), type.componentCount);
@@ -283,7 +283,7 @@
auto I = Operand(shader, *this, insn.word(5));
auto N = Operand(shader, *this, insn.word(6));
auto eta = Operand(shader, *this, insn.word(7));
- Decorations r = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations r = shader.GetDecorationsForId(insn.resultId());
SIMD::Float d = FDot(type.componentCount, I, N);
SIMD::Float k = SIMD::Float(1.0f) - eta.Float(0) * eta.Float(0) * (SIMD::Float(1.0f) - d * d);
@@ -316,7 +316,7 @@
{
auto x = Operand(shader, *this, insn.word(5));
SIMD::Float d = FDot(shader.getObjectType(insn.word(5)).componentCount, x, x);
- Decorations r = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations r = shader.GetDecorationsForId(insn.resultId());
dst.move(0, Sqrt(d, r.RelaxedPrecision));
}
@@ -324,7 +324,7 @@
case GLSLstd450Normalize:
{
auto x = Operand(shader, *this, insn.word(5));
- Decorations r = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations r = shader.GetDecorationsForId(insn.resultId());
SIMD::Float d = FDot(shader.getObjectType(insn.word(5)).componentCount, x, x);
SIMD::Float invLength = SIMD::Float(1.0f) / Sqrt(d, r.RelaxedPrecision);
@@ -339,7 +339,7 @@
{
auto p0 = Operand(shader, *this, insn.word(5));
auto p1 = Operand(shader, *this, insn.word(6));
- Decorations r = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations r = shader.GetDecorationsForId(insn.resultId());
// sqrt(dot(p0-p1, p0-p1))
SIMD::Float d = (p0.Float(0) - p1.Float(0)) * (p0.Float(0) - p1.Float(0));
@@ -355,7 +355,7 @@
case GLSLstd450Modf:
{
auto val = Operand(shader, *this, insn.word(5));
- auto ptrId = Object::ID(insn.word(6));
+ auto ptrId = SpirvShader::Object::ID(insn.word(6));
Intermediate whole(type.componentCount);
@@ -492,7 +492,7 @@
case GLSLstd450Frexp:
{
auto val = Operand(shader, *this, insn.word(5));
- auto ptrId = Object::ID(insn.word(6));
+ auto ptrId = SpirvShader::Object::ID(insn.word(6));
Intermediate exp(type.componentCount);
@@ -550,7 +550,7 @@
case GLSLstd450Sin:
{
auto radians = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -561,7 +561,7 @@
case GLSLstd450Cos:
{
auto radians = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -572,7 +572,7 @@
case GLSLstd450Tan:
{
auto radians = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -583,7 +583,7 @@
case GLSLstd450Asin:
{
auto val = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -594,7 +594,7 @@
case GLSLstd450Acos:
{
auto val = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -605,7 +605,7 @@
case GLSLstd450Atan:
{
auto val = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -616,7 +616,7 @@
case GLSLstd450Sinh:
{
auto val = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -627,7 +627,7 @@
case GLSLstd450Cosh:
{
auto val = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -638,7 +638,7 @@
case GLSLstd450Tanh:
{
auto val = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -649,7 +649,7 @@
case GLSLstd450Asinh:
{
auto val = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -660,7 +660,7 @@
case GLSLstd450Acosh:
{
auto val = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -671,7 +671,7 @@
case GLSLstd450Atanh:
{
auto val = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -683,7 +683,7 @@
{
auto x = Operand(shader, *this, insn.word(5));
auto y = Operand(shader, *this, insn.word(6));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -695,7 +695,7 @@
{
auto x = Operand(shader, *this, insn.word(5));
auto y = Operand(shader, *this, insn.word(6));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -706,7 +706,7 @@
case GLSLstd450Exp:
{
auto val = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -717,7 +717,7 @@
case GLSLstd450Log:
{
auto val = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -728,7 +728,7 @@
case GLSLstd450Exp2:
{
auto val = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -739,7 +739,7 @@
case GLSLstd450Log2:
{
auto val = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -750,7 +750,7 @@
case GLSLstd450Sqrt:
{
auto val = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -761,7 +761,7 @@
case GLSLstd450InverseSqrt:
{
auto val = Operand(shader, *this, insn.word(5));
- Decorations d = shader.GetDecorationsForId(insn.resultId());
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.resultId());
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -891,7 +891,7 @@
break;
case GLSLstd450InterpolateAtCentroid:
{
- Decorations d = shader.GetDecorationsForId(insn.word(5));
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.word(5));
auto ptr = getPointer(insn.word(5));
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -901,7 +901,7 @@
break;
case GLSLstd450InterpolateAtSample:
{
- Decorations d = shader.GetDecorationsForId(insn.word(5));
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.word(5));
auto ptr = getPointer(insn.word(5));
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -911,7 +911,7 @@
break;
case GLSLstd450InterpolateAtOffset:
{
- Decorations d = shader.GetDecorationsForId(insn.word(5));
+ SpirvShader::Decorations d = shader.GetDecorationsForId(insn.word(5));
auto ptr = getPointer(insn.word(5));
for(auto i = 0u; i < type.componentCount; i++)
{
@@ -978,8 +978,8 @@
return interpolant;
}
-SIMD::Float SpirvShader::EmitState::EmitInterpolate(const SIMD::Pointer &ptr, int32_t location, Object::ID paramId,
- uint32_t component, InterpolationType type) const
+SIMD::Float EmitState::EmitInterpolate(const SIMD::Pointer &ptr, int32_t location, SpirvShader::Object::ID paramId,
+ uint32_t component, InterpolationType type) const
{
uint32_t interpolant = (location * 4);
uint32_t components_per_row = shader.GetNumInputComponents(location);
@@ -994,7 +994,7 @@
SIMD::Float y;
SIMD::Float rhw;
- bool multisample = (getMultiSampleCount() > 1);
+ bool multisample = (multiSampleCount > 1);
switch(type)
{
case Centroid:
@@ -1018,7 +1018,7 @@
if(multisample)
{
static constexpr int NUM_SAMPLES = 4;
- ASSERT(getMultiSampleCount() == NUM_SAMPLES);
+ ASSERT(multiSampleCount == NUM_SAMPLES);
auto sampleOperand = Operand(shader, *this, paramId);
ASSERT(sampleOperand.componentCount == 1);
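
The Refract case started in the hunk above follows the GLSL.std.450 definition: k = 1 - eta^2 * (1 - dot(I, N)^2), with a zero result on total internal reflection and eta*I - (eta*dot(I, N) + sqrt(k))*N otherwise. A standalone scalar sketch, independent of the SIMD types used here:

#include <cmath>
#include <cstdio>
#include <vector>

// Scalar sketch of GLSLstd450Refract: returns 0 when k < 0 (total internal
// reflection), otherwise eta*I - (eta*dot(I, N) + sqrt(k)) * N.
std::vector<float> Refract(const std::vector<float> &I, const std::vector<float> &N, float eta)
{
	float d = 0.0f;
	for(size_t i = 0; i < I.size(); i++)
	{
		d += I[i] * N[i];
	}

	float k = 1.0f - eta * eta * (1.0f - d * d);
	std::vector<float> result(I.size(), 0.0f);
	if(k >= 0.0f)
	{
		for(size_t i = 0; i < I.size(); i++)
		{
			result[i] = eta * I[i] - (eta * d + std::sqrt(k)) * N[i];
		}
	}
	return result;
}

int main()
{
	// Entering a denser medium straight on: the refracted ray continues along -N.
	std::vector<float> refracted = Refract({ 0.0f, -1.0f }, { 0.0f, 1.0f }, 0.75f);
	std::printf("refracted = (%f, %f)\n", refracted[0], refracted[1]);
	return 0;
}
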
diff --git a/src/Pipeline/SpirvShaderGroup.cpp b/src/Pipeline/SpirvShaderGroup.cpp
index 86687c8..e298ade 100644
--- a/src/Pipeline/SpirvShaderGroup.cpp
+++ b/src/Pipeline/SpirvShaderGroup.cpp
@@ -64,7 +64,7 @@
}
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitGroupNonUniform(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitGroupNonUniform(InsnIterator insn)
{
ASSERT(SIMD::Width == 4); // EmitGroupNonUniform makes many assumptions that the SIMD vector width is 4
@@ -132,7 +132,7 @@
// Decide between the fast path for constants and the slow path for
// intermediates.
- if(shader.getObject(idId).kind == SpirvShader::Object::Kind::Constant)
+ if(shader.getObject(idId).kind == Object::Kind::Constant)
{
auto id = SIMD::Int(shader.GetConstScalarInt(insn.word(5)));
auto mask = CmpEQ(id, SIMD::Int(0, 1, 2, 3));
@@ -386,9 +386,9 @@
// The remaining instructions are GroupNonUniformArithmetic operations
default:
- auto &type = shader.getType(SpirvShader::Type::ID(insn.word(1)));
+ auto &type = shader.getType(Type::ID(insn.word(1)));
auto operation = static_cast<spv::GroupOperation>(insn.word(4));
- SpirvShader::Operand value(shader, *this, insn.word(5));
+ Operand value(shader, *this, insn.word(5));
auto mask = As<SIMD::UInt>(activeLaneMask()); // Considers helper invocations active. See b/151137030
for(uint32_t i = 0; i < type.componentCount; i++)
@@ -500,7 +500,7 @@
break;
default:
- UNSUPPORTED("EmitGroupNonUniform op: %s", OpcodeName(type.opcode()));
+ UNSUPPORTED("EmitGroupNonUniform op: %s", shader.OpcodeName(type.opcode()));
}
}
break;
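
For the GroupNonUniformArithmetic path above, the reduction only considers lanes in the active mask; a common way to express that is to substitute the operation's identity for inactive lanes, which is assumed in the hypothetical 4-lane model below (the hunk does not show the per-operation bodies):

#include <array>
#include <cassert>
#include <cstdint>

// Hypothetical 4-lane model of a masked Reduce: inactive lanes contribute the
// operation's identity (0 for an integer add), so every lane gets a defined result.
constexpr int kLanes = 4;

std::array<int32_t, kLanes> MaskedAddReduce(const std::array<int32_t, kLanes> &value, uint32_t activeMask)
{
	int32_t sum = 0;
	for(int lane = 0; lane < kLanes; lane++)
	{
		bool active = ((activeMask >> lane) & 1u) != 0;
		sum += active ? value[lane] : 0;  // identity for inactive lanes
	}

	std::array<int32_t, kLanes> result = {};
	for(int lane = 0; lane < kLanes; lane++)
	{
		result[lane] = sum;  // Reduce broadcasts the same result to every lane
	}
	return result;
}

int main()
{
	auto result = MaskedAddReduce({ 1, 2, 3, 4 }, 0b1011);  // lane 2 is inactive
	assert(result[0] == 7 && result[3] == 7);
	return 0;
}
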
diff --git a/src/Pipeline/SpirvShaderImage.cpp b/src/Pipeline/SpirvShaderImage.cpp
index ec9662f..648929b 100644
--- a/src/Pipeline/SpirvShaderImage.cpp
+++ b/src/Pipeline/SpirvShaderImage.cpp
@@ -74,9 +74,9 @@
}
}
-SpirvShader::ImageInstruction::ImageInstruction(InsnIterator insn, const SpirvShader &spirv, const EmitState &state)
+EmitState::ImageInstruction::ImageInstruction(InsnIterator insn, const SpirvShader &shader, const EmitState &state)
: ImageInstructionSignature(parseVariantAndMethod(insn))
- , position(insn.distanceFrom(spirv.begin()))
+ , position(insn.distanceFrom(shader.begin()))
{
if(samplerMethod == Write)
{
@@ -102,7 +102,7 @@
if(state.isSampledImage(sampledImageId)) // Result of an OpSampledImage instruction
{
const SampledImagePointer &sampledImage = state.getSampledImage(sampledImageId);
- imageId = spirv.getObject(sampledImageId).definition.word(3);
+ imageId = shader.getObject(sampledImageId).definition.word(3);
samplerId = sampledImage.samplerId;
}
else // Combined image/sampler
@@ -117,12 +117,12 @@
// `imageId` can represent either a Sampled Image, a samplerless Image, or a pointer to an Image.
// To get to the OpTypeImage operands, traverse the OpTypeSampledImage or OpTypePointer.
- const Type &imageObjectType = spirv.getObjectType(imageId);
+ const Type &imageObjectType = shader.getObjectType(imageId);
const Type &imageReferenceType = (imageObjectType.opcode() == spv::OpTypeSampledImage)
- ? spirv.getType(imageObjectType.definition.word(2))
+ ? shader.getType(imageObjectType.definition.word(2))
: imageObjectType;
const Type &imageType = ((imageReferenceType.opcode() == spv::OpTypePointer)
- ? spirv.getType(imageReferenceType.element)
+ ? shader.getType(imageReferenceType.element)
: imageReferenceType);
ASSERT(imageType.opcode() == spv::OpTypeImage);
@@ -130,14 +130,14 @@
arrayed = imageType.definition.word(5);
imageFormat = imageType.definition.word(8);
- const Object &coordinateObject = spirv.getObject(coordinateId);
- const Type &coordinateType = spirv.getType(coordinateObject);
+ const Object &coordinateObject = shader.getObject(coordinateId);
+ const Type &coordinateType = shader.getType(coordinateObject);
coordinates = coordinateType.componentCount - (isProj() ? 1 : 0);
if(samplerMethod == TexelPointer)
{
sampleId = insn.word(5);
- sample = !spirv.getObject(sampleId).isConstantZero();
+ sample = !shader.getObject(sampleId).isConstantZero();
}
if(isDref())
@@ -147,7 +147,7 @@
if(samplerMethod == Gather)
{
- gatherComponent = !isDref() ? spirv.getObject(insn.word(5)).constantValue[0] : 0;
+ gatherComponent = !isDref() ? shader.getObject(insn.word(5)).constantValue[0] : 0;
}
uint32_t operandsIndex = getImageOperandsIndex(insn);
@@ -179,7 +179,7 @@
operandsIndex += 2;
imageOperands &= ~spv::ImageOperandsGradMask;
- grad = spirv.getObjectType(gradDxId).componentCount;
+ grad = shader.getObjectType(gradDxId).componentCount;
}
if(imageOperands & spv::ImageOperandsConstOffsetMask)
@@ -188,7 +188,7 @@
operandsIndex += 1;
imageOperands &= ~spv::ImageOperandsConstOffsetMask;
- offset = spirv.getObjectType(offsetId).componentCount;
+ offset = shader.getObjectType(offsetId).componentCount;
}
if(imageOperands & spv::ImageOperandsSampleMask)
@@ -198,7 +198,7 @@
operandsIndex += 1;
imageOperands &= ~spv::ImageOperandsSampleMask;
- sample = !spirv.getObject(sampleId).isConstantZero();
+ sample = !shader.getObject(sampleId).isConstantZero();
}
// TODO(b/174475384)
@@ -254,7 +254,7 @@
}
}
-SpirvShader::ImageInstructionSignature SpirvShader::ImageInstruction::parseVariantAndMethod(InsnIterator insn)
+EmitState::ImageInstructionSignature EmitState::ImageInstruction::parseVariantAndMethod(InsnIterator insn)
{
uint32_t imageOperands = getImageOperandsMask(insn);
bool bias = imageOperands & spv::ImageOperandsBiasMask;
@@ -285,7 +285,7 @@
}
// Returns the instruction word index at which the Image Operands mask is located, or 0 if not present.
-uint32_t SpirvShader::ImageInstruction::getImageOperandsIndex(InsnIterator insn)
+uint32_t EmitState::ImageInstruction::getImageOperandsIndex(InsnIterator insn)
{
switch(insn.opcode())
{
@@ -323,13 +323,13 @@
}
}
-uint32_t SpirvShader::ImageInstruction::getImageOperandsMask(InsnIterator insn)
+uint32_t EmitState::ImageInstruction::getImageOperandsMask(InsnIterator insn)
{
uint32_t operandsIndex = getImageOperandsIndex(insn);
return (operandsIndex != 0) ? insn.word(operandsIndex) : 0;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitImageSample(const ImageInstruction &instruction)
+EmitState::EmitResult EmitState::EmitImageSample(const ImageInstruction &instruction)
{
auto &resultType = shader.getType(instruction.resultTypeId);
auto &result = createIntermediate(instruction.resultId, resultType.componentCount);
@@ -348,7 +348,7 @@
return EmitResult::Continue;
}
-void SpirvShader::EmitState::EmitImageSampleUnconditional(Array<SIMD::Float> &out, const ImageInstruction &instruction) const
+void EmitState::EmitImageSampleUnconditional(Array<SIMD::Float> &out, const ImageInstruction &instruction) const
{
auto decorations = shader.GetDecorationsForId(instruction.imageId);
@@ -387,17 +387,17 @@
}
}
-Pointer<Byte> SpirvShader::EmitState::getSamplerDescriptor(Pointer<Byte> imageDescriptor, const ImageInstruction &instruction) const
+Pointer<Byte> EmitState::getSamplerDescriptor(Pointer<Byte> imageDescriptor, const ImageInstruction &instruction) const
{
return ((instruction.samplerId == instruction.imageId) || (instruction.samplerId == 0)) ? imageDescriptor : getImage(instruction.samplerId).getUniformPointer();
}
-Pointer<Byte> SpirvShader::EmitState::getSamplerDescriptor(Pointer<Byte> imageDescriptor, const ImageInstruction &instruction, int laneIdx) const
+Pointer<Byte> EmitState::getSamplerDescriptor(Pointer<Byte> imageDescriptor, const ImageInstruction &instruction, int laneIdx) const
{
return ((instruction.samplerId == instruction.imageId) || (instruction.samplerId == 0)) ? imageDescriptor : getImage(instruction.samplerId).getPointerForLane(laneIdx);
}
-Pointer<Byte> SpirvShader::EmitState::lookupSamplerFunction(Pointer<Byte> imageDescriptor, Pointer<Byte> samplerDescriptor, const ImageInstruction &instruction) const
+Pointer<Byte> EmitState::lookupSamplerFunction(Pointer<Byte> imageDescriptor, Pointer<Byte> samplerDescriptor, const ImageInstruction &instruction) const
{
Int samplerId = (instruction.samplerId != 0) ? *Pointer<rr::Int>(samplerDescriptor + OFFSET(vk::SampledImageDescriptor, samplerId)) : Int(0);
@@ -415,7 +415,7 @@
return cache.function;
}
-void SpirvShader::EmitState::callSamplerFunction(Pointer<Byte> samplerFunction, Array<SIMD::Float> &out, Pointer<Byte> imageDescriptor, const ImageInstruction &instruction) const
+void EmitState::callSamplerFunction(Pointer<Byte> samplerFunction, Array<SIMD::Float> &out, Pointer<Byte> imageDescriptor, const ImageInstruction &instruction) const
{
Array<SIMD::Float> in(16); // Maximum 16 input parameter components.
@@ -502,7 +502,7 @@
Call<ImageSampler>(samplerFunction, texture, &in, &out, routine->constants);
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitImageQuerySizeLod(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitImageQuerySizeLod(InsnIterator insn)
{
auto &resultTy = shader.getType(insn.resultTypeId());
auto imageId = Object::ID(insn.word(3));
@@ -514,7 +514,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitImageQuerySize(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitImageQuerySize(InsnIterator insn)
{
auto &resultTy = shader.getType(insn.resultTypeId());
auto imageId = Object::ID(insn.word(3));
@@ -526,7 +526,7 @@
return EmitResult::Continue;
}
-void SpirvShader::EmitState::GetImageDimensions(const Type &resultTy, Object::ID imageId, Object::ID lodId, Intermediate &dst) const
+void EmitState::GetImageDimensions(const Type &resultTy, Object::ID imageId, Object::ID lodId, Intermediate &dst) const
{
auto &image = shader.getObject(imageId);
auto &imageType = shader.getType(image);
@@ -535,7 +535,7 @@
bool isArrayed = imageType.definition.word(5) != 0;
uint32_t dimensions = resultTy.componentCount - (isArrayed ? 1 : 0);
- const DescriptorDecorations &d = shader.descriptorDecorations.at(imageId);
+ const SpirvShader::DescriptorDecorations &d = shader.descriptorDecorations.at(imageId);
auto descriptorType = routine->pipelineLayout->getDescriptorType(d.DescriptorSet, d.Binding);
Pointer<Byte> descriptor = getPointer(imageId).getUniformPointer();
@@ -588,13 +588,13 @@
}
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitImageQueryLevels(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitImageQueryLevels(InsnIterator insn)
{
auto &resultTy = shader.getType(insn.resultTypeId());
ASSERT(resultTy.componentCount == 1);
auto imageId = Object::ID(insn.word(3));
- const DescriptorDecorations &d = shader.descriptorDecorations.at(imageId);
+ const SpirvShader::DescriptorDecorations &d = shader.descriptorDecorations.at(imageId);
auto descriptorType = routine->pipelineLayout->getDescriptorType(d.DescriptorSet, d.Binding);
Pointer<Byte> descriptor = getPointer(imageId).getUniformPointer();
@@ -616,7 +616,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitImageQuerySamples(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitImageQuerySamples(InsnIterator insn)
{
auto &resultTy = shader.getType(insn.resultTypeId());
ASSERT(resultTy.componentCount == 1);
@@ -626,7 +626,7 @@
ASSERT(imageTy.definition.word(3) == spv::Dim2D);
ASSERT(imageTy.definition.word(6 /* MS */) == 1);
- const DescriptorDecorations &d = shader.descriptorDecorations.at(imageId);
+ const SpirvShader::DescriptorDecorations &d = shader.descriptorDecorations.at(imageId);
auto descriptorType = routine->pipelineLayout->getDescriptorType(d.DescriptorSet, d.Binding);
Pointer<Byte> descriptor = getPointer(imageId).getUniformPointer();
@@ -651,7 +651,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitState::TexelAddressData SpirvShader::EmitState::setupTexelAddressData(SIMD::Int rowPitch, SIMD::Int slicePitch, SIMD::Int samplePitch, ImageInstructionSignature instruction, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, const SpirvRoutine *routine)
+EmitState::TexelAddressData EmitState::setupTexelAddressData(SIMD::Int rowPitch, SIMD::Int slicePitch, SIMD::Int samplePitch, ImageInstructionSignature instruction, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, const SpirvRoutine *routine)
{
TexelAddressData data;
@@ -711,7 +711,7 @@
return data;
}
-SIMD::Pointer SpirvShader::EmitState::GetNonUniformTexelAddress(ImageInstructionSignature instruction, SIMD::Pointer descriptor, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, OutOfBoundsBehavior outOfBoundsBehavior, SIMD::Int activeLaneMask, const SpirvRoutine *routine)
+SIMD::Pointer EmitState::GetNonUniformTexelAddress(ImageInstructionSignature instruction, SIMD::Pointer descriptor, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, OutOfBoundsBehavior outOfBoundsBehavior, SIMD::Int activeLaneMask, const SpirvRoutine *routine)
{
const bool useStencilAspect = (imageFormat == VK_FORMAT_S8_UINT);
auto rowPitch = (descriptor + (useStencilAspect
@@ -772,7 +772,7 @@
return SIMD::Pointer(imageBase) + texelData.ptrOffset;
}
-SIMD::Pointer SpirvShader::EmitState::GetTexelAddress(ImageInstructionSignature instruction, Pointer<Byte> descriptor, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, OutOfBoundsBehavior outOfBoundsBehavior, const SpirvRoutine *routine)
+SIMD::Pointer EmitState::GetTexelAddress(ImageInstructionSignature instruction, Pointer<Byte> descriptor, SIMD::Int coordinate[], SIMD::Int sample, vk::Format imageFormat, OutOfBoundsBehavior outOfBoundsBehavior, const SpirvRoutine *routine)
{
const bool useStencilAspect = (imageFormat == VK_FORMAT_S8_UINT);
auto rowPitch = SIMD::Int(*Pointer<Int>(descriptor + (useStencilAspect
@@ -830,7 +830,7 @@
return SIMD::Pointer(imageBase, imageSizeInBytes, texelData.ptrOffset);
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitImageRead(const ImageInstruction &instruction)
+EmitState::EmitResult EmitState::EmitImageRead(const ImageInstruction &instruction)
{
auto &resultType = shader.getObjectType(instruction.resultId);
auto &image = shader.getObject(instruction.imageId);
@@ -840,7 +840,7 @@
auto dim = static_cast<spv::Dim>(instruction.dim);
auto coordinate = Operand(shader, *this, instruction.coordinateId);
- const DescriptorDecorations &d = shader.descriptorDecorations.at(instruction.imageId);
+ const SpirvShader::DescriptorDecorations &d = shader.descriptorDecorations.at(instruction.imageId);
// For subpass data, format in the instruction is spv::ImageFormatUnknown. Get it from
// the renderpass data instead. In all other cases, we can use the format in the instruction.
@@ -1245,7 +1245,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitImageWrite(const ImageInstruction &instruction) const
+EmitState::EmitResult EmitState::EmitImageWrite(const ImageInstruction &instruction)
{
auto &image = shader.getObject(instruction.imageId);
auto &imageType = shader.getType(image);
@@ -1321,7 +1321,7 @@
return EmitResult::Continue;
}
-void SpirvShader::EmitState::WriteImage(ImageInstructionSignature instruction, Pointer<Byte> descriptor, const Pointer<SIMD::Int> &coord, const Pointer<SIMD::Int> &texelAndMask, vk::Format imageFormat)
+void EmitState::WriteImage(ImageInstructionSignature instruction, Pointer<Byte> descriptor, const Pointer<SIMD::Int> &coord, const Pointer<SIMD::Int> &texelAndMask, vk::Format imageFormat)
{
SIMD::Int texel[4];
texel[0] = texelAndMask[0];
@@ -1551,7 +1551,7 @@
UNREACHABLE("texelSize: %d", int(texelSize));
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitImageTexelPointer(const ImageInstruction &instruction)
+EmitState::EmitResult EmitState::EmitImageTexelPointer(const ImageInstruction &instruction)
{
auto coordinate = Operand(shader, *this, instruction.coordinateId);
@@ -1580,7 +1580,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitSampledImage(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitSampledImage(InsnIterator insn)
{
Object::ID resultId = insn.word(2);
Object::ID imageId = insn.word(3);
@@ -1592,7 +1592,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitImage(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitImage(InsnIterator insn)
{
Object::ID resultId = insn.word(2);
Object::ID imageId = insn.word(3);
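
setupTexelAddressData and the two GetTexelAddress variants above combine the per-dimension pitches read from the image descriptor into a byte offset. A hypothetical scalar model of that addressing (pitch values are illustrative and the cube/array special cases are ignored):

#include <cassert>
#include <cstdint>

// Hypothetical scalar model of texel addressing: a texel's byte offset is built
// from the per-dimension pitches stored alongside the image.
int64_t TexelByteOffset(int x, int y, int z, int sample,
                        int bytesPerTexel, int rowPitch, int slicePitch, int samplePitch)
{
	return int64_t(x) * bytesPerTexel +
	       int64_t(y) * rowPitch +
	       int64_t(z) * slicePitch +  // also covers array layers for arrayed images
	       int64_t(sample) * samplePitch;
}

int main()
{
	// 4-byte texels in a 256-byte row, 16 rows per slice, single-sampled.
	assert(TexelByteOffset(2, 3, 0, 0, 4, 256, 256 * 16, 0) == 2 * 4 + 3 * 256);
	return 0;
}
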
diff --git a/src/Pipeline/SpirvShaderMemory.cpp b/src/Pipeline/SpirvShaderMemory.cpp
index 5921e6e..ea46052 100644
--- a/src/Pipeline/SpirvShaderMemory.cpp
+++ b/src/Pipeline/SpirvShaderMemory.cpp
@@ -23,7 +23,7 @@
namespace sw {
-SpirvShader::EmitResult SpirvShader::EmitState::EmitLoad(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitLoad(InsnIterator insn)
{
bool atomic = (insn.opcode() == spv::OpAtomicLoad);
Object::ID resultId = insn.word(2);
@@ -50,18 +50,17 @@
{
Object::ID semanticsId = insn.word(5);
auto memorySemantics = static_cast<spv::MemorySemanticsMask>(shader.getObject(semanticsId).constantValue[0]);
- memoryOrder = MemoryOrder(memorySemantics);
+ memoryOrder = shader.MemoryOrder(memorySemantics);
}
auto ptr = GetPointerToData(pointerId, 0, false);
- bool interleavedByLane = IsStorageInterleavedByLane(pointerTy.storageClass);
auto robustness = shader.getOutOfBoundsBehavior(pointerId, routine->pipelineLayout);
if(result.kind == Object::Kind::Pointer)
{
- shader.VisitMemoryObject(pointerId, true, [&](const MemoryElement &el) {
+ shader.VisitMemoryObject(pointerId, true, [&](const SpirvShader::MemoryElement &el) {
ASSERT(el.index == 0);
- auto p = GetElementPointer(ptr, el.offset, interleavedByLane);
+ auto p = GetElementPointer(ptr, el.offset, pointerTy.storageClass);
createPointer(resultId, p.Load<SIMD::Pointer>(robustness, activeLaneMask(), atomic, memoryOrder, sizeof(void *)));
});
@@ -70,8 +69,8 @@
else
{
auto &dst = createIntermediate(resultId, resultTy.componentCount);
- shader.VisitMemoryObject(pointerId, false, [&](const MemoryElement &el) {
- auto p = GetElementPointer(ptr, el.offset, interleavedByLane);
+ shader.VisitMemoryObject(pointerId, false, [&](const SpirvShader::MemoryElement &el) {
+ auto p = GetElementPointer(ptr, el.offset, pointerTy.storageClass);
dst.move(el.index, p.Load<SIMD::Float>(robustness, activeLaneMask(), atomic, memoryOrder));
});
@@ -81,7 +80,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitStore(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitStore(InsnIterator insn)
{
bool atomic = (insn.opcode() == spv::OpAtomicStore);
Object::ID pointerId = insn.word(1);
@@ -92,7 +91,7 @@
{
Object::ID semanticsId = insn.word(3);
auto memorySemantics = static_cast<spv::MemorySemanticsMask>(shader.getObject(semanticsId).constantValue[0]);
- memoryOrder = MemoryOrder(memorySemantics);
+ memoryOrder = shader.MemoryOrder(memorySemantics);
}
const auto &value = Operand(shader, *this, objectId);
@@ -102,7 +101,7 @@
return EmitResult::Continue;
}
-void SpirvShader::EmitState::Store(Object::ID pointerId, const Operand &value, bool atomic, std::memory_order memoryOrder) const
+void EmitState::Store(Object::ID pointerId, const Operand &value, bool atomic, std::memory_order memoryOrder) const
{
auto &pointer = shader.getObject(pointerId);
auto &pointerTy = shader.getType(pointer);
@@ -111,11 +110,10 @@
ASSERT(!atomic || elementTy.opcode() == spv::OpTypeInt); // Vulkan 1.1: "Atomic instructions must declare a scalar 32-bit integer type, for the value pointed to by Pointer."
auto ptr = GetPointerToData(pointerId, 0, false);
- bool interleavedByLane = IsStorageInterleavedByLane(pointerTy.storageClass);
auto robustness = shader.getOutOfBoundsBehavior(pointerId, routine->pipelineLayout);
SIMD::Int mask = activeLaneMask();
- if(!StoresInHelperInvocation(pointerTy.storageClass))
+ if(!shader.StoresInHelperInvocation(pointerTy.storageClass))
{
mask = mask & storesAndAtomicsMask();
}
@@ -124,22 +122,22 @@
if(value.isPointer())
{
- shader.VisitMemoryObject(pointerId, true, [&](const MemoryElement &el) {
+ shader.VisitMemoryObject(pointerId, true, [&](const SpirvShader::MemoryElement &el) {
ASSERT(el.index == 0);
- auto p = GetElementPointer(ptr, el.offset, interleavedByLane);
+ auto p = GetElementPointer(ptr, el.offset, pointerTy.storageClass);
p.Store(value.Pointer(), robustness, mask, atomic, memoryOrder);
});
}
else
{
- shader.VisitMemoryObject(pointerId, false, [&](const MemoryElement &el) {
- auto p = GetElementPointer(ptr, el.offset, interleavedByLane);
+ shader.VisitMemoryObject(pointerId, false, [&](const SpirvShader::MemoryElement &el) {
+ auto p = GetElementPointer(ptr, el.offset, pointerTy.storageClass);
p.Store(value.Float(el.index), robustness, mask, atomic, memoryOrder);
});
}
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitVariable(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitVariable(InsnIterator insn)
{
Object::ID resultId = insn.word(2);
auto &object = shader.getObject(resultId);
@@ -173,7 +171,7 @@
auto &dst = routine->getVariable(resultId);
int offset = 0;
shader.VisitInterface(resultId,
- [&](const Decorations &d, AttribType type) {
+ [&](const Decorations &d, SpirvShader::AttribType type) {
auto scalarSlot = d.Location << 2 | d.Component;
dst[offset++] = routine->inputs[scalarSlot];
});
@@ -243,12 +241,11 @@
case spv::StorageClassFunction:
case spv::StorageClassWorkgroup:
{
- bool interleavedByLane = IsStorageInterleavedByLane(objectTy.storageClass);
auto ptr = GetPointerToData(resultId, 0, false);
Operand initialValue(shader, *this, initializerId);
- shader.VisitMemoryObject(resultId, false, [&](const MemoryElement &el) {
- auto p = GetElementPointer(ptr, el.offset, interleavedByLane);
+ shader.VisitMemoryObject(resultId, false, [&](const SpirvShader::MemoryElement &el) {
+ auto p = GetElementPointer(ptr, el.offset, objectTy.storageClass);
auto robustness = OutOfBoundsBehavior::UndefinedBehavior; // Local variables are always within bounds.
p.Store(initialValue.Float(el.index), robustness, activeLaneMask());
});
@@ -269,7 +266,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitCopyMemory(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitCopyMemory(InsnIterator insn)
{
Object::ID dstPtrId = insn.word(1);
Object::ID srcPtrId = insn.word(2);
@@ -277,23 +274,21 @@
auto &srcPtrTy = shader.getObjectType(srcPtrId);
ASSERT(dstPtrTy.element == srcPtrTy.element);
- bool dstInterleavedByLane = IsStorageInterleavedByLane(dstPtrTy.storageClass);
- bool srcInterleavedByLane = IsStorageInterleavedByLane(srcPtrTy.storageClass);
auto dstPtr = GetPointerToData(dstPtrId, 0, false);
auto srcPtr = GetPointerToData(srcPtrId, 0, false);
std::unordered_map<uint32_t, uint32_t> srcOffsets;
- shader.VisitMemoryObject(srcPtrId, false, [&](const MemoryElement &el) { srcOffsets[el.index] = el.offset; });
+ shader.VisitMemoryObject(srcPtrId, false, [&](const SpirvShader::MemoryElement &el) { srcOffsets[el.index] = el.offset; });
- shader.VisitMemoryObject(dstPtrId, false, [&](const MemoryElement &el) {
+ shader.VisitMemoryObject(dstPtrId, false, [&](const SpirvShader::MemoryElement &el) {
auto it = srcOffsets.find(el.index);
ASSERT(it != srcOffsets.end());
auto srcOffset = it->second;
auto dstOffset = el.offset;
- auto dst = GetElementPointer(dstPtr, dstOffset, dstInterleavedByLane);
- auto src = GetElementPointer(srcPtr, srcOffset, srcInterleavedByLane);
+ auto dst = GetElementPointer(dstPtr, dstOffset, dstPtrTy.storageClass);
+ auto src = GetElementPointer(srcPtr, srcOffset, srcPtrTy.storageClass);
// TODO(b/131224163): Optimize based on src/dst storage classes.
auto robustness = OutOfBoundsBehavior::RobustBufferAccess;
@@ -304,7 +299,7 @@
return EmitResult::Continue;
}
-SpirvShader::EmitResult SpirvShader::EmitState::EmitMemoryBarrier(InsnIterator insn)
+EmitState::EmitResult EmitState::EmitMemoryBarrier(InsnIterator insn)
{
auto semantics = spv::MemorySemanticsMask(shader.GetConstScalarInt(insn.word(2)));
// TODO(b/176819536): We probably want to consider the memory scope here.
@@ -313,7 +308,7 @@
return EmitResult::Continue;
}
-void SpirvShader::VisitMemoryObjectInner(sw::SpirvShader::Type::ID id, sw::SpirvShader::Decorations d, uint32_t &index, uint32_t offset, bool resultIsPointer, const MemoryVisitor &f) const
+void SpirvShader::VisitMemoryObjectInner(Type::ID id, Decorations d, uint32_t &index, uint32_t offset, bool resultIsPointer, const MemoryVisitor &f) const
{
ApplyDecorationsForId(&d, id);
const auto &type = getType(id);
@@ -407,7 +402,7 @@
}
}
-SIMD::Pointer SpirvShader::EmitState::GetPointerToData(Object::ID id, SIMD::Int arrayIndices, bool nonUniform) const
+SIMD::Pointer EmitState::GetPointerToData(Object::ID id, SIMD::Int arrayIndices, bool nonUniform) const
{
auto &object = shader.getObject(id);
switch(object.kind)
@@ -493,7 +488,7 @@
}
}
-void SpirvShader::EmitState::OffsetToElement(SIMD::Pointer &ptr, Object::ID elementId, int32_t arrayStride) const
+void EmitState::OffsetToElement(SIMD::Pointer &ptr, Object::ID elementId, int32_t arrayStride) const
{
if(elementId != 0 && arrayStride != 0)
{
@@ -511,11 +506,11 @@
}
}
-void SpirvShader::EmitState::Fence(spv::MemorySemanticsMask semantics) const
+void EmitState::Fence(spv::MemorySemanticsMask semantics) const
{
if(semantics != spv::MemorySemanticsMaskNone)
{
- rr::Fence(MemoryOrder(semantics));
+ rr::Fence(shader.MemoryOrder(semantics));
}
}
@@ -572,9 +567,9 @@
}
}
-sw::SIMD::Pointer SpirvShader::GetElementPointer(sw::SIMD::Pointer structure, uint32_t offset, bool interleavedByLane)
+sw::SIMD::Pointer EmitState::GetElementPointer(sw::SIMD::Pointer structure, uint32_t offset, spv::StorageClass storageClass)
{
- if(interleavedByLane)
+ if(IsStorageInterleavedByLane(storageClass))
{
for(int i = 0; i < SIMD::Width; i++)
{
@@ -589,7 +584,7 @@
}
}
-bool SpirvShader::IsStorageInterleavedByLane(spv::StorageClass storageClass)
+bool EmitState::IsStorageInterleavedByLane(spv::StorageClass storageClass)
{
switch(storageClass)
{
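
GetElementPointer now receives the storage class and decides internally, via IsStorageInterleavedByLane, whether a component is replicated per lane or shared. A hypothetical index-level model of the two layouts (the lane count and layout details are assumptions, not taken from the diff):

#include <cassert>
#include <cstddef>

// Hypothetical model of the two addressing modes: lane-interleaved storage keeps all
// lanes of component 0 together, then all lanes of component 1, and so on, while
// non-interleaved storage uses the same component offset for every lane.
constexpr int kLanes = 4;

size_t ElementIndex(size_t componentOffset, int lane, bool interleavedByLane)
{
	if(interleavedByLane)
	{
		return componentOffset * kLanes + size_t(lane);  // a per-lane copy of each component
	}
	return componentOffset;  // one shared element, e.g. in an external buffer
}

int main()
{
	// Component 2 of lane 3 in an interleaved private variable...
	assert(ElementIndex(2, 3, true) == 2 * kLanes + 3);
	// ...versus the shared element in non-interleaved storage.
	assert(ElementIndex(2, 3, false) == 2);
	return 0;
}
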
diff --git a/src/Pipeline/SpirvShaderSampling.cpp b/src/Pipeline/SpirvShaderSampling.cpp
index c2dbf7c..47b0713 100644
--- a/src/Pipeline/SpirvShaderSampling.cpp
+++ b/src/Pipeline/SpirvShaderSampling.cpp
@@ -30,7 +30,7 @@
namespace sw {
-SpirvShader::ImageSampler *SpirvShader::EmitState::getImageSampler(const vk::Device *device, uint32_t signature, uint32_t samplerId, uint32_t imageViewId)
+EmitState::ImageSampler *EmitState::getImageSampler(const vk::Device *device, uint32_t signature, uint32_t samplerId, uint32_t imageViewId)
{
ImageInstructionSignature instruction(signature);
ASSERT(imageViewId != 0 && (samplerId != 0 || instruction.samplerMethod == Fetch || instruction.samplerMethod == Write));
@@ -125,7 +125,7 @@
return (ImageSampler *)(routine->getEntry());
}
-std::shared_ptr<rr::Routine> SpirvShader::EmitState::emitWriteRoutine(ImageInstructionSignature instruction, const Sampler &samplerState)
+std::shared_ptr<rr::Routine> EmitState::emitWriteRoutine(ImageInstructionSignature instruction, const Sampler &samplerState)
{
// TODO(b/129523279): Hold a separate mutex lock for the sampler being built.
rr::Function<Void(Pointer<Byte>, Pointer<SIMD::Float>, Pointer<SIMD::Float>, Pointer<Byte>)> function;
@@ -141,7 +141,7 @@
return function("sampler");
}
-std::shared_ptr<rr::Routine> SpirvShader::EmitState::emitSamplerRoutine(ImageInstructionSignature instruction, const Sampler &samplerState)
+std::shared_ptr<rr::Routine> EmitState::emitSamplerRoutine(ImageInstructionSignature instruction, const Sampler &samplerState)
{
// TODO(b/129523279): Hold a separate mutex lock for the sampler being built.
rr::Function<Void(Pointer<Byte>, Pointer<SIMD::Float>, Pointer<SIMD::Float>, Pointer<Byte>)> function;
@@ -263,7 +263,7 @@
return function("sampler");
}
-sw::FilterType SpirvShader::EmitState::convertFilterMode(const vk::SamplerState *samplerState, VkImageViewType imageViewType, SamplerMethod samplerMethod)
+sw::FilterType EmitState::convertFilterMode(const vk::SamplerState *samplerState, VkImageViewType imageViewType, SamplerMethod samplerMethod)
{
if(samplerMethod == Gather)
{
@@ -316,7 +316,7 @@
return FILTER_POINT;
}
-sw::MipmapType SpirvShader::EmitState::convertMipmapMode(const vk::SamplerState *samplerState)
+sw::MipmapType EmitState::convertMipmapMode(const vk::SamplerState *samplerState)
{
if(!samplerState)
{
@@ -339,7 +339,7 @@
}
}
-sw::AddressingMode SpirvShader::EmitState::convertAddressingMode(int coordinateIndex, const vk::SamplerState *samplerState, VkImageViewType imageViewType)
+sw::AddressingMode EmitState::convertAddressingMode(int coordinateIndex, const vk::SamplerState *samplerState, VkImageViewType imageViewType)
{
switch(imageViewType)
{