Accumulate compile-time constant and runtime offsets separately

This makes WalkAccessChain slightly easier on the backend -- there is
less constant folding to do.
Bug: b/124388146
Change-Id: I4e76c3e494278e1399b8a86134b652c2a6d96d8c
Reviewed-on: https://swiftshader-review.googlesource.com/c/24988
Tested-by: Chris Forbes <chrisforbes@google.com>
Reviewed-by: Nicolas Capens <nicolascapens@google.com>
diff --git a/src/Pipeline/SpirvShader.cpp b/src/Pipeline/SpirvShader.cpp
index 87dfbf2..c9bd086 100644
--- a/src/Pipeline/SpirvShader.cpp
+++ b/src/Pipeline/SpirvShader.cpp
@@ -493,12 +493,13 @@
// TODO: think about explicit layout (UBO/SSBO) storage classes
// TODO: avoid doing per-lane work in some cases if we can?
- Int4 res = Int4(0);
+ int constantOffset = 0;
+ Int4 dynamicOffset = Int4(0);
auto & baseObject = getObject(id);
auto typeId = baseObject.definition.word(1);
if (baseObject.kind == Object::Kind::Value)
- res += As<Int4>(routine->getValue(id)[0]);
+ dynamicOffset += As<Int4>(routine->getValue(id)[0]);
for (auto i = 0u; i < numIndexes; i++)
{
@@ -512,7 +513,7 @@
for (auto j = 0; j < memberIndex; j++) {
offsetIntoStruct += getType(type.definition.word(2 + memberIndex)).sizeInComponents;
}
- res += Int4(offsetIntoStruct);
+ constantOffset += offsetIntoStruct;
break;
}
@@ -523,9 +524,9 @@
auto stride = getType(type.definition.word(2)).sizeInComponents;
auto & obj = getObject(indexIds[i]);
if (obj.kind == Object::Kind::Constant)
- res += Int4(stride * GetConstantInt(indexIds[i]));
+ constantOffset += stride * GetConstantInt(indexIds[i]);
else
- res += Int4(stride) * As<Int4>(routine->getValue(indexIds[i])[0]);
+ dynamicOffset += Int4(stride) * As<Int4>(routine->getValue(indexIds[i])[0]);
break;
}
@@ -534,7 +535,7 @@
}
}
- return res;
+ return dynamicOffset + Int4(constantOffset);
}
void SpirvShader::Decorations::Apply(spv::Decoration decoration, uint32_t arg)