Separate Operand store logic from EmitStore()

The new Store() helper function can store Operand instances independently
of SPIR-V instructions. This allows the logic to be reused by other
instructions that need to store to memory, such as Modf and Frexp.
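
As a sketch of the intended reuse (EmitModf, wholeId, and the operand
index below are hypothetical; only Store(), Operand, Object::ID, and
EmitState come from this change), a Modf-style emitter could write the
integral part through its pointer operand without synthesizing an
OpStore:

    // Inside a hypothetical SpirvShader::EmitModf(insn, state):
    Object::ID ptrId = insn.word(6);      // assumed index of Modf's 'i' pointer operand
    Operand whole(this, state, wholeId);  // wholeId: object holding the integral part
    // Plain (non-atomic) store with relaxed memory ordering.
    Store(ptrId, whole, false, std::memory_order_relaxed, state);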

Bug: b/153641251
Change-Id: I453bb7cd24ba26b9a23d73568dc3374a52a36073
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/43695
Presubmit-Ready: Nicolas Capens <nicolascapens@google.com>
Kokoro-Result: kokoro <noreply+kokoro@google.com>
Tested-by: Nicolas Capens <nicolascapens@google.com>
Reviewed-by: Ben Clayton <bclayton@google.com>
diff --git a/src/Pipeline/SpirvShader.hpp b/src/Pipeline/SpirvShader.hpp
index 7fa2a55..6051176 100644
--- a/src/Pipeline/SpirvShader.hpp
+++ b/src/Pipeline/SpirvShader.hpp
@@ -1175,6 +1175,10 @@
 	void EvalSpecConstantUnaryOp(InsnIterator insn);
 	void EvalSpecConstantBinaryOp(InsnIterator insn);
 
+	// Helper for implementing OpStore. It does not take an InsnIterator, so
+	// it can also store operands that are independent of any instruction.
+	void Store(Object::ID pointerId, const Operand &value, bool atomic, std::memory_order memoryOrder, EmitState *state) const;
+
 	// LoadPhi loads the phi values from the alloca storage and places the
 	// load values into the intermediate with the phi's result id.
 	void LoadPhi(InsnIterator insn, EmitState *state) const;
diff --git a/src/Pipeline/SpirvShaderMemory.cpp b/src/Pipeline/SpirvShaderMemory.cpp
index b37f163..8407166 100644
--- a/src/Pipeline/SpirvShaderMemory.cpp
+++ b/src/Pipeline/SpirvShaderMemory.cpp
@@ -71,10 +71,6 @@
 	bool atomic = (insn.opcode() == spv::OpAtomicStore);
 	Object::ID pointerId = insn.word(1);
 	Object::ID objectId = insn.word(atomic ? 4 : 2);
-	auto &object = getObject(objectId);
-	auto &pointer = getObject(pointerId);
-	auto &pointerTy = getType(pointer);
-	auto &elementTy = getType(pointerTy.element);
 	std::memory_order memoryOrder = std::memory_order_relaxed;
 
 	if(atomic)
@@ -84,6 +80,19 @@
 		memoryOrder = MemoryOrder(memorySemantics);
 	}
 
+	const auto &value = Operand(this, state, objectId);
+
+	Store(pointerId, value, atomic, memoryOrder, state);
+
+	return EmitResult::Continue;
+}
+
+void SpirvShader::Store(Object::ID pointerId, const Operand &value, bool atomic, std::memory_order memoryOrder, EmitState *state) const
+{
+	auto &pointer = getObject(pointerId);
+	auto &pointerTy = getType(pointer);
+	auto &elementTy = getType(pointerTy.element);
+
 	ASSERT(!atomic || elementTy.opcode() == spv::OpTypeInt);  // Vulkan 1.1: "Atomic instructions must declare a scalar 32-bit integer type, for the value pointed to by Pointer."
 
 	auto ptr = GetPointerToData(pointerId, 0, state);
@@ -96,28 +105,11 @@
 		mask = mask & state->storesAndAtomicsMask();
 	}
 
-	if(object.kind == Object::Kind::Constant)
-	{
-		// Constant source data.
-		const uint32_t *src = object.constantValue.data();
-		VisitMemoryObject(pointerId, [&](const MemoryElement &el) {
-			auto p = ptr + el.offset;
-			if(interleavedByLane) { p = InterleaveByLane(p); }
-			p.Store(SIMD::Int(src[el.index]), robustness, mask, atomic, memoryOrder);
-		});
-	}
-	else
-	{
-		// Intermediate source data.
-		auto &src = state->getIntermediate(objectId);
-		VisitMemoryObject(pointerId, [&](const MemoryElement &el) {
-			auto p = ptr + el.offset;
-			if(interleavedByLane) { p = InterleaveByLane(p); }
-			p.Store(src.Float(el.index), robustness, mask, atomic, memoryOrder);
-		});
-	}
-
-	return EmitResult::Continue;
+	VisitMemoryObject(pointerId, [&](const MemoryElement &el) {
+		auto p = ptr + el.offset;
+		if(interleavedByLane) { p = InterleaveByLane(p); }
+		p.Store(value.Float(el.index), robustness, mask, atomic, memoryOrder);
+	});
 }
 
 SpirvShader::EmitResult SpirvShader::EmitVariable(InsnIterator insn, EmitState *state) const