SpirvShader: Move memory ops to new cpp file

Bug: b/145336353
Change-Id: I19b9ee325d349e5a0c6ca79cdac2ee1ac8d227a2
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/38813
Kokoro-Presubmit: kokoro <noreply+kokoro@google.com>
Reviewed-by: Alexis Hétu <sugoi@google.com>
Tested-by: Ben Clayton <bclayton@google.com>
diff --git a/src/Pipeline/BUILD.gn b/src/Pipeline/BUILD.gn
index 39283ed..eb03453 100644
--- a/src/Pipeline/BUILD.gn
+++ b/src/Pipeline/BUILD.gn
@@ -42,6 +42,7 @@
     "SpirvShader.cpp",
     "SpirvShaderControlFlow.cpp",
     "SpirvShaderGLSLstd450.cpp",
+    "SpirvShaderMemory.cpp",
     "SpirvShaderSampling.cpp",
     "VertexProgram.cpp",
     "VertexRoutine.cpp",
diff --git a/src/Pipeline/SpirvShader.cpp b/src/Pipeline/SpirvShader.cpp
index 115e57b..8f59ec4 100644
--- a/src/Pipeline/SpirvShader.cpp
+++ b/src/Pipeline/SpirvShader.cpp
@@ -936,59 +936,7 @@
 		}
 	}
 
-	bool SpirvShader::StoresInHelperInvocation(spv::StorageClass storageClass)
-	{
-		switch (storageClass)
-		{
-		case spv::StorageClassUniform:
-		case spv::StorageClassStorageBuffer:
-		case spv::StorageClassImage:
-			return false;
-		default:
-			return true;
-		}
-	}
-
-	bool SpirvShader::IsExplicitLayout(spv::StorageClass storageClass)
-	{
-		switch (storageClass)
-		{
-		case spv::StorageClassUniform:
-		case spv::StorageClassStorageBuffer:
-		case spv::StorageClassPushConstant:
-			return true;
-		default:
-			return false;
-		}
-	}
-
-	sw::SIMD::Pointer SpirvShader::InterleaveByLane(sw::SIMD::Pointer p)
-	{
-		p *= sw::SIMD::Width;
-		p.staticOffsets[0] += 0 * sizeof(float);
-		p.staticOffsets[1] += 1 * sizeof(float);
-		p.staticOffsets[2] += 2 * sizeof(float);
-		p.staticOffsets[3] += 3 * sizeof(float);
-		return p;
-	}
-
-	bool SpirvShader::IsStorageInterleavedByLane(spv::StorageClass storageClass)
-	{
-		switch (storageClass)
-		{
-		case spv::StorageClassUniform:
-		case spv::StorageClassStorageBuffer:
-		case spv::StorageClassPushConstant:
-		case spv::StorageClassWorkgroup:
-		case spv::StorageClassImage:
-			return false;
-		default:
-			return true;
-		}
-	}
-
-	template<typename F>
-	int SpirvShader::VisitInterfaceInner(Type::ID id, Decorations d, F f) const
+	int SpirvShader::VisitInterfaceInner(Type::ID id, Decorations d, const InterfaceVisitor &f) const
 	{
 		// Recursively walks variable definition and its type tree, taking into account
 		// any explicit Location or Component decorations encountered; where explicit
@@ -1008,19 +956,19 @@
 		switch(obj.opcode())
 		{
 		case spv::OpTypePointer:
-			return VisitInterfaceInner<F>(obj.definition.word(3), d, f);
+			return VisitInterfaceInner(obj.definition.word(3), d, f);
 		case spv::OpTypeMatrix:
 			for (auto i = 0u; i < obj.definition.word(3); i++, d.Location++)
 			{
 				// consumes same components of N consecutive locations
-				VisitInterfaceInner<F>(obj.definition.word(2), d, f);
+				VisitInterfaceInner(obj.definition.word(2), d, f);
 			}
 			return d.Location;
 		case spv::OpTypeVector:
 			for (auto i = 0u; i < obj.definition.word(3); i++, d.Component++)
 			{
 				// consumes N consecutive components in the same location
-				VisitInterfaceInner<F>(obj.definition.word(2), d, f);
+				VisitInterfaceInner(obj.definition.word(2), d, f);
 			}
 			return d.Location + 1;
 		case spv::OpTypeFloat:
@@ -1038,7 +986,7 @@
 			for (auto i = 0u; i < obj.definition.wordCount() - 2; i++)
 			{
 				ApplyDecorationsForIdMember(&d, id, i);
-				d.Location = VisitInterfaceInner<F>(obj.definition.word(i + 2), d, f);
+				d.Location = VisitInterfaceInner(obj.definition.word(i + 2), d, f);
 				d.Component = 0;    // Implicit locations always have component=0
 			}
 			return d.Location;
@@ -1048,7 +996,7 @@
 			auto arraySize = GetConstScalarInt(obj.definition.word(3));
 			for (auto i = 0u; i < arraySize; i++)
 			{
-				d.Location = VisitInterfaceInner<F>(obj.definition.word(2), d, f);
+				d.Location = VisitInterfaceInner(obj.definition.word(2), d, f);
 			}
 			return d.Location;
 		}
@@ -1058,8 +1006,7 @@
 		}
 	}
 
-	template<typename F>
-	void SpirvShader::VisitInterface(Object::ID id, F f) const
+	void SpirvShader::VisitInterface(Object::ID id, const InterfaceVisitor &f) const
 	{
 		// Walk a variable definition and call f for each component in it.
 		Decorations d{};
@@ -1067,146 +1014,7 @@
 
 		auto def = getObject(id).definition;
 		ASSERT(def.opcode() == spv::OpVariable);
-		VisitInterfaceInner<F>(def.word(1), d, f);
-	}
-
-	template<typename F>
-	void SpirvShader::VisitMemoryObjectInner(sw::SpirvShader::Type::ID id, sw::SpirvShader::Decorations d, uint32_t& index, uint32_t offset, F f) const
-	{
-		// Walk a type tree in an explicitly laid out storage class, calling
-		// a functor for each scalar element within the object.
-
-		// The functor's first parameter is the index of the scalar element;
-		// the second parameter is the offset (in bytes) from the base of the
-		// object.
-
-		ApplyDecorationsForId(&d, id);
-		auto const &type = getType(id);
-
-		if (d.HasOffset)
-		{
-			offset += d.Offset;
-			d.HasOffset = false;
-		}
-
-		switch (type.opcode())
-		{
-		case spv::OpTypePointer:
-			VisitMemoryObjectInner<F>(type.definition.word(3), d, index, offset, f);
-			break;
-		case spv::OpTypeInt:
-		case spv::OpTypeFloat:
-			f(index++, offset);
-			break;
-		case spv::OpTypeVector:
-		{
-			auto elemStride = (d.InsideMatrix && d.HasRowMajor && d.RowMajor) ? d.MatrixStride : static_cast<int32_t>(sizeof(float));
-			for (auto i = 0u; i < type.definition.word(3); i++)
-			{
-				VisitMemoryObjectInner(type.definition.word(2), d, index, offset + elemStride * i, f);
-			}
-			break;
-		}
-		case spv::OpTypeMatrix:
-		{
-			auto columnStride = (d.HasRowMajor && d.RowMajor) ? static_cast<int32_t>(sizeof(float)) : d.MatrixStride;
-			d.InsideMatrix = true;
-			for (auto i = 0u; i < type.definition.word(3); i++)
-			{
-				ASSERT(d.HasMatrixStride);
-				VisitMemoryObjectInner(type.definition.word(2), d, index, offset + columnStride * i, f);
-			}
-			break;
-		}
-		case spv::OpTypeStruct:
-			for (auto i = 0u; i < type.definition.wordCount() - 2; i++)
-			{
-				ApplyDecorationsForIdMember(&d, id, i);
-				VisitMemoryObjectInner<F>(type.definition.word(i + 2), d, index, offset, f);
-			}
-			break;
-		case spv::OpTypeArray:
-		{
-			auto arraySize = GetConstScalarInt(type.definition.word(3));
-			for (auto i = 0u; i < arraySize; i++)
-			{
-				ASSERT(d.HasArrayStride);
-				VisitMemoryObjectInner<F>(type.definition.word(2), d, index, offset + i * d.ArrayStride, f);
-			}
-			break;
-		}
-		default:
-			UNREACHABLE("%s", OpcodeName(type.opcode()).c_str());
-		}
-	}
-
-	template<typename F>
-	void SpirvShader::VisitMemoryObject(sw::SpirvShader::Object::ID id, F f) const
-	{
-		auto typeId = getObject(id).type;
-		auto const & type = getType(typeId);
-		if (IsExplicitLayout(type.storageClass))
-		{
-			Decorations d{};
-			ApplyDecorationsForId(&d, id);
-			uint32_t index = 0;
-			VisitMemoryObjectInner<F>(typeId, d, index, 0, f);
-		}
-		else
-		{
-			// Objects without explicit layout are tightly packed.
-			for (auto i = 0u; i < getType(type.element).sizeInComponents; i++)
-			{
-				f(i, i * sizeof(float));
-			}
-		}
-	}
-
-	SIMD::Pointer SpirvShader::GetPointerToData(Object::ID id, int arrayIndex, EmitState const *state) const
-	{
-		auto routine = state->routine;
-		auto &object = getObject(id);
-		switch (object.kind)
-		{
-			case Object::Kind::Pointer:
-			case Object::Kind::InterfaceVariable:
-				return state->getPointer(id);
-
-			case Object::Kind::DescriptorSet:
-			{
-				const auto &d = descriptorDecorations.at(id);
-				ASSERT(d.DescriptorSet >= 0 && d.DescriptorSet < vk::MAX_BOUND_DESCRIPTOR_SETS);
-				ASSERT(d.Binding >= 0);
-
-				auto set = state->getPointer(id);
-
-				auto setLayout = routine->pipelineLayout->getDescriptorSetLayout(d.DescriptorSet);
-				ASSERT_MSG(setLayout->hasBinding(d.Binding), "Descriptor set %d does not contain binding %d", int(d.DescriptorSet), int(d.Binding));
-				int bindingOffset = static_cast<int>(setLayout->getBindingOffset(d.Binding, arrayIndex));
-
-				Pointer<Byte> descriptor = set.base + bindingOffset; // BufferDescriptor*
-				Pointer<Byte> data = *Pointer<Pointer<Byte>>(descriptor + OFFSET(vk::BufferDescriptor, ptr)); // void*
-				Int size = *Pointer<Int>(descriptor + OFFSET(vk::BufferDescriptor, sizeInBytes));
-				if (setLayout->isBindingDynamic(d.Binding))
-				{
-					uint32_t dynamicBindingIndex =
-						routine->pipelineLayout->getDynamicOffsetBase(d.DescriptorSet) +
-						setLayout->getDynamicDescriptorOffset(d.Binding) +
-						arrayIndex;
-					Int offset = routine->descriptorDynamicOffsets[dynamicBindingIndex];
-					Int robustnessSize = *Pointer<Int>(descriptor + OFFSET(vk::BufferDescriptor, robustnessSize));
-					return SIMD::Pointer(data + offset, Min(size, robustnessSize - offset));
-				}
-				else
-				{
-					return SIMD::Pointer(data, size);
-				}
-			}
-
-			default:
-				UNREACHABLE("Invalid pointer kind %d", int(object.kind));
-				return SIMD::Pointer(Pointer<Byte>(), 0);
-		}
+		VisitInterfaceInner(def.word(1), d, f);
 	}
 
 	void SpirvShader::ApplyDecorationsForAccessChain(Decorations *d, DescriptorDecorations *dd, Object::ID baseId, uint32_t numIndexes, uint32_t const *indexIds) const
@@ -2119,240 +1927,6 @@
 		return EmitResult::Continue;
 	}
 
-	SpirvShader::EmitResult SpirvShader::EmitVariable(InsnIterator insn, EmitState *state) const
-	{
-		auto routine = state->routine;
-		Object::ID resultId = insn.word(2);
-		auto &object = getObject(resultId);
-		auto &objectTy = getType(object.type);
-
-		switch (objectTy.storageClass)
-		{
-		case spv::StorageClassOutput:
-		case spv::StorageClassPrivate:
-		case spv::StorageClassFunction:
-		{
-			ASSERT(objectTy.opcode() == spv::OpTypePointer);
-			auto base = &routine->getVariable(resultId)[0];
-			auto elementTy = getType(objectTy.element);
-			auto size = elementTy.sizeInComponents * static_cast<uint32_t>(sizeof(float)) * SIMD::Width;
-			state->createPointer(resultId, SIMD::Pointer(base, size));
-			break;
-		}
-		case spv::StorageClassWorkgroup:
-		{
-			ASSERT(objectTy.opcode() == spv::OpTypePointer);
-			auto base = &routine->workgroupMemory[0];
-			auto size = workgroupMemory.size();
-			state->createPointer(resultId, SIMD::Pointer(base, size, workgroupMemory.offsetOf(resultId)));
-			break;
-		}
-		case spv::StorageClassInput:
-		{
-			if (object.kind == Object::Kind::InterfaceVariable)
-			{
-				auto &dst = routine->getVariable(resultId);
-				int offset = 0;
-				VisitInterface(resultId,
-								[&](Decorations const &d, AttribType type) {
-									auto scalarSlot = d.Location << 2 | d.Component;
-									dst[offset++] = routine->inputs[scalarSlot];
-								});
-			}
-			ASSERT(objectTy.opcode() == spv::OpTypePointer);
-			auto base = &routine->getVariable(resultId)[0];
-			auto elementTy = getType(objectTy.element);
-			auto size = elementTy.sizeInComponents * static_cast<uint32_t>(sizeof(float)) * SIMD::Width;
-			state->createPointer(resultId, SIMD::Pointer(base, size));
-			break;
-		}
-		case spv::StorageClassUniformConstant:
-		{
-			const auto &d = descriptorDecorations.at(resultId);
-			ASSERT(d.DescriptorSet >= 0);
-			ASSERT(d.Binding >= 0);
-
-			uint32_t arrayIndex = 0;  // TODO(b/129523279)
-			auto setLayout = routine->pipelineLayout->getDescriptorSetLayout(d.DescriptorSet);
-			if (setLayout->hasBinding(d.Binding))
-			{
-				uint32_t bindingOffset = static_cast<uint32_t>(setLayout->getBindingOffset(d.Binding, arrayIndex));
-				Pointer<Byte> set = routine->descriptorSets[d.DescriptorSet];  // DescriptorSet*
-				Pointer<Byte> binding = Pointer<Byte>(set + bindingOffset);    // vk::SampledImageDescriptor*
-				auto size = 0; // Not required as this pointer is not directly used by SIMD::Read or SIMD::Write.
-				state->createPointer(resultId, SIMD::Pointer(binding, size));
-			}
-			else
-			{
-				// TODO: Error if the variable with the non-existant binding is
-				// used? Or perhaps strip these unused variable declarations as
-				// a preprocess on the SPIR-V?
-			}
-			break;
-		}
-		case spv::StorageClassUniform:
-		case spv::StorageClassStorageBuffer:
-		{
-			const auto &d = descriptorDecorations.at(resultId);
-			ASSERT(d.DescriptorSet >= 0);
-			auto size = 0; // Not required as this pointer is not directly used by SIMD::Read or SIMD::Write.
-			// Note: the module may contain descriptor set references that are not suitable for this implementation -- using a set index higher than the number
-			// of descriptor set binding points we support. As long as the selected entrypoint doesn't actually touch the out of range binding points, this
-			// is valid. In this case make the value nullptr to make it easier to diagnose an attempt to dereference it.
-			if (d.DescriptorSet < vk::MAX_BOUND_DESCRIPTOR_SETS)
-			{
-				state->createPointer(resultId, SIMD::Pointer(routine->descriptorSets[d.DescriptorSet], size));
-			}
-			else
-			{
-				state->createPointer(resultId, SIMD::Pointer(nullptr, 0));
-			}
-			break;
-		}
-		case spv::StorageClassPushConstant:
-		{
-			state->createPointer(resultId, SIMD::Pointer(routine->pushConstants, vk::MAX_PUSH_CONSTANT_SIZE));
-			break;
-		}
-		default:
-			UNREACHABLE("Storage class %d", objectTy.storageClass);
-			break;
-		}
-
-		if (insn.wordCount() > 4)
-		{
-			Object::ID initializerId = insn.word(4);
-			if (getObject(initializerId).kind != Object::Kind::Constant)
-			{
-				UNIMPLEMENTED("Non-constant initializers not yet implemented");
-			}
-			switch (objectTy.storageClass)
-			{
-			case spv::StorageClassOutput:
-			case spv::StorageClassPrivate:
-			case spv::StorageClassFunction:
-			{
-				bool interleavedByLane = IsStorageInterleavedByLane(objectTy.storageClass);
-				auto ptr = GetPointerToData(resultId, 0, state);
-				GenericValue initialValue(this, state, initializerId);
-				VisitMemoryObject(resultId, [&](uint32_t i, uint32_t offset)
-				{
-					auto p = ptr + offset;
-					if (interleavedByLane) { p = InterleaveByLane(p); }
-					auto robustness = OutOfBoundsBehavior::UndefinedBehavior;  // Local variables are always within bounds.
-					p.Store(initialValue.Float(i), robustness, state->activeLaneMask());
-				});
-				break;
-			}
-			default:
-				ASSERT_MSG(initializerId == 0, "Vulkan does not permit variables of storage class %d to have initializers", int(objectTy.storageClass));
-			}
-		}
-
-		return EmitResult::Continue;
-	}
-
-	SpirvShader::EmitResult SpirvShader::EmitLoad(InsnIterator insn, EmitState *state) const
-	{
-		bool atomic = (insn.opcode() == spv::OpAtomicLoad);
-		Object::ID resultId = insn.word(2);
-		Object::ID pointerId = insn.word(3);
-		auto &result = getObject(resultId);
-		auto &resultTy = getType(result.type);
-		auto &pointer = getObject(pointerId);
-		auto &pointerTy = getType(pointer.type);
-		std::memory_order memoryOrder = std::memory_order_relaxed;
-
-		ASSERT(getType(pointer.type).element == result.type);
-		ASSERT(Type::ID(insn.word(1)) == result.type);
-		ASSERT(!atomic || getType(getType(pointer.type).element).opcode() == spv::OpTypeInt);  // Vulkan 1.1: "Atomic instructions must declare a scalar 32-bit integer type, for the value pointed to by Pointer."
-
-		if(pointerTy.storageClass == spv::StorageClassUniformConstant)
-		{
-			// Just propagate the pointer.
-			auto &ptr = state->getPointer(pointerId);
-			state->createPointer(resultId, ptr);
-			return EmitResult::Continue;
-		}
-
-		if(atomic)
-		{
-			Object::ID semanticsId = insn.word(5);
-			auto memorySemantics = static_cast<spv::MemorySemanticsMask>(getObject(semanticsId).constantValue[0]);
-			memoryOrder = MemoryOrder(memorySemantics);
-		}
-
-		auto ptr = GetPointerToData(pointerId, 0, state);
-		bool interleavedByLane = IsStorageInterleavedByLane(pointerTy.storageClass);
-		auto &dst = state->createIntermediate(resultId, resultTy.sizeInComponents);
-		auto robustness = state->getOutOfBoundsBehavior(pointerTy.storageClass);
-
-		VisitMemoryObject(pointerId, [&](uint32_t i, uint32_t offset)
-		{
-			auto p = ptr + offset;
-			if (interleavedByLane) { p = InterleaveByLane(p); }  // TODO: Interleave once, then add offset?
-			dst.move(i, p.Load<SIMD::Float>(robustness, state->activeLaneMask(), atomic, memoryOrder));
-		});
-
-		return EmitResult::Continue;
-	}
-
-	SpirvShader::EmitResult SpirvShader::EmitStore(InsnIterator insn, EmitState *state) const
-	{
-		bool atomic = (insn.opcode() == spv::OpAtomicStore);
-		Object::ID pointerId = insn.word(1);
-		Object::ID objectId = insn.word(atomic ? 4 : 2);
-		auto &object = getObject(objectId);
-		auto &pointer = getObject(pointerId);
-		auto &pointerTy = getType(pointer.type);
-		auto &elementTy = getType(pointerTy.element);
-		std::memory_order memoryOrder = std::memory_order_relaxed;
-
-		if(atomic)
-		{
-			Object::ID semanticsId = insn.word(3);
-			auto memorySemantics = static_cast<spv::MemorySemanticsMask>(getObject(semanticsId).constantValue[0]);
-			memoryOrder = MemoryOrder(memorySemantics);
-		}
-
-		ASSERT(!atomic || elementTy.opcode() == spv::OpTypeInt);  // Vulkan 1.1: "Atomic instructions must declare a scalar 32-bit integer type, for the value pointed to by Pointer."
-
-		auto ptr = GetPointerToData(pointerId, 0, state);
-		bool interleavedByLane = IsStorageInterleavedByLane(pointerTy.storageClass);
-		auto robustness = state->getOutOfBoundsBehavior(pointerTy.storageClass);
-
-		SIMD::Int mask = state->activeLaneMask();
-		if (!StoresInHelperInvocation(pointerTy.storageClass))
-		{
-			mask = mask & state->storesAndAtomicsMask();
-		}
-
-		if (object.kind == Object::Kind::Constant)
-		{
-			// Constant source data.
-			const uint32_t *src = object.constantValue.get();
-			VisitMemoryObject(pointerId, [&](uint32_t i, uint32_t offset)
-			{
-				auto p = ptr + offset;
-				if (interleavedByLane) { p = InterleaveByLane(p); }
-				p.Store(SIMD::Int(src[i]), robustness, mask, atomic, memoryOrder);
-			});
-		}
-		else
-		{
-			// Intermediate source data.
-			auto &src = state->getIntermediate(objectId);
-			VisitMemoryObject(pointerId, [&](uint32_t i, uint32_t offset)
-			{
-				auto p = ptr + offset;
-				if (interleavedByLane) { p = InterleaveByLane(p); }
-				p.Store(src.Float(i), robustness, mask, atomic, memoryOrder);
-			});
-		}
-
-		return EmitResult::Continue;
-	}
-
 	SpirvShader::EmitResult SpirvShader::EmitAccessChain(InsnIterator insn, EmitState *state) const
 	{
 		Type::ID typeId = insn.word(1);
@@ -3051,29 +2625,6 @@
 		return EmitResult::Continue;
 	}
 
-	std::memory_order SpirvShader::MemoryOrder(spv::MemorySemanticsMask memorySemantics)
-	{
-		auto control = static_cast<uint32_t>(memorySemantics) & static_cast<uint32_t>(
-			spv::MemorySemanticsAcquireMask |
-			spv::MemorySemanticsReleaseMask |
-			spv::MemorySemanticsAcquireReleaseMask |
-			spv::MemorySemanticsSequentiallyConsistentMask
-		);
-		switch (control)
-		{
-		case spv::MemorySemanticsMaskNone:                   return std::memory_order_relaxed;
-		case spv::MemorySemanticsAcquireMask:                return std::memory_order_acquire;
-		case spv::MemorySemanticsReleaseMask:                return std::memory_order_release;
-		case spv::MemorySemanticsAcquireReleaseMask:         return std::memory_order_acq_rel;
-		case spv::MemorySemanticsSequentiallyConsistentMask: return std::memory_order_acq_rel;  // Vulkan 1.1: "SequentiallyConsistent is treated as AcquireRelease"
-		default:
-			// "it is invalid for more than one of these four bits to be set:
-			// Acquire, Release, AcquireRelease, or SequentiallyConsistent."
-			UNREACHABLE("MemorySemanticsMask: %x", int(control));
-			return std::memory_order_acq_rel;
-		}
-	}
-
 	SIMD::Float SpirvShader::Dot(unsigned numComponents, GenericValue const & x, GenericValue const & y) const
 	{
 		SIMD::Float d = x.Float(0) * y.Float(0);
@@ -4217,52 +3768,6 @@
 		return EmitResult::Continue;
 	}
 
-	SpirvShader::EmitResult SpirvShader::EmitCopyMemory(InsnIterator insn, EmitState *state) const
-	{
-		Object::ID dstPtrId = insn.word(1);
-		Object::ID srcPtrId = insn.word(2);
-		auto &dstPtrTy = getType(getObject(dstPtrId).type);
-		auto &srcPtrTy = getType(getObject(srcPtrId).type);
-		ASSERT(dstPtrTy.element == srcPtrTy.element);
-
-		bool dstInterleavedByLane = IsStorageInterleavedByLane(dstPtrTy.storageClass);
-		bool srcInterleavedByLane = IsStorageInterleavedByLane(srcPtrTy.storageClass);
-		auto dstPtr = GetPointerToData(dstPtrId, 0, state);
-		auto srcPtr = GetPointerToData(srcPtrId, 0, state);
-
-		std::unordered_map<uint32_t, uint32_t> srcOffsets;
-
-		VisitMemoryObject(srcPtrId, [&](uint32_t i, uint32_t srcOffset) { srcOffsets[i] = srcOffset; });
-
-		VisitMemoryObject(dstPtrId, [&](uint32_t i, uint32_t dstOffset)
-		{
-			auto it = srcOffsets.find(i);
-			ASSERT(it != srcOffsets.end());
-			auto srcOffset = it->second;
-
-			auto dst = dstPtr + dstOffset;
-			auto src = srcPtr + srcOffset;
-			if (dstInterleavedByLane) { dst = InterleaveByLane(dst); }
-			if (srcInterleavedByLane) { src = InterleaveByLane(src); }
-
-			// TODO(b/131224163): Optimize based on src/dst storage classes.
-			auto robustness = OutOfBoundsBehavior::RobustBufferAccess;
-
-			auto value = src.Load<SIMD::Float>(robustness, state->activeLaneMask());
-			dst.Store(value, robustness, state->activeLaneMask());
-		});
-		return EmitResult::Continue;
-	}
-
-	SpirvShader::EmitResult SpirvShader::EmitMemoryBarrier(InsnIterator insn, EmitState *state) const
-	{
-		auto semantics = spv::MemorySemanticsMask(GetConstScalarInt(insn.word(2)));
-		// TODO: We probably want to consider the memory scope here. For now,
-		// just always emit the full fence.
-		Fence(semantics);
-		return EmitResult::Continue;
-	}
-
 	SpirvShader::EmitResult SpirvShader::EmitGroupNonUniform(InsnIterator insn, EmitState *state) const
 	{
 		static_assert(SIMD::Width == 4, "EmitGroupNonUniform makes many assumptions that the SIMD vector width is 4");
diff --git a/src/Pipeline/SpirvShader.hpp b/src/Pipeline/SpirvShader.hpp
index 4889c57..20b96cc 100644
--- a/src/Pipeline/SpirvShader.hpp
+++ b/src/Pipeline/SpirvShader.hpp
@@ -731,17 +731,17 @@
 		// Output storage buffers and images should not be affected by helper invocations
 		static bool StoresInHelperInvocation(spv::StorageClass storageClass);
 
-		template<typename F>
-		int VisitInterfaceInner(Type::ID id, Decorations d, F f) const;
+		using InterfaceVisitor = std::function<void(Decorations const, AttribType)>;
 
-		template<typename F>
-		void VisitInterface(Object::ID id, F f) const;
+		void VisitInterface(Object::ID id, const InterfaceVisitor& v) const;
 
-		template<typename F>
-		void VisitMemoryObject(Object::ID id, F f) const;
+		int VisitInterfaceInner(Type::ID id, Decorations d, const InterfaceVisitor& v) const;
 
-		template<typename F>
-		void VisitMemoryObjectInner(Type::ID id, Decorations d, uint32_t &index, uint32_t offset, F f) const;
+		using MemoryVisitor = std::function<void(uint32_t index, uint32_t offset)>;
+
+		void VisitMemoryObject(Object::ID id, const MemoryVisitor& v) const;
+
+		void VisitMemoryObjectInner(Type::ID id, Decorations d, uint32_t &index, uint32_t offset, const MemoryVisitor& v) const;
 
 		Object& CreateConstant(InsnIterator it);
 
diff --git a/src/Pipeline/SpirvShaderMemory.cpp b/src/Pipeline/SpirvShaderMemory.cpp
new file mode 100644
index 0000000..afecbfe
--- /dev/null
+++ b/src/Pipeline/SpirvShaderMemory.cpp
@@ -0,0 +1,517 @@
+// Copyright 2019 The SwiftShader Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "SpirvShader.hpp"
+
+#include "ShaderCore.hpp"
+
+#include "Vulkan/VkPipelineLayout.hpp"
+
+#include <spirv/unified1/spirv.hpp>
+#include <spirv/unified1/GLSL.std.450.h>
+
+namespace sw {
+
+SpirvShader::EmitResult SpirvShader::EmitLoad(InsnIterator insn, EmitState *state) const
+{
+	bool atomic = (insn.opcode() == spv::OpAtomicLoad);
+	Object::ID resultId = insn.word(2);
+	Object::ID pointerId = insn.word(3);
+	auto &result = getObject(resultId);
+	auto &resultTy = getType(result.type);
+	auto &pointer = getObject(pointerId);
+	auto &pointerTy = getType(pointer.type);
+	std::memory_order memoryOrder = std::memory_order_relaxed;
+
+	ASSERT(getType(pointer.type).element == result.type);
+	ASSERT(Type::ID(insn.word(1)) == result.type);
+	ASSERT(!atomic || getType(getType(pointer.type).element).opcode() == spv::OpTypeInt);  // Vulkan 1.1: "Atomic instructions must declare a scalar 32-bit integer type, for the value pointed to by Pointer."
+
+	if(pointerTy.storageClass == spv::StorageClassUniformConstant)
+	{
+		// Just propagate the pointer.
+		auto &ptr = state->getPointer(pointerId);
+		state->createPointer(resultId, ptr);
+		return EmitResult::Continue;
+	}
+
+	if(atomic)
+	{
+		Object::ID semanticsId = insn.word(5);
+		auto memorySemantics = static_cast<spv::MemorySemanticsMask>(getObject(semanticsId).constantValue[0]);
+		memoryOrder = MemoryOrder(memorySemantics);
+	}
+
+	auto ptr = GetPointerToData(pointerId, 0, state);
+	bool interleavedByLane = IsStorageInterleavedByLane(pointerTy.storageClass);
+	auto &dst = state->createIntermediate(resultId, resultTy.sizeInComponents);
+	auto robustness = state->getOutOfBoundsBehavior(pointerTy.storageClass);
+
+	VisitMemoryObject(pointerId, [&](uint32_t i, uint32_t offset)
+	{
+		auto p = ptr + offset;
+		if (interleavedByLane) { p = InterleaveByLane(p); }  // TODO: Interleave once, then add offset?
+		dst.move(i, p.Load<SIMD::Float>(robustness, state->activeLaneMask(), atomic, memoryOrder));
+	});
+
+	return EmitResult::Continue;
+}
+
+SpirvShader::EmitResult SpirvShader::EmitStore(InsnIterator insn, EmitState *state) const
+{
+	bool atomic = (insn.opcode() == spv::OpAtomicStore);
+	Object::ID pointerId = insn.word(1);
+	Object::ID objectId = insn.word(atomic ? 4 : 2);
+	auto &object = getObject(objectId);
+	auto &pointer = getObject(pointerId);
+	auto &pointerTy = getType(pointer.type);
+	auto &elementTy = getType(pointerTy.element);
+	std::memory_order memoryOrder = std::memory_order_relaxed;
+
+	if(atomic)
+	{
+		Object::ID semanticsId = insn.word(3);
+		auto memorySemantics = static_cast<spv::MemorySemanticsMask>(getObject(semanticsId).constantValue[0]);
+		memoryOrder = MemoryOrder(memorySemantics);
+	}
+
+	ASSERT(!atomic || elementTy.opcode() == spv::OpTypeInt);  // Vulkan 1.1: "Atomic instructions must declare a scalar 32-bit integer type, for the value pointed to by Pointer."
+
+	auto ptr = GetPointerToData(pointerId, 0, state);
+	bool interleavedByLane = IsStorageInterleavedByLane(pointerTy.storageClass);
+	auto robustness = state->getOutOfBoundsBehavior(pointerTy.storageClass);
+
+	SIMD::Int mask = state->activeLaneMask();
+	if (!StoresInHelperInvocation(pointerTy.storageClass))
+	{
+		mask = mask & state->storesAndAtomicsMask();
+	}
+
+	if (object.kind == Object::Kind::Constant)
+	{
+		// Constant source data.
+		const uint32_t *src = object.constantValue.get();
+		VisitMemoryObject(pointerId, [&](uint32_t i, uint32_t offset)
+		{
+			auto p = ptr + offset;
+			if (interleavedByLane) { p = InterleaveByLane(p); }
+			p.Store(SIMD::Int(src[i]), robustness, mask, atomic, memoryOrder);
+		});
+	}
+	else
+	{
+		// Intermediate source data.
+		auto &src = state->getIntermediate(objectId);
+		VisitMemoryObject(pointerId, [&](uint32_t i, uint32_t offset)
+		{
+			auto p = ptr + offset;
+			if (interleavedByLane) { p = InterleaveByLane(p); }
+			p.Store(src.Float(i), robustness, mask, atomic, memoryOrder);
+		});
+	}
+
+	return EmitResult::Continue;
+}
+
+SpirvShader::EmitResult SpirvShader::EmitVariable(InsnIterator insn, EmitState *state) const
+{
+	auto routine = state->routine;
+	Object::ID resultId = insn.word(2);
+	auto &object = getObject(resultId);
+	auto &objectTy = getType(object.type);
+
+	switch (objectTy.storageClass)
+	{
+	case spv::StorageClassOutput:
+	case spv::StorageClassPrivate:
+	case spv::StorageClassFunction:
+	{
+		ASSERT(objectTy.opcode() == spv::OpTypePointer);
+		auto base = &routine->getVariable(resultId)[0];
+		auto elementTy = getType(objectTy.element);
+		auto size = elementTy.sizeInComponents * static_cast<uint32_t>(sizeof(float)) * SIMD::Width;
+		state->createPointer(resultId, SIMD::Pointer(base, size));
+		break;
+	}
+	case spv::StorageClassWorkgroup:
+	{
+		ASSERT(objectTy.opcode() == spv::OpTypePointer);
+		auto base = &routine->workgroupMemory[0];
+		auto size = workgroupMemory.size();
+		state->createPointer(resultId, SIMD::Pointer(base, size, workgroupMemory.offsetOf(resultId)));
+		break;
+	}
+	case spv::StorageClassInput:
+	{
+		if (object.kind == Object::Kind::InterfaceVariable)
+		{
+			auto &dst = routine->getVariable(resultId);
+			int offset = 0;
+			VisitInterface(resultId,
+							[&](Decorations const &d, AttribType type) {
+								auto scalarSlot = d.Location << 2 | d.Component;
+								dst[offset++] = routine->inputs[scalarSlot];
+							});
+		}
+		ASSERT(objectTy.opcode() == spv::OpTypePointer);
+		auto base = &routine->getVariable(resultId)[0];
+		auto elementTy = getType(objectTy.element);
+		auto size = elementTy.sizeInComponents * static_cast<uint32_t>(sizeof(float)) * SIMD::Width;
+		state->createPointer(resultId, SIMD::Pointer(base, size));
+		break;
+	}
+	case spv::StorageClassUniformConstant:
+	{
+		const auto &d = descriptorDecorations.at(resultId);
+		ASSERT(d.DescriptorSet >= 0);
+		ASSERT(d.Binding >= 0);
+
+		uint32_t arrayIndex = 0;  // TODO(b/129523279)
+		auto setLayout = routine->pipelineLayout->getDescriptorSetLayout(d.DescriptorSet);
+		if (setLayout->hasBinding(d.Binding))
+		{
+			uint32_t bindingOffset = static_cast<uint32_t>(setLayout->getBindingOffset(d.Binding, arrayIndex));
+			Pointer<Byte> set = routine->descriptorSets[d.DescriptorSet];  // DescriptorSet*
+			Pointer<Byte> binding = Pointer<Byte>(set + bindingOffset);    // vk::SampledImageDescriptor*
+			auto size = 0; // Not required as this pointer is not directly used by SIMD::Read or SIMD::Write.
+			state->createPointer(resultId, SIMD::Pointer(binding, size));
+		}
+		else
+		{
+			// TODO: Error if the variable with the non-existent binding is
+			// used? Or perhaps strip these unused variable declarations as
+			// a preprocess on the SPIR-V?
+		}
+		break;
+	}
+	case spv::StorageClassUniform:
+	case spv::StorageClassStorageBuffer:
+	{
+		const auto &d = descriptorDecorations.at(resultId);
+		ASSERT(d.DescriptorSet >= 0);
+		auto size = 0; // Not required as this pointer is not directly used by SIMD::Read or SIMD::Write.
+		// Note: the module may contain descriptor set references that are not suitable for this implementation -- using a set index higher than the number
+		// of descriptor set binding points we support. As long as the selected entrypoint doesn't actually touch the out of range binding points, this
+		// is valid. In this case make the value nullptr to make it easier to diagnose an attempt to dereference it.
+		if (d.DescriptorSet < vk::MAX_BOUND_DESCRIPTOR_SETS)
+		{
+			state->createPointer(resultId, SIMD::Pointer(routine->descriptorSets[d.DescriptorSet], size));
+		}
+		else
+		{
+			state->createPointer(resultId, SIMD::Pointer(nullptr, 0));
+		}
+		break;
+	}
+	case spv::StorageClassPushConstant:
+	{
+		state->createPointer(resultId, SIMD::Pointer(routine->pushConstants, vk::MAX_PUSH_CONSTANT_SIZE));
+		break;
+	}
+	default:
+		UNREACHABLE("Storage class %d", objectTy.storageClass);
+		break;
+	}
+
+	if (insn.wordCount() > 4)
+	{
+		Object::ID initializerId = insn.word(4);
+		if (getObject(initializerId).kind != Object::Kind::Constant)
+		{
+			UNIMPLEMENTED("Non-constant initializers not yet implemented");
+		}
+		switch (objectTy.storageClass)
+		{
+		case spv::StorageClassOutput:
+		case spv::StorageClassPrivate:
+		case spv::StorageClassFunction:
+		{
+			bool interleavedByLane = IsStorageInterleavedByLane(objectTy.storageClass);
+			auto ptr = GetPointerToData(resultId, 0, state);
+			GenericValue initialValue(this, state, initializerId);
+			VisitMemoryObject(resultId, [&](uint32_t i, uint32_t offset)
+			{
+				auto p = ptr + offset;
+				if (interleavedByLane) { p = InterleaveByLane(p); }
+				auto robustness = OutOfBoundsBehavior::UndefinedBehavior;  // Local variables are always within bounds.
+				p.Store(initialValue.Float(i), robustness, state->activeLaneMask());
+			});
+			break;
+		}
+		default:
+			ASSERT_MSG(initializerId == 0, "Vulkan does not permit variables of storage class %d to have initializers", int(objectTy.storageClass));
+		}
+	}
+
+	return EmitResult::Continue;
+}
+
+SpirvShader::EmitResult SpirvShader::EmitCopyMemory(InsnIterator insn, EmitState *state) const
+{
+	// Handles OpCopyMemory: copies the memory object pointed to by the
+	// source pointer (word 2) into the memory object pointed to by the
+	// destination pointer (word 1). SPIR-V requires both pointers to have
+	// the same element type, but their storage classes — and therefore
+	// their memory layouts — may differ.
+	Object::ID dstPtrId = insn.word(1);
+	Object::ID srcPtrId = insn.word(2);
+	auto &dstPtrTy = getType(getObject(dstPtrId).type);
+	auto &srcPtrTy = getType(getObject(srcPtrId).type);
+	ASSERT(dstPtrTy.element == srcPtrTy.element);
+
+	bool dstInterleavedByLane = IsStorageInterleavedByLane(dstPtrTy.storageClass);
+	bool srcInterleavedByLane = IsStorageInterleavedByLane(srcPtrTy.storageClass);
+	auto dstPtr = GetPointerToData(dstPtrId, 0, state);
+	auto srcPtr = GetPointerToData(srcPtrId, 0, state);
+
+	// Map from scalar element index to its byte offset within the source
+	// object, so source and destination can be matched element-by-element
+	// even when their layouts differ.
+	std::unordered_map<uint32_t, uint32_t> srcOffsets;
+
+	VisitMemoryObject(srcPtrId, [&](uint32_t i, uint32_t srcOffset) { srcOffsets[i] = srcOffset; });
+
+	VisitMemoryObject(dstPtrId, [&](uint32_t i, uint32_t dstOffset)
+	{
+		auto it = srcOffsets.find(i);
+		ASSERT(it != srcOffsets.end());
+		auto srcOffset = it->second;
+
+		auto dst = dstPtr + dstOffset;
+		auto src = srcPtr + srcOffset;
+		if (dstInterleavedByLane) { dst = InterleaveByLane(dst); }
+		if (srcInterleavedByLane) { src = InterleaveByLane(src); }
+
+		// TODO(b/131224163): Optimize based on src/dst storage classes.
+		auto robustness = OutOfBoundsBehavior::RobustBufferAccess;
+
+		// Copy one 32-bit scalar per active lane.
+		auto value = src.Load<SIMD::Float>(robustness, state->activeLaneMask());
+		dst.Store(value, robustness, state->activeLaneMask());
+	});
+	return EmitResult::Continue;
+}
+
+SpirvShader::EmitResult SpirvShader::EmitMemoryBarrier(InsnIterator insn, EmitState *state) const
+{
+	// Handles OpMemoryBarrier. Word 2 is a constant <id> holding the
+	// memory semantics mask; the memory scope (word 1) is ignored below.
+	auto semantics = spv::MemorySemanticsMask(GetConstScalarInt(insn.word(2)));
+	// TODO: We probably want to consider the memory scope here. For now,
+	// just always emit the full fence.
+	Fence(semantics);
+	return EmitResult::Continue;
+}
+
+void SpirvShader::VisitMemoryObjectInner(sw::SpirvShader::Type::ID id, sw::SpirvShader::Decorations d, uint32_t& index, uint32_t offset, const MemoryVisitor &f) const
+{
+	// Walk a type tree in an explicitly laid out storage class, calling
+	// a functor for each scalar element within the object.
+
+	// The functor's first parameter is the index of the scalar element;
+	// the second parameter is the offset (in bytes) from the base of the
+	// object.
+
+	// Note: d is taken by value, so decorations consumed at this level
+	// (e.g. HasOffset below) do not leak back into the caller's copy.
+	ApplyDecorationsForId(&d, id);
+	auto const &type = getType(id);
+
+	if (d.HasOffset)
+	{
+		// An explicit Offset decoration positions this member relative to
+		// its parent; consume it so nested types do not re-apply it.
+		offset += d.Offset;
+		d.HasOffset = false;
+	}
+
+	switch (type.opcode())
+	{
+	case spv::OpTypePointer:
+		// Recurse into the pointee type (word 3 of OpTypePointer).
+		VisitMemoryObjectInner(type.definition.word(3), d, index, offset, f);
+		break;
+	case spv::OpTypeInt:
+	case spv::OpTypeFloat:
+		// Scalar leaf: visit it and advance the running element index.
+		f(index++, offset);
+		break;
+	case spv::OpTypeVector:
+	{
+		// Inside a row-major matrix, consecutive vector components are a
+		// matrix stride apart; otherwise they are tightly packed 32-bit
+		// values.
+		auto elemStride = (d.InsideMatrix && d.HasRowMajor && d.RowMajor) ? d.MatrixStride : static_cast<int32_t>(sizeof(float));
+		for (auto i = 0u; i < type.definition.word(3); i++)
+		{
+			VisitMemoryObjectInner(type.definition.word(2), d, index, offset + elemStride * i, f);
+		}
+		break;
+	}
+	case spv::OpTypeMatrix:
+	{
+		// Column stride: for row-major matrices, columns are one float
+		// apart; for column-major, the decorated MatrixStride applies.
+		auto columnStride = (d.HasRowMajor && d.RowMajor) ? static_cast<int32_t>(sizeof(float)) : d.MatrixStride;
+		d.InsideMatrix = true;
+		for (auto i = 0u; i < type.definition.word(3); i++)
+		{
+			ASSERT(d.HasMatrixStride);
+			VisitMemoryObjectInner(type.definition.word(2), d, index, offset + columnStride * i, f);
+		}
+		break;
+	}
+	case spv::OpTypeStruct:
+		// Visit each member, applying member-level decorations (such as
+		// Offset and RowMajor) before recursing into the member type.
+		for (auto i = 0u; i < type.definition.wordCount() - 2; i++)
+		{
+			ApplyDecorationsForIdMember(&d, id, i);
+			VisitMemoryObjectInner(type.definition.word(i + 2), d, index, offset, f);
+		}
+		break;
+	case spv::OpTypeArray:
+	{
+		// The array length is a constant <id>; elements are placed
+		// ArrayStride bytes apart.
+		auto arraySize = GetConstScalarInt(type.definition.word(3));
+		for (auto i = 0u; i < arraySize; i++)
+		{
+			ASSERT(d.HasArrayStride);
+			VisitMemoryObjectInner(type.definition.word(2), d, index, offset + i * d.ArrayStride, f);
+		}
+		break;
+	}
+	default:
+		UNREACHABLE("%s", OpcodeName(type.opcode()).c_str());
+	}
+}
+
+void SpirvShader::VisitMemoryObject(sw::SpirvShader::Object::ID id, const MemoryVisitor &f) const
+{
+	// Calls f(element index, byte offset) for every scalar element of the
+	// memory object identified by id. The explicit (decoration-driven)
+	// layout is used where the storage class requires one; otherwise
+	// elements are assumed tightly packed.
+	auto typeId = getObject(id).type;
+	auto const & type = getType(typeId);
+	if (IsExplicitLayout(type.storageClass))
+	{
+		// Offsets and strides come from Offset / ArrayStride /
+		// MatrixStride decorations; walk the type tree to apply them.
+		Decorations d{};
+		ApplyDecorationsForId(&d, id);
+		uint32_t index = 0;
+		VisitMemoryObjectInner(typeId, d, index, 0, f);
+	}
+	else
+	{
+		// Objects without explicit layout are tightly packed.
+		for (auto i = 0u; i < getType(type.element).sizeInComponents; i++)
+		{
+			f(i, i * sizeof(float));
+		}
+	}
+}
+
+SIMD::Pointer SpirvShader::GetPointerToData(Object::ID id, int arrayIndex, EmitState const *state) const
+{
+	// Returns a SIMD pointer to the underlying data for the given pointer
+	// object. For descriptor-set-backed objects this dereferences the
+	// binding's BufferDescriptor (applying any dynamic offset);
+	// arrayIndex selects the element within an arrayed binding.
+	auto routine = state->routine;
+	auto &object = getObject(id);
+	switch (object.kind)
+	{
+		case Object::Kind::Pointer:
+		case Object::Kind::InterfaceVariable:
+			// Already a pointer to data; return it as-is.
+			return state->getPointer(id);
+
+		case Object::Kind::DescriptorSet:
+		{
+			const auto &d = descriptorDecorations.at(id);
+			ASSERT(d.DescriptorSet >= 0 && d.DescriptorSet < vk::MAX_BOUND_DESCRIPTOR_SETS);
+			ASSERT(d.Binding >= 0);
+
+			auto set = state->getPointer(id);
+
+			auto setLayout = routine->pipelineLayout->getDescriptorSetLayout(d.DescriptorSet);
+			ASSERT_MSG(setLayout->hasBinding(d.Binding), "Descriptor set %d does not contain binding %d", int(d.DescriptorSet), int(d.Binding));
+			int bindingOffset = static_cast<int>(setLayout->getBindingOffset(d.Binding, arrayIndex));
+
+			Pointer<Byte> descriptor = set.base + bindingOffset; // BufferDescriptor*
+			Pointer<Byte> data = *Pointer<Pointer<Byte>>(descriptor + OFFSET(vk::BufferDescriptor, ptr)); // void*
+			Int size = *Pointer<Int>(descriptor + OFFSET(vk::BufferDescriptor, sizeInBytes));
+			if (setLayout->isBindingDynamic(d.Binding))
+			{
+				// Dynamic uniform/storage buffer: add the dynamic offset
+				// supplied at bind time, and clamp the accessible size so
+				// robust access honors the offset-adjusted bound.
+				uint32_t dynamicBindingIndex =
+					routine->pipelineLayout->getDynamicOffsetBase(d.DescriptorSet) +
+					setLayout->getDynamicDescriptorOffset(d.Binding) +
+					arrayIndex;
+				Int offset = routine->descriptorDynamicOffsets[dynamicBindingIndex];
+				Int robustnessSize = *Pointer<Int>(descriptor + OFFSET(vk::BufferDescriptor, robustnessSize));
+				return SIMD::Pointer(data + offset, Min(size, robustnessSize - offset));
+			}
+			else
+			{
+				return SIMD::Pointer(data, size);
+			}
+		}
+
+		default:
+			UNREACHABLE("Invalid pointer kind %d", int(object.kind));
+			return SIMD::Pointer(Pointer<Byte>(), 0);
+	}
+}
+
+std::memory_order SpirvShader::MemoryOrder(spv::MemorySemanticsMask memorySemantics)
+{
+	// Maps a SPIR-V memory semantics mask to the corresponding C++
+	// std::memory_order. Only the four ordering bits are considered;
+	// any other bits in the mask are masked off and ignored here.
+	auto control = static_cast<uint32_t>(memorySemantics) & static_cast<uint32_t>(
+		spv::MemorySemanticsAcquireMask |
+		spv::MemorySemanticsReleaseMask |
+		spv::MemorySemanticsAcquireReleaseMask |
+		spv::MemorySemanticsSequentiallyConsistentMask
+	);
+	switch (control)
+	{
+	case spv::MemorySemanticsMaskNone:                   return std::memory_order_relaxed;
+	case spv::MemorySemanticsAcquireMask:                return std::memory_order_acquire;
+	case spv::MemorySemanticsReleaseMask:                return std::memory_order_release;
+	case spv::MemorySemanticsAcquireReleaseMask:         return std::memory_order_acq_rel;
+	case spv::MemorySemanticsSequentiallyConsistentMask: return std::memory_order_acq_rel;  // Vulkan 1.1: "SequentiallyConsistent is treated as AcquireRelease"
+	default:
+		// "it is invalid for more than one of these four bits to be set:
+		// Acquire, Release, AcquireRelease, or SequentiallyConsistent."
+		UNREACHABLE("MemorySemanticsMask: %x", int(control));
+		return std::memory_order_acq_rel;
+	}
+}
+
+bool SpirvShader::StoresInHelperInvocation(spv::StorageClass storageClass)
+{
+	// Returns whether stores to the given storage class should also take
+	// effect in helper invocations. Stores to buffer/image memory return
+	// false; all other storage classes return true.
+	switch (storageClass)
+	{
+	case spv::StorageClassUniform:
+	case spv::StorageClassStorageBuffer:
+	case spv::StorageClassImage:
+		return false;
+	default:
+		return true;
+	}
+}
+
+bool SpirvShader::IsExplicitLayout(spv::StorageClass storageClass)
+{
+	// Returns true for storage classes whose memory layout is specified
+	// explicitly via Offset / ArrayStride / MatrixStride decorations
+	// (see VisitMemoryObject), rather than being tightly packed.
+	switch (storageClass)
+	{
+	case spv::StorageClassUniform:
+	case spv::StorageClassStorageBuffer:
+	case spv::StorageClassPushConstant:
+		return true;
+	default:
+		return false;
+	}
+}
+
<imports>
</imports>
+sw::SIMD::Pointer SpirvShader::InterleaveByLane(sw::SIMD::Pointer p)
+{
+	// Transforms an element-addressed pointer into a lane-interleaved
+	// one: the offset is scaled by the SIMD width and each lane is then
+	// displaced by one 32-bit slot, so consecutive elements of a single
+	// lane end up SIMD::Width floats apart in memory.
+	// NOTE(review): writes exactly four static lane offsets — assumes
+	// sw::SIMD::Width == 4; confirm if the SIMD width ever changes.
+	p *= sw::SIMD::Width;
+	p.staticOffsets[0] += 0 * sizeof(float);
+	p.staticOffsets[1] += 1 * sizeof(float);
+	p.staticOffsets[2] += 2 * sizeof(float);
+	p.staticOffsets[3] += 3 * sizeof(float);
+	return p;
+}
+
+bool SpirvShader::IsStorageInterleavedByLane(spv::StorageClass storageClass)
+{
+	// Returns whether objects in the given storage class are stored with
+	// per-lane values interleaved (see InterleaveByLane). Storage visible
+	// outside a single invocation — buffers, push constants, workgroup
+	// and image memory — is not interleaved.
+	switch (storageClass)
+	{
+	case spv::StorageClassUniform:
+	case spv::StorageClassStorageBuffer:
+	case spv::StorageClassPushConstant:
+	case spv::StorageClassWorkgroup:
+	case spv::StorageClassImage:
+		return false;
+	default:
+		return true;
+	}
+}
+
+}  // namespace sw
\ No newline at end of file
diff --git a/src/Vulkan/vulkan.vcxproj b/src/Vulkan/vulkan.vcxproj
index e77b208..8f0c931 100644
--- a/src/Vulkan/vulkan.vcxproj
+++ b/src/Vulkan/vulkan.vcxproj
@@ -167,6 +167,7 @@
     <ClCompile Include="..\Pipeline\SpirvShader.cpp" />

     <ClCompile Include="..\Pipeline\SpirvShaderControlFlow.cpp" />

     <ClCompile Include="..\Pipeline\SpirvShaderGLSLstd450.cpp" />

+    <ClCompile Include="..\Pipeline\SpirvShaderMemory.cpp" />

     <ClCompile Include="..\Pipeline\SpirvShaderSampling.cpp" />

     <ClCompile Include="..\Pipeline\SpirvShader_dbg.cpp" />

     <ClCompile Include="..\Pipeline\VertexProgram.cpp" />

diff --git a/src/Vulkan/vulkan.vcxproj.filters b/src/Vulkan/vulkan.vcxproj.filters
index e8e3c09..514b33a 100644
--- a/src/Vulkan/vulkan.vcxproj.filters
+++ b/src/Vulkan/vulkan.vcxproj.filters
@@ -252,6 +252,9 @@
     <ClCompile Include="..\Pipeline\SpirvShaderGLSLstd450.cpp">

       <Filter>Source Files\Pipeline</Filter>

     </ClCompile>

+    <ClCompile Include="..\Pipeline\SpirvShaderMemory.cpp">

+      <Filter>Source Files\Pipeline</Filter>

+    </ClCompile>

     <ClCompile Include="..\Pipeline\SpirvShaderSampling.cpp">

       <Filter>Source Files\Pipeline</Filter>

     </ClCompile>