SpirvShader: Fix SIMD::Pointer::hasSequentialOffsets()

This wasn't considering that the offsets are bytes and not whole words.
Another bug that wasn't caught by dEQP (this would only affect atomic loads / stores).

Bug: b/131224163
Change-Id: I6356da3ecb86cdbb7a25667fe617f650019950e2
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/33168
Reviewed-by: Chris Forbes <chrisforbes@google.com>
Reviewed-by: Nicolas Capens <nicolascapens@google.com>
Tested-by: Ben Clayton <bclayton@google.com>
Kokoro-Presubmit: kokoro <noreply+kokoro@google.com>
diff --git a/src/Pipeline/SpirvShader.cpp b/src/Pipeline/SpirvShader.cpp
index 9064006..12aa56f 100644
--- a/src/Pipeline/SpirvShader.cpp
+++ b/src/Pipeline/SpirvShader.cpp
@@ -306,7 +306,7 @@
 					auto offset = Extract(offsets, 0);
 					out = T(rr::Load(rr::Pointer<EL>(&ptr.base[offset]), sizeof(float), atomic, order));
 				}
-				Else If(ptr.hasSequentialOffsets() && !anyLanesDisabled)
+				Else If(ptr.hasSequentialOffsets(sizeof(float)) && !anyLanesDisabled)
 				{
 					// Load all elements in a single SIMD instruction.
 					auto offset = Extract(offsets, 0);
@@ -343,7 +343,7 @@
 			else
 			{
 				auto anyLanesDisabled = AnyFalse(mask);
-				If(ptr.hasSequentialOffsets() && !anyLanesDisabled)
+				If(ptr.hasSequentialOffsets(sizeof(float)) && !anyLanesDisabled)
 				{
 					// Store all elements in a single SIMD instruction.
 					auto offset = Extract(offsets, 0);
diff --git a/src/Pipeline/SpirvShader.hpp b/src/Pipeline/SpirvShader.hpp
index 2480a34..658c77c 100644
--- a/src/Pipeline/SpirvShader.hpp
+++ b/src/Pipeline/SpirvShader.hpp
@@ -166,23 +166,32 @@
 				return dynamicLimit + staticLimit;
 			}
 
-			// Returns true if all offsets are sequential (N+0, N+1, N+2, N+3)
-			inline rr::Bool hasSequentialOffsets() const
+			// Returns true if all offsets are sequential
+			// (N+0*step, N+1*step, N+2*step, N+3*step)
+			inline rr::Bool hasSequentialOffsets(unsigned int step) const
 			{
 				if (hasDynamicOffsets)
 				{
 					auto o = offsets();
 					static_assert(SIMD::Width == 4, "Expects SIMD::Width to be 4");
-					return rr::SignMask(~CmpEQ(o.yzww, o + SIMD::Int(1, 2, 3, 0))) == 0;
+					return rr::SignMask(~CmpEQ(o.yzww, o + SIMD::Int(1*step, 2*step, 3*step, 0))) == 0;
 				}
-				else
+				return hasStaticSequentialOffsets(step);
+			}
+
+			// Returns true if all offsets are compile-time static and
+			// sequential (N+0*step, N+1*step, N+2*step, N+3*step)
+			inline bool hasStaticSequentialOffsets(unsigned int step) const
+			{
+				if (hasDynamicOffsets)
 				{
-					for (int i = 1; i < SIMD::Width; i++)
-					{
-						if (staticOffsets[i-1] + 1 != staticOffsets[i]) { return false; }
-					}
-					return true;
+					return false;
 				}
+				for (int i = 1; i < SIMD::Width; i++)
+				{
+					if (staticOffsets[i-1] + int32_t(step) != staticOffsets[i]) { return false; }
+				}
+				return true;
 			}
 
 			// Returns true if all offsets are equal (N, N, N, N)