Add optimization state to the optimized SPIR-V binary cache key

The SpirvShaderKey should uniquely identify each of the pipeline cache's
optimized SPIR-V binary entries. However, the Boolean that indicates
whether the binary is optimized at all was not yet part of the key.

Bug: b/197982536
Change-Id: I3c4c8f41fedc8baffe0822853a894bc1c65a583c
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/57148
Tested-by: Nicolas Capens <nicolascapens@google.com>
Kokoro-Result: kokoro <noreply+kokoro@google.com>
Reviewed-by: Alexis Hétu <sugoi@google.com>
diff --git a/src/Vulkan/VkPipeline.cpp b/src/Vulkan/VkPipeline.cpp
index 0fb6f9d..ef2da43 100644
--- a/src/Vulkan/VkPipeline.cpp
+++ b/src/Vulkan/VkPipeline.cpp
@@ -105,16 +105,9 @@
 	return optimized;
 }
 
-sw::SpirvBinary optimizeSpirv(
-    const vk::PipelineCache::SpirvShaderKey &key,
-    const std::shared_ptr<vk::dbg::Context> &dbgctx)
+sw::SpirvBinary optimizeSpirv(const vk::PipelineCache::SpirvShaderKey &key)
 {
-	// Do not optimize the shader if we have a debugger context.
-	// Optimization passes are likely to damage debug information, and reorder
-	// instructions.
-	const bool optimize = !dbgctx;
-
-	auto code = preprocessSpirv(key.getInsns(), key.getSpecializationInfo(), optimize);
+	auto code = preprocessSpirv(key.getInsns(), key.getSpecializationInfo(), key.getOptimization());
 	ASSERT(code.size() > 0);
 
 	return code;
@@ -241,24 +234,29 @@
 			UNSUPPORTED("pStage->flags %d", int(pStage->flags));
 		}
 
+		auto dbgctx = device->getDebuggerContext();
+		// Do not optimize the shader if we have a debugger context.
+		// Optimization passes are likely to damage debug information, and reorder
+		// instructions.
+		const bool optimize = !dbgctx;
+
 		const ShaderModule *module = vk::Cast(pStage->module);
 		const PipelineCache::SpirvShaderKey key(pStage->stage, pStage->pName, module->getCode(),
 		                                        vk::Cast(pCreateInfo->renderPass), pCreateInfo->subpass,
-		                                        pStage->pSpecializationInfo);
+		                                        pStage->pSpecializationInfo, optimize);
 		auto pipelineStage = key.getPipelineStage();
-		auto dbgctx = device->getDebuggerContext();
 
 		sw::SpirvBinary spirv;
 
 		if(pPipelineCache)
 		{
 			spirv = pPipelineCache->getOrOptimizeSpirv(key, [&] {
-				return optimizeSpirv(key, dbgctx);
+				return optimizeSpirv(key);
 			});
 		}
 		else
 		{
-			spirv = optimizeSpirv(key, dbgctx);
+			spirv = optimizeSpirv(key);
 		}
 
 		auto shader = createShader(key, module, spirv, robustBufferAccess, dbgctx);
@@ -291,21 +289,26 @@
 	ASSERT(shader.get() == nullptr);
 	ASSERT(program.get() == nullptr);
 
-	const PipelineCache::SpirvShaderKey shaderKey(
-	    stage.stage, stage.pName, module->getCode(), nullptr, 0, stage.pSpecializationInfo);
 	auto dbgctx = device->getDebuggerContext();
+	// Do not optimize the shader if we have a debugger context.
+	// Optimization passes are likely to damage debug information, and reorder
+	// instructions.
+	const bool optimize = !dbgctx;
+
+	const PipelineCache::SpirvShaderKey shaderKey(
+	    stage.stage, stage.pName, module->getCode(), nullptr, 0, stage.pSpecializationInfo, optimize);
 
 	sw::SpirvBinary spirv;
 
 	if(pPipelineCache)
 	{
 		spirv = pPipelineCache->getOrOptimizeSpirv(shaderKey, [&] {
-			return optimizeSpirv(shaderKey, dbgctx);
+			return optimizeSpirv(shaderKey);
 		});
 	}
 	else
 	{
-		spirv = optimizeSpirv(shaderKey, dbgctx);
+		spirv = optimizeSpirv(shaderKey);
 	}
 
 	shader = createShader(shaderKey, module, spirv, robustBufferAccess, dbgctx);
diff --git a/src/Vulkan/VkPipelineCache.cpp b/src/Vulkan/VkPipelineCache.cpp
index dc4afe3..87b4c89 100644
--- a/src/Vulkan/VkPipelineCache.cpp
+++ b/src/Vulkan/VkPipelineCache.cpp
@@ -23,13 +23,15 @@
                                               const sw::SpirvBinary &insns,
                                               const vk::RenderPass *renderPass,
                                               const uint32_t subpassIndex,
-                                              const vk::SpecializationInfo &specializationInfo)
+                                              const vk::SpecializationInfo &specializationInfo,
+                                              bool optimize)
     : pipelineStage(pipelineStage)
     , entryPointName(entryPointName)
     , insns(insns)
     , renderPass(renderPass)
     , subpassIndex(subpassIndex)
     , specializationInfo(specializationInfo)
+    , optimize(optimize)
 {
 }
 
@@ -72,6 +74,11 @@
 		return cmp < 0;
 	}
 
+	if(optimize != other.optimize)
+	{
+		return !optimize && other.optimize;
+	}
+
 	return (specializationInfo < other.specializationInfo);
 }
 
diff --git a/src/Vulkan/VkPipelineCache.hpp b/src/Vulkan/VkPipelineCache.hpp
index c959e0d..9e950c6 100644
--- a/src/Vulkan/VkPipelineCache.hpp
+++ b/src/Vulkan/VkPipelineCache.hpp
@@ -60,7 +60,8 @@
 		               const sw::SpirvBinary &insns,
 		               const vk::RenderPass *renderPass,
 		               const uint32_t subpassIndex,
-		               const vk::SpecializationInfo &specializationInfo);
+		               const vk::SpecializationInfo &specializationInfo,
+		               bool optimize);
 
 		bool operator<(const SpirvShaderKey &other) const;
 
@@ -70,6 +71,7 @@
 		const vk::RenderPass *getRenderPass() const { return renderPass; }
 		uint32_t getSubpassIndex() const { return subpassIndex; }
 		const VkSpecializationInfo *getSpecializationInfo() const { return specializationInfo.get(); }
+		bool getOptimization() const { return optimize; }
 
 	private:
 		const VkShaderStageFlagBits pipelineStage;
@@ -78,6 +80,7 @@
 		const vk::RenderPass *renderPass;
 		const uint32_t subpassIndex;
 		const vk::SpecializationInfo specializationInfo;
+		const bool optimize;
 	};
 
 	// getOrOptimizeSpirv() queries the cache for a shader with the given key.