Use spirv-tools optimizer in GraphicsPipeline

Run some optimization passes before lowering the code, to allow for a
simpler compiler.

Current passes:
- Exhaustively inline functions
- Apply and freeze specialization constants

I expect this set of passes will change somewhat as the
compiler evolves. We may want to stop inlining at some point.

Bug: b/124058197
Bug: b/124056625

Change-Id: Ib2a3dc2d2599449d45ac7ebb1de107deba8eb2c3
Reviewed-on: https://swiftshader-review.googlesource.com/c/24288
Tested-by: Chris Forbes <chrisforbes@google.com>
Reviewed-by: Nicolas Capens <nicolascapens@google.com>
Kokoro-Presubmit: kokoro <noreply+kokoro@google.com>
diff --git a/src/Vulkan/VkPipeline.cpp b/src/Vulkan/VkPipeline.cpp
index 103720c..6fd96df 100644
--- a/src/Vulkan/VkPipeline.cpp
+++ b/src/Vulkan/VkPipeline.cpp
@@ -13,6 +13,8 @@
 // limitations under the License.
 
 #include <Pipeline/SpirvShader.hpp>
+#include <spirv-tools/optimizer.hpp>
+#include <unordered_map>
 #include "VkPipeline.hpp"
 #include "VkShaderModule.hpp"
 
@@ -382,28 +383,57 @@
 
 void GraphicsPipeline::compileShaders(const VkAllocationCallbacks* pAllocator, const VkGraphicsPipelineCreateInfo* pCreateInfo)
 {
-	for (auto pStage = pCreateInfo->pStages; pStage != pCreateInfo->pStages + pCreateInfo->stageCount; pStage++) {
+	for (auto pStage = pCreateInfo->pStages; pStage != pCreateInfo->pStages + pCreateInfo->stageCount; pStage++)
+	{
 		auto module = Cast(pStage->module);
 
-		// TODO: apply prep passes using SPIRV-Opt here.
-		// - Apply and freeze specializations, etc.
 		auto code = module->getCode();
+		spvtools::Optimizer opt{SPV_ENV_VULKAN_1_1};
+		opt.RegisterPass(spvtools::CreateInlineExhaustivePass());
 
-		// TODO: pass in additional information here:
-		// - any NOS from pCreateInfo which we'll actually need
-		auto spirvShader = new sw::SpirvShader{code};
+		// If the pipeline uses specialization, apply the specializations before freezing
+		if (pStage->pSpecializationInfo)
+		{
+			std::unordered_map<uint32_t, std::vector<uint32_t>> specializations;
+			for (auto i = 0u; i < pStage->pSpecializationInfo->mapEntryCount; ++i)
+			{
+				auto const &e = pStage->pSpecializationInfo->pMapEntries[i];
+				// NOTE(review): assumes e.offset and e.size are multiples of
+				// sizeof(uint32_t); Vulkan does not guarantee this -- TODO confirm.
+				auto value_ptr =
+						static_cast<uint32_t const *>(pStage->pSpecializationInfo->pData) + e.offset / sizeof(uint32_t);
+				specializations.emplace(e.constantID,
+										std::vector<uint32_t>{value_ptr, value_ptr + e.size / sizeof(uint32_t)});
+			}
+			opt.RegisterPass(spvtools::CreateSetSpecConstantDefaultValuePass(specializations));
+		}
+		// Freeze specialization constants into normal constants, and propagate through
+		opt.RegisterPass(spvtools::CreateFreezeSpecConstantValuePass());
+		opt.RegisterPass(spvtools::CreateFoldSpecConstantOpAndCompositePass());
 
-		switch (pStage->stage) {
-			case VK_SHADER_STAGE_VERTEX_BIT:
-				context.vertexShader = vertexShader = spirvShader;
-				break;
+		std::vector<uint32_t> postOptCode;
+		// Optimizer::Run reports failure via its return value; fall back to the
+		// unoptimized module rather than handing an empty binary to the compiler.
+		if (!opt.Run(code.data(), code.size(), &postOptCode))
+		{
+			postOptCode = code;
+		}
 
-			case VK_SHADER_STAGE_FRAGMENT_BIT:
-				context.pixelShader = fragmentShader = spirvShader;
-				break;
+		// TODO: also pass in any pipeline state which will affect shader compilation
+		auto spirvShader = new sw::SpirvShader{postOptCode};
 
-			default:
-				UNIMPLEMENTED("Unsupported stage");
+		switch (pStage->stage)
+		{
+		case VK_SHADER_STAGE_VERTEX_BIT:
+			context.vertexShader = vertexShader = spirvShader;
+			break;
+
+		case VK_SHADER_STAGE_FRAGMENT_BIT:
+			context.pixelShader = fragmentShader = spirvShader;
+			break;
+
+		default:
+			UNIMPLEMENTED("Unsupported stage");
 		}
 	}
 }