Initial stub implementation of SpirvShader
This will eventually replace Shader and its related subclasses. The
interesting bit here is the instruction iterator, which gives reasonably
safe access to the instruction stream without requiring the rest of the
code to know much about its physical layout.
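
As a rough illustration of the idea (a hypothetical sketch only; the
actual SpirvShader sources are not part of the hunks below, and the real
iterator's names and shape may differ), such an iterator can wrap the raw
word stream and advance by each instruction's word count, so callers never
index into the physical layout themselves:

    // Hypothetical sketch of an instruction iterator over a SPIR-V word
    // stream; illustrative only, not the actual SpirvShader interface.
    #include <cstdint>

    class InsnIterator
    {
    public:
        explicit InsnIterator(uint32_t const *word) : word(word) {}

        // The first word of every SPIR-V instruction packs the word count
        // (high 16 bits) and the opcode (low 16 bits).
        uint32_t opcode() const { return *word & 0xFFFFu; }
        uint32_t wordCount() const { return *word >> 16u; }

        // Operands follow the opcode word.
        uint32_t operand(uint32_t n) const { return word[n + 1]; }

        // Step to the next instruction by the current instruction's size.
        InsnIterator &operator++() { word += wordCount(); return *this; }
        bool operator!=(InsnIterator const &other) const { return word != other.word; }

    private:
        uint32_t const *word;
    };

    // Usage sketch, assuming 'code' holds the module's words: instructions
    // begin after the 5-word SPIR-V header.
    //
    //   for (InsnIterator insn{code.data() + 5}, end{code.data() + code.size()};
    //        insn != end; ++insn)
    //   {
    //       switch (insn.opcode()) { /* ... */ }
    //   }
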
Bug: b/120799499
Change-Id: Id0d94c4b807ddb1e4325de147ca1f651171779b7
Reviewed-on: https://swiftshader-review.googlesource.com/c/23049
Reviewed-by: Corentin Wallez <cwallez@google.com>
Reviewed-by: Alexis Hétu <sugoi@google.com>
Tested-by: Chris Forbes <chrisforbes@google.com>
diff --git a/src/Vulkan/VkPipeline.cpp b/src/Vulkan/VkPipeline.cpp
index ec472f7..917f5a9 100644
--- a/src/Vulkan/VkPipeline.cpp
+++ b/src/Vulkan/VkPipeline.cpp
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <Pipeline/SpirvShader.hpp>
#include "VkPipeline.hpp"
#include "VkShaderModule.hpp"
@@ -370,6 +371,8 @@
void GraphicsPipeline::destroyPipeline(const VkAllocationCallbacks* pAllocator)
{
+ delete vertexShader;
+ delete fragmentShader;
}
size_t GraphicsPipeline::ComputeRequiredAllocationSize(const VkGraphicsPipelineCreateInfo* pCreateInfo)
@@ -379,8 +382,30 @@
void GraphicsPipeline::compileShaders(const VkAllocationCallbacks* pAllocator, const VkGraphicsPipelineCreateInfo* pCreateInfo)
{
- vertexRoutine = Cast(pCreateInfo->pStages[0].module)->compile(pAllocator);
- fragmentRoutine = Cast(pCreateInfo->pStages[1].module)->compile(pAllocator);
+ for (auto pStage = pCreateInfo->pStages; pStage != pCreateInfo->pStages + pCreateInfo->stageCount; pStage++) {
+ auto module = Cast(pStage->module);
+
+ // TODO: apply prep passes using SPIRV-Opt here.
+ // - Apply and freeze specializations, etc.
+ auto code = module->getCode();
+
+ // TODO: pass in additional information here:
+ // - any NOS from pCreateInfo which we'll actually need
+ auto spirvShader = new sw::SpirvShader{code};
+
+ switch (pStage->stage) {
+ case VK_SHADER_STAGE_VERTEX_BIT:
+ vertexShader = spirvShader;
+ break;
+
+ case VK_SHADER_STAGE_FRAGMENT_BIT:
+ fragmentShader = spirvShader;
+ break;
+
+ default:
+ UNIMPLEMENTED("Unsupported stage");
+ }
+ }
}
uint32_t GraphicsPipeline::computePrimitiveCount(uint32_t vertexCount) const
diff --git a/src/Vulkan/VkPipeline.hpp b/src/Vulkan/VkPipeline.hpp
index a38a105..0c7fef2 100644
--- a/src/Vulkan/VkPipeline.hpp
+++ b/src/Vulkan/VkPipeline.hpp
@@ -18,6 +18,8 @@
#include "VkObject.hpp"
#include "Device/Renderer.hpp"
+namespace sw { class SpirvShader; }
+
namespace vk
{
@@ -65,8 +67,12 @@
const sw::Color<float>& getBlendConstants() const;
private:
+ sw::SpirvShader *vertexShader = nullptr;
+ sw::SpirvShader *fragmentShader = nullptr;
+
rr::Routine* vertexRoutine;
rr::Routine* fragmentRoutine;
+
sw::Context context;
sw::Rect scissor;
VkViewport viewport;
diff --git a/src/Vulkan/VkShaderModule.cpp b/src/Vulkan/VkShaderModule.cpp
index 4c824a3..b48c909 100644
--- a/src/Vulkan/VkShaderModule.cpp
+++ b/src/Vulkan/VkShaderModule.cpp
@@ -22,6 +22,7 @@
ShaderModule::ShaderModule(const VkShaderModuleCreateInfo* pCreateInfo, void* mem) : code(reinterpret_cast<uint32_t*>(mem))
{
memcpy(code, pCreateInfo->pCode, pCreateInfo->codeSize);
+ wordCount = static_cast<uint32_t>(pCreateInfo->codeSize / sizeof(uint32_t));
}
void ShaderModule::destroy(const VkAllocationCallbacks* pAllocator)
@@ -34,10 +35,4 @@
return pCreateInfo->codeSize;
}
-rr::Routine* ShaderModule::compile(const VkAllocationCallbacks* pAllocator)
-{
- // FIXME: Compile the code here
- return nullptr;
-}
-
} // namespace vk
diff --git a/src/Vulkan/VkShaderModule.hpp b/src/Vulkan/VkShaderModule.hpp
index f7a4de7..7287d69 100644
--- a/src/Vulkan/VkShaderModule.hpp
+++ b/src/Vulkan/VkShaderModule.hpp
@@ -16,6 +16,7 @@
#define VK_SHADER_MODULE_HPP_
#include "VkObject.hpp"
+#include <vector>
namespace rr
{
@@ -32,12 +33,14 @@
~ShaderModule() = delete;
void destroy(const VkAllocationCallbacks* pAllocator);
- rr::Routine* compile(const VkAllocationCallbacks* pAllocator);
-
static size_t ComputeRequiredAllocationSize(const VkShaderModuleCreateInfo* pCreateInfo);
+ // TODO: reconsider boundary of ShaderModule class; try to avoid 'expose the
+ // guts' operations, and this copy.
+ std::vector<uint32_t> getCode() const { return std::vector<uint32_t>{ code, code + wordCount };}
private:
uint32_t* code = nullptr;
+ uint32_t wordCount = 0;
};
static inline ShaderModule* Cast(VkShaderModule object)
diff --git a/src/Vulkan/vulkan.vcxproj b/src/Vulkan/vulkan.vcxproj
index 410fadf..32abd27 100644
--- a/src/Vulkan/vulkan.vcxproj
+++ b/src/Vulkan/vulkan.vcxproj
@@ -145,6 +145,7 @@
<ClCompile Include="..\Pipeline\SetupRoutine.cpp" />
<ClCompile Include="..\Pipeline\Shader.cpp" />
<ClCompile Include="..\Pipeline\ShaderCore.cpp" />
+ <ClCompile Include="..\Pipeline\SpirvShader.cpp" />
<ClCompile Include="..\Pipeline\VertexProgram.cpp" />
<ClCompile Include="..\Pipeline\VertexRoutine.cpp" />
<ClCompile Include="..\Pipeline\VertexShader.cpp" />
@@ -251,6 +252,7 @@
<ClInclude Include="..\Pipeline\SetupRoutine.hpp" />
<ClInclude Include="..\Pipeline\Shader.hpp" />
<ClInclude Include="..\Pipeline\ShaderCore.hpp" />
+ <ClInclude Include="..\Pipeline\SpirvShader.hpp" />
<ClInclude Include="..\Pipeline\VertexPipeline.hpp" />
<ClInclude Include="..\Pipeline\VertexProgram.hpp" />
<ClInclude Include="..\Pipeline\VertexRoutine.hpp" />
@@ -319,4 +321,4 @@
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
-</Project>
\ No newline at end of file
+</Project>
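
Aside on the TODO in VkShaderModule.hpp above about avoiding the "expose
the guts" copy in getCode(): one possible direction, sketched here purely
hypothetically (SpirvWords is an invented name, and nothing below is part
of this change), is to hand out a non-owning view of the words instead:

    // Hypothetical alternative to the copying getCode(): a non-owning view
    // over the module's code words.
    struct SpirvWords
    {
        uint32_t const *data;
        uint32_t wordCount;
    };

    // SpirvWords getCode() const { return SpirvWords{ code, wordCount }; }

The trade-off is lifetime: such a view stays valid only while the
ShaderModule outlives whatever is built from it, which is presumably why
this change keeps the simple copy for now.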