// Copyright 2019 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "VkPipelineCache.hpp"
#include <cstring>

namespace vk
{

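// SpecializationInfo takes a deep copy of the given VkSpecializationInfo, so
// that the cache key remains valid after the application frees or reuses the
// structure it passed at pipeline creation time.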
PipelineCache::SpirvShaderKey::SpecializationInfo::SpecializationInfo(const VkSpecializationInfo* specializationInfo)
{
	if(specializationInfo)
	{
		info = reinterpret_cast<VkSpecializationInfo*>(
			allocate(sizeof(VkSpecializationInfo), REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY));

		info->mapEntryCount = specializationInfo->mapEntryCount;
		info->pMapEntries = nullptr;  // Must not be left uninitialized: the destructor deallocates it unconditionally.
		if(specializationInfo->mapEntryCount > 0)
		{
			size_t entriesSize = specializationInfo->mapEntryCount * sizeof(VkSpecializationMapEntry);
			VkSpecializationMapEntry* mapEntries = reinterpret_cast<VkSpecializationMapEntry*>(
				allocate(entriesSize, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY));
			memcpy(mapEntries, specializationInfo->pMapEntries, entriesSize);
			info->pMapEntries = mapEntries;
		}

		info->dataSize = specializationInfo->dataSize;
		info->pData = nullptr;  // Likewise, avoid passing an uninitialized pointer to deallocate() when dataSize == 0.
		if(specializationInfo->dataSize > 0)
		{
			void* data = allocate(specializationInfo->dataSize, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY);
			memcpy(data, specializationInfo->pData, specializationInfo->dataSize);
			info->pData = data;
		}
	}
}

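// Frees the deep copy made by the constructor. info is only non-null when the
// constructor received a VkSpecializationInfo, and deallocate() is expected to
// tolerate the null entry/data pointers left by an empty specialization.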
PipelineCache::SpirvShaderKey::SpecializationInfo::~SpecializationInfo()
{
	if(info)
	{
		deallocate(const_cast<VkSpecializationMapEntry*>(info->pMapEntries), DEVICE_MEMORY);
		deallocate(const_cast<void*>(info->pData), DEVICE_MEMORY);
		deallocate(info, DEVICE_MEMORY);
	}
}

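// Orders SpecializationInfo objects by content: the cheap comparisons (entry
// count, data size) come first, followed by memcmp of the actual map entries
// and data. When either side lacks specialization info, fall back to comparing
// the pointers, which makes a null info compare less than any non-null one.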
bool PipelineCache::SpirvShaderKey::SpecializationInfo::operator<(const SpecializationInfo& specializationInfo) const
{
	if(info && specializationInfo.info)
	{
		if(info->mapEntryCount != specializationInfo.info->mapEntryCount)
		{
			return info->mapEntryCount < specializationInfo.info->mapEntryCount;
		}

		if(info->dataSize != specializationInfo.info->dataSize)
		{
			return info->dataSize < specializationInfo.info->dataSize;
		}

		if(info->mapEntryCount > 0)
		{
			int cmp = memcmp(info->pMapEntries, specializationInfo.info->pMapEntries, info->mapEntryCount * sizeof(VkSpecializationMapEntry));
			if(cmp != 0)
			{
				return cmp < 0;
			}
		}

		if(info->dataSize > 0)
		{
			int cmp = memcmp(info->pData, specializationInfo.info->pData, info->dataSize);
			if(cmp != 0)
			{
				return cmp < 0;
			}
		}

		// Identical contents: treat the keys as equivalent regardless of
		// which allocation they live in, so equal shaders share a cache entry.
		return false;
	}

	return (info < specializationInfo.info);
}

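// A SpirvShaderKey identifies a compiled shader by everything that affects its
// code generation: the pipeline stage, the entry point name, the SPIR-V
// binary, the render pass and subpass it executes in, and its specialization
// constants.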
PipelineCache::SpirvShaderKey::SpirvShaderKey(const VkShaderStageFlagBits pipelineStage,
                                              const std::string& entryPointName,
                                              const std::vector<uint32_t>& insns,
                                              const vk::RenderPass *renderPass,
                                              const uint32_t subpassIndex,
                                              const VkSpecializationInfo* specializationInfo) :
	pipelineStage(pipelineStage),
	entryPointName(entryPointName),
	insns(insns),
	renderPass(renderPass),
	subpassIndex(subpassIndex),
	specializationInfo(specializationInfo)
{
}

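// Strict weak ordering over all key fields, allowing SpirvShaderKey to serve
// as a std::map key. Cheap scalar and size comparisons come before the memcmp
// of the entry point name and the SPIR-V instruction stream.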
bool PipelineCache::SpirvShaderKey::operator<(const SpirvShaderKey &other) const
{
	if(pipelineStage != other.pipelineStage)
	{
		return pipelineStage < other.pipelineStage;
	}

	if(renderPass != other.renderPass)
	{
		return renderPass < other.renderPass;
	}

	if(subpassIndex != other.subpassIndex)
	{
		return subpassIndex < other.subpassIndex;
	}

	if(insns.size() != other.insns.size())
	{
		return insns.size() < other.insns.size();
	}

	if(entryPointName.size() != other.entryPointName.size())
	{
		return entryPointName.size() < other.entryPointName.size();
	}

	int cmp = memcmp(entryPointName.c_str(), other.entryPointName.c_str(), entryPointName.size());
	if(cmp != 0)
	{
		return cmp < 0;
	}

	cmp = memcmp(insns.data(), other.insns.data(), insns.size() * sizeof(uint32_t));
	if(cmp != 0)
	{
		return cmp < 0;
	}

	return (specializationInfo < other.specializationInfo);
}

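// The serialized cache blob begins with the header that the Vulkan spec
// mandates for vkGetPipelineCacheData:
//
//   offset  size          field
//        0  4             headerLength (= sizeof(CacheHeader))
//        4  4             headerVersion (VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
//        8  4             vendorID
//       12  4             deviceID
//       16  VK_UUID_SIZE  pipelineCacheUUID
//
// Any initial data supplied by the application is stored verbatim after it.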
PipelineCache::PipelineCache(const VkPipelineCacheCreateInfo* pCreateInfo, void* mem) :
	dataSize(ComputeRequiredAllocationSize(pCreateInfo)), data(reinterpret_cast<uint8_t*>(mem))
{
	CacheHeader* header = reinterpret_cast<CacheHeader*>(mem);
	header->headerLength = sizeof(CacheHeader);
	header->headerVersion = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
	header->vendorID = VENDOR_ID;
	header->deviceID = DEVICE_ID;
	memcpy(header->pipelineCacheUUID, SWIFTSHADER_UUID, VK_UUID_SIZE);

	if(pCreateInfo->pInitialData && (pCreateInfo->initialDataSize > 0))
	{
		memcpy(data + sizeof(CacheHeader), pCreateInfo->pInitialData, pCreateInfo->initialDataSize);
	}
}

PipelineCache::~PipelineCache()
{
	spirvShaders.clear();
	computePrograms.clear();
}

void PipelineCache::destroy(const VkAllocationCallbacks* pAllocator)
{
	vk::deallocate(data, pAllocator);
}

size_t PipelineCache::ComputeRequiredAllocationSize(const VkPipelineCacheCreateInfo* pCreateInfo)
{
	return pCreateInfo->initialDataSize + sizeof(CacheHeader);
}

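// Implements vkGetPipelineCacheData's two-call idiom: a null pData queries the
// required size, a second call retrieves the blob. A typical caller does
//
//   size_t size = 0;
//   vkGetPipelineCacheData(device, cache, &size, nullptr);      // query size
//   std::vector<uint8_t> blob(size);
//   vkGetPipelineCacheData(device, cache, &size, blob.data());  // fetch data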
VkResult PipelineCache::getData(size_t* pDataSize, void* pData)
{
	if(!pData)
	{
		*pDataSize = dataSize;
		return VK_SUCCESS;
	}

	if(*pDataSize < dataSize)
	{
		// The buffer is too small to hold the entire cache.
		*pDataSize = 0;
		return VK_INCOMPLETE;
	}

	// A larger buffer than necessary is valid; report the number of bytes
	// actually written.
	*pDataSize = dataSize;
	memcpy(pData, data, dataSize);

	return VK_SUCCESS;
}

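// Implements vkMergePipelineCaches: all shaders and compute programs cached by
// the source caches are made available in this cache. std::map::insert keeps
// the destination's entry for any key present in both caches.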
VkResult PipelineCache::merge(uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches)
{
	for(uint32_t i = 0; i < srcCacheCount; i++)
	{
		PipelineCache* srcCache = Cast(pSrcCaches[i]);

		{
			std::unique_lock<std::mutex> lock(spirvShadersMutex);
			spirvShaders.insert(srcCache->spirvShaders.begin(), srcCache->spirvShaders.end());
		}

		{
			std::unique_lock<std::mutex> lock(computeProgramsMutex);
			computePrograms.insert(srcCache->computePrograms.begin(), srcCache->computePrograms.end());
		}
	}

	return VK_SUCCESS;
}

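// Cache lookup and insertion. Note that these accessors do not take the
// corresponding mutex themselves; callers that may race with other threads are
// expected to synchronize, e.g. by holding spirvShadersMutex or
// computeProgramsMutex around a lookup-then-insert sequence.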
const std::shared_ptr<sw::SpirvShader>* PipelineCache::operator[](const PipelineCache::SpirvShaderKey& key) const
{
	auto it = spirvShaders.find(key);
	return (it != spirvShaders.end()) ? &(it->second) : nullptr;
}

void PipelineCache::insert(const PipelineCache::SpirvShaderKey& key, const std::shared_ptr<sw::SpirvShader> &shader)
{
	spirvShaders[key] = shader;
}

const std::shared_ptr<sw::ComputeProgram>* PipelineCache::operator[](const PipelineCache::ComputeProgramKey& key) const
{
	auto it = computePrograms.find(key);
	return (it != computePrograms.end()) ? &(it->second) : nullptr;
}

void PipelineCache::insert(const PipelineCache::ComputeProgramKey& key, const std::shared_ptr<sw::ComputeProgram> &computeProgram)
{
	computePrograms[key] = computeProgram;
}

} // namespace vk