blob: 4ef1ee55d3965a1ea292d26f9538bdab81445be9 [file] [log] [blame]
Alexis Hetu767b41b2018-09-26 11:25:46 -04001// Copyright 2018 The SwiftShader Authors. All Rights Reserved.
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
Nicolas Capensd689d1c2018-11-19 16:02:36 -050015#include "VkDevice.hpp"
16
Alexis Hetu767b41b2018-09-26 11:25:46 -040017#include "VkConfig.h"
18#include "VkDebug.hpp"
Alexis Hetu048974f2019-02-15 15:28:37 -050019#include "VkDescriptorSetLayout.hpp"
Alexis Hetue1f51b92019-04-23 15:34:34 -040020#include "VkFence.hpp"
Alexis Hetu9e4d0402018-10-16 15:44:12 -040021#include "VkQueue.hpp"
Alexis Hetu0da99f52019-02-27 12:54:52 -050022#include "Device/Blitter.hpp"
Nicolas Capensd689d1c2018-11-19 16:02:36 -050023
Alexis Hetue1f51b92019-04-23 15:34:34 -040024#include <chrono>
25#include <climits>
Alexis Hetu767b41b2018-09-26 11:25:46 -040026#include <new> // Must #include this to use "placement new"
27
Alexis Hetue1f51b92019-04-23 15:34:34 -040028namespace
29{
30 std::chrono::time_point<std::chrono::system_clock, std::chrono::nanoseconds> now()
31 {
32 return std::chrono::time_point_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now());
33 }
34}
35
Alexis Hetu767b41b2018-09-26 11:25:46 -040036namespace vk
37{
38
Ben Clayton6897e9b2019-07-16 17:27:27 +010039std::shared_ptr<rr::Routine> Device::SamplingRoutineCache::query(const vk::Device::SamplingRoutineCache::Key& key) const
Alexis Hetu6448bd62019-06-11 15:58:59 -040040{
Ben Claytonf0464022019-08-08 18:33:41 +010041 return cache.query(key);
Alexis Hetu6448bd62019-06-11 15:58:59 -040042}
43
// Inserts 'routine' into the sampling routine cache under 'key'.
// Passing a null routine is a caller error.
void Device::SamplingRoutineCache::add(const vk::Device::SamplingRoutineCache::Key& key, const std::shared_ptr<rr::Routine>& routine)
{
	ASSERT(routine);
	cache.add(key, routine);
}
49
Ben Clayton03c2aea2019-07-29 19:04:53 +010050rr::Routine* Device::SamplingRoutineCache::queryConst(const vk::Device::SamplingRoutineCache::Key& key) const
Alexis Hetu35755502019-07-22 13:51:49 -040051{
Ben Claytonf0464022019-08-08 18:33:41 +010052 return cache.queryConstCache(key).get();
Alexis Hetu35755502019-07-22 13:51:49 -040053}
54
// Publishes pending entries into the underlying cache's "const" (queryConst)
// view. Device::updateSamplingRoutineConstCache() takes the sampling routine
// cache mutex before calling this.
void Device::SamplingRoutineCache::updateConstCache()
{
	cache.updateConstCache();
}
59
// Constructs a Device over caller-provided storage 'mem', which must be at
// least ComputeRequiredAllocationSize(pCreateInfo) bytes: a Queue array
// immediately followed by the enabled-extension name table.
Device::Device(const VkDeviceCreateInfo* pCreateInfo, void* mem, PhysicalDevice *physicalDevice, const VkPhysicalDeviceFeatures *enabledFeatures, yarn::Scheduler *scheduler)
	: physicalDevice(physicalDevice),
	  queues(reinterpret_cast<Queue*>(mem)),
	  enabledExtensionCount(pCreateInfo->enabledExtensionCount),
	  enabledFeatures(enabledFeatures ? *enabledFeatures : VkPhysicalDeviceFeatures{}) // "Setting pEnabledFeatures to NULL and not including a VkPhysicalDeviceFeatures2 in the pNext member of VkDeviceCreateInfo is equivalent to setting all members of the structure to VK_FALSE."
{
	// First pass: total up the queues requested across all create infos.
	for(uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
	{
		const VkDeviceQueueCreateInfo& queueCreateInfo = pCreateInfo->pQueueCreateInfos[i];
		queueCount += queueCreateInfo.queueCount;
	}

	// Second pass: placement-construct each Queue in the caller-provided
	// storage. These are destroyed explicitly in destroy().
	uint32_t queueID = 0;
	for(uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
	{
		const VkDeviceQueueCreateInfo& queueCreateInfo = pCreateInfo->pQueueCreateInfos[i];

		for(uint32_t j = 0; j < queueCreateInfo.queueCount; j++, queueID++)
		{
			new (&queues[queueID]) Queue(this, scheduler);
		}
	}

	// The extension name table lives directly after the Queue array
	// (see ComputeRequiredAllocationSize for the matching layout).
	extensions = reinterpret_cast<ExtensionName*>(static_cast<uint8_t*>(mem) + (sizeof(Queue) * queueCount));
	for(uint32_t i = 0; i < enabledExtensionCount; i++)
	{
		// NOTE(review): strncpy does not null-terminate when the source name is
		// exactly VK_MAX_EXTENSION_NAME_SIZE chars; hasExtension() only performs
		// bounded strncmp, so this is safe as long as lookups stay bounded.
		strncpy(extensions[i], pCreateInfo->ppEnabledExtensionNames[i], VK_MAX_EXTENSION_NAME_SIZE);
	}

	if(pCreateInfo->enabledLayerCount)
	{
		// "The ppEnabledLayerNames and enabledLayerCount members of VkDeviceCreateInfo are deprecated and their values must be ignored by implementations."
		UNIMPLEMENTED("enabledLayerCount"); // TODO(b/119321052): UNIMPLEMENTED() should be used only for features that must still be implemented. Use a more informational macro here.
	}

	// FIXME (b/119409619): use an allocator here so we can control all memory allocations
	blitter.reset(new sw::Blitter());
	samplingRoutineCache.reset(new SamplingRoutineCache());
}
99
100void Device::destroy(const VkAllocationCallbacks* pAllocator)
101{
Alexis Hetuaf3c1022018-12-12 13:26:15 -0500102 for(uint32_t i = 0; i < queueCount; i++)
103 {
Ben Clayton7e0a0362019-05-20 11:32:35 +0100104 queues[i].~Queue();
Alexis Hetuaf3c1022018-12-12 13:26:15 -0500105 }
106
Alexis Hetu767b41b2018-09-26 11:25:46 -0400107 vk::deallocate(queues, pAllocator);
108}
109
Nicolas Capens0c736802019-05-27 12:53:31 -0400110size_t Device::ComputeRequiredAllocationSize(const VkDeviceCreateInfo* pCreateInfo)
Alexis Hetu767b41b2018-09-26 11:25:46 -0400111{
112 uint32_t queueCount = 0;
Nicolas Capens0c736802019-05-27 12:53:31 -0400113 for(uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
Alexis Hetu767b41b2018-09-26 11:25:46 -0400114 {
Nicolas Capens0c736802019-05-27 12:53:31 -0400115 queueCount += pCreateInfo->pQueueCreateInfos[i].queueCount;
Alexis Hetu767b41b2018-09-26 11:25:46 -0400116 }
117
Nicolas Capens0c736802019-05-27 12:53:31 -0400118 return (sizeof(Queue) * queueCount) + (pCreateInfo->enabledExtensionCount * sizeof(ExtensionName));
Alexis Hetu352791e2019-05-17 16:42:34 -0400119}
120
121bool Device::hasExtension(const char* extensionName) const
122{
123 for(uint32_t i = 0; i < enabledExtensionCount; i++)
124 {
125 if(strncmp(extensions[i], extensionName, VK_MAX_EXTENSION_NAME_SIZE) == 0)
126 {
127 return true;
128 }
129 }
130 return false;
Alexis Hetu767b41b2018-09-26 11:25:46 -0400131}
132
// Returns the queue at 'queueIndex'. SwiftShader exposes a single queue
// family, so 'queueFamilyIndex' must be 0.
VkQueue Device::getQueue(uint32_t queueFamilyIndex, uint32_t queueIndex) const
{
	ASSERT(queueFamilyIndex == 0);

	return queues[queueIndex];
}
139
// Implements vkWaitForFences: blocks until all (waitAll != VK_FALSE) or any
// of the given fences are signaled, or until 'timeout' nanoseconds elapse.
// Returns VK_SUCCESS when the wait condition is met, VK_TIMEOUT otherwise.
VkResult Device::waitForFences(uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout)
{
	using time_point = std::chrono::time_point<std::chrono::system_clock, std::chrono::nanoseconds>;
	const time_point start = now();
	// Clamp the caller's timeout so that start + timeout cannot overflow the
	// signed nanosecond count backing time_point; anything larger is treated
	// as an infinite wait.
	const uint64_t max_timeout = (LLONG_MAX - start.time_since_epoch().count());
	bool infiniteTimeout = (timeout > max_timeout);
	// Absolute deadline shared by every per-fence wait below.
	const time_point end_ns = start + std::chrono::nanoseconds(std::min(max_timeout, timeout));

	if(waitAll != VK_FALSE) // All fences must be signaled
	{
		for(uint32_t i = 0; i < fenceCount; i++)
		{
			if(timeout == 0)
			{
				// Zero timeout must not block: pure status poll.
				if(Cast(pFences[i])->getStatus() != VK_SUCCESS) // At least one fence is not signaled
				{
					return VK_TIMEOUT;
				}
			}
			else if(infiniteTimeout)
			{
				if(Cast(pFences[i])->wait() != VK_SUCCESS) // At least one fence is not signaled
				{
					return VK_TIMEOUT;
				}
			}
			else
			{
				// All fences wait against the same absolute deadline, so the
				// total time spent still honors the caller's 'timeout'.
				if(Cast(pFences[i])->wait(end_ns) != VK_SUCCESS) // At least one fence is not signaled
				{
					return VK_TIMEOUT;
				}
			}
		}

		return VK_SUCCESS;
	}
	else // At least one fence must be signaled
	{
		// Start by quickly checking the status of all fences, as only one is required
		for(uint32_t i = 0; i < fenceCount; i++)
		{
			if(Cast(pFences[i])->getStatus() == VK_SUCCESS) // At least one fence is signaled
			{
				return VK_SUCCESS;
			}
		}

		if(timeout > 0)
		{
			// NOTE(review): fences are waited on sequentially against the shared
			// deadline; if an earlier fence exhausts the timeout, later fences
			// are effectively only polled. That still satisfies the overall
			// timeout contract, but is not a true wait-any primitive.
			for(uint32_t i = 0; i < fenceCount; i++)
			{
				if(infiniteTimeout)
				{
					if(Cast(pFences[i])->wait() == VK_SUCCESS) // At least one fence is signaled
					{
						return VK_SUCCESS;
					}
				}
				else
				{
					if(Cast(pFences[i])->wait(end_ns) == VK_SUCCESS) // At least one fence is signaled
					{
						return VK_SUCCESS;
					}
				}
			}
		}

		return VK_TIMEOUT;
	}
}
212
Alexis Hetue1f51b92019-04-23 15:34:34 -0400213VkResult Device::waitIdle()
Ben Clayton00424c12019-03-17 17:29:30 +0000214{
215 for(uint32_t i = 0; i < queueCount; i++)
216 {
217 queues[i].waitIdle();
218 }
Alexis Hetue1f51b92019-04-23 15:34:34 -0400219
220 return VK_SUCCESS;
Alexis Hetucda0cf92019-01-24 15:48:55 -0500221}
222
// Implements vkGetDescriptorSetLayoutSupport. This query is only meaningful
// for layouts beyond the device's published limits; SwiftShader reports no
// extended support, so every queried layout is marked unsupported.
void Device::getDescriptorSetLayoutSupport(const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
	VkDescriptorSetLayoutSupport* pSupport) const
{
	// Mark everything as unsupported
	pSupport->supported = VK_FALSE;
}
229
Alexis Hetu048974f2019-02-15 15:28:37 -0500230void Device::updateDescriptorSets(uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites,
231 uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies)
232{
233 for(uint32_t i = 0; i < descriptorWriteCount; i++)
234 {
Alexis Hetu6448bd62019-06-11 15:58:59 -0400235 DescriptorSetLayout::WriteDescriptorSet(this, pDescriptorWrites[i]);
Alexis Hetu048974f2019-02-15 15:28:37 -0500236 }
237
238 for(uint32_t i = 0; i < descriptorCopyCount; i++)
239 {
240 DescriptorSetLayout::CopyDescriptorSet(pDescriptorCopies[i]);
241 }
242}
243
// Returns the device-wide sampling routine cache. Non-owning pointer; the
// cache is created in the constructor and lives as long as the Device.
Device::SamplingRoutineCache* Device::getSamplingRoutineCache() const
{
	return samplingRoutineCache.get();
}
248
// Looks up 'key' in the sampling routine const cache without taking
// samplingRoutineCacheMutex (contrast with updateSamplingRoutineConstCache).
// Returns a non-owning pointer, or null when absent.
// NOTE(review): lock-free safety here depends on updateConstCache() being the
// only mutator of the const view — confirm in the cache implementation.
rr::Routine* Device::findInConstCache(const SamplingRoutineCache::Key& key) const
{
	return samplingRoutineCache->queryConst(key);
}
253
254void Device::updateSamplingRoutineConstCache()
255{
256 std::unique_lock<std::mutex> lock(samplingRoutineCacheMutex);
257 samplingRoutineCache->updateConstCache();
258}
259
// Exposes the mutex guarding the sampling routine cache so callers can
// serialize their query()/add() sequences against const-cache updates.
std::mutex& Device::getSamplingRoutineCacheMutex()
{
	return samplingRoutineCacheMutex;
}
264
Alexis Hetu767b41b2018-09-26 11:25:46 -0400265} // namespace vk