32-bit safe code
To make it easy to add 32-bit support, this CL adds a header
that wraps the Vulkan handles in a union, guaranteeing that
the handles stay 64 bits wide even when pointers are 32 bits.
This change should be a no-op.
Bug: b/129979580 b/127920555
Change-Id: I54254929186584ec4544a1da5a7def7cf56e392e
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/31070
Presubmit-Ready: Alexis Hétu <sugoi@google.com>
Kokoro-Presubmit: kokoro <noreply+kokoro@google.com>
Reviewed-by: Nicolas Capens <nicolascapens@google.com>
Tested-by: Alexis Hétu <sugoi@google.com>
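The core technique lives in the new src/Vulkan/VulkanPlatform.h below. As a
minimal standalone sketch of the union trick (Handle64 is a hypothetical name;
the CL's actual class is VkWrapper):

    #include <cstdint>

    template<typename HandleType>
    class alignas(sizeof(uint64_t)) Handle64
    {
    public:
        Handle64(HandleType handle) { u.padding = 0; u.handle = handle; }
        HandleType get() const { return u.handle; }

    private:
        union
        {
            HandleType handle;  // 32 or 64 bits, depending on the platform
            uint64_t padding;   // pads the union to 64 bits on 32-bit builds
        } u;
    };

    static_assert(sizeof(Handle64<void*>) == sizeof(uint64_t),
                  "a handle must occupy 64 bits even with 32-bit pointers");

On a 64-bit build the union is effectively a plain pointer; on a 32-bit build
the uint64_t member keeps the object 8 bytes, which is the size the Vulkan ABI
requires for non-dispatchable handles.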
diff --git a/src/Device/SwiftConfig.hpp b/src/Device/SwiftConfig.hpp
index 924914f..5b3aefa 100644
--- a/src/Device/SwiftConfig.hpp
+++ b/src/Device/SwiftConfig.hpp
@@ -23,6 +23,10 @@
#include <string>
+#ifdef Status
+#undef Status // b/127920555
+#endif
+
namespace sw
{
class SwiftConfig
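For context on these #undefs (b/127920555): on Linux the X11 headers define
Status, Bool and None as object-like macros, which silently rewrite unrelated
C++ identifiers. Roughly:

    // X11 (X.h / Xlib.h) defines, among others:
    //   #define Bool   int
    //   #define Status int
    //   #define None   0L
    // With those macros in scope, an ordinary declaration such as
    //   enum class Result { None, Status };
    // is preprocessed into the ill-formed
    //   enum class Result { 0L, int };

Undefining them after the offending include keeps SwiftShader's own
Bool/Status/None identifiers usable.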
diff --git a/src/Pipeline/PixelRoutine.cpp b/src/Pipeline/PixelRoutine.cpp
index c762243..eeb2116 100644
--- a/src/Pipeline/PixelRoutine.cpp
+++ b/src/Pipeline/PixelRoutine.cpp
@@ -22,10 +22,6 @@
#include "Vulkan/VkDebug.hpp"
#include "Vulkan/VkPipelineLayout.hpp"
-#ifdef Bool
-#undef Bool // b/127920555
-#endif
-
namespace sw
{
extern bool postBlendSRGB;
diff --git a/src/Pipeline/SpirvShader.cpp b/src/Pipeline/SpirvShader.cpp
index d4024bb..8162f2f 100644
--- a/src/Pipeline/SpirvShader.cpp
+++ b/src/Pipeline/SpirvShader.cpp
@@ -29,11 +29,6 @@
#include <spirv/unified1/spirv.hpp>
#include <spirv/unified1/GLSL.std.450.h>
-#ifdef Bool
-#undef Bool // b/127920555
-#undef None
-#endif
-
namespace
{
constexpr float PI = 3.141592653589793f;
diff --git a/src/Pipeline/SpirvShaderSampling.cpp b/src/Pipeline/SpirvShaderSampling.cpp
index 63b7e1d..4e3a949 100644
--- a/src/Pipeline/SpirvShaderSampling.cpp
+++ b/src/Pipeline/SpirvShaderSampling.cpp
@@ -30,11 +30,6 @@
#include <mutex>
-#ifdef Bool
-#undef Bool // b/127920555
-#undef None
-#endif
-
namespace sw {
SpirvShader::ImageSampler *SpirvShader::getImageSampler(uint32_t inst, vk::SampledImageDescriptor const *imageDescriptor, const vk::Sampler *sampler)
diff --git a/src/Vulkan/VkBuffer.hpp b/src/Vulkan/VkBuffer.hpp
index 03cf95b..46a0017 100644
--- a/src/Vulkan/VkBuffer.hpp
+++ b/src/Vulkan/VkBuffer.hpp
@@ -52,7 +52,7 @@
static inline Buffer* Cast(VkBuffer object)
{
- return reinterpret_cast<Buffer*>(object);
+ return reinterpret_cast<Buffer*>(object.get());
}
} // namespace vk
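The same one-line Cast change repeats for every non-dispatchable handle type
below. The cause is mechanical: a handle such as VkBuffer used to be a raw
VkBuffer_T*, which reinterpret_cast can convert directly, but after this CL it
is a wrapper class object, and reinterpret_cast cannot convert a class value
to a pointer, so the raw handle is unwrapped with get() first. In terms of the
hypothetical Handle64 sketch above:

    struct Buffer;                           // driver-side object
    struct VkBuffer_T;                       // opaque API type
    using VkBufferSketch = Handle64<VkBuffer_T*>;

    inline Buffer* Cast(VkBufferSketch object)
    {
        // object.get() yields the VkBuffer_T*, which can then be
        // reinterpreted as the driver object it actually points to.
        return reinterpret_cast<Buffer*>(object.get());
    }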
diff --git a/src/Vulkan/VkBufferView.hpp b/src/Vulkan/VkBufferView.hpp
index a2055f7..02e7044 100644
--- a/src/Vulkan/VkBufferView.hpp
+++ b/src/Vulkan/VkBufferView.hpp
@@ -48,7 +48,7 @@
static inline BufferView* Cast(VkBufferView object)
{
- return reinterpret_cast<BufferView*>(object);
+ return reinterpret_cast<BufferView*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkCommandPool.hpp b/src/Vulkan/VkCommandPool.hpp
index fca0a4b..f50cdae 100644
--- a/src/Vulkan/VkCommandPool.hpp
+++ b/src/Vulkan/VkCommandPool.hpp
@@ -41,7 +41,7 @@
static inline CommandPool* Cast(VkCommandPool object)
{
- return reinterpret_cast<CommandPool*>(object);
+ return reinterpret_cast<CommandPool*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkConfig.h b/src/Vulkan/VkConfig.h
index 27ef7de..6e4ad11 100644
--- a/src/Vulkan/VkConfig.h
+++ b/src/Vulkan/VkConfig.h
@@ -17,7 +17,7 @@
#include "Version.h"
-#include <vulkan/vulkan_core.h>
+#include <Vulkan/VulkanPlatform.h>
namespace vk
{
diff --git a/src/Vulkan/VkDescriptorPool.cpp b/src/Vulkan/VkDescriptorPool.cpp
index 8ac1558..bd1719a 100644
--- a/src/Vulkan/VkDescriptorPool.cpp
+++ b/src/Vulkan/VkDescriptorPool.cpp
@@ -24,7 +24,7 @@
{
DescriptorPool::DescriptorPool(const VkDescriptorPoolCreateInfo* pCreateInfo, void* mem) :
- pool(reinterpret_cast<VkDescriptorSet>(mem)),
+ pool(static_cast<uint8_t*>(mem)),
poolSize(ComputeRequiredAllocationSize(pCreateInfo))
{
}
@@ -77,17 +77,17 @@
// First, look for space at the end of the pool
const auto itLast = nodes.rbegin();
- ptrdiff_t itemStart = reinterpret_cast<char*>(itLast->set) - reinterpret_cast<char*>(pool);
+ ptrdiff_t itemStart = itLast->set - pool;
ptrdiff_t nextItemStart = itemStart + itLast->size;
size_t freeSpace = poolSize - nextItemStart;
if(freeSpace >= size)
{
- return reinterpret_cast<VkDescriptorSet>(reinterpret_cast<char*>(pool) + nextItemStart);
+ return pool + nextItemStart;
}
// Second, look for space at the beginning of the pool
	const auto itBegin = nodes.begin();
- freeSpace = reinterpret_cast<char*>(itBegin->set) - reinterpret_cast<char*>(pool);
+ freeSpace = itBegin->set - pool;
if(freeSpace >= size)
{
return pool;
@@ -99,8 +99,8 @@
++nextIt;
for(auto it = itBegin; nextIt != itEnd; ++it, ++nextIt)
{
- VkDescriptorSet freeSpaceStart = reinterpret_cast<VkDescriptorSet>(reinterpret_cast<char*>(it->set) + it->size);
- freeSpace = reinterpret_cast<char*>(nextIt->set) - reinterpret_cast<char*>(freeSpaceStart);
+ VkDescriptorSet freeSpaceStart(it->set + it->size);
+ freeSpace = nextIt->set - freeSpaceStart;
if(freeSpace >= size)
{
return freeSpaceStart;
@@ -132,7 +132,7 @@
{
pDescriptorSets[i] = memory;
nodes.insert(Node(pDescriptorSets[i], sizes[i]));
- memory = reinterpret_cast<VkDescriptorSet>(reinterpret_cast<char*>(memory) + sizes[i]);
+ memory += sizes[i];
}
return VK_SUCCESS;
@@ -193,11 +193,11 @@
// Compute space at the end of the pool
const auto itLast = nodes.rbegin();
- totalFreeSize += poolSize - ((reinterpret_cast<char*>(itLast->set) - reinterpret_cast<char*>(pool)) + itLast->size);
+	totalFreeSize += poolSize - ((itLast->set - pool) + itLast->size);
// Compute space at the beginning of the pool
	const auto itBegin = nodes.begin();
- totalFreeSize += reinterpret_cast<char*>(itBegin->set) - reinterpret_cast<char*>(pool);
+ totalFreeSize += itBegin->set - pool;
// Finally, look between existing pool items
const auto itEnd = nodes.end();
@@ -205,7 +205,7 @@
++nextIt;
for(auto it = itBegin; nextIt != itEnd; ++it, ++nextIt)
{
- totalFreeSize += (reinterpret_cast<char*>(nextIt->set) - reinterpret_cast<char*>(it->set)) - it->size;
+ totalFreeSize += (nextIt->set - it->set) - it->size;
}
return totalFreeSize;
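The rewrites above lean on the VkDescriptorSet specialization added in
VulkanPlatform.h below, whose handle type is uint8_t*: descriptor sets are
just offsets into the pool's memory, so the bookkeeping reduces to pointer
arithmetic. A condensed sketch of the tail-space computation (hypothetical
free function; note the parenthesization, which groups the last item's start
and size before subtracting from poolSize):

    #include <cstddef>
    #include <cstdint>
    #include <set>

    struct Node
    {
        uint8_t* set;   // where this descriptor set starts inside the pool
        size_t size;    // bytes it occupies
        bool operator<(const Node& n) const { return set < n.set; }
    };

    // Free bytes after the last allocation; assumes nodes is non-empty.
    size_t tailFreeSpace(const std::set<Node>& nodes, uint8_t* pool, size_t poolSize)
    {
        const Node& last = *nodes.rbegin();
        ptrdiff_t nextItemStart = (last.set - pool) + last.size;
        return poolSize - nextItemStart;
    }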
diff --git a/src/Vulkan/VkDescriptorPool.hpp b/src/Vulkan/VkDescriptorPool.hpp
index baa64f9..7462de0 100644
--- a/src/Vulkan/VkDescriptorPool.hpp
+++ b/src/Vulkan/VkDescriptorPool.hpp
@@ -45,18 +45,18 @@
bool operator<(const Node& node) const { return this->set < node.set; }
bool operator==(VkDescriptorSet set) const { return this->set == set; }
- VkDescriptorSet set;
- size_t size;
+ VkDescriptorSet set = VK_NULL_HANDLE;
+ size_t size = 0;
};
std::set<Node> nodes;
- VkDescriptorSet pool = nullptr;
+ VkDescriptorSet pool = VK_NULL_HANDLE;
size_t poolSize = 0;
};
static inline DescriptorPool* Cast(VkDescriptorPool object)
{
- return reinterpret_cast<DescriptorPool*>(object);
+ return reinterpret_cast<DescriptorPool*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkDescriptorSet.hpp b/src/Vulkan/VkDescriptorSet.hpp
index 2ce3d69..38ee493 100644
--- a/src/Vulkan/VkDescriptorSet.hpp
+++ b/src/Vulkan/VkDescriptorSet.hpp
@@ -41,7 +41,7 @@
inline DescriptorSet* Cast(VkDescriptorSet object)
{
- return reinterpret_cast<DescriptorSet*>(object);
+ return reinterpret_cast<DescriptorSet*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkDescriptorSetLayout.hpp b/src/Vulkan/VkDescriptorSetLayout.hpp
index dd14de3..ccf9f9f 100644
--- a/src/Vulkan/VkDescriptorSetLayout.hpp
+++ b/src/Vulkan/VkDescriptorSetLayout.hpp
@@ -135,7 +135,7 @@
static inline DescriptorSetLayout* Cast(VkDescriptorSetLayout object)
{
- return reinterpret_cast<DescriptorSetLayout*>(object);
+ return reinterpret_cast<DescriptorSetLayout*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkDescriptorUpdateTemplate.hpp b/src/Vulkan/VkDescriptorUpdateTemplate.hpp
index cb75f22..16ae9d0 100644
--- a/src/Vulkan/VkDescriptorUpdateTemplate.hpp
+++ b/src/Vulkan/VkDescriptorUpdateTemplate.hpp
@@ -39,7 +39,7 @@
static inline DescriptorUpdateTemplate* Cast(VkDescriptorUpdateTemplate object)
{
- return reinterpret_cast<DescriptorUpdateTemplate*>(object);
+ return reinterpret_cast<DescriptorUpdateTemplate*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkDeviceMemory.hpp b/src/Vulkan/VkDeviceMemory.hpp
index ade5092..a117a84 100644
--- a/src/Vulkan/VkDeviceMemory.hpp
+++ b/src/Vulkan/VkDeviceMemory.hpp
@@ -43,7 +43,7 @@
static inline DeviceMemory* Cast(VkDeviceMemory object)
{
- return reinterpret_cast<DeviceMemory*>(object);
+ return reinterpret_cast<DeviceMemory*>(object.get());
}
diff --git a/src/Vulkan/VkEvent.hpp b/src/Vulkan/VkEvent.hpp
index d56da28..34309e6 100644
--- a/src/Vulkan/VkEvent.hpp
+++ b/src/Vulkan/VkEvent.hpp
@@ -72,7 +72,7 @@
static inline Event* Cast(VkEvent object)
{
- return reinterpret_cast<Event*>(object);
+ return reinterpret_cast<Event*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkFence.hpp b/src/Vulkan/VkFence.hpp
index e86ac67..32ce06c 100644
--- a/src/Vulkan/VkFence.hpp
+++ b/src/Vulkan/VkFence.hpp
@@ -108,7 +108,7 @@
static inline Fence* Cast(VkFence object)
{
- return reinterpret_cast<Fence*>(object);
+ return reinterpret_cast<Fence*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkFormat.h b/src/Vulkan/VkFormat.h
index e6c4509..7184c34 100644
--- a/src/Vulkan/VkFormat.h
+++ b/src/Vulkan/VkFormat.h
@@ -15,7 +15,7 @@
#ifndef VK_FORMAT_UTILS_HPP_
#define VK_FORMAT_UTILS_HPP_
-#include <vulkan/vulkan_core.h>
+#include <Vulkan/VulkanPlatform.h>
namespace sw
{
diff --git a/src/Vulkan/VkFramebuffer.hpp b/src/Vulkan/VkFramebuffer.hpp
index 8830bb7..bc9ea38 100644
--- a/src/Vulkan/VkFramebuffer.hpp
+++ b/src/Vulkan/VkFramebuffer.hpp
@@ -44,7 +44,7 @@
static inline Framebuffer* Cast(VkFramebuffer object)
{
- return reinterpret_cast<Framebuffer*>(object);
+ return reinterpret_cast<Framebuffer*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkGetProcAddress.cpp b/src/Vulkan/VkGetProcAddress.cpp
index 1158d45..0938ce1 100644
--- a/src/Vulkan/VkGetProcAddress.cpp
+++ b/src/Vulkan/VkGetProcAddress.cpp
@@ -17,8 +17,6 @@
#include <unordered_map>
#include <string>
-#include <vulkan/vulkan.h>
-
#ifdef __ANDROID__
#include <cerrno>
#include <hardware/hwvulkan.h>
diff --git a/src/Vulkan/VkGetProcAddress.h b/src/Vulkan/VkGetProcAddress.h
index e322892..0531b77 100644
--- a/src/Vulkan/VkGetProcAddress.h
+++ b/src/Vulkan/VkGetProcAddress.h
@@ -15,7 +15,7 @@
#ifndef VK_UTILS_HPP_
#define VK_UTILS_HPP_
-#include <vulkan/vulkan_core.h>
+#include <Vulkan/VulkanPlatform.h>
namespace vk
{
diff --git a/src/Vulkan/VkImage.hpp b/src/Vulkan/VkImage.hpp
index e82e618..86dae4c 100644
--- a/src/Vulkan/VkImage.hpp
+++ b/src/Vulkan/VkImage.hpp
@@ -111,7 +111,7 @@
static inline Image* Cast(VkImage object)
{
- return reinterpret_cast<Image*>(object);
+ return reinterpret_cast<Image*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkImageView.hpp b/src/Vulkan/VkImageView.hpp
index 6577399..e741c16 100644
--- a/src/Vulkan/VkImageView.hpp
+++ b/src/Vulkan/VkImageView.hpp
@@ -89,7 +89,7 @@
static inline ImageView* Cast(VkImageView object)
{
- return reinterpret_cast<ImageView*>(object);
+ return reinterpret_cast<ImageView*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkMemory.h b/src/Vulkan/VkMemory.h
index 7fc3837..bbc6006 100644
--- a/src/Vulkan/VkMemory.h
+++ b/src/Vulkan/VkMemory.h
@@ -15,7 +15,7 @@
#ifndef VK_MEMORY_HPP_
#define VK_MEMORY_HPP_
-#include <vulkan/vulkan_core.h>
+#include <Vulkan/VulkanPlatform.h>
namespace vk
{
@@ -27,7 +27,7 @@
template <typename T>
T* allocate(size_t count, const VkAllocationCallbacks* pAllocator)
{
- return reinterpret_cast<T*>(allocate(count, alignof(T), pAllocator, T::GetAllocationScope()));
+ return static_cast<T*>(allocate(count, alignof(T), pAllocator, T::GetAllocationScope()));
}
} // namespace vk
diff --git a/src/Vulkan/VkObject.hpp b/src/Vulkan/VkObject.hpp
index 805fe59..cefca04 100644
--- a/src/Vulkan/VkObject.hpp
+++ b/src/Vulkan/VkObject.hpp
@@ -20,7 +20,7 @@
#include "VkMemory.h"
#include <new>
-#include <vulkan/vulkan_core.h>
+#include <Vulkan/VulkanPlatform.h>
#include <vulkan/vk_icd.h>
namespace vk
@@ -97,7 +97,7 @@
public:
operator VkT()
{
- return reinterpret_cast<VkT>(this);
+ return reinterpret_cast<typename VkT::HandleType>(this);
}
};
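The updated conversion operator is the other half of the round trip: the
object pointer is cast to the opaque HandleType, and the returned VkT is then
built through the wrapper's implicit converting constructor. In sketch form,
reusing the hypothetical Handle64 from above:

    struct VkFoo_T;                          // opaque API type
    using VkFooSketch = Handle64<VkFoo_T*>;

    struct Foo                               // driver-side object
    {
        operator VkFooSketch()
        {
            // 'this' -> VkFoo_T* -> implicit VkFooSketch(VkFoo_T*)
            return reinterpret_cast<VkFoo_T*>(this);
        }
    };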
diff --git a/src/Vulkan/VkPipeline.hpp b/src/Vulkan/VkPipeline.hpp
index 49d2fdd..63bbeaa 100644
--- a/src/Vulkan/VkPipeline.hpp
+++ b/src/Vulkan/VkPipeline.hpp
@@ -37,7 +37,7 @@
operator VkPipeline()
{
- return reinterpret_cast<VkPipeline>(this);
+ return reinterpret_cast<VkPipeline::HandleType>(this);
}
void destroy(const VkAllocationCallbacks* pAllocator)
@@ -125,7 +125,7 @@
static inline Pipeline* Cast(VkPipeline object)
{
- return reinterpret_cast<Pipeline*>(object);
+ return reinterpret_cast<Pipeline*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkPipelineCache.hpp b/src/Vulkan/VkPipelineCache.hpp
index 927c7a2..ab5e2b2 100644
--- a/src/Vulkan/VkPipelineCache.hpp
+++ b/src/Vulkan/VkPipelineCache.hpp
@@ -48,7 +48,7 @@
static inline PipelineCache* Cast(VkPipelineCache object)
{
- return reinterpret_cast<PipelineCache*>(object);
+ return reinterpret_cast<PipelineCache*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkPipelineLayout.hpp b/src/Vulkan/VkPipelineLayout.hpp
index 57dc9ca..ebd9295 100644
--- a/src/Vulkan/VkPipelineLayout.hpp
+++ b/src/Vulkan/VkPipelineLayout.hpp
@@ -46,7 +46,7 @@
static inline PipelineLayout* Cast(VkPipelineLayout object)
{
- return reinterpret_cast<PipelineLayout*>(object);
+ return reinterpret_cast<PipelineLayout*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkPromotedExtensions.cpp b/src/Vulkan/VkPromotedExtensions.cpp
index b244216..a48f173 100644
--- a/src/Vulkan/VkPromotedExtensions.cpp
+++ b/src/Vulkan/VkPromotedExtensions.cpp
@@ -40,7 +40,7 @@
// VK_KHR_storage_buffer_storage_class (no functions in this extension)
// VK_KHR_variable_pointers (no functions in this extension)
-#include <vulkan/vulkan_core.h>
+#include <Vulkan/VulkanPlatform.h>
extern "C"
{
diff --git a/src/Vulkan/VkQueryPool.hpp b/src/Vulkan/VkQueryPool.hpp
index 06161a9..ec2ba71 100644
--- a/src/Vulkan/VkQueryPool.hpp
+++ b/src/Vulkan/VkQueryPool.hpp
@@ -67,7 +67,7 @@
static inline QueryPool* Cast(VkQueryPool object)
{
- return reinterpret_cast<QueryPool*>(object);
+ return reinterpret_cast<QueryPool*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkQueue.cpp b/src/Vulkan/VkQueue.cpp
index 3f79c7f..037f347 100644
--- a/src/Vulkan/VkQueue.cpp
+++ b/src/Vulkan/VkQueue.cpp
@@ -45,22 +45,22 @@
for(uint32_t i = 0; i < submitCount; i++)
{
size_t size = pSubmits[i].waitSemaphoreCount * sizeof(VkSemaphore);
- submits[i].pWaitSemaphores = new (mem) VkSemaphore[pSubmits[i].waitSemaphoreCount];
+ submits[i].pWaitSemaphores = reinterpret_cast<const VkSemaphore*>(mem);
memcpy(mem, pSubmits[i].pWaitSemaphores, size);
mem += size;
size = pSubmits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags);
- submits[i].pWaitDstStageMask = new (mem) VkPipelineStageFlags[pSubmits[i].waitSemaphoreCount];
+ submits[i].pWaitDstStageMask = reinterpret_cast<const VkPipelineStageFlags*>(mem);
memcpy(mem, pSubmits[i].pWaitDstStageMask, size);
mem += size;
size = pSubmits[i].signalSemaphoreCount * sizeof(VkSemaphore);
- submits[i].pSignalSemaphores = new (mem) VkSemaphore[pSubmits[i].signalSemaphoreCount];
+ submits[i].pSignalSemaphores = reinterpret_cast<const VkSemaphore*>(mem);
memcpy(mem, pSubmits[i].pSignalSemaphores, size);
mem += size;
size = pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer);
- submits[i].pCommandBuffers = new (mem) VkCommandBuffer[pSubmits[i].commandBufferCount];
+ submits[i].pCommandBuffers = reinterpret_cast<const VkCommandBuffer*>(mem);
memcpy(mem, pSubmits[i].pCommandBuffers, size);
mem += size;
}
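Replacing the placement array-new with plain casts avoids two problems now
that handles are class types: array placement-new on a non-trivially
destructible element type may insert an implementation-defined cookie before
the elements (so mem would no longer be the array's address), and the
constructed elements are immediately overwritten by memcpy anyway. Because the
wrappers are trivially copyable, a bitwise copy into reinterpreted scratch
memory is sufficient. A generic sketch of the repeated pattern (hypothetical
helper):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    template<typename T>
    void copyHandleArray(uint8_t*& mem, const T* src, uint32_t count, const T*& dst)
    {
        size_t size = count * sizeof(T);
        dst = reinterpret_cast<const T*>(mem);  // scratch memory reused as a T array
        memcpy(mem, src, size);                 // fine: T is trivially copyable
        mem += size;
    }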
diff --git a/src/Vulkan/VkRenderPass.hpp b/src/Vulkan/VkRenderPass.hpp
index 4b5677c..b70ec8d 100644
--- a/src/Vulkan/VkRenderPass.hpp
+++ b/src/Vulkan/VkRenderPass.hpp
@@ -90,7 +90,7 @@
static inline RenderPass* Cast(VkRenderPass object)
{
- return reinterpret_cast<RenderPass*>(object);
+ return reinterpret_cast<RenderPass*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkSampler.hpp b/src/Vulkan/VkSampler.hpp
index 551621d..bdf7b49 100644
--- a/src/Vulkan/VkSampler.hpp
+++ b/src/Vulkan/VkSampler.hpp
@@ -82,7 +82,7 @@
static inline Sampler* Cast(VkSampler object)
{
- return reinterpret_cast<Sampler*>(object);
+ return reinterpret_cast<Sampler*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkSemaphore.hpp b/src/Vulkan/VkSemaphore.hpp
index 5f3ca05..91597de 100644
--- a/src/Vulkan/VkSemaphore.hpp
+++ b/src/Vulkan/VkSemaphore.hpp
@@ -54,7 +54,7 @@
static inline Semaphore* Cast(VkSemaphore object)
{
- return reinterpret_cast<Semaphore*>(object);
+ return reinterpret_cast<Semaphore*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VkShaderModule.hpp b/src/Vulkan/VkShaderModule.hpp
index 7287d69..3d9c4cc 100644
--- a/src/Vulkan/VkShaderModule.hpp
+++ b/src/Vulkan/VkShaderModule.hpp
@@ -45,7 +45,7 @@
static inline ShaderModule* Cast(VkShaderModule object)
{
- return reinterpret_cast<ShaderModule*>(object);
+ return reinterpret_cast<ShaderModule*>(object.get());
}
} // namespace vk
diff --git a/src/Vulkan/VulkanPlatform.h b/src/Vulkan/VulkanPlatform.h
new file mode 100644
index 0000000..fb71a7b
--- /dev/null
+++ b/src/Vulkan/VulkanPlatform.h
@@ -0,0 +1,115 @@
+// Copyright 2019 The SwiftShader Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef VULKAN_PLATFORM
+#define VULKAN_PLATFORM
+
+#include <cstddef>
+#include <cstdint>
+
+template<typename HandleType> class alignas(sizeof(uint64_t)) VkWrapperBase
+{
+public:
+ VkWrapperBase(HandleType handle)
+ {
+ u.dummy = 0;
+ u.handle = handle;
+ }
+
+ HandleType get() const
+ {
+ return u.handle;
+ }
+
+ operator HandleType() const
+ {
+ return u.handle;
+ }
+
+protected:
+ HandleType set(HandleType handle)
+ {
+ return (u.handle = handle);
+ }
+
+private:
+ union PointerHandleUnion
+ {
+ HandleType handle;
+ uint64_t dummy; // VkWrapper's size must always be 64 bits even when void* is 32 bits
+ };
+ PointerHandleUnion u;
+};
+
+template<typename T> class alignas(sizeof(uint64_t)) VkWrapper : public VkWrapperBase<T>
+{
+public:
+ using HandleType = T;
+
+ VkWrapper() : VkWrapperBase<T>(nullptr)
+ {
+ }
+
+ VkWrapper(HandleType handle) : VkWrapperBase<T>(handle)
+ {
+ static_assert(sizeof(VkWrapper) == sizeof(uint64_t), "Size is not 64 bits!");
+ }
+
+ void operator=(HandleType handle)
+ {
+ this->set(handle);
+ }
+};
+
+// VkDescriptorSet objects are really just memory in the VkDescriptorPool
+// object, so define different/more convenient operators for this object.
+struct VkDescriptorSet_T;
+template<> class alignas(sizeof(uint64_t)) VkWrapper<VkDescriptorSet_T*> : public VkWrapperBase<uint8_t*>
+{
+public:
+ using HandleType = uint8_t*;
+
+ VkWrapper(HandleType handle) : VkWrapperBase<uint8_t*>(handle)
+ {
+ static_assert(sizeof(VkWrapper) == sizeof(uint64_t), "Size is not 64 bits!");
+ }
+
+ HandleType operator+(ptrdiff_t rhs) const
+ {
+ return get() + rhs;
+ }
+
+ HandleType operator+=(ptrdiff_t rhs)
+ {
+ return this->set(get() + rhs);
+ }
+
+ ptrdiff_t operator-(const HandleType rhs) const
+ {
+ return get() - rhs;
+ }
+};
+
+#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \
+ typedef struct object##_T *object##Ptr; \
+ typedef VkWrapper<object##Ptr> object;
+
+#include <vulkan/vulkan.h>
+
+#ifdef Bool
+#undef Bool // b/127920555
+#undef None
+#endif
+
+#endif // VULKAN_PLATFORM
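Because this header defines VK_DEFINE_NON_DISPATCHABLE_HANDLE before including
<vulkan/vulkan.h>, and vulkan_core.h only provides its own definition under
#ifndef, every non-dispatchable handle declaration in the Vulkan headers now
expands to the wrapper. For example, VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer)
becomes:

    typedef struct VkBuffer_T *VkBufferPtr;
    typedef VkWrapper<VkBufferPtr> VkBuffer;

Dispatchable handles (VkDevice, VkQueue, VkCommandBuffer, ...) are untouched:
the spec already defines them as pointer-sized, so they need no padding.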
diff --git a/src/Vulkan/libVulkan.cpp b/src/Vulkan/libVulkan.cpp
index 5334dbd..21b982d 100644
--- a/src/Vulkan/libVulkan.cpp
+++ b/src/Vulkan/libVulkan.cpp
@@ -533,7 +533,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence)
{
TRACE("(VkQueue queue = %p, uint32_t submitCount = %d, const VkSubmitInfo* pSubmits = %p, VkFence fence = %p)",
- queue, submitCount, pSubmits, fence);
+ queue, submitCount, pSubmits, fence.get());
return vk::Cast(queue)->submit(submitCount, pSubmits, fence);
}
@@ -599,7 +599,7 @@
VKAPI_ATTR void VKAPI_CALL vkFreeMemory(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkDeviceMemory memory = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, memory, pAllocator);
+ device, memory.get(), pAllocator);
vk::destroy(memory, pAllocator);
}
@@ -607,14 +607,14 @@
VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData)
{
TRACE("(VkDevice device = %p, VkDeviceMemory memory = %p, VkDeviceSize offset = %d, VkDeviceSize size = %d, VkMemoryMapFlags flags = %d, void** ppData = %p)",
- device, memory, int(offset), int(size), flags, ppData);
+ device, memory.get(), int(offset), int(size), flags, ppData);
return vk::Cast(memory)->map(offset, size, ppData);
}
VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory memory)
{
- TRACE("(VkDevice device = %p, VkDeviceMemory memory = %p)", device, memory);
+ TRACE("(VkDevice device = %p, VkDeviceMemory memory = %p)", device, memory.get());
// Noop, memory will be released when the DeviceMemory object is released
}
@@ -642,7 +642,7 @@
VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment(VkDevice pDevice, VkDeviceMemory pMemory, VkDeviceSize* pCommittedMemoryInBytes)
{
TRACE("(VkDevice device = %p, VkDeviceMemory memory = %p, VkDeviceSize* pCommittedMemoryInBytes = %p)",
- pDevice, pMemory, pCommittedMemoryInBytes);
+ pDevice, pMemory.get(), pCommittedMemoryInBytes);
auto memory = vk::Cast(pMemory);
@@ -659,7 +659,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset)
{
TRACE("(VkDevice device = %p, VkBuffer buffer = %p, VkDeviceMemory memory = %p, VkDeviceSize memoryOffset = %d)",
- device, buffer, memory, int(memoryOffset));
+ device, buffer.get(), memory.get(), int(memoryOffset));
vk::Cast(buffer)->bind(memory, memoryOffset);
@@ -669,7 +669,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset)
{
TRACE("(VkDevice device = %p, VkImage image = %p, VkDeviceMemory memory = %p, VkDeviceSize memoryOffset = %d)",
- device, image, memory, int(memoryOffset));
+ device, image.get(), memory.get(), int(memoryOffset));
vk::Cast(image)->bind(memory, memoryOffset);
@@ -679,7 +679,7 @@
VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements)
{
TRACE("(VkDevice device = %p, VkBuffer buffer = %p, VkMemoryRequirements* pMemoryRequirements = %p)",
- device, buffer, pMemoryRequirements);
+ device, buffer.get(), pMemoryRequirements);
*pMemoryRequirements = vk::Cast(buffer)->getMemoryRequirements();
}
@@ -687,7 +687,7 @@
VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements)
{
TRACE("(VkDevice device = %p, VkImage image = %p, VkMemoryRequirements* pMemoryRequirements = %p)",
- device, image, pMemoryRequirements);
+ device, image.get(), pMemoryRequirements);
*pMemoryRequirements = vk::Cast(image)->getMemoryRequirements();
}
@@ -695,7 +695,7 @@
VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
{
TRACE("(VkDevice device = %p, VkImage image = %p, uint32_t* pSparseMemoryRequirementCount = %p, VkSparseImageMemoryRequirements* pSparseMemoryRequirements = %p)",
- device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
+ device, image.get(), pSparseMemoryRequirementCount, pSparseMemoryRequirements);
// The 'sparseBinding' feature is not supported, so images can not be created with the VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT flag.
// "If the image was not created with VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT then pSparseMemoryRequirementCount will be set to zero and pSparseMemoryRequirements will not be written to."
@@ -734,7 +734,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkFence fence = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, fence, pAllocator);
+ device, fence.get(), pAllocator);
vk::destroy(fence, pAllocator);
}
@@ -754,7 +754,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence)
{
- TRACE("(VkDevice device = %p, VkFence fence = %p)", device, fence);
+ TRACE("(VkDevice device = %p, VkFence fence = %p)", device, fence.get());
return vk::Cast(fence)->getStatus();
}
@@ -783,7 +783,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkSemaphore semaphore = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, semaphore, pAllocator);
+ device, semaphore.get(), pAllocator);
vk::destroy(semaphore, pAllocator);
}
@@ -804,21 +804,21 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkEvent event = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, event, pAllocator);
+ device, event.get(), pAllocator);
vk::destroy(event, pAllocator);
}
VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus(VkDevice device, VkEvent event)
{
- TRACE("(VkDevice device = %p, VkEvent event = %p)", device, event);
+ TRACE("(VkDevice device = %p, VkEvent event = %p)", device, event.get());
return vk::Cast(event)->getStatus();
}
VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event)
{
- TRACE("(VkDevice device = %p, VkEvent event = %p)", device, event);
+ TRACE("(VkDevice device = %p, VkEvent event = %p)", device, event.get());
vk::Cast(event)->signal();
@@ -827,7 +827,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent(VkDevice device, VkEvent event)
{
- TRACE("(VkDevice device = %p, VkEvent event = %p)", device, event);
+ TRACE("(VkDevice device = %p, VkEvent event = %p)", device, event.get());
vk::Cast(event)->reset();
@@ -850,7 +850,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkQueryPool queryPool = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, queryPool, pAllocator);
+ device, queryPool.get(), pAllocator);
vk::destroy(queryPool, pAllocator);
}
@@ -858,7 +858,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags)
{
TRACE("(VkDevice device = %p, VkQueryPool queryPool = %p, uint32_t firstQuery = %d, uint32_t queryCount = %d, size_t dataSize = %d, void* pData = %p, VkDeviceSize stride = %d, VkQueryResultFlags flags = %d)",
- device, queryPool, int(firstQuery), int(queryCount), int(dataSize), pData, int(stride), flags);
+ device, queryPool.get(), int(firstQuery), int(queryCount), int(dataSize), pData, int(stride), flags);
return vk::Cast(queryPool)->getResults(firstQuery, queryCount, dataSize, pData, stride, flags);
}
@@ -879,7 +879,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkBuffer buffer = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, buffer, pAllocator);
+ device, buffer.get(), pAllocator);
vk::destroy(buffer, pAllocator);
}
@@ -900,7 +900,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkBufferView bufferView = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, bufferView, pAllocator);
+ device, bufferView.get(), pAllocator);
vk::destroy(bufferView, pAllocator);
}
@@ -998,7 +998,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkImage image = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, image, pAllocator);
+ device, image.get(), pAllocator);
vk::destroy(image, pAllocator);
}
@@ -1006,7 +1006,7 @@
VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout)
{
TRACE("(VkDevice device = %p, VkImage image = %p, const VkImageSubresource* pSubresource = %p, VkSubresourceLayout* pLayout = %p)",
- device, image, pSubresource, pLayout);
+ device, image.get(), pSubresource, pLayout);
vk::Cast(image)->getSubresourceLayout(pSubresource, pLayout);
}
@@ -1059,7 +1059,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkImageView imageView = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, imageView, pAllocator);
+ device, imageView.get(), pAllocator);
vk::destroy(imageView, pAllocator);
}
@@ -1080,7 +1080,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkShaderModule shaderModule = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, shaderModule, pAllocator);
+ device, shaderModule.get(), pAllocator);
vk::destroy(shaderModule, pAllocator);
}
@@ -1101,7 +1101,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkPipelineCache pipelineCache = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, pipelineCache, pAllocator);
+ device, pipelineCache.get(), pAllocator);
vk::destroy(pipelineCache, pAllocator);
}
@@ -1109,7 +1109,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData)
{
TRACE("(VkDevice device = %p, VkPipelineCache pipelineCache = %p, size_t* pDataSize = %p, void* pData = %p)",
- device, pipelineCache, pDataSize, pData);
+ device, pipelineCache.get(), pDataSize, pData);
return vk::Cast(pipelineCache)->getData(pDataSize, pData);
}
@@ -1117,7 +1117,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches)
{
TRACE("(VkDevice device = %p, VkPipelineCache dstCache = %p, uint32_t srcCacheCount = %d, const VkPipelineCache* pSrcCaches = %p)",
- device, dstCache, int(srcCacheCount), pSrcCaches);
+ device, dstCache.get(), int(srcCacheCount), pSrcCaches);
return vk::Cast(dstCache)->merge(srcCacheCount, pSrcCaches);
}
@@ -1125,7 +1125,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
{
TRACE("(VkDevice device = %p, VkPipelineCache pipelineCache = %p, uint32_t createInfoCount = %d, const VkGraphicsPipelineCreateInfo* pCreateInfos = %p, const VkAllocationCallbacks* pAllocator = %p, VkPipeline* pPipelines = %p)",
- device, pipelineCache, int(createInfoCount), pCreateInfos, pAllocator, pPipelines);
+ device, pipelineCache.get(), int(createInfoCount), pCreateInfos, pAllocator, pPipelines);
// TODO (b/123588002): Optimize based on pipelineCache.
@@ -1158,7 +1158,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
{
TRACE("(VkDevice device = %p, VkPipelineCache pipelineCache = %p, uint32_t createInfoCount = %d, const VkComputePipelineCreateInfo* pCreateInfos = %p, const VkAllocationCallbacks* pAllocator = %p, VkPipeline* pPipelines = %p)",
- device, pipelineCache, int(createInfoCount), pCreateInfos, pAllocator, pPipelines);
+ device, pipelineCache.get(), int(createInfoCount), pCreateInfos, pAllocator, pPipelines);
// TODO (b/123588002): Optimize based on pipelineCache.
@@ -1191,7 +1191,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkPipeline pipeline = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, pipeline, pAllocator);
+ device, pipeline.get(), pAllocator);
vk::destroy(pipeline, pAllocator);
}
@@ -1212,7 +1212,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkPipelineLayout pipelineLayout = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, pipelineLayout, pAllocator);
+ device, pipelineLayout.get(), pAllocator);
vk::destroy(pipelineLayout, pAllocator);
}
@@ -1233,7 +1233,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkSampler sampler = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, sampler, pAllocator);
+ device, sampler.get(), pAllocator);
vk::destroy(sampler, pAllocator);
}
@@ -1267,7 +1267,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkDescriptorSetLayout descriptorSetLayout = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, descriptorSetLayout, pAllocator);
+ device, descriptorSetLayout.get(), pAllocator);
vk::destroy(descriptorSetLayout, pAllocator);
}
@@ -1288,7 +1288,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkDescriptorPool descriptorPool = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, descriptorPool, pAllocator);
+ device, descriptorPool.get(), pAllocator);
vk::destroy(descriptorPool, pAllocator);
}
@@ -1296,7 +1296,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
{
TRACE("(VkDevice device = %p, VkDescriptorPool descriptorPool = %p, VkDescriptorPoolResetFlags flags = 0x%x)",
- device, descriptorPool, int(flags));
+ device, descriptorPool.get(), int(flags));
if(flags)
{
@@ -1323,7 +1323,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets)
{
TRACE("(VkDevice device = %p, VkDescriptorPool descriptorPool = %p, uint32_t descriptorSetCount = %d, const VkDescriptorSet* pDescriptorSets = %p)",
- device, descriptorPool, descriptorSetCount, pDescriptorSets);
+ device, descriptorPool.get(), descriptorSetCount, pDescriptorSets);
vk::Cast(descriptorPool)->freeSets(descriptorSetCount, pDescriptorSets);
@@ -1354,7 +1354,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkFramebuffer framebuffer = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, framebuffer, pAllocator);
+ device, framebuffer.get(), pAllocator);
vk::destroy(framebuffer, pAllocator);
}
@@ -1457,7 +1457,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkRenderPass renderPass = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, renderPass, pAllocator);
+ device, renderPass.get(), pAllocator);
vk::destroy(renderPass, pAllocator);
}
@@ -1465,7 +1465,7 @@
VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity)
{
TRACE("(VkDevice device = %p, VkRenderPass renderPass = %p, VkExtent2D* pGranularity = %p)",
- device, renderPass, pGranularity);
+ device, renderPass.get(), pGranularity);
vk::Cast(renderPass)->getRenderAreaGranularity(pGranularity);
}
@@ -1486,7 +1486,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkCommandPool commandPool = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, commandPool, pAllocator);
+ device, commandPool.get(), pAllocator);
vk::destroy(commandPool, pAllocator);
}
@@ -1494,7 +1494,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags)
{
TRACE("(VkDevice device = %p, VkCommandPool commandPool = %p, VkCommandPoolResetFlags flags = %d)",
- device, commandPool, int(flags));
+ device, commandPool.get(), int(flags));
return vk::Cast(commandPool)->reset(flags);
}
@@ -1516,7 +1516,7 @@
VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers)
{
TRACE("(VkDevice device = %p, VkCommandPool commandPool = %p, uint32_t commandBufferCount = %d, const VkCommandBuffer* pCommandBuffers = %p)",
- device, commandPool, int(commandBufferCount), pCommandBuffers);
+ device, commandPool.get(), int(commandBufferCount), pCommandBuffers);
vk::Cast(commandPool)->freeCommandBuffers(commandBufferCount, pCommandBuffers);
}
@@ -1551,7 +1551,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkPipelineBindPoint pipelineBindPoint = %d, VkPipeline pipeline = %p)",
- commandBuffer, int(pipelineBindPoint), pipeline);
+ commandBuffer, int(pipelineBindPoint), pipeline.get());
vk::Cast(commandBuffer)->bindPipeline(pipelineBindPoint, pipeline);
}
@@ -1630,7 +1630,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkPipelineBindPoint pipelineBindPoint = %d, VkPipelineLayout layout = %p, uint32_t firstSet = %d, uint32_t descriptorSetCount = %d, const VkDescriptorSet* pDescriptorSets = %p, uint32_t dynamicOffsetCount = %d, const uint32_t* pDynamicOffsets = %p)",
- commandBuffer, int(pipelineBindPoint), layout, int(firstSet), int(descriptorSetCount), pDescriptorSets, int(dynamicOffsetCount), pDynamicOffsets);
+ commandBuffer, int(pipelineBindPoint), layout.get(), int(firstSet), int(descriptorSetCount), pDescriptorSets, int(dynamicOffsetCount), pDynamicOffsets);
vk::Cast(commandBuffer)->bindDescriptorSets(pipelineBindPoint, layout, firstSet, descriptorSetCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}
@@ -1638,7 +1638,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer buffer = %p, VkDeviceSize offset = %d, VkIndexType indexType = %d)",
- commandBuffer, buffer, int(offset), int(indexType));
+ commandBuffer, buffer.get(), int(offset), int(indexType));
vk::Cast(commandBuffer)->bindIndexBuffer(buffer, offset, indexType);
}
@@ -1670,7 +1670,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer buffer = %p, VkDeviceSize offset = %d, uint32_t drawCount = %d, uint32_t stride = %d)",
- commandBuffer, buffer, int(offset), int(drawCount), int(stride));
+ commandBuffer, buffer.get(), int(offset), int(drawCount), int(stride));
vk::Cast(commandBuffer)->drawIndirect(buffer, offset, drawCount, stride);
}
@@ -1678,7 +1678,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer buffer = %p, VkDeviceSize offset = %d, uint32_t drawCount = %d, uint32_t stride = %d)",
- commandBuffer, buffer, int(offset), int(drawCount), int(stride));
+ commandBuffer, buffer.get(), int(offset), int(drawCount), int(stride));
vk::Cast(commandBuffer)->drawIndexedIndirect(buffer, offset, drawCount, stride);
}
@@ -1694,7 +1694,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer buffer = %p, VkDeviceSize offset = %d)",
- commandBuffer, buffer, int(offset));
+ commandBuffer, buffer.get(), int(offset));
vk::Cast(commandBuffer)->dispatchIndirect(buffer, offset);
}
@@ -1702,7 +1702,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer srcBuffer = %p, VkBuffer dstBuffer = %p, uint32_t regionCount = %d, const VkBufferCopy* pRegions = %p)",
- commandBuffer, srcBuffer, dstBuffer, int(regionCount), pRegions);
+ commandBuffer, srcBuffer.get(), dstBuffer.get(), int(regionCount), pRegions);
vk::Cast(commandBuffer)->copyBuffer(srcBuffer, dstBuffer, regionCount, pRegions);
}
@@ -1710,7 +1710,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkImage srcImage = %p, VkImageLayout srcImageLayout = %d, VkImage dstImage = %p, VkImageLayout dstImageLayout = %d, uint32_t regionCount = %d, const VkImageCopy* pRegions = %p)",
- commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, int(regionCount), pRegions);
+ commandBuffer, srcImage.get(), srcImageLayout, dstImage.get(), dstImageLayout, int(regionCount), pRegions);
vk::Cast(commandBuffer)->copyImage(srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
}
@@ -1718,7 +1718,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkImage srcImage = %p, VkImageLayout srcImageLayout = %d, VkImage dstImage = %p, VkImageLayout dstImageLayout = %d, uint32_t regionCount = %d, const VkImageBlit* pRegions = %p, VkFilter filter = %d)",
- commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, int(regionCount), pRegions, filter);
+ commandBuffer, srcImage.get(), srcImageLayout, dstImage.get(), dstImageLayout, int(regionCount), pRegions, filter);
vk::Cast(commandBuffer)->blitImage(srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter);
}
@@ -1726,7 +1726,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer srcBuffer = %p, VkImage dstImage = %p, VkImageLayout dstImageLayout = %d, uint32_t regionCount = %d, const VkBufferImageCopy* pRegions = %p)",
- commandBuffer, srcBuffer, dstImage, dstImageLayout, int(regionCount), pRegions);
+ commandBuffer, srcBuffer.get(), dstImage.get(), dstImageLayout, int(regionCount), pRegions);
vk::Cast(commandBuffer)->copyBufferToImage(srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
}
@@ -1734,7 +1734,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkImage srcImage = %p, VkImageLayout srcImageLayout = %d, VkBuffer dstBuffer = %p, uint32_t regionCount = %d, const VkBufferImageCopy* pRegions = %p)",
- commandBuffer, srcImage, int(srcImageLayout), dstBuffer, int(regionCount), pRegions);
+ commandBuffer, srcImage.get(), int(srcImageLayout), dstBuffer.get(), int(regionCount), pRegions);
vk::Cast(commandBuffer)->copyImageToBuffer(srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
}
@@ -1742,7 +1742,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer dstBuffer = %p, VkDeviceSize dstOffset = %d, VkDeviceSize dataSize = %d, const void* pData = %p)",
- commandBuffer, dstBuffer, int(dstOffset), int(dataSize), pData);
+ commandBuffer, dstBuffer.get(), int(dstOffset), int(dataSize), pData);
vk::Cast(commandBuffer)->updateBuffer(dstBuffer, dstOffset, dataSize, pData);
}
@@ -1750,7 +1750,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer dstBuffer = %p, VkDeviceSize dstOffset = %d, VkDeviceSize size = %d, uint32_t data = %d)",
- commandBuffer, dstBuffer, int(dstOffset), int(size), data);
+ commandBuffer, dstBuffer.get(), int(dstOffset), int(size), data);
vk::Cast(commandBuffer)->fillBuffer(dstBuffer, dstOffset, size, data);
}
@@ -1758,7 +1758,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkImage image = %p, VkImageLayout imageLayout = %d, const VkClearColorValue* pColor = %p, uint32_t rangeCount = %d, const VkImageSubresourceRange* pRanges = %p)",
- commandBuffer, image, int(imageLayout), pColor, int(rangeCount), pRanges);
+ commandBuffer, image.get(), int(imageLayout), pColor, int(rangeCount), pRanges);
vk::Cast(commandBuffer)->clearColorImage(image, imageLayout, pColor, rangeCount, pRanges);
}
@@ -1766,7 +1766,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkImage image = %p, VkImageLayout imageLayout = %d, const VkClearDepthStencilValue* pDepthStencil = %p, uint32_t rangeCount = %d, const VkImageSubresourceRange* pRanges = %p)",
- commandBuffer, image, int(imageLayout), pDepthStencil, int(rangeCount), pRanges);
+ commandBuffer, image.get(), int(imageLayout), pDepthStencil, int(rangeCount), pRanges);
vk::Cast(commandBuffer)->clearDepthStencilImage(image, imageLayout, pDepthStencil, rangeCount, pRanges);
}
@@ -1782,7 +1782,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkImage srcImage = %p, VkImageLayout srcImageLayout = %d, VkImage dstImage = %p, VkImageLayout dstImageLayout = %d, uint32_t regionCount = %d, const VkImageResolve* pRegions = %p)",
- commandBuffer, srcImage, int(srcImageLayout), dstImage, int(dstImageLayout), regionCount, pRegions);
+ commandBuffer, srcImage.get(), int(srcImageLayout), dstImage.get(), int(dstImageLayout), regionCount, pRegions);
vk::Cast(commandBuffer)->resolveImage(srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
}
@@ -1790,7 +1790,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkEvent event = %p, VkPipelineStageFlags stageMask = %d)",
- commandBuffer, event, int(stageMask));
+ commandBuffer, event.get(), int(stageMask));
vk::Cast(commandBuffer)->setEvent(event, stageMask);
}
@@ -1798,7 +1798,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkEvent event = %p, VkPipelineStageFlags stageMask = %d)",
- commandBuffer, event, int(stageMask));
+ commandBuffer, event.get(), int(stageMask));
vk::Cast(commandBuffer)->resetEvent(event, stageMask);
}
@@ -1826,7 +1826,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkQueryPool queryPool = %p, uint32_t query = %d, VkQueryControlFlags flags = %d)",
- commandBuffer, queryPool, query, int(flags));
+ commandBuffer, queryPool.get(), query, int(flags));
vk::Cast(commandBuffer)->beginQuery(queryPool, query, flags);
}
@@ -1834,7 +1834,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkQueryPool queryPool = %p, uint32_t query = %d)",
- commandBuffer, queryPool, int(query));
+ commandBuffer, queryPool.get(), int(query));
vk::Cast(commandBuffer)->endQuery(queryPool, query);
}
@@ -1842,7 +1842,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkQueryPool queryPool = %p, uint32_t firstQuery = %d, uint32_t queryCount = %d)",
- commandBuffer, queryPool, int(firstQuery), int(queryCount));
+ commandBuffer, queryPool.get(), int(firstQuery), int(queryCount));
vk::Cast(commandBuffer)->resetQueryPool(queryPool, firstQuery, queryCount);
}
@@ -1850,7 +1850,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkPipelineStageFlagBits pipelineStage = %d, VkQueryPool queryPool = %p, uint32_t query = %d)",
- commandBuffer, int(pipelineStage), queryPool, int(query));
+ commandBuffer, int(pipelineStage), queryPool.get(), int(query));
vk::Cast(commandBuffer)->writeTimestamp(pipelineStage, queryPool, query);
}
@@ -1858,7 +1858,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkQueryPool queryPool = %p, uint32_t firstQuery = %d, uint32_t queryCount = %d, VkBuffer dstBuffer = %p, VkDeviceSize dstOffset = %d, VkDeviceSize stride = %d, VkQueryResultFlags flags = %d)",
- commandBuffer, queryPool, int(firstQuery), int(queryCount), dstBuffer, int(dstOffset), int(stride), int(flags));
+ commandBuffer, queryPool.get(), int(firstQuery), int(queryCount), dstBuffer.get(), int(dstOffset), int(stride), int(flags));
vk::Cast(commandBuffer)->copyQueryPoolResults(queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride, flags);
}
@@ -1866,7 +1866,7 @@
VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkPipelineLayout layout = %p, VkShaderStageFlags stageFlags = %d, uint32_t offset = %d, uint32_t size = %d, const void* pValues = %p)",
- commandBuffer, layout, stageFlags, offset, size, pValues);
+ commandBuffer, layout.get(), stageFlags, offset, size, pValues);
vk::Cast(commandBuffer)->pushConstants(layout, stageFlags, offset, size, pValues);
}
@@ -2370,7 +2370,7 @@
VKAPI_ATTR void VKAPI_CALL vkTrimCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags)
{
TRACE("(VkDevice device = %p, VkCommandPool commandPool = %p, VkCommandPoolTrimFlags flags = %d)",
- device, commandPool, flags);
+ device, commandPool.get(), flags);
vk::Cast(commandPool)->trim(flags);
}
@@ -2429,7 +2429,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkDescriptorUpdateTemplate descriptorUpdateTemplate = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, descriptorUpdateTemplate, pAllocator);
+ device, descriptorUpdateTemplate.get(), pAllocator);
vk::destroy(descriptorUpdateTemplate, pAllocator);
}
@@ -2437,7 +2437,7 @@
VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData)
{
TRACE("(VkDevice device = %p, VkDescriptorSet descriptorSet = %p, VkDescriptorUpdateTemplate descriptorUpdateTemplate = %p, const void* pData = %p)",
- device, descriptorSet, descriptorUpdateTemplate, pData);
+ device, descriptorSet.get(), descriptorUpdateTemplate.get(), pData);
vk::Cast(descriptorUpdateTemplate)->updateDescriptorSet(descriptorSet, pData);
}
@@ -2488,7 +2488,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkInstance instance = %p, VkSurfaceKHR surface = %p, const VkAllocationCallbacks* pAllocator = %p)",
- instance, surface, pAllocator);
+ instance, surface.get(), pAllocator);
vk::destroy(surface, pAllocator);
}
@@ -2496,7 +2496,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported)
{
TRACE("(VkPhysicalDevice physicalDevice = %p, uint32_t queueFamilyIndex = %d, VkSurface surface = %p, VKBool32* pSupported = %p)",
- physicalDevice, int(queueFamilyIndex), surface, pSupported);
+ physicalDevice, int(queueFamilyIndex), surface.get(), pSupported);
*pSupported = VK_TRUE;
return VK_SUCCESS;
@@ -2506,7 +2506,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities)
{
TRACE("(VkPhysicalDevice physicalDevice = %p, VkSurfaceKHR surface = %p, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities = %p)",
- physicalDevice, surface, pSurfaceCapabilities);
+ physicalDevice, surface.get(), pSurfaceCapabilities);
vk::Cast(surface)->getSurfaceCapabilities(pSurfaceCapabilities);
return VK_SUCCESS;
@@ -2515,7 +2515,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats)
{
TRACE("(VkPhysicalDevice physicalDevice = %p, VkSurfaceKHR surface = %p. uint32_t* pSurfaceFormatCount = %p, VkSurfaceFormatKHR* pSurfaceFormats = %p)",
- physicalDevice, surface, pSurfaceFormatCount, pSurfaceFormats);
+ physicalDevice, surface.get(), pSurfaceFormatCount, pSurfaceFormats);
if(!pSurfaceFormats)
{
@@ -2529,7 +2529,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes)
{
TRACE("(VkPhysicalDevice physicalDevice = %p, VkSurfaceKHR surface = %p uint32_t* pPresentModeCount = %p, VkPresentModeKHR* pPresentModes = %p)",
- physicalDevice, surface, pPresentModeCount, pPresentModes);
+ physicalDevice, surface.get(), pPresentModeCount, pPresentModes);
if(!pPresentModes)
{
@@ -2578,7 +2578,7 @@
VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator)
{
TRACE("(VkDevice device = %p, VkSwapchainKHR swapchain = %p, const VkAllocationCallbacks* pAllocator = %p)",
- device, swapchain, pAllocator);
+ device, swapchain.get(), pAllocator);
vk::destroy(swapchain, pAllocator);
}
@@ -2586,7 +2586,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages)
{
TRACE("(VkDevice device = %p, VkSwapchainKHR swapchain = %p, uint32_t* pSwapchainImageCount = %p, VkImage* pSwapchainImages = %p)",
- device, swapchain, pSwapchainImageCount, pSwapchainImages);
+ device, swapchain.get(), pSwapchainImageCount, pSwapchainImages);
if(!pSwapchainImages)
{
@@ -2600,7 +2600,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex)
{
TRACE("(VkDevice device = %p, VkSwapchainKHR swapchain = %p, uint64_t timeout = %d, VkSemaphore semaphore = %p, VkFence fence = %p, uint32_t* pImageIndex = %p)",
- device, swapchain, int(timeout), semaphore, fence, pImageIndex);
+ device, swapchain.get(), int(timeout), semaphore.get(), fence.get(), pImageIndex);
return vk::Cast(swapchain)->getNextImage(timeout, semaphore, fence, pImageIndex);
}
@@ -2635,7 +2635,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHR(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR *pModes)
{
TRACE("(VkDevice device = %p, VkSurfaceKHR surface = %p, VkDeviceGroupPresentModeFlagsKHR *pModes = %p)",
- device, surface, pModes);
+ device, surface.get(), pModes);
*pModes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
return VK_SUCCESS;
@@ -2667,7 +2667,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkAcquireImageANDROID(VkDevice device, VkImage image, int nativeFenceFd, VkSemaphore semaphore, VkFence fence)
{
TRACE("(VkDevice device = %p, VkImage image = %p, int nativeFenceFd = %d, VkSemaphore semaphore = %p, VkFence fence = %p)",
- device, image, nativeFenceFd, semaphore, fence);
+ device, image.get(), nativeFenceFd, semaphore.get(), fence.get());
return VK_SUCCESS;
}
@@ -2675,7 +2675,7 @@
VKAPI_ATTR VkResult VKAPI_CALL vkQueueSignalReleaseImageANDROID(VkQueue queue, uint32_t waitSemaphoreCount, const VkSemaphore* pWaitSemaphores, VkImage image, int* pNativeFenceFd)
{
TRACE("(VkQueue queue = %p, uint32_t waitSemaphoreCount = %d, const VkSemaphore* pWaitSemaphores = %p, VkImage image = %p, int* pNativeFenceFd = %p)",
- queue, waitSemaphoreCount, pWaitSemaphores, image, pNativeFenceFd);
+ queue, waitSemaphoreCount, pWaitSemaphores, image.get(), pNativeFenceFd);
GrallocModule* grallocMod = GrallocModule::getInstance();
void* nativeBuffer;
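The many TRACE edits in libVulkan.cpp share a single cause: %p consumes a
pointer-sized argument from the C varargs, but after this CL a handle is an
8-byte class object, so on a 32-bit target %p would read only half of it (and
passing class objects through an ellipsis is not portable in general). get()
forwards the raw handle pointer instead. Sketch of the hazard, with printf
standing in for the TRACE macro:

    printf("buffer = %p\n", buffer);        // wrong: pushes an 8-byte wrapper object
    printf("buffer = %p\n", buffer.get());  // right: pushes the underlying pointer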
diff --git a/src/Vulkan/vulkan.vcxproj b/src/Vulkan/vulkan.vcxproj
index 1157cac..68f18ec 100644
--- a/src/Vulkan/vulkan.vcxproj
+++ b/src/Vulkan/vulkan.vcxproj
@@ -224,6 +224,7 @@
<ClInclude Include="VkSampler.hpp" />
<ClInclude Include="VkSemaphore.hpp" />
<ClInclude Include="VkShaderModule.hpp" />
+ <ClInclude Include="VulkanPlatform.h" />
<ClInclude Include="..\Device\Blitter.hpp" />
<ClInclude Include="..\Device\Clipper.hpp" />
<ClInclude Include="..\Device\Color.hpp" />
diff --git a/src/Vulkan/vulkan.vcxproj.filters b/src/Vulkan/vulkan.vcxproj.filters
index 916c493..feca9b6 100644
--- a/src/Vulkan/vulkan.vcxproj.filters
+++ b/src/Vulkan/vulkan.vcxproj.filters
@@ -518,6 +518,9 @@
<ClInclude Include="VkDestroy.h">
<Filter>Header Files\Vulkan</Filter>
</ClInclude>
+ <ClInclude Include="VulkanPlatform.h">
+ <Filter>Header Files\Vulkan</Filter>
+ </ClInclude>
<ClInclude Include="Version.h" />
<ClInclude Include="..\Pipeline\SpirvShader.hpp">
<Filter>Header Files\Pipeline</Filter>
diff --git a/src/WSI/VkSurfaceKHR.hpp b/src/WSI/VkSurfaceKHR.hpp
index c7c6752..23a8ee0 100644
--- a/src/WSI/VkSurfaceKHR.hpp
+++ b/src/WSI/VkSurfaceKHR.hpp
@@ -16,7 +16,7 @@
#define SWIFTSHADER_VKSURFACEKHR_HPP_
#include "Vulkan/VkObject.hpp"
-#include <vulkan/vulkan.h>
+#include <Vulkan/VulkanPlatform.h>
#include <vector>
namespace vk
@@ -42,7 +42,7 @@
public:
operator VkSurfaceKHR()
{
- return reinterpret_cast<VkSurfaceKHR>(this);
+ return reinterpret_cast<VkSurfaceKHR::HandleType>(this);
}
void destroy(const VkAllocationCallbacks* pAllocator)
@@ -85,7 +85,7 @@
static inline SurfaceKHR* Cast(VkSurfaceKHR object)
{
- return reinterpret_cast<SurfaceKHR*>(object);
+ return reinterpret_cast<SurfaceKHR*>(object.get());
}
}
diff --git a/src/WSI/VkSwapchainKHR.hpp b/src/WSI/VkSwapchainKHR.hpp
index 3924992..8ba649c 100644
--- a/src/WSI/VkSwapchainKHR.hpp
+++ b/src/WSI/VkSwapchainKHR.hpp
@@ -56,7 +56,7 @@
static inline SwapchainKHR* Cast(VkSwapchainKHR object)
{
- return reinterpret_cast<SwapchainKHR*>(object);
+ return reinterpret_cast<SwapchainKHR*>(object.get());
}
}