VK_KHR_synchronization2 support
This CL adds support for VK_KHR_synchronization2. In general, no new
functionality is added, but this extension provides new ways to pass
in parameters for synchronization-related functions.
Tests: dEQP-VK.*sync_2
Tests: dEQP-VK.synchronization2.*
Bug: b/204110005
Change-Id: Id76323862b5e4208f34ced9db4de74d8d526704e
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/58549
Kokoro-Result: kokoro <noreply+kokoro@google.com>
Reviewed-by: Sean Risser <srisser@google.com>
Reviewed-by: Nicolas Capens <nicolascapens@google.com>
Tested-by: Alexis Hétu <sugoi@google.com>
Commit-Queue: Alexis Hétu <sugoi@google.com>
diff --git a/src/Vulkan/VkCommandBuffer.cpp b/src/Vulkan/VkCommandBuffer.cpp
index 780edc2..59552f8 100644
--- a/src/Vulkan/VkCommandBuffer.cpp
+++ b/src/Vulkan/VkCommandBuffer.cpp
@@ -907,9 +907,8 @@
class CmdSignalEvent : public vk::CommandBuffer::Command
{
public:
- CmdSignalEvent(vk::Event *ev, VkPipelineStageFlags stageMask)
+ CmdSignalEvent(vk::Event *ev)
: ev(ev)
- , stageMask(stageMask)
{
}
@@ -923,13 +922,12 @@
private:
vk::Event *const ev;
- const VkPipelineStageFlags stageMask; // TODO(b/117835459): We currently ignore the flags and signal the event at the last stage
};
class CmdResetEvent : public vk::CommandBuffer::Command
{
public:
- CmdResetEvent(vk::Event *ev, VkPipelineStageFlags stageMask)
+ CmdResetEvent(vk::Event *ev, VkPipelineStageFlags2 stageMask)
: ev(ev)
, stageMask(stageMask)
{
@@ -944,7 +942,7 @@
private:
vk::Event *const ev;
- const VkPipelineStageFlags stageMask; // FIXME(b/117835459): We currently ignore the flags and reset the event at the last stage
+ const VkPipelineStageFlags2 stageMask; // FIXME(b/117835459): We currently ignore the flags and reset the event at the last stage
};
class CmdWaitEvent : public vk::CommandBuffer::Command
@@ -1144,7 +1142,7 @@
class CmdWriteTimeStamp : public vk::CommandBuffer::Command
{
public:
- CmdWriteTimeStamp(vk::QueryPool *queryPool, uint32_t query, VkPipelineStageFlagBits stage)
+ CmdWriteTimeStamp(vk::QueryPool *queryPool, uint32_t query, VkPipelineStageFlagBits2 stage)
: queryPool(queryPool)
, query(query)
, stage(stage)
@@ -1153,7 +1151,7 @@
void execute(vk::CommandBuffer::ExecutionState &executionState) override
{
- if(stage & ~(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT))
+ if(stage & ~(VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT))
{
// The `top of pipe` and `draw indirect` stages are handled in command buffer processing so a timestamp write
// done in those stages can just be done here without any additional synchronization.
@@ -1177,7 +1175,7 @@
private:
vk::QueryPool *const queryPool;
const uint32_t query;
- const VkPipelineStageFlagBits stage;
+ const VkPipelineStageFlagBits2 stage;
};
class CmdCopyQueryPoolResults : public vk::CommandBuffer::Command
@@ -1355,11 +1353,7 @@
addCommand<::CmdDispatch>(baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ);
}
-void CommandBuffer::pipelineBarrier(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
- VkDependencyFlags dependencyFlags,
- uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
+void CommandBuffer::pipelineBarrier(const VkDependencyInfo &pDependencyInfo)
{
addCommand<::CmdPipelineBarrier>();
}
@@ -1401,7 +1395,7 @@
addCommand<::CmdResetQueryPool>(queryPool, firstQuery, queryCount);
}
-void CommandBuffer::writeTimestamp(VkPipelineStageFlagBits pipelineStage, QueryPool *queryPool, uint32_t query)
+void CommandBuffer::writeTimestamp(VkPipelineStageFlags2 pipelineStage, QueryPool *queryPool, uint32_t query)
{
addCommand<::CmdWriteTimeStamp>(queryPool, query, pipelineStage);
}
@@ -1660,24 +1654,23 @@
}
}
-void CommandBuffer::setEvent(Event *event, VkPipelineStageFlags stageMask)
+void CommandBuffer::setEvent(Event *event, const VkDependencyInfo &pDependencyInfo)
{
ASSERT(state == RECORDING);
- addCommand<::CmdSignalEvent>(event, stageMask);
+ // TODO(b/117835459): We currently ignore the flags and signal the event at the last stage
+
+ addCommand<::CmdSignalEvent>(event);
}
-void CommandBuffer::resetEvent(Event *event, VkPipelineStageFlags stageMask)
+void CommandBuffer::resetEvent(Event *event, VkPipelineStageFlags2 stageMask)
{
ASSERT(state == RECORDING);
addCommand<::CmdResetEvent>(event, stageMask);
}
-void CommandBuffer::waitEvents(uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
+void CommandBuffer::waitEvents(uint32_t eventCount, const VkEvent *pEvents, const VkDependencyInfo &pDependencyInfo)
{
ASSERT(state == RECORDING);
diff --git a/src/Vulkan/VkCommandBuffer.hpp b/src/Vulkan/VkCommandBuffer.hpp
index 869e4fa..edd0f34 100644
--- a/src/Vulkan/VkCommandBuffer.hpp
+++ b/src/Vulkan/VkCommandBuffer.hpp
@@ -70,10 +70,7 @@
void dispatchBase(uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ,
uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
- void pipelineBarrier(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
- uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers);
+ void pipelineBarrier(const VkDependencyInfo &pDependencyInfo);
void bindPipeline(VkPipelineBindPoint pipelineBindPoint, Pipeline *pipeline);
void bindVertexBuffers(uint32_t firstBinding, uint32_t bindingCount,
const VkBuffer *pBuffers, const VkDeviceSize *pOffsets);
@@ -81,7 +78,7 @@
void beginQuery(QueryPool *queryPool, uint32_t query, VkQueryControlFlags flags);
void endQuery(QueryPool *queryPool, uint32_t query);
void resetQueryPool(QueryPool *queryPool, uint32_t firstQuery, uint32_t queryCount);
- void writeTimestamp(VkPipelineStageFlagBits pipelineStage, QueryPool *queryPool, uint32_t query);
+ void writeTimestamp(VkPipelineStageFlags2 pipelineStage, QueryPool *queryPool, uint32_t query);
void copyQueryPoolResults(const QueryPool *queryPool, uint32_t firstQuery, uint32_t queryCount,
Buffer *dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags);
void pushConstants(PipelineLayout *layout, VkShaderStageFlags stageFlags,
@@ -116,12 +113,9 @@
void clearAttachments(uint32_t attachmentCount, const VkClearAttachment *pAttachments,
uint32_t rectCount, const VkClearRect *pRects);
void resolveImage(const VkResolveImageInfo2 &resolveImageInfo);
- void setEvent(Event *event, VkPipelineStageFlags stageMask);
- void resetEvent(Event *event, VkPipelineStageFlags stageMask);
- void waitEvents(uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers);
+ void setEvent(Event *event, const VkDependencyInfo &pDependencyInfo);
+ void resetEvent(Event *event, VkPipelineStageFlags2 stageMask);
+ void waitEvents(uint32_t eventCount, const VkEvent *pEvents, const VkDependencyInfo &pDependencyInfo);
void draw(uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
void drawIndexed(uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance);
diff --git a/src/Vulkan/VkGetProcAddress.cpp b/src/Vulkan/VkGetProcAddress.cpp
index 89e3700..62f01b4 100644
--- a/src/Vulkan/VkGetProcAddress.cpp
+++ b/src/Vulkan/VkGetProcAddress.cpp
@@ -448,6 +448,17 @@
{
MAKE_VULKAN_DEVICE_ENTRY(vkGetPhysicalDeviceToolPropertiesEXT),
} },
+ // VK_KHR_synchronization2
+ {
+ VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME,
+ {
+ MAKE_VULKAN_DEVICE_ENTRY(vkCmdPipelineBarrier2KHR),
+ MAKE_VULKAN_DEVICE_ENTRY(vkCmdResetEvent2KHR),
+ MAKE_VULKAN_DEVICE_ENTRY(vkCmdSetEvent2KHR),
+ MAKE_VULKAN_DEVICE_ENTRY(vkCmdWaitEvents2KHR),
+ MAKE_VULKAN_DEVICE_ENTRY(vkCmdWriteTimestamp2KHR),
+ MAKE_VULKAN_DEVICE_ENTRY(vkQueueSubmit2KHR),
+ } },
#ifndef __ANDROID__
// VK_KHR_swapchain
{
diff --git a/src/Vulkan/VkPhysicalDevice.cpp b/src/Vulkan/VkPhysicalDevice.cpp
index 8d3af56..5e2dfb6 100644
--- a/src/Vulkan/VkPhysicalDevice.cpp
+++ b/src/Vulkan/VkPhysicalDevice.cpp
@@ -349,6 +349,11 @@
features->formatA4B4G4R4 = VK_TRUE;
}
+static void getPhysicalDeviceSynchronization2Features(VkPhysicalDeviceSynchronization2Features *features)
+{
+ features->synchronization2 = VK_TRUE;
+}
+
void PhysicalDevice::getFeatures2(VkPhysicalDeviceFeatures2 *features) const
{
features->features = getFeatures();
@@ -449,6 +454,10 @@
break;
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
getPhysicalDevice4444FormatsFeaturesExt(reinterpret_cast<struct VkPhysicalDevice4444FormatsFeaturesEXT *>(curExtension));
+ break;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES:
+ getPhysicalDeviceSynchronization2Features(reinterpret_cast<struct VkPhysicalDeviceSynchronization2Features *>(curExtension));
+ break;
// Unsupported extensions, but used by dEQP
// TODO(b/176893525): This may not be legal.
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
diff --git a/src/Vulkan/VkQueue.cpp b/src/Vulkan/VkQueue.cpp
index f664fea..c48fb49 100644
--- a/src/Vulkan/VkQueue.cpp
+++ b/src/Vulkan/VkQueue.cpp
@@ -18,6 +18,7 @@
#include "VkFence.hpp"
#include "VkSemaphore.hpp"
#include "VkStringify.hpp"
+#include "VkStructConversion.hpp"
#include "VkTimelineSemaphore.hpp"
#include "Device/Renderer.hpp"
#include "WSI/VkSwapchainKHR.hpp"
@@ -31,139 +32,6 @@
namespace vk {
-Queue::SubmitInfo *Queue::DeepCopySubmitInfo(uint32_t submitCount, const VkSubmitInfo *pSubmits)
-{
- size_t submitSize = sizeof(SubmitInfo) * submitCount;
- size_t totalSize = submitSize;
- for(uint32_t i = 0; i < submitCount; i++)
- {
- totalSize += pSubmits[i].waitSemaphoreCount * sizeof(VkSemaphore);
- totalSize += pSubmits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags);
- totalSize += pSubmits[i].signalSemaphoreCount * sizeof(VkSemaphore);
- totalSize += pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer);
-
- for(const auto *extension = reinterpret_cast<const VkBaseInStructure *>(pSubmits[i].pNext);
- extension != nullptr; extension = reinterpret_cast<const VkBaseInStructure *>(extension->pNext))
- {
- switch(extension->sType)
- {
- case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
- {
- const auto *tlsSubmitInfo = reinterpret_cast<const VkTimelineSemaphoreSubmitInfo *>(extension);
- totalSize += tlsSubmitInfo->waitSemaphoreValueCount * sizeof(uint64_t);
- totalSize += tlsSubmitInfo->signalSemaphoreValueCount * sizeof(uint64_t);
- }
- break;
- case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO:
- // SwiftShader doesn't use device group submit info because it only supports a single physical device.
- // However, this extension is core in Vulkan 1.1, so we must treat it as a valid structure type.
- break;
- case VK_STRUCTURE_TYPE_MAX_ENUM:
- // dEQP tests that this value is ignored.
- break;
- default:
- UNSUPPORTED("submitInfo[%d]->pNext sType: %s", i, vk::Stringify(extension->sType).c_str());
- break;
- }
- }
- }
-
- uint8_t *mem = static_cast<uint8_t *>(
- vk::allocateHostMemory(totalSize, vk::REQUIRED_MEMORY_ALIGNMENT, vk::NULL_ALLOCATION_CALLBACKS, vk::Fence::GetAllocationScope()));
-
- auto submits = new(mem) SubmitInfo[submitCount];
- mem += submitSize;
-
- for(uint32_t i = 0; i < submitCount; i++)
- {
- submits[i].commandBufferCount = pSubmits[i].commandBufferCount;
- submits[i].signalSemaphoreCount = pSubmits[i].signalSemaphoreCount;
- submits[i].waitSemaphoreCount = pSubmits[i].waitSemaphoreCount;
-
- submits[i].pWaitSemaphores = nullptr;
- submits[i].pWaitDstStageMask = nullptr;
- submits[i].pSignalSemaphores = nullptr;
- submits[i].pCommandBuffers = nullptr;
-
- if(pSubmits[i].waitSemaphoreCount > 0)
- {
- size_t size = pSubmits[i].waitSemaphoreCount * sizeof(VkSemaphore);
- submits[i].pWaitSemaphores = reinterpret_cast<const VkSemaphore *>(mem);
- memcpy(mem, pSubmits[i].pWaitSemaphores, size);
- mem += size;
-
- size = pSubmits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags);
- submits[i].pWaitDstStageMask = reinterpret_cast<const VkPipelineStageFlags *>(mem);
- memcpy(mem, pSubmits[i].pWaitDstStageMask, size);
- mem += size;
- }
-
- if(pSubmits[i].signalSemaphoreCount > 0)
- {
- size_t size = pSubmits[i].signalSemaphoreCount * sizeof(VkSemaphore);
- submits[i].pSignalSemaphores = reinterpret_cast<const VkSemaphore *>(mem);
- memcpy(mem, pSubmits[i].pSignalSemaphores, size);
- mem += size;
- }
-
- if(pSubmits[i].commandBufferCount > 0)
- {
- size_t size = pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer);
- submits[i].pCommandBuffers = reinterpret_cast<const VkCommandBuffer *>(mem);
- memcpy(mem, pSubmits[i].pCommandBuffers, size);
- mem += size;
- }
-
- submits[i].waitSemaphoreValueCount = 0;
- submits[i].pWaitSemaphoreValues = nullptr;
- submits[i].signalSemaphoreValueCount = 0;
- submits[i].pSignalSemaphoreValues = nullptr;
-
- for(const auto *extension = reinterpret_cast<const VkBaseInStructure *>(pSubmits[i].pNext);
- extension != nullptr; extension = reinterpret_cast<const VkBaseInStructure *>(extension->pNext))
- {
- switch(extension->sType)
- {
- case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
- {
- const VkTimelineSemaphoreSubmitInfo *tlsSubmitInfo = reinterpret_cast<const VkTimelineSemaphoreSubmitInfo *>(extension);
-
- if(tlsSubmitInfo->waitSemaphoreValueCount > 0)
- {
- submits[i].waitSemaphoreValueCount = tlsSubmitInfo->waitSemaphoreValueCount;
- size_t size = tlsSubmitInfo->waitSemaphoreValueCount * sizeof(uint64_t);
- submits[i].pWaitSemaphoreValues = reinterpret_cast<uint64_t *>(mem);
- memcpy(mem, tlsSubmitInfo->pWaitSemaphoreValues, size);
- mem += size;
- }
-
- if(tlsSubmitInfo->signalSemaphoreValueCount > 0)
- {
- submits[i].signalSemaphoreValueCount = tlsSubmitInfo->signalSemaphoreValueCount;
- size_t size = tlsSubmitInfo->signalSemaphoreValueCount * sizeof(uint64_t);
- submits[i].pSignalSemaphoreValues = reinterpret_cast<uint64_t *>(mem);
- memcpy(mem, tlsSubmitInfo->pSignalSemaphoreValues, size);
- mem += size;
- }
- }
- break;
- case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO:
- // SwiftShader doesn't use device group submit info because it only supports a single physical device.
- // However, this extension is core in Vulkan 1.1, so we must treat it as a valid structure type.
- break;
- case VK_STRUCTURE_TYPE_MAX_ENUM:
- // dEQP tests that this value is ignored.
- break;
- default:
- UNSUPPORTED("submitInfo[%d]->pNext sType: %s", i, vk::Stringify(extension->sType).c_str());
- break;
- }
- }
- }
-
- return submits;
-}
-
Queue::Queue(Device *device, marl::Scheduler *scheduler)
: device(device)
{
@@ -182,13 +50,13 @@
garbageCollect();
}
-VkResult Queue::submit(uint32_t submitCount, const VkSubmitInfo *pSubmits, Fence *fence)
+VkResult Queue::submit(uint32_t submitCount, SubmitInfo *pSubmits, Fence *fence)
{
garbageCollect();
Task task;
task.submitCount = submitCount;
- task.pSubmits = DeepCopySubmitInfo(submitCount, pSubmits);
+ task.pSubmits = pSubmits;
if(fence)
{
task.events = fence->getCountedEvent();
@@ -317,7 +185,7 @@
{
auto v = toDelete.tryTake();
if(!v.second) { break; }
- vk::freeHostMemory(v.first, NULL_ALLOCATION_CALLBACKS);
+ SubmitInfo::Release(v.first);
}
}
diff --git a/src/Vulkan/VkQueue.hpp b/src/Vulkan/VkQueue.hpp
index f21b1fe..5691020 100644
--- a/src/Vulkan/VkQueue.hpp
+++ b/src/Vulkan/VkQueue.hpp
@@ -36,6 +36,7 @@
class Device;
class Fence;
+struct SubmitInfo;
class Queue
{
@@ -50,7 +51,7 @@
return reinterpret_cast<VkQueue>(this);
}
- VkResult submit(uint32_t submitCount, const VkSubmitInfo *pSubmits, Fence *fence);
+ VkResult submit(uint32_t submitCount, SubmitInfo *pSubmits, Fence *fence);
VkResult waitIdle();
#ifndef __ANDROID__
VkResult present(const VkPresentInfoKHR *presentInfo);
@@ -61,21 +62,6 @@
void insertDebugUtilsLabel(const VkDebugUtilsLabelEXT *pLabelInfo);
private:
- struct SubmitInfo
- {
- uint32_t waitSemaphoreCount;
- const VkSemaphore *pWaitSemaphores;
- const VkPipelineStageFlags *pWaitDstStageMask;
- uint32_t commandBufferCount;
- const VkCommandBuffer *pCommandBuffers;
- uint32_t signalSemaphoreCount;
- const VkSemaphore *pSignalSemaphores;
- uint32_t waitSemaphoreValueCount;
- const uint64_t *pWaitSemaphoreValues;
- uint32_t signalSemaphoreValueCount;
- const uint64_t *pSignalSemaphoreValues;
- };
-
struct Task
{
uint32_t submitCount = 0;
@@ -93,7 +79,6 @@
void taskLoop(marl::Scheduler *scheduler);
void garbageCollect();
void submitQueue(const Task &task);
- static SubmitInfo *DeepCopySubmitInfo(uint32_t submitCount, const VkSubmitInfo *pSubmits);
Device *device;
std::unique_ptr<sw::Renderer> renderer;
diff --git a/src/Vulkan/VkStructConversion.hpp b/src/Vulkan/VkStructConversion.hpp
index d87f9a2..6f34f11 100644
--- a/src/Vulkan/VkStructConversion.hpp
+++ b/src/Vulkan/VkStructConversion.hpp
@@ -15,7 +15,9 @@
#ifndef VK_STRUCT_CONVERSION_HPP_
#define VK_STRUCT_CONVERSION_HPP_
-#include "Vulkan/VulkanPlatform.hpp"
+#include "VkMemory.hpp"
+#include "VkStringify.hpp"
+#include <cstring>
#include <vector>
namespace vk {
@@ -226,6 +228,104 @@
std::vector<VkImageResolve2> regions;
};
+struct DependencyInfo : public VkDependencyInfo
+{
+ DependencyInfo(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
+ VkDependencyFlags dependencyFlags,
+ uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
+ : VkDependencyInfo{
+ VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+ nullptr,
+ dependencyFlags,
+ memoryBarrierCount,
+ nullptr,
+ bufferMemoryBarrierCount,
+ nullptr,
+ imageMemoryBarrierCount,
+ nullptr
+ }
+ {
+ if((memoryBarrierCount == 0) &&
+ (bufferMemoryBarrierCount == 0) &&
+ (imageMemoryBarrierCount == 0))
+ {
+ // Create a single memory barrier entry to store the source and destination stage masks
+ memoryBarriers.resize(1);
+ memoryBarriers[0] = {
+ VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
+ nullptr,
+ srcStageMask,
+ VK_ACCESS_2_NONE,
+ dstStageMask,
+ VK_ACCESS_2_NONE
+ };
+ }
+ else
+ {
+ memoryBarriers.resize(memoryBarrierCount);
+ for(uint32_t i = 0; i < memoryBarrierCount; i++)
+ {
+ memoryBarriers[i] = {
+ VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
+ pMemoryBarriers[i].pNext,
+ srcStageMask,
+ pMemoryBarriers[i].srcAccessMask,
+ dstStageMask,
+ pMemoryBarriers[i].dstAccessMask
+ };
+ }
+
+ bufferMemoryBarriers.resize(bufferMemoryBarrierCount);
+ for(uint32_t i = 0; i < bufferMemoryBarrierCount; i++)
+ {
+ bufferMemoryBarriers[i] = {
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2,
+ pBufferMemoryBarriers[i].pNext,
+ srcStageMask,
+ pBufferMemoryBarriers[i].srcAccessMask,
+ dstStageMask,
+ pBufferMemoryBarriers[i].dstAccessMask,
+ pBufferMemoryBarriers[i].srcQueueFamilyIndex,
+ pBufferMemoryBarriers[i].dstQueueFamilyIndex,
+ pBufferMemoryBarriers[i].buffer,
+ pBufferMemoryBarriers[i].offset,
+ pBufferMemoryBarriers[i].size
+ };
+ }
+
+ imageMemoryBarriers.resize(imageMemoryBarrierCount);
+ for(uint32_t i = 0; i < imageMemoryBarrierCount; i++)
+ {
+ imageMemoryBarriers[i] = {
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
+ pImageMemoryBarriers[i].pNext,
+ srcStageMask,
+ pImageMemoryBarriers[i].srcAccessMask,
+ dstStageMask,
+ pImageMemoryBarriers[i].dstAccessMask,
+ pImageMemoryBarriers[i].oldLayout,
+ pImageMemoryBarriers[i].newLayout,
+ pImageMemoryBarriers[i].srcQueueFamilyIndex,
+ pImageMemoryBarriers[i].dstQueueFamilyIndex,
+ pImageMemoryBarriers[i].image,
+ pImageMemoryBarriers[i].subresourceRange
+ };
+ }
+ }
+
+ this->pMemoryBarriers = memoryBarriers.empty() ? nullptr : &memoryBarriers.front();
+ this->pBufferMemoryBarriers = bufferMemoryBarriers.empty() ? nullptr : &bufferMemoryBarriers.front();
+ this->pImageMemoryBarriers = imageMemoryBarriers.empty() ? nullptr : &imageMemoryBarriers.front();
+ }
+
+private:
+ std::vector<VkMemoryBarrier2> memoryBarriers;
+ std::vector<VkBufferMemoryBarrier2> bufferMemoryBarriers;
+ std::vector<VkImageMemoryBarrier2> imageMemoryBarriers;
+};
+
struct ImageSubresource : VkImageSubresource
{
ImageSubresource(const VkImageSubresourceLayers &subresourceLayers)
@@ -257,6 +357,267 @@
{}
};
+struct SubmitInfo
+{
+ static SubmitInfo *Allocate(uint32_t submitCount, const VkSubmitInfo *pSubmits)
+ {
+ size_t submitSize = sizeof(SubmitInfo) * submitCount;
+ size_t totalSize = submitSize;
+ for(uint32_t i = 0; i < submitCount; i++)
+ {
+ totalSize += pSubmits[i].waitSemaphoreCount * sizeof(VkSemaphore);
+ totalSize += pSubmits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags);
+ totalSize += pSubmits[i].signalSemaphoreCount * sizeof(VkSemaphore);
+ totalSize += pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer);
+
+ for(const auto *extension = reinterpret_cast<const VkBaseInStructure *>(pSubmits[i].pNext);
+ extension != nullptr; extension = reinterpret_cast<const VkBaseInStructure *>(extension->pNext))
+ {
+ switch(extension->sType)
+ {
+ case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
+ {
+ const auto *tlsSubmitInfo = reinterpret_cast<const VkTimelineSemaphoreSubmitInfo *>(extension);
+ totalSize += tlsSubmitInfo->waitSemaphoreValueCount * sizeof(uint64_t);
+ totalSize += tlsSubmitInfo->signalSemaphoreValueCount * sizeof(uint64_t);
+ }
+ break;
+ case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO:
+ // SwiftShader doesn't use device group submit info because it only supports a single physical device.
+ // However, this extension is core in Vulkan 1.1, so we must treat it as a valid structure type.
+ break;
+ case VK_STRUCTURE_TYPE_MAX_ENUM:
+ // dEQP tests that this value is ignored.
+ break;
+ default:
+ UNSUPPORTED("submitInfo[%d]->pNext sType: %s", i, vk::Stringify(extension->sType).c_str());
+ break;
+ }
+ }
+ }
+
+ uint8_t *mem = static_cast<uint8_t *>(
+ vk::allocateHostMemory(totalSize, vk::REQUIRED_MEMORY_ALIGNMENT, vk::NULL_ALLOCATION_CALLBACKS, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT));
+
+ auto submits = new(mem) SubmitInfo[submitCount];
+ mem += submitSize;
+
+ for(uint32_t i = 0; i < submitCount; i++)
+ {
+ submits[i].commandBufferCount = pSubmits[i].commandBufferCount;
+ submits[i].signalSemaphoreCount = pSubmits[i].signalSemaphoreCount;
+ submits[i].waitSemaphoreCount = pSubmits[i].waitSemaphoreCount;
+
+ submits[i].pWaitSemaphores = nullptr;
+ submits[i].pWaitDstStageMask = nullptr;
+ submits[i].pSignalSemaphores = nullptr;
+ submits[i].pCommandBuffers = nullptr;
+
+ if(pSubmits[i].waitSemaphoreCount > 0)
+ {
+ size_t size = pSubmits[i].waitSemaphoreCount * sizeof(VkSemaphore);
+ submits[i].pWaitSemaphores = reinterpret_cast<VkSemaphore *>(mem);
+ memcpy(mem, pSubmits[i].pWaitSemaphores, size);
+ mem += size;
+
+ size = pSubmits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags);
+ submits[i].pWaitDstStageMask = reinterpret_cast<VkPipelineStageFlags *>(mem);
+ memcpy(mem, pSubmits[i].pWaitDstStageMask, size);
+ mem += size;
+ }
+
+ if(pSubmits[i].signalSemaphoreCount > 0)
+ {
+ size_t size = pSubmits[i].signalSemaphoreCount * sizeof(VkSemaphore);
+ submits[i].pSignalSemaphores = reinterpret_cast<VkSemaphore *>(mem);
+ memcpy(mem, pSubmits[i].pSignalSemaphores, size);
+ mem += size;
+ }
+
+ if(pSubmits[i].commandBufferCount > 0)
+ {
+ size_t size = pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer);
+ submits[i].pCommandBuffers = reinterpret_cast<VkCommandBuffer *>(mem);
+ memcpy(mem, pSubmits[i].pCommandBuffers, size);
+ mem += size;
+ }
+
+ submits[i].waitSemaphoreValueCount = 0;
+ submits[i].pWaitSemaphoreValues = nullptr;
+ submits[i].signalSemaphoreValueCount = 0;
+ submits[i].pSignalSemaphoreValues = nullptr;
+
+ for(const auto *extension = reinterpret_cast<const VkBaseInStructure *>(pSubmits[i].pNext);
+ extension != nullptr; extension = reinterpret_cast<const VkBaseInStructure *>(extension->pNext))
+ {
+ switch(extension->sType)
+ {
+ case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
+ {
+ const VkTimelineSemaphoreSubmitInfo *tlsSubmitInfo = reinterpret_cast<const VkTimelineSemaphoreSubmitInfo *>(extension);
+
+ if(tlsSubmitInfo->waitSemaphoreValueCount > 0)
+ {
+ submits[i].waitSemaphoreValueCount = tlsSubmitInfo->waitSemaphoreValueCount;
+ size_t size = tlsSubmitInfo->waitSemaphoreValueCount * sizeof(uint64_t);
+ submits[i].pWaitSemaphoreValues = reinterpret_cast<uint64_t *>(mem);
+ memcpy(mem, tlsSubmitInfo->pWaitSemaphoreValues, size);
+ mem += size;
+ }
+
+ if(tlsSubmitInfo->signalSemaphoreValueCount > 0)
+ {
+ submits[i].signalSemaphoreValueCount = tlsSubmitInfo->signalSemaphoreValueCount;
+ size_t size = tlsSubmitInfo->signalSemaphoreValueCount * sizeof(uint64_t);
+ submits[i].pSignalSemaphoreValues = reinterpret_cast<uint64_t *>(mem);
+ memcpy(mem, tlsSubmitInfo->pSignalSemaphoreValues, size);
+ mem += size;
+ }
+ }
+ break;
+ case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO:
+ // SwiftShader doesn't use device group submit info because it only supports a single physical device.
+ // However, this extension is core in Vulkan 1.1, so we must treat it as a valid structure type.
+ break;
+ case VK_STRUCTURE_TYPE_MAX_ENUM:
+ // dEQP tests that this value is ignored.
+ break;
+ default:
+ UNSUPPORTED("submitInfo[%d]->pNext sType: %s", i, vk::Stringify(extension->sType).c_str());
+ break;
+ }
+ }
+ }
+
+ return submits;
+ }
+
+ static SubmitInfo *Allocate(uint32_t submitCount, const VkSubmitInfo2 *pSubmits)
+ {
+ size_t submitSize = sizeof(SubmitInfo) * submitCount;
+ size_t totalSize = submitSize;
+ for(uint32_t i = 0; i < submitCount; i++)
+ {
+ totalSize += pSubmits[i].waitSemaphoreInfoCount * sizeof(VkSemaphore);
+ totalSize += pSubmits[i].waitSemaphoreInfoCount * sizeof(VkPipelineStageFlags);
+ totalSize += pSubmits[i].waitSemaphoreInfoCount * sizeof(uint64_t);
+ totalSize += pSubmits[i].signalSemaphoreInfoCount * sizeof(VkSemaphore);
+ totalSize += pSubmits[i].signalSemaphoreInfoCount * sizeof(uint64_t);
+ totalSize += pSubmits[i].commandBufferInfoCount * sizeof(VkCommandBuffer);
+
+ for(const auto *extension = reinterpret_cast<const VkBaseInStructure *>(pSubmits[i].pNext);
+ extension != nullptr; extension = reinterpret_cast<const VkBaseInStructure *>(extension->pNext))
+ {
+ switch(extension->sType)
+ {
+ case VK_STRUCTURE_TYPE_MAX_ENUM:
+ // dEQP tests that this value is ignored.
+ break;
+ case VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR: // VK_KHR_performance_query
+ case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR: // VK_KHR_win32_keyed_mutex
+ case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV: // VK_NV_win32_keyed_mutex
+ default:
+ UNSUPPORTED("submitInfo[%d]->pNext sType: %s", i, vk::Stringify(extension->sType).c_str());
+ break;
+ }
+ }
+ }
+
+ uint8_t *mem = static_cast<uint8_t *>(
+ vk::allocateHostMemory(totalSize, vk::REQUIRED_MEMORY_ALIGNMENT, vk::NULL_ALLOCATION_CALLBACKS, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT));
+
+ auto submits = new(mem) SubmitInfo[submitCount];
+ mem += submitSize;
+
+ for(uint32_t i = 0; i < submitCount; i++)
+ {
+ submits[i].commandBufferCount = pSubmits[i].commandBufferInfoCount;
+ submits[i].signalSemaphoreCount = pSubmits[i].signalSemaphoreInfoCount;
+ submits[i].waitSemaphoreCount = pSubmits[i].waitSemaphoreInfoCount;
+
+ submits[i].signalSemaphoreValueCount = pSubmits[i].signalSemaphoreInfoCount;
+ submits[i].waitSemaphoreValueCount = pSubmits[i].waitSemaphoreInfoCount;
+
+ submits[i].pWaitSemaphores = nullptr;
+ submits[i].pWaitDstStageMask = nullptr;
+ submits[i].pSignalSemaphores = nullptr;
+ submits[i].pCommandBuffers = nullptr;
+ submits[i].pWaitSemaphoreValues = nullptr;
+ submits[i].pSignalSemaphoreValues = nullptr;
+
+ if(submits[i].waitSemaphoreCount > 0)
+ {
+ size_t size = submits[i].waitSemaphoreCount * sizeof(VkSemaphore);
+ submits[i].pWaitSemaphores = reinterpret_cast<VkSemaphore *>(mem);
+ mem += size;
+
+ size = submits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags);
+ submits[i].pWaitDstStageMask = reinterpret_cast<VkPipelineStageFlags *>(mem);
+ mem += size;
+
+ size = submits[i].waitSemaphoreCount * sizeof(uint64_t);
+ submits[i].pWaitSemaphoreValues = reinterpret_cast<uint64_t *>(mem);
+ mem += size;
+
+ for(uint32_t j = 0; j < submits[i].waitSemaphoreCount; j++)
+ {
+ submits[i].pWaitSemaphores[j] = pSubmits[i].pWaitSemaphoreInfos[j].semaphore;
+ submits[i].pWaitDstStageMask[j] = pSubmits[i].pWaitSemaphoreInfos[j].stageMask;
+ submits[i].pWaitSemaphoreValues[j] = pSubmits[i].pWaitSemaphoreInfos[j].value;
+ }
+ }
+
+ if(submits[i].signalSemaphoreCount > 0)
+ {
+ size_t size = submits[i].signalSemaphoreCount * sizeof(VkSemaphore);
+ submits[i].pSignalSemaphores = reinterpret_cast<VkSemaphore *>(mem);
+ mem += size;
+
+ size = submits[i].signalSemaphoreCount * sizeof(uint64_t);
+ submits[i].pSignalSemaphoreValues = reinterpret_cast<uint64_t *>(mem);
+ mem += size;
+
+ for(uint32_t j = 0; j < submits[i].signalSemaphoreCount; j++)
+ {
+ submits[i].pSignalSemaphores[j] = pSubmits[i].pSignalSemaphoreInfos[j].semaphore;
+ submits[i].pSignalSemaphoreValues[j] = pSubmits[i].pSignalSemaphoreInfos[j].value;
+ }
+ }
+
+ if(submits[i].commandBufferCount > 0)
+ {
+ size_t size = submits[i].commandBufferCount * sizeof(VkCommandBuffer);
+ submits[i].pCommandBuffers = reinterpret_cast<VkCommandBuffer *>(mem);
+ mem += size;
+
+ for(uint32_t j = 0; j < submits[i].commandBufferCount; j++)
+ {
+ submits[i].pCommandBuffers[j] = pSubmits[i].pCommandBufferInfos[j].commandBuffer;
+ }
+ }
+ }
+
+ return submits;
+ }
+
+ static void Release(SubmitInfo *submitInfo)
+ {
+ vk::freeHostMemory(submitInfo, NULL_ALLOCATION_CALLBACKS);
+ }
+
+ uint32_t waitSemaphoreCount;
+ VkSemaphore *pWaitSemaphores;
+ VkPipelineStageFlags *pWaitDstStageMask;
+ uint32_t commandBufferCount;
+ VkCommandBuffer *pCommandBuffers;
+ uint32_t signalSemaphoreCount;
+ VkSemaphore *pSignalSemaphores;
+ uint32_t waitSemaphoreValueCount;
+ uint64_t *pWaitSemaphoreValues;
+ uint32_t signalSemaphoreValueCount;
+ uint64_t *pSignalSemaphoreValues;
+};
+
} // namespace vk
#endif // VK_STRUCT_CONVERSION_HPP_
\ No newline at end of file
diff --git a/src/Vulkan/libVulkan.cpp b/src/Vulkan/libVulkan.cpp
index 05e836f..f615fe8 100644
--- a/src/Vulkan/libVulkan.cpp
+++ b/src/Vulkan/libVulkan.cpp
@@ -419,6 +419,7 @@
{ { VK_EXT_TOOLING_INFO_EXTENSION_NAME, VK_EXT_TOOLING_INFO_SPEC_VERSION } },
{ { VK_KHR_COPY_COMMANDS_2_EXTENSION_NAME, VK_KHR_COPY_COMMANDS_2_SPEC_VERSION } },
{ { VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME, VK_KHR_FORMAT_FEATURE_FLAGS_2_SPEC_VERSION } },
+ { { VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME, VK_KHR_SYNCHRONIZATION_2_SPEC_VERSION } },
// Additional extension
{ { VK_GOOGLE_DECORATE_STRING_EXTENSION_NAME, VK_GOOGLE_DECORATE_STRING_SPEC_VERSION } },
{ { VK_GOOGLE_HLSL_FUNCTIONALITY_1_EXTENSION_NAME, VK_GOOGLE_HLSL_FUNCTIONALITY_1_SPEC_VERSION } },
@@ -937,6 +938,7 @@
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES:
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES:
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES:
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
break;
default:
@@ -1068,7 +1070,15 @@
TRACE("(VkQueue queue = %p, uint32_t submitCount = %d, const VkSubmitInfo* pSubmits = %p, VkFence fence = %p)",
queue, submitCount, pSubmits, static_cast<void *>(fence));
- return vk::Cast(queue)->submit(submitCount, pSubmits, vk::Cast(fence));
+ return vk::Cast(queue)->submit(submitCount, vk::SubmitInfo::Allocate(submitCount, pSubmits), vk::Cast(fence));
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit2(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2 *pSubmits, VkFence fence)
+{
+ TRACE("(VkQueue queue = %p, uint32_t submitCount = %d, const VkSubmitInfo2* pSubmits = %p, VkFence fence = %p)",
+ queue, submitCount, pSubmits, static_cast<void *>(fence));
+
+ return vk::Cast(queue)->submit(submitCount, vk::SubmitInfo::Allocate(submitCount, pSubmits), vk::Cast(fence));
}
VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue)
@@ -1556,9 +1566,9 @@
TRACE("(VkDevice device = %p, const VkEventCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkEvent* pEvent = %p)",
device, pCreateInfo, pAllocator, pEvent);
- if(pCreateInfo->flags != 0)
+ // VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR is provided by VK_KHR_synchronization2
+ if((pCreateInfo->flags != 0) && (pCreateInfo->flags != VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR))
{
- // Vulkan 1.2: "flags is reserved for future use." "flags must be 0"
UNSUPPORTED("pCreateInfo->flags %d", int(pCreateInfo->flags));
}
@@ -2799,7 +2809,15 @@
TRACE("(VkCommandBuffer commandBuffer = %p, VkEvent event = %p, VkPipelineStageFlags stageMask = %d)",
commandBuffer, static_cast<void *>(event), int(stageMask));
- vk::Cast(commandBuffer)->setEvent(vk::Cast(event), stageMask);
+ vk::Cast(commandBuffer)->setEvent(vk::Cast(event), vk::DependencyInfo(stageMask, stageMask, VkDependencyFlags(0), 0, nullptr, 0, nullptr, 0, nullptr));
+}
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent2(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfo *pDependencyInfo)
+{
+ TRACE("(VkCommandBuffer commandBuffer = %p, VkEvent event = %p, const VkDependencyInfo* pDependencyInfo = %p)",
+ commandBuffer, static_cast<void *>(event), pDependencyInfo);
+
+ vk::Cast(commandBuffer)->setEvent(vk::Cast(event), *pDependencyInfo);
}
VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask)
@@ -2810,12 +2828,28 @@
vk::Cast(commandBuffer)->resetEvent(vk::Cast(event), stageMask);
}
+VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent2(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask)
+{
+	TRACE("(VkCommandBuffer commandBuffer = %p, VkEvent event = %p, VkPipelineStageFlags2 stageMask = %" PRIu64 ")",  // VkPipelineStageFlags2 is 64-bit; %d/int() would truncate the mask
+	      commandBuffer, static_cast<void *>(event), stageMask);
+
+	vk::Cast(commandBuffer)->resetEvent(vk::Cast(event), stageMask);
+}
+
VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
{
TRACE("(VkCommandBuffer commandBuffer = %p, uint32_t eventCount = %d, const VkEvent* pEvents = %p, VkPipelineStageFlags srcStageMask = 0x%x, VkPipelineStageFlags dstStageMask = 0x%x, uint32_t memoryBarrierCount = %d, const VkMemoryBarrier* pMemoryBarriers = %p, uint32_t bufferMemoryBarrierCount = %d, const VkBufferMemoryBarrier* pBufferMemoryBarriers = %p, uint32_t imageMemoryBarrierCount = %d, const VkImageMemoryBarrier* pImageMemoryBarriers = %p)",
commandBuffer, int(eventCount), pEvents, int(srcStageMask), int(dstStageMask), int(memoryBarrierCount), pMemoryBarriers, int(bufferMemoryBarrierCount), pBufferMemoryBarriers, int(imageMemoryBarrierCount), pImageMemoryBarriers);
- vk::Cast(commandBuffer)->waitEvents(eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
+ vk::Cast(commandBuffer)->waitEvents(eventCount, pEvents, vk::DependencyInfo(srcStageMask, dstStageMask, VkDependencyFlags(0), memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers));
+}
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents2(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, const VkDependencyInfo *pDependencyInfos)
+{
+ TRACE("(VkCommandBuffer commandBuffer = %p, uint32_t eventCount = %d, const VkEvent* pEvents = %p, const VkDependencyInfo* pDependencyInfos = %p)",
+ commandBuffer, int(eventCount), pEvents, pDependencyInfos);
+
+ vk::Cast(commandBuffer)->waitEvents(eventCount, pEvents, *pDependencyInfos);
}
VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
@@ -2825,7 +2859,15 @@
" uint32_t bufferMemoryBarrierCount = %d, const VkBufferMemoryBarrier* pBufferMemoryBarriers = %p, uint32_t imageMemoryBarrierCount = %d, const VkImageMemoryBarrier* pImageMemoryBarriers = %p)",
commandBuffer, int(srcStageMask), int(dstStageMask), dependencyFlags, int(memoryBarrierCount), pMemoryBarriers, int(bufferMemoryBarrierCount), pBufferMemoryBarriers, int(imageMemoryBarrierCount), pImageMemoryBarriers);
- vk::Cast(commandBuffer)->pipelineBarrier(srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
+ vk::Cast(commandBuffer)->pipelineBarrier(vk::DependencyInfo(srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers));
+}
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier2(VkCommandBuffer commandBuffer, const VkDependencyInfo *pDependencyInfo)
+{
+ TRACE("(VkCommandBuffer commandBuffer = %p, const VkDependencyInfo* pDependencyInfo = %p)",
+ commandBuffer, pDependencyInfo);
+
+ vk::Cast(commandBuffer)->pipelineBarrier(*pDependencyInfo);
}
VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags)
@@ -2860,6 +2902,14 @@
vk::Cast(commandBuffer)->writeTimestamp(pipelineStage, vk::Cast(queryPool), query);
}
+VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp2(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkQueryPool queryPool, uint32_t query)
+{
+	TRACE("(VkCommandBuffer commandBuffer = %p, VkPipelineStageFlags2 stage = %" PRIu64 ", VkQueryPool queryPool = %p, uint32_t query = %d)",  // stage is 64-bit; %d/int() would truncate
+	      commandBuffer, stage, static_cast<void *>(queryPool), int(query));
+
+	vk::Cast(commandBuffer)->writeTimestamp(stage, vk::Cast(queryPool), query);
+}
+
VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags)
{
TRACE("(VkCommandBuffer commandBuffer = %p, VkQueryPool queryPool = %p, uint32_t firstQuery = %d, uint32_t queryCount = %d, VkBuffer dstBuffer = %p, VkDeviceSize dstOffset = %d, VkDeviceSize stride = %d, VkQueryResultFlags flags = %d)",
@@ -3853,44 +3903,6 @@
device, objectType, objectHandle, static_cast<void *>(privateDataSlot), pData);
}
-VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent2(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfo* pDependencyInfo)
-{
- TRACE("(VkCommandBuffer commandBuffer = %p, VkEvent event = %p, const VkDependencyInfo* pDependencyInfo = %p)",
- commandBuffer, static_cast<void *>(event), pDependencyInfo);
-}
-
-VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent2(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask)
-{
- TRACE("(VkCommandBuffer commandBuffer = %p, VkEvent event = %p, VkPipelineStageFlags2 stageMask = %" PRIu64 ")",
- commandBuffer, static_cast<void *>(event), stageMask);
-}
-
-VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents2(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, const VkDependencyInfo* pDependencyInfos)
-{
- TRACE("(VkCommandBuffer commandBuffer = %p, uint32_t eventCount = %d, const VkEvent* pEvents = %p, const VkDependencyInfo* pDependencyInfos = %p)",
- commandBuffer, eventCount, pEvents, pDependencyInfos);
-}
-
-VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier2(VkCommandBuffer commandBuffer, const VkDependencyInfo* pDependencyInfo)
-{
- TRACE("(VkCommandBuffer commandBuffer = %p, const VkDependencyInfo* pDependencyInfo = %p)",
- commandBuffer, pDependencyInfo);
-}
-
-VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp2(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkQueryPool queryPool, uint32_t query)
-{
- TRACE("(VkCommandBuffer commandBuffer = %p, VkPipelineStageFlags2 stage = %" PRIu64 ", VkQueryPool queryPool = %p, uint32_t query = %d)",
- commandBuffer, stage, static_cast<void *>(queryPool), query);
-}
-
-VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit2(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence)
-{
- TRACE("(VkQueue queue = %p, uint32_t submitCount = %d, const VkSubmitInfo2* pSubmits = %p, VkFence fence = %p)",
- queue, submitCount, pSubmits, static_cast<void *>(fence));
-
- return VK_SUCCESS;
-}
-
VKAPI_ATTR void VKAPI_CALL vkCmdBeginRendering(VkCommandBuffer commandBuffer, const VkRenderingInfo* pRenderingInfo)
{
TRACE("(VkCommandBuffer commandBuffer = %p, const VkRenderingInfo* pRenderingInfo = %p)",