clang-format the src/Vulkan directory

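Mechanically reformat all sources under src/Vulkan (including the
Debug/ subdirectory) with clang-format. No functional change is
intended; the churn comes from a handful of style rules:

- Pointer and reference qualifiers bind to the declarator name:
  "EventListener *l", "const std::string &name".
- No space after the "template" keyword: "template<typename T>".
- Constructor initializer lists place the colon and commas at the
  start of continuation lines.
- case labels are indented one level inside their switch.
- Namespace braces stay on the same line ("namespace vk {"), and
  namespace contents are not indented.
- Preprocessor directives nested inside #if blocks are indented
  after the hash (see the #error in Debug/Debug.cpp).
- Trailing comments are preceded by exactly two spaces, and column
  alignment of consecutive member declarations is dropped.
- #includes are sorted, with a file's own header first.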
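
For reference, a minimal .clang-format sketch that approximates this
style. This is an illustrative reconstruction based on the changes
below, not a copy of the repository's actual configuration file:

    # Illustrative approximation only; not SwiftShader's real .clang-format.
    Language: Cpp
    PointerAlignment: Right
    SpaceAfterTemplateKeyword: false
    BreakConstructorInitializers: BeforeComma
    IndentCaseLabels: true
    IndentPPDirectives: AfterHash
    NamespaceIndentation: None
    FixNamespaceComments: true
    BreakBeforeBraces: Custom
    BraceWrapping:
      AfterNamespace: false
      AfterClass: true
      AfterFunction: true
      AfterControlStatement: true
    SpaceBeforeParens: Never
    SpacesBeforeTrailingComments: 2
    Cpp11BracedListStyle: false
    AllowShortFunctionsOnASingleLine: Inline
    AllowShortBlocksOnASingleLine: true
    SortIncludes: true
    UseTab: ForIndentation
    ColumnLimit: 0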
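
With such a file at the repository root, the directory can be
reformatted in place with, for example:

    find src/Vulkan -name '*.cpp' -o -name '*.hpp' | xargs clang-format -i

By default clang-format looks for the nearest .clang-format file in a
parent directory of each input, so no -style argument is needed.
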
Bug: b/144825072

Change-Id: I1bd5196b34a7974a41dcb95814a1ae8643b26f22
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/39658
Kokoro-Presubmit: kokoro <noreply+kokoro@google.com>
Tested-by: Ben Clayton <bclayton@google.com>
Reviewed-by: Nicolas Capens <nicolascapens@google.com>
diff --git a/src/Vulkan/Debug/Context.cpp b/src/Vulkan/Debug/Context.cpp
index aa62304..d11c130 100644
--- a/src/Vulkan/Debug/Context.cpp
+++ b/src/Vulkan/Debug/Context.cpp
@@ -26,8 +26,7 @@
 #include <unordered_map>
 #include <unordered_set>
 
-namespace
-{
+namespace {
 
 class Broadcaster : public vk::dbg::EventListener
 {
@@ -40,17 +39,17 @@
 	void onLineBreakpointHit(Thread::ID) override;
 	void onFunctionBreakpointHit(Thread::ID) override;
 
-	void add(EventListener*);
-	void remove(EventListener*);
+	void add(EventListener *);
+	void remove(EventListener *);
 
 private:
-	template <typename F>
-	inline void foreach(F&&);
+	template<typename F>
+	inline void foreach(F &&);
 
-	template <typename F>
-	inline void modify(F&&);
+	template<typename F>
+	inline void modify(F &&);
 
-	using ListenerSet = std::unordered_set<EventListener*>;
+	using ListenerSet = std::unordered_set<EventListener *>;
 	std::recursive_mutex mutex;
 	std::shared_ptr<ListenerSet> listeners = std::make_shared<ListenerSet>();
 	int listenersInUse = 0;
@@ -58,36 +57,36 @@
 
 void Broadcaster::onThreadStarted(Thread::ID id)
 {
-	foreach([&](EventListener* l) { l->onThreadStarted(id); });
+	foreach([&](EventListener *l) { l->onThreadStarted(id); });
 }
 
 void Broadcaster::onThreadStepped(Thread::ID id)
 {
-	foreach([&](EventListener* l) { l->onThreadStepped(id); });
+	foreach([&](EventListener *l) { l->onThreadStepped(id); });
 }
 
 void Broadcaster::onLineBreakpointHit(Thread::ID id)
 {
-	foreach([&](EventListener* l) { l->onLineBreakpointHit(id); });
+	foreach([&](EventListener *l) { l->onLineBreakpointHit(id); });
 }
 
 void Broadcaster::onFunctionBreakpointHit(Thread::ID id)
 {
-	foreach([&](EventListener* l) { l->onFunctionBreakpointHit(id); });
+	foreach([&](EventListener *l) { l->onFunctionBreakpointHit(id); });
 }
 
-void Broadcaster::add(EventListener* l)
+void Broadcaster::add(EventListener *l)
 {
 	modify([&]() { listeners->emplace(l); });
 }
 
-void Broadcaster::remove(EventListener* l)
+void Broadcaster::remove(EventListener *l)
 {
 	modify([&]() { listeners->erase(l); });
 }
 
-template <typename F>
-void Broadcaster::foreach(F&& f)
+template<typename F>
+void Broadcaster::foreach(F &&f)
 {
 	std::unique_lock<std::recursive_mutex> lock(mutex);
 	++listenersInUse;
@@ -96,8 +95,8 @@
 	--listenersInUse;
 }
 
-template <typename F>
-void Broadcaster::modify(F&& f)
+template<typename F>
+void Broadcaster::modify(F &&f)
 {
 	std::unique_lock<std::recursive_mutex> lock(mutex);
 	if(listenersInUse > 0)
@@ -111,10 +110,8 @@
 
 }  // namespace
 
-namespace vk
-{
-namespace dbg
-{
+namespace vk {
+namespace dbg {
 
 ////////////////////////////////////////////////////////////////////////////////
 // Context::Impl
@@ -124,16 +121,16 @@
 public:
 	// Context compliance
 	Lock lock() override;
-	void addListener(EventListener*) override;
-	void removeListener(EventListener*) override;
-	EventListener* broadcast() override;
+	void addListener(EventListener *) override;
+	void removeListener(EventListener *) override;
+	EventListener *broadcast() override;
 
-	void addFile(const std::shared_ptr<File>& file);
+	void addFile(const std::shared_ptr<File> &file);
 
 	Broadcaster broadcaster;
 
 	std::mutex mutex;
-	std::vector<EventListener*> eventListeners;
+	std::vector<EventListener *> eventListeners;
 	std::unordered_map<std::thread::id, std::shared_ptr<Thread>> threadsByStdId;
 	std::unordered_set<std::string> functionBreakpoints;
 	std::unordered_map<std::string, std::vector<int>> pendingBreakpoints;
@@ -154,22 +151,22 @@
 	return Lock(this);
 }
 
-void Context::Impl::addListener(EventListener* l)
+void Context::Impl::addListener(EventListener *l)
 {
 	broadcaster.add(l);
 }
 
-void Context::Impl::removeListener(EventListener* l)
+void Context::Impl::removeListener(EventListener *l)
 {
 	broadcaster.remove(l);
 }
 
-EventListener* Context::Impl::broadcast()
+EventListener *Context::Impl::broadcast()
 {
 	return &broadcaster;
 }
 
-void Context::Impl::addFile(const std::shared_ptr<File>& file)
+void Context::Impl::addFile(const std::shared_ptr<File> &file)
 {
 	files.add(file->id, file);
 
@@ -194,14 +191,14 @@
 ////////////////////////////////////////////////////////////////////////////////
 // Context::Lock
 ////////////////////////////////////////////////////////////////////////////////
-Context::Lock::Lock(Impl* ctx) :
-    ctx(ctx)
+Context::Lock::Lock(Impl *ctx)
+    : ctx(ctx)
 {
 	ctx->mutex.lock();
 }
 
-Context::Lock::Lock(Lock&& o) :
-    ctx(o.ctx)
+Context::Lock::Lock(Lock &&o)
+    : ctx(o.ctx)
 {
 	o.ctx = nullptr;
 }
@@ -211,7 +208,7 @@
 	unlock();
 }
 
-Context::Lock& Context::Lock::operator=(Lock&& o)
+Context::Lock &Context::Lock::operator=(Lock &&o)
 {
 	ctx = o.ctx;
 	o.ctx = nullptr;
@@ -264,15 +261,15 @@
 	return out;
 }
 
-std::shared_ptr<File> Context::Lock::createVirtualFile(const std::string& name,
-                                                       const std::string& source)
+std::shared_ptr<File> Context::Lock::createVirtualFile(const std::string &name,
+                                                       const std::string &source)
 {
 	auto file = File::createVirtual(ctx->nextFileID++, name, source);
 	ctx->addFile(file);
 	return file;
 }
 
-std::shared_ptr<File> Context::Lock::createPhysicalFile(const std::string& path)
+std::shared_ptr<File> Context::Lock::createPhysicalFile(const std::string &path)
 {
 	auto file = File::createPhysical(ctx->nextFileID++, path);
 	ctx->addFile(file);
@@ -296,7 +293,7 @@
 }
 
 std::shared_ptr<Frame> Context::Lock::createFrame(
-    const std::shared_ptr<File>& file)
+    const std::shared_ptr<File> &file)
 {
 	auto frame = std::make_shared<Frame>(ctx->nextFrameID++);
 	ctx->frames.add(frame->id, frame);
@@ -313,7 +310,7 @@
 }
 
 std::shared_ptr<Scope> Context::Lock::createScope(
-    const std::shared_ptr<File>& file)
+    const std::shared_ptr<File> &file)
 {
 	auto scope = std::make_shared<Scope>(ctx->nextScopeID++, file, createVariableContainer());
 	ctx->scopes.add(scope->id, scope);
@@ -337,17 +334,17 @@
 	return ctx->variableContainers.get(id);
 }
 
-void Context::Lock::addFunctionBreakpoint(const std::string& name)
+void Context::Lock::addFunctionBreakpoint(const std::string &name)
 {
 	ctx->functionBreakpoints.emplace(name);
 }
 
-void Context::Lock::addPendingBreakpoints(const std::string& filename, const std::vector<int>& lines)
+void Context::Lock::addPendingBreakpoints(const std::string &filename, const std::vector<int> &lines)
 {
 	ctx->pendingBreakpoints.emplace(filename, lines);
 }
 
-bool Context::Lock::isFunctionBreakpoint(const std::string& name)
+bool Context::Lock::isFunctionBreakpoint(const std::string &name)
 {
 	return ctx->functionBreakpoints.count(name) > 0;
 }
diff --git a/src/Vulkan/Debug/Context.hpp b/src/Vulkan/Debug/Context.hpp
index dea6545..790587d 100644
--- a/src/Vulkan/Debug/Context.hpp
+++ b/src/Vulkan/Debug/Context.hpp
@@ -21,10 +21,8 @@
 #include <string>
 #include <vector>
 
-namespace vk
-{
-namespace dbg
-{
+namespace vk {
+namespace dbg {
 
 // Forward declarations.
 class Thread;
@@ -50,12 +48,12 @@
 	class Lock
 	{
 	public:
-		Lock(Impl*);
-		Lock(Lock&&);
+		Lock(Impl *);
+		Lock(Lock &&);
 		~Lock();
 
 		// move-assignment operator.
-		Lock& operator=(Lock&&);
+		Lock &operator=(Lock &&);
 
 		// unlock() explicitly unlocks before the Lock destructor is called.
 		// It is illegal to call any other methods after calling unlock().
@@ -77,12 +75,12 @@
 		// filesystem.
 		// name is the unique name of the file.
 		// source is the content of the file.
-		std::shared_ptr<File> createVirtualFile(const std::string& name,
-		                                        const std::string& source);
+		std::shared_ptr<File> createVirtualFile(const std::string &name,
+		                                        const std::string &source);
 
 		// createPhysicalFile() returns a new file that is backed by the file
 		// at path.
-		std::shared_ptr<File> createPhysicalFile(const std::string& path);
+		std::shared_ptr<File> createPhysicalFile(const std::string &path);
 
 		// get() returns the file with the given ID, or null if the file
 		// does not exist or no longer has any external shared_ptr references.
@@ -93,7 +91,7 @@
 
 		// createFrame() returns a new frame for the given file.
 		std::shared_ptr<Frame> createFrame(
-		    const std::shared_ptr<File>& file);
+		    const std::shared_ptr<File> &file);
 
 		// get() returns the frame with the given ID, or null if the frame
 		// does not exist or no longer has any external shared_ptr references.
@@ -101,7 +99,7 @@
 
 		// createScope() returns a new scope for the given file.
 		std::shared_ptr<Scope> createScope(
-		    const std::shared_ptr<File>& file);
+		    const std::shared_ptr<File> &file);
 
 		// get() returns the scope with the given ID, or null if the scope
 		// does not exist.
@@ -117,21 +115,21 @@
 
 		// addFunctionBreakpoint() adds a breakpoint to the start of the
 		// function with the given name.
-		void addFunctionBreakpoint(const std::string& name);
+		void addFunctionBreakpoint(const std::string &name);
 
 		// addPendingBreakpoints() adds a number of breakpoints to the file with
 		// the given name which has not yet been created with a call to
 		// createVirtualFile() or createPhysicalFile().
-		void addPendingBreakpoints(const std::string& name, const std::vector<int>& lines);
+		void addPendingBreakpoints(const std::string &name, const std::vector<int> &lines);
 
 		// isFunctionBreakpoint() returns true if the function with the given
 		// name has a function breakpoint set.
-		bool isFunctionBreakpoint(const std::string& name);
+		bool isFunctionBreakpoint(const std::string &name);
 
 	private:
-		Lock(const Lock&) = delete;
-		Lock& operator=(const Lock&) = delete;
-		Impl* ctx;
+		Lock(const Lock &) = delete;
+		Lock &operator=(const Lock &) = delete;
+		Impl *ctx;
 	};
 
 	// create() creates and returns a new Context.
@@ -144,15 +142,15 @@
 	virtual Lock lock() = 0;
 
 	// addListener() registers an EventListener for event notifications.
-	virtual void addListener(EventListener*) = 0;
+	virtual void addListener(EventListener *) = 0;
 
 	// removeListener() unregisters an EventListener that was previously
 	// registered by a call to addListener().
-	virtual void removeListener(EventListener*) = 0;
+	virtual void removeListener(EventListener *) = 0;
 
 	// broadcast() returns an EventListener that will broadcast all methods on
 	// to all registered EventListeners.
-	virtual EventListener* broadcast() = 0;
+	virtual EventListener *broadcast() = 0;
 };
 
 }  // namespace dbg
diff --git a/src/Vulkan/Debug/Debug.cpp b/src/Vulkan/Debug/Debug.cpp
index 4976fba..0f80d84 100644
--- a/src/Vulkan/Debug/Debug.cpp
+++ b/src/Vulkan/Debug/Debug.cpp
@@ -13,5 +13,5 @@
 // limitations under the License.
 
 #ifndef ENABLE_VK_DEBUGGER
-#error "Source files in {SwiftShader}/src/Vulkan/Debug should not be built unless ENABLE_VK_DEBUGGER is defined"
-#endif // ENABLE_VK_DEBUGGER
+#	error "Source files in {SwiftShader}/src/Vulkan/Debug should not be built unless ENABLE_VK_DEBUGGER is defined"
+#endif  // ENABLE_VK_DEBUGGER
diff --git a/src/Vulkan/Debug/File.cpp b/src/Vulkan/Debug/File.cpp
index 8fc0c54..9a3094e 100644
--- a/src/Vulkan/Debug/File.cpp
+++ b/src/Vulkan/Debug/File.cpp
@@ -37,8 +37,9 @@
 	std::unordered_set<int> breakpoints;  // guarded by breakpointMutex
 };
 
-FileBase::FileBase(ID id, std::string dir, std::string name, std::string source) :
-    File(id, std::move(dir), std::move(name), std::move(source)) {}
+FileBase::FileBase(ID id, std::string dir, std::string name, std::string source)
+    : File(id, std::move(dir), std::move(name), std::move(source))
+{}
 
 void FileBase::clearBreakpoints()
 {
@@ -71,8 +72,9 @@
 private:
 };
 
-VirtualFile::VirtualFile(ID id, std::string name, std::string source) :
-    FileBase(id, "", std::move(name), std::move(source)) {}
+VirtualFile::VirtualFile(ID id, std::string name, std::string source)
+    : FileBase(id, "", std::move(name), std::move(source))
+{}
 
 bool VirtualFile::isVirtual() const
 {
@@ -93,8 +95,9 @@
 
 PhysicalFile::PhysicalFile(ID id,
                            std::string dir,
-                           std::string name) :
-    FileBase(id, std::move(dir), std::move(name), "") {}
+                           std::string name)
+    : FileBase(id, std::move(dir), std::move(name), "")
+{}
 
 bool PhysicalFile::isVirtual() const
 {
diff --git a/src/Vulkan/Debug/File.hpp b/src/Vulkan/Debug/File.hpp
index 9472c6b..342d512 100644
--- a/src/Vulkan/Debug/File.hpp
+++ b/src/Vulkan/Debug/File.hpp
@@ -76,11 +76,12 @@
 	inline File(ID id, std::string dir, std::string name, std::string source);
 };
 
-File::File(ID id, std::string dir, std::string name, std::string source) :
-    id(std::move(id)),
-    dir(std::move(dir)),
-    name(std::move(name)),
-    source(source) {}
+File::File(ID id, std::string dir, std::string name, std::string source)
+    : id(std::move(id))
+    , dir(std::move(dir))
+    , name(std::move(name))
+    , source(source)
+{}
 
 std::string File::path() const
 {
diff --git a/src/Vulkan/Debug/ID.hpp b/src/Vulkan/Debug/ID.hpp
index 29ebd2e..b818b98 100644
--- a/src/Vulkan/Debug/ID.hpp
+++ b/src/Vulkan/Debug/ID.hpp
@@ -25,17 +25,19 @@
 // ID; instead it is used to prevent implicit casts between identifiers of
 // different T types.
 // IDs are typically used as a map key to value of type T.
-template <typename T>
+template<typename T>
 class ID
 {
 public:
-	inline ID() :
-	    id(0) {}
-	inline ID(int id) :
-	    id(id) {}
-	inline bool operator==(const ID<T>& rhs) const { return id == rhs.id; }
-	inline bool operator!=(const ID<T>& rhs) const { return id != rhs.id; }
-	inline bool operator<(const ID<T>& rhs) const { return id < rhs.id; }
+	inline ID()
+	    : id(0)
+	{}
+	inline ID(int id)
+	    : id(id)
+	{}
+	inline bool operator==(const ID<T> &rhs) const { return id == rhs.id; }
+	inline bool operator!=(const ID<T> &rhs) const { return id != rhs.id; }
+	inline bool operator<(const ID<T> &rhs) const { return id < rhs.id; }
 	inline ID operator++() { return ID(++id); }
 	inline ID operator++(int) { return ID(id++); }
 
@@ -52,10 +54,10 @@
 namespace std {
 
 // std::hash implementation for vk::dbg::ID<T>
-template <typename T>
+template<typename T>
 struct hash<vk::dbg::ID<T> >
 {
-	std::size_t operator()(const vk::dbg::ID<T>& id) const noexcept
+	std::size_t operator()(const vk::dbg::ID<T> &id) const noexcept
 	{
 		return std::hash<int>()(id.value());
 	}
diff --git a/src/Vulkan/Debug/Location.hpp b/src/Vulkan/Debug/Location.hpp
index 9b3d883..3990472 100644
--- a/src/Vulkan/Debug/Location.hpp
+++ b/src/Vulkan/Debug/Location.hpp
@@ -26,15 +26,16 @@
 struct Location
 {
 	Location() = default;
-	inline Location(int line, const std::shared_ptr<File>& file);
+	inline Location(int line, const std::shared_ptr<File> &file);
 
-	int line = 0; // 1 based. 0 represents no line.
+	int line = 0;  // 1 based. 0 represents no line.
 	std::shared_ptr<File> file;
 };
 
-Location::Location(int line, const std::shared_ptr<File>& file) :
-    line(line),
-    file(file) {}
+Location::Location(int line, const std::shared_ptr<File> &file)
+    : line(line)
+    , file(file)
+{}
 
 }  // namespace dbg
 }  // namespace vk
diff --git a/src/Vulkan/Debug/Server.cpp b/src/Vulkan/Debug/Server.cpp
index 05d7714..244f5fc 100644
--- a/src/Vulkan/Debug/Server.cpp
+++ b/src/Vulkan/Debug/Server.cpp
@@ -40,15 +40,13 @@
 		} while(false)
 #endif
 
-namespace vk
-{
-namespace dbg
-{
+namespace vk {
+namespace dbg {
 
 class Server::Impl : public Server, public EventListener
 {
 public:
-	Impl(const std::shared_ptr<Context>& ctx, int port);
+	Impl(const std::shared_ptr<Context> &ctx, int port);
 	~Impl();
 
 	// EventListener
@@ -57,9 +55,9 @@
 	void onLineBreakpointHit(ID<Thread>) override;
 	void onFunctionBreakpointHit(ID<Thread>) override;
 
-	dap::Scope scope(const char* type, Scope*);
-	dap::Source source(File*);
-	std::shared_ptr<File> file(const dap::Source& source);
+	dap::Scope scope(const char *type, Scope *);
+	dap::Source source(File *);
+	std::shared_ptr<File> file(const dap::Source &source);
 
 	const std::shared_ptr<Context> ctx;
 	const std::unique_ptr<dap::net::Server> server;
@@ -67,17 +65,17 @@
 	std::atomic<bool> clientIsVisualStudio = { false };
 };
 
-Server::Impl::Impl(const std::shared_ptr<Context>& context, int port) :
-    ctx(context),
-    server(dap::net::Server::create()),
-    session(dap::Session::create())
+Server::Impl::Impl(const std::shared_ptr<Context> &context, int port)
+    : ctx(context)
+    , server(dap::net::Server::create())
+    , session(dap::Session::create())
 {
-	session->registerHandler([](const dap::DisconnectRequest& req) {
+	session->registerHandler([](const dap::DisconnectRequest &req) {
 		DAP_LOG("DisconnectRequest receieved");
 		return dap::DisconnectResponse();
 	});
 
-	session->registerHandler([&](const dap::InitializeRequest& req) {
+	session->registerHandler([&](const dap::InitializeRequest &req) {
 		DAP_LOG("InitializeRequest receieved");
 		dap::InitializeResponse response;
 		response.supportsFunctionBreakpoints = true;
@@ -88,23 +86,23 @@
 	});
 
 	session->registerSentHandler(
-	    [&](const dap::ResponseOrError<dap::InitializeResponse>& response) {
+	    [&](const dap::ResponseOrError<dap::InitializeResponse> &response) {
 		    DAP_LOG("InitializeResponse sent");
 		    session->send(dap::InitializedEvent());
 	    });
 
-	session->registerHandler([](const dap::SetExceptionBreakpointsRequest& req) {
+	session->registerHandler([](const dap::SetExceptionBreakpointsRequest &req) {
 		DAP_LOG("SetExceptionBreakpointsRequest receieved");
 		dap::SetExceptionBreakpointsResponse response;
 		return response;
 	});
 
 	session->registerHandler(
-	    [this](const dap::SetFunctionBreakpointsRequest& req) {
+	    [this](const dap::SetFunctionBreakpointsRequest &req) {
 		    DAP_LOG("SetFunctionBreakpointsRequest receieved");
 		    auto lock = ctx->lock();
 		    dap::SetFunctionBreakpointsResponse response;
-		    for(auto const& bp : req.breakpoints)
+		    for(auto const &bp : req.breakpoints)
 		    {
 			    lock.addFunctionBreakpoint(bp.name.c_str());
 			    response.breakpoints.push_back({});
@@ -113,7 +111,7 @@
 	    });
 
 	session->registerHandler(
-	    [this](const dap::SetBreakpointsRequest& req)
+	    [this](const dap::SetBreakpointsRequest &req)
 	        -> dap::ResponseOrError<dap::SetBreakpointsResponse> {
 		    DAP_LOG("SetBreakpointsRequest receieved");
 		    bool verified = false;
@@ -121,12 +119,12 @@
 		    size_t numBreakpoints = 0;
 		    if(req.breakpoints.has_value())
 		    {
-			    auto const& breakpoints = req.breakpoints.value();
+			    auto const &breakpoints = req.breakpoints.value();
 			    numBreakpoints = breakpoints.size();
 			    if(auto file = this->file(req.source))
 			    {
 				    file->clearBreakpoints();
-				    for(auto const& bp : breakpoints)
+				    for(auto const &bp : breakpoints)
 				    {
 					    file->addBreakpoint(bp.line);
 				    }
@@ -136,7 +134,7 @@
 			    {
 				    std::vector<int> lines;
 				    lines.reserve(breakpoints.size());
-				    for(auto const& bp : breakpoints)
+				    for(auto const &bp : breakpoints)
 				    {
 					    lines.push_back(bp.line);
 				    }
@@ -156,7 +154,7 @@
 		    return response;
 	    });
 
-	session->registerHandler([this](const dap::ThreadsRequest& req) {
+	session->registerHandler([this](const dap::ThreadsRequest &req) {
 		DAP_LOG("ThreadsRequest receieved");
 		auto lock = ctx->lock();
 		dap::ThreadsResponse response;
@@ -184,7 +182,7 @@
 	});
 
 	session->registerHandler(
-	    [this](const dap::StackTraceRequest& req)
+	    [this](const dap::StackTraceRequest &req)
 	        -> dap::ResponseOrError<dap::StackTraceResponse> {
 		    DAP_LOG("StackTraceRequest receieved");
 
@@ -200,9 +198,9 @@
 		    dap::StackTraceResponse response;
 		    response.totalFrames = stack.size();
 		    response.stackFrames.reserve(stack.size());
-		    for(auto const& frame : stack)
+		    for(auto const &frame : stack)
 		    {
-			    auto const& loc = frame.location;
+			    auto const &loc = frame.location;
 			    dap::StackFrame sf;
 			    sf.column = 0;
 			    sf.id = frame.id.value();
@@ -217,7 +215,7 @@
 		    return response;
 	    });
 
-	session->registerHandler([this](const dap::ScopesRequest& req)
+	session->registerHandler([this](const dap::ScopesRequest &req)
 	                             -> dap::ResponseOrError<dap::ScopesResponse> {
 		DAP_LOG("ScopesRequest receieved");
 
@@ -237,7 +235,7 @@
 		return response;
 	});
 
-	session->registerHandler([this](const dap::VariablesRequest& req)
+	session->registerHandler([this](const dap::VariablesRequest &req)
 	                             -> dap::ResponseOrError<dap::VariablesResponse> {
 		DAP_LOG("VariablesRequest receieved");
 
@@ -250,7 +248,7 @@
 		}
 
 		dap::VariablesResponse response;
-		vars->foreach(req.start.value(0), [&](const Variable& v) {
+		vars->foreach(req.start.value(0), [&](const Variable &v) {
 			if(!req.count.has_value() ||
 			   req.count.value() < int(response.variables.size()))
 			{
@@ -261,7 +259,7 @@
 				out.value = v.value->string();
 				if(v.value->type()->kind == Kind::VariableContainer)
 				{
-					auto const vc = static_cast<const VariableContainer*>(v.value.get());
+					auto const vc = static_cast<const VariableContainer *>(v.value.get());
 					out.variablesReference = vc->id.value();
 				}
 				response.variables.push_back(out);
@@ -270,7 +268,7 @@
 		return response;
 	});
 
-	session->registerHandler([this](const dap::SourceRequest& req)
+	session->registerHandler([this](const dap::SourceRequest &req)
 	                             -> dap::ResponseOrError<dap::SourceResponse> {
 		DAP_LOG("SourceRequest receieved");
 
@@ -287,7 +285,7 @@
 		return response;
 	});
 
-	session->registerHandler([this](const dap::PauseRequest& req)
+	session->registerHandler([this](const dap::PauseRequest &req)
 	                             -> dap::ResponseOrError<dap::PauseResponse> {
 		DAP_LOG("PauseRequest receieved");
 
@@ -327,7 +325,7 @@
 		return response;
 	});
 
-	session->registerHandler([this](const dap::ContinueRequest& req)
+	session->registerHandler([this](const dap::ContinueRequest &req)
 	                             -> dap::ResponseOrError<dap::ContinueResponse> {
 		DAP_LOG("ContinueRequest receieved");
 
@@ -351,7 +349,7 @@
 		return response;
 	});
 
-	session->registerHandler([this](const dap::NextRequest& req)
+	session->registerHandler([this](const dap::NextRequest &req)
 	                             -> dap::ResponseOrError<dap::NextResponse> {
 		DAP_LOG("NextRequest receieved");
 
@@ -366,7 +364,7 @@
 		return dap::NextResponse();
 	});
 
-	session->registerHandler([this](const dap::StepInRequest& req)
+	session->registerHandler([this](const dap::StepInRequest &req)
 	                             -> dap::ResponseOrError<dap::StepInResponse> {
 		DAP_LOG("StepInRequest receieved");
 
@@ -381,7 +379,7 @@
 		return dap::StepInResponse();
 	});
 
-	session->registerHandler([this](const dap::StepOutRequest& req)
+	session->registerHandler([this](const dap::StepOutRequest &req)
 	                             -> dap::ResponseOrError<dap::StepOutResponse> {
 		DAP_LOG("StepOutRequest receieved");
 
@@ -396,7 +394,7 @@
 		return dap::StepOutResponse();
 	});
 
-	session->registerHandler([this](const dap::EvaluateRequest& req)
+	session->registerHandler([this](const dap::EvaluateRequest &req)
 	                             -> dap::ResponseOrError<dap::EvaluateResponse> {
 		DAP_LOG("EvaluateRequest receieved");
 
@@ -426,7 +424,7 @@
 			}
 
 			dap::EvaluateResponse response;
-			auto findHandler = [&](const Variable& var) {
+			auto findHandler = [&](const Variable &var) {
 				response.result = var.value->string(fmt);
 				response.type = var.value->type()->string();
 			};
@@ -438,7 +436,7 @@
 				frame->hovers->variables,
 			};
 
-			for(auto const& vars : variables)
+			for(auto const &vars : variables)
 			{
 				if(vars->find(req.expression, findHandler)) { return response; }
 			}
@@ -447,7 +445,7 @@
 			// TODO: This might be a configuration problem of the SPIRV-Tools
 			// spirv-ls plugin. Investigate.
 			auto withPercent = "%" + req.expression;
-			for(auto const& vars : variables)
+			for(auto const &vars : variables)
 			{
 				if(vars->find(withPercent, findHandler)) { return response; }
 			}
@@ -456,20 +454,20 @@
 		return dap::Error("Could not evaluate expression");
 	});
 
-	session->registerHandler([](const dap::LaunchRequest& req) {
+	session->registerHandler([](const dap::LaunchRequest &req) {
 		DAP_LOG("LaunchRequest receieved");
 		return dap::LaunchResponse();
 	});
 
 	marl::WaitGroup configurationDone(1);
-	session->registerHandler([=](const dap::ConfigurationDoneRequest& req) {
+	session->registerHandler([=](const dap::ConfigurationDoneRequest &req) {
 		DAP_LOG("ConfigurationDoneRequest receieved");
 		configurationDone.done();
 		return dap::ConfigurationDoneResponse();
 	});
 
 	DAP_LOG("Waiting for debugger connection...");
-	server->start(port, [&](const std::shared_ptr<dap::ReaderWriter>& rw) {
+	server->start(port, [&](const std::shared_ptr<dap::ReaderWriter> &rw) {
 		session->bind(rw);
 		ctx->addListener(this);
 	});
@@ -514,7 +512,7 @@
 	session->send(event);
 }
 
-dap::Scope Server::Impl::scope(const char* type, Scope* s)
+dap::Scope Server::Impl::scope(const char *type, Scope *s)
 {
 	dap::Scope out;
 	// out.line = s->startLine;
@@ -526,7 +524,7 @@
 	return out;
 }
 
-dap::Source Server::Impl::source(File* file)
+dap::Source Server::Impl::source(File *file)
 {
 	dap::Source out;
 	out.name = file->name;
@@ -541,7 +539,7 @@
 	return out;
 }
 
-std::shared_ptr<File> Server::Impl::file(const dap::Source& source)
+std::shared_ptr<File> Server::Impl::file(const dap::Source &source)
 {
 	auto lock = ctx->lock();
 	if(source.sourceReference.has_value())
@@ -587,7 +585,7 @@
 	return nullptr;
 }
 
-std::shared_ptr<Server> Server::create(const std::shared_ptr<Context>& ctx, int port)
+std::shared_ptr<Server> Server::create(const std::shared_ptr<Context> &ctx, int port)
 {
 	return std::make_shared<Server::Impl>(ctx, port);
 }
diff --git a/src/Vulkan/Debug/Server.hpp b/src/Vulkan/Debug/Server.hpp
index dead78e..802cb61 100644
--- a/src/Vulkan/Debug/Server.hpp
+++ b/src/Vulkan/Debug/Server.hpp
@@ -17,10 +17,8 @@
 
 #include <memory>
 
-namespace vk
-{
-namespace dbg
-{
+namespace vk {
+namespace dbg {
 
 class Context;
 
@@ -29,7 +27,7 @@
 class Server
 {
 public:
-	static std::shared_ptr<Server> create(const std::shared_ptr<Context>&, int port);
+	static std::shared_ptr<Server> create(const std::shared_ptr<Context> &, int port);
 
 	virtual ~Server() = default;
 
diff --git a/src/Vulkan/Debug/Thread.cpp b/src/Vulkan/Debug/Thread.cpp
index b2c0088..a542e37 100644
--- a/src/Vulkan/Debug/Thread.cpp
+++ b/src/Vulkan/Debug/Thread.cpp
@@ -21,11 +21,12 @@
 namespace vk {
 namespace dbg {
 
-Thread::Thread(ID id, Context* ctx) :
-    id(id),
-    broadcast(ctx->broadcast()) {}
+Thread::Thread(ID id, Context *ctx)
+    : id(id)
+    , broadcast(ctx->broadcast())
+{}
 
-void Thread::setName(const std::string& name)
+void Thread::setName(const std::string &name)
 {
 	std::unique_lock<std::mutex> lock(mutex);
 	name_ = name;
@@ -37,7 +38,7 @@
 	return name_;
 }
 
-void Thread::update(const Location& location)
+void Thread::update(const Location &location)
 {
 	std::unique_lock<std::mutex> lock(mutex);
 	frames.back()->location = location;
@@ -53,30 +54,30 @@
 
 	switch(state_)
 	{
-	case State::Paused:
-	{
-		stateCV.wait(lock, [this] { return state_ != State::Paused; });
-		break;
-	}
-
-	case State::Stepping:
-	{
-		if(!pauseAtFrame || pauseAtFrame == frames.back())
+		case State::Paused:
 		{
-			broadcast->onThreadStepped(id);
-			state_ = State::Paused;
 			stateCV.wait(lock, [this] { return state_ != State::Paused; });
-			pauseAtFrame = 0;
+			break;
 		}
-		break;
-	}
 
-	case State::Running:
-		break;
+		case State::Stepping:
+		{
+			if(!pauseAtFrame || pauseAtFrame == frames.back())
+			{
+				broadcast->onThreadStepped(id);
+				state_ = State::Paused;
+				stateCV.wait(lock, [this] { return state_ != State::Paused; });
+				pauseAtFrame = 0;
+			}
+			break;
+		}
+
+		case State::Running:
+			break;
 	}
 }
 
-void Thread::enter(Context::Lock& ctxlck, const std::shared_ptr<File>& file, const std::string& function)
+void Thread::enter(Context::Lock &ctxlck, const std::shared_ptr<File> &file, const std::string &function)
 {
 	auto frame = ctxlck.createFrame(file);
 	auto isFunctionBreakpoint = ctxlck.isFunctionBreakpoint(function);
diff --git a/src/Vulkan/Debug/Thread.hpp b/src/Vulkan/Debug/Thread.hpp
index a2b63a9..77d7425 100644
--- a/src/Vulkan/Debug/Thread.hpp
+++ b/src/Vulkan/Debug/Thread.hpp
@@ -41,8 +41,8 @@
 	using ID = dbg::ID<Scope>;
 
 	inline Scope(ID id,
-	             const std::shared_ptr<File>& file,
-	             const std::shared_ptr<VariableContainer>& variables);
+	             const std::shared_ptr<File> &file,
+	             const std::shared_ptr<VariableContainer> &variables);
 
 	// The unique identifier of the scope.
 	const ID id;
@@ -55,11 +55,12 @@
 };
 
 Scope::Scope(ID id,
-             const std::shared_ptr<File>& file,
-             const std::shared_ptr<VariableContainer>& variables) :
-    id(id),
-    file(file),
-    variables(variables) {}
+             const std::shared_ptr<File> &file,
+             const std::shared_ptr<VariableContainer> &variables)
+    : id(id)
+    , file(file)
+    , variables(variables)
+{}
 
 // Frame holds a number of variable scopes for one of a thread's stack frame,
 // and is used to provide source data for the DAP 'StackFrame' type:
@@ -93,8 +94,9 @@
 	std::shared_ptr<Scope> hovers;
 };
 
-Frame::Frame(ID id) :
-    id(id) {}
+Frame::Frame(ID id)
+    : id(id)
+{}
 
 // Thread holds the state for a single thread of execution.
 class Thread
@@ -110,17 +112,17 @@
 		Paused     // Thread is currently paused.
 	};
 
-	Thread(ID id, Context* ctx);
+	Thread(ID id, Context *ctx);
 
 	// setName() sets the name of the thread.
-	void setName(const std::string&);
+	void setName(const std::string &);
 
 	// name() returns the name of the thread.
 	std::string name() const;
 
 	// enter() pushes the thread's stack with a new frame created with the given
 	// file and function.
-	void enter(Context::Lock& lock, const std::shared_ptr<File>& file, const std::string& function);
+	void enter(Context::Lock &lock, const std::shared_ptr<File> &file, const std::string &function);
 
 	// exit() pops the thread's stack frame.
 	void exit();
@@ -170,13 +172,13 @@
 
 	// update() updates the current stack frame's location, and potentially
 	// blocks until the thread is resumed with one of the methods above.
-	void update(const Location& location);
+	void update(const Location &location);
 
 	// The unique identifier of the thread.
 	const ID id;
 
 private:
-	EventListener* const broadcast;
+	EventListener *const broadcast;
 
 	mutable std::mutex mutex;
 	std::string name_;
diff --git a/src/Vulkan/Debug/Type.cpp b/src/Vulkan/Debug/Type.cpp
index f4049c3..c3e0ab4 100644
--- a/src/Vulkan/Debug/Type.cpp
+++ b/src/Vulkan/Debug/Type.cpp
@@ -36,32 +36,32 @@
 {
 	switch(kind)
 	{
-	case Kind::Bool:
-		return "bool";
-	case Kind::U8:
-		return "uint8_t";
-	case Kind::S8:
-		return "int8_t";
-	case Kind::U16:
-		return "uint16_t";
-	case Kind::S16:
-		return "int16_t";
-	case Kind::F32:
-		return "float";
-	case Kind::U32:
-		return "uint32_t";
-	case Kind::S32:
-		return "int32_t";
-	case Kind::F64:
-		return "double";
-	case Kind::U64:
-		return "uint64_t";
-	case Kind::S64:
-		return "int64_t";
-	case Kind::Ptr:
-		return elem->string() + "*";
-	case Kind::VariableContainer:
-		return "struct";
+		case Kind::Bool:
+			return "bool";
+		case Kind::U8:
+			return "uint8_t";
+		case Kind::S8:
+			return "int8_t";
+		case Kind::U16:
+			return "uint16_t";
+		case Kind::S16:
+			return "int16_t";
+		case Kind::F32:
+			return "float";
+		case Kind::U32:
+			return "uint32_t";
+		case Kind::S32:
+			return "int32_t";
+		case Kind::F64:
+			return "double";
+		case Kind::U64:
+			return "uint64_t";
+		case Kind::S64:
+			return "int64_t";
+		case Kind::Ptr:
+			return elem->string() + "*";
+		case Kind::VariableContainer:
+			return "struct";
 	}
 	return "";
 }
diff --git a/src/Vulkan/Debug/Type.hpp b/src/Vulkan/Debug/Type.hpp
index 07ceaf1..4b71a71 100644
--- a/src/Vulkan/Debug/Type.hpp
+++ b/src/Vulkan/Debug/Type.hpp
@@ -50,7 +50,7 @@
 public:
 	Type() = default;
 	inline Type(Kind kind);
-	inline Type(Kind kind, const std::shared_ptr<const Type>& elem);
+	inline Type(Kind kind, const std::shared_ptr<const Type> &elem);
 
 	// string() returns a string representation of the type.
 	std::string string() const;
@@ -59,12 +59,14 @@
 	const std::shared_ptr<const Type> elem;  // Element type of pointer.
 };
 
-Type::Type(Kind kind) :
-    kind(kind) {}
+Type::Type(Kind kind)
+    : kind(kind)
+{}
 
-Type::Type(Kind kind, const std::shared_ptr<const Type>& elem) :
-    kind(kind),
-    elem(elem) {}
+Type::Type(Kind kind, const std::shared_ptr<const Type> &elem)
+    : kind(kind)
+    , elem(elem)
+{}
 
 // clang-format off
 template <typename T> struct TypeOf;
diff --git a/src/Vulkan/Debug/Value.cpp b/src/Vulkan/Debug/Value.cpp
index 788e1db..7aa111d 100644
--- a/src/Vulkan/Debug/Value.cpp
+++ b/src/Vulkan/Debug/Value.cpp
@@ -12,66 +12,66 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "Type.hpp"
 #include "Value.hpp"
+#include "Type.hpp"
 #include "Variable.hpp"
 
 namespace vk {
 namespace dbg {
 
 const FormatFlags FormatFlags::Default = {
-	"[",                   // listPrefix
-	"]",                   // listSuffix
-	", ",                  // listDelimiter
-	"",                    // listIndent
-	&FormatFlags::Default, // subListFmt
+	"[",                    // listPrefix
+	"]",                    // listSuffix
+	", ",                   // listDelimiter
+	"",                     // listIndent
+	&FormatFlags::Default,  // subListFmt
 };
 
-std::string Value::string(const FormatFlags& fmt /* = FormatFlags::Default */) const
+std::string Value::string(const FormatFlags &fmt /* = FormatFlags::Default */) const
 {
 	switch(type()->kind)
 	{
-	case Kind::Bool:
-		return *reinterpret_cast<const bool*>(get()) ? "true" : "false";
-	case Kind::U8:
-		return std::to_string(*reinterpret_cast<const uint8_t*>(get()));
-	case Kind::S8:
-		return std::to_string(*reinterpret_cast<const int8_t*>(get()));
-	case Kind::U16:
-		return std::to_string(*reinterpret_cast<const uint16_t*>(get()));
-	case Kind::S16:
-		return std::to_string(*reinterpret_cast<const int16_t*>(get()));
-	case Kind::F32:
-		return std::to_string(*reinterpret_cast<const float*>(get()));
-	case Kind::U32:
-		return std::to_string(*reinterpret_cast<const uint32_t*>(get()));
-	case Kind::S32:
-		return std::to_string(*reinterpret_cast<const int32_t*>(get()));
-	case Kind::F64:
-		return std::to_string(*reinterpret_cast<const double*>(get()));
-	case Kind::U64:
-		return std::to_string(*reinterpret_cast<const uint64_t*>(get()));
-	case Kind::S64:
-		return std::to_string(*reinterpret_cast<const int64_t*>(get()));
-	case Kind::Ptr:
-		return std::to_string(reinterpret_cast<uintptr_t>(get()));
-	case Kind::VariableContainer:
-	{
-		auto const* vc = static_cast<const VariableContainer*>(this);
-		std::string out = "";
-		auto subfmt = *fmt.subListFmt;
-		subfmt.listIndent = fmt.listIndent + fmt.subListFmt->listIndent;
-		bool first = true;
-		vc->foreach(0, [&](const Variable& var) {
-			if(!first) { out += fmt.listDelimiter; }
-			first = false;
-			out += fmt.listIndent;
-			out += var.name;
-			out += ": ";
-			out += var.value->string(subfmt);
-		});
-		return fmt.listPrefix + out + fmt.listSuffix;
-	}
+		case Kind::Bool:
+			return *reinterpret_cast<const bool *>(get()) ? "true" : "false";
+		case Kind::U8:
+			return std::to_string(*reinterpret_cast<const uint8_t *>(get()));
+		case Kind::S8:
+			return std::to_string(*reinterpret_cast<const int8_t *>(get()));
+		case Kind::U16:
+			return std::to_string(*reinterpret_cast<const uint16_t *>(get()));
+		case Kind::S16:
+			return std::to_string(*reinterpret_cast<const int16_t *>(get()));
+		case Kind::F32:
+			return std::to_string(*reinterpret_cast<const float *>(get()));
+		case Kind::U32:
+			return std::to_string(*reinterpret_cast<const uint32_t *>(get()));
+		case Kind::S32:
+			return std::to_string(*reinterpret_cast<const int32_t *>(get()));
+		case Kind::F64:
+			return std::to_string(*reinterpret_cast<const double *>(get()));
+		case Kind::U64:
+			return std::to_string(*reinterpret_cast<const uint64_t *>(get()));
+		case Kind::S64:
+			return std::to_string(*reinterpret_cast<const int64_t *>(get()));
+		case Kind::Ptr:
+			return std::to_string(reinterpret_cast<uintptr_t>(get()));
+		case Kind::VariableContainer:
+		{
+			auto const *vc = static_cast<const VariableContainer *>(this);
+			std::string out = "";
+			auto subfmt = *fmt.subListFmt;
+			subfmt.listIndent = fmt.listIndent + fmt.subListFmt->listIndent;
+			bool first = true;
+			vc->foreach(0, [&](const Variable &var) {
+				if(!first) { out += fmt.listDelimiter; }
+				first = false;
+				out += fmt.listIndent;
+				out += var.name;
+				out += ": ";
+				out += var.value->string(subfmt);
+			});
+			return fmt.listPrefix + out + fmt.listSuffix;
+		}
 	}
 	return "";
 }
diff --git a/src/Vulkan/Debug/Value.hpp b/src/Vulkan/Debug/Value.hpp
index 69353d1..9e85ff2 100644
--- a/src/Vulkan/Debug/Value.hpp
+++ b/src/Vulkan/Debug/Value.hpp
@@ -33,7 +33,7 @@
 	std::string listSuffix;         // Suffix to lists.
 	std::string listDelimiter;      // List item delimiter.
 	std::string listIndent;         // List item indentation prefix.
-	const FormatFlags* subListFmt;  // Format used for list sub items.
+	const FormatFlags *subListFmt;  // Format used for list sub items.
 };
 
 // Value holds a value that can be read and possible written to.
@@ -47,51 +47,51 @@
 
 	// string() returns a string representation of the value using the specified
 	// FormatFlags.
-	virtual std::string string(const FormatFlags& = FormatFlags::Default) const;
+	virtual std::string string(const FormatFlags & = FormatFlags::Default) const;
 
 	// get() returns a pointer to the value.
-	virtual const void* get() const = 0;
+	virtual const void *get() const = 0;
 
 	// set() changes the value to a copy of the value at ptr.
 	// set() returns true if the value was changed, or false if the value cannot
 	// be set.
-	virtual bool set(void* ptr) { return false; }
+	virtual bool set(void *ptr) { return false; }
 };
 
 // Constant is an immutable value.
-template <typename T>
+template<typename T>
 class Constant : public Value
 {
 public:
-	inline Constant(const T& value);
+	inline Constant(const T &value);
 	inline std::shared_ptr<Type> type() const override;
-	inline const void* get() const override;
+	inline const void *get() const override;
 
 private:
 	const T value;
 };
 
-template <typename T>
-Constant<T>::Constant(const T& value) :
-    value(value)
+template<typename T>
+Constant<T>::Constant(const T &value)
+    : value(value)
 {
 }
 
-template <typename T>
+template<typename T>
 std::shared_ptr<Type> Constant<T>::type() const
 {
 	return TypeOf<T>::get();
 }
 
-template <typename T>
-const void* Constant<T>::get() const
+template<typename T>
+const void *Constant<T>::get() const
 {
 	return &value;
 }
 
 // make_constant() returns a shared_ptr to a Constant with the given value.
-template <typename T>
-inline std::shared_ptr<Constant<T>> make_constant(const T& value)
+template<typename T>
+inline std::shared_ptr<Constant<T>> make_constant(const T &value)
 {
 	return std::shared_ptr<Constant<T>>(new vk::dbg::Constant<T>(value));
 }
diff --git a/src/Vulkan/Debug/Variable.hpp b/src/Vulkan/Debug/Variable.hpp
index f24ca47..acb62a6 100644
--- a/src/Vulkan/Debug/Variable.hpp
+++ b/src/Vulkan/Debug/Variable.hpp
@@ -45,38 +45,39 @@
 
 	// foreach() calls cb with each of the variables in the container.
 	// F must be a function with the signature void(const Variable&).
-	template <typename F>
-	inline void foreach(size_t startIndex, const F& cb) const;
+	template<typename F>
+	inline void foreach(size_t startIndex, const F &cb) const;
 
 	// find() looks up the variable with the given name.
 	// If the variable with the given name is found, cb is called with the
 	// variable and find() returns true.
-	template <typename F>
-	inline bool find(const std::string& name, const F& cb) const;
+	template<typename F>
+	inline bool find(const std::string &name, const F &cb) const;
 
 	// put() places the variable var into the container.
-	inline void put(const Variable& var);
+	inline void put(const Variable &var);
 
 	// put() places the variable with the given name and value into the container.
-	inline void put(const std::string& name, const std::shared_ptr<Value>& value);
+	inline void put(const std::string &name, const std::shared_ptr<Value> &value);
 
 	// The unique identifier of the variable.
 	const ID id;
 
 private:
 	inline std::shared_ptr<Type> type() const override;
-	inline const void* get() const override;
+	inline const void *get() const override;
 
 	mutable std::mutex mutex;
 	std::vector<Variable> variables;
 	std::unordered_map<std::string, int> indices;
 };
 
-VariableContainer::VariableContainer(ID id) :
-    id(id) {}
+VariableContainer::VariableContainer(ID id)
+    : id(id)
+{}
 
-template <typename F>
-void VariableContainer::foreach(size_t startIndex, const F& cb) const
+template<typename F>
+void VariableContainer::foreach(size_t startIndex, const F &cb) const
 {
 	std::unique_lock<std::mutex> lock(mutex);
 	for(size_t i = startIndex; i < variables.size(); i++)
@@ -85,11 +86,11 @@
 	}
 }
 
-template <typename F>
-bool VariableContainer::find(const std::string& name, const F& cb) const
+template<typename F>
+bool VariableContainer::find(const std::string &name, const F &cb) const
 {
 	std::unique_lock<std::mutex> lock(mutex);
-	for(auto const& var : variables)
+	for(auto const &var : variables)
 	{
 		if(var.name == name)
 		{
@@ -100,7 +101,7 @@
 	return false;
 }
 
-void VariableContainer::put(const Variable& var)
+void VariableContainer::put(const Variable &var)
 {
 	std::unique_lock<std::mutex> lock(mutex);
 	auto it = indices.find(var.name);
@@ -115,8 +116,8 @@
 	}
 }
 
-void VariableContainer::put(const std::string& name,
-                            const std::shared_ptr<Value>& value)
+void VariableContainer::put(const std::string &name,
+                            const std::shared_ptr<Value> &value)
 {
 	put({ name, value });
 }
@@ -126,7 +127,7 @@
 	return TypeOf<VariableContainer>::get();
 }
 
-const void* VariableContainer::get() const
+const void *VariableContainer::get() const
 {
 	return nullptr;
 }
diff --git a/src/Vulkan/Debug/WeakMap.hpp b/src/Vulkan/Debug/WeakMap.hpp
index 0019c16..ef84c51 100644
--- a/src/Vulkan/Debug/WeakMap.hpp
+++ b/src/Vulkan/Debug/WeakMap.hpp
@@ -27,7 +27,7 @@
 // remaining std::shared_ptr<V> references.
 // WeakMap is not thread-safe and requires an external mutex when used by
 // multiple threads concurrently.
-template <typename K, typename V>
+template<typename K, typename V>
 class WeakMap
 {
 	using Map = std::map<K, std::weak_ptr<V>>;
@@ -37,10 +37,10 @@
 	class iterator
 	{
 	public:
-		inline iterator(const MapIterator& it, const MapIterator& end);
+		inline iterator(const MapIterator &it, const MapIterator &end);
 		inline void operator++();
-		inline bool operator==(const iterator&) const;
-		inline bool operator!=(const iterator&) const;
+		inline bool operator==(const iterator &) const;
+		inline bool operator!=(const iterator &) const;
 		inline std::pair<K, std::shared_ptr<V>> operator*() const;
 
 	private:
@@ -65,17 +65,17 @@
 	// get() returns the std::shared_ptr<V> value for the given key, or nullptr
 	// if the map does not contain the key, or the last remaining
 	// std::shared_ptr<V> reference to the value has been dropped.
-	inline std::shared_ptr<V> get(const K& key) const;
+	inline std::shared_ptr<V> get(const K &key) const;
 
 	// add() attempts to insert the key-value pair into the map.
 	// add() returns true if there was no existing entry with the given key,
 	// and the pair was added, otherwise false.
-	inline bool add(const K& key, const std::shared_ptr<V>& val);
+	inline bool add(const K &key, const std::shared_ptr<V> &val);
 
 	// remove() attempts to remove the entry with the given key from the map.
 	// remove() returns true if an entry with the given key existed and
 	// was removed, otherwise false.
-	inline bool remove(const K& key);
+	inline bool remove(const K &key);
 
 private:
 	// reap() removes any entries that have values with no external references.
@@ -85,22 +85,22 @@
 	size_t reapAtSize = 32;
 };
 
-template <typename K, typename V>
-WeakMap<K, V>::iterator::iterator(const MapIterator& it, const MapIterator& end) :
-    it(it),
-    end(end)
+template<typename K, typename V>
+WeakMap<K, V>::iterator::iterator(const MapIterator &it, const MapIterator &end)
+    : it(it)
+    , end(end)
 {
 	skipNull();
 }
 
-template <typename K, typename V>
+template<typename K, typename V>
 void WeakMap<K, V>::iterator::operator++()
 {
 	it++;
 	skipNull();
 }
 
-template <typename K, typename V>
+template<typename K, typename V>
 void WeakMap<K, V>::iterator::skipNull()
 {
 	for(; it != end; ++it)
@@ -115,51 +115,51 @@
 	}
 }
 
-template <typename K, typename V>
-bool WeakMap<K, V>::iterator::operator==(const iterator& rhs) const
+template<typename K, typename V>
+bool WeakMap<K, V>::iterator::operator==(const iterator &rhs) const
 {
 	return it == rhs.it;
 }
 
-template <typename K, typename V>
-bool WeakMap<K, V>::iterator::operator!=(const iterator& rhs) const
+template<typename K, typename V>
+bool WeakMap<K, V>::iterator::operator!=(const iterator &rhs) const
 {
 	return it != rhs.it;
 }
 
-template <typename K, typename V>
+template<typename K, typename V>
 std::pair<K, std::shared_ptr<V>> WeakMap<K, V>::iterator::operator*() const
 {
 	return { it->first, sptr };
 }
 
-template <typename K, typename V>
+template<typename K, typename V>
 typename WeakMap<K, V>::iterator WeakMap<K, V>::begin() const
 {
 	return iterator(map.begin(), map.end());
 }
 
-template <typename K, typename V>
+template<typename K, typename V>
 typename WeakMap<K, V>::iterator WeakMap<K, V>::end() const
 {
 	return iterator(map.end(), map.end());
 }
 
-template <typename K, typename V>
+template<typename K, typename V>
 size_t WeakMap<K, V>::approx_size() const
 {
 	return map.size();
 }
 
-template <typename K, typename V>
-std::shared_ptr<V> WeakMap<K, V>::get(const K& key) const
+template<typename K, typename V>
+std::shared_ptr<V> WeakMap<K, V>::get(const K &key) const
 {
 	auto it = map.find(key);
 	return (it != map.end()) ? it->second.lock() : nullptr;
 }
 
-template <typename K, typename V>
-bool WeakMap<K, V>::add(const K& key, const std::shared_ptr<V>& val)
+template<typename K, typename V>
+bool WeakMap<K, V>::add(const K &key, const std::shared_ptr<V> &val)
 {
 	if(map.size() > reapAtSize)
 	{
@@ -169,13 +169,13 @@
 	return map.emplace(key, val).second;
 }
 
-template <typename K, typename V>
-bool WeakMap<K, V>::remove(const K& key)
+template<typename K, typename V>
+bool WeakMap<K, V>::remove(const K &key)
 {
 	return map.erase(key) > 0;
 }
 
-template <typename K, typename V>
+template<typename K, typename V>
 void WeakMap<K, V>::reap()
 {
 	for(auto it = map.begin(); it != map.end();)
diff --git a/src/Vulkan/VkBuffer.cpp b/src/Vulkan/VkBuffer.cpp
index ad24a83..129d5bd 100644
--- a/src/Vulkan/VkBuffer.cpp
+++ b/src/Vulkan/VkBuffer.cpp
@@ -20,34 +20,36 @@
 
 namespace vk {
 
-Buffer::Buffer(const VkBufferCreateInfo* pCreateInfo, void* mem) :
-	flags(pCreateInfo->flags), size(pCreateInfo->size), usage(pCreateInfo->usage),
-	sharingMode(pCreateInfo->sharingMode)
+Buffer::Buffer(const VkBufferCreateInfo *pCreateInfo, void *mem)
+    : flags(pCreateInfo->flags)
+    , size(pCreateInfo->size)
+    , usage(pCreateInfo->usage)
+    , sharingMode(pCreateInfo->sharingMode)
 {
 	if(pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT)
 	{
 		queueFamilyIndexCount = pCreateInfo->queueFamilyIndexCount;
-		queueFamilyIndices = reinterpret_cast<uint32_t*>(mem);
+		queueFamilyIndices = reinterpret_cast<uint32_t *>(mem);
 		memcpy(queueFamilyIndices, pCreateInfo->pQueueFamilyIndices, sizeof(uint32_t) * queueFamilyIndexCount);
 	}
 
-	const auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+	const auto *nextInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 	for(; nextInfo != nullptr; nextInfo = nextInfo->pNext)
 	{
 		if(nextInfo->sType == VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO)
 		{
-			const auto* externalInfo = reinterpret_cast<const VkExternalMemoryBufferCreateInfo*>(nextInfo);
+			const auto *externalInfo = reinterpret_cast<const VkExternalMemoryBufferCreateInfo *>(nextInfo);
 			supportedExternalMemoryHandleTypes = externalInfo->handleTypes;
 		}
 	}
 }
 
-void Buffer::destroy(const VkAllocationCallbacks* pAllocator)
+void Buffer::destroy(const VkAllocationCallbacks *pAllocator)
 {
 	vk::deallocate(queueFamilyIndices, pAllocator);
 }
 
-size_t Buffer::ComputeRequiredAllocationSize(const VkBufferCreateInfo* pCreateInfo)
+size_t Buffer::ComputeRequiredAllocationSize(const VkBufferCreateInfo *pCreateInfo)
 {
 	return (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) ? sizeof(uint32_t) * pCreateInfo->queueFamilyIndexCount : 0;
 }
@@ -72,36 +74,36 @@
 		memoryRequirements.alignment = REQUIRED_MEMORY_ALIGNMENT;
 	}
 	memoryRequirements.memoryTypeBits = vk::MEMORY_TYPE_GENERIC_BIT;
-	memoryRequirements.size = size; // TODO: also reserve space for a header containing
-		                            // the size of the buffer (for robust buffer access)
+	memoryRequirements.size = size;  // TODO: also reserve space for a header containing
+	                                 // the size of the buffer (for robust buffer access)
 	return memoryRequirements;
 }
 
-bool Buffer::canBindToMemory(DeviceMemory* pDeviceMemory) const
+bool Buffer::canBindToMemory(DeviceMemory *pDeviceMemory) const
 {
 	return pDeviceMemory->checkExternalMemoryHandleType(supportedExternalMemoryHandleTypes);
 }
 
-void Buffer::bind(DeviceMemory* pDeviceMemory, VkDeviceSize pMemoryOffset)
+void Buffer::bind(DeviceMemory *pDeviceMemory, VkDeviceSize pMemoryOffset)
 {
 	memory = pDeviceMemory->getOffsetPointer(pMemoryOffset);
 }
 
-void Buffer::copyFrom(const void* srcMemory, VkDeviceSize pSize, VkDeviceSize pOffset)
+void Buffer::copyFrom(const void *srcMemory, VkDeviceSize pSize, VkDeviceSize pOffset)
 {
 	ASSERT((pSize + pOffset) <= size);
 
 	memcpy(getOffsetPointer(pOffset), srcMemory, pSize);
 }
 
-void Buffer::copyTo(void* dstMemory, VkDeviceSize pSize, VkDeviceSize pOffset) const
+void Buffer::copyTo(void *dstMemory, VkDeviceSize pSize, VkDeviceSize pOffset) const
 {
 	ASSERT((pSize + pOffset) <= size);
 
 	memcpy(dstMemory, getOffsetPointer(pOffset), pSize);
 }
 
-void Buffer::copyTo(Buffer* dstBuffer, const VkBufferCopy& pRegion) const
+void Buffer::copyTo(Buffer *dstBuffer, const VkBufferCopy &pRegion) const
 {
 	copyTo(dstBuffer->getOffsetPointer(pRegion.dstOffset), pRegion.size, pRegion.srcOffset);
 }
@@ -112,7 +114,7 @@
 
 	ASSERT((bytes + dstOffset) <= size);
 
-	uint32_t* memToWrite = static_cast<uint32_t*>(getOffsetPointer(dstOffset));
+	uint32_t *memToWrite = static_cast<uint32_t *>(getOffsetPointer(dstOffset));
 
 	// Vulkan 1.1 spec: "If VK_WHOLE_SIZE is used and the remaining size of the buffer is
 	//                   not a multiple of 4, then the nearest smaller multiple is used."
@@ -122,21 +124,21 @@
 	}
 }
 
-void Buffer::update(VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData)
+void Buffer::update(VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData)
 {
 	ASSERT((dataSize + dstOffset) <= size);
 
 	memcpy(getOffsetPointer(dstOffset), pData, dataSize);
 }
 
-void* Buffer::getOffsetPointer(VkDeviceSize offset) const
+void *Buffer::getOffsetPointer(VkDeviceSize offset) const
 {
-	return reinterpret_cast<uint8_t*>(memory) + offset;
+	return reinterpret_cast<uint8_t *>(memory) + offset;
 }
 
-uint8_t* Buffer::end() const
+uint8_t *Buffer::end() const
 {
-	return reinterpret_cast<uint8_t*>(getOffsetPointer(size + 1));
+	return reinterpret_cast<uint8_t *>(getOffsetPointer(size + 1));
 }
 
 }  // namespace vk
diff --git a/src/Vulkan/VkBuffer.hpp b/src/Vulkan/VkBuffer.hpp
index 7dc1005..2aa59fc 100644
--- a/src/Vulkan/VkBuffer.hpp
+++ b/src/Vulkan/VkBuffer.hpp
@@ -24,40 +24,40 @@
 class Buffer : public Object<Buffer, VkBuffer>
 {
 public:
-	Buffer(const VkBufferCreateInfo* pCreateInfo, void* mem);
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	Buffer(const VkBufferCreateInfo *pCreateInfo, void *mem);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
-	static size_t ComputeRequiredAllocationSize(const VkBufferCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkBufferCreateInfo *pCreateInfo);
 
 	const VkMemoryRequirements getMemoryRequirements() const;
-	void bind(DeviceMemory* pDeviceMemory, VkDeviceSize pMemoryOffset);
-	void copyFrom(const void* srcMemory, VkDeviceSize size, VkDeviceSize offset);
-	void copyTo(void* dstMemory, VkDeviceSize size, VkDeviceSize offset) const;
-	void copyTo(Buffer* dstBuffer, const VkBufferCopy& pRegion) const;
+	void bind(DeviceMemory *pDeviceMemory, VkDeviceSize pMemoryOffset);
+	void copyFrom(const void *srcMemory, VkDeviceSize size, VkDeviceSize offset);
+	void copyTo(void *dstMemory, VkDeviceSize size, VkDeviceSize offset) const;
+	void copyTo(Buffer *dstBuffer, const VkBufferCopy &pRegion) const;
 	void fill(VkDeviceSize dstOffset, VkDeviceSize fillSize, uint32_t data);
-	void update(VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData);
-	void* getOffsetPointer(VkDeviceSize offset) const;
+	void update(VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData);
+	void *getOffsetPointer(VkDeviceSize offset) const;
 	inline VkDeviceSize getSize() const { return size; }
-	uint8_t* end() const;
-	bool canBindToMemory(DeviceMemory* pDeviceMemory) const;
+	uint8_t *end() const;
+	bool canBindToMemory(DeviceMemory *pDeviceMemory) const;
 
 private:
-	void*                 memory = nullptr;
-	VkBufferCreateFlags   flags = 0;
-	VkDeviceSize          size = 0;
-	VkBufferUsageFlags    usage = 0;
-	VkSharingMode         sharingMode = VK_SHARING_MODE_EXCLUSIVE;
-	uint32_t              queueFamilyIndexCount = 0;
-	uint32_t*             queueFamilyIndices = nullptr;
+	void *memory = nullptr;
+	VkBufferCreateFlags flags = 0;
+	VkDeviceSize size = 0;
+	VkBufferUsageFlags usage = 0;
+	VkSharingMode sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+	uint32_t queueFamilyIndexCount = 0;
+	uint32_t *queueFamilyIndices = nullptr;
 
 	VkExternalMemoryHandleTypeFlags supportedExternalMemoryHandleTypes = (VkExternalMemoryHandleTypeFlags)0;
 };
 
-static inline Buffer* Cast(VkBuffer object)
+static inline Buffer *Cast(VkBuffer object)
 {
 	return Buffer::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_BUFFER_HPP_
+#endif  // VK_BUFFER_HPP_
diff --git a/src/Vulkan/VkBufferView.cpp b/src/Vulkan/VkBufferView.cpp
index 7b007e2..a8b49a0 100644
--- a/src/Vulkan/VkBufferView.cpp
+++ b/src/Vulkan/VkBufferView.cpp
@@ -18,22 +18,24 @@
 
 namespace vk {
 
-BufferView::BufferView(const VkBufferViewCreateInfo* pCreateInfo, void* mem) :
-    buffer(vk::Cast(pCreateInfo->buffer)), format(pCreateInfo->format), offset(pCreateInfo->offset)
+BufferView::BufferView(const VkBufferViewCreateInfo *pCreateInfo, void *mem)
+    : buffer(vk::Cast(pCreateInfo->buffer))
+    , format(pCreateInfo->format)
+    , offset(pCreateInfo->offset)
 {
-    if(pCreateInfo->range == VK_WHOLE_SIZE)
-    {
-        range = buffer->getSize() - offset;
-    }
-    else
-    {
-        range = pCreateInfo->range;
-    }
+	if(pCreateInfo->range == VK_WHOLE_SIZE)
+	{
+		range = buffer->getSize() - offset;
+	}
+	else
+	{
+		range = pCreateInfo->range;
+	}
 }
 
-void * BufferView::getPointer() const
+void *BufferView::getPointer() const
 {
-    return buffer->getOffsetPointer(offset);
+	return buffer->getOffsetPointer(offset);
 }
 
 }  // namespace vk
\ No newline at end of file
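The VkBufferView.cpp hunk illustrates the initializer-list style applied throughout this change: the colon drops to its own continuation line and every subsequent member initializer starts with a leading comma (clang-format's BreakConstructorInitializers: BeforeComma, inferred). The same hunk also re-indents this file's stray four-space bodies to the tabs used in the rest of the directory. A sketch with a hypothetical Range class:

    class Range
    {
    public:
    	Range(int offset, int size)
    	    : offset(offset)  // colon and each comma lead their own line
    	    , size(size)
    	{
    	}

    private:
    	int offset;
    	int size;
    };
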
diff --git a/src/Vulkan/VkBufferView.hpp b/src/Vulkan/VkBufferView.hpp
index bf20a6a..98b68cc 100644
--- a/src/Vulkan/VkBufferView.hpp
+++ b/src/Vulkan/VkBufferView.hpp
@@ -15,9 +15,9 @@
 #ifndef VK_BUFFER_VIEW_HPP_
 #define VK_BUFFER_VIEW_HPP_
 
-#include "VkObject.hpp"
 #include "VkFormat.h"
 #include "VkImageView.hpp"
+#include "VkObject.hpp"
 
 namespace vk {
 
@@ -26,9 +26,9 @@
 class BufferView : public Object<BufferView, VkBufferView>
 {
 public:
-	BufferView(const VkBufferViewCreateInfo* pCreateInfo, void* mem);
+	BufferView(const VkBufferViewCreateInfo *pCreateInfo, void *mem);
 
-	static size_t ComputeRequiredAllocationSize(const VkBufferViewCreateInfo* pCreateInfo)
+	static size_t ComputeRequiredAllocationSize(const VkBufferViewCreateInfo *pCreateInfo)
 	{
 		return 0;
 	}
@@ -38,19 +38,19 @@
 	uint32_t getRangeInBytes() const { return static_cast<uint32_t>(range); }
 	VkFormat getFormat() const { return format; }
 
-	const uint32_t id = ImageView::nextID++;	// ID space for sampling function cache, shared with imageviews
+	const uint32_t id = ImageView::nextID++;  // ID space for sampling function cache, shared with imageviews
 private:
-	Buffer      *buffer;
-	VkFormat     format;
+	Buffer *buffer;
+	VkFormat format;
 	VkDeviceSize offset;
 	VkDeviceSize range;
 };
 
-static inline BufferView* Cast(VkBufferView object)
+static inline BufferView *Cast(VkBufferView object)
 {
 	return BufferView::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_BUFFER_VIEW_HPP_
+#endif  // VK_BUFFER_VIEW_HPP_
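The VkBufferView.hpp hunk also reorders the includes alphabetically and widens trailing comments to two spaces (clang-format's SortIncludes and SpacesBeforeTrailingComments: 2, both inferred). Include sorting is the one reformat here that could change behavior if headers were order-dependent; these headers are self-contained, so the reorder is safe. Sketch:

    // Alphabetical include order within the block:
    #include "VkFormat.h"
    #include "VkImageView.hpp"
    #include "VkObject.hpp"

    const int id = 0;  // trailing comments get two spaces before the '//'
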
diff --git a/src/Vulkan/VkCommandBuffer.cpp b/src/Vulkan/VkCommandBuffer.cpp
index 08178cd..60e968e 100644
--- a/src/Vulkan/VkCommandBuffer.cpp
+++ b/src/Vulkan/VkCommandBuffer.cpp
@@ -31,7 +31,7 @@
 {
 public:
 	// FIXME (b/119421344): change the commandBuffer argument to a CommandBuffer state
-	virtual void play(vk::CommandBuffer::ExecutionState& executionState) = 0;
+	virtual void play(vk::CommandBuffer::ExecutionState &executionState) = 0;
 	virtual std::string description() = 0;
 	virtual ~Command() {}
 };
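Every class in the hunks that follow derives from this small Command interface: each vkCmd* entry point records a heap-allocated Command carrying its arguments, and submit() later replays them through play(). A stripped-down sketch of the pattern, with a stand-in for vk::CommandBuffer::ExecutionState (which in the real code carries the renderer, pipeline, and binding state):

    #include <memory>
    #include <string>
    #include <vector>

    struct ExecutionState {};  // stand-in for vk::CommandBuffer::ExecutionState

    struct Command
    {
    	virtual void play(ExecutionState &state) = 0;
    	virtual std::string description() = 0;
    	virtual ~Command() {}
    };

    struct CmdExample : Command  // arguments are captured at record time
    {
    	void play(ExecutionState &state) override { /* perform the recorded work */ }
    	std::string description() override { return "vkCmdExample()"; }
    };

    void submit(std::vector<std::unique_ptr<Command>> &commands, ExecutionState &state)
    {
    	for(auto &command : commands)
    	{
    		command->play(state);
    	}
    }
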
@@ -41,10 +41,12 @@
 class CmdBeginRenderPass : public vk::CommandBuffer::Command
 {
 public:
-	CmdBeginRenderPass(vk::RenderPass* renderPass, vk::Framebuffer* framebuffer, VkRect2D renderArea,
-	                uint32_t clearValueCount, const VkClearValue* pClearValues) :
-		renderPass(renderPass), framebuffer(framebuffer), renderArea(renderArea),
-		clearValueCount(clearValueCount)
+	CmdBeginRenderPass(vk::RenderPass *renderPass, vk::Framebuffer *framebuffer, VkRect2D renderArea,
+	                   uint32_t clearValueCount, const VkClearValue *pClearValues)
+	    : renderPass(renderPass)
+	    , framebuffer(framebuffer)
+	    , renderArea(renderArea)
+	    , clearValueCount(clearValueCount)
 	{
 		// FIXME (b/119409619): use an allocator here so we can control all memory allocations
 		clearValues = new VkClearValue[clearValueCount];
@@ -53,10 +55,10 @@
 
 	~CmdBeginRenderPass() override
 	{
-		delete [] clearValues;
+		delete[] clearValues;
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		executionState.renderPass = renderPass;
 		executionState.renderPassFramebuffer = framebuffer;
@@ -67,17 +69,17 @@
 	std::string description() override { return "vkCmdBeginRenderPass()"; }
 
 private:
-	vk::RenderPass* renderPass;
-	vk::Framebuffer* framebuffer;
+	vk::RenderPass *renderPass;
+	vk::Framebuffer *framebuffer;
 	VkRect2D renderArea;
 	uint32_t clearValueCount;
-	VkClearValue* clearValues;
+	VkClearValue *clearValues;
 };
 
 class CmdNextSubpass : public vk::CommandBuffer::Command
 {
 public:
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		bool hasResolveAttachments = (executionState.renderPass->getSubpass(executionState.subpassIndex).pResolveAttachments != nullptr);
 		if(hasResolveAttachments)
@@ -98,7 +100,7 @@
 class CmdEndRenderPass : public vk::CommandBuffer::Command
 {
 public:
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		// Execute (implicit or explicit) VkSubpassDependency to VK_SUBPASS_EXTERNAL
 		// This is somewhat heavier than the actual ordering required.
@@ -118,11 +120,12 @@
 class CmdExecuteCommands : public vk::CommandBuffer::Command
 {
 public:
-	CmdExecuteCommands(const vk::CommandBuffer* commandBuffer) : commandBuffer(commandBuffer)
+	CmdExecuteCommands(const vk::CommandBuffer *commandBuffer)
+	    : commandBuffer(commandBuffer)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		commandBuffer->submitSecondary(executionState);
 	}
@@ -130,18 +133,19 @@
 	std::string description() override { return "vkCmdExecuteCommands()"; }
 
 private:
-	const vk::CommandBuffer* commandBuffer;
+	const vk::CommandBuffer *commandBuffer;
 };
 
 class CmdPipelineBind : public vk::CommandBuffer::Command
 {
 public:
-	CmdPipelineBind(VkPipelineBindPoint pipelineBindPoint, vk::Pipeline* pipeline) :
-		pipelineBindPoint(pipelineBindPoint), pipeline(pipeline)
+	CmdPipelineBind(VkPipelineBindPoint pipelineBindPoint, vk::Pipeline *pipeline)
+	    : pipelineBindPoint(pipelineBindPoint)
+	    , pipeline(pipeline)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		executionState.pipelineState[pipelineBindPoint].pipeline = pipeline;
 	}
@@ -150,28 +154,32 @@
 
 private:
 	VkPipelineBindPoint pipelineBindPoint;
-	vk::Pipeline* pipeline;
+	vk::Pipeline *pipeline;
 };
 
 class CmdDispatch : public vk::CommandBuffer::Command
 {
 public:
-	CmdDispatch(uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ) :
-			baseGroupX(baseGroupX), baseGroupY(baseGroupY), baseGroupZ(baseGroupZ),
-			groupCountX(groupCountX), groupCountY(groupCountY), groupCountZ(groupCountZ)
+	CmdDispatch(uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ)
+	    : baseGroupX(baseGroupX)
+	    , baseGroupY(baseGroupY)
+	    , baseGroupZ(baseGroupZ)
+	    , groupCountX(groupCountX)
+	    , groupCountY(groupCountY)
+	    , groupCountZ(groupCountZ)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		auto const &pipelineState = executionState.pipelineState[VK_PIPELINE_BIND_POINT_COMPUTE];
 
-		vk::ComputePipeline* pipeline = static_cast<vk::ComputePipeline*>(pipelineState.pipeline);
+		vk::ComputePipeline *pipeline = static_cast<vk::ComputePipeline *>(pipelineState.pipeline);
 		pipeline->run(baseGroupX, baseGroupY, baseGroupZ,
-			groupCountX, groupCountY, groupCountZ,
-			pipelineState.descriptorSets,
-			pipelineState.descriptorDynamicOffsets,
-			executionState.pushConstants);
+		              groupCountX, groupCountY, groupCountZ,
+		              pipelineState.descriptorSets,
+		              pipelineState.descriptorDynamicOffsets,
+		              executionState.pushConstants);
 	}
 
 	std::string description() override { return "vkCmdDispatch()"; }
@@ -188,40 +196,43 @@
 class CmdDispatchIndirect : public vk::CommandBuffer::Command
 {
 public:
-	CmdDispatchIndirect(vk::Buffer* buffer, VkDeviceSize offset) :
-			buffer(buffer), offset(offset)
+	CmdDispatchIndirect(vk::Buffer *buffer, VkDeviceSize offset)
+	    : buffer(buffer)
+	    , offset(offset)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		auto cmd = reinterpret_cast<VkDispatchIndirectCommand const *>(buffer->getOffsetPointer(offset));
 
 		auto const &pipelineState = executionState.pipelineState[VK_PIPELINE_BIND_POINT_COMPUTE];
 
-		auto pipeline = static_cast<vk::ComputePipeline*>(pipelineState.pipeline);
+		auto pipeline = static_cast<vk::ComputePipeline *>(pipelineState.pipeline);
 		pipeline->run(0, 0, 0, cmd->x, cmd->y, cmd->z,
-			pipelineState.descriptorSets,
-			pipelineState.descriptorDynamicOffsets,
-			executionState.pushConstants);
+		              pipelineState.descriptorSets,
+		              pipelineState.descriptorDynamicOffsets,
+		              executionState.pushConstants);
 	}
 
 	std::string description() override { return "vkCmdDispatchIndirect()"; }
 
 private:
-	const vk::Buffer* buffer;
+	const vk::Buffer *buffer;
 	VkDeviceSize offset;
 };
 
 class CmdVertexBufferBind : public vk::CommandBuffer::Command
 {
 public:
-	CmdVertexBufferBind(uint32_t binding, vk::Buffer* buffer, const VkDeviceSize offset) :
-		binding(binding), buffer(buffer), offset(offset)
+	CmdVertexBufferBind(uint32_t binding, vk::Buffer *buffer, const VkDeviceSize offset)
+	    : binding(binding)
+	    , buffer(buffer)
+	    , offset(offset)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		executionState.vertexInputBindings[binding] = { buffer, offset };
 	}
@@ -230,19 +241,21 @@
 
 private:
 	uint32_t binding;
-	vk::Buffer* buffer;
+	vk::Buffer *buffer;
 	const VkDeviceSize offset;
 };
 
 class CmdIndexBufferBind : public vk::CommandBuffer::Command
 {
 public:
-	CmdIndexBufferBind(vk::Buffer* buffer, const VkDeviceSize offset, const VkIndexType indexType) :
-		buffer(buffer), offset(offset), indexType(indexType)
+	CmdIndexBufferBind(vk::Buffer *buffer, const VkDeviceSize offset, const VkIndexType indexType)
+	    : buffer(buffer)
+	    , offset(offset)
+	    , indexType(indexType)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		executionState.indexBufferBinding = { buffer, offset };
 		executionState.indexType = indexType;
@@ -251,7 +264,7 @@
 	std::string description() override { return "vkCmdIndexBufferBind()"; }
 
 private:
-	vk::Buffer* buffer;
+	vk::Buffer *buffer;
 	const VkDeviceSize offset;
 	const VkIndexType indexType;
 };
@@ -259,12 +272,13 @@
 class CmdSetViewport : public vk::CommandBuffer::Command
 {
 public:
-	CmdSetViewport(const VkViewport& viewport, uint32_t viewportID) :
-		viewport(viewport), viewportID(viewportID)
+	CmdSetViewport(const VkViewport &viewport, uint32_t viewportID)
+	    : viewport(viewport)
+	    , viewportID(viewportID)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		executionState.dynamicState.viewport = viewport;
 	}
@@ -279,12 +293,13 @@
 class CmdSetScissor : public vk::CommandBuffer::Command
 {
 public:
-	CmdSetScissor(const VkRect2D& scissor, uint32_t scissorID) :
-		scissor(scissor), scissorID(scissorID)
+	CmdSetScissor(const VkRect2D &scissor, uint32_t scissorID)
+	    : scissor(scissor)
+	    , scissorID(scissorID)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		executionState.dynamicState.scissor = scissor;
 	}
@@ -299,12 +314,14 @@
 class CmdSetDepthBias : public vk::CommandBuffer::Command
 {
 public:
-	CmdSetDepthBias(float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) :
-		depthBiasConstantFactor(depthBiasConstantFactor), depthBiasClamp(depthBiasClamp), depthBiasSlopeFactor(depthBiasSlopeFactor)
+	CmdSetDepthBias(float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor)
+	    : depthBiasConstantFactor(depthBiasConstantFactor)
+	    , depthBiasClamp(depthBiasClamp)
+	    , depthBiasSlopeFactor(depthBiasSlopeFactor)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		executionState.dynamicState.depthBiasConstantFactor = depthBiasConstantFactor;
 		executionState.dynamicState.depthBiasClamp = depthBiasClamp;
@@ -327,7 +344,7 @@
 		memcpy(this->blendConstants, blendConstants, sizeof(this->blendConstants));
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		memcpy(&(executionState.dynamicState.blendConstants[0]), blendConstants, sizeof(blendConstants));
 	}
@@ -341,12 +358,13 @@
 class CmdSetDepthBounds : public vk::CommandBuffer::Command
 {
 public:
-	CmdSetDepthBounds(float minDepthBounds, float maxDepthBounds) :
-		minDepthBounds(minDepthBounds), maxDepthBounds(maxDepthBounds)
+	CmdSetDepthBounds(float minDepthBounds, float maxDepthBounds)
+	    : minDepthBounds(minDepthBounds)
+	    , maxDepthBounds(maxDepthBounds)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		executionState.dynamicState.minDepthBounds = minDepthBounds;
 		executionState.dynamicState.maxDepthBounds = maxDepthBounds;
@@ -362,12 +380,13 @@
 class CmdSetStencilCompareMask : public vk::CommandBuffer::Command
 {
 public:
-	CmdSetStencilCompareMask(VkStencilFaceFlags faceMask, uint32_t compareMask) :
-		faceMask(faceMask), compareMask(compareMask)
+	CmdSetStencilCompareMask(VkStencilFaceFlags faceMask, uint32_t compareMask)
+	    : faceMask(faceMask)
+	    , compareMask(compareMask)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		if(faceMask & VK_STENCIL_FACE_FRONT_BIT)
 		{
@@ -389,12 +408,13 @@
 class CmdSetStencilWriteMask : public vk::CommandBuffer::Command
 {
 public:
-	CmdSetStencilWriteMask(VkStencilFaceFlags faceMask, uint32_t writeMask) :
-		faceMask(faceMask), writeMask(writeMask)
+	CmdSetStencilWriteMask(VkStencilFaceFlags faceMask, uint32_t writeMask)
+	    : faceMask(faceMask)
+	    , writeMask(writeMask)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		if(faceMask & VK_STENCIL_FACE_FRONT_BIT)
 		{
@@ -416,12 +436,13 @@
 class CmdSetStencilReference : public vk::CommandBuffer::Command
 {
 public:
-	CmdSetStencilReference(VkStencilFaceFlags faceMask, uint32_t reference) :
-		faceMask(faceMask), reference(reference)
+	CmdSetStencilReference(VkStencilFaceFlags faceMask, uint32_t reference)
+	    : faceMask(faceMask)
+	    , reference(reference)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		if(faceMask & VK_STENCIL_FACE_FRONT_BIT)
 		{
@@ -443,19 +464,19 @@
 class CmdDrawBase : public vk::CommandBuffer::Command
 {
 public:
-	int bytesPerIndex(vk::CommandBuffer::ExecutionState const& executionState)
+	int bytesPerIndex(vk::CommandBuffer::ExecutionState const &executionState)
 	{
 		return executionState.indexType == VK_INDEX_TYPE_UINT16 ? 2 : 4;
 	}
 
 	template<typename T>
-	void processPrimitiveRestart(T* indexBuffer,
+	void processPrimitiveRestart(T *indexBuffer,
 	                             uint32_t count,
-		                         vk::GraphicsPipeline* pipeline,
-	                             std::vector<std::pair<uint32_t, void*>>& indexBuffers)
+	                             vk::GraphicsPipeline *pipeline,
+	                             std::vector<std::pair<uint32_t, void *>> &indexBuffers)
 	{
 		static const T RestartIndex = static_cast<T>(-1);
-		T* indexBufferStart = indexBuffer;
+		T *indexBufferStart = indexBuffer;
 		uint32_t vertexCount = 0;
 		for(uint32_t i = 0; i < count; i++)
 		{
@@ -493,8 +514,8 @@
 		}
 	}
 
-	void draw(vk::CommandBuffer::ExecutionState& executionState, bool indexed,
-			uint32_t count, uint32_t instanceCount, uint32_t first, int32_t vertexOffset, uint32_t firstInstance)
+	void draw(vk::CommandBuffer::ExecutionState &executionState, bool indexed,
+	          uint32_t count, uint32_t instanceCount, uint32_t first, int32_t vertexOffset, uint32_t firstInstance)
 	{
 		auto const &pipelineState = executionState.pipelineState[VK_PIPELINE_BIND_POINT_GRAPHICS];
 
@@ -508,12 +529,9 @@
 		context.descriptorDynamicOffsets = pipelineState.descriptorDynamicOffsets;
 
 		// Apply either pipeline state or dynamic state
-		executionState.renderer->setScissor(pipeline->hasDynamicState(VK_DYNAMIC_STATE_SCISSOR) ?
-		                                    executionState.dynamicState.scissor : pipeline->getScissor());
-		executionState.renderer->setViewport(pipeline->hasDynamicState(VK_DYNAMIC_STATE_VIEWPORT) ?
-		                                     executionState.dynamicState.viewport : pipeline->getViewport());
-		executionState.renderer->setBlendConstant(pipeline->hasDynamicState(VK_DYNAMIC_STATE_BLEND_CONSTANTS) ?
-		                                          executionState.dynamicState.blendConstants : pipeline->getBlendConstants());
+		executionState.renderer->setScissor(pipeline->hasDynamicState(VK_DYNAMIC_STATE_SCISSOR) ? executionState.dynamicState.scissor : pipeline->getScissor());
+		executionState.renderer->setViewport(pipeline->hasDynamicState(VK_DYNAMIC_STATE_VIEWPORT) ? executionState.dynamicState.viewport : pipeline->getViewport());
+		executionState.renderer->setBlendConstant(pipeline->hasDynamicState(VK_DYNAMIC_STATE_BLEND_CONSTANTS) ? executionState.dynamicState.blendConstants : pipeline->getBlendConstants());
 
 		if(pipeline->hasDynamicState(VK_DYNAMIC_STATE_DEPTH_BIAS))
 		{
@@ -527,9 +545,9 @@
 		{
 			// Unless the VK_EXT_depth_range_unrestricted extension is enabled minDepthBounds and maxDepthBounds must be between 0.0 and 1.0, inclusive
 			ASSERT(executionState.dynamicState.minDepthBounds >= 0.0f &&
-				   executionState.dynamicState.minDepthBounds <= 1.0f);
+			       executionState.dynamicState.minDepthBounds <= 1.0f);
 			ASSERT(executionState.dynamicState.maxDepthBounds >= 0.0f &&
-				   executionState.dynamicState.maxDepthBounds <= 1.0f);
+			       executionState.dynamicState.maxDepthBounds <= 1.0f);
 
 			UNIMPLEMENTED("depthBoundsTestEnable");
 		}
@@ -557,29 +575,29 @@
 		if(indexed)
 		{
 			void *indexBuffer = executionState.indexBufferBinding.buffer->getOffsetPointer(
-					executionState.indexBufferBinding.offset + first * bytesPerIndex(executionState));
+			    executionState.indexBufferBinding.offset + first * bytesPerIndex(executionState));
 			if(pipeline->hasPrimitiveRestartEnable())
 			{
 				switch(executionState.indexType)
 				{
-				case VK_INDEX_TYPE_UINT16:
-					processPrimitiveRestart(static_cast<uint16_t *>(indexBuffer), count, pipeline, indexBuffers);
-					break;
-				case VK_INDEX_TYPE_UINT32:
-					processPrimitiveRestart(static_cast<uint32_t *>(indexBuffer), count, pipeline, indexBuffers);
-					break;
-				default:
-					UNIMPLEMENTED("executionState.indexType %d", int(executionState.indexType));
+					case VK_INDEX_TYPE_UINT16:
+						processPrimitiveRestart(static_cast<uint16_t *>(indexBuffer), count, pipeline, indexBuffers);
+						break;
+					case VK_INDEX_TYPE_UINT32:
+						processPrimitiveRestart(static_cast<uint32_t *>(indexBuffer), count, pipeline, indexBuffers);
+						break;
+					default:
+						UNIMPLEMENTED("executionState.indexType %d", int(executionState.indexType));
 				}
 			}
 			else
 			{
-				indexBuffers.push_back({pipeline->computePrimitiveCount(count), indexBuffer});
+				indexBuffers.push_back({ pipeline->computePrimitiveCount(count), indexBuffer });
 			}
 		}
 		else
 		{
-			indexBuffers.push_back({pipeline->computePrimitiveCount(count), nullptr});
+			indexBuffers.push_back({ pipeline->computePrimitiveCount(count), nullptr });
 		}
 
 		for(uint32_t instance = firstInstance; instance != firstInstance + instanceCount; instance++)
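This hunk captures two more global settings, both inferred from the before/after lines: case labels are now indented one level inside their switch (IndentCaseLabels: true) and braced initializer lists gain inner spaces (Cpp11BracedListStyle: false). A self-contained sketch, with a plain int standing in for VkIndexType:

    #include <utility>
    #include <vector>

    int bytesPerIndex(int indexType)
    {
    	switch(indexType)
    	{
    		case 0:  // case labels sit one level inside the switch braces
    			return 2;
    		default:
    			return 4;
    	}
    }

    // Inner spaces in braced lists: '{ a, b }' rather than '{a, b}'.
    std::vector<std::pair<int, void *>> buffers = { { 2, nullptr } };
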
@@ -594,9 +612,9 @@
 				for(auto indexBuffer : indexBuffers)
 				{
 					executionState.renderer->draw(&context, executionState.indexType, indexBuffer.first, vertexOffset,
-												  executionState.events, instance, viewID, indexBuffer.second,
-												  executionState.renderPassFramebuffer->getExtent(),
-												  executionState.pushConstants);
+					                              executionState.events, instance, viewID, indexBuffer.second,
+					                              executionState.renderPassFramebuffer->getExtent(),
+					                              executionState.pushConstants);
 				}
 			}
 
@@ -609,11 +627,14 @@
 {
 public:
 	CmdDraw(uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance)
-		: vertexCount(vertexCount), instanceCount(instanceCount), firstVertex(firstVertex), firstInstance(firstInstance)
+	    : vertexCount(vertexCount)
+	    , instanceCount(instanceCount)
+	    , firstVertex(firstVertex)
+	    , firstInstance(firstInstance)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		draw(executionState, false, vertexCount, instanceCount, 0, firstVertex, firstInstance);
 	}
@@ -631,11 +652,15 @@
 {
 public:
 	CmdDrawIndexed(uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance)
-			: indexCount(indexCount), instanceCount(instanceCount), firstIndex(firstIndex), vertexOffset(vertexOffset), firstInstance(firstInstance)
+	    : indexCount(indexCount)
+	    , instanceCount(instanceCount)
+	    , firstIndex(firstIndex)
+	    , vertexOffset(vertexOffset)
+	    , firstInstance(firstInstance)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		draw(executionState, true, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
 	}
@@ -653,12 +678,15 @@
 class CmdDrawIndirect : public CmdDrawBase
 {
 public:
-	CmdDrawIndirect(vk::Buffer* buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride)
-			: buffer(buffer), offset(offset), drawCount(drawCount), stride(stride)
+	CmdDrawIndirect(vk::Buffer *buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride)
+	    : buffer(buffer)
+	    , offset(offset)
+	    , drawCount(drawCount)
+	    , stride(stride)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		for(auto drawId = 0u; drawId < drawCount; drawId++)
 		{
@@ -670,7 +698,7 @@
 	std::string description() override { return "vkCmdDrawIndirect()"; }
 
 private:
-	const vk::Buffer* buffer;
+	const vk::Buffer *buffer;
 	VkDeviceSize offset;
 	uint32_t drawCount;
 	uint32_t stride;
@@ -679,12 +707,15 @@
 class CmdDrawIndexedIndirect : public CmdDrawBase
 {
 public:
-	CmdDrawIndexedIndirect(vk::Buffer* buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride)
-			: buffer(buffer), offset(offset), drawCount(drawCount), stride(stride)
+	CmdDrawIndexedIndirect(vk::Buffer *buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride)
+	    : buffer(buffer)
+	    , offset(offset)
+	    , drawCount(drawCount)
+	    , stride(stride)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		for(auto drawId = 0u; drawId < drawCount; drawId++)
 		{
@@ -696,7 +727,7 @@
 	std::string description() override { return "vkCmdDrawIndexedIndirect()"; }
 
 private:
-	const vk::Buffer* buffer;
+	const vk::Buffer *buffer;
 	VkDeviceSize offset;
 	uint32_t drawCount;
 	uint32_t stride;
@@ -705,12 +736,14 @@
 class CmdImageToImageCopy : public vk::CommandBuffer::Command
 {
 public:
-	CmdImageToImageCopy(const vk::Image* srcImage, vk::Image* dstImage, const VkImageCopy& region) :
-		srcImage(srcImage), dstImage(dstImage), region(region)
+	CmdImageToImageCopy(const vk::Image *srcImage, vk::Image *dstImage, const VkImageCopy &region)
+	    : srcImage(srcImage)
+	    , dstImage(dstImage)
+	    , region(region)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		srcImage->copyTo(dstImage, region);
 	}
@@ -718,20 +751,22 @@
 	std::string description() override { return "vkCmdImageToImageCopy()"; }
 
 private:
-	const vk::Image* srcImage;
-	vk::Image* dstImage;
+	const vk::Image *srcImage;
+	vk::Image *dstImage;
 	const VkImageCopy region;
 };
 
 class CmdBufferToBufferCopy : public vk::CommandBuffer::Command
 {
 public:
-	CmdBufferToBufferCopy(const vk::Buffer* srcBuffer, vk::Buffer* dstBuffer, const VkBufferCopy& region) :
-		srcBuffer(srcBuffer), dstBuffer(dstBuffer), region(region)
+	CmdBufferToBufferCopy(const vk::Buffer *srcBuffer, vk::Buffer *dstBuffer, const VkBufferCopy &region)
+	    : srcBuffer(srcBuffer)
+	    , dstBuffer(dstBuffer)
+	    , region(region)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		srcBuffer->copyTo(dstBuffer, region);
 	}
@@ -739,20 +774,22 @@
 	std::string description() override { return "vkCmdBufferToBufferCopy()"; }
 
 private:
-	const vk::Buffer* srcBuffer;
-	vk::Buffer* dstBuffer;
+	const vk::Buffer *srcBuffer;
+	vk::Buffer *dstBuffer;
 	const VkBufferCopy region;
 };
 
 class CmdImageToBufferCopy : public vk::CommandBuffer::Command
 {
 public:
-	CmdImageToBufferCopy(vk::Image* srcImage, vk::Buffer* dstBuffer, const VkBufferImageCopy& region) :
-		srcImage(srcImage), dstBuffer(dstBuffer), region(region)
+	CmdImageToBufferCopy(vk::Image *srcImage, vk::Buffer *dstBuffer, const VkBufferImageCopy &region)
+	    : srcImage(srcImage)
+	    , dstBuffer(dstBuffer)
+	    , region(region)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		srcImage->copyTo(dstBuffer, region);
 	}
@@ -760,20 +797,22 @@
 	std::string description() override { return "vkCmdImageToBufferCopy()"; }
 
 private:
-	vk::Image* srcImage;
-	vk::Buffer* dstBuffer;
+	vk::Image *srcImage;
+	vk::Buffer *dstBuffer;
 	const VkBufferImageCopy region;
 };
 
 class CmdBufferToImageCopy : public vk::CommandBuffer::Command
 {
 public:
-	CmdBufferToImageCopy(vk::Buffer* srcBuffer, vk::Image* dstImage, const VkBufferImageCopy& region) :
-		srcBuffer(srcBuffer), dstImage(dstImage), region(region)
+	CmdBufferToImageCopy(vk::Buffer *srcBuffer, vk::Image *dstImage, const VkBufferImageCopy &region)
+	    : srcBuffer(srcBuffer)
+	    , dstImage(dstImage)
+	    , region(region)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		dstImage->copyFrom(srcBuffer, region);
 	}
@@ -781,20 +820,23 @@
 	std::string description() override { return "vkCmdBufferToImageCopy()"; }
 
 private:
-	vk::Buffer* srcBuffer;
-	vk::Image* dstImage;
+	vk::Buffer *srcBuffer;
+	vk::Image *dstImage;
 	const VkBufferImageCopy region;
 };
 
 class CmdFillBuffer : public vk::CommandBuffer::Command
 {
 public:
-	CmdFillBuffer(vk::Buffer* dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) :
-		dstBuffer(dstBuffer), dstOffset(dstOffset), size(size), data(data)
+	CmdFillBuffer(vk::Buffer *dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data)
+	    : dstBuffer(dstBuffer)
+	    , dstOffset(dstOffset)
+	    , size(size)
+	    , data(data)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		dstBuffer->fill(dstOffset, size, data);
 	}
@@ -802,7 +844,7 @@
 	std::string description() override { return "vkCmdFillBuffer()"; }
 
 private:
-	vk::Buffer* dstBuffer;
+	vk::Buffer *dstBuffer;
 	VkDeviceSize dstOffset;
 	VkDeviceSize size;
 	uint32_t data;
@@ -811,12 +853,14 @@
 class CmdUpdateBuffer : public vk::CommandBuffer::Command
 {
 public:
-	CmdUpdateBuffer(vk::Buffer* dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint8_t* pData) :
-		dstBuffer(dstBuffer), dstOffset(dstOffset), data(pData, &pData[dataSize])
+	CmdUpdateBuffer(vk::Buffer *dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint8_t *pData)
+	    : dstBuffer(dstBuffer)
+	    , dstOffset(dstOffset)
+	    , data(pData, &pData[dataSize])
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		dstBuffer->update(dstOffset, data.size(), data.data());
 	}
@@ -824,20 +868,22 @@
 	std::string description() override { return "vkCmdUpdateBuffer()"; }
 
 private:
-	vk::Buffer* dstBuffer;
+	vk::Buffer *dstBuffer;
 	VkDeviceSize dstOffset;
-	std::vector<uint8_t> data; // FIXME (b/119409619): replace this vector by an allocator so we can control all memory allocations
+	std::vector<uint8_t> data;  // FIXME (b/119409619): replace this vector by an allocator so we can control all memory allocations
 };
 
 class CmdClearColorImage : public vk::CommandBuffer::Command
 {
 public:
-	CmdClearColorImage(vk::Image* image, const VkClearColorValue& color, const VkImageSubresourceRange& range) :
-		image(image), color(color), range(range)
+	CmdClearColorImage(vk::Image *image, const VkClearColorValue &color, const VkImageSubresourceRange &range)
+	    : image(image)
+	    , color(color)
+	    , range(range)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		image->clear(color, range);
 	}
@@ -845,7 +891,7 @@
 	std::string description() override { return "vkCmdClearColorImage()"; }
 
 private:
-	vk::Image* image;
+	vk::Image *image;
 	const VkClearColorValue color;
 	const VkImageSubresourceRange range;
 };
@@ -853,12 +899,14 @@
 class CmdClearDepthStencilImage : public vk::CommandBuffer::Command
 {
 public:
-	CmdClearDepthStencilImage(vk::Image* image, const VkClearDepthStencilValue& depthStencil, const VkImageSubresourceRange& range) :
-		image(image), depthStencil(depthStencil), range(range)
+	CmdClearDepthStencilImage(vk::Image *image, const VkClearDepthStencilValue &depthStencil, const VkImageSubresourceRange &range)
+	    : image(image)
+	    , depthStencil(depthStencil)
+	    , range(range)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		image->clear(depthStencil, range);
 	}
@@ -866,7 +914,7 @@
 	std::string description() override { return "vkCmdClearDepthStencilImage()"; }
 
 private:
-	vk::Image* image;
+	vk::Image *image;
 	const VkClearDepthStencilValue depthStencil;
 	const VkImageSubresourceRange range;
 };
@@ -874,12 +922,13 @@
 class CmdClearAttachment : public vk::CommandBuffer::Command
 {
 public:
-	CmdClearAttachment(const VkClearAttachment& attachment, const VkClearRect& rect) :
-		attachment(attachment), rect(rect)
+	CmdClearAttachment(const VkClearAttachment &attachment, const VkClearRect &rect)
+	    : attachment(attachment)
+	    , rect(rect)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		// attachment clears are drawing operations, and so have rasterization-order guarantees.
 		// however, we don't do the clear through the rasterizer, so need to ensure prior drawing
@@ -898,12 +947,15 @@
 class CmdBlitImage : public vk::CommandBuffer::Command
 {
 public:
-	CmdBlitImage(const vk::Image* srcImage, vk::Image* dstImage, const VkImageBlit& region, VkFilter filter) :
-		srcImage(srcImage), dstImage(dstImage), region(region), filter(filter)
+	CmdBlitImage(const vk::Image *srcImage, vk::Image *dstImage, const VkImageBlit &region, VkFilter filter)
+	    : srcImage(srcImage)
+	    , dstImage(dstImage)
+	    , region(region)
+	    , filter(filter)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		srcImage->blit(dstImage, region, filter);
 	}
@@ -911,8 +963,8 @@
 	std::string description() override { return "vkCmdBlitImage()"; }
 
 private:
-	const vk::Image* srcImage;
-	vk::Image* dstImage;
+	const vk::Image *srcImage;
+	vk::Image *dstImage;
 	VkImageBlit region;
 	VkFilter filter;
 };
@@ -920,12 +972,14 @@
 class CmdResolveImage : public vk::CommandBuffer::Command
 {
 public:
-	CmdResolveImage(const vk::Image* srcImage, vk::Image* dstImage, const VkImageResolve& region) :
-		srcImage(srcImage), dstImage(dstImage), region(region)
+	CmdResolveImage(const vk::Image *srcImage, vk::Image *dstImage, const VkImageResolve &region)
+	    : srcImage(srcImage)
+	    , dstImage(dstImage)
+	    , region(region)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		srcImage->resolve(dstImage, region);
 	}
@@ -933,15 +987,15 @@
 	std::string description() override { return "vkCmdResolveImage()"; }
 
 private:
-	const vk::Image* srcImage;
-	vk::Image* dstImage;
+	const vk::Image *srcImage;
+	vk::Image *dstImage;
 	VkImageResolve region;
 };
 
 class CmdPipelineBarrier : public vk::CommandBuffer::Command
 {
 public:
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		// This is a very simple implementation that simply calls sw::Renderer::synchronize(),
 		// since the driver is free to move the source stage towards the bottom of the pipe
@@ -959,11 +1013,13 @@
 class CmdSignalEvent : public vk::CommandBuffer::Command
 {
 public:
-	CmdSignalEvent(vk::Event* ev, VkPipelineStageFlags stageMask) : ev(ev), stageMask(stageMask)
+	CmdSignalEvent(vk::Event *ev, VkPipelineStageFlags stageMask)
+	    : ev(ev)
+	    , stageMask(stageMask)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		executionState.renderer->synchronize();
 		ev->signal();
@@ -972,18 +1028,20 @@
 	std::string description() override { return "vkCmdSignalEvent()"; }
 
 private:
-	vk::Event* ev;
-	VkPipelineStageFlags stageMask; // FIXME(b/117835459) : We currently ignore the flags and signal the event at the last stage
+	vk::Event *ev;
+	VkPipelineStageFlags stageMask;  // FIXME(b/117835459) : We currently ignore the flags and signal the event at the last stage
 };
 
 class CmdResetEvent : public vk::CommandBuffer::Command
 {
 public:
-	CmdResetEvent(vk::Event* ev, VkPipelineStageFlags stageMask) : ev(ev), stageMask(stageMask)
+	CmdResetEvent(vk::Event *ev, VkPipelineStageFlags stageMask)
+	    : ev(ev)
+	    , stageMask(stageMask)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		ev->reset();
 	}
@@ -991,18 +1049,19 @@
 	std::string description() override { return "vkCmdResetEvent()"; }
 
 private:
-	vk::Event* ev;
-	VkPipelineStageFlags stageMask; // FIXME(b/117835459) : We currently ignore the flags and reset the event at the last stage
+	vk::Event *ev;
+	VkPipelineStageFlags stageMask;  // FIXME(b/117835459) : We currently ignore the flags and reset the event at the last stage
 };
 
 class CmdWaitEvent : public vk::CommandBuffer::Command
 {
 public:
-	CmdWaitEvent(vk::Event* ev) : ev(ev)
+	CmdWaitEvent(vk::Event *ev)
+	    : ev(ev)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		executionState.renderer->synchronize();
 		ev->wait();
@@ -1011,16 +1070,19 @@
 	std::string description() override { return "vkCmdWaitEvent()"; }
 
 private:
-	vk::Event* ev;
+	vk::Event *ev;
 };
 
 class CmdBindDescriptorSet : public vk::CommandBuffer::Command
 {
 public:
-	CmdBindDescriptorSet(VkPipelineBindPoint pipelineBindPoint, const vk::PipelineLayout *pipelineLayout, uint32_t set, vk::DescriptorSet* descriptorSet,
-		uint32_t dynamicOffsetCount, uint32_t const *dynamicOffsets)
-		: pipelineBindPoint(pipelineBindPoint), pipelineLayout(pipelineLayout), set(set), descriptorSet(descriptorSet),
-		  dynamicOffsetCount(dynamicOffsetCount)
+	CmdBindDescriptorSet(VkPipelineBindPoint pipelineBindPoint, const vk::PipelineLayout *pipelineLayout, uint32_t set, vk::DescriptorSet *descriptorSet,
+	                     uint32_t dynamicOffsetCount, uint32_t const *dynamicOffsets)
+	    : pipelineBindPoint(pipelineBindPoint)
+	    , pipelineLayout(pipelineLayout)
+	    , set(set)
+	    , descriptorSet(descriptorSet)
+	    , dynamicOffsetCount(dynamicOffsetCount)
 	{
 		for(uint32_t i = 0; i < dynamicOffsetCount; i++)
 		{
@@ -1028,7 +1090,7 @@
 		}
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		ASSERT_OR_RETURN((pipelineBindPoint < VK_PIPELINE_BIND_POINT_RANGE_SIZE) && (set < vk::MAX_BOUND_DESCRIPTOR_SETS));
 		auto &pipelineState = executionState.pipelineState[pipelineBindPoint];
@@ -1048,7 +1110,7 @@
 	VkPipelineBindPoint pipelineBindPoint;
 	const vk::PipelineLayout *pipelineLayout;
 	uint32_t set;
-	vk::DescriptorSet* descriptorSet;
+	vk::DescriptorSet *descriptorSet;
 	uint32_t dynamicOffsetCount;
 	vk::DescriptorSet::DynamicOffsets dynamicOffsets;
 };
@@ -1057,7 +1119,8 @@
 {
 public:
 	CmdSetPushConstants(uint32_t offset, uint32_t size, void const *pValues)
-		: offset(offset), size(size)
+	    : offset(offset)
+	    , size(size)
 	{
 		ASSERT(offset < vk::MAX_PUSH_CONSTANT_SIZE);
 		ASSERT(offset + size <= vk::MAX_PUSH_CONSTANT_SIZE);
@@ -1065,7 +1128,7 @@
 		memcpy(data, pValues, size);
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		memcpy(&executionState.pushConstants.data[offset], data, size);
 	}
@@ -1081,12 +1144,14 @@
 class CmdBeginQuery : public vk::CommandBuffer::Command
 {
 public:
-	CmdBeginQuery(vk::QueryPool* queryPool, uint32_t query, VkQueryControlFlags flags)
-		: queryPool(queryPool), query(query), flags(flags)
+	CmdBeginQuery(vk::QueryPool *queryPool, uint32_t query, VkQueryControlFlags flags)
+	    : queryPool(queryPool)
+	    , query(query)
+	    , flags(flags)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		queryPool->begin(query, flags);
 		executionState.renderer->addQuery(queryPool->getQuery(query));
@@ -1095,7 +1160,7 @@
 	std::string description() override { return "vkCmdBeginQuery()"; }
 
 private:
-	vk::QueryPool* queryPool;
+	vk::QueryPool *queryPool;
 	uint32_t query;
 	VkQueryControlFlags flags;
 };
@@ -1103,12 +1168,13 @@
 class CmdEndQuery : public vk::CommandBuffer::Command
 {
 public:
-	CmdEndQuery(vk::QueryPool* queryPool, uint32_t query)
-		: queryPool(queryPool), query(query)
+	CmdEndQuery(vk::QueryPool *queryPool, uint32_t query)
+	    : queryPool(queryPool)
+	    , query(query)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		executionState.renderer->removeQuery(queryPool->getQuery(query));
 		queryPool->end(query);
@@ -1117,19 +1183,21 @@
 	std::string description() override { return "vkCmdEndQuery()"; }
 
 private:
-	vk::QueryPool* queryPool;
+	vk::QueryPool *queryPool;
 	uint32_t query;
 };
 
 class CmdResetQueryPool : public vk::CommandBuffer::Command
 {
 public:
-	CmdResetQueryPool(vk::QueryPool* queryPool, uint32_t firstQuery, uint32_t queryCount)
-		: queryPool(queryPool), firstQuery(firstQuery), queryCount(queryCount)
+	CmdResetQueryPool(vk::QueryPool *queryPool, uint32_t firstQuery, uint32_t queryCount)
+	    : queryPool(queryPool)
+	    , firstQuery(firstQuery)
+	    , queryCount(queryCount)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		queryPool->reset(firstQuery, queryCount);
 	}
@@ -1137,7 +1205,7 @@
 	std::string description() override { return "vkCmdResetQueryPool()"; }
 
 private:
-	vk::QueryPool* queryPool;
+	vk::QueryPool *queryPool;
 	uint32_t firstQuery;
 	uint32_t queryCount;
 };
@@ -1145,12 +1213,14 @@
 class CmdWriteTimeStamp : public vk::CommandBuffer::Command
 {
 public:
-	CmdWriteTimeStamp(vk::QueryPool* queryPool, uint32_t query, VkPipelineStageFlagBits stage)
-		: queryPool(queryPool), query(query), stage(stage)
+	CmdWriteTimeStamp(vk::QueryPool *queryPool, uint32_t query, VkPipelineStageFlagBits stage)
+	    : queryPool(queryPool)
+	    , query(query)
+	    , stage(stage)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		if(stage & ~(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT))
 		{
@@ -1169,7 +1239,7 @@
 	std::string description() override { return "vkCmdWriteTimeStamp()"; }
 
 private:
-	vk::QueryPool* queryPool;
+	vk::QueryPool *queryPool;
 	uint32_t query;
 	VkPipelineStageFlagBits stage;
 };
@@ -1177,14 +1247,19 @@
 class CmdCopyQueryPoolResults : public vk::CommandBuffer::Command
 {
 public:
-	CmdCopyQueryPoolResults(const vk::QueryPool* queryPool, uint32_t firstQuery, uint32_t queryCount,
-		vk::Buffer* dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags)
-		: queryPool(queryPool), firstQuery(firstQuery), queryCount(queryCount),
-		  dstBuffer(dstBuffer), dstOffset(dstOffset), stride(stride), flags(flags)
+	CmdCopyQueryPoolResults(const vk::QueryPool *queryPool, uint32_t firstQuery, uint32_t queryCount,
+	                        vk::Buffer *dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags)
+	    : queryPool(queryPool)
+	    , firstQuery(firstQuery)
+	    , queryCount(queryCount)
+	    , dstBuffer(dstBuffer)
+	    , dstOffset(dstOffset)
+	    , stride(stride)
+	    , flags(flags)
 	{
 	}
 
-	void play(vk::CommandBuffer::ExecutionState& executionState) override
+	void play(vk::CommandBuffer::ExecutionState &executionState) override
 	{
 		queryPool->getResults(firstQuery, queryCount, dstBuffer->getSize() - dstOffset,
 		                      dstBuffer->getOffsetPointer(dstOffset), stride, flags);
@@ -1193,10 +1268,10 @@
 	std::string description() override { return "vkCmdCopyQueryPoolResults()"; }
 
 private:
-	const vk::QueryPool* queryPool;
+	const vk::QueryPool *queryPool;
 	uint32_t firstQuery;
 	uint32_t queryCount;
-	vk::Buffer* dstBuffer;
+	vk::Buffer *dstBuffer;
 	VkDeviceSize dstOffset;
 	VkDeviceSize stride;
 	VkQueryResultFlags flags;
@@ -1206,13 +1281,14 @@
 
 namespace vk {
 
-CommandBuffer::CommandBuffer(VkCommandBufferLevel pLevel) : level(pLevel)
+CommandBuffer::CommandBuffer(VkCommandBufferLevel pLevel)
+    : level(pLevel)
 {
 	// FIXME (b/119409619): replace this vector by an allocator so we can control all memory allocations
-	commands = new std::vector<std::unique_ptr<Command> >();
+	commands = new std::vector<std::unique_ptr<Command>>();
 }
 
-void CommandBuffer::destroy(const VkAllocationCallbacks* pAllocator)
+void CommandBuffer::destroy(const VkAllocationCallbacks *pAllocator)
 {
 	delete commands;
 }
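One last mechanical fix shows up in this hunk: the C++03-era space between nested template closers (std::vector<std::unique_ptr<Command> >) is dropped, which has been valid since C++11 and corresponds to clang-format's Standard: Cpp11 (inferred). Cast spacing tightens the same way, as in the (void)flags line below. Sketch:

    #include <memory>
    #include <vector>

    struct Command;  // forward declaration, for illustration only

    // C++11 parses the nested '>>' correctly; no separating space is needed:
    using CommandList = std::vector<std::unique_ptr<Command>>;
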
@@ -1225,14 +1301,14 @@
 	state = INITIAL;
 }
 
-VkResult CommandBuffer::begin(VkCommandBufferUsageFlags flags, const VkCommandBufferInheritanceInfo* pInheritanceInfo)
+VkResult CommandBuffer::begin(VkCommandBufferUsageFlags flags, const VkCommandBufferInheritanceInfo *pInheritanceInfo)
 {
 	ASSERT((state != RECORDING) && (state != PENDING));
 
 	// Nothing interesting to do based on flags. We don't have any optimizations
 	// to apply for ONE_TIME_SUBMIT or (lack of) SIMULTANEOUS_USE. RENDER_PASS_CONTINUE
 	// must also provide a non-null pInheritanceInfo, which we don't implement yet, but is caught below.
-	(void) flags;
+	(void)flags;
 
 	// pInheritanceInfo merely contains optimization hints, so we currently ignore it
 
@@ -1266,14 +1342,14 @@
 }
 
 template<typename T, typename... Args>
-void CommandBuffer::addCommand(Args&&... args)
+void CommandBuffer::addCommand(Args &&... args)
 {
 	// FIXME (b/119409619): use an allocator here so we can control all memory allocations
 	commands->push_back(std::unique_ptr<T>(new T(std::forward<Args>(args)...)));
 }
 
-void CommandBuffer::beginRenderPass(RenderPass* renderPass, Framebuffer* framebuffer, VkRect2D renderArea,
-                                    uint32_t clearValueCount, const VkClearValue* clearValues, VkSubpassContents contents)
+void CommandBuffer::beginRenderPass(RenderPass *renderPass, Framebuffer *framebuffer, VkRect2D renderArea,
+                                    uint32_t clearValueCount, const VkClearValue *clearValues, VkSubpassContents contents)
 {
 	ASSERT(state == RECORDING);
 
@@ -1292,7 +1368,7 @@
 	addCommand<::CmdEndRenderPass>();
 }
 
-void CommandBuffer::executeCommands(uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers)
+void CommandBuffer::executeCommands(uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers)
 {
 	ASSERT(state == RECORDING);
 
@@ -1315,14 +1391,14 @@
 
 void CommandBuffer::pipelineBarrier(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                                     VkDependencyFlags dependencyFlags,
-                                    uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
-                                    uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
-                                    uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers)
+                                    uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
+                                    uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+                                    uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
 {
 	addCommand<::CmdPipelineBarrier>();
 }
 
-void CommandBuffer::bindPipeline(VkPipelineBindPoint pipelineBindPoint, Pipeline* pipeline)
+void CommandBuffer::bindPipeline(VkPipelineBindPoint pipelineBindPoint, Pipeline *pipeline)
 {
 	switch(pipelineBindPoint)
 	{
@@ -1336,7 +1412,7 @@
 }
 
 void CommandBuffer::bindVertexBuffers(uint32_t firstBinding, uint32_t bindingCount,
-                                      const VkBuffer* pBuffers, const VkDeviceSize* pOffsets)
+                                      const VkBuffer *pBuffers, const VkDeviceSize *pOffsets)
 {
 	for(uint32_t i = 0; i < bindingCount; ++i)
 	{
@@ -1344,39 +1420,39 @@
 	}
 }
 
-void CommandBuffer::beginQuery(QueryPool* queryPool, uint32_t query, VkQueryControlFlags flags)
+void CommandBuffer::beginQuery(QueryPool *queryPool, uint32_t query, VkQueryControlFlags flags)
 {
 	addCommand<::CmdBeginQuery>(queryPool, query, flags);
 }
 
-void CommandBuffer::endQuery(QueryPool* queryPool, uint32_t query)
+void CommandBuffer::endQuery(QueryPool *queryPool, uint32_t query)
 {
 	addCommand<::CmdEndQuery>(queryPool, query);
 }
 
-void CommandBuffer::resetQueryPool(QueryPool* queryPool, uint32_t firstQuery, uint32_t queryCount)
+void CommandBuffer::resetQueryPool(QueryPool *queryPool, uint32_t firstQuery, uint32_t queryCount)
 {
 	addCommand<::CmdResetQueryPool>(queryPool, firstQuery, queryCount);
 }
 
-void CommandBuffer::writeTimestamp(VkPipelineStageFlagBits pipelineStage, QueryPool* queryPool, uint32_t query)
+void CommandBuffer::writeTimestamp(VkPipelineStageFlagBits pipelineStage, QueryPool *queryPool, uint32_t query)
 {
 	addCommand<::CmdWriteTimeStamp>(queryPool, query, pipelineStage);
 }
 
-void CommandBuffer::copyQueryPoolResults(const QueryPool* queryPool, uint32_t firstQuery, uint32_t queryCount,
-	Buffer* dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags)
+void CommandBuffer::copyQueryPoolResults(const QueryPool *queryPool, uint32_t firstQuery, uint32_t queryCount,
+                                         Buffer *dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags)
 {
 	addCommand<::CmdCopyQueryPoolResults>(queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride, flags);
 }
 
-void CommandBuffer::pushConstants(PipelineLayout* layout, VkShaderStageFlags stageFlags,
-	uint32_t offset, uint32_t size, const void* pValues)
+void CommandBuffer::pushConstants(PipelineLayout *layout, VkShaderStageFlags stageFlags,
+                                  uint32_t offset, uint32_t size, const void *pValues)
 {
 	addCommand<::CmdSetPushConstants>(offset, size, pValues);
 }
 
-void CommandBuffer::setViewport(uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports)
+void CommandBuffer::setViewport(uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports)
 {
 	if(firstViewport != 0 || viewportCount > 1)
 	{
@@ -1389,7 +1465,7 @@
 	}
 }
 
-void CommandBuffer::setScissor(uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors)
+void CommandBuffer::setScissor(uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors)
 {
 	if(firstScissor != 0 || scissorCount > 1)
 	{
@@ -1447,9 +1523,9 @@
 	addCommand<::CmdSetStencilReference>(faceMask, reference);
 }
 
-void CommandBuffer::bindDescriptorSets(VkPipelineBindPoint pipelineBindPoint, const PipelineLayout* layout,
-	uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets,
-	uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets)
+void CommandBuffer::bindDescriptorSets(VkPipelineBindPoint pipelineBindPoint, const PipelineLayout *layout,
+                                       uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet *pDescriptorSets,
+                                       uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets)
 {
 	ASSERT(state == RECORDING);
 
@@ -1463,15 +1539,15 @@
 		ASSERT(dynamicOffsetCount >= numDynamicDescriptors);
 
 		addCommand<::CmdBindDescriptorSet>(
-				pipelineBindPoint, layout, descriptorSetIndex, vk::Cast(pDescriptorSets[i]),
-				dynamicOffsetCount, pDynamicOffsets);
+		    pipelineBindPoint, layout, descriptorSetIndex, vk::Cast(pDescriptorSets[i]),
+		    dynamicOffsetCount, pDynamicOffsets);
 
 		pDynamicOffsets += numDynamicDescriptors;
 		dynamicOffsetCount -= numDynamicDescriptors;
 	}
 }
 
-void CommandBuffer::bindIndexBuffer(Buffer* buffer, VkDeviceSize offset, VkIndexType indexType)
+void CommandBuffer::bindIndexBuffer(Buffer *buffer, VkDeviceSize offset, VkIndexType indexType)
 {
 	addCommand<::CmdIndexBufferBind>(buffer, offset, indexType);
 }
@@ -1481,12 +1557,12 @@
 	addCommand<::CmdDispatch>(0, 0, 0, groupCountX, groupCountY, groupCountZ);
 }
 
-void CommandBuffer::dispatchIndirect(Buffer* buffer, VkDeviceSize offset)
+void CommandBuffer::dispatchIndirect(Buffer *buffer, VkDeviceSize offset)
 {
 	addCommand<::CmdDispatchIndirect>(buffer, offset);
 }
 
-void CommandBuffer::copyBuffer(const Buffer* srcBuffer, Buffer* dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions)
+void CommandBuffer::copyBuffer(const Buffer *srcBuffer, Buffer *dstBuffer, uint32_t regionCount, const VkBufferCopy *pRegions)
 {
 	ASSERT(state == RECORDING);
 
@@ -1496,8 +1572,8 @@
 	}
 }
 
-void CommandBuffer::copyImage(const Image* srcImage, VkImageLayout srcImageLayout, Image* dstImage, VkImageLayout dstImageLayout,
-	uint32_t regionCount, const VkImageCopy* pRegions)
+void CommandBuffer::copyImage(const Image *srcImage, VkImageLayout srcImageLayout, Image *dstImage, VkImageLayout dstImageLayout,
+                              uint32_t regionCount, const VkImageCopy *pRegions)
 {
 	ASSERT(state == RECORDING);
 	ASSERT(srcImageLayout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL ||
@@ -1511,8 +1587,8 @@
 	}
 }
 
-void CommandBuffer::blitImage(const Image* srcImage, VkImageLayout srcImageLayout, Image* dstImage, VkImageLayout dstImageLayout,
-	uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter)
+void CommandBuffer::blitImage(const Image *srcImage, VkImageLayout srcImageLayout, Image *dstImage, VkImageLayout dstImageLayout,
+                              uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter)
 {
 	ASSERT(state == RECORDING);
 	ASSERT(srcImageLayout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL ||
@@ -1526,8 +1602,8 @@
 	}
 }
 
-void CommandBuffer::copyBufferToImage(Buffer* srcBuffer, Image* dstImage, VkImageLayout dstImageLayout,
-	uint32_t regionCount, const VkBufferImageCopy* pRegions)
+void CommandBuffer::copyBufferToImage(Buffer *srcBuffer, Image *dstImage, VkImageLayout dstImageLayout,
+                                      uint32_t regionCount, const VkBufferImageCopy *pRegions)
 {
 	ASSERT(state == RECORDING);
 
@@ -1537,8 +1613,8 @@
 	}
 }
 
-void CommandBuffer::copyImageToBuffer(Image* srcImage, VkImageLayout srcImageLayout, Buffer* dstBuffer,
-	uint32_t regionCount, const VkBufferImageCopy* pRegions)
+void CommandBuffer::copyImageToBuffer(Image *srcImage, VkImageLayout srcImageLayout, Buffer *dstBuffer,
+                                      uint32_t regionCount, const VkBufferImageCopy *pRegions)
 {
 	ASSERT(state == RECORDING);
 	ASSERT(srcImageLayout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL || srcImageLayout == VK_IMAGE_LAYOUT_GENERAL);
@@ -1549,22 +1625,22 @@
 	}
 }
 
-void CommandBuffer::updateBuffer(Buffer* dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData)
+void CommandBuffer::updateBuffer(Buffer *dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData)
 {
 	ASSERT(state == RECORDING);
 
-	addCommand<::CmdUpdateBuffer>(dstBuffer, dstOffset, dataSize, reinterpret_cast<const uint8_t*>(pData));
+	addCommand<::CmdUpdateBuffer>(dstBuffer, dstOffset, dataSize, reinterpret_cast<const uint8_t *>(pData));
 }
 
-void CommandBuffer::fillBuffer(Buffer* dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data)
+void CommandBuffer::fillBuffer(Buffer *dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data)
 {
 	ASSERT(state == RECORDING);
 
 	addCommand<::CmdFillBuffer>(dstBuffer, dstOffset, size, data);
 }
 
-void CommandBuffer::clearColorImage(Image* image, VkImageLayout imageLayout, const VkClearColorValue* pColor,
-	uint32_t rangeCount, const VkImageSubresourceRange* pRanges)
+void CommandBuffer::clearColorImage(Image *image, VkImageLayout imageLayout, const VkClearColorValue *pColor,
+                                    uint32_t rangeCount, const VkImageSubresourceRange *pRanges)
 {
 	ASSERT(state == RECORDING);
 
@@ -1574,8 +1650,8 @@
 	}
 }
 
-void CommandBuffer::clearDepthStencilImage(Image* image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil,
-	uint32_t rangeCount, const VkImageSubresourceRange* pRanges)
+void CommandBuffer::clearDepthStencilImage(Image *image, VkImageLayout imageLayout, const VkClearDepthStencilValue *pDepthStencil,
+                                           uint32_t rangeCount, const VkImageSubresourceRange *pRanges)
 {
 	ASSERT(state == RECORDING);
 
@@ -1585,8 +1661,8 @@
 	}
 }
 
-void CommandBuffer::clearAttachments(uint32_t attachmentCount, const VkClearAttachment* pAttachments,
-	uint32_t rectCount, const VkClearRect* pRects)
+void CommandBuffer::clearAttachments(uint32_t attachmentCount, const VkClearAttachment *pAttachments,
+                                     uint32_t rectCount, const VkClearRect *pRects)
 {
 	ASSERT(state == RECORDING);
 
@@ -1599,8 +1675,8 @@
 	}
 }
 
-void CommandBuffer::resolveImage(const Image* srcImage, VkImageLayout srcImageLayout, Image* dstImage, VkImageLayout dstImageLayout,
-	uint32_t regionCount, const VkImageResolve* pRegions)
+void CommandBuffer::resolveImage(const Image *srcImage, VkImageLayout srcImageLayout, Image *dstImage, VkImageLayout dstImageLayout,
+                                 uint32_t regionCount, const VkImageResolve *pRegions)
 {
 	ASSERT(state == RECORDING);
 	ASSERT(srcImageLayout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL ||
@@ -1614,24 +1690,24 @@
 	}
 }
 
-void CommandBuffer::setEvent(Event* event, VkPipelineStageFlags stageMask)
+void CommandBuffer::setEvent(Event *event, VkPipelineStageFlags stageMask)
 {
 	ASSERT(state == RECORDING);
 
 	addCommand<::CmdSignalEvent>(event, stageMask);
 }
 
-void CommandBuffer::resetEvent(Event* event, VkPipelineStageFlags stageMask)
+void CommandBuffer::resetEvent(Event *event, VkPipelineStageFlags stageMask)
 {
 	ASSERT(state == RECORDING);
 
 	addCommand<::CmdResetEvent>(event, stageMask);
 }
 
-void CommandBuffer::waitEvents(uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask,
-	VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
-	uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
-	uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers)
+void CommandBuffer::waitEvents(uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags srcStageMask,
+                               VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
+                               uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+                               uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
 {
 	ASSERT(state == RECORDING);
 
@@ -1654,22 +1730,22 @@
 	addCommand<::CmdDrawIndexed>(indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
 }
 
-void CommandBuffer::drawIndirect(Buffer* buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride)
+void CommandBuffer::drawIndirect(Buffer *buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride)
 {
 	addCommand<::CmdDrawIndirect>(buffer, offset, drawCount, stride);
 }
 
-void CommandBuffer::drawIndexedIndirect(Buffer* buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride)
+void CommandBuffer::drawIndexedIndirect(Buffer *buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride)
 {
 	addCommand<::CmdDrawIndexedIndirect>(buffer, offset, drawCount, stride);
 }
 
-void CommandBuffer::submit(CommandBuffer::ExecutionState& executionState)
+void CommandBuffer::submit(CommandBuffer::ExecutionState &executionState)
 {
 	// Perform recorded work
 	state = PENDING;
 
-	for(auto& command : *commands)
+	for(auto &command : *commands)
 	{
 		command->play(executionState);
 	}
@@ -1678,15 +1754,15 @@
 	state = EXECUTABLE;
 }
 
-void CommandBuffer::submitSecondary(CommandBuffer::ExecutionState& executionState) const
+void CommandBuffer::submitSecondary(CommandBuffer::ExecutionState &executionState) const
 {
-	for(auto& command : *commands)
+	for(auto &command : *commands)
 	{
 		command->play(executionState);
 	}
 }
 
-void CommandBuffer::ExecutionState::bindVertexInputs(sw::Context& context, int firstInstance)
+void CommandBuffer::ExecutionState::bindVertexInputs(sw::Context &context, int firstInstance)
 {
 	for(uint32_t i = 0; i < MAX_VERTEX_INPUT_BINDINGS; i++)
 	{
@@ -1704,14 +1780,14 @@
 	}
 }
 
-void CommandBuffer::ExecutionState::bindAttachments(sw::Context& context)
+void CommandBuffer::ExecutionState::bindAttachments(sw::Context &context)
 {
 	// Binds all the attachments for the current subpass.
 	// Ideally this would be performed by BeginRenderPass and NextSubpass, but
 	// there is too much stomping of the renderer's state by setContext() in
 	// draws.
 
-	auto const & subpass = renderPass->getSubpass(subpassIndex);
+	auto const &subpass = renderPass->getSubpass(subpassIndex);
 
 	for(auto i = 0u; i < subpass.colorAttachmentCount; i++)
 	{
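
Note on the submit/submitSecondary loops reformatted above: a SwiftShader command buffer records polymorphic Command objects and replays them via play() at submit time. The sketch below is a minimal, self-contained rendition of that record-and-replay pattern; the contents of ExecutionState and the CmdDraw payload are placeholders, not the real definitions from VkCommandBuffer.cpp.

    #include <cstdint>
    #include <memory>
    #include <vector>

    struct ExecutionState {};  // stands in for CommandBuffer::ExecutionState

    // Mirrors the abstract Command class declared in VkCommandBuffer.hpp.
    struct Command
    {
        virtual ~Command() = default;
        virtual void play(ExecutionState &executionState) = 0;
    };

    struct CmdDraw : public Command
    {
        uint32_t vertexCount;
        explicit CmdDraw(uint32_t vertexCount) : vertexCount(vertexCount) {}
        void play(ExecutionState &executionState) override { /* issue the draw */ }
    };

    int main()
    {
        // Recording: each vkCmd* entry point appends one Command.
        std::vector<std::unique_ptr<Command>> commands;
        commands.push_back(std::make_unique<CmdDraw>(3));

        // Submission: CommandBuffer::submit() simply replays the list.
        ExecutionState executionState;
        for(auto &command : commands)
        {
            command->play(executionState);
        }
        return 0;
    }
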
diff --git a/src/Vulkan/VkCommandBuffer.hpp b/src/Vulkan/VkCommandBuffer.hpp
index d4d3ec1..497e8d1 100644
--- a/src/Vulkan/VkCommandBuffer.hpp
+++ b/src/Vulkan/VkCommandBuffer.hpp
@@ -16,8 +16,8 @@
 #define VK_COMMAND_BUFFER_HPP_
 
 #include "VkConfig.h"
-#include "VkObject.hpp"
 #include "VkDescriptorSet.hpp"
+#include "VkObject.hpp"
 #include "Device/Color.hpp"
 #include "Device/Context.hpp"
 #include <memory>
@@ -49,46 +49,46 @@
 
 	CommandBuffer(VkCommandBufferLevel pLevel);
 
-	static inline CommandBuffer* Cast(VkCommandBuffer object)
+	static inline CommandBuffer *Cast(VkCommandBuffer object)
 	{
-		return reinterpret_cast<CommandBuffer*>(object);
+		return reinterpret_cast<CommandBuffer *>(object);
 	}
 
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
-	VkResult begin(VkCommandBufferUsageFlags flags, const VkCommandBufferInheritanceInfo* pInheritanceInfo);
+	VkResult begin(VkCommandBufferUsageFlags flags, const VkCommandBufferInheritanceInfo *pInheritanceInfo);
 	VkResult end();
 	VkResult reset(VkCommandPoolResetFlags flags);
 
-	void beginRenderPass(RenderPass* renderPass, Framebuffer* framebuffer, VkRect2D renderArea,
-	                     uint32_t clearValueCount, const VkClearValue* pClearValues, VkSubpassContents contents);
+	void beginRenderPass(RenderPass *renderPass, Framebuffer *framebuffer, VkRect2D renderArea,
+	                     uint32_t clearValueCount, const VkClearValue *pClearValues, VkSubpassContents contents);
 	void nextSubpass(VkSubpassContents contents);
 	void endRenderPass();
-	void executeCommands(uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers);
+	void executeCommands(uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers);
 
 	void setDeviceMask(uint32_t deviceMask);
 	void dispatchBase(uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ,
 	                  uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
 
 	void pipelineBarrier(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
-	                     uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
-	                     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
-	                     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
-	void bindPipeline(VkPipelineBindPoint pipelineBindPoint, Pipeline* pipeline);
+	                     uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
+	                     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+	                     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers);
+	void bindPipeline(VkPipelineBindPoint pipelineBindPoint, Pipeline *pipeline);
 	void bindVertexBuffers(uint32_t firstBinding, uint32_t bindingCount,
-	                       const VkBuffer* pBuffers, const VkDeviceSize* pOffsets);
+	                       const VkBuffer *pBuffers, const VkDeviceSize *pOffsets);
 
-	void beginQuery(QueryPool* queryPool, uint32_t query, VkQueryControlFlags flags);
-	void endQuery(QueryPool* queryPool, uint32_t query);
-	void resetQueryPool(QueryPool* queryPool, uint32_t firstQuery, uint32_t queryCount);
-	void writeTimestamp(VkPipelineStageFlagBits pipelineStage, QueryPool* queryPool, uint32_t query);
-	void copyQueryPoolResults(const QueryPool* queryPool, uint32_t firstQuery, uint32_t queryCount,
-	                          Buffer* dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags);
-	void pushConstants(PipelineLayout* layout, VkShaderStageFlags stageFlags,
-	                   uint32_t offset, uint32_t size, const void* pValues);
+	void beginQuery(QueryPool *queryPool, uint32_t query, VkQueryControlFlags flags);
+	void endQuery(QueryPool *queryPool, uint32_t query);
+	void resetQueryPool(QueryPool *queryPool, uint32_t firstQuery, uint32_t queryCount);
+	void writeTimestamp(VkPipelineStageFlagBits pipelineStage, QueryPool *queryPool, uint32_t query);
+	void copyQueryPoolResults(const QueryPool *queryPool, uint32_t firstQuery, uint32_t queryCount,
+	                          Buffer *dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags);
+	void pushConstants(PipelineLayout *layout, VkShaderStageFlags stageFlags,
+	                   uint32_t offset, uint32_t size, const void *pValues);
 
-	void setViewport(uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports);
-	void setScissor(uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors);
+	void setViewport(uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports);
+	void setScissor(uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors);
 	void setLineWidth(float lineWidth);
 	void setDepthBias(float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor);
 	void setBlendConstants(const float blendConstants[4]);
@@ -96,42 +96,42 @@
 	void setStencilCompareMask(VkStencilFaceFlags faceMask, uint32_t compareMask);
 	void setStencilWriteMask(VkStencilFaceFlags faceMask, uint32_t writeMask);
 	void setStencilReference(VkStencilFaceFlags faceMask, uint32_t reference);
-	void bindDescriptorSets(VkPipelineBindPoint pipelineBindPoint, const PipelineLayout* layout,
-		uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets,
-		uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets);
-	void bindIndexBuffer(Buffer* buffer, VkDeviceSize offset, VkIndexType indexType);
+	void bindDescriptorSets(VkPipelineBindPoint pipelineBindPoint, const PipelineLayout *layout,
+	                        uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet *pDescriptorSets,
+	                        uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets);
+	void bindIndexBuffer(Buffer *buffer, VkDeviceSize offset, VkIndexType indexType);
 	void dispatch(uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
-	void dispatchIndirect(Buffer* buffer, VkDeviceSize offset);
-	void copyBuffer(const Buffer* srcBuffer, Buffer* dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions);
-	void copyImage(const Image* srcImage, VkImageLayout srcImageLayout, Image* dstImage, VkImageLayout dstImageLayout,
-		uint32_t regionCount, const VkImageCopy* pRegions);
-	void blitImage(const Image* srcImage, VkImageLayout srcImageLayout, Image* dstImage, VkImageLayout dstImageLayout,
-		uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter);
-	void copyBufferToImage(Buffer* srcBuffer, Image* dstImage, VkImageLayout dstImageLayout,
-		uint32_t regionCount, const VkBufferImageCopy* pRegions);
-	void copyImageToBuffer(Image* srcImage, VkImageLayout srcImageLayout, Buffer* dstBuffer,
-		uint32_t regionCount, const VkBufferImageCopy* pRegions);
-	void updateBuffer(Buffer* dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData);
-	void fillBuffer(Buffer* dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data);
-	void clearColorImage(Image* image, VkImageLayout imageLayout, const VkClearColorValue* pColor,
-		uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
-	void clearDepthStencilImage(Image* image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil,
-		uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
-	void clearAttachments(uint32_t attachmentCount, const VkClearAttachment* pAttachments,
-		uint32_t rectCount, const VkClearRect* pRects);
-	void resolveImage(const Image* srcImage, VkImageLayout srcImageLayout, Image* dstImage, VkImageLayout dstImageLayout,
-		uint32_t regionCount, const VkImageResolve* pRegions);
-	void setEvent(Event* event, VkPipelineStageFlags stageMask);
-	void resetEvent(Event* event, VkPipelineStageFlags stageMask);
-	void waitEvents(uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask,
-		VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
-		uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
-		uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
+	void dispatchIndirect(Buffer *buffer, VkDeviceSize offset);
+	void copyBuffer(const Buffer *srcBuffer, Buffer *dstBuffer, uint32_t regionCount, const VkBufferCopy *pRegions);
+	void copyImage(const Image *srcImage, VkImageLayout srcImageLayout, Image *dstImage, VkImageLayout dstImageLayout,
+	               uint32_t regionCount, const VkImageCopy *pRegions);
+	void blitImage(const Image *srcImage, VkImageLayout srcImageLayout, Image *dstImage, VkImageLayout dstImageLayout,
+	               uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter);
+	void copyBufferToImage(Buffer *srcBuffer, Image *dstImage, VkImageLayout dstImageLayout,
+	                       uint32_t regionCount, const VkBufferImageCopy *pRegions);
+	void copyImageToBuffer(Image *srcImage, VkImageLayout srcImageLayout, Buffer *dstBuffer,
+	                       uint32_t regionCount, const VkBufferImageCopy *pRegions);
+	void updateBuffer(Buffer *dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData);
+	void fillBuffer(Buffer *dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data);
+	void clearColorImage(Image *image, VkImageLayout imageLayout, const VkClearColorValue *pColor,
+	                     uint32_t rangeCount, const VkImageSubresourceRange *pRanges);
+	void clearDepthStencilImage(Image *image, VkImageLayout imageLayout, const VkClearDepthStencilValue *pDepthStencil,
+	                            uint32_t rangeCount, const VkImageSubresourceRange *pRanges);
+	void clearAttachments(uint32_t attachmentCount, const VkClearAttachment *pAttachments,
+	                      uint32_t rectCount, const VkClearRect *pRects);
+	void resolveImage(const Image *srcImage, VkImageLayout srcImageLayout, Image *dstImage, VkImageLayout dstImageLayout,
+	                  uint32_t regionCount, const VkImageResolve *pRegions);
+	void setEvent(Event *event, VkPipelineStageFlags stageMask);
+	void resetEvent(Event *event, VkPipelineStageFlags stageMask);
+	void waitEvents(uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags srcStageMask,
+	                VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
+	                uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+	                uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers);
 
 	void draw(uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
 	void drawIndexed(uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance);
-	void drawIndirect(Buffer* buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
-	void drawIndexedIndirect(Buffer* buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
+	void drawIndirect(Buffer *buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
+	void drawIndexedIndirect(Buffer *buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
 
 	// TODO(sugoi): Move ExecutionState out of CommandBuffer (possibly into Device)
 	struct ExecutionState
@@ -143,10 +143,10 @@
 			vk::DescriptorSet::DynamicOffsets descriptorDynamicOffsets = {};
 		};
 
-		sw::Renderer* renderer = nullptr;
-		sw::TaskEvents* events = nullptr;
-		RenderPass* renderPass = nullptr;
-		Framebuffer* renderPassFramebuffer = nullptr;
+		sw::Renderer *renderer = nullptr;
+		sw::TaskEvents *events = nullptr;
+		RenderPass *renderPass = nullptr;
+		Framebuffer *renderPassFramebuffer = nullptr;
 		std::array<PipelineState, VK_PIPELINE_BIND_POINT_RANGE_SIZE> pipelineState;
 
 		struct DynamicState
@@ -170,7 +170,7 @@
 
 		struct VertexInputBinding
 		{
-			Buffer* buffer;
+			Buffer *buffer;
 			VkDeviceSize offset;
 		};
 		VertexInputBinding vertexInputBindings[MAX_VERTEX_INPUT_BINDINGS] = {};
@@ -179,33 +179,42 @@
 
 		uint32_t subpassIndex = 0;
 
-		void bindAttachments(sw::Context& context);
-		void bindVertexInputs(sw::Context& context, int firstInstance);
+		void bindAttachments(sw::Context &context);
+		void bindVertexInputs(sw::Context &context, int firstInstance);
 	};
 
-	void submit(CommandBuffer::ExecutionState& executionState);
-	void submitSecondary(CommandBuffer::ExecutionState& executionState) const;
+	void submit(CommandBuffer::ExecutionState &executionState);
+	void submitSecondary(CommandBuffer::ExecutionState &executionState) const;
 
 	class Command;
+
 private:
 	void resetState();
-	template<typename T, typename... Args> void addCommand(Args&&... args);
+	template<typename T, typename... Args>
+	void addCommand(Args &&... args);
 
-	enum State { INITIAL, RECORDING, EXECUTABLE, PENDING, INVALID };
+	enum State
+	{
+		INITIAL,
+		RECORDING,
+		EXECUTABLE,
+		PENDING,
+		INVALID
+	};
 	State state = INITIAL;
 	VkCommandBufferLevel level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
 
 	// FIXME (b/119409619): replace this vector by an allocator so we can control all memory allocations
-	std::vector<std::unique_ptr<Command>>* commands;
+	std::vector<std::unique_ptr<Command>> *commands;
 };
 
 using DispatchableCommandBuffer = DispatchableObject<CommandBuffer, VkCommandBuffer>;
 
-static inline CommandBuffer* Cast(VkCommandBuffer object)
+static inline CommandBuffer *Cast(VkCommandBuffer object)
 {
 	return DispatchableCommandBuffer::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_COMMAND_BUFFER_HPP_
+#endif  // VK_COMMAND_BUFFER_HPP_
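
Note on the addCommand declaration above (template<typename T, typename... Args> void addCommand(Args &&... args)): it is a variadic factory that forwards its arguments to the concrete command's constructor and appends the result to the recorded-command vector. Its definition lives in VkCommandBuffer.cpp; the one-liner below is a plausible sketch of it, not the verbatim implementation.

    template<typename T, typename... Args>
    void CommandBuffer::addCommand(Args &&... args)
    {
        // Perfect-forward the arguments into the command's constructor and
        // transfer ownership of the new command to the recorded list.
        commands->push_back(std::make_unique<T>(std::forward<Args>(args)...));
    }
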
diff --git a/src/Vulkan/VkCommandPool.cpp b/src/Vulkan/VkCommandPool.cpp
index 9cb1603..35ec438 100644
--- a/src/Vulkan/VkCommandPool.cpp
+++ b/src/Vulkan/VkCommandPool.cpp
@@ -20,16 +20,16 @@
 
 namespace vk {
 
-CommandPool::CommandPool(const VkCommandPoolCreateInfo* pCreateInfo, void* mem)
+CommandPool::CommandPool(const VkCommandPoolCreateInfo *pCreateInfo, void *mem)
 {
 	// FIXME (b/119409619): use an allocator here so we can control all memory allocations
-	void* deviceMemory = vk::allocate(sizeof(std::set<VkCommandBuffer>), REQUIRED_MEMORY_ALIGNMENT,
+	void *deviceMemory = vk::allocate(sizeof(std::set<VkCommandBuffer>), REQUIRED_MEMORY_ALIGNMENT,
 	                                  DEVICE_MEMORY, GetAllocationScope());
 	ASSERT(deviceMemory);
-	commandBuffers = new (deviceMemory) std::set<VkCommandBuffer>();
+	commandBuffers = new(deviceMemory) std::set<VkCommandBuffer>();
 }
 
-void CommandPool::destroy(const VkAllocationCallbacks* pAllocator)
+void CommandPool::destroy(const VkAllocationCallbacks *pAllocator)
 {
 	// Free command buffers allocated in allocateCommandBuffers
 	for(auto commandBuffer : *commandBuffers)
@@ -41,20 +41,20 @@
 	vk::deallocate(commandBuffers, DEVICE_MEMORY);
 }
 
-size_t CommandPool::ComputeRequiredAllocationSize(const VkCommandPoolCreateInfo* pCreateInfo)
+size_t CommandPool::ComputeRequiredAllocationSize(const VkCommandPoolCreateInfo *pCreateInfo)
 {
 	return 0;
 }
 
-VkResult CommandPool::allocateCommandBuffers(VkCommandBufferLevel level, uint32_t commandBufferCount, VkCommandBuffer* pCommandBuffers)
+VkResult CommandPool::allocateCommandBuffers(VkCommandBufferLevel level, uint32_t commandBufferCount, VkCommandBuffer *pCommandBuffers)
 {
 	for(uint32_t i = 0; i < commandBufferCount; i++)
 	{
 		// FIXME (b/119409619): use an allocator here so we can control all memory allocations
-		void* deviceMemory = vk::allocate(sizeof(DispatchableCommandBuffer), REQUIRED_MEMORY_ALIGNMENT,
+		void *deviceMemory = vk::allocate(sizeof(DispatchableCommandBuffer), REQUIRED_MEMORY_ALIGNMENT,
 		                                  DEVICE_MEMORY, DispatchableCommandBuffer::GetAllocationScope());
 		ASSERT(deviceMemory);
-		DispatchableCommandBuffer* commandBuffer = new (deviceMemory) DispatchableCommandBuffer(level);
+		DispatchableCommandBuffer *commandBuffer = new(deviceMemory) DispatchableCommandBuffer(level);
 		if(commandBuffer)
 		{
 			pCommandBuffers[i] = *commandBuffer;
@@ -78,7 +78,7 @@
 	return VK_SUCCESS;
 }
 
-void CommandPool::freeCommandBuffers(uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers)
+void CommandPool::freeCommandBuffers(uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers)
 {
 	for(uint32_t i = 0; i < commandBufferCount; ++i)
 	{
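
Note on the allocation pattern above: it separates storage from construction. vk::allocate returns raw, suitably aligned bytes, and placement new (new(deviceMemory) T(...)) constructs the object inside them, which is why destroy() must run the destructor and deallocate by hand. Below is a standalone sketch of the same idiom, with std::malloc/std::free as stand-ins for vk::allocate/vk::deallocate.

    #include <cstdlib>
    #include <new>
    #include <set>

    int main()
    {
        // Acquire raw storage first (stand-in for vk::allocate).
        void *memory = std::malloc(sizeof(std::set<int>));
        if(!memory) return 1;

        // Construct the container inside the caller-provided storage.
        using IntSet = std::set<int>;
        IntSet *numbers = new(memory) IntSet();
        numbers->insert(42);

        // Placement-new objects are destroyed explicitly, then the raw
        // storage is released separately (mirrors CommandPool::destroy()).
        numbers->~IntSet();
        std::free(memory);
        return 0;
    }
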
diff --git a/src/Vulkan/VkCommandPool.hpp b/src/Vulkan/VkCommandPool.hpp
index 0f4c130..f4f99c2 100644
--- a/src/Vulkan/VkCommandPool.hpp
+++ b/src/Vulkan/VkCommandPool.hpp
@@ -23,25 +23,25 @@
 class CommandPool : public Object<CommandPool, VkCommandPool>
 {
 public:
-	CommandPool(const VkCommandPoolCreateInfo* pCreateInfo, void* mem);
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	CommandPool(const VkCommandPoolCreateInfo *pCreateInfo, void *mem);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
-	static size_t ComputeRequiredAllocationSize(const VkCommandPoolCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkCommandPoolCreateInfo *pCreateInfo);
 
-	VkResult allocateCommandBuffers(VkCommandBufferLevel level, uint32_t commandBufferCount, VkCommandBuffer* pCommandBuffers);
-	void freeCommandBuffers(uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers);
+	VkResult allocateCommandBuffers(VkCommandBufferLevel level, uint32_t commandBufferCount, VkCommandBuffer *pCommandBuffers);
+	void freeCommandBuffers(uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers);
 	VkResult reset(VkCommandPoolResetFlags flags);
 	void trim(VkCommandPoolTrimFlags flags);
 
 private:
-	std::set<VkCommandBuffer>* commandBuffers;
+	std::set<VkCommandBuffer> *commandBuffers;
 };
 
-static inline CommandPool* Cast(VkCommandPool object)
+static inline CommandPool *Cast(VkCommandPool object)
 {
 	return CommandPool::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_COMMAND_POOL_HPP_
+#endif  // VK_COMMAND_POOL_HPP_
diff --git a/src/Vulkan/VkDebug.cpp b/src/Vulkan/VkDebug.cpp
index 02213da..ba4a5f8 100644
--- a/src/Vulkan/VkDebug.cpp
+++ b/src/Vulkan/VkDebug.cpp
@@ -14,19 +14,19 @@
 
 #include "VkDebug.hpp"
 
-#include <string>
-#include <atomic>
 #include <stdarg.h>
+#include <atomic>
+#include <string>
 
 #if defined(__unix__)
-#define PTRACE
-#include <sys/types.h>
-#include <sys/ptrace.h>
+#	define PTRACE
+#	include <sys/ptrace.h>
+#	include <sys/types.h>
 #elif defined(_WIN32) || defined(_WIN64)
-#include <windows.h>
+#	include <windows.h>
 #elif defined(__APPLE__) || defined(__MACH__)
-#include <unistd.h>
-#include <sys/sysctl.h>
+#	include <sys/sysctl.h>
+#	include <unistd.h>
 #endif
 
 namespace {
@@ -140,7 +140,7 @@
 
 void trace_assert(const char *format, ...)
 {
-	static std::atomic<bool> asserted = {false};
+	static std::atomic<bool> asserted = { false };
 	va_list vararg;
 	va_start(vararg, format);
 
diff --git a/src/Vulkan/VkDebug.hpp b/src/Vulkan/VkDebug.hpp
index 40462ed..b0287f8 100644
--- a/src/Vulkan/VkDebug.hpp
+++ b/src/Vulkan/VkDebug.hpp
@@ -17,19 +17,19 @@
 #ifndef VK_DEBUG_H_
 #define VK_DEBUG_H_
 
-#include <stdlib.h>
 #include <assert.h>
 #include <stdio.h>
+#include <stdlib.h>
 #include <string>
 
 #if !defined(TRACE_OUTPUT_FILE)
-#define TRACE_OUTPUT_FILE "debug.txt"
+#	define TRACE_OUTPUT_FILE "debug.txt"
 #endif
 
 #if defined(__GNUC__) || defined(__clang__)
-#define CHECK_PRINTF_ARGS __attribute__((format(printf, 1, 2)))
+#	define CHECK_PRINTF_ARGS __attribute__((format(printf, 1, 2)))
 #else
-#define CHECK_PRINTF_ARGS
+#	define CHECK_PRINTF_ARGS
 #endif
 
 namespace vk {
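
Note on the '#	define' and '#	include' spellings produced throughout VkDebug.cpp/VkDebug.hpp: they indent the directive after the hash, which is what clang-format's IndentPPDirectives option emits in AfterHash mode. That option name is an inference from the output, since the project's .clang-format file is not shown in this diff. The effect on nested conditionals looks like this:

    // With IndentPPDirectives: AfterHash, nesting depth shows after the '#'.
    #if defined(__unix__)
    #	define PTRACE
    #	if defined(__linux__)
    #		include <sys/ptrace.h>
    #	endif
    #endif
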
@@ -53,11 +53,11 @@
 // A macro to output a trace of a function call and its arguments to the
 // debugging log. Disabled if SWIFTSHADER_DISABLE_TRACE is defined.
 #if defined(SWIFTSHADER_DISABLE_TRACE)
-#define TRACE(message, ...) (void(0))
-#define TRACE_ASSERT(message, ...) (void(0))
+#	define TRACE(message, ...) (void(0))
+#	define TRACE_ASSERT(message, ...) (void(0))
 #else
-#define TRACE(message, ...) vk::trace("%s:%d TRACE: " message "\n", __FILE__, __LINE__, ##__VA_ARGS__)
-#define TRACE_ASSERT(message, ...) vk::trace_assert("%s:%d %s TRACE_ASSERT: " message "\n", __FILE__, __LINE__, __func__, ##__VA_ARGS__)
+#	define TRACE(message, ...) vk::trace("%s:%d TRACE: " message "\n", __FILE__, __LINE__, ##__VA_ARGS__)
+#	define TRACE_ASSERT(message, ...) vk::trace_assert("%s:%d %s TRACE_ASSERT: " message "\n", __FILE__, __LINE__, __func__, ##__VA_ARGS__)
 #endif
 
 // A macro to print a warning message to the debugging log and stderr to denote
@@ -81,26 +81,34 @@
 //   WARN() in release builds (NDEBUG && !DCHECK_ALWAYS_ON)
 #undef DABORT
 #if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
-#define DABORT(message, ...) ABORT(message, ##__VA_ARGS__)
+#	define DABORT(message, ...) ABORT(message, ##__VA_ARGS__)
 #else
-#define DABORT(message, ...) WARN(message, ##__VA_ARGS__)
+#	define DABORT(message, ...) WARN(message, ##__VA_ARGS__)
 #endif
 
 // A macro asserting a condition.
 // If the condition fails, the condition and message are passed to DABORT().
 #undef ASSERT_MSG
-#define ASSERT_MSG(expression, format, ...) do { \
-	if(!(expression)) { \
-		DABORT("ASSERT(%s): " format "\n", #expression, ##__VA_ARGS__); \
-	} } while(0)
+#define ASSERT_MSG(expression, format, ...)                                 \
+	do                                                                      \
+	{                                                                       \
+		if(!(expression))                                                   \
+		{                                                                   \
+			DABORT("ASSERT(%s): " format "\n", #expression, ##__VA_ARGS__); \
+		}                                                                   \
+	} while(0)
 
 // A macro asserting a condition.
 // If the condition fails, the condition is passed to DABORT().
 #undef ASSERT
-#define ASSERT(expression) do { \
-	if(!(expression)) { \
-		DABORT("ASSERT(%s)\n", #expression); \
-	} } while(0)
+#define ASSERT(expression)                       \
+	do                                           \
+	{                                            \
+		if(!(expression))                        \
+		{                                        \
+			DABORT("ASSERT(%s)\n", #expression); \
+		}                                        \
+	} while(0)
 
 // A macro to indicate functionality currently unimplemented for a feature
 // advertised as supported. For unsupported features not advertised as supported
@@ -124,12 +132,16 @@
 // A macro asserting a condition and performing a return.
 #undef ASSERT_OR_RETURN
 #if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
-#define ASSERT_OR_RETURN(expression) ASSERT(expression)
+#	define ASSERT_OR_RETURN(expression) ASSERT(expression)
 #else
-#define ASSERT_OR_RETURN(expression) do { \
-	if(!(expression)) { \
-		return; \
-	} } while(0)
+#	define ASSERT_OR_RETURN(expression) \
+		do                               \
+		{                                \
+			if(!(expression))            \
+			{                            \
+				return;                  \
+			}                            \
+		} while(0)
 #endif
 
-#endif   // VK_DEBUG_H_
+#endif  // VK_DEBUG_H_
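
Note on the ASSERT and ASSERT_MSG bodies reformatted above: both keep the classic do { ... } while(0) wrapper. The wrapper makes a multi-statement macro expand to exactly one statement that requires a trailing semicolon, so it nests safely under an unbraced if/else. A minimal illustration with a hypothetical CHECK macro (not part of VkDebug.hpp):

    #include <cstdio>

    // Without the do/while(0) wrapper, the expansion would be a block
    // followed by ';', and the 'else' in main() would fail to parse.
    #define CHECK(expression)                                            \
        do                                                               \
        {                                                                \
            if(!(expression))                                            \
            {                                                            \
                std::fprintf(stderr, "CHECK(%s) failed\n", #expression); \
            }                                                            \
        } while(0)

    int main(int argc, char **argv)
    {
        if(argc > 1)
            CHECK(argv[1][0] != '-');  // expands to a single statement
        else
            std::puts("no arguments given");
        return 0;
    }
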
diff --git a/src/Vulkan/VkDescriptorPool.cpp b/src/Vulkan/VkDescriptorPool.cpp
index 3c9b2dd..02754e7c 100644
--- a/src/Vulkan/VkDescriptorPool.cpp
+++ b/src/Vulkan/VkDescriptorPool.cpp
@@ -20,35 +20,34 @@
 #include <algorithm>
 #include <memory>
 
-namespace
-{
+namespace {
 
-inline VkDescriptorSet asDescriptorSet(uint8_t* memory)
+inline VkDescriptorSet asDescriptorSet(uint8_t *memory)
 {
-	return vk::TtoVkT<vk::DescriptorSet, VkDescriptorSet>(reinterpret_cast<vk::DescriptorSet*>(memory));
+	return vk::TtoVkT<vk::DescriptorSet, VkDescriptorSet>(reinterpret_cast<vk::DescriptorSet *>(memory));
 }
 
-inline uint8_t* asMemory(VkDescriptorSet descriptorSet)
+inline uint8_t *asMemory(VkDescriptorSet descriptorSet)
 {
-	return reinterpret_cast<uint8_t*>(vk::Cast(descriptorSet));
+	return reinterpret_cast<uint8_t *>(vk::Cast(descriptorSet));
 }
 
 }  // anonymous namespace
 
 namespace vk {
 
-DescriptorPool::DescriptorPool(const VkDescriptorPoolCreateInfo* pCreateInfo, void* mem) :
-	pool(static_cast<uint8_t*>(mem)),
-	poolSize(ComputeRequiredAllocationSize(pCreateInfo))
+DescriptorPool::DescriptorPool(const VkDescriptorPoolCreateInfo *pCreateInfo, void *mem)
+    : pool(static_cast<uint8_t *>(mem))
+    , poolSize(ComputeRequiredAllocationSize(pCreateInfo))
 {
 }
 
-void DescriptorPool::destroy(const VkAllocationCallbacks* pAllocator)
+void DescriptorPool::destroy(const VkAllocationCallbacks *pAllocator)
 {
 	vk::deallocate(pool, pAllocator);
 }
 
-size_t DescriptorPool::ComputeRequiredAllocationSize(const VkDescriptorPoolCreateInfo* pCreateInfo)
+size_t DescriptorPool::ComputeRequiredAllocationSize(const VkDescriptorPoolCreateInfo *pCreateInfo)
 {
 	size_t size = pCreateInfo->maxSets * sw::align(sizeof(DescriptorSetHeader), 16);
 
@@ -61,7 +60,7 @@
 	return size;
 }
 
-VkResult DescriptorPool::allocateSets(uint32_t descriptorSetCount, const VkDescriptorSetLayout* pSetLayouts, VkDescriptorSet* pDescriptorSets)
+VkResult DescriptorPool::allocateSets(uint32_t descriptorSetCount, const VkDescriptorSetLayout *pSetLayouts, VkDescriptorSet *pDescriptorSets)
 {
 	// FIXME (b/119409619): use an allocator here so we can control all memory allocations
 	std::unique_ptr<size_t[]> layoutSizes(new size_t[descriptorSetCount]);
@@ -82,7 +81,7 @@
 	return result;
 }
 
-uint8_t* DescriptorPool::findAvailableMemory(size_t size)
+uint8_t *DescriptorPool::findAvailableMemory(size_t size)
 {
 	if(nodes.empty())
 	{
@@ -113,7 +112,7 @@
 	++nextIt;
 	for(auto it = itBegin; nextIt != itEnd; ++it, ++nextIt)
 	{
-		uint8_t* freeSpaceStart = it->set + it->size;
+		uint8_t *freeSpaceStart = it->set + it->size;
 		freeSpace = nextIt->set - freeSpaceStart;
 		if(freeSpace >= size)
 		{
@@ -124,7 +123,7 @@
 	return nullptr;
 }
 
-VkResult DescriptorPool::allocateSets(size_t* sizes, uint32_t numAllocs, VkDescriptorSet* pDescriptorSets)
+VkResult DescriptorPool::allocateSets(size_t *sizes, uint32_t numAllocs, VkDescriptorSet *pDescriptorSets)
 {
 	size_t totalSize = 0;
 	for(uint32_t i = 0; i < numAllocs; i++)
@@ -139,7 +138,7 @@
 
 	// Attempt to allocate a single chunk of memory
 	{
-		uint8_t* memory = findAvailableMemory(totalSize);
+		uint8_t *memory = findAvailableMemory(totalSize);
 		if(memory)
 		{
 			for(uint32_t i = 0; i < numAllocs; i++)
@@ -156,7 +155,7 @@
 	// Attempt to allocate each descriptor set separately
 	for(uint32_t i = 0; i < numAllocs; i++)
 	{
-		uint8_t* memory = findAvailableMemory(sizes[i]);
+		uint8_t *memory = findAvailableMemory(sizes[i]);
 		if(memory)
 		{
 			pDescriptorSets[i] = asDescriptorSet(memory);
@@ -180,7 +179,7 @@
 	return VK_SUCCESS;
 }
 
-void DescriptorPool::freeSets(uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets)
+void DescriptorPool::freeSets(uint32_t descriptorSetCount, const VkDescriptorSet *pDescriptorSets)
 {
 	for(uint32_t i = 0; i < descriptorSetCount; i++)
 	{
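
Note on findAvailableMemory above: it is a first-fit scan. Live allocations are kept in a std::set of (pointer, size) nodes ordered by start address, and the pool returns the first gap between consecutive nodes that can hold the request. The sketch below reproduces just that inner scan; the real function also considers the space before the first node and after the last, which is omitted here.

    #include <cstddef>
    #include <cstdint>
    #include <iterator>
    #include <set>

    struct Node
    {
        uint8_t *set;  // start of a live allocation
        size_t size;   // its length in bytes
        bool operator<(const Node &other) const { return set < other.set; }
    };

    // First-fit: return the start of the first inter-node gap of at least
    // 'size' bytes, or nullptr if no such gap exists.
    uint8_t *findGap(const std::set<Node> &nodes, size_t size)
    {
        if(nodes.size() < 2) return nullptr;
        auto it = nodes.begin();
        auto next = std::next(it);
        for(; next != nodes.end(); ++it, ++next)
        {
            uint8_t *gapStart = it->set + it->size;
            if(static_cast<size_t>(next->set - gapStart) >= size)
            {
                return gapStart;
            }
        }
        return nullptr;
    }
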
diff --git a/src/Vulkan/VkDescriptorPool.hpp b/src/Vulkan/VkDescriptorPool.hpp
index c222018..7428835 100644
--- a/src/Vulkan/VkDescriptorPool.hpp
+++ b/src/Vulkan/VkDescriptorPool.hpp
@@ -23,41 +23,44 @@
 class DescriptorPool : public Object<DescriptorPool, VkDescriptorPool>
 {
 public:
-	DescriptorPool(const VkDescriptorPoolCreateInfo* pCreateInfo, void* mem);
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	DescriptorPool(const VkDescriptorPoolCreateInfo *pCreateInfo, void *mem);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
-	static size_t ComputeRequiredAllocationSize(const VkDescriptorPoolCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkDescriptorPoolCreateInfo *pCreateInfo);
 
-	VkResult allocateSets(uint32_t descriptorSetCount, const VkDescriptorSetLayout* pSetLayouts, VkDescriptorSet* pDescriptorSets);
-	void freeSets(uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets);
+	VkResult allocateSets(uint32_t descriptorSetCount, const VkDescriptorSetLayout *pSetLayouts, VkDescriptorSet *pDescriptorSets);
+	void freeSets(uint32_t descriptorSetCount, const VkDescriptorSet *pDescriptorSets);
 	VkResult reset();
 
 private:
-	VkResult allocateSets(size_t* sizes, uint32_t numAllocs, VkDescriptorSet* pDescriptorSets);
-	uint8_t* findAvailableMemory(size_t size);
+	VkResult allocateSets(size_t *sizes, uint32_t numAllocs, VkDescriptorSet *pDescriptorSets);
+	uint8_t *findAvailableMemory(size_t size);
 	void freeSet(const VkDescriptorSet descriptorSet);
 	size_t computeTotalFreeSize() const;
 
 	struct Node
 	{
-		Node(uint8_t* set, size_t size) : set(set), size(size) {}
-		bool operator<(const Node& node) const { return set < node.set; }
-		bool operator==(const uint8_t* other) const { return set == other; }
+		Node(uint8_t *set, size_t size)
+		    : set(set)
+		    , size(size)
+		{}
+		bool operator<(const Node &node) const { return set < node.set; }
+		bool operator==(const uint8_t *other) const { return set == other; }
 
-		uint8_t* set = nullptr;
+		uint8_t *set = nullptr;
 		size_t size = 0;
 	};
 	std::set<Node> nodes;
 
-	uint8_t* pool = nullptr;
+	uint8_t *pool = nullptr;
 	size_t poolSize = 0;
 };
 
-static inline DescriptorPool* Cast(VkDescriptorPool object)
+static inline DescriptorPool *Cast(VkDescriptorPool object)
 {
 	return DescriptorPool::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_DESCRIPTOR_POOL_HPP_
+#endif  // VK_DESCRIPTOR_POOL_HPP_
diff --git a/src/Vulkan/VkDescriptorSet.hpp b/src/Vulkan/VkDescriptorSet.hpp
index 89486b5..606b0f1 100644
--- a/src/Vulkan/VkDescriptorSet.hpp
+++ b/src/Vulkan/VkDescriptorSet.hpp
@@ -26,29 +26,29 @@
 
 struct alignas(16) DescriptorSetHeader
 {
-	DescriptorSetLayout* layout;
+	DescriptorSetLayout *layout;
 };
 
 class alignas(16) DescriptorSet
 {
 public:
-	static inline DescriptorSet* Cast(VkDescriptorSet object)
+	static inline DescriptorSet *Cast(VkDescriptorSet object)
 	{
-		return static_cast<DescriptorSet*>(static_cast<void*>(object));
+		return static_cast<DescriptorSet *>(static_cast<void *>(object));
 	}
 
-	using Bindings = std::array<vk::DescriptorSet*, vk::MAX_BOUND_DESCRIPTOR_SETS>;
+	using Bindings = std::array<vk::DescriptorSet *, vk::MAX_BOUND_DESCRIPTOR_SETS>;
 	using DynamicOffsets = std::array<uint32_t, vk::MAX_DESCRIPTOR_SET_COMBINED_BUFFERS_DYNAMIC>;
 
 	DescriptorSetHeader header;
 	alignas(16) uint8_t data[1];
 };
 
-inline DescriptorSet* Cast(VkDescriptorSet object)
+inline DescriptorSet *Cast(VkDescriptorSet object)
 {
 	return DescriptorSet::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_DESCRIPTOR_SET_HPP_
+#endif  // VK_DESCRIPTOR_SET_HPP_
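
Note on the Cast above: it converts a VkDescriptorSet handle to an object pointer with nothing but two static_casts, because in this implementation the handle value is the address of the set's storage inside the descriptor pool. A toy version of that handle scheme follows; OpaqueHandle and Object are illustrative names, not SwiftShader types.

    struct OpaqueHandle;  // incomplete type, like a Vulkan handle
    using Handle = OpaqueHandle *;

    struct Object
    {
        int value = 7;

        // The handle is the object's own address in disguise.
        static Object *Cast(Handle handle)
        {
            return static_cast<Object *>(static_cast<void *>(handle));
        }
    };

    int main()
    {
        Object object;
        Handle handle = reinterpret_cast<Handle>(&object);  // handed out to the API user
        return Object::Cast(handle)->value == 7 ? 0 : 1;
    }
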
diff --git a/src/Vulkan/VkDescriptorSetLayout.cpp b/src/Vulkan/VkDescriptorSetLayout.cpp
index 8342b44..5e45d81 100644
--- a/src/Vulkan/VkDescriptorSetLayout.cpp
+++ b/src/Vulkan/VkDescriptorSetLayout.cpp
@@ -14,11 +14,11 @@
 
 #include "VkDescriptorSetLayout.hpp"
 
-#include "VkDescriptorSet.hpp"
-#include "VkSampler.hpp"
-#include "VkImageView.hpp"
 #include "VkBuffer.hpp"
 #include "VkBufferView.hpp"
+#include "VkDescriptorSet.hpp"
+#include "VkImageView.hpp"
+#include "VkSampler.hpp"
 #include "System/Types.hpp"
 
 #include <algorithm>
@@ -26,10 +26,10 @@
 
 namespace {
 
-static bool UsesImmutableSamplers(const VkDescriptorSetLayoutBinding& binding)
+static bool UsesImmutableSamplers(const VkDescriptorSetLayoutBinding &binding)
 {
 	return (((binding.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) ||
-	        (binding.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)) &&
+	         (binding.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)) &&
 	        (binding.pImmutableSamplers != nullptr));
 }
 
@@ -37,11 +37,13 @@
 
 namespace vk {
 
-DescriptorSetLayout::DescriptorSetLayout(const VkDescriptorSetLayoutCreateInfo* pCreateInfo, void* mem) :
-	flags(pCreateInfo->flags), bindingCount(pCreateInfo->bindingCount), bindings(reinterpret_cast<VkDescriptorSetLayoutBinding*>(mem))
+DescriptorSetLayout::DescriptorSetLayout(const VkDescriptorSetLayoutCreateInfo *pCreateInfo, void *mem)
+    : flags(pCreateInfo->flags)
+    , bindingCount(pCreateInfo->bindingCount)
+    , bindings(reinterpret_cast<VkDescriptorSetLayoutBinding *>(mem))
 {
-	uint8_t* hostMemory = static_cast<uint8_t*>(mem) + bindingCount * sizeof(VkDescriptorSetLayoutBinding);
-	bindingOffsets = reinterpret_cast<size_t*>(hostMemory);
+	uint8_t *hostMemory = static_cast<uint8_t *>(mem) + bindingCount * sizeof(VkDescriptorSetLayoutBinding);
+	bindingOffsets = reinterpret_cast<size_t *>(hostMemory);
 	hostMemory += bindingCount * sizeof(size_t);
 
 	size_t offset = 0;
@@ -51,9 +53,9 @@
 		if(UsesImmutableSamplers(bindings[i]))
 		{
 			size_t immutableSamplersSize = bindings[i].descriptorCount * sizeof(VkSampler);
-			bindings[i].pImmutableSamplers = reinterpret_cast<const VkSampler*>(hostMemory);
+			bindings[i].pImmutableSamplers = reinterpret_cast<const VkSampler *>(hostMemory);
 			hostMemory += immutableSamplersSize;
-			memcpy(const_cast<VkSampler*>(bindings[i].pImmutableSamplers),
+			memcpy(const_cast<VkSampler *>(bindings[i].pImmutableSamplers),
 			       pCreateInfo->pBindings[i].pImmutableSamplers,
 			       immutableSamplersSize);
 		}
@@ -67,12 +69,12 @@
 	ASSERT_MSG(offset == getDescriptorSetDataSize(), "offset: %d, size: %d", int(offset), int(getDescriptorSetDataSize()));
 }
 
-void DescriptorSetLayout::destroy(const VkAllocationCallbacks* pAllocator)
+void DescriptorSetLayout::destroy(const VkAllocationCallbacks *pAllocator)
 {
-	vk::deallocate(bindings, pAllocator); // This allocation also contains pImmutableSamplers
+	vk::deallocate(bindings, pAllocator);  // This allocation also contains pImmutableSamplers
 }
 
-size_t DescriptorSetLayout::ComputeRequiredAllocationSize(const VkDescriptorSetLayoutCreateInfo* pCreateInfo)
+size_t DescriptorSetLayout::ComputeRequiredAllocationSize(const VkDescriptorSetLayoutCreateInfo *pCreateInfo)
 {
 	size_t allocationSize = pCreateInfo->bindingCount * (sizeof(VkDescriptorSetLayoutBinding) + sizeof(size_t));
 
@@ -91,23 +93,23 @@
 {
 	switch(type)
 	{
-	case VK_DESCRIPTOR_TYPE_SAMPLER:
-	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
-	case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
-	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
-		return sizeof(SampledImageDescriptor);
-	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
-	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
-	case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
-		return sizeof(StorageImageDescriptor);
-	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
-	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
-	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
-	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
-		return sizeof(BufferDescriptor);
-	default:
-		UNIMPLEMENTED("Unsupported Descriptor Type");
-		return 0;
+		case VK_DESCRIPTOR_TYPE_SAMPLER:
+		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+			return sizeof(SampledImageDescriptor);
+		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+			return sizeof(StorageImageDescriptor);
+		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+			return sizeof(BufferDescriptor);
+		default:
+			UNIMPLEMENTED("Unsupported Descriptor Type");
+			return 0;
 	}
 }
 
@@ -142,11 +144,11 @@
 	return 0;
 }
 
-void DescriptorSetLayout::initialize(DescriptorSet* descriptorSet)
+void DescriptorSetLayout::initialize(DescriptorSet *descriptorSet)
 {
 	// Use a pointer to this descriptor set layout as the descriptor set's header
 	descriptorSet->header.layout = this;
-	uint8_t* mem = descriptorSet->data;
+	uint8_t *mem = descriptorSet->data;
 
 	for(uint32_t i = 0; i < bindingCount; i++)
 	{
@@ -155,7 +157,7 @@
 		{
 			for(uint32_t j = 0; j < bindings[i].descriptorCount; j++)
 			{
-				SampledImageDescriptor* imageSamplerDescriptor = reinterpret_cast<SampledImageDescriptor*>(mem);
+				SampledImageDescriptor *imageSamplerDescriptor = reinterpret_cast<SampledImageDescriptor *>(mem);
 				imageSamplerDescriptor->updateSampler(bindings[i].pImmutableSamplers[j]);
 				mem += typeSize;
 			}
@@ -200,7 +202,7 @@
 bool DescriptorSetLayout::isDynamic(VkDescriptorType type)
 {
 	return type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
-		   type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
+	       type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
 }
 
 bool DescriptorSetLayout::isBindingDynamic(uint32_t binding) const
@@ -238,41 +240,41 @@
 	return index;
 }
 
-VkDescriptorSetLayoutBinding const & DescriptorSetLayout::getBindingLayout(uint32_t binding) const
+VkDescriptorSetLayoutBinding const &DescriptorSetLayout::getBindingLayout(uint32_t binding) const
 {
 	uint32_t index = getBindingIndex(binding);
 	return bindings[index];
 }
 
-uint8_t* DescriptorSetLayout::getOffsetPointer(DescriptorSet *descriptorSet, uint32_t binding, uint32_t arrayElement, uint32_t count, size_t* typeSize) const
+uint8_t *DescriptorSetLayout::getOffsetPointer(DescriptorSet *descriptorSet, uint32_t binding, uint32_t arrayElement, uint32_t count, size_t *typeSize) const
 {
 	uint32_t index = getBindingIndex(binding);
 	*typeSize = GetDescriptorSize(bindings[index].descriptorType);
 	size_t byteOffset = bindingOffsets[index] + (*typeSize * arrayElement);
-	ASSERT(((*typeSize * count) + byteOffset) <= getDescriptorSetDataSize()); // Make sure the operation will not go out of bounds
+	ASSERT(((*typeSize * count) + byteOffset) <= getDescriptorSetDataSize());  // Make sure the operation will not go out of bounds
 	return &descriptorSet->data[byteOffset];
 }
 
 void SampledImageDescriptor::updateSampler(const VkSampler newSampler)
 {
-	memcpy(reinterpret_cast<void*>(&sampler), vk::Cast(newSampler), sizeof(sampler));
+	memcpy(reinterpret_cast<void *>(&sampler), vk::Cast(newSampler), sizeof(sampler));
 }
 
-void DescriptorSetLayout::WriteDescriptorSet(Device* device, DescriptorSet *dstSet, VkDescriptorUpdateTemplateEntry const &entry, char const *src)
+void DescriptorSetLayout::WriteDescriptorSet(Device *device, DescriptorSet *dstSet, VkDescriptorUpdateTemplateEntry const &entry, char const *src)
 {
-	DescriptorSetLayout* dstLayout = dstSet->header.layout;
+	DescriptorSetLayout *dstLayout = dstSet->header.layout;
 	auto &binding = dstLayout->bindings[dstLayout->getBindingIndex(entry.dstBinding)];
 	ASSERT(dstLayout);
 	ASSERT(binding.descriptorType == entry.descriptorType);
 
 	size_t typeSize = 0;
-	uint8_t* memToWrite = dstLayout->getOffsetPointer(dstSet, entry.dstBinding, entry.dstArrayElement, entry.descriptorCount, &typeSize);
+	uint8_t *memToWrite = dstLayout->getOffsetPointer(dstSet, entry.dstBinding, entry.dstArrayElement, entry.descriptorCount, &typeSize);
 
 	ASSERT(reinterpret_cast<intptr_t>(memToWrite) % 16 == 0);  // Each descriptor must be 16-byte aligned.
 
 	if(entry.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
 	{
-		SampledImageDescriptor *imageSampler = reinterpret_cast<SampledImageDescriptor*>(memToWrite);
+		SampledImageDescriptor *imageSampler = reinterpret_cast<SampledImageDescriptor *>(memToWrite);
 
 		for(uint32_t i = 0; i < entry.descriptorCount; i++)
 		{
@@ -288,7 +290,7 @@
 	}
 	else if(entry.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER)
 	{
-		SampledImageDescriptor *imageSampler = reinterpret_cast<SampledImageDescriptor*>(memToWrite);
+		SampledImageDescriptor *imageSampler = reinterpret_cast<SampledImageDescriptor *>(memToWrite);
 
 		for(uint32_t i = 0; i < entry.descriptorCount; i++)
 		{
@@ -323,9 +325,9 @@
 		}
 	}
 	else if(entry.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
-	         entry.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE)
+	        entry.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE)
 	{
-		SampledImageDescriptor *imageSampler = reinterpret_cast<SampledImageDescriptor*>(memToWrite);
+		SampledImageDescriptor *imageSampler = reinterpret_cast<SampledImageDescriptor *>(memToWrite);
 
 		for(uint32_t i = 0; i < entry.descriptorCount; i++)
 		{
@@ -366,7 +368,7 @@
 				// different planes in the descriptor's mipmap levels instead.
 
 				const int level = 0;
-				VkOffset3D offset = {0, 0, 0};
+				VkOffset3D offset = { 0, 0, 0 };
 				texture->mipmap[0].buffer = imageView->getOffsetPointer(offset, VK_IMAGE_ASPECT_PLANE_0_BIT, level, 0, ImageView::SAMPLING);
 				texture->mipmap[1].buffer = imageView->getOffsetPointer(offset, VK_IMAGE_ASPECT_PLANE_1_BIT, level, 0, ImageView::SAMPLING);
 				if(format.getAspects() & VK_IMAGE_ASPECT_PLANE_2_BIT)
@@ -404,12 +406,12 @@
 					{
 						// Obtain the pointer to the corner of the level including the border, for seamless sampling.
 						// This is taken into account in the sampling routine, which can't handle negative texel coordinates.
-						VkOffset3D offset = {-1, -1, 0};
+						VkOffset3D offset = { -1, -1, 0 };
 						mipmap.buffer = imageView->getOffsetPointer(offset, aspect, level, 0, ImageView::SAMPLING);
 					}
 					else
 					{
-						VkOffset3D offset = {0, 0, 0};
+						VkOffset3D offset = { 0, 0, 0 };
 						mipmap.buffer = imageView->getOffsetPointer(offset, aspect, level, 0, ImageView::SAMPLING);
 					}
 
@@ -431,19 +433,19 @@
 		}
 	}
 	else if(entry.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
-	         entry.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
+	        entry.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
 	{
 		auto descriptor = reinterpret_cast<StorageImageDescriptor *>(memToWrite);
 		for(uint32_t i = 0; i < entry.descriptorCount; i++)
 		{
 			auto update = reinterpret_cast<VkDescriptorImageInfo const *>(src + entry.offset + entry.stride * i);
 			auto imageView = vk::Cast(update->imageView);
-			descriptor[i].ptr = imageView->getOffsetPointer({0, 0, 0}, VK_IMAGE_ASPECT_COLOR_BIT, 0, 0);
+			descriptor[i].ptr = imageView->getOffsetPointer({ 0, 0, 0 }, VK_IMAGE_ASPECT_COLOR_BIT, 0, 0);
 			descriptor[i].extent = imageView->getMipLevelExtent(0);
 			descriptor[i].rowPitchBytes = imageView->rowPitchBytes(VK_IMAGE_ASPECT_COLOR_BIT, 0);
 			descriptor[i].samplePitchBytes = imageView->getSubresourceRange().layerCount > 1
-											 ? imageView->layerPitchBytes(VK_IMAGE_ASPECT_COLOR_BIT)
-											 : imageView->slicePitchBytes(VK_IMAGE_ASPECT_COLOR_BIT, 0);
+			                                     ? imageView->layerPitchBytes(VK_IMAGE_ASPECT_COLOR_BIT)
+			                                     : imageView->slicePitchBytes(VK_IMAGE_ASPECT_COLOR_BIT, 0);
 			descriptor[i].slicePitchBytes = descriptor[i].samplePitchBytes * imageView->getSampleCount();
 			descriptor[i].arrayLayers = imageView->getSubresourceRange().layerCount;
 			descriptor[i].sampleCount = imageView->getSampleCount();
@@ -451,11 +453,11 @@
 
 			if(imageView->getFormat().isStencil())
 			{
-				descriptor[i].stencilPtr = imageView->getOffsetPointer({0, 0, 0}, VK_IMAGE_ASPECT_STENCIL_BIT, 0, 0);
+				descriptor[i].stencilPtr = imageView->getOffsetPointer({ 0, 0, 0 }, VK_IMAGE_ASPECT_STENCIL_BIT, 0, 0);
 				descriptor[i].stencilRowPitchBytes = imageView->rowPitchBytes(VK_IMAGE_ASPECT_STENCIL_BIT, 0);
 				descriptor[i].stencilSamplePitchBytes = (imageView->getSubresourceRange().layerCount > 1)
-												        ? imageView->layerPitchBytes(VK_IMAGE_ASPECT_STENCIL_BIT)
-												        : imageView->slicePitchBytes(VK_IMAGE_ASPECT_STENCIL_BIT, 0);
+				                                            ? imageView->layerPitchBytes(VK_IMAGE_ASPECT_STENCIL_BIT)
+				                                            : imageView->slicePitchBytes(VK_IMAGE_ASPECT_STENCIL_BIT, 0);
 				descriptor[i].stencilSlicePitchBytes = descriptor[i].stencilSamplePitchBytes * imageView->getSampleCount();
 			}
 		}
@@ -468,7 +470,7 @@
 			auto update = reinterpret_cast<VkBufferView const *>(src + entry.offset + entry.stride * i);
 			auto bufferView = vk::Cast(*update);
 			descriptor[i].ptr = bufferView->getPointer();
-			descriptor[i].extent = {bufferView->getElementCount(), 1, 1};
+			descriptor[i].extent = { bufferView->getElementCount(), 1, 1 };
 			descriptor[i].rowPitchBytes = 0;
 			descriptor[i].slicePitchBytes = 0;
 			descriptor[i].samplePitchBytes = 0;
@@ -478,9 +480,9 @@
 		}
 	}
 	else if(entry.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
-	         entry.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
-	         entry.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
-	         entry.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
+	        entry.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
+	        entry.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
+	        entry.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
 	{
 		auto descriptor = reinterpret_cast<BufferDescriptor *>(memToWrite);
 		for(uint32_t i = 0; i < entry.descriptorCount; i++)
@@ -499,24 +501,24 @@
 	if(level == 0)
 	{
 		texture->widthWidthHeightHeight[0] =
-		texture->widthWidthHeightHeight[1] = static_cast<float>(width);
+		    texture->widthWidthHeightHeight[1] = static_cast<float>(width);
 		texture->widthWidthHeightHeight[2] =
-		texture->widthWidthHeightHeight[3] = static_cast<float>(height);
+		    texture->widthWidthHeightHeight[3] = static_cast<float>(height);
 
 		texture->width[0] =
-		texture->width[1] =
-		texture->width[2] =
-		texture->width[3] = static_cast<float>(width);
+		    texture->width[1] =
+		        texture->width[2] =
+		            texture->width[3] = static_cast<float>(width);
 
 		texture->height[0] =
-		texture->height[1] =
-		texture->height[2] =
-		texture->height[3] = static_cast<float>(height);
+		    texture->height[1] =
+		        texture->height[2] =
+		            texture->height[3] = static_cast<float>(height);
 
 		texture->depth[0] =
-		texture->depth[1] =
-		texture->depth[2] =
-		texture->depth[3] = static_cast<float>(depth);
+		    texture->depth[1] =
+		        texture->depth[2] =
+		            texture->depth[3] = static_cast<float>(depth);
 	}
 
 	sw::Mipmap &mipmap = texture->mipmap[level];
@@ -526,34 +528,34 @@
 	short halfTexelW = 0x8000 / depth;
 
 	mipmap.uHalf[0] =
-	mipmap.uHalf[1] =
-	mipmap.uHalf[2] =
-	mipmap.uHalf[3] = halfTexelU;
+	    mipmap.uHalf[1] =
+	        mipmap.uHalf[2] =
+	            mipmap.uHalf[3] = halfTexelU;
 
 	mipmap.vHalf[0] =
-	mipmap.vHalf[1] =
-	mipmap.vHalf[2] =
-	mipmap.vHalf[3] = halfTexelV;
+	    mipmap.vHalf[1] =
+	        mipmap.vHalf[2] =
+	            mipmap.vHalf[3] = halfTexelV;
 
 	mipmap.wHalf[0] =
-	mipmap.wHalf[1] =
-	mipmap.wHalf[2] =
-	mipmap.wHalf[3] = halfTexelW;
+	    mipmap.wHalf[1] =
+	        mipmap.wHalf[2] =
+	            mipmap.wHalf[3] = halfTexelW;
 
 	mipmap.width[0] =
-	mipmap.width[1] =
-	mipmap.width[2] =
-	mipmap.width[3] = width;
+	    mipmap.width[1] =
+	        mipmap.width[2] =
+	            mipmap.width[3] = width;
 
 	mipmap.height[0] =
-	mipmap.height[1] =
-	mipmap.height[2] =
-	mipmap.height[3] = height;
+	    mipmap.height[1] =
+	        mipmap.height[2] =
+	            mipmap.height[3] = height;
 
 	mipmap.depth[0] =
-	mipmap.depth[1] =
-	mipmap.depth[2] =
-	mipmap.depth[3] = depth;
+	    mipmap.depth[1] =
+	        mipmap.depth[2] =
+	            mipmap.depth[3] = depth;
 
 	mipmap.onePitchP[0] = 1;
 	mipmap.onePitchP[1] = static_cast<short>(pitchP);
@@ -581,9 +583,9 @@
 	mipmap.sampleMax[3] = sampleMax;
 }
 
-void DescriptorSetLayout::WriteDescriptorSet(Device* device, const VkWriteDescriptorSet& writeDescriptorSet)
+void DescriptorSetLayout::WriteDescriptorSet(Device *device, const VkWriteDescriptorSet &writeDescriptorSet)
 {
-	DescriptorSet* dstSet = vk::Cast(writeDescriptorSet.dstSet);
+	DescriptorSet *dstSet = vk::Cast(writeDescriptorSet.dstSet);
 	VkDescriptorUpdateTemplateEntry e;
 	e.descriptorType = writeDescriptorSet.descriptorType;
 	e.dstBinding = writeDescriptorSet.dstBinding;
@@ -593,51 +595,51 @@
 	void const *ptr = nullptr;
 	switch(writeDescriptorSet.descriptorType)
 	{
-	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
-	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
-		ptr = writeDescriptorSet.pTexelBufferView;
-		e.stride = sizeof(VkBufferView);
-		break;
+		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+			ptr = writeDescriptorSet.pTexelBufferView;
+			e.stride = sizeof(VkBufferView);
+			break;
 
-	case VK_DESCRIPTOR_TYPE_SAMPLER:
-	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
-	case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
-	case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
-	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
-		ptr = writeDescriptorSet.pImageInfo;
-		e.stride = sizeof(VkDescriptorImageInfo);
-		break;
+		case VK_DESCRIPTOR_TYPE_SAMPLER:
+		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+			ptr = writeDescriptorSet.pImageInfo;
+			e.stride = sizeof(VkDescriptorImageInfo);
+			break;
 
-	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
-	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
-	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
-	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
-		ptr = writeDescriptorSet.pBufferInfo;
-		e.stride = sizeof(VkDescriptorBufferInfo);
-		break;
+		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+			ptr = writeDescriptorSet.pBufferInfo;
+			e.stride = sizeof(VkDescriptorBufferInfo);
+			break;
 
-	default:
-		UNIMPLEMENTED("descriptor type %u", writeDescriptorSet.descriptorType);
+		default:
+			UNIMPLEMENTED("descriptor type %u", writeDescriptorSet.descriptorType);
 	}
 
 	WriteDescriptorSet(device, dstSet, e, reinterpret_cast<char const *>(ptr));
 }
 
-void DescriptorSetLayout::CopyDescriptorSet(const VkCopyDescriptorSet& descriptorCopies)
+void DescriptorSetLayout::CopyDescriptorSet(const VkCopyDescriptorSet &descriptorCopies)
 {
-	DescriptorSet* srcSet = vk::Cast(descriptorCopies.srcSet);
-	DescriptorSetLayout* srcLayout = srcSet->header.layout;
+	DescriptorSet *srcSet = vk::Cast(descriptorCopies.srcSet);
+	DescriptorSetLayout *srcLayout = srcSet->header.layout;
 	ASSERT(srcLayout);
 
-	DescriptorSet* dstSet = vk::Cast(descriptorCopies.dstSet);
-	DescriptorSetLayout* dstLayout = dstSet->header.layout;
+	DescriptorSet *dstSet = vk::Cast(descriptorCopies.dstSet);
+	DescriptorSetLayout *dstLayout = dstSet->header.layout;
 	ASSERT(dstLayout);
 
 	size_t srcTypeSize = 0;
-	uint8_t* memToRead = srcLayout->getOffsetPointer(srcSet, descriptorCopies.srcBinding, descriptorCopies.srcArrayElement, descriptorCopies.descriptorCount, &srcTypeSize);
+	uint8_t *memToRead = srcLayout->getOffsetPointer(srcSet, descriptorCopies.srcBinding, descriptorCopies.srcArrayElement, descriptorCopies.descriptorCount, &srcTypeSize);
 
 	size_t dstTypeSize = 0;
-	uint8_t* memToWrite = dstLayout->getOffsetPointer(dstSet, descriptorCopies.dstBinding, descriptorCopies.dstArrayElement, descriptorCopies.descriptorCount, &dstTypeSize);
+	uint8_t *memToWrite = dstLayout->getOffsetPointer(dstSet, descriptorCopies.dstBinding, descriptorCopies.dstArrayElement, descriptorCopies.descriptorCount, &dstTypeSize);
 
 	ASSERT(srcTypeSize == dstTypeSize);
 	size_t writeSize = dstTypeSize * descriptorCopies.descriptorCount;
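
Note on the update reads throughout WriteDescriptorSet above: the i-th record is fetched as src + entry.offset + entry.stride * i. That is the VkDescriptorUpdateTemplateEntry addressing model: offset and stride are byte distances into an application-defined struct, so one template entry can walk an array embedded anywhere in user data. A small sketch of the arithmetic, using a stand-in for VkDescriptorBufferInfo rather than the real Vulkan type:

    #include <cstddef>
    #include <cstdint>

    struct BufferInfo  // stand-in for VkDescriptorBufferInfo
    {
        uint64_t buffer;
        uint64_t offset;
        uint64_t range;
    };

    struct UpdateData  // application-defined layout described by the template
    {
        uint32_t unrelated;
        BufferInfo buffers[4];
    };

    int main()
    {
        UpdateData data = {};
        const char *src = reinterpret_cast<const char *>(&data);

        // What a VkDescriptorUpdateTemplateEntry would carry for 'buffers'.
        const size_t offset = offsetof(UpdateData, buffers);
        const size_t stride = sizeof(BufferInfo);

        for(uint32_t i = 0; i < 4; i++)
        {
            auto update = reinterpret_cast<const BufferInfo *>(src + offset + stride * i);
            (void)update;  // the driver copies *update into descriptor memory
        }
        return 0;
    }
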
diff --git a/src/Vulkan/VkDescriptorSetLayout.hpp b/src/Vulkan/VkDescriptorSetLayout.hpp
index 9a7b333..b75f79b 100644
--- a/src/Vulkan/VkDescriptorSetLayout.hpp
+++ b/src/Vulkan/VkDescriptorSetLayout.hpp
@@ -17,9 +17,9 @@
 
 #include "VkObject.hpp"
 
-#include "Vulkan/VkSampler.hpp"
-#include "Vulkan/VkImageView.hpp"
 #include "Device/Sampler.hpp"
+#include "Vulkan/VkImageView.hpp"
+#include "Vulkan/VkSampler.hpp"
 
 namespace vk {
 
@@ -35,14 +35,14 @@
 
 	// TODO(b/129523279): Minimize to the data actually needed.
 	vk::Sampler sampler;
-	vk::Device* device;
+	vk::Device *device;
 
 	uint32_t imageViewId;
 	VkImageViewType type;
 	VkFormat format;
 	VkComponentMapping swizzle;
 	alignas(16) sw::Texture texture;
-	VkExtent3D extent; // Of base mip-level.
+	VkExtent3D extent;  // Of base mip-level.
 	int arrayLayers;
 	int mipLevels;
 	int sampleCount;
@@ -72,26 +72,26 @@
 	~BufferDescriptor() = delete;
 
 	void *ptr;
-	int sizeInBytes;		// intended size of the bound region -- slides along with dynamic offsets
-	int robustnessSize;		// total accessible size from static offset -- does not move with dynamic offset
+	int sizeInBytes;     // intended size of the bound region -- slides along with dynamic offsets
+	int robustnessSize;  // total accessible size from static offset -- does not move with dynamic offset
 };
 
 class DescriptorSetLayout : public Object<DescriptorSetLayout, VkDescriptorSetLayout>
 {
 public:
-	DescriptorSetLayout(const VkDescriptorSetLayoutCreateInfo* pCreateInfo, void* mem);
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	DescriptorSetLayout(const VkDescriptorSetLayoutCreateInfo *pCreateInfo, void *mem);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
-	static size_t ComputeRequiredAllocationSize(const VkDescriptorSetLayoutCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkDescriptorSetLayoutCreateInfo *pCreateInfo);
 
 	static size_t GetDescriptorSize(VkDescriptorType type);
-	static void WriteDescriptorSet(Device* device, const VkWriteDescriptorSet& descriptorWrites);
-	static void CopyDescriptorSet(const VkCopyDescriptorSet& descriptorCopies);
+	static void WriteDescriptorSet(Device *device, const VkWriteDescriptorSet &descriptorWrites);
+	static void CopyDescriptorSet(const VkCopyDescriptorSet &descriptorCopies);
 
-	static void WriteDescriptorSet(Device* device, DescriptorSet *dstSet, VkDescriptorUpdateTemplateEntry const &entry, char const *src);
+	static void WriteDescriptorSet(Device *device, DescriptorSet *dstSet, VkDescriptorUpdateTemplateEntry const &entry, char const *src);
 	static void WriteTextureLevelInfo(sw::Texture *texture, int level, int width, int height, int depth, int pitchP, int sliceP, int samplePitchP, int sampleMax);
 
-	void initialize(DescriptorSet* descriptorSet);
+	void initialize(DescriptorSet *descriptorSet);
 
 	// Returns the total size of the descriptor set in bytes.
 	size_t getDescriptorSetAllocationSize() const;
@@ -125,9 +125,9 @@
 	bool isBindingDynamic(uint32_t binding) const;
 
 	// Returns the VkDescriptorSetLayoutBinding for the given binding.
-	VkDescriptorSetLayoutBinding const & getBindingLayout(uint32_t binding) const;
+	VkDescriptorSetLayoutBinding const &getBindingLayout(uint32_t binding) const;
 
-	uint8_t* getOffsetPointer(DescriptorSet *descriptorSet, uint32_t binding, uint32_t arrayElement, uint32_t count, size_t* typeSize) const;
+	uint8_t *getOffsetPointer(DescriptorSet *descriptorSet, uint32_t binding, uint32_t arrayElement, uint32_t count, size_t *typeSize) const;
 
 private:
 	size_t getDescriptorSetDataSize() const;
@@ -135,16 +135,16 @@
 	static bool isDynamic(VkDescriptorType type);
 
 	VkDescriptorSetLayoutCreateFlags flags;
-	uint32_t                         bindingCount;
-	VkDescriptorSetLayoutBinding*    bindings;
-	size_t*                          bindingOffsets;
+	uint32_t bindingCount;
+	VkDescriptorSetLayoutBinding *bindings;
+	size_t *bindingOffsets;
 };
 
-static inline DescriptorSetLayout* Cast(VkDescriptorSetLayout object)
+static inline DescriptorSetLayout *Cast(VkDescriptorSetLayout object)
 {
 	return DescriptorSetLayout::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_DESCRIPTOR_SET_LAYOUT_HPP_
+#endif  // VK_DESCRIPTOR_SET_LAYOUT_HPP_
diff --git a/src/Vulkan/VkDescriptorUpdateTemplate.cpp b/src/Vulkan/VkDescriptorUpdateTemplate.cpp
index 3a83120..9394384 100644
--- a/src/Vulkan/VkDescriptorUpdateTemplate.cpp
+++ b/src/Vulkan/VkDescriptorUpdateTemplate.cpp
@@ -19,10 +19,10 @@
 
 namespace vk {
 
-DescriptorUpdateTemplate::DescriptorUpdateTemplate(const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, void* mem) :
-	descriptorUpdateEntryCount(pCreateInfo->descriptorUpdateEntryCount),
-	descriptorUpdateEntries(reinterpret_cast<VkDescriptorUpdateTemplateEntry*>(mem)),
-	descriptorSetLayout(vk::Cast(pCreateInfo->descriptorSetLayout))
+DescriptorUpdateTemplate::DescriptorUpdateTemplate(const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo, void *mem)
+    : descriptorUpdateEntryCount(pCreateInfo->descriptorUpdateEntryCount)
+    , descriptorUpdateEntries(reinterpret_cast<VkDescriptorUpdateTemplateEntry *>(mem))
+    , descriptorSetLayout(vk::Cast(pCreateInfo->descriptorSetLayout))
 {
 	for(uint32_t i = 0; i < descriptorUpdateEntryCount; i++)
 	{
@@ -30,20 +30,20 @@
 	}
 }
 
-size_t DescriptorUpdateTemplate::ComputeRequiredAllocationSize(const VkDescriptorUpdateTemplateCreateInfo* info)
+size_t DescriptorUpdateTemplate::ComputeRequiredAllocationSize(const VkDescriptorUpdateTemplateCreateInfo *info)
 {
 	return info->descriptorUpdateEntryCount * sizeof(VkDescriptorUpdateTemplateEntry);
 }
 
-void DescriptorUpdateTemplate::updateDescriptorSet(Device* device, VkDescriptorSet vkDescriptorSet, const void* pData)
+void DescriptorUpdateTemplate::updateDescriptorSet(Device *device, VkDescriptorSet vkDescriptorSet, const void *pData)
 {
 
-	DescriptorSet* descriptorSet = vk::Cast(vkDescriptorSet);
+	DescriptorSet *descriptorSet = vk::Cast(vkDescriptorSet);
 
 	for(uint32_t i = 0; i < descriptorUpdateEntryCount; i++)
 	{
 		DescriptorSetLayout::WriteDescriptorSet(device, descriptorSet, descriptorUpdateEntries[i],
-												reinterpret_cast<char const *>(pData));
+		                                        reinterpret_cast<char const *>(pData));
 	}
 }
 
diff --git a/src/Vulkan/VkDescriptorUpdateTemplate.hpp b/src/Vulkan/VkDescriptorUpdateTemplate.hpp
index 0884b1e..e608e23 100644
--- a/src/Vulkan/VkDescriptorUpdateTemplate.hpp
+++ b/src/Vulkan/VkDescriptorUpdateTemplate.hpp
@@ -25,23 +25,23 @@
 class DescriptorUpdateTemplate : public Object<DescriptorUpdateTemplate, VkDescriptorUpdateTemplate>
 {
 public:
-	DescriptorUpdateTemplate(const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, void* mem);
+	DescriptorUpdateTemplate(const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo, void *mem);
 
-	static size_t ComputeRequiredAllocationSize(const VkDescriptorUpdateTemplateCreateInfo* info);
+	static size_t ComputeRequiredAllocationSize(const VkDescriptorUpdateTemplateCreateInfo *info);
 
-	void updateDescriptorSet(Device* device, VkDescriptorSet descriptorSet, const void* pData);
+	void updateDescriptorSet(Device *device, VkDescriptorSet descriptorSet, const void *pData);
 
 private:
-	uint32_t                              descriptorUpdateEntryCount = 0;
-	VkDescriptorUpdateTemplateEntry*      descriptorUpdateEntries = nullptr;
-	DescriptorSetLayout*                  descriptorSetLayout = nullptr;
+	uint32_t descriptorUpdateEntryCount = 0;
+	VkDescriptorUpdateTemplateEntry *descriptorUpdateEntries = nullptr;
+	DescriptorSetLayout *descriptorSetLayout = nullptr;
 };
 
-static inline DescriptorUpdateTemplate* Cast(VkDescriptorUpdateTemplate object)
+static inline DescriptorUpdateTemplate *Cast(VkDescriptorUpdateTemplate object)
 {
 	return DescriptorUpdateTemplate::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_DESCRIPTOR_UPDATE_TEMPLATE_HPP_
+#endif  // VK_DESCRIPTOR_UPDATE_TEMPLATE_HPP_
diff --git a/src/Vulkan/VkDevice.cpp b/src/Vulkan/VkDevice.cpp
index 55add6e..cbc399c 100644
--- a/src/Vulkan/VkDevice.cpp
+++ b/src/Vulkan/VkDevice.cpp
@@ -23,7 +23,7 @@
 
 #include <chrono>
 #include <climits>
-#include <new> // Must #include this to use "placement new"
+#include <new>  // Must #include this to use "placement new"
 
 namespace {
 
@@ -36,18 +36,18 @@
 
 namespace vk {
 
-std::shared_ptr<rr::Routine> Device::SamplingRoutineCache::query(const vk::Device::SamplingRoutineCache::Key& key) const
+std::shared_ptr<rr::Routine> Device::SamplingRoutineCache::query(const vk::Device::SamplingRoutineCache::Key &key) const
 {
 	return cache.query(key);
 }
 
-void Device::SamplingRoutineCache::add(const vk::Device::SamplingRoutineCache::Key& key, const std::shared_ptr<rr::Routine>& routine)
+void Device::SamplingRoutineCache::add(const vk::Device::SamplingRoutineCache::Key &key, const std::shared_ptr<rr::Routine> &routine)
 {
 	ASSERT(routine);
 	cache.add(key, routine);
 }
 
-rr::Routine* Device::SamplingRoutineCache::queryConst(const vk::Device::SamplingRoutineCache::Key& key) const
+rr::Routine *Device::SamplingRoutineCache::queryConst(const vk::Device::SamplingRoutineCache::Key &key) const
 {
 	return cache.queryConstCache(key).get();
 }
@@ -57,31 +57,32 @@
 	cache.updateConstCache();
 }
 
-Device::Device(const VkDeviceCreateInfo* pCreateInfo, void* mem, PhysicalDevice *physicalDevice, const VkPhysicalDeviceFeatures *enabledFeatures, const std::shared_ptr<marl::Scheduler>& scheduler)
-	: physicalDevice(physicalDevice),
-	  queues(reinterpret_cast<Queue*>(mem)),
-	  enabledExtensionCount(pCreateInfo->enabledExtensionCount),
-	  enabledFeatures(enabledFeatures ? *enabledFeatures : VkPhysicalDeviceFeatures{}),  // "Setting pEnabledFeatures to NULL and not including a VkPhysicalDeviceFeatures2 in the pNext member of VkDeviceCreateInfo is equivalent to setting all members of the structure to VK_FALSE."
-	  scheduler(scheduler)
+Device::Device(const VkDeviceCreateInfo *pCreateInfo, void *mem, PhysicalDevice *physicalDevice, const VkPhysicalDeviceFeatures *enabledFeatures, const std::shared_ptr<marl::Scheduler> &scheduler)
+    : physicalDevice(physicalDevice)
+    , queues(reinterpret_cast<Queue *>(mem))
+    , enabledExtensionCount(pCreateInfo->enabledExtensionCount)
+    , enabledFeatures(enabledFeatures ? *enabledFeatures : VkPhysicalDeviceFeatures{})
+    ,  // "Setting pEnabledFeatures to NULL and not including a VkPhysicalDeviceFeatures2 in the pNext member of VkDeviceCreateInfo is equivalent to setting all members of the structure to VK_FALSE."
+    scheduler(scheduler)
 {
 	for(uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
 	{
-		const VkDeviceQueueCreateInfo& queueCreateInfo = pCreateInfo->pQueueCreateInfos[i];
+		const VkDeviceQueueCreateInfo &queueCreateInfo = pCreateInfo->pQueueCreateInfos[i];
 		queueCount += queueCreateInfo.queueCount;
 	}
 
 	uint32_t queueID = 0;
 	for(uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
 	{
-		const VkDeviceQueueCreateInfo& queueCreateInfo = pCreateInfo->pQueueCreateInfos[i];
+		const VkDeviceQueueCreateInfo &queueCreateInfo = pCreateInfo->pQueueCreateInfos[i];
 
 		for(uint32_t j = 0; j < queueCreateInfo.queueCount; j++, queueID++)
 		{
-			new (&queues[queueID]) Queue(this, scheduler.get());
+			new(&queues[queueID]) Queue(this, scheduler.get());
 		}
 	}
 
-	extensions = reinterpret_cast<ExtensionName*>(static_cast<uint8_t*>(mem) + (sizeof(Queue) * queueCount));
+	extensions = reinterpret_cast<ExtensionName *>(static_cast<uint8_t *>(mem) + (sizeof(Queue) * queueCount));
 	for(uint32_t i = 0; i < enabledExtensionCount; i++)
 	{
 		strncpy(extensions[i], pCreateInfo->ppEnabledExtensionNames[i], VK_MAX_EXTENSION_NAME_SIZE);
@@ -90,7 +91,7 @@
 	if(pCreateInfo->enabledLayerCount)
 	{
 		// "The ppEnabledLayerNames and enabledLayerCount members of VkDeviceCreateInfo are deprecated and their values must be ignored by implementations."
-		UNIMPLEMENTED("enabledLayerCount");   // TODO(b/119321052): UNIMPLEMENTED() should be used only for features that must still be implemented. Use a more informational macro here.
+		UNIMPLEMENTED("enabledLayerCount");  // TODO(b/119321052): UNIMPLEMENTED() should be used only for features that must still be implemented. Use a more informational macro here.
 	}
 
 	// FIXME (b/119409619): use an allocator here so we can control all memory allocations
@@ -98,7 +99,7 @@
 	samplingRoutineCache.reset(new SamplingRoutineCache());
 }
 
-void Device::destroy(const VkAllocationCallbacks* pAllocator)
+void Device::destroy(const VkAllocationCallbacks *pAllocator)
 {
 	for(uint32_t i = 0; i < queueCount; i++)
 	{
@@ -108,7 +109,7 @@
 	vk::deallocate(queues, pAllocator);
 }
 
-size_t Device::ComputeRequiredAllocationSize(const VkDeviceCreateInfo* pCreateInfo)
+size_t Device::ComputeRequiredAllocationSize(const VkDeviceCreateInfo *pCreateInfo)
 {
 	uint32_t queueCount = 0;
 	for(uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
@@ -119,7 +120,7 @@
 	return (sizeof(Queue) * queueCount) + (pCreateInfo->enabledExtensionCount * sizeof(ExtensionName));
 }
 
-bool Device::hasExtension(const char* extensionName) const
+bool Device::hasExtension(const char *extensionName) const
 {
 	for(uint32_t i = 0; i < enabledExtensionCount; i++)
 	{
@@ -138,7 +139,7 @@
 	return queues[queueIndex];
 }
 
-VkResult Device::waitForFences(uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout)
+VkResult Device::waitForFences(uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout)
 {
 	using time_point = std::chrono::time_point<std::chrono::system_clock, std::chrono::nanoseconds>;
 	const time_point start = now();
@@ -146,27 +147,27 @@
 	bool infiniteTimeout = (timeout > max_timeout);
 	const time_point end_ns = start + std::chrono::nanoseconds(std::min(max_timeout, timeout));
 
-	if(waitAll != VK_FALSE) // All fences must be signaled
+	if(waitAll != VK_FALSE)  // All fences must be signaled
 	{
 		for(uint32_t i = 0; i < fenceCount; i++)
 		{
 			if(timeout == 0)
 			{
-				if(Cast(pFences[i])->getStatus() != VK_SUCCESS) // At least one fence is not signaled
+				if(Cast(pFences[i])->getStatus() != VK_SUCCESS)  // At least one fence is not signaled
 				{
 					return VK_TIMEOUT;
 				}
 			}
 			else if(infiniteTimeout)
 			{
-				if(Cast(pFences[i])->wait() != VK_SUCCESS) // At least one fence is not signaled
+				if(Cast(pFences[i])->wait() != VK_SUCCESS)  // At least one fence is not signaled
 				{
 					return VK_TIMEOUT;
 				}
 			}
 			else
 			{
-				if(Cast(pFences[i])->wait(end_ns) != VK_SUCCESS) // At least one fence is not signaled
+				if(Cast(pFences[i])->wait(end_ns) != VK_SUCCESS)  // At least one fence is not signaled
 				{
 					return VK_TIMEOUT;
 				}
@@ -175,7 +176,7 @@
 
 		return VK_SUCCESS;
 	}
-	else // At least one fence must be signaled
+	else  // At least one fence must be signaled
 	{
 		marl::containers::vector<marl::Event, 8> events;
 		for(uint32_t i = 0; i < fenceCount; i++)
@@ -211,8 +212,8 @@
 	return VK_SUCCESS;
 }
 
-void Device::getDescriptorSetLayoutSupport(const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
-                                           VkDescriptorSetLayoutSupport* pSupport) const
+void Device::getDescriptorSetLayoutSupport(const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
+                                           VkDescriptorSetLayoutSupport *pSupport) const
 {
 	// From Vulkan Spec 13.2.1 Descriptor Set Layout, in description of vkGetDescriptorSetLayoutSupport:
 	// "This command does not consider other limits such as maxPerStageDescriptor*, and so a descriptor
@@ -223,8 +224,8 @@
 	pSupport->supported = VK_TRUE;
 }
 
-void Device::updateDescriptorSets(uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites,
-                                  uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies)
+void Device::updateDescriptorSets(uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
+                                  uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies)
 {
 	for(uint32_t i = 0; i < descriptorWriteCount; i++)
 	{
@@ -237,18 +238,18 @@
 	}
 }
 
-void Device::getRequirements(VkMemoryDedicatedRequirements* requirements) const
+void Device::getRequirements(VkMemoryDedicatedRequirements *requirements) const
 {
 	requirements->prefersDedicatedAllocation = VK_FALSE;
 	requirements->requiresDedicatedAllocation = VK_FALSE;
 }
 
-Device::SamplingRoutineCache* Device::getSamplingRoutineCache() const
+Device::SamplingRoutineCache *Device::getSamplingRoutineCache() const
 {
 	return samplingRoutineCache.get();
 }
 
-rr::Routine* Device::findInConstCache(const SamplingRoutineCache::Key& key) const
+rr::Routine *Device::findInConstCache(const SamplingRoutineCache::Key &key) const
 {
 	return samplingRoutineCache->queryConst(key);
 }
@@ -259,7 +260,7 @@
 	samplingRoutineCache->updateConstCache();
 }
 
-std::mutex& Device::getSamplingRoutineCacheMutex()
+std::mutex &Device::getSamplingRoutineCacheMutex()
 {
 	return samplingRoutineCacheMutex;
 }
diff --git a/src/Vulkan/VkDevice.hpp b/src/Vulkan/VkDevice.hpp
index 24882e0..94fbba6 100644
--- a/src/Vulkan/VkDevice.hpp
+++ b/src/Vulkan/VkDevice.hpp
@@ -21,8 +21,12 @@
 #include <memory>
 #include <mutex>
 
-namespace marl { class Scheduler; }
-namespace sw { class Blitter; }
+namespace marl {
+class Scheduler;
+}
+namespace sw {
+class Blitter;
+}
 
 namespace vk {
 
@@ -34,28 +38,30 @@
 public:
 	static constexpr VkSystemAllocationScope GetAllocationScope() { return VK_SYSTEM_ALLOCATION_SCOPE_DEVICE; }
 
-	Device(const VkDeviceCreateInfo* pCreateInfo, void* mem, PhysicalDevice *physicalDevice, const VkPhysicalDeviceFeatures *enabledFeatures, const std::shared_ptr<marl::Scheduler>& scheduler);
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	Device(const VkDeviceCreateInfo *pCreateInfo, void *mem, PhysicalDevice *physicalDevice, const VkPhysicalDeviceFeatures *enabledFeatures, const std::shared_ptr<marl::Scheduler> &scheduler);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
-	static size_t ComputeRequiredAllocationSize(const VkDeviceCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkDeviceCreateInfo *pCreateInfo);
 
-	bool hasExtension(const char* extensionName) const;
+	bool hasExtension(const char *extensionName) const;
 	VkQueue getQueue(uint32_t queueFamilyIndex, uint32_t queueIndex) const;
-	VkResult waitForFences(uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout);
+	VkResult waitForFences(uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout);
 	VkResult waitIdle();
-	void getDescriptorSetLayoutSupport(const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
-	                                   VkDescriptorSetLayoutSupport* pSupport) const;
+	void getDescriptorSetLayoutSupport(const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
+	                                   VkDescriptorSetLayoutSupport *pSupport) const;
 	PhysicalDevice *getPhysicalDevice() const { return physicalDevice; }
-	void updateDescriptorSets(uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites,
-	                          uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies);
-	void getRequirements(VkMemoryDedicatedRequirements* requirements) const;
+	void updateDescriptorSets(uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
+	                          uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies);
+	void getRequirements(VkMemoryDedicatedRequirements *requirements) const;
 	const VkPhysicalDeviceFeatures &getEnabledFeatures() const { return enabledFeatures; }
-	sw::Blitter* getBlitter() const { return blitter.get(); }
+	sw::Blitter *getBlitter() const { return blitter.get(); }
 
 	class SamplingRoutineCache
 	{
 	public:
-		SamplingRoutineCache() : cache(1024) {}
+		SamplingRoutineCache()
+		    : cache(1024)
+		{}
 		~SamplingRoutineCache() {}
 
 		struct Key
@@ -64,27 +70,27 @@
 			uint32_t sampler;
 			uint32_t imageView;
 
-			inline bool operator == (const Key& rhs) const;
+			inline bool operator==(const Key &rhs) const;
 
 			struct Hash
 			{
-				inline std::size_t operator()(const Key& key) const noexcept;
+				inline std::size_t operator()(const Key &key) const noexcept;
 			};
 		};
 
-		std::shared_ptr<rr::Routine> query(const Key& key) const;
-		void add(const Key& key, const std::shared_ptr<rr::Routine>& routine);
+		std::shared_ptr<rr::Routine> query(const Key &key) const;
+		void add(const Key &key, const std::shared_ptr<rr::Routine> &routine);
 
-		rr::Routine* queryConst(const Key& key) const;
+		rr::Routine *queryConst(const Key &key) const;
 		void updateConstCache();
 
 	private:
 		sw::LRUConstCache<Key, std::shared_ptr<rr::Routine>, Key::Hash> cache;
 	};
 
-	SamplingRoutineCache* getSamplingRoutineCache() const;
-	std::mutex& getSamplingRoutineCacheMutex();
-	rr::Routine* findInConstCache(const SamplingRoutineCache::Key& key) const;
+	SamplingRoutineCache *getSamplingRoutineCache() const;
+	std::mutex &getSamplingRoutineCacheMutex();
+	rr::Routine *findInConstCache(const SamplingRoutineCache::Key &key) const;
 	void updateSamplingRoutineConstCache();
 
 private:
@@ -96,24 +102,24 @@
 	std::mutex samplingRoutineCacheMutex;
 	uint32_t enabledExtensionCount = 0;
 	typedef char ExtensionName[VK_MAX_EXTENSION_NAME_SIZE];
-	ExtensionName* extensions = nullptr;
+	ExtensionName *extensions = nullptr;
 	const VkPhysicalDeviceFeatures enabledFeatures = {};
 	std::shared_ptr<marl::Scheduler> scheduler;
 };
 
 using DispatchableDevice = DispatchableObject<Device, VkDevice>;
 
-static inline Device* Cast(VkDevice object)
+static inline Device *Cast(VkDevice object)
 {
 	return DispatchableDevice::Cast(object);
 }
 
-inline bool vk::Device::SamplingRoutineCache::Key::operator == (const Key& rhs) const
+inline bool vk::Device::SamplingRoutineCache::Key::operator==(const Key &rhs) const
 {
 	return instruction == rhs.instruction && sampler == rhs.sampler && imageView == rhs.imageView;
 }
 
-inline std::size_t vk::Device::SamplingRoutineCache::Key::Hash::operator() (const Key& key) const noexcept
+inline std::size_t vk::Device::SamplingRoutineCache::Key::Hash::operator()(const Key &key) const noexcept
 {
 	// Combine three 32-bit integers into a 64-bit hash.
 	// 2642239 is the largest prime which when cubed is smaller than 2^64.
@@ -125,4 +131,4 @@
 
 }  // namespace vk
 
-#endif // VK_DEVICE_HPP_
+#endif  // VK_DEVICE_HPP_
diff --git a/src/Vulkan/VkDeviceMemory.cpp b/src/Vulkan/VkDeviceMemory.cpp
index 2c1991a..6eff9e6 100644
--- a/src/Vulkan/VkDeviceMemory.cpp
+++ b/src/Vulkan/VkDeviceMemory.cpp
@@ -24,19 +24,19 @@
 public:
 	virtual ~ExternalBase() = default;
 
-    // Allocate the memory according to |size|. On success return VK_SUCCESS
-    // and sets |*pBuffer|.
-	virtual VkResult allocate(size_t size, void** pBuffer) = 0;
+	// Allocate the memory according to |size|. On success return VK_SUCCESS
+	// and sets |*pBuffer|.
+	virtual VkResult allocate(size_t size, void **pBuffer) = 0;
 
-    // Deallocate previously allocated memory at |buffer|.
-	virtual void deallocate(void* buffer, size_t size) = 0;
+	// Deallocate previously allocated memory at |buffer|.
+	virtual void deallocate(void *buffer, size_t size) = 0;
 
-    // Return the handle type flag bit supported by this implementation.
-    // A value of 0 corresponds to non-external memory.
+	// Return the handle type flag bit supported by this implementation.
+	// A value of 0 corresponds to non-external memory.
 	virtual VkExternalMemoryHandleTypeFlagBits getFlagBit() const = 0;
 
 #if SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
-	virtual VkResult exportFd(int* pFd) const
+	virtual VkResult exportFd(int *pFd) const
 	{
 		return VK_ERROR_INVALID_EXTERNAL_HANDLE;
 	}
@@ -56,24 +56,24 @@
 public:
 	VkExternalMemoryHandleTypeFlagBits typeFlagBit;
 	size_t instanceSize;
-	void (*instanceInit)(void* external, const VkMemoryAllocateInfo* pAllocateInfo);
+	void (*instanceInit)(void *external, const VkMemoryAllocateInfo *pAllocateInfo);
 };
 
 // Template function that parses a |pAllocateInfo.pNext| chain to verify that
 // it asks for the creation or import of a memory type managed by implementation
 // class T. On success, return true and sets |pTraits| accordingly. Otherwise
 // return false.
-template <typename  T>
-static bool parseCreateInfo(const VkMemoryAllocateInfo* pAllocateInfo,
-							ExternalMemoryTraits* pTraits)
+template<typename T>
+static bool parseCreateInfo(const VkMemoryAllocateInfo *pAllocateInfo,
+                            ExternalMemoryTraits *pTraits)
 {
 	if(T::supportsAllocateInfo(pAllocateInfo))
 	{
 		pTraits->typeFlagBit = T::typeFlagBit;
 		pTraits->instanceSize = sizeof(T);
-		pTraits->instanceInit = [](void* external,
-								   const VkMemoryAllocateInfo* pAllocateInfo) {
-			new (external) T(pAllocateInfo);
+		pTraits->instanceInit = [](void *external,
+		                           const VkMemoryAllocateInfo *pAllocateInfo) {
+			new(external) T(pAllocateInfo);
 		};
 		return true;
 	}
@@ -85,20 +85,20 @@
 class DeviceMemoryHostExternalBase : public DeviceMemory::ExternalBase
 {
 public:
-    // Does not support any external memory type at all.
+	// Does not support any external memory type at all.
 	static const VkExternalMemoryHandleTypeFlagBits typeFlagBit = (VkExternalMemoryHandleTypeFlagBits)0;
 
-    // Always return true as is used as a fallback in findTraits() below.
-	static bool supportsAllocateInfo(const VkMemoryAllocateInfo* pAllocateInfo)
+	// Always return true as is used as a fallback in findTraits() below.
+	static bool supportsAllocateInfo(const VkMemoryAllocateInfo *pAllocateInfo)
 	{
 		return true;
 	}
 
-	DeviceMemoryHostExternalBase(const VkMemoryAllocateInfo* pAllocateInfo) {}
+	DeviceMemoryHostExternalBase(const VkMemoryAllocateInfo *pAllocateInfo) {}
 
-	VkResult allocate(size_t size, void** pBuffer) override
+	VkResult allocate(size_t size, void **pBuffer) override
 	{
-		void* buffer = vk::allocate(size, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY);
+		void *buffer = vk::allocate(size, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY);
 		if(!buffer)
 			return VK_ERROR_OUT_OF_DEVICE_MEMORY;
 
@@ -106,7 +106,7 @@
 		return VK_SUCCESS;
 	}
 
-	void deallocate(void* buffer, size_t size) override
+	void deallocate(void *buffer, size_t size) override
 	{
 		vk::deallocate(buffer, DEVICE_MEMORY);
 	}
@@ -120,17 +120,17 @@
 }  // namespace vk
 
 #if SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
-#  if defined(__linux__) || defined(__ANDROID__)
-#    include "VkDeviceMemoryExternalLinux.hpp"
-#  else
-#    error "Missing VK_KHR_external_memory_fd implementation for this platform!"
-#  endif
+#	if defined(__linux__) || defined(__ANDROID__)
+#		include "VkDeviceMemoryExternalLinux.hpp"
+#	else
+#		error "Missing VK_KHR_external_memory_fd implementation for this platform!"
+#	endif
 #endif
 
 namespace vk {
 
-static void findTraits(const VkMemoryAllocateInfo* pAllocateInfo,
-					   ExternalMemoryTraits*       pTraits)
+static void findTraits(const VkMemoryAllocateInfo *pAllocateInfo,
+                       ExternalMemoryTraits *pTraits)
 {
 #if SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
 	if(parseCreateInfo<OpaqueFdExternalMemory>(pAllocateInfo, pTraits))
@@ -141,9 +141,10 @@
 	parseCreateInfo<DeviceMemoryHostExternalBase>(pAllocateInfo, pTraits);
 }
 
-DeviceMemory::DeviceMemory(const VkMemoryAllocateInfo* pAllocateInfo, void* mem) :
-	size(pAllocateInfo->allocationSize), memoryTypeIndex(pAllocateInfo->memoryTypeIndex),
-	external(reinterpret_cast<ExternalBase *>(mem))
+DeviceMemory::DeviceMemory(const VkMemoryAllocateInfo *pAllocateInfo, void *mem)
+    : size(pAllocateInfo->allocationSize)
+    , memoryTypeIndex(pAllocateInfo->memoryTypeIndex)
+    , external(reinterpret_cast<ExternalBase *>(mem))
 {
 	ASSERT(size);
 
@@ -152,7 +153,7 @@
 	traits.instanceInit(external, pAllocateInfo);
 }
 
-void DeviceMemory::destroy(const VkAllocationCallbacks* pAllocator)
+void DeviceMemory::destroy(const VkAllocationCallbacks *pAllocator)
 {
 	if(buffer)
 	{
@@ -163,7 +164,7 @@
 	vk::deallocate(external, pAllocator);
 }
 
-size_t DeviceMemory::ComputeRequiredAllocationSize(const VkMemoryAllocateInfo* pAllocateInfo)
+size_t DeviceMemory::ComputeRequiredAllocationSize(const VkMemoryAllocateInfo *pAllocateInfo)
 {
 	ExternalMemoryTraits traits;
 	findTraits(pAllocateInfo, &traits);
@@ -180,7 +181,7 @@
 	return result;
 }
 
-VkResult DeviceMemory::map(VkDeviceSize pOffset, VkDeviceSize pSize, void** ppData)
+VkResult DeviceMemory::map(VkDeviceSize pOffset, VkDeviceSize pSize, void **ppData)
 {
 	*ppData = getOffsetPointer(pOffset);
 
@@ -192,15 +193,15 @@
 	return size;
 }
 
-void* DeviceMemory::getOffsetPointer(VkDeviceSize pOffset) const
+void *DeviceMemory::getOffsetPointer(VkDeviceSize pOffset) const
 {
 	ASSERT(buffer);
 
-	return reinterpret_cast<char*>(buffer) + pOffset;
+	return reinterpret_cast<char *>(buffer) + pOffset;
 }
 
 bool DeviceMemory::checkExternalMemoryHandleType(
-		VkExternalMemoryHandleTypeFlags supportedHandleTypes) const
+    VkExternalMemoryHandleTypeFlags supportedHandleTypes) const
 {
 	if(!supportedHandleTypes)
 	{
@@ -222,7 +223,7 @@
 }
 
 #if SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
-VkResult DeviceMemory::exportFd(int* pFd) const
+VkResult DeviceMemory::exportFd(int *pFd) const
 {
 	return external->exportFd(pFd);
 }
diff --git a/src/Vulkan/VkDeviceMemory.hpp b/src/Vulkan/VkDeviceMemory.hpp
index d529ec9..7ec0267 100644
--- a/src/Vulkan/VkDeviceMemory.hpp
+++ b/src/Vulkan/VkDeviceMemory.hpp
@@ -23,43 +23,41 @@
 class DeviceMemory : public Object<DeviceMemory, VkDeviceMemory>
 {
 public:
-	DeviceMemory(const VkMemoryAllocateInfo* pCreateInfo, void* mem);
+	DeviceMemory(const VkMemoryAllocateInfo *pCreateInfo, void *mem);
 
-	static size_t ComputeRequiredAllocationSize(const VkMemoryAllocateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkMemoryAllocateInfo *pCreateInfo);
 
 #if SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
-	VkResult exportFd(int* pFd) const;
+	VkResult exportFd(int *pFd) const;
 #endif
 
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 	VkResult allocate();
-	VkResult map(VkDeviceSize offset, VkDeviceSize size, void** ppData);
+	VkResult map(VkDeviceSize offset, VkDeviceSize size, void **ppData);
 	VkDeviceSize getCommittedMemoryInBytes() const;
-	void* getOffsetPointer(VkDeviceSize pOffset) const;
+	void *getOffsetPointer(VkDeviceSize pOffset) const;
 	uint32_t getMemoryTypeIndex() const { return memoryTypeIndex; }
 
 	// If this is external memory, return true iff its handle type matches the bitmask
 	// provided by |supportedExternalHandleTypes|. Otherwise, always return true.
 	bool checkExternalMemoryHandleType(
-				VkExternalMemoryHandleTypeFlags supportedExternalMemoryHandleType) const;
+	    VkExternalMemoryHandleTypeFlags supportedExternalMemoryHandleType) const;
 
 	// Internal implementation class for external memory. Platform-specific.
 	class ExternalBase;
 
 private:
-
-	void*         buffer = nullptr;
-	VkDeviceSize  size = 0;
-	uint32_t      memoryTypeIndex = 0;
-	ExternalBase* external = nullptr;
+	void *buffer = nullptr;
+	VkDeviceSize size = 0;
+	uint32_t memoryTypeIndex = 0;
+	ExternalBase *external = nullptr;
 };
 
-static inline DeviceMemory* Cast(VkDeviceMemory object)
+static inline DeviceMemory *Cast(VkDeviceMemory object)
 {
 	return DeviceMemory::Cast(object);
 }
 
-
 }  // namespace vk
 
-#endif // VK_DEVICE_MEMORY_HPP_
+#endif  // VK_DEVICE_MEMORY_HPP_
diff --git a/src/Vulkan/VkDeviceMemoryExternalLinux.hpp b/src/Vulkan/VkDeviceMemoryExternalLinux.hpp
index a9f0d0d..89dd4f9 100644
--- a/src/Vulkan/VkDeviceMemoryExternalLinux.hpp
+++ b/src/Vulkan/VkDeviceMemoryExternalLinux.hpp
@@ -34,16 +34,16 @@
 		AllocateInfo() = default;
 
 		// Parse the VkMemoryAllocateInfo.pNext chain to initialize an AllocateInfo.
-		AllocateInfo(const VkMemoryAllocateInfo* pAllocateInfo)
+		AllocateInfo(const VkMemoryAllocateInfo *pAllocateInfo)
 		{
-			const auto* createInfo = reinterpret_cast<const VkBaseInStructure*>(pAllocateInfo->pNext);
+			const auto *createInfo = reinterpret_cast<const VkBaseInStructure *>(pAllocateInfo->pNext);
 			while(createInfo)
 			{
 				switch(createInfo->sType)
 				{
-				case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
+					case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
 					{
-						const auto* importInfo = reinterpret_cast<const VkImportMemoryFdInfoKHR*>(createInfo);
+						const auto *importInfo = reinterpret_cast<const VkImportMemoryFdInfoKHR *>(createInfo);
 
 						if(importInfo->handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
 						{
@@ -53,9 +53,9 @@
 						fd = importInfo->fd;
 					}
 					break;
-				case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
+					case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
 					{
-						const auto* exportInfo = reinterpret_cast<const VkExportMemoryAllocateInfo*>(createInfo);
+						const auto *exportInfo = reinterpret_cast<const VkExportMemoryAllocateInfo *>(createInfo);
 
 						if(exportInfo->handleTypes != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
 						{
@@ -65,8 +65,7 @@
 					}
 					break;
 
-				default:
-					;
+					default:;
 				}
 				createInfo = createInfo->pNext;
 			}
@@ -75,14 +74,14 @@
 
 	static const VkExternalMemoryHandleTypeFlagBits typeFlagBit = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
 
-	static bool supportsAllocateInfo(const VkMemoryAllocateInfo* pAllocateInfo)
+	static bool supportsAllocateInfo(const VkMemoryAllocateInfo *pAllocateInfo)
 	{
 		AllocateInfo info(pAllocateInfo);
 		return info.importFd || info.exportFd;
 	}
 
-	explicit OpaqueFdExternalMemory(const VkMemoryAllocateInfo* pAllocateInfo)
-			: allocateInfo(pAllocateInfo)
+	explicit OpaqueFdExternalMemory(const VkMemoryAllocateInfo *pAllocateInfo)
+	    : allocateInfo(pAllocateInfo)
 	{
 	}
 
@@ -91,7 +90,7 @@
 		memfd.close();
 	}
 
-	VkResult allocate(size_t size, void** pBuffer) override
+	VkResult allocate(size_t size, void **pBuffer) override
 	{
 		if(allocateInfo.importFd)
 		{
@@ -113,7 +112,7 @@
 				return VK_ERROR_OUT_OF_DEVICE_MEMORY;
 			}
 		}
-		void* addr = memfd.mapReadWrite(0, size);
+		void *addr = memfd.mapReadWrite(0, size);
 		if(!addr)
 		{
 			return VK_ERROR_MEMORY_MAP_FAILED;
@@ -122,7 +121,7 @@
 		return VK_SUCCESS;
 	}
 
-	void deallocate(void* buffer, size_t size) override
+	void deallocate(void *buffer, size_t size) override
 	{
 		memfd.unmap(buffer, size);
 	}
@@ -132,7 +131,7 @@
 		return typeFlagBit;
 	}
 
-	VkResult exportFd(int* pFd) const override
+	VkResult exportFd(int *pFd) const override
 	{
 		int fd = memfd.exportFd();
 		if(fd < 0)
@@ -144,6 +143,6 @@
 	}
 
 private:
-	LinuxMemFd   memfd;
+	LinuxMemFd memfd;
 	AllocateInfo allocateInfo;
 };
diff --git a/src/Vulkan/VkEvent.hpp b/src/Vulkan/VkEvent.hpp
index 3304558..1ec9f26 100644
--- a/src/Vulkan/VkEvent.hpp
+++ b/src/Vulkan/VkEvent.hpp
@@ -24,11 +24,11 @@
 class Event : public Object<Event, VkEvent>
 {
 public:
-	Event(const VkEventCreateInfo* pCreateInfo, void* mem)
+	Event(const VkEventCreateInfo *pCreateInfo, void *mem)
 	{
 	}
 
-	static size_t ComputeRequiredAllocationSize(const VkEventCreateInfo* pCreateInfo)
+	static size_t ComputeRequiredAllocationSize(const VkEventCreateInfo *pCreateInfo)
 	{
 		return 0;
 	}
@@ -62,16 +62,16 @@
 	}
 
 private:
-	VkResult status = VK_EVENT_RESET; // guarded by mutex
+	VkResult status = VK_EVENT_RESET;  // guarded by mutex
 	std::mutex mutex;
 	std::condition_variable condition;
 };
 
-static inline Event* Cast(VkEvent object)
+static inline Event *Cast(VkEvent object)
 {
 	return Event::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_EVENT_HPP_
+#endif  // VK_EVENT_HPP_
diff --git a/src/Vulkan/VkFence.hpp b/src/Vulkan/VkFence.hpp
index 170798b..086eff4 100644
--- a/src/Vulkan/VkFence.hpp
+++ b/src/Vulkan/VkFence.hpp
@@ -27,10 +27,11 @@
 class Fence : public Object<Fence, VkFence>, public sw::TaskEvents
 {
 public:
-	Fence(const VkFenceCreateInfo* pCreateInfo, void* mem) :
-		event(marl::Event::Mode::Manual, (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) != 0) {}
+	Fence(const VkFenceCreateInfo *pCreateInfo, void *mem)
+	    : event(marl::Event::Mode::Manual, (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) != 0)
+	{}
 
-	static size_t ComputeRequiredAllocationSize(const VkFenceCreateInfo* pCreateInfo)
+	static size_t ComputeRequiredAllocationSize(const VkFenceCreateInfo *pCreateInfo)
 	{
 		return 0;
 	}
@@ -51,13 +52,13 @@
 		return VK_SUCCESS;
 	}
 
-    template <class CLOCK, class DURATION>
-	VkResult wait(const std::chrono::time_point<CLOCK, DURATION>& timeout)
+	template<class CLOCK, class DURATION>
+	VkResult wait(const std::chrono::time_point<CLOCK, DURATION> &timeout)
 	{
 		return event.wait_until(timeout) ? VK_SUCCESS : VK_TIMEOUT;
 	}
 
-	const marl::Event& getEvent() const { return event; }
+	const marl::Event &getEvent() const { return event; }
 
 	// TaskEvents compliance
 	void start() override
@@ -76,17 +77,17 @@
 	}
 
 private:
-	Fence(const Fence&) = delete;
+	Fence(const Fence &) = delete;
 
 	marl::WaitGroup wg;
 	const marl::Event event;
 };
 
-static inline Fence* Cast(VkFence object)
+static inline Fence *Cast(VkFence object)
 {
 	return Fence::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_FENCE_HPP_
+#endif  // VK_FENCE_HPP_
diff --git a/src/Vulkan/VkFormat.cpp b/src/Vulkan/VkFormat.cpp
index efb3929..7487543 100644
--- a/src/Vulkan/VkFormat.cpp
+++ b/src/Vulkan/VkFormat.cpp
@@ -22,29 +22,29 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_R4G4_UNORM_PACK8:
-	case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
-	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-	case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
-	case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
-	case VK_FORMAT_R5G6B5_UNORM_PACK16:
-	case VK_FORMAT_B5G6R5_UNORM_PACK16:
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8B8_UNORM:
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-	case VK_FORMAT_R16_UNORM:
-	case VK_FORMAT_R16G16_UNORM:
-	case VK_FORMAT_R16G16B16_UNORM:
-	case VK_FORMAT_R16G16B16A16_UNORM:
-		return true;
-	default:
-		return false;
+		case VK_FORMAT_R4G4_UNORM_PACK8:
+		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_B5G6R5_UNORM_PACK16:
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8B8_UNORM:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_R16_UNORM:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16B16_UNORM:
+		case VK_FORMAT_R16G16B16A16_UNORM:
+			return true;
+		default:
+			return false;
 	}
 }
 
@@ -52,21 +52,21 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8B8_SNORM:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-	case VK_FORMAT_B8G8R8A8_SNORM:
-	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-	case VK_FORMAT_R16_SNORM:
-	case VK_FORMAT_R16G16_SNORM:
-	case VK_FORMAT_R16G16B16_SNORM:
-	case VK_FORMAT_R16G16B16A16_SNORM:
-		return true;
-	default:
-		return false;
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8B8_SNORM:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+		case VK_FORMAT_B8G8R8A8_SNORM:
+		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16B16_SNORM:
+		case VK_FORMAT_R16G16B16A16_SNORM:
+			return true;
+		default:
+			return false;
 	}
 }
 
@@ -74,30 +74,30 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8B8_SINT:
-	case VK_FORMAT_B8G8R8_SINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_B8G8R8A8_SINT:
-	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16B16_SINT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32B32_SINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R64_SINT:
-	case VK_FORMAT_R64G64_SINT:
-	case VK_FORMAT_R64G64B64_SINT:
-	case VK_FORMAT_R64G64B64A64_SINT:
-		return true;
-	default:
-		return false;
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8B8_SINT:
+		case VK_FORMAT_B8G8R8_SINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_B8G8R8A8_SINT:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16B16_SINT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32B32_SINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R64_SINT:
+		case VK_FORMAT_R64G64_SINT:
+		case VK_FORMAT_R64G64B64_SINT:
+		case VK_FORMAT_R64G64B64A64_SINT:
+			return true;
+		default:
+			return false;
 	}
 }
 
@@ -105,31 +105,31 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8B8_UINT:
-	case VK_FORMAT_B8G8R8_UINT:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_B8G8R8A8_UINT:
-	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16B16_UINT:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32B32_UINT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R64_UINT:
-	case VK_FORMAT_R64G64_UINT:
-	case VK_FORMAT_R64G64B64_UINT:
-	case VK_FORMAT_R64G64B64A64_UINT:
-	case VK_FORMAT_S8_UINT:
-		return true;
-	default:
-		return false;
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8B8_UINT:
+		case VK_FORMAT_B8G8R8_UINT:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_B8G8R8A8_UINT:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16B16_UINT:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32B32_UINT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R64_UINT:
+		case VK_FORMAT_R64G64_UINT:
+		case VK_FORMAT_R64G64B64_UINT:
+		case VK_FORMAT_R64G64B64A64_UINT:
+		case VK_FORMAT_S8_UINT:
+			return true;
+		default:
+			return false;
 	}
 }
 
@@ -150,15 +150,15 @@
 	// YCbCr formats
 	switch(format)
 	{
-	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		aspects = VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT;
-		break;
-	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-		aspects = VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT;
-		break;
-	default:
-		ASSERT(!isYcbcrFormat());
-		break;
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+			aspects = VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT;
+			break;
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+			aspects = VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT;
+			break;
+		default:
+			ASSERT(!isYcbcrFormat());
+			break;
 	}
 
 	// Anything else is "color".
@@ -170,88 +170,88 @@
 {
 	switch(aspect)
 	{
-	case VK_IMAGE_ASPECT_COLOR_BIT:
-	case (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT):
-	case (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT):
-	case (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT):
-		return format;
+		case VK_IMAGE_ASPECT_COLOR_BIT:
+		case(VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT):
+		case(VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT):
+		case(VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT):
+			return format;
 
-	case VK_IMAGE_ASPECT_DEPTH_BIT:
-		switch(format)
-		{
-		case VK_FORMAT_D16_UNORM:
-		case VK_FORMAT_D16_UNORM_S8_UINT:
-			return VK_FORMAT_D16_UNORM;
-		case VK_FORMAT_D24_UNORM_S8_UINT:
-			return VK_FORMAT_X8_D24_UNORM_PACK32;
-		case VK_FORMAT_D32_SFLOAT:
-		case VK_FORMAT_D32_SFLOAT_S8_UINT:
-			return VK_FORMAT_D32_SFLOAT;
-		default:
-			UNSUPPORTED("format %d", int(format));
+		case VK_IMAGE_ASPECT_DEPTH_BIT:
+			switch(format)
+			{
+				case VK_FORMAT_D16_UNORM:
+				case VK_FORMAT_D16_UNORM_S8_UINT:
+					return VK_FORMAT_D16_UNORM;
+				case VK_FORMAT_D24_UNORM_S8_UINT:
+					return VK_FORMAT_X8_D24_UNORM_PACK32;
+				case VK_FORMAT_D32_SFLOAT:
+				case VK_FORMAT_D32_SFLOAT_S8_UINT:
+					return VK_FORMAT_D32_SFLOAT;
+				default:
+					UNSUPPORTED("format %d", int(format));
+					break;
+			}
 			break;
-		}
-		break;
 
-	case VK_IMAGE_ASPECT_STENCIL_BIT:
-		switch(format)
-		{
-		case VK_FORMAT_S8_UINT:
-		case VK_FORMAT_D16_UNORM_S8_UINT:
-		case VK_FORMAT_D24_UNORM_S8_UINT:
-		case VK_FORMAT_D32_SFLOAT_S8_UINT:
-			return VK_FORMAT_S8_UINT;
-		default:
-			UNSUPPORTED("format %d", int(format));
+		case VK_IMAGE_ASPECT_STENCIL_BIT:
+			switch(format)
+			{
+				case VK_FORMAT_S8_UINT:
+				case VK_FORMAT_D16_UNORM_S8_UINT:
+				case VK_FORMAT_D24_UNORM_S8_UINT:
+				case VK_FORMAT_D32_SFLOAT_S8_UINT:
+					return VK_FORMAT_S8_UINT;
+				default:
+					UNSUPPORTED("format %d", int(format));
+					break;
+			}
 			break;
-		}
-		break;
 
-	// YCbCr formats
-	// Vulkan 1.1 section 32.1.1. Compatible formats of planes of multi-planar formats
-	case VK_IMAGE_ASPECT_PLANE_0_BIT:
-		switch(format)
-		{
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-			return VK_FORMAT_R8_UNORM;
-		default:
-			UNSUPPORTED("format %d", int(format));
+		// YCbCr formats
+		// Vulkan 1.1 section 32.1.1. Compatible formats of planes of multi-planar formats
+		case VK_IMAGE_ASPECT_PLANE_0_BIT:
+			switch(format)
+			{
+				case VK_FORMAT_R8_UNORM:
+				case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+				case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+					return VK_FORMAT_R8_UNORM;
+				default:
+					UNSUPPORTED("format %d", int(format));
+					break;
+			}
 			break;
-		}
-		break;
 
-	case VK_IMAGE_ASPECT_PLANE_1_BIT:
-		switch(format)
-		{
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-			return VK_FORMAT_R8_UNORM;
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-			return VK_FORMAT_R8G8_UNORM;
-		default:
-			UNSUPPORTED("format %d", int(format));
+		case VK_IMAGE_ASPECT_PLANE_1_BIT:
+			switch(format)
+			{
+				case VK_FORMAT_R8_UNORM:
+				case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+					return VK_FORMAT_R8_UNORM;
+				case VK_FORMAT_R8G8_UNORM:
+				case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+					return VK_FORMAT_R8G8_UNORM;
+				default:
+					UNSUPPORTED("format %d", int(format));
+					break;
+			}
 			break;
-		}
-		break;
 
-	case VK_IMAGE_ASPECT_PLANE_2_BIT:
-		switch(format)
-		{
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-			return VK_FORMAT_R8_UNORM;
-		default:
-			UNSUPPORTED("format %d", int(format));
+		case VK_IMAGE_ASPECT_PLANE_2_BIT:
+			switch(format)
+			{
+				case VK_FORMAT_R8_UNORM:
+				case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+					return VK_FORMAT_R8_UNORM;
+				default:
+					UNSUPPORTED("format %d", int(format));
+					break;
+			}
 			break;
-		}
-		break;
 
-	default:
-		UNSUPPORTED("aspect %x", int(aspect));
-		break;
+		default:
+			UNSUPPORTED("aspect %x", int(aspect));
+			break;
 	}
 
 	return format;
@@ -261,16 +261,16 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_D16_UNORM_S8_UINT:
-	case VK_FORMAT_D24_UNORM_S8_UINT:
-	case VK_FORMAT_S8_UINT:
-	case VK_FORMAT_D32_SFLOAT_S8_UINT:
-		return true;
-	case VK_FORMAT_D16_UNORM:
-	case VK_FORMAT_X8_D24_UNORM_PACK32:
-	case VK_FORMAT_D32_SFLOAT:
-	default:
-		return false;
+		case VK_FORMAT_D16_UNORM_S8_UINT:
+		case VK_FORMAT_D24_UNORM_S8_UINT:
+		case VK_FORMAT_S8_UINT:
+		case VK_FORMAT_D32_SFLOAT_S8_UINT:
+			return true;
+		case VK_FORMAT_D16_UNORM:
+		case VK_FORMAT_X8_D24_UNORM_PACK32:
+		case VK_FORMAT_D32_SFLOAT:
+		default:
+			return false;
 	}
 }
 
@@ -278,16 +278,16 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_D16_UNORM:
-	case VK_FORMAT_D16_UNORM_S8_UINT:
-	case VK_FORMAT_X8_D24_UNORM_PACK32:
-	case VK_FORMAT_D24_UNORM_S8_UINT:
-	case VK_FORMAT_D32_SFLOAT:
-	case VK_FORMAT_D32_SFLOAT_S8_UINT:
-		return true;
-	case VK_FORMAT_S8_UINT:
-	default:
-		return false;
+		case VK_FORMAT_D16_UNORM:
+		case VK_FORMAT_D16_UNORM_S8_UINT:
+		case VK_FORMAT_X8_D24_UNORM_PACK32:
+		case VK_FORMAT_D24_UNORM_S8_UINT:
+		case VK_FORMAT_D32_SFLOAT:
+		case VK_FORMAT_D32_SFLOAT_S8_UINT:
+			return true;
+		case VK_FORMAT_S8_UINT:
+		default:
+			return false;
 	}
 }
 
@@ -295,23 +295,23 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_R8_SRGB:
-	case VK_FORMAT_R8G8_SRGB:
-	case VK_FORMAT_R8G8B8_SRGB:
-	case VK_FORMAT_B8G8R8_SRGB:
-	case VK_FORMAT_R8G8B8A8_SRGB:
-	case VK_FORMAT_B8G8R8A8_SRGB:
-	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-	case VK_FORMAT_BC2_SRGB_BLOCK:
-	case VK_FORMAT_BC3_SRGB_BLOCK:
-		return true;
-	default:
-		return false;
+		case VK_FORMAT_R8_SRGB:
+		case VK_FORMAT_R8G8_SRGB:
+		case VK_FORMAT_R8G8B8_SRGB:
+		case VK_FORMAT_B8G8R8_SRGB:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+		case VK_FORMAT_BC2_SRGB_BLOCK:
+		case VK_FORMAT_BC3_SRGB_BLOCK:
+			return true;
+		default:
+			return false;
 	}
 }
 
@@ -319,142 +319,142 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_R4G4_UNORM_PACK8:
-	case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
-	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-	case VK_FORMAT_R5G6B5_UNORM_PACK16:
-	case VK_FORMAT_B5G6R5_UNORM_PACK16:
-	case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
-	case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
-	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8_USCALED:
-	case VK_FORMAT_R8_SSCALED:
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8_SRGB:
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8_USCALED:
-	case VK_FORMAT_R8G8_SSCALED:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8_SRGB:
-	case VK_FORMAT_R8G8B8_UNORM:
-	case VK_FORMAT_R8G8B8_SNORM:
-	case VK_FORMAT_R8G8B8_USCALED:
-	case VK_FORMAT_R8G8B8_SSCALED:
-	case VK_FORMAT_R8G8B8_UINT:
-	case VK_FORMAT_R8G8B8_SINT:
-	case VK_FORMAT_R8G8B8_SRGB:
-	case VK_FORMAT_B8G8R8_UNORM:
-	case VK_FORMAT_B8G8R8_SNORM:
-	case VK_FORMAT_B8G8R8_USCALED:
-	case VK_FORMAT_B8G8R8_SSCALED:
-	case VK_FORMAT_B8G8R8_UINT:
-	case VK_FORMAT_B8G8R8_SINT:
-	case VK_FORMAT_B8G8R8_SRGB:
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8G8B8A8_USCALED:
-	case VK_FORMAT_R8G8B8A8_SSCALED:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_R8G8B8A8_SRGB:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_B8G8R8A8_SNORM:
-	case VK_FORMAT_B8G8R8A8_USCALED:
-	case VK_FORMAT_B8G8R8A8_SSCALED:
-	case VK_FORMAT_B8G8R8A8_UINT:
-	case VK_FORMAT_B8G8R8A8_SINT:
-	case VK_FORMAT_B8G8R8A8_SRGB:
-	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
-	case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
-	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-	case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
-	case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
-	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
-	case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
-	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-	case VK_FORMAT_R16_UNORM:
-	case VK_FORMAT_R16_SNORM:
-	case VK_FORMAT_R16_USCALED:
-	case VK_FORMAT_R16_SSCALED:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16G16_UNORM:
-	case VK_FORMAT_R16G16_SNORM:
-	case VK_FORMAT_R16G16_USCALED:
-	case VK_FORMAT_R16G16_SSCALED:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16B16_UNORM:
-	case VK_FORMAT_R16G16B16_SNORM:
-	case VK_FORMAT_R16G16B16_USCALED:
-	case VK_FORMAT_R16G16B16_SSCALED:
-	case VK_FORMAT_R16G16B16_UINT:
-	case VK_FORMAT_R16G16B16_SINT:
-	case VK_FORMAT_R16G16B16A16_UNORM:
-	case VK_FORMAT_R16G16B16A16_SNORM:
-	case VK_FORMAT_R16G16B16A16_USCALED:
-	case VK_FORMAT_R16G16B16A16_SSCALED:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32B32_UINT:
-	case VK_FORMAT_R32G32B32_SINT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R64_UINT:
-	case VK_FORMAT_R64_SINT:
-	case VK_FORMAT_R64G64_UINT:
-	case VK_FORMAT_R64G64_SINT:
-	case VK_FORMAT_R64G64B64_UINT:
-	case VK_FORMAT_R64G64B64_SINT:
-	case VK_FORMAT_R64G64B64A64_UINT:
-	case VK_FORMAT_R64G64B64A64_SINT:
-	case VK_FORMAT_D16_UNORM:
-	case VK_FORMAT_X8_D24_UNORM_PACK32:
-	case VK_FORMAT_S8_UINT:
-	case VK_FORMAT_D16_UNORM_S8_UINT:
-	case VK_FORMAT_D24_UNORM_S8_UINT:
-	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-		return false;
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R16G16B16_SFLOAT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_R32_SFLOAT:
-	case VK_FORMAT_R32G32_SFLOAT:
-	case VK_FORMAT_R32G32B32_SFLOAT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-	case VK_FORMAT_R64_SFLOAT:
-	case VK_FORMAT_R64G64_SFLOAT:
-	case VK_FORMAT_R64G64B64_SFLOAT:
-	case VK_FORMAT_R64G64B64A64_SFLOAT:
-	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-	case VK_FORMAT_D32_SFLOAT:
-	case VK_FORMAT_D32_SFLOAT_S8_UINT:
-		return true;
-	default:
-		UNIMPLEMENTED("Format: %d", int(format));
+		case VK_FORMAT_R4G4_UNORM_PACK8:
+		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_B5G6R5_UNORM_PACK16:
+		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8_USCALED:
+		case VK_FORMAT_R8_SSCALED:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8_SRGB:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8_USCALED:
+		case VK_FORMAT_R8G8_SSCALED:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8_SRGB:
+		case VK_FORMAT_R8G8B8_UNORM:
+		case VK_FORMAT_R8G8B8_SNORM:
+		case VK_FORMAT_R8G8B8_USCALED:
+		case VK_FORMAT_R8G8B8_SSCALED:
+		case VK_FORMAT_R8G8B8_UINT:
+		case VK_FORMAT_R8G8B8_SINT:
+		case VK_FORMAT_R8G8B8_SRGB:
+		case VK_FORMAT_B8G8R8_UNORM:
+		case VK_FORMAT_B8G8R8_SNORM:
+		case VK_FORMAT_B8G8R8_USCALED:
+		case VK_FORMAT_B8G8R8_SSCALED:
+		case VK_FORMAT_B8G8R8_UINT:
+		case VK_FORMAT_B8G8R8_SINT:
+		case VK_FORMAT_B8G8R8_SRGB:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8G8B8A8_USCALED:
+		case VK_FORMAT_R8G8B8A8_SSCALED:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_B8G8R8A8_SNORM:
+		case VK_FORMAT_B8G8R8A8_USCALED:
+		case VK_FORMAT_B8G8R8A8_SSCALED:
+		case VK_FORMAT_B8G8R8A8_UINT:
+		case VK_FORMAT_B8G8R8A8_SINT:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+		case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
+		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
+		case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+		case VK_FORMAT_R16_UNORM:
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16_USCALED:
+		case VK_FORMAT_R16_SSCALED:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16_USCALED:
+		case VK_FORMAT_R16G16_SSCALED:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16B16_UNORM:
+		case VK_FORMAT_R16G16B16_SNORM:
+		case VK_FORMAT_R16G16B16_USCALED:
+		case VK_FORMAT_R16G16B16_SSCALED:
+		case VK_FORMAT_R16G16B16_UINT:
+		case VK_FORMAT_R16G16B16_SINT:
+		case VK_FORMAT_R16G16B16A16_UNORM:
+		case VK_FORMAT_R16G16B16A16_SNORM:
+		case VK_FORMAT_R16G16B16A16_USCALED:
+		case VK_FORMAT_R16G16B16A16_SSCALED:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32B32_UINT:
+		case VK_FORMAT_R32G32B32_SINT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R64_UINT:
+		case VK_FORMAT_R64_SINT:
+		case VK_FORMAT_R64G64_UINT:
+		case VK_FORMAT_R64G64_SINT:
+		case VK_FORMAT_R64G64B64_UINT:
+		case VK_FORMAT_R64G64B64_SINT:
+		case VK_FORMAT_R64G64B64A64_UINT:
+		case VK_FORMAT_R64G64B64A64_SINT:
+		case VK_FORMAT_D16_UNORM:
+		case VK_FORMAT_X8_D24_UNORM_PACK32:
+		case VK_FORMAT_S8_UINT:
+		case VK_FORMAT_D16_UNORM_S8_UINT:
+		case VK_FORMAT_D24_UNORM_S8_UINT:
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+			return false;
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R16G16B16_SFLOAT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R32G32B32_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+		case VK_FORMAT_R64_SFLOAT:
+		case VK_FORMAT_R64G64_SFLOAT:
+		case VK_FORMAT_R64G64B64_SFLOAT:
+		case VK_FORMAT_R64G64B64A64_SFLOAT:
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+		case VK_FORMAT_D32_SFLOAT:
+		case VK_FORMAT_D32_SFLOAT_S8_UINT:
+			return true;
+		default:
+			UNIMPLEMENTED("Format: %d", int(format));
 	}
 
 	return false;
@@ -464,11 +464,11 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-		return true;
-	default:
-		return false;
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+			return true;
+		default:
+			return false;
 	}
 }
 
@@ -476,63 +476,63 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-	case VK_FORMAT_BC2_UNORM_BLOCK:
-	case VK_FORMAT_BC2_SRGB_BLOCK:
-	case VK_FORMAT_BC3_UNORM_BLOCK:
-	case VK_FORMAT_BC3_SRGB_BLOCK:
-	case VK_FORMAT_BC4_UNORM_BLOCK:
-	case VK_FORMAT_BC4_SNORM_BLOCK:
-	case VK_FORMAT_BC5_UNORM_BLOCK:
-	case VK_FORMAT_BC5_SNORM_BLOCK:
-	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-	case VK_FORMAT_BC7_UNORM_BLOCK:
-	case VK_FORMAT_BC7_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-		return true;
-	default:
-		return false;
+		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+		case VK_FORMAT_BC2_UNORM_BLOCK:
+		case VK_FORMAT_BC2_SRGB_BLOCK:
+		case VK_FORMAT_BC3_UNORM_BLOCK:
+		case VK_FORMAT_BC3_SRGB_BLOCK:
+		case VK_FORMAT_BC4_UNORM_BLOCK:
+		case VK_FORMAT_BC4_SNORM_BLOCK:
+		case VK_FORMAT_BC5_UNORM_BLOCK:
+		case VK_FORMAT_BC5_SNORM_BLOCK:
+		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+		case VK_FORMAT_BC7_UNORM_BLOCK:
+		case VK_FORMAT_BC7_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+			return true;
+		default:
+			return false;
 	}
 }
 
@@ -541,45 +541,45 @@
 	// Note: our ETC2 decoder decompresses the 64 bit RGB compressed texel data to B8G8R8
 	switch(format)
 	{
-	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-		return VK_FORMAT_B8G8R8A8_UNORM;
-	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-		return VK_FORMAT_B8G8R8A8_SRGB;
-	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-		return VK_FORMAT_B8G8R8A8_UNORM;
-	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-		return VK_FORMAT_B8G8R8A8_SRGB;
-	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-		return VK_FORMAT_R16_UNORM;
-	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-		return VK_FORMAT_R16_SNORM;
-	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-		return VK_FORMAT_R16G16_UNORM;
-	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-		return VK_FORMAT_R16G16_SNORM;
-	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-	case VK_FORMAT_BC2_UNORM_BLOCK:
-	case VK_FORMAT_BC3_UNORM_BLOCK:
-		return VK_FORMAT_B8G8R8A8_UNORM;
-	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-	case VK_FORMAT_BC2_SRGB_BLOCK:
-	case VK_FORMAT_BC3_SRGB_BLOCK:
-		return VK_FORMAT_B8G8R8A8_SRGB;
-	case VK_FORMAT_BC4_UNORM_BLOCK:
-		return VK_FORMAT_R8_UNORM;
-	case VK_FORMAT_BC4_SNORM_BLOCK:
-		return VK_FORMAT_R8_SNORM;
-	case VK_FORMAT_BC5_UNORM_BLOCK:
-		return VK_FORMAT_R8G8_UNORM;
-	case VK_FORMAT_BC5_SNORM_BLOCK:
-		return VK_FORMAT_R8G8_SNORM;
-	default:
-		UNIMPLEMENTED("format: %d", int(format));
-		return VK_FORMAT_UNDEFINED;
+		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+			return VK_FORMAT_B8G8R8A8_UNORM;
+		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+			return VK_FORMAT_B8G8R8A8_SRGB;
+		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+			return VK_FORMAT_B8G8R8A8_UNORM;
+		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+			return VK_FORMAT_B8G8R8A8_SRGB;
+		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+			return VK_FORMAT_R16_UNORM;
+		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+			return VK_FORMAT_R16_SNORM;
+		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+			return VK_FORMAT_R16G16_UNORM;
+		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+			return VK_FORMAT_R16G16_SNORM;
+		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+		case VK_FORMAT_BC2_UNORM_BLOCK:
+		case VK_FORMAT_BC3_UNORM_BLOCK:
+			return VK_FORMAT_B8G8R8A8_UNORM;
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+		case VK_FORMAT_BC2_SRGB_BLOCK:
+		case VK_FORMAT_BC3_SRGB_BLOCK:
+			return VK_FORMAT_B8G8R8A8_SRGB;
+		case VK_FORMAT_BC4_UNORM_BLOCK:
+			return VK_FORMAT_R8_UNORM;
+		case VK_FORMAT_BC4_SNORM_BLOCK:
+			return VK_FORMAT_R8_SNORM;
+		case VK_FORMAT_BC5_UNORM_BLOCK:
+			return VK_FORMAT_R8G8_UNORM;
+		case VK_FORMAT_BC5_SNORM_BLOCK:
+			return VK_FORMAT_R8G8_SNORM;
+		default:
+			UNIMPLEMENTED("format: %d", int(format));
+			return VK_FORMAT_UNDEFINED;
 	}
 }
 
@@ -597,305 +597,305 @@
 	// Return a single format per group of compatible formats, for quick comparison
 	switch(format)
 	{
-	// 8 - bit, Block size 1 byte, 1 texel / block
-	case VK_FORMAT_R4G4_UNORM_PACK8:
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8_USCALED:
-	case VK_FORMAT_R8_SSCALED:
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8_SRGB:
-		return VK_FORMAT_R8_UNORM;
+		// 8 - bit, Block size 1 byte, 1 texel / block
+		case VK_FORMAT_R4G4_UNORM_PACK8:
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8_USCALED:
+		case VK_FORMAT_R8_SSCALED:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8_SRGB:
+			return VK_FORMAT_R8_UNORM;
 
-	// 16 - bit, Block size 2 bytes, 1 texel / block
-	case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
-	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-	case VK_FORMAT_R5G6B5_UNORM_PACK16:
-	case VK_FORMAT_B5G6R5_UNORM_PACK16:
-	case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
-	case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
-	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8_USCALED:
-	case VK_FORMAT_R8G8_SSCALED:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8_SRGB:
-	case VK_FORMAT_R16_UNORM:
-	case VK_FORMAT_R16_SNORM:
-	case VK_FORMAT_R16_USCALED:
-	case VK_FORMAT_R16_SSCALED:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_R10X6_UNORM_PACK16:
-	case VK_FORMAT_R12X4_UNORM_PACK16:
-		return VK_FORMAT_R16_UNORM;
+		// 16 - bit, Block size 2 bytes, 1 texel / block
+		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_B5G6R5_UNORM_PACK16:
+		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8_USCALED:
+		case VK_FORMAT_R8G8_SSCALED:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8_SRGB:
+		case VK_FORMAT_R16_UNORM:
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16_USCALED:
+		case VK_FORMAT_R16_SSCALED:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R10X6_UNORM_PACK16:
+		case VK_FORMAT_R12X4_UNORM_PACK16:
+			return VK_FORMAT_R16_UNORM;
 
-	// 24 - bit, Block size 3 bytes, 1 texel / block
-	case VK_FORMAT_R8G8B8_UNORM:
-	case VK_FORMAT_R8G8B8_SNORM:
-	case VK_FORMAT_R8G8B8_USCALED:
-	case VK_FORMAT_R8G8B8_SSCALED:
-	case VK_FORMAT_R8G8B8_UINT:
-	case VK_FORMAT_R8G8B8_SINT:
-	case VK_FORMAT_R8G8B8_SRGB:
-	case VK_FORMAT_B8G8R8_UNORM:
-	case VK_FORMAT_B8G8R8_SNORM:
-	case VK_FORMAT_B8G8R8_USCALED:
-	case VK_FORMAT_B8G8R8_SSCALED:
-	case VK_FORMAT_B8G8R8_UINT:
-	case VK_FORMAT_B8G8R8_SINT:
-	case VK_FORMAT_B8G8R8_SRGB:
-		return VK_FORMAT_R8G8B8_UNORM;
+		// 24 - bit, Block size 3 bytes, 1 texel / block
+		case VK_FORMAT_R8G8B8_UNORM:
+		case VK_FORMAT_R8G8B8_SNORM:
+		case VK_FORMAT_R8G8B8_USCALED:
+		case VK_FORMAT_R8G8B8_SSCALED:
+		case VK_FORMAT_R8G8B8_UINT:
+		case VK_FORMAT_R8G8B8_SINT:
+		case VK_FORMAT_R8G8B8_SRGB:
+		case VK_FORMAT_B8G8R8_UNORM:
+		case VK_FORMAT_B8G8R8_SNORM:
+		case VK_FORMAT_B8G8R8_USCALED:
+		case VK_FORMAT_B8G8R8_SSCALED:
+		case VK_FORMAT_B8G8R8_UINT:
+		case VK_FORMAT_B8G8R8_SINT:
+		case VK_FORMAT_B8G8R8_SRGB:
+			return VK_FORMAT_R8G8B8_UNORM;
 
-	// 32 - bit, Block size 4 bytes, 1 texel / block
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8G8B8A8_USCALED:
-	case VK_FORMAT_R8G8B8A8_SSCALED:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_R8G8B8A8_SRGB:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_B8G8R8A8_SNORM:
-	case VK_FORMAT_B8G8R8A8_USCALED:
-	case VK_FORMAT_B8G8R8A8_SSCALED:
-	case VK_FORMAT_B8G8R8A8_UINT:
-	case VK_FORMAT_B8G8R8A8_SINT:
-	case VK_FORMAT_B8G8R8A8_SRGB:
-	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
-	case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
-	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-	case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
-	case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
-	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
-	case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
-	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-	case VK_FORMAT_R16G16_UNORM:
-	case VK_FORMAT_R16G16_SNORM:
-	case VK_FORMAT_R16G16_USCALED:
-	case VK_FORMAT_R16G16_SSCALED:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32_SFLOAT:
-	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-	case VK_FORMAT_R10X6G10X6_UNORM_2PACK16:
-	case VK_FORMAT_R12X4G12X4_UNORM_2PACK16:
-		return VK_FORMAT_R8G8B8A8_UNORM;
+		// 32 - bit, Block size 4 bytes, 1 texel / block
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8G8B8A8_USCALED:
+		case VK_FORMAT_R8G8B8A8_SSCALED:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_B8G8R8A8_SNORM:
+		case VK_FORMAT_B8G8R8A8_USCALED:
+		case VK_FORMAT_B8G8R8A8_SSCALED:
+		case VK_FORMAT_B8G8R8A8_UINT:
+		case VK_FORMAT_B8G8R8A8_SINT:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+		case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
+		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
+		case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16_USCALED:
+		case VK_FORMAT_R16G16_SSCALED:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+		case VK_FORMAT_R10X6G10X6_UNORM_2PACK16:
+		case VK_FORMAT_R12X4G12X4_UNORM_2PACK16:
+			return VK_FORMAT_R8G8B8A8_UNORM;
 
-	// 48 - bit, Block size 6 bytes, 1 texel / block
-	case VK_FORMAT_R16G16B16_UNORM:
-	case VK_FORMAT_R16G16B16_SNORM:
-	case VK_FORMAT_R16G16B16_USCALED:
-	case VK_FORMAT_R16G16B16_SSCALED:
-	case VK_FORMAT_R16G16B16_UINT:
-	case VK_FORMAT_R16G16B16_SINT:
-	case VK_FORMAT_R16G16B16_SFLOAT:
-		return VK_FORMAT_R16G16B16_UNORM;
+		// 48 - bit, Block size 6 bytes, 1 texel / block
+		case VK_FORMAT_R16G16B16_UNORM:
+		case VK_FORMAT_R16G16B16_SNORM:
+		case VK_FORMAT_R16G16B16_USCALED:
+		case VK_FORMAT_R16G16B16_SSCALED:
+		case VK_FORMAT_R16G16B16_UINT:
+		case VK_FORMAT_R16G16B16_SINT:
+		case VK_FORMAT_R16G16B16_SFLOAT:
+			return VK_FORMAT_R16G16B16_UNORM;
 
-	// 64 - bit, Block size 8 bytes, 1 texel / block
-	case VK_FORMAT_R16G16B16A16_UNORM:
-	case VK_FORMAT_R16G16B16A16_SNORM:
-	case VK_FORMAT_R16G16B16A16_USCALED:
-	case VK_FORMAT_R16G16B16A16_SSCALED:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32_SFLOAT:
-	case VK_FORMAT_R64_UINT:
-	case VK_FORMAT_R64_SINT:
-	case VK_FORMAT_R64_SFLOAT:
-		return VK_FORMAT_R16G16B16A16_UNORM;
+		// 64 - bit, Block size 8 bytes, 1 texel / block
+		case VK_FORMAT_R16G16B16A16_UNORM:
+		case VK_FORMAT_R16G16B16A16_SNORM:
+		case VK_FORMAT_R16G16B16A16_USCALED:
+		case VK_FORMAT_R16G16B16A16_SSCALED:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R64_UINT:
+		case VK_FORMAT_R64_SINT:
+		case VK_FORMAT_R64_SFLOAT:
+			return VK_FORMAT_R16G16B16A16_UNORM;
 
-	// 96 - bit, Block size 12 bytes, 1 texel / block
-	case VK_FORMAT_R32G32B32_UINT:
-	case VK_FORMAT_R32G32B32_SINT:
-	case VK_FORMAT_R32G32B32_SFLOAT:
-		return VK_FORMAT_R32G32B32_UINT;
+		// 96 - bit, Block size 12 bytes, 1 texel / block
+		case VK_FORMAT_R32G32B32_UINT:
+		case VK_FORMAT_R32G32B32_SINT:
+		case VK_FORMAT_R32G32B32_SFLOAT:
+			return VK_FORMAT_R32G32B32_UINT;
 
-	// 128 - bit, Block size 16 bytes, 1 texel / block
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-	case VK_FORMAT_R64G64_UINT:
-	case VK_FORMAT_R64G64_SINT:
-	case VK_FORMAT_R64G64_SFLOAT:
-		return VK_FORMAT_R32G32B32A32_UINT;
+		// 128 - bit, Block size 16 bytes, 1 texel / block
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+		case VK_FORMAT_R64G64_UINT:
+		case VK_FORMAT_R64G64_SINT:
+		case VK_FORMAT_R64G64_SFLOAT:
+			return VK_FORMAT_R32G32B32A32_UINT;
 
-	// 192 - bit, Block size 24 bytes, 1 texel / block
-	case VK_FORMAT_R64G64B64_UINT:
-	case VK_FORMAT_R64G64B64_SINT:
-	case VK_FORMAT_R64G64B64_SFLOAT:
-		return VK_FORMAT_R64G64B64_UINT;
+		// 192 - bit, Block size 24 bytes, 1 texel / block
+		case VK_FORMAT_R64G64B64_UINT:
+		case VK_FORMAT_R64G64B64_SINT:
+		case VK_FORMAT_R64G64B64_SFLOAT:
+			return VK_FORMAT_R64G64B64_UINT;
 
-	// 256 - bit, Block size 32 bytes, 1 texel / block
-	case VK_FORMAT_R64G64B64A64_UINT:
-	case VK_FORMAT_R64G64B64A64_SINT:
-	case VK_FORMAT_R64G64B64A64_SFLOAT:
-		return VK_FORMAT_R64G64B64A64_UINT;
+		// 256 - bit, Block size 32 bytes, 1 texel / block
+		case VK_FORMAT_R64G64B64A64_UINT:
+		case VK_FORMAT_R64G64B64A64_SINT:
+		case VK_FORMAT_R64G64B64A64_SFLOAT:
+			return VK_FORMAT_R64G64B64A64_UINT;
 
-	// BC1_RGB(64 bit), Block size 8 bytes, 16 texels / block
-	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-		return VK_FORMAT_BC1_RGB_UNORM_BLOCK;
+		// BC1_RGB(64 bit), Block size 8 bytes, 16 texels / block
+		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+			return VK_FORMAT_BC1_RGB_UNORM_BLOCK;
 
-	// BC1_RGBA(64 bit), Block size 8 bytes, 16 texels / block
-	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-		return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
+		// BC1_RGBA(64 bit), Block size 8 bytes, 16 texels / block
+		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+			return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
 
-	// BC2(128 bit), Block size 16 bytes, 16 texels / block
-	case VK_FORMAT_BC2_UNORM_BLOCK:
-	case VK_FORMAT_BC2_SRGB_BLOCK:
-		return VK_FORMAT_BC2_UNORM_BLOCK;
+		// BC2(128 bit), Block size 16 bytes, 16 texels / block
+		case VK_FORMAT_BC2_UNORM_BLOCK:
+		case VK_FORMAT_BC2_SRGB_BLOCK:
+			return VK_FORMAT_BC2_UNORM_BLOCK;
 
-	// BC3(128 bit), Block size 16 bytes, 16 texels / block
-	case VK_FORMAT_BC3_UNORM_BLOCK:
-	case VK_FORMAT_BC3_SRGB_BLOCK:
-		return VK_FORMAT_BC3_UNORM_BLOCK;
+		// BC3(128 bit), Block size 16 bytes, 16 texels / block
+		case VK_FORMAT_BC3_UNORM_BLOCK:
+		case VK_FORMAT_BC3_SRGB_BLOCK:
+			return VK_FORMAT_BC3_UNORM_BLOCK;
 
-	// BC4(64 bit), Block size 8 bytes, 16 texels / block
-	case VK_FORMAT_BC4_UNORM_BLOCK:
-	case VK_FORMAT_BC4_SNORM_BLOCK:
-		return VK_FORMAT_BC4_UNORM_BLOCK;
+		// BC4(64 bit), Block size 8 bytes, 16 texels / block
+		case VK_FORMAT_BC4_UNORM_BLOCK:
+		case VK_FORMAT_BC4_SNORM_BLOCK:
+			return VK_FORMAT_BC4_UNORM_BLOCK;
 
-	// BC5(128 bit), Block size 16 bytes, 16 texels / block
-	case VK_FORMAT_BC5_UNORM_BLOCK:
-	case VK_FORMAT_BC5_SNORM_BLOCK:
-		return VK_FORMAT_BC5_UNORM_BLOCK;
+		// BC5(128 bit), Block size 16 bytes, 16 texels / block
+		case VK_FORMAT_BC5_UNORM_BLOCK:
+		case VK_FORMAT_BC5_SNORM_BLOCK:
+			return VK_FORMAT_BC5_UNORM_BLOCK;
 
-	// BC6H(128 bit), Block size 16 bytes, 16 texels / block
-	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-		return VK_FORMAT_BC6H_UFLOAT_BLOCK;
+		// BC6H(128 bit), Block size 16 bytes, 16 texels / block
+		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+			return VK_FORMAT_BC6H_UFLOAT_BLOCK;
 
-	// BC7(128 bit), Block size 16 bytes, 16 texels / block
-	case VK_FORMAT_BC7_UNORM_BLOCK:
-	case VK_FORMAT_BC7_SRGB_BLOCK:
-		return VK_FORMAT_BC7_UNORM_BLOCK;
+		// BC7(128 bit), Block size 16 bytes, 16 texels / block
+		case VK_FORMAT_BC7_UNORM_BLOCK:
+		case VK_FORMAT_BC7_SRGB_BLOCK:
+			return VK_FORMAT_BC7_UNORM_BLOCK;
 
-	// ETC2_RGB(64 bit), Block size 8 bytes, 16 texels / block
-	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-		return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
+		// ETC2_RGB(64 bit), Block size 8 bytes, 16 texels / block
+		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+			return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
 
-	// ETC2_RGBA(64 bit), Block size 8 bytes, 16 texels / block
-	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-		return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
+		// ETC2_RGBA(64 bit), Block size 8 bytes, 16 texels / block
+		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+			return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
 
-	// ETC2_EAC_RGBA(64 bit), Block size 8 bytes, 16 texels / block
-	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-		return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
+		// ETC2_EAC_RGBA(64 bit), Block size 8 bytes, 16 texels / block
+		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+			return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
 
-	// EAC_R(64 bit), Block size 8 bytes, 16 texels / block
-	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-		return VK_FORMAT_EAC_R11_UNORM_BLOCK;
+		// EAC_R(64 bit), Block size 8 bytes, 16 texels / block
+		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+			return VK_FORMAT_EAC_R11_UNORM_BLOCK;
 
-	// EAC_RG(128 bit), Block size 16 bytes, 16 texels / block
-	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-		return VK_FORMAT_EAC_R11G11_UNORM_BLOCK;
+		// EAC_RG(128 bit), Block size 16 bytes, 16 texels / block
+		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+			return VK_FORMAT_EAC_R11G11_UNORM_BLOCK;
 
-	// ASTC_4x4(128 bit), Block size 16 bytes, 16 texels / block
-	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-		return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
+		// ASTC_4x4(128 bit), Block size 16 bytes, 16 texels / block
+		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+			return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
 
-	// ASTC_5x4(128 bit), Block size 16 bytes, 20 texels / block
-	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-		return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
+		// ASTC_5x4(128 bit), Block size 16 bytes, 20 texels / block
+		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+			return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
 
-	// ASTC_5x5(128 bit), Block size 16 bytes, 25 texels / block
-	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-		return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
+		// ASTC_5x5(128 bit), Block size 16 bytes, 25 texels / block
+		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+			return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
 
-	// ASTC_6x5(128 bit), Block size 16 bytes, 30 texels / block
-	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-		return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
+		// ASTC_6x5(128 bit), Block size 16 bytes, 30 texels / block
+		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+			return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
 
-	// ASTC_6x6(128 bit), Block size 16 bytes, 36 texels / block
-	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-		return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
+		// ASTC_6x6(128 bit), Block size 16 bytes, 36 texels / block
+		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+			return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
 
-	// ASTC_8x5(128 bit), Block size 16 bytes, 40 texels / block
-	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-		return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
+		// ASTC_8x5(128 bit), Block size 16 bytes, 40 texels / block
+		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+			return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
 
-	// ASTC_8x6(128 bit), Block size 16 bytes, 48 texels / block
-	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-		return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
+		// ASTC_8x6(128 bit), Block size 16 bytes, 48 texels / block
+		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+			return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
 
-	// ASTC_8x8(128 bit), Block size 16 bytes, 64 texels / block
-	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-		return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
+		// ASTC_8x8(128 bit), Block size 16 bytes, 64 texels / block
+		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+			return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
 
-	// ASTC_10x5(128 bit), Block size 16 bytes, 50 texels / block
-	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-		return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
+		// ASTC_10x5(128 bit), Block size 16 bytes, 50 texels / block
+		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+			return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
 
-	// ASTC_10x6(128 bit), Block size 16 bytes, 60 texels / block
-	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-		return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
+		// ASTC_10x6(128 bit), Block size 16 bytes, 60 texels / block
+		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+			return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
 
-	// ASTC_10x8(128 bit), Block size 16 bytes, 80 texels / block
-	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-		return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
+		// ASTC_10x8(128 bit), Block size 16 bytes, 80 texels / block
+		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+			return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
 
-	// ASTC_10x10(128 bit), Block size 16 bytes, 100 texels / block
-	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-		return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
+		// ASTC_10x10(128 bit), Block size 16 bytes, 100 texels / block
+		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+			return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
 
-	// ASTC_12x10(128 bit), Block size 16 bytes, 120 texels / block
-	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-		return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
+		// ASTC_12x10(128 bit), Block size 16 bytes, 120 texels / block
+		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+			return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
 
-	// ASTC_12x12(128 bit), Block size 16 bytes, 144 texels / block
-	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-		return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
+		// ASTC_12x12(128 bit), Block size 16 bytes, 144 texels / block
+		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+			return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
 
-	// All other formats are only compatible with themselves
-	default:
-		return format;
+		// All other formats are only compatible with themselves
+		default:
+			return format;
 	}
 }
 
-bool Format::isCompatible(const Format& other) const
+bool Format::isCompatible(const Format &other) const
 {
 	return compatibleFormat() == other.compatibleFormat();
 }
@@ -904,68 +904,68 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-	case VK_FORMAT_BC2_UNORM_BLOCK:
-	case VK_FORMAT_BC2_SRGB_BLOCK:
-	case VK_FORMAT_BC3_UNORM_BLOCK:
-	case VK_FORMAT_BC3_SRGB_BLOCK:
-	case VK_FORMAT_BC4_UNORM_BLOCK:
-	case VK_FORMAT_BC4_SNORM_BLOCK:
-	case VK_FORMAT_BC5_UNORM_BLOCK:
-	case VK_FORMAT_BC5_SNORM_BLOCK:
-	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-	case VK_FORMAT_BC7_UNORM_BLOCK:
-	case VK_FORMAT_BC7_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-		return 4;
-	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-		return 5;
-	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-		return 6;
-	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-		return 8;
-	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-		return 10;
-	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-		return 12;
-	default:
-		return 1;
+		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+		case VK_FORMAT_BC2_UNORM_BLOCK:
+		case VK_FORMAT_BC2_SRGB_BLOCK:
+		case VK_FORMAT_BC3_UNORM_BLOCK:
+		case VK_FORMAT_BC3_SRGB_BLOCK:
+		case VK_FORMAT_BC4_UNORM_BLOCK:
+		case VK_FORMAT_BC4_SNORM_BLOCK:
+		case VK_FORMAT_BC5_UNORM_BLOCK:
+		case VK_FORMAT_BC5_SNORM_BLOCK:
+		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+		case VK_FORMAT_BC7_UNORM_BLOCK:
+		case VK_FORMAT_BC7_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+			return 4;
+		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+			return 5;
+		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+			return 6;
+		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+			return 8;
+		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+			return 10;
+		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+			return 12;
+		default:
+			return 1;
 	}
 }
 
@@ -973,68 +973,68 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-	case VK_FORMAT_BC2_UNORM_BLOCK:
-	case VK_FORMAT_BC2_SRGB_BLOCK:
-	case VK_FORMAT_BC3_UNORM_BLOCK:
-	case VK_FORMAT_BC3_SRGB_BLOCK:
-	case VK_FORMAT_BC4_UNORM_BLOCK:
-	case VK_FORMAT_BC4_SNORM_BLOCK:
-	case VK_FORMAT_BC5_UNORM_BLOCK:
-	case VK_FORMAT_BC5_SNORM_BLOCK:
-	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-	case VK_FORMAT_BC7_UNORM_BLOCK:
-	case VK_FORMAT_BC7_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-		return 4;
-	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-		return 5;
-	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-		return 6;
-	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-		return 8;
-	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-		return 10;
-	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-		return 12;
-	default:
-		return 1;
+		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+		case VK_FORMAT_BC2_UNORM_BLOCK:
+		case VK_FORMAT_BC2_SRGB_BLOCK:
+		case VK_FORMAT_BC3_UNORM_BLOCK:
+		case VK_FORMAT_BC3_SRGB_BLOCK:
+		case VK_FORMAT_BC4_UNORM_BLOCK:
+		case VK_FORMAT_BC4_SNORM_BLOCK:
+		case VK_FORMAT_BC5_UNORM_BLOCK:
+		case VK_FORMAT_BC5_SNORM_BLOCK:
+		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+		case VK_FORMAT_BC7_UNORM_BLOCK:
+		case VK_FORMAT_BC7_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+			return 4;
+		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+			return 5;
+		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+			return 6;
+		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+			return 8;
+		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+			return 10;
+		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+			return 12;
+		default:
+			return 1;
 	}
 }
 
@@ -1042,64 +1042,64 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-	case VK_FORMAT_BC4_UNORM_BLOCK:
-	case VK_FORMAT_BC4_SNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-		return 8;
-	case VK_FORMAT_BC2_UNORM_BLOCK:
-	case VK_FORMAT_BC2_SRGB_BLOCK:
-	case VK_FORMAT_BC3_UNORM_BLOCK:
-	case VK_FORMAT_BC3_SRGB_BLOCK:
-	case VK_FORMAT_BC5_UNORM_BLOCK:
-	case VK_FORMAT_BC5_SNORM_BLOCK:
-	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-	case VK_FORMAT_BC7_UNORM_BLOCK:
-	case VK_FORMAT_BC7_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-		return 16;
-	default:
-		return bytes();
+		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+		case VK_FORMAT_BC4_UNORM_BLOCK:
+		case VK_FORMAT_BC4_SNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+			return 8;
+		case VK_FORMAT_BC2_UNORM_BLOCK:
+		case VK_FORMAT_BC2_SRGB_BLOCK:
+		case VK_FORMAT_BC3_UNORM_BLOCK:
+		case VK_FORMAT_BC3_SRGB_BLOCK:
+		case VK_FORMAT_BC5_UNORM_BLOCK:
+		case VK_FORMAT_BC5_SNORM_BLOCK:
+		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+		case VK_FORMAT_BC7_UNORM_BLOCK:
+		case VK_FORMAT_BC7_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+			return 16;
+		default:
+			return bytes();
 	}
 }
 
@@ -1107,166 +1107,166 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8_USCALED:
-	case VK_FORMAT_R8_SSCALED:
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8_SRGB:
-	case VK_FORMAT_R16_UNORM:
-	case VK_FORMAT_R16_SNORM:
-	case VK_FORMAT_R16_USCALED:
-	case VK_FORMAT_R16_SSCALED:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32_SFLOAT:
-	case VK_FORMAT_R64_UINT:
-	case VK_FORMAT_R64_SINT:
-	case VK_FORMAT_R64_SFLOAT:
-	case VK_FORMAT_D16_UNORM:
-	case VK_FORMAT_X8_D24_UNORM_PACK32:
-	case VK_FORMAT_D32_SFLOAT:
-	case VK_FORMAT_S8_UINT:
-	case VK_FORMAT_D16_UNORM_S8_UINT:
-	case VK_FORMAT_D24_UNORM_S8_UINT:
-	case VK_FORMAT_D32_SFLOAT_S8_UINT:
-	case VK_FORMAT_BC4_UNORM_BLOCK:
-	case VK_FORMAT_BC4_SNORM_BLOCK:
-	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-		return 1;
-	case VK_FORMAT_R4G4_UNORM_PACK8:
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8_USCALED:
-	case VK_FORMAT_R8G8_SSCALED:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8_SRGB:
-	case VK_FORMAT_R16G16_UNORM:
-	case VK_FORMAT_R16G16_SNORM:
-	case VK_FORMAT_R16G16_USCALED:
-	case VK_FORMAT_R16G16_SSCALED:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32_SFLOAT:
-	case VK_FORMAT_R64G64_UINT:
-	case VK_FORMAT_R64G64_SINT:
-	case VK_FORMAT_R64G64_SFLOAT:
-	case VK_FORMAT_BC5_UNORM_BLOCK:
-	case VK_FORMAT_BC5_SNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-		return 2;
-	case VK_FORMAT_R5G6B5_UNORM_PACK16:
-	case VK_FORMAT_B5G6R5_UNORM_PACK16:
-	case VK_FORMAT_R8G8B8_UNORM:
-	case VK_FORMAT_R8G8B8_SNORM:
-	case VK_FORMAT_R8G8B8_USCALED:
-	case VK_FORMAT_R8G8B8_SSCALED:
-	case VK_FORMAT_R8G8B8_UINT:
-	case VK_FORMAT_R8G8B8_SINT:
-	case VK_FORMAT_R8G8B8_SRGB:
-	case VK_FORMAT_B8G8R8_UNORM:
-	case VK_FORMAT_B8G8R8_SNORM:
-	case VK_FORMAT_B8G8R8_USCALED:
-	case VK_FORMAT_B8G8R8_SSCALED:
-	case VK_FORMAT_B8G8R8_UINT:
-	case VK_FORMAT_B8G8R8_SINT:
-	case VK_FORMAT_B8G8R8_SRGB:
-	case VK_FORMAT_R16G16B16_UNORM:
-	case VK_FORMAT_R16G16B16_SNORM:
-	case VK_FORMAT_R16G16B16_USCALED:
-	case VK_FORMAT_R16G16B16_SSCALED:
-	case VK_FORMAT_R16G16B16_UINT:
-	case VK_FORMAT_R16G16B16_SINT:
-	case VK_FORMAT_R16G16B16_SFLOAT:
-	case VK_FORMAT_R32G32B32_UINT:
-	case VK_FORMAT_R32G32B32_SINT:
-	case VK_FORMAT_R32G32B32_SFLOAT:
-	case VK_FORMAT_R64G64B64_UINT:
-	case VK_FORMAT_R64G64B64_SINT:
-	case VK_FORMAT_R64G64B64_SFLOAT:
-	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-		return 3;
-	case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
-	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-	case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
-	case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
-	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8G8B8A8_USCALED:
-	case VK_FORMAT_R8G8B8A8_SSCALED:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_R8G8B8A8_SRGB:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_B8G8R8A8_SNORM:
-	case VK_FORMAT_B8G8R8A8_USCALED:
-	case VK_FORMAT_B8G8R8A8_SSCALED:
-	case VK_FORMAT_B8G8R8A8_UINT:
-	case VK_FORMAT_B8G8R8A8_SINT:
-	case VK_FORMAT_B8G8R8A8_SRGB:
-	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
-	case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
-	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-	case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
-	case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
-	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
-	case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
-	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-	case VK_FORMAT_R16G16B16A16_UNORM:
-	case VK_FORMAT_R16G16B16A16_SNORM:
-	case VK_FORMAT_R16G16B16A16_USCALED:
-	case VK_FORMAT_R16G16B16A16_SSCALED:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-	case VK_FORMAT_R64G64B64A64_UINT:
-	case VK_FORMAT_R64G64B64A64_SINT:
-	case VK_FORMAT_R64G64B64A64_SFLOAT:
-	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-	case VK_FORMAT_BC2_UNORM_BLOCK:
-	case VK_FORMAT_BC2_SRGB_BLOCK:
-	case VK_FORMAT_BC3_UNORM_BLOCK:
-	case VK_FORMAT_BC3_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-		return 4;
-	default:
-		UNIMPLEMENTED("Format: %d", int(format));
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8_USCALED:
+		case VK_FORMAT_R8_SSCALED:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8_SRGB:
+		case VK_FORMAT_R16_UNORM:
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16_USCALED:
+		case VK_FORMAT_R16_SSCALED:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R64_UINT:
+		case VK_FORMAT_R64_SINT:
+		case VK_FORMAT_R64_SFLOAT:
+		case VK_FORMAT_D16_UNORM:
+		case VK_FORMAT_X8_D24_UNORM_PACK32:
+		case VK_FORMAT_D32_SFLOAT:
+		case VK_FORMAT_S8_UINT:
+		case VK_FORMAT_D16_UNORM_S8_UINT:
+		case VK_FORMAT_D24_UNORM_S8_UINT:
+		case VK_FORMAT_D32_SFLOAT_S8_UINT:
+		case VK_FORMAT_BC4_UNORM_BLOCK:
+		case VK_FORMAT_BC4_SNORM_BLOCK:
+		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+			return 1;
+		case VK_FORMAT_R4G4_UNORM_PACK8:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8_USCALED:
+		case VK_FORMAT_R8G8_SSCALED:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8_SRGB:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16_USCALED:
+		case VK_FORMAT_R16G16_SSCALED:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R64G64_UINT:
+		case VK_FORMAT_R64G64_SINT:
+		case VK_FORMAT_R64G64_SFLOAT:
+		case VK_FORMAT_BC5_UNORM_BLOCK:
+		case VK_FORMAT_BC5_SNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+			return 2;
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_B5G6R5_UNORM_PACK16:
+		case VK_FORMAT_R8G8B8_UNORM:
+		case VK_FORMAT_R8G8B8_SNORM:
+		case VK_FORMAT_R8G8B8_USCALED:
+		case VK_FORMAT_R8G8B8_SSCALED:
+		case VK_FORMAT_R8G8B8_UINT:
+		case VK_FORMAT_R8G8B8_SINT:
+		case VK_FORMAT_R8G8B8_SRGB:
+		case VK_FORMAT_B8G8R8_UNORM:
+		case VK_FORMAT_B8G8R8_SNORM:
+		case VK_FORMAT_B8G8R8_USCALED:
+		case VK_FORMAT_B8G8R8_SSCALED:
+		case VK_FORMAT_B8G8R8_UINT:
+		case VK_FORMAT_B8G8R8_SINT:
+		case VK_FORMAT_B8G8R8_SRGB:
+		case VK_FORMAT_R16G16B16_UNORM:
+		case VK_FORMAT_R16G16B16_SNORM:
+		case VK_FORMAT_R16G16B16_USCALED:
+		case VK_FORMAT_R16G16B16_SSCALED:
+		case VK_FORMAT_R16G16B16_UINT:
+		case VK_FORMAT_R16G16B16_SINT:
+		case VK_FORMAT_R16G16B16_SFLOAT:
+		case VK_FORMAT_R32G32B32_UINT:
+		case VK_FORMAT_R32G32B32_SINT:
+		case VK_FORMAT_R32G32B32_SFLOAT:
+		case VK_FORMAT_R64G64B64_UINT:
+		case VK_FORMAT_R64G64B64_SINT:
+		case VK_FORMAT_R64G64B64_SFLOAT:
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+			return 3;
+		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8G8B8A8_USCALED:
+		case VK_FORMAT_R8G8B8A8_SSCALED:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_B8G8R8A8_SNORM:
+		case VK_FORMAT_B8G8R8A8_USCALED:
+		case VK_FORMAT_B8G8R8A8_SSCALED:
+		case VK_FORMAT_B8G8R8A8_UINT:
+		case VK_FORMAT_B8G8R8A8_SINT:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+		case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
+		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
+		case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+		case VK_FORMAT_R16G16B16A16_UNORM:
+		case VK_FORMAT_R16G16B16A16_SNORM:
+		case VK_FORMAT_R16G16B16A16_USCALED:
+		case VK_FORMAT_R16G16B16A16_SSCALED:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+		case VK_FORMAT_R64G64B64A64_UINT:
+		case VK_FORMAT_R64G64B64A64_SINT:
+		case VK_FORMAT_R64G64B64A64_SFLOAT:
+		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+		case VK_FORMAT_BC2_UNORM_BLOCK:
+		case VK_FORMAT_BC2_SRGB_BLOCK:
+		case VK_FORMAT_BC3_UNORM_BLOCK:
+		case VK_FORMAT_BC3_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+			return 4;
+		default:
+			UNIMPLEMENTED("Format: %d", int(format));
 	}
 
 	return 1;
@@ -1276,170 +1276,170 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_UNDEFINED:
-	case VK_FORMAT_R4G4_UNORM_PACK8:
-	case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
-	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-	case VK_FORMAT_R5G6B5_UNORM_PACK16:
-	case VK_FORMAT_B5G6R5_UNORM_PACK16:
-	case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
-	case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
-	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8_SRGB:
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8_USCALED:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8_SRGB:
-	case VK_FORMAT_R8G8B8_UNORM:
-	case VK_FORMAT_R8G8B8_USCALED:
-	case VK_FORMAT_R8G8B8_UINT:
-	case VK_FORMAT_R8G8B8_SRGB:
-	case VK_FORMAT_B8G8R8_UNORM:
-	case VK_FORMAT_B8G8R8_USCALED:
-	case VK_FORMAT_B8G8R8_UINT:
-	case VK_FORMAT_B8G8R8_SRGB:
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_USCALED:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_R8G8B8A8_SRGB:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_B8G8R8A8_USCALED:
-	case VK_FORMAT_B8G8R8A8_UINT:
-	case VK_FORMAT_B8G8R8A8_SRGB:
-	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
-	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-	case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
-	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
-	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-	case VK_FORMAT_R16_UNORM:
-	case VK_FORMAT_R16_USCALED:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16G16_UNORM:
-	case VK_FORMAT_R16G16_USCALED:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16B16_UNORM:
-	case VK_FORMAT_R16G16B16_USCALED:
-	case VK_FORMAT_R16G16B16_UINT:
-	case VK_FORMAT_R16G16B16A16_UNORM:
-	case VK_FORMAT_R16G16B16A16_USCALED:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32B32_UINT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R64_UINT:
-	case VK_FORMAT_R64G64_UINT:
-	case VK_FORMAT_R64G64B64_UINT:
-	case VK_FORMAT_R64G64B64A64_UINT:
-	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-	case VK_FORMAT_D16_UNORM:
-	case VK_FORMAT_X8_D24_UNORM_PACK32:
-	case VK_FORMAT_S8_UINT:
-	case VK_FORMAT_D16_UNORM_S8_UINT:
-	case VK_FORMAT_D24_UNORM_S8_UINT:
-	case VK_FORMAT_D32_SFLOAT:
-	case VK_FORMAT_D32_SFLOAT_S8_UINT:
-	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-	case VK_FORMAT_BC2_UNORM_BLOCK:
-	case VK_FORMAT_BC2_SRGB_BLOCK:
-	case VK_FORMAT_BC3_UNORM_BLOCK:
-	case VK_FORMAT_BC3_SRGB_BLOCK:
-	case VK_FORMAT_BC4_UNORM_BLOCK:
-	case VK_FORMAT_BC5_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-		return true;
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8G8B8A8_SSCALED:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_B8G8R8A8_SNORM:
-	case VK_FORMAT_B8G8R8A8_SSCALED:
-	case VK_FORMAT_B8G8R8A8_SINT:
-	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
-	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-	case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
-	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
-	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-	case VK_FORMAT_R16G16B16A16_SNORM:
-	case VK_FORMAT_R16G16B16A16_SSCALED:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-	case VK_FORMAT_R64G64B64A64_SINT:
-	case VK_FORMAT_R64G64B64A64_SFLOAT:
-	case VK_FORMAT_BC4_SNORM_BLOCK:
-	case VK_FORMAT_BC5_SNORM_BLOCK:
-	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-	// YCbCr formats treated as signed because VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY
-	// expects chroma components to be in range [-0.5, 0.5]
-	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-		return false;
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8_USCALED:
-	case VK_FORMAT_R8_SSCALED:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R16_SNORM:
-	case VK_FORMAT_R16_SSCALED:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32_SFLOAT:
-	case VK_FORMAT_R64_SINT:
-	case VK_FORMAT_R64_SFLOAT:
-		return component >= 1;
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8_SSCALED:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R16G16_SNORM:
-	case VK_FORMAT_R16G16_SSCALED:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32_SFLOAT:
-	case VK_FORMAT_R64G64_SINT:
-	case VK_FORMAT_R64G64_SFLOAT:
-		return component >= 2;
-	case VK_FORMAT_R8G8B8_SNORM:
-	case VK_FORMAT_R8G8B8_SSCALED:
-	case VK_FORMAT_R8G8B8_SINT:
-	case VK_FORMAT_B8G8R8_SNORM:
-	case VK_FORMAT_B8G8R8_SSCALED:
-	case VK_FORMAT_B8G8R8_SINT:
-	case VK_FORMAT_R16G16B16_SNORM:
-	case VK_FORMAT_R16G16B16_SSCALED:
-	case VK_FORMAT_R16G16B16_SINT:
-	case VK_FORMAT_R16G16B16_SFLOAT:
-	case VK_FORMAT_R32G32B32_SINT:
-	case VK_FORMAT_R32G32B32_SFLOAT:
-	case VK_FORMAT_R64G64B64_SINT:
-	case VK_FORMAT_R64G64B64_SFLOAT:
-		return component >= 3;
-	default:
-		UNIMPLEMENTED("Format: %d", int(format));
+		case VK_FORMAT_UNDEFINED:
+		case VK_FORMAT_R4G4_UNORM_PACK8:
+		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_B5G6R5_UNORM_PACK16:
+		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8_SRGB:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8_USCALED:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8_SRGB:
+		case VK_FORMAT_R8G8B8_UNORM:
+		case VK_FORMAT_R8G8B8_USCALED:
+		case VK_FORMAT_R8G8B8_UINT:
+		case VK_FORMAT_R8G8B8_SRGB:
+		case VK_FORMAT_B8G8R8_UNORM:
+		case VK_FORMAT_B8G8R8_USCALED:
+		case VK_FORMAT_B8G8R8_UINT:
+		case VK_FORMAT_B8G8R8_SRGB:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_USCALED:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_B8G8R8A8_USCALED:
+		case VK_FORMAT_B8G8R8A8_UINT:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_R16_UNORM:
+		case VK_FORMAT_R16_USCALED:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16_USCALED:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16B16_UNORM:
+		case VK_FORMAT_R16G16B16_USCALED:
+		case VK_FORMAT_R16G16B16_UINT:
+		case VK_FORMAT_R16G16B16A16_UNORM:
+		case VK_FORMAT_R16G16B16A16_USCALED:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32B32_UINT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R64_UINT:
+		case VK_FORMAT_R64G64_UINT:
+		case VK_FORMAT_R64G64B64_UINT:
+		case VK_FORMAT_R64G64B64A64_UINT:
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+		case VK_FORMAT_D16_UNORM:
+		case VK_FORMAT_X8_D24_UNORM_PACK32:
+		case VK_FORMAT_S8_UINT:
+		case VK_FORMAT_D16_UNORM_S8_UINT:
+		case VK_FORMAT_D24_UNORM_S8_UINT:
+		case VK_FORMAT_D32_SFLOAT:
+		case VK_FORMAT_D32_SFLOAT_S8_UINT:
+		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+		case VK_FORMAT_BC2_UNORM_BLOCK:
+		case VK_FORMAT_BC2_SRGB_BLOCK:
+		case VK_FORMAT_BC3_UNORM_BLOCK:
+		case VK_FORMAT_BC3_SRGB_BLOCK:
+		case VK_FORMAT_BC4_UNORM_BLOCK:
+		case VK_FORMAT_BC5_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+			return true;
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8G8B8A8_SSCALED:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_B8G8R8A8_SNORM:
+		case VK_FORMAT_B8G8R8A8_SSCALED:
+		case VK_FORMAT_B8G8R8A8_SINT:
+		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
+		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
+		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+		case VK_FORMAT_R16G16B16A16_SNORM:
+		case VK_FORMAT_R16G16B16A16_SSCALED:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+		case VK_FORMAT_R64G64B64A64_SINT:
+		case VK_FORMAT_R64G64B64A64_SFLOAT:
+		case VK_FORMAT_BC4_SNORM_BLOCK:
+		case VK_FORMAT_BC5_SNORM_BLOCK:
+		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+		// YCbCr formats treated as signed because VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY
+		// expects chroma components to be in range [-0.5, 0.5]
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+			return false;
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8_USCALED:
+		case VK_FORMAT_R8_SSCALED:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16_SSCALED:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R64_SINT:
+		case VK_FORMAT_R64_SFLOAT:
+			return component >= 1;
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8_SSCALED:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16_SSCALED:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R64G64_SINT:
+		case VK_FORMAT_R64G64_SFLOAT:
+			return component >= 2;
+		case VK_FORMAT_R8G8B8_SNORM:
+		case VK_FORMAT_R8G8B8_SSCALED:
+		case VK_FORMAT_R8G8B8_SINT:
+		case VK_FORMAT_B8G8R8_SNORM:
+		case VK_FORMAT_B8G8R8_SSCALED:
+		case VK_FORMAT_B8G8R8_SINT:
+		case VK_FORMAT_R16G16B16_SNORM:
+		case VK_FORMAT_R16G16B16_SSCALED:
+		case VK_FORMAT_R16G16B16_SINT:
+		case VK_FORMAT_R16G16B16_SFLOAT:
+		case VK_FORMAT_R32G32B32_SINT:
+		case VK_FORMAT_R32G32B32_SFLOAT:
+		case VK_FORMAT_R64G64B64_SINT:
+		case VK_FORMAT_R64G64B64_SFLOAT:
+			return component >= 3;
+		default:
+			UNIMPLEMENTED("Format: %d", int(format));
 	}
 
 	return false;
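The "component >= N" returns above encode a per-component answer: assuming the predicate reports whether a given component index of the format is unsigned, a signed format with N stored channels is signed only in components 0..N-1, while the missing components read back as unsigned default values. A sketch under that same assumption (not this file's API):

	#include <vulkan/vulkan_core.h>

	// Is component i unsigned? Stored signed channels say no; missing
	// channels (filled with default values on read) say yes.
	static bool isUnsignedComponentSketch(VkFormat format, int component)
	{
		switch(format)
		{
			case VK_FORMAT_R16_SINT: return component >= 1;     // R signed; G, B, A defaulted
			case VK_FORMAT_R16G16_SINT: return component >= 2;  // R, G signed; B, A defaulted
			case VK_FORMAT_R16G16B16A16_SINT: return false;     // all four signed
			case VK_FORMAT_R16G16B16A16_UINT: return true;      // all four unsigned
			default: return true;
		}
	}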
@@ -1449,222 +1449,223 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_UNDEFINED:
-		return 0;
-	case VK_FORMAT_R4G4_UNORM_PACK8:
-		return 1;
-	case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
-	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-	case VK_FORMAT_R5G6B5_UNORM_PACK16:
-	case VK_FORMAT_B5G6R5_UNORM_PACK16:
-	case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
-	case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
-	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-		return 2;
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8_USCALED:
-	case VK_FORMAT_R8_SSCALED:
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8_SRGB:
-		return 1;
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8_USCALED:
-	case VK_FORMAT_R8G8_SSCALED:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8_SRGB:
-		return 2;
-	case VK_FORMAT_R8G8B8_UNORM:
-	case VK_FORMAT_R8G8B8_SNORM:
-	case VK_FORMAT_R8G8B8_USCALED:
-	case VK_FORMAT_R8G8B8_SSCALED:
-	case VK_FORMAT_R8G8B8_UINT:
-	case VK_FORMAT_R8G8B8_SINT:
-	case VK_FORMAT_R8G8B8_SRGB:
-	case VK_FORMAT_B8G8R8_UNORM:
-	case VK_FORMAT_B8G8R8_SNORM:
-	case VK_FORMAT_B8G8R8_USCALED:
-	case VK_FORMAT_B8G8R8_SSCALED:
-	case VK_FORMAT_B8G8R8_UINT:
-	case VK_FORMAT_B8G8R8_SINT:
-	case VK_FORMAT_B8G8R8_SRGB:
-		return 3;
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8G8B8A8_USCALED:
-	case VK_FORMAT_R8G8B8A8_SSCALED:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_R8G8B8A8_SRGB:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_B8G8R8A8_SNORM:
-	case VK_FORMAT_B8G8R8A8_USCALED:
-	case VK_FORMAT_B8G8R8A8_SSCALED:
-	case VK_FORMAT_B8G8R8A8_UINT:
-	case VK_FORMAT_B8G8R8A8_SINT:
-	case VK_FORMAT_B8G8R8A8_SRGB:
-	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
-	case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
-	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-	case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
-	case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
-	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
-	case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
-	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-		return 4;
-	case VK_FORMAT_R16_UNORM:
-	case VK_FORMAT_R16_SNORM:
-	case VK_FORMAT_R16_USCALED:
-	case VK_FORMAT_R16_SSCALED:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16_SFLOAT:
-		return 2;
-	case VK_FORMAT_R16G16_UNORM:
-	case VK_FORMAT_R16G16_SNORM:
-	case VK_FORMAT_R16G16_USCALED:
-	case VK_FORMAT_R16G16_SSCALED:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16_SFLOAT:
-		return 4;
-	case VK_FORMAT_R16G16B16_UNORM:
-	case VK_FORMAT_R16G16B16_SNORM:
-	case VK_FORMAT_R16G16B16_USCALED:
-	case VK_FORMAT_R16G16B16_SSCALED:
-	case VK_FORMAT_R16G16B16_UINT:
-	case VK_FORMAT_R16G16B16_SINT:
-	case VK_FORMAT_R16G16B16_SFLOAT:
-		return 6;
-	case VK_FORMAT_R16G16B16A16_UNORM:
-	case VK_FORMAT_R16G16B16A16_SNORM:
-	case VK_FORMAT_R16G16B16A16_USCALED:
-	case VK_FORMAT_R16G16B16A16_SSCALED:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-		return 8;
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32_SFLOAT:
-		return 4;
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32_SFLOAT:
-		return 8;
-	case VK_FORMAT_R32G32B32_UINT:
-	case VK_FORMAT_R32G32B32_SINT:
-	case VK_FORMAT_R32G32B32_SFLOAT:
-		return 12;
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-		return 16;
-	case VK_FORMAT_R64_UINT:
-	case VK_FORMAT_R64_SINT:
-	case VK_FORMAT_R64_SFLOAT:
-		return 8;
-	case VK_FORMAT_R64G64_UINT:
-	case VK_FORMAT_R64G64_SINT:
-	case VK_FORMAT_R64G64_SFLOAT:
-		return 16;
-	case VK_FORMAT_R64G64B64_UINT:
-	case VK_FORMAT_R64G64B64_SINT:
-	case VK_FORMAT_R64G64B64_SFLOAT:
-		return 24;
-	case VK_FORMAT_R64G64B64A64_UINT:
-	case VK_FORMAT_R64G64B64A64_SINT:
-	case VK_FORMAT_R64G64B64A64_SFLOAT:
-		return 32;
-	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:   return 4;
-	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:    return 4;
-	case VK_FORMAT_D16_UNORM:                 return 2;
-	case VK_FORMAT_X8_D24_UNORM_PACK32:       return 4;
-	case VK_FORMAT_D32_SFLOAT:                return 4;
-	case VK_FORMAT_S8_UINT:                   return 1;
-	case VK_FORMAT_D16_UNORM_S8_UINT:         return 2; // Separate depth and stencil planes  // TODO: ASSERT to ensure this is only called per-aspect?
-	case VK_FORMAT_D24_UNORM_S8_UINT:         return 4; // Combined depth and stencil planes  // TODO: ASSERT to ensure this is only called per-aspect?
-	case VK_FORMAT_D32_SFLOAT_S8_UINT:        return 4; // Separate depth and stencil planes  // TODO: ASSERT to ensure this is only called per-aspect?
-	// Note: Compressed formats don't return bytes per pixel,
-	//       since these would be fractional. The returned value
-	//       is bytes per pixel for 1 column, so 2 for 64 bit 4x4
-	//       blocks and 4 for 128 bit 4x4 blocks.
-	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:       return 2;
-	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:        return 2;
-	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:      return 2;
-	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:       return 2;
-	case VK_FORMAT_BC2_UNORM_BLOCK:           return 4;
-	case VK_FORMAT_BC2_SRGB_BLOCK:            return 4;
-	case VK_FORMAT_BC3_UNORM_BLOCK:           return 4;
-	case VK_FORMAT_BC3_SRGB_BLOCK:            return 4;
-	case VK_FORMAT_BC4_UNORM_BLOCK:           return 2;
-	case VK_FORMAT_BC4_SNORM_BLOCK:           return 2;
-	case VK_FORMAT_BC5_UNORM_BLOCK:           return 4;
-	case VK_FORMAT_BC5_SNORM_BLOCK:           return 4;
-	case VK_FORMAT_BC6H_UFLOAT_BLOCK:         return 4;
-	case VK_FORMAT_BC6H_SFLOAT_BLOCK:         return 4;
-	case VK_FORMAT_BC7_UNORM_BLOCK:           return 4;
-	case VK_FORMAT_BC7_SRGB_BLOCK:            return 4;
-	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:   return 2;
-	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:    return 2;
-	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK: return 2;
-	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:  return 2;
-	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK: return 4;
-	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:  return 4;
-	case VK_FORMAT_EAC_R11_UNORM_BLOCK:       return 2;
-	case VK_FORMAT_EAC_R11_SNORM_BLOCK:       return 2;
-	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:    return 4;
-	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:    return 4;
-	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:      return 4;
-	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:       return 4;
-	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-		UNSUPPORTED("format: %d", int(format));
-		return 0;
-	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-		// TODO: ASSERT to ensure this is only called per-aspect?
-		return 1;  // Y plane only
-	default:
-		UNIMPLEMENTED("Format: %d", int(format));
+		case VK_FORMAT_UNDEFINED:
+			return 0;
+		case VK_FORMAT_R4G4_UNORM_PACK8:
+			return 1;
+		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_B5G6R5_UNORM_PACK16:
+		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+			return 2;
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8_USCALED:
+		case VK_FORMAT_R8_SSCALED:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8_SRGB:
+			return 1;
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8_USCALED:
+		case VK_FORMAT_R8G8_SSCALED:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8_SRGB:
+			return 2;
+		case VK_FORMAT_R8G8B8_UNORM:
+		case VK_FORMAT_R8G8B8_SNORM:
+		case VK_FORMAT_R8G8B8_USCALED:
+		case VK_FORMAT_R8G8B8_SSCALED:
+		case VK_FORMAT_R8G8B8_UINT:
+		case VK_FORMAT_R8G8B8_SINT:
+		case VK_FORMAT_R8G8B8_SRGB:
+		case VK_FORMAT_B8G8R8_UNORM:
+		case VK_FORMAT_B8G8R8_SNORM:
+		case VK_FORMAT_B8G8R8_USCALED:
+		case VK_FORMAT_B8G8R8_SSCALED:
+		case VK_FORMAT_B8G8R8_UINT:
+		case VK_FORMAT_B8G8R8_SINT:
+		case VK_FORMAT_B8G8R8_SRGB:
+			return 3;
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8G8B8A8_USCALED:
+		case VK_FORMAT_R8G8B8A8_SSCALED:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_B8G8R8A8_SNORM:
+		case VK_FORMAT_B8G8R8A8_USCALED:
+		case VK_FORMAT_B8G8R8A8_SSCALED:
+		case VK_FORMAT_B8G8R8A8_UINT:
+		case VK_FORMAT_B8G8R8A8_SINT:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+		case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
+		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
+		case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+			return 4;
+		case VK_FORMAT_R16_UNORM:
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16_USCALED:
+		case VK_FORMAT_R16_SSCALED:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16_SFLOAT:
+			return 2;
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16_USCALED:
+		case VK_FORMAT_R16G16_SSCALED:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16_SFLOAT:
+			return 4;
+		case VK_FORMAT_R16G16B16_UNORM:
+		case VK_FORMAT_R16G16B16_SNORM:
+		case VK_FORMAT_R16G16B16_USCALED:
+		case VK_FORMAT_R16G16B16_SSCALED:
+		case VK_FORMAT_R16G16B16_UINT:
+		case VK_FORMAT_R16G16B16_SINT:
+		case VK_FORMAT_R16G16B16_SFLOAT:
+			return 6;
+		case VK_FORMAT_R16G16B16A16_UNORM:
+		case VK_FORMAT_R16G16B16A16_SNORM:
+		case VK_FORMAT_R16G16B16A16_USCALED:
+		case VK_FORMAT_R16G16B16A16_SSCALED:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+			return 8;
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32_SFLOAT:
+			return 4;
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_SFLOAT:
+			return 8;
+		case VK_FORMAT_R32G32B32_UINT:
+		case VK_FORMAT_R32G32B32_SINT:
+		case VK_FORMAT_R32G32B32_SFLOAT:
+			return 12;
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+			return 16;
+		case VK_FORMAT_R64_UINT:
+		case VK_FORMAT_R64_SINT:
+		case VK_FORMAT_R64_SFLOAT:
+			return 8;
+		case VK_FORMAT_R64G64_UINT:
+		case VK_FORMAT_R64G64_SINT:
+		case VK_FORMAT_R64G64_SFLOAT:
+			return 16;
+		case VK_FORMAT_R64G64B64_UINT:
+		case VK_FORMAT_R64G64B64_SINT:
+		case VK_FORMAT_R64G64B64_SFLOAT:
+			return 24;
+		case VK_FORMAT_R64G64B64A64_UINT:
+		case VK_FORMAT_R64G64B64A64_SINT:
+		case VK_FORMAT_R64G64B64A64_SFLOAT:
+			return 32;
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32: return 4;
+		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32: return 4;
+		case VK_FORMAT_D16_UNORM: return 2;
+		case VK_FORMAT_X8_D24_UNORM_PACK32: return 4;
+		case VK_FORMAT_D32_SFLOAT: return 4;
+		case VK_FORMAT_S8_UINT: return 1;
+		case VK_FORMAT_D16_UNORM_S8_UINT: return 2;  // Separate depth and stencil planes  // TODO: ASSERT to ensure this is only called per-aspect?
+		case VK_FORMAT_D24_UNORM_S8_UINT: return 4;  // Combined depth and stencil planes  // TODO: ASSERT to ensure this is only called per-aspect?
+		case VK_FORMAT_D32_SFLOAT_S8_UINT:
+			return 4;  // Separate depth and stencil planes  // TODO: ASSERT to ensure this is only called per-aspect?
+		// Note: Compressed formats don't return bytes per pixel,
+		//       since these would be fractional. The returned value
+		//       is bytes per pixel for 1 column, so 2 for 64 bit 4x4
+		//       blocks and 4 for 128 bit 4x4 blocks.
+		case VK_FORMAT_BC1_RGB_UNORM_BLOCK: return 2;
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK: return 2;
+		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK: return 2;
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK: return 2;
+		case VK_FORMAT_BC2_UNORM_BLOCK: return 4;
+		case VK_FORMAT_BC2_SRGB_BLOCK: return 4;
+		case VK_FORMAT_BC3_UNORM_BLOCK: return 4;
+		case VK_FORMAT_BC3_SRGB_BLOCK: return 4;
+		case VK_FORMAT_BC4_UNORM_BLOCK: return 2;
+		case VK_FORMAT_BC4_SNORM_BLOCK: return 2;
+		case VK_FORMAT_BC5_UNORM_BLOCK: return 4;
+		case VK_FORMAT_BC5_SNORM_BLOCK: return 4;
+		case VK_FORMAT_BC6H_UFLOAT_BLOCK: return 4;
+		case VK_FORMAT_BC6H_SFLOAT_BLOCK: return 4;
+		case VK_FORMAT_BC7_UNORM_BLOCK: return 4;
+		case VK_FORMAT_BC7_SRGB_BLOCK: return 4;
+		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK: return 2;
+		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK: return 2;
+		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK: return 2;
+		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK: return 2;
+		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK: return 4;
+		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK: return 4;
+		case VK_FORMAT_EAC_R11_UNORM_BLOCK: return 2;
+		case VK_FORMAT_EAC_R11_SNORM_BLOCK: return 2;
+		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK: return 4;
+		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK: return 4;
+		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK: return 4;
+		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK: return 4;
+		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+			UNSUPPORTED("format: %d", int(format));
+			return 0;
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+			// TODO: ASSERT to ensure this is only called per-aspect?
+			return 1;  // Y plane only
+		default:
+			UNIMPLEMENTED("Format: %d", int(format));
 	}
 
 	return 0;
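Worked example of the bytes-per-column note above: a BC1 block is 64 bits = 8 bytes covering 4x4 texels, i.e. 2 bytes per one-texel column, so multiplying the returned value by a width that is a multiple of the block width reproduces the size of one row of blocks (4 texel rows). A small self-contained check of that arithmetic (a sketch, not this file's API):

	#include <cassert>

	int main()
	{
		const int bc1BlockBytes = 8;  // 64-bit 4x4 block
		const int blockWidth = 4;     // texel columns per block
		const int bytesPerColumn = bc1BlockBytes / blockWidth;  // == 2, the value returned above
		const int width = 16;         // texels; a multiple of the block width
		// One block row (4 texel rows) of a 16-texel-wide image is 32 bytes either way:
		assert(bytesPerColumn * width == (width / blockWidth) * bc1BlockBytes);
		return 0;
	}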
@@ -1682,68 +1683,68 @@
 
 	switch(format)
 	{
-	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-	case VK_FORMAT_BC4_UNORM_BLOCK:
-	case VK_FORMAT_BC4_SNORM_BLOCK:
-	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-		return 8 * ((width + 3) / 4);    // 64 bit per 4x4 block, computed per 4 rows
-	case VK_FORMAT_BC2_UNORM_BLOCK:
-	case VK_FORMAT_BC2_SRGB_BLOCK:
-	case VK_FORMAT_BC3_UNORM_BLOCK:
-	case VK_FORMAT_BC3_SRGB_BLOCK:
-	case VK_FORMAT_BC5_UNORM_BLOCK:
-	case VK_FORMAT_BC5_SNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-		return 16 * ((width + 3) / 4);    // 128 bit per 4x4 block, computed per 4 rows
-	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-		return 16 * ((width + 4) / 5);
-	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-		return 16 * ((width + 5) / 6);
-	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-		return 16 * ((width + 7) / 8);
-	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-		return 16 * ((width + 9) / 10);
-	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-		return 16 * ((width + 11) / 12);
-	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-		return sw::align<16>(width);  // Y plane only  // TODO: ASSERT to ensure this is only called per-aspect?
-	default:
-		return bytes() * width;
+		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+		case VK_FORMAT_BC4_UNORM_BLOCK:
+		case VK_FORMAT_BC4_SNORM_BLOCK:
+		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+			return 8 * ((width + 3) / 4);  // 64 bit per 4x4 block, computed per 4 rows
+		case VK_FORMAT_BC2_UNORM_BLOCK:
+		case VK_FORMAT_BC2_SRGB_BLOCK:
+		case VK_FORMAT_BC3_UNORM_BLOCK:
+		case VK_FORMAT_BC3_SRGB_BLOCK:
+		case VK_FORMAT_BC5_UNORM_BLOCK:
+		case VK_FORMAT_BC5_SNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+			return 16 * ((width + 3) / 4);  // 128 bit per 4x4 block, computed per 4 rows
+		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+			return 16 * ((width + 4) / 5);
+		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+			return 16 * ((width + 5) / 6);
+		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+			return 16 * ((width + 7) / 8);
+		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+			return 16 * ((width + 9) / 10);
+		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+			return 16 * ((width + 11) / 12);
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+			return sw::align<16>(width);  // Y plane only  // TODO: ASSERT to ensure this is only called per-aspect?
+		default:
+			return bytes() * width;
 	}
 }
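The pitch cases above all follow one pattern: round the width up to whole blocks with (width + B - 1) / B, then multiply by the block size in bytes (16 for every ASTC block). A minimal sketch of that computation:

	#include <cstdio>

	// Bytes per row of blocks for a 128-bit (16-byte) block format.
	static int blockPitchBytes(int width, int blockWidth)
	{
		const int blockColumns = (width + blockWidth - 1) / blockWidth;  // ceil(width / blockWidth)
		return 16 * blockColumns;
	}

	int main()
	{
		// ASTC 10x10, 25 texels wide: (25 + 9) / 10 = 3 blocks -> 48 bytes,
		// matching "16 * ((width + 9) / 10)" above.
		printf("%d\n", blockPitchBytes(25, 10));
		return 0;
	}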
 
@@ -1759,68 +1760,68 @@
 
 	switch(format)
 	{
-	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-	case VK_FORMAT_BC2_UNORM_BLOCK:
-	case VK_FORMAT_BC2_SRGB_BLOCK:
-	case VK_FORMAT_BC3_UNORM_BLOCK:
-	case VK_FORMAT_BC3_SRGB_BLOCK:
-	case VK_FORMAT_BC4_UNORM_BLOCK:
-	case VK_FORMAT_BC4_SNORM_BLOCK:
-	case VK_FORMAT_BC5_UNORM_BLOCK:
-	case VK_FORMAT_BC5_SNORM_BLOCK:
-	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-		return pitchB(width, border, target) * ((height + 3) / 4);   // Pitch computed per 4 rows
-	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-		return pitchB(width, border, target) * ((height + 4) / 5);   // Pitch computed per 5 rows
-	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-		return pitchB(width, border, target) * ((height + 5) / 6);   // Pitch computed per 6 rows
-	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-		return pitchB(width, border, target) * ((height + 7) / 8);   // Pitch computed per 8 rows
-	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-		return pitchB(width, border, target) * ((height + 9) / 10);   // Pitch computed per 10 rows
-	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-		return pitchB(width, border, target) * ((height + 11) / 12);   // Pitch computed per 12 rows
-	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-		// "Images in this format must be defined with a width and height that is a multiple of two."
-		return pitchB(width, border, target) * (height + height / 2);  // U and V planes are 1/4 size of Y plane.
-	default:
-		return pitchB(width, border, target) * height;   // Pitch computed per row
+		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+		case VK_FORMAT_BC2_UNORM_BLOCK:
+		case VK_FORMAT_BC2_SRGB_BLOCK:
+		case VK_FORMAT_BC3_UNORM_BLOCK:
+		case VK_FORMAT_BC3_SRGB_BLOCK:
+		case VK_FORMAT_BC4_UNORM_BLOCK:
+		case VK_FORMAT_BC4_SNORM_BLOCK:
+		case VK_FORMAT_BC5_UNORM_BLOCK:
+		case VK_FORMAT_BC5_SNORM_BLOCK:
+		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+			return pitchB(width, border, target) * ((height + 3) / 4);  // Pitch computed per 4 rows
+		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+			return pitchB(width, border, target) * ((height + 4) / 5);  // Pitch computed per 5 rows
+		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+			return pitchB(width, border, target) * ((height + 5) / 6);  // Pitch computed per 6 rows
+		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+			return pitchB(width, border, target) * ((height + 7) / 8);  // Pitch computed per 8 rows
+		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+			return pitchB(width, border, target) * ((height + 9) / 10);  // Pitch computed per 10 rows
+		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+			return pitchB(width, border, target) * ((height + 11) / 12);  // Pitch computed per 12 rows
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+			// "Images in this format must be defined with a width and height that is a multiple of two."
+			return pitchB(width, border, target) * (height + height / 2);  // U and V planes are 1/4 size of Y plane.
+		default:
+			return pitchB(width, border, target) * height;  // Pitch computed per row
 	}
 }
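The slice sizes above multiply the row pitch by the number of block rows, again rounding up, while the 4:2:0 YCbCr cases account for two quarter-size chroma planes by adding height / 2 rows at the Y-plane pitch. A sketch of both computations (assuming pitchBytes is the per-block-row pitch computed as above):

	// Block-compressed slice: ceil(height / blockHeight) rows of blocks.
	static int sliceBytesCompressed(int pitchBytes, int height, int blockHeight)
	{
		return pitchBytes * ((height + blockHeight - 1) / blockHeight);
	}

	// 4:2:0 YCbCr slice: Y plane plus two chroma planes of 1/4 the area each.
	static int sliceBytes420(int pitchBytes, int height)
	{
		return pitchBytes * (height + height / 2);
	}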
 
@@ -1833,137 +1834,137 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_R4G4_UNORM_PACK8:
-	case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
-	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-		return sw::float4(0xF, 0xF, 0xF, 0xF);
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8B8_UNORM:
-	case VK_FORMAT_B8G8R8_UNORM:
-	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_R8_SRGB:
-	case VK_FORMAT_R8G8_SRGB:
-	case VK_FORMAT_R8G8B8_SRGB:
-	case VK_FORMAT_B8G8R8_SRGB:
-	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-	case VK_FORMAT_R8G8B8A8_SRGB:
-	case VK_FORMAT_B8G8R8A8_SRGB:
-		return sw::float4(0xFF, 0xFF, 0xFF, 0xFF);
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8B8_SNORM:
-	case VK_FORMAT_B8G8R8_SNORM:
-	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_B8G8R8A8_SNORM:
-		return sw::float4(0x7F, 0x7F, 0x7F, 0x7F);
-	case VK_FORMAT_R16_UNORM:
-	case VK_FORMAT_R16G16_UNORM:
-	case VK_FORMAT_R16G16B16_UNORM:
-	case VK_FORMAT_R16G16B16A16_UNORM:
-		return sw::float4(0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
-	case VK_FORMAT_R16_SNORM:
-	case VK_FORMAT_R16G16_SNORM:
-	case VK_FORMAT_R16G16B16_SNORM:
-	case VK_FORMAT_R16G16B16A16_SNORM:
-		return sw::float4(0x7FFF, 0x7FFF, 0x7FFF, 0x7FFF);
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8B8_SINT:
-	case VK_FORMAT_R8G8B8_UINT:
-	case VK_FORMAT_B8G8R8_SINT:
-	case VK_FORMAT_B8G8R8_UINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-	case VK_FORMAT_B8G8R8A8_SINT:
-	case VK_FORMAT_B8G8R8A8_UINT:
-	case VK_FORMAT_R8_USCALED:
-	case VK_FORMAT_R8G8_USCALED:
-	case VK_FORMAT_R8G8B8_USCALED:
-	case VK_FORMAT_B8G8R8_USCALED:
-	case VK_FORMAT_R8G8B8A8_USCALED:
-	case VK_FORMAT_B8G8R8A8_USCALED:
-	case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
-	case VK_FORMAT_R8_SSCALED:
-	case VK_FORMAT_R8G8_SSCALED:
-	case VK_FORMAT_R8G8B8_SSCALED:
-	case VK_FORMAT_B8G8R8_SSCALED:
-	case VK_FORMAT_R8G8B8A8_SSCALED:
-	case VK_FORMAT_B8G8R8A8_SSCALED:
-	case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16_SSCALED:
-	case VK_FORMAT_R16G16_SSCALED:
-	case VK_FORMAT_R16G16B16_SSCALED:
-	case VK_FORMAT_R16G16B16A16_SSCALED:
-	case VK_FORMAT_R16_USCALED:
-	case VK_FORMAT_R16G16_USCALED:
-	case VK_FORMAT_R16G16B16_USCALED:
-	case VK_FORMAT_R16G16B16A16_USCALED:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32B32_SINT:
-	case VK_FORMAT_R32G32B32_UINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-	case VK_FORMAT_R32G32B32_SFLOAT:
-	case VK_FORMAT_R32G32_SFLOAT:
-	case VK_FORMAT_R32_SFLOAT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_R16G16B16_SFLOAT:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-	case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
-	case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
-	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
-	case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
-	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-		return sw::float4(1.0f, 1.0f, 1.0f, 1.0f);
-	case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
-	case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
-	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-		return sw::float4(0x1F, 0x1F, 0x1F, 0x01);
-	case VK_FORMAT_R5G6B5_UNORM_PACK16:
-	case VK_FORMAT_B5G6R5_UNORM_PACK16:
-		return sw::float4(0x1F, 0x3F, 0x1F, 1.0f);
-	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		return sw::float4(0x3FF, 0x3FF, 0x3FF, 0x03);
-	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-		return sw::float4(0x1FF, 0x1FF, 0x1FF, 0x01);
-	case VK_FORMAT_D16_UNORM:
-		return sw::float4(0xFFFF, 0.0f, 0.0f, 0.0f);
-	case VK_FORMAT_D24_UNORM_S8_UINT:
-	case VK_FORMAT_X8_D24_UNORM_PACK32:
-		return sw::float4(0xFFFFFF, 0.0f, 0.0f, 0.0f);
-	case VK_FORMAT_D32_SFLOAT:
-	case VK_FORMAT_D32_SFLOAT_S8_UINT:
-	case VK_FORMAT_S8_UINT:
-		return sw::float4(1.0f, 1.0f, 1.0f, 1.0f);
-	default:
-		UNSUPPORTED("format %d", int(format));
-		break;
+		case VK_FORMAT_R4G4_UNORM_PACK8:
+		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+			return sw::float4(0xF, 0xF, 0xF, 0xF);
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8B8_UNORM:
+		case VK_FORMAT_B8G8R8_UNORM:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_R8_SRGB:
+		case VK_FORMAT_R8G8_SRGB:
+		case VK_FORMAT_R8G8B8_SRGB:
+		case VK_FORMAT_B8G8R8_SRGB:
+		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+			return sw::float4(0xFF, 0xFF, 0xFF, 0xFF);
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8B8_SNORM:
+		case VK_FORMAT_B8G8R8_SNORM:
+		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_B8G8R8A8_SNORM:
+			return sw::float4(0x7F, 0x7F, 0x7F, 0x7F);
+		case VK_FORMAT_R16_UNORM:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16B16_UNORM:
+		case VK_FORMAT_R16G16B16A16_UNORM:
+			return sw::float4(0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16B16_SNORM:
+		case VK_FORMAT_R16G16B16A16_SNORM:
+			return sw::float4(0x7FFF, 0x7FFF, 0x7FFF, 0x7FFF);
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8B8_SINT:
+		case VK_FORMAT_R8G8B8_UINT:
+		case VK_FORMAT_B8G8R8_SINT:
+		case VK_FORMAT_B8G8R8_UINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		case VK_FORMAT_B8G8R8A8_SINT:
+		case VK_FORMAT_B8G8R8A8_UINT:
+		case VK_FORMAT_R8_USCALED:
+		case VK_FORMAT_R8G8_USCALED:
+		case VK_FORMAT_R8G8B8_USCALED:
+		case VK_FORMAT_B8G8R8_USCALED:
+		case VK_FORMAT_R8G8B8A8_USCALED:
+		case VK_FORMAT_B8G8R8A8_USCALED:
+		case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+		case VK_FORMAT_R8_SSCALED:
+		case VK_FORMAT_R8G8_SSCALED:
+		case VK_FORMAT_R8G8B8_SSCALED:
+		case VK_FORMAT_B8G8R8_SSCALED:
+		case VK_FORMAT_R8G8B8A8_SSCALED:
+		case VK_FORMAT_B8G8R8A8_SSCALED:
+		case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16_SSCALED:
+		case VK_FORMAT_R16G16_SSCALED:
+		case VK_FORMAT_R16G16B16_SSCALED:
+		case VK_FORMAT_R16G16B16A16_SSCALED:
+		case VK_FORMAT_R16_USCALED:
+		case VK_FORMAT_R16G16_USCALED:
+		case VK_FORMAT_R16G16B16_USCALED:
+		case VK_FORMAT_R16G16B16A16_USCALED:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32B32_SINT:
+		case VK_FORMAT_R32G32B32_UINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+		case VK_FORMAT_R32G32B32_SFLOAT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_R16G16B16_SFLOAT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
+		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
+		case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+			return sw::float4(1.0f, 1.0f, 1.0f, 1.0f);
+		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+			return sw::float4(0x1F, 0x1F, 0x1F, 0x01);
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_B5G6R5_UNORM_PACK16:
+			return sw::float4(0x1F, 0x3F, 0x1F, 1.0f);
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+			return sw::float4(0x3FF, 0x3FF, 0x3FF, 0x03);
+		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+			return sw::float4(0x1FF, 0x1FF, 0x1FF, 0x01);
+		case VK_FORMAT_D16_UNORM:
+			return sw::float4(0xFFFF, 0.0f, 0.0f, 0.0f);
+		case VK_FORMAT_D24_UNORM_S8_UINT:
+		case VK_FORMAT_X8_D24_UNORM_PACK32:
+			return sw::float4(0xFFFFFF, 0.0f, 0.0f, 0.0f);
+		case VK_FORMAT_D32_SFLOAT:
+		case VK_FORMAT_D32_SFLOAT_S8_UINT:
+		case VK_FORMAT_S8_UINT:
+			return sw::float4(1.0f, 1.0f, 1.0f, 1.0f);
+		default:
+			UNSUPPORTED("format %d", int(format));
+			break;
 	}
 
 	return sw::float4(1.0f, 1.0f, 1.0f, 1.0f);
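The float4 returned above holds the maximum stored value of each channel, which for unorm formats is the divisor that maps an integer-encoded channel into [0, 1]. For instance, a sketch of decoding VK_FORMAT_R5G6B5_UNORM_PACK16 with the (0x1F, 0x3F, 0x1F, 1.0f) scale from above (illustrative only, not this file's API):

	#include <cstdint>

	struct RGB { float r, g, b; };

	static RGB decodeR5G6B5(uint16_t texel)
	{
		RGB c;
		c.r = ((texel >> 11) & 0x1F) / 31.0f;  // 5-bit red, scale 0x1F
		c.g = ((texel >> 5) & 0x3F) / 63.0f;   // 6-bit green, scale 0x3F
		c.b = (texel & 0x1F) / 31.0f;          // 5-bit blue, scale 0x1F
		return c;
	}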
@@ -1973,63 +1974,63 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-	case VK_FORMAT_R5G6B5_UNORM_PACK16:
-	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-		return true;
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8_SRGB:
-	case VK_FORMAT_B8G8R8_UNORM:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_B8G8R8_SRGB:
-	case VK_FORMAT_R8G8B8A8_SRGB:
-	case VK_FORMAT_B8G8R8A8_SRGB:
-	case VK_FORMAT_R32_SFLOAT:
-	case VK_FORMAT_R32G32_SFLOAT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R16_UNORM:
-	case VK_FORMAT_R8_SRGB:
-	case VK_FORMAT_R16_SNORM:
-	case VK_FORMAT_R16G16_UNORM:
-	case VK_FORMAT_R16G16_SNORM:
-	case VK_FORMAT_R16G16B16A16_UNORM:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-	case VK_FORMAT_D16_UNORM:
-	case VK_FORMAT_S8_UINT:
-		return false;
-	default:
-		UNIMPLEMENTED("Format: %d", int(format));
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+			return true;
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8_SRGB:
+		case VK_FORMAT_B8G8R8_UNORM:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_B8G8R8_SRGB:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R16_UNORM:
+		case VK_FORMAT_R8_SRGB:
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16B16A16_UNORM:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		case VK_FORMAT_D16_UNORM:
+		case VK_FORMAT_S8_UINT:
+			return false;
+		default:
+			UNIMPLEMENTED("Format: %d", int(format));
 	}
 
 	return false;
@@ -2039,63 +2040,63 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8_SRGB:
-	case VK_FORMAT_B8G8R8_UNORM:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_B8G8R8_SRGB:
-	case VK_FORMAT_R8G8B8A8_SRGB:
-	case VK_FORMAT_B8G8R8A8_SRGB:
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8_SRGB:
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_S8_UINT:
-		return true;
-	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-	case VK_FORMAT_R5G6B5_UNORM_PACK16:
-	case VK_FORMAT_R32_SFLOAT:
-	case VK_FORMAT_R32G32_SFLOAT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-	case VK_FORMAT_R16_UNORM:
-	case VK_FORMAT_R16_SNORM:
-	case VK_FORMAT_R16G16_UNORM:
-	case VK_FORMAT_R16G16_SNORM:
-	case VK_FORMAT_R16G16B16A16_UNORM:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-	case VK_FORMAT_D16_UNORM:
-		return false;
-	default:
-		UNIMPLEMENTED("Format: %d", int(format));
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8_SRGB:
+		case VK_FORMAT_B8G8R8_UNORM:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_B8G8R8_SRGB:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8_SRGB:
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_S8_UINT:
+			return true;
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+		case VK_FORMAT_R16_UNORM:
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16B16A16_UNORM:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		case VK_FORMAT_D16_UNORM:
+			return false;
+		default:
+			UNIMPLEMENTED("Format: %d", int(format));
 	}
 
 	return false;
@@ -2105,63 +2106,63 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-	case VK_FORMAT_R5G6B5_UNORM_PACK16:
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8_SRGB:
-	case VK_FORMAT_B8G8R8_UNORM:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_B8G8R8_SRGB:
-	case VK_FORMAT_R8G8B8A8_SRGB:
-	case VK_FORMAT_B8G8R8A8_SRGB:
-	case VK_FORMAT_R32_SFLOAT:
-	case VK_FORMAT_R32G32_SFLOAT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8_SRGB:
-	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-	case VK_FORMAT_S8_UINT:
-		return false;
-	case VK_FORMAT_R16_UNORM:
-	case VK_FORMAT_R16_SNORM:
-	case VK_FORMAT_R16G16_UNORM:
-	case VK_FORMAT_R16G16_SNORM:
-	case VK_FORMAT_R16G16B16A16_UNORM:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_D16_UNORM:
-		return true;
-	default:
-		UNIMPLEMENTED("Format: %d", int(format));
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8_SRGB:
+		case VK_FORMAT_B8G8R8_UNORM:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_B8G8R8_SRGB:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8_SRGB:
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		case VK_FORMAT_S8_UINT:
+			return false;
+		case VK_FORMAT_R16_UNORM:
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16B16A16_UNORM:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_D16_UNORM:
+			return true;
+		default:
+			UNIMPLEMENTED("Format: %d", int(format));
 	}
 
 	return false;
@@ -2171,63 +2172,63 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-	case VK_FORMAT_R5G6B5_UNORM_PACK16:
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8_SRGB:
-	case VK_FORMAT_B8G8R8_UNORM:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_B8G8R8_SRGB:
-	case VK_FORMAT_R8G8B8A8_SRGB:
-	case VK_FORMAT_B8G8R8A8_SRGB:
-	case VK_FORMAT_R16_UNORM:
-	case VK_FORMAT_R16_SNORM:
-	case VK_FORMAT_R16G16_UNORM:
-	case VK_FORMAT_R16G16_SNORM:
-	case VK_FORMAT_R16G16B16A16_UNORM:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_R32_SFLOAT:
-	case VK_FORMAT_R32G32_SFLOAT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8_SRGB:
-	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-	case VK_FORMAT_D16_UNORM:
-	case VK_FORMAT_S8_UINT:
-		return false;
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-		return true;
-	default:
-		UNIMPLEMENTED("Format: %d", int(format));
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8_SRGB:
+		case VK_FORMAT_B8G8R8_UNORM:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_B8G8R8_SRGB:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_R16_UNORM:
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16B16A16_UNORM:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8_SRGB:
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		case VK_FORMAT_D16_UNORM:
+		case VK_FORMAT_S8_UINT:
+			return false;
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+			return true;
+		default:
+			UNIMPLEMENTED("Format: %d", int(format));
 	}
 
 	return false;
@@ -2237,64 +2238,64 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8_SRGB:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R16_UNORM:
-	case VK_FORMAT_R16_SNORM:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32_SFLOAT:
-		return component < 1;
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8_SRGB:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16_SNORM:
-	case VK_FORMAT_R16G16_UNORM:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32_SFLOAT:
-		return component < 2;
-	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-	case VK_FORMAT_R5G6B5_UNORM_PACK16:
-	case VK_FORMAT_B8G8R8_UNORM:
-	case VK_FORMAT_B8G8R8_SRGB:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_SRGB:
-	case VK_FORMAT_B8G8R8A8_SRGB:
-	case VK_FORMAT_R16G16B16A16_UNORM:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		return component < 3;
-	case VK_FORMAT_D32_SFLOAT:
-	case VK_FORMAT_D16_UNORM:
-	case VK_FORMAT_S8_UINT:
-		return false;
-	default:
-		UNIMPLEMENTED("Format: %d", int(format));
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8_SRGB:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R16_UNORM:
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32_SFLOAT:
+			return component < 1;
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8_SRGB:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32_SFLOAT:
+			return component < 2;
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_B8G8R8_UNORM:
+		case VK_FORMAT_B8G8R8_SRGB:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_R16G16B16A16_UNORM:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+			return component < 3;
+		case VK_FORMAT_D32_SFLOAT:
+		case VK_FORMAT_D16_UNORM:
+		case VK_FORMAT_S8_UINT:
+			return false;
+		default:
+			UNIMPLEMENTED("Format: %d", int(format));
 	}
 
 	return false;
diff --git a/src/Vulkan/VkFramebuffer.cpp b/src/Vulkan/VkFramebuffer.cpp
index 96804c1..4c797e1 100644
--- a/src/Vulkan/VkFramebuffer.cpp
+++ b/src/Vulkan/VkFramebuffer.cpp
@@ -15,15 +15,15 @@
 #include "VkFramebuffer.hpp"
 #include "VkImageView.hpp"
 #include "VkRenderPass.hpp"
-#include <algorithm>
 #include <memory.h>
+#include <algorithm>
 
 namespace vk {
 
-Framebuffer::Framebuffer(const VkFramebufferCreateInfo* pCreateInfo, void* mem) :
-	attachmentCount(pCreateInfo->attachmentCount),
-	attachments(reinterpret_cast<ImageView**>(mem)),
-	extent{pCreateInfo->width, pCreateInfo->height, pCreateInfo->layers}
+Framebuffer::Framebuffer(const VkFramebufferCreateInfo *pCreateInfo, void *mem)
+    : attachmentCount(pCreateInfo->attachmentCount)
+    , attachments(reinterpret_cast<ImageView **>(mem))
+    , extent{ pCreateInfo->width, pCreateInfo->height, pCreateInfo->layers }
 {
 	for(uint32_t i = 0; i < attachmentCount; i++)
 	{
@@ -31,12 +31,12 @@
 	}
 }
 
-void Framebuffer::destroy(const VkAllocationCallbacks* pAllocator)
+void Framebuffer::destroy(const VkAllocationCallbacks *pAllocator)
 {
 	vk::deallocate(attachments, pAllocator);
 }
 
-void Framebuffer::clear(const RenderPass* renderPass, uint32_t clearValueCount, const VkClearValue* pClearValues, const VkRect2D& renderArea)
+void Framebuffer::clear(const RenderPass *renderPass, uint32_t clearValueCount, const VkClearValue *pClearValues, const VkRect2D &renderArea)
 {
 	ASSERT(attachmentCount == renderPass->getAttachmentCount());
 
@@ -59,7 +59,7 @@
 		if(renderPass->isMultiView())
 		{
 			attachments[i]->clearWithLayerMask(pClearValues[i], aspectMask, renderArea,
-											   renderPass->getAttachmentViewMask(i));
+			                                   renderPass->getAttachmentViewMask(i));
 		}
 		else
 		{
@@ -68,7 +68,7 @@
 	}
 }
 
-void Framebuffer::clearAttachment(const RenderPass* renderPass, uint32_t subpassIndex, const VkClearAttachment& attachment, const VkClearRect& rect)
+void Framebuffer::clearAttachment(const RenderPass *renderPass, uint32_t subpassIndex, const VkClearAttachment &attachment, const VkClearRect &rect)
 {
 	VkSubpassDescription subpass = renderPass->getSubpass(subpassIndex);
 
@@ -85,7 +85,7 @@
 			if(renderPass->isMultiView())
 			{
 				imageView->clearWithLayerMask(attachment.clearValue, attachment.aspectMask, rect.rect,
-											  renderPass->getViewMask(subpassIndex));
+				                              renderPass->getViewMask(subpassIndex));
 			}
 			else
 			{
@@ -105,7 +105,7 @@
 			if(renderPass->isMultiView())
 			{
 				imageView->clearWithLayerMask(attachment.clearValue, attachment.aspectMask, rect.rect,
-											  renderPass->getViewMask(subpassIndex));
+				                              renderPass->getViewMask(subpassIndex));
 			}
 			else
 			{
@@ -120,9 +120,9 @@
 	return attachments[index];
 }
 
-void Framebuffer::resolve(const RenderPass* renderPass, uint32_t subpassIndex)
+void Framebuffer::resolve(const RenderPass *renderPass, uint32_t subpassIndex)
 {
-	auto const& subpass = renderPass->getSubpass(subpassIndex);
+	auto const &subpass = renderPass->getSubpass(subpassIndex);
 	if(subpass.pResolveAttachments)
 	{
 		for(uint32_t i = 0; i < subpass.colorAttachmentCount; i++)
@@ -134,7 +134,7 @@
 				if(renderPass->isMultiView())
 				{
 					imageView->resolveWithLayerMask(attachments[resolveAttachment],
-													renderPass->getViewMask(subpassIndex));
+					                                renderPass->getViewMask(subpassIndex));
 				}
 				else
 				{
@@ -145,9 +145,9 @@
 	}
 }
 
-size_t Framebuffer::ComputeRequiredAllocationSize(const VkFramebufferCreateInfo* pCreateInfo)
+size_t Framebuffer::ComputeRequiredAllocationSize(const VkFramebufferCreateInfo *pCreateInfo)
 {
-	return pCreateInfo->attachmentCount * sizeof(void*);
+	return pCreateInfo->attachmentCount * sizeof(void *);
 }
 
 }  // namespace vk
diff --git a/src/Vulkan/VkFramebuffer.hpp b/src/Vulkan/VkFramebuffer.hpp
index a4c50bb..e9839ee 100644
--- a/src/Vulkan/VkFramebuffer.hpp
+++ b/src/Vulkan/VkFramebuffer.hpp
@@ -25,29 +25,29 @@
 class Framebuffer : public Object<Framebuffer, VkFramebuffer>
 {
 public:
-	Framebuffer(const VkFramebufferCreateInfo* pCreateInfo, void* mem);
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	Framebuffer(const VkFramebufferCreateInfo *pCreateInfo, void *mem);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
-	void clear(const RenderPass* renderPass, uint32_t clearValueCount, const VkClearValue* pClearValues, const VkRect2D& renderArea);
-	void clearAttachment(const RenderPass* renderPass, uint32_t subpassIndex, const VkClearAttachment& attachment, const VkClearRect& rect);
+	void clear(const RenderPass *renderPass, uint32_t clearValueCount, const VkClearValue *pClearValues, const VkRect2D &renderArea);
+	void clearAttachment(const RenderPass *renderPass, uint32_t subpassIndex, const VkClearAttachment &attachment, const VkClearRect &rect);
 
-	static size_t ComputeRequiredAllocationSize(const VkFramebufferCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkFramebufferCreateInfo *pCreateInfo);
 	ImageView *getAttachment(uint32_t index) const;
-	void resolve(const RenderPass* renderPass, uint32_t subpassIndex);
+	void resolve(const RenderPass *renderPass, uint32_t subpassIndex);
 
-	const VkExtent3D& getExtent() const { return extent; }
+	const VkExtent3D &getExtent() const { return extent; }
 
 private:
-	uint32_t         attachmentCount = 0;
-	ImageView**      attachments = nullptr;
+	uint32_t attachmentCount = 0;
+	ImageView **attachments = nullptr;
 	const VkExtent3D extent = {};
 };
 
-static inline Framebuffer* Cast(VkFramebuffer object)
+static inline Framebuffer *Cast(VkFramebuffer object)
 {
 	return Framebuffer::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_FRAMEBUFFER_HPP_
+#endif  // VK_FRAMEBUFFER_HPP_
diff --git a/src/Vulkan/VkGetProcAddress.cpp b/src/Vulkan/VkGetProcAddress.cpp
index fd870c7..fe7f282 100644
--- a/src/Vulkan/VkGetProcAddress.cpp
+++ b/src/Vulkan/VkGetProcAddress.cpp
@@ -15,21 +15,23 @@
 #include "VkGetProcAddress.h"
 #include "VkDevice.hpp"
 
-#include <unordered_map>
 #include <string>
+#include <unordered_map>
 #include <vector>
 
 #ifdef __ANDROID__
-#include <cerrno>
-#include <hardware/hwvulkan.h>
-#include <vulkan/vk_android_native_buffer.h>
+#	include <hardware/hwvulkan.h>
+#	include <vulkan/vk_android_native_buffer.h>
+#	include <cerrno>
 #endif
 
 namespace vk {
 
-#define MAKE_VULKAN_GLOBAL_ENTRY(aFunction) { #aFunction, reinterpret_cast<PFN_vkVoidFunction>(aFunction) }
-static const std::unordered_map<std::string, PFN_vkVoidFunction> globalFunctionPointers =
-{
+#define MAKE_VULKAN_GLOBAL_ENTRY(aFunction)                           \
+	{                                                                 \
+#		aFunction, reinterpret_cast < PFN_vkVoidFunction>(aFunction) \
+	}
+static const std::unordered_map<std::string, PFN_vkVoidFunction> globalFunctionPointers = {
 	MAKE_VULKAN_GLOBAL_ENTRY(vkCreateInstance),
 	MAKE_VULKAN_GLOBAL_ENTRY(vkEnumerateInstanceExtensionProperties),
 	MAKE_VULKAN_GLOBAL_ENTRY(vkEnumerateInstanceLayerProperties),
@@ -37,9 +39,11 @@
 };
 #undef MAKE_VULKAN_GLOBAL_ENTRY
 
-#define MAKE_VULKAN_INSTANCE_ENTRY(aFunction) { #aFunction, reinterpret_cast<PFN_vkVoidFunction>(aFunction) }
-static const std::unordered_map<std::string, PFN_vkVoidFunction> instanceFunctionPointers =
-{
+#define MAKE_VULKAN_INSTANCE_ENTRY(aFunction)                         \
+	{                                                                 \
+#		aFunction, reinterpret_cast < PFN_vkVoidFunction>(aFunction) \
+	}
+static const std::unordered_map<std::string, PFN_vkVoidFunction> instanceFunctionPointers = {
 	MAKE_VULKAN_INSTANCE_ENTRY(vkDestroyInstance),
 	MAKE_VULKAN_INSTANCE_ENTRY(vkEnumeratePhysicalDevices),
 	MAKE_VULKAN_INSTANCE_ENTRY(vkGetPhysicalDeviceFeatures),
@@ -99,12 +103,12 @@
 	MAKE_VULKAN_INSTANCE_ENTRY(vkGetPhysicalDeviceXlibPresentationSupportKHR),
 #endif
 #ifdef VK_USE_PLATFORM_MACOS_MVK
-    // VK_MVK_macos_surface
-    MAKE_VULKAN_INSTANCE_ENTRY(vkCreateMacOSSurfaceMVK),
+	// VK_MVK_macos_surface
+	MAKE_VULKAN_INSTANCE_ENTRY(vkCreateMacOSSurfaceMVK),
 #endif
 #ifdef VK_USE_PLATFORM_METAL_EXT
-    // VK_EXT_metal_surface
-    MAKE_VULKAN_INSTANCE_ENTRY(vkCreateMetalSurfaceEXT),
+	// VK_EXT_metal_surface
+	MAKE_VULKAN_INSTANCE_ENTRY(vkCreateMetalSurfaceEXT),
 #endif
 #ifdef VK_USE_PLATFORM_WIN32_KHR
 	// VK_KHR_win32_surface
@@ -114,9 +118,11 @@
 };
 #undef MAKE_VULKAN_INSTANCE_ENTRY
 
-#define MAKE_VULKAN_DEVICE_ENTRY(aFunction) { #aFunction, reinterpret_cast<PFN_vkVoidFunction>(aFunction) }
-static const std::unordered_map<std::string, PFN_vkVoidFunction> deviceFunctionPointers =
-{
+#define MAKE_VULKAN_DEVICE_ENTRY(aFunction)                           \
+	{                                                                 \
+#		aFunction, reinterpret_cast < PFN_vkVoidFunction>(aFunction) \
+	}
+static const std::unordered_map<std::string, PFN_vkVoidFunction> deviceFunctionPointers = {
 	MAKE_VULKAN_DEVICE_ENTRY(vkGetInstanceProcAddr),
 	MAKE_VULKAN_DEVICE_ENTRY(vkGetDeviceProcAddr),
 	MAKE_VULKAN_DEVICE_ENTRY(vkDestroyDevice),
@@ -263,126 +269,113 @@
 #endif
 };
 
-static const std::vector<std::pair<const char*, std::unordered_map<std::string, PFN_vkVoidFunction>>> deviceExtensionFunctionPointers =
-{
+static const std::vector<std::pair<const char *, std::unordered_map<std::string, PFN_vkVoidFunction>>> deviceExtensionFunctionPointers = {
 	// VK_KHR_descriptor_update_template
 	{
-		VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
-		{
-			MAKE_VULKAN_DEVICE_ENTRY(vkCreateDescriptorUpdateTemplateKHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkDestroyDescriptorUpdateTemplateKHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkUpdateDescriptorSetWithTemplateKHR),
-		}
-	},
+	    VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
+	    {
+	        MAKE_VULKAN_DEVICE_ENTRY(vkCreateDescriptorUpdateTemplateKHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkDestroyDescriptorUpdateTemplateKHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkUpdateDescriptorSetWithTemplateKHR),
+	    } },
 	// VK_KHR_device_group
 	{
-		VK_KHR_DEVICE_GROUP_EXTENSION_NAME,
-		{
-			MAKE_VULKAN_DEVICE_ENTRY(vkGetDeviceGroupPeerMemoryFeaturesKHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkCmdSetDeviceMaskKHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkCmdDispatchBaseKHR),
-		}
-	},
+	    VK_KHR_DEVICE_GROUP_EXTENSION_NAME,
+	    {
+	        MAKE_VULKAN_DEVICE_ENTRY(vkGetDeviceGroupPeerMemoryFeaturesKHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkCmdSetDeviceMaskKHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkCmdDispatchBaseKHR),
+	    } },
 	// VK_KHR_maintenance1
 	{
-		VK_KHR_MAINTENANCE1_EXTENSION_NAME,
-		{
-			MAKE_VULKAN_DEVICE_ENTRY(vkTrimCommandPoolKHR),
-		}
-	},
+	    VK_KHR_MAINTENANCE1_EXTENSION_NAME,
+	    {
+	        MAKE_VULKAN_DEVICE_ENTRY(vkTrimCommandPoolKHR),
+	    } },
 	// VK_KHR_sampler_ycbcr_conversion
 	{
-		VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
-		{
-			MAKE_VULKAN_DEVICE_ENTRY(vkCreateSamplerYcbcrConversionKHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkDestroySamplerYcbcrConversionKHR),
-		}
-	},
+	    VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
+	    {
+	        MAKE_VULKAN_DEVICE_ENTRY(vkCreateSamplerYcbcrConversionKHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkDestroySamplerYcbcrConversionKHR),
+	    } },
 	// VK_KHR_bind_memory2
 	{
-		VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
-		{
-			MAKE_VULKAN_DEVICE_ENTRY(vkBindBufferMemory2KHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkBindImageMemory2KHR),
-		}
-	},
+	    VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
+	    {
+	        MAKE_VULKAN_DEVICE_ENTRY(vkBindBufferMemory2KHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkBindImageMemory2KHR),
+	    } },
 	// VK_KHR_get_memory_requirements2
 	{
-		VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
-		{
-			MAKE_VULKAN_DEVICE_ENTRY(vkGetImageMemoryRequirements2KHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkGetBufferMemoryRequirements2KHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkGetImageSparseMemoryRequirements2KHR),
-		}
-	},
+	    VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
+	    {
+	        MAKE_VULKAN_DEVICE_ENTRY(vkGetImageMemoryRequirements2KHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkGetBufferMemoryRequirements2KHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkGetImageSparseMemoryRequirements2KHR),
+	    } },
 	// VK_KHR_maintenance3
 	{
-		VK_KHR_MAINTENANCE3_EXTENSION_NAME,
-		{
-			MAKE_VULKAN_DEVICE_ENTRY(vkGetDescriptorSetLayoutSupportKHR),
-		}
-	},
+	    VK_KHR_MAINTENANCE3_EXTENSION_NAME,
+	    {
+	        MAKE_VULKAN_DEVICE_ENTRY(vkGetDescriptorSetLayoutSupportKHR),
+	    } },
 	// VK_EXT_line_rasterization
 	{
-		VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME,
-		{
-			MAKE_VULKAN_DEVICE_ENTRY(vkCmdSetLineStippleEXT),
-		}
-	},
+	    VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME,
+	    {
+	        MAKE_VULKAN_DEVICE_ENTRY(vkCmdSetLineStippleEXT),
+	    } },
 #ifndef __ANDROID__
 	// VK_KHR_swapchain
 	{
-		VK_KHR_SWAPCHAIN_EXTENSION_NAME,
-		{
-			MAKE_VULKAN_DEVICE_ENTRY(vkCreateSwapchainKHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkDestroySwapchainKHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkGetSwapchainImagesKHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkAcquireNextImageKHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkAcquireNextImage2KHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkQueuePresentKHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkGetDeviceGroupPresentCapabilitiesKHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkGetDeviceGroupSurfacePresentModesKHR),
-		}
-	},
+	    VK_KHR_SWAPCHAIN_EXTENSION_NAME,
+	    {
+	        MAKE_VULKAN_DEVICE_ENTRY(vkCreateSwapchainKHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkDestroySwapchainKHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkGetSwapchainImagesKHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkAcquireNextImageKHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkAcquireNextImage2KHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkQueuePresentKHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkGetDeviceGroupPresentCapabilitiesKHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkGetDeviceGroupSurfacePresentModesKHR),
+	    } },
 #endif
 
 #if SWIFTSHADER_EXTERNAL_SEMAPHORE_OPAQUE_FD
 	// VK_KHR_external_semaphore_fd
 	{
-		VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
-		{
-			MAKE_VULKAN_DEVICE_ENTRY(vkGetSemaphoreFdKHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkImportSemaphoreFdKHR),
-		}
-	},
+	    VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
+	    {
+	        MAKE_VULKAN_DEVICE_ENTRY(vkGetSemaphoreFdKHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkImportSemaphoreFdKHR),
+	    } },
 #endif
 
 #if VK_USE_PLATFORM_FUCHSIA
 	// VK_FUCHSIA_external_semaphore
 	{
-		VK_FUCHSIA_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
-		{
-			MAKE_VULKAN_DEVICE_ENTRY(vkGetSemaphoreZirconHandleFUCHSIA),
-			MAKE_VULKAN_DEVICE_ENTRY(vkImportSemaphoreZirconHandleFUCHSIA),
-		}
-	},
+	    VK_FUCHSIA_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
+	    {
+	        MAKE_VULKAN_DEVICE_ENTRY(vkGetSemaphoreZirconHandleFUCHSIA),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkImportSemaphoreZirconHandleFUCHSIA),
+	    } },
 #endif
 
 #if SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
 	// VK_KHR_external_memory_fd
 	{
-		VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
-		{
-			MAKE_VULKAN_DEVICE_ENTRY(vkGetMemoryFdKHR),
-			MAKE_VULKAN_DEVICE_ENTRY(vkGetMemoryFdPropertiesKHR),
-		}
-	},
+	    VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
+	    {
+	        MAKE_VULKAN_DEVICE_ENTRY(vkGetMemoryFdKHR),
+	        MAKE_VULKAN_DEVICE_ENTRY(vkGetMemoryFdPropertiesKHR),
+	    } },
 #endif
 };
 
 #undef MAKE_VULKAN_DEVICE_ENTRY
 
-PFN_vkVoidFunction GetInstanceProcAddr(Instance* instance, const char* pName)
+PFN_vkVoidFunction GetInstanceProcAddr(Instance *instance, const char *pName)
 {
 	auto globalFunction = globalFunctionPointers.find(std::string(pName));
 	if(globalFunction != globalFunctionPointers.end())
@@ -404,7 +397,7 @@
 			return deviceFunction->second;
 		}
 
-		for(const auto& deviceExtensionFunctions : deviceExtensionFunctionPointers)
+		for(const auto &deviceExtensionFunctions : deviceExtensionFunctionPointers)
 		{
 			deviceFunction = deviceExtensionFunctions.second.find(std::string(pName));
 			if(deviceFunction != deviceExtensionFunctions.second.end())
@@ -417,7 +410,7 @@
 	return nullptr;
 }
 
-PFN_vkVoidFunction GetDeviceProcAddr(Device* device, const char* pName)
+PFN_vkVoidFunction GetDeviceProcAddr(Device *device, const char *pName)
 {
 	auto deviceFunction = deviceFunctionPointers.find(std::string(pName));
 	if(deviceFunction != deviceFunctionPointers.end())
@@ -425,7 +418,7 @@
 		return deviceFunction->second;
 	}
 
-	for(const auto& deviceExtensionFunctions : deviceExtensionFunctionPointers)
+	for(const auto &deviceExtensionFunctions : deviceExtensionFunctionPointers)
 	{
 		if(device->hasExtension(deviceExtensionFunctions.first))
 		{
@@ -448,42 +441,43 @@
 
 namespace {
 
-	int CloseDevice(struct hw_device_t *) { return 0; }
-
-	hwvulkan_device_t hal_device = {
-		.common = {
-			.tag = HARDWARE_DEVICE_TAG,
-			.version = HWVULKAN_DEVICE_API_VERSION_0_1,
-			.module = &HAL_MODULE_INFO_SYM.common,
-			.close = CloseDevice,
-		},
-		.EnumerateInstanceExtensionProperties = vkEnumerateInstanceExtensionProperties,
-		.CreateInstance = vkCreateInstance,
-		.GetInstanceProcAddr = vkGetInstanceProcAddr,
-	};
-
-	int OpenDevice(const hw_module_t *module, const char *id, hw_device_t **device)
-	{
-		if(strcmp(id, HWVULKAN_DEVICE_0) != 0) return -ENOENT;
-		*device = &hal_device.common;
-		return 0;
-	}
-
-	hw_module_methods_t module_methods = { .open = OpenDevice };
-
+int CloseDevice(struct hw_device_t *)
+{
+	return 0;
 }
 
-extern "C" hwvulkan_module_t HAL_MODULE_INFO_SYM =
+hwvulkan_device_t hal_device = {
+	.common = {
+	    .tag = HARDWARE_DEVICE_TAG,
+	    .version = HWVULKAN_DEVICE_API_VERSION_0_1,
+	    .module = &HAL_MODULE_INFO_SYM.common,
+	    .close = CloseDevice,
+	},
+	.EnumerateInstanceExtensionProperties = vkEnumerateInstanceExtensionProperties,
+	.CreateInstance = vkCreateInstance,
+	.GetInstanceProcAddr = vkGetInstanceProcAddr,
+};
+
+int OpenDevice(const hw_module_t *module, const char *id, hw_device_t **device)
 {
-	.common =
-	{
-		.tag = HARDWARE_MODULE_TAG,
-		.module_api_version = HWVULKAN_MODULE_API_VERSION_0_1,
-		.hal_api_version = HARDWARE_HAL_API_VERSION,
-		.id = HWVULKAN_HARDWARE_MODULE_ID,
-		.name = "Swiftshader Pastel",
-		.author = "Google",
-		.methods = &module_methods,
+	if(strcmp(id, HWVULKAN_DEVICE_0) != 0) return -ENOENT;
+	*device = &hal_device.common;
+	return 0;
+}
+
+hw_module_methods_t module_methods = { .open = OpenDevice };
+
+}  // namespace
+
+extern "C" hwvulkan_module_t HAL_MODULE_INFO_SYM = {
+	.common = {
+	    .tag = HARDWARE_MODULE_TAG,
+	    .module_api_version = HWVULKAN_MODULE_API_VERSION_0_1,
+	    .hal_api_version = HARDWARE_HAL_API_VERSION,
+	    .id = HWVULKAN_HARDWARE_MODULE_ID,
+	    .name = "Swiftshader Pastel",
+	    .author = "Google",
+	    .methods = &module_methods,
 	}
 };
 
diff --git a/src/Vulkan/VkImage.cpp b/src/Vulkan/VkImage.cpp
index 9cde705..c605f4a 100644
--- a/src/Vulkan/VkImage.cpp
+++ b/src/Vulkan/VkImage.cpp
@@ -12,99 +12,99 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "VkDeviceMemory.hpp"
+#include "VkImage.hpp"
 #include "VkBuffer.hpp"
 #include "VkDevice.hpp"
-#include "VkImage.hpp"
-#include "Device/Blitter.hpp"
+#include "VkDeviceMemory.hpp"
 #include "Device/BC_Decoder.hpp"
+#include "Device/Blitter.hpp"
 #include "Device/ETC_Decoder.hpp"
 #include <cstring>
 
 #ifdef __ANDROID__
-#include "System/GrallocAndroid.hpp"
+#	include "System/GrallocAndroid.hpp"
 #endif
 
 namespace {
 
-ETC_Decoder::InputType GetInputType(const vk::Format& format)
+ETC_Decoder::InputType GetInputType(const vk::Format &format)
 {
 	switch(format)
 	{
-	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-		return ETC_Decoder::ETC_R_UNSIGNED;
-	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-		return ETC_Decoder::ETC_R_SIGNED;
-	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-		return ETC_Decoder::ETC_RG_UNSIGNED;
-	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-		return ETC_Decoder::ETC_RG_SIGNED;
-	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-		return ETC_Decoder::ETC_RGB;
-	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-		return ETC_Decoder::ETC_RGB_PUNCHTHROUGH_ALPHA;
-	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-		return ETC_Decoder::ETC_RGBA;
-	default:
-		UNIMPLEMENTED("format: %d", int(format));
-		return ETC_Decoder::ETC_RGBA;
+		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+			return ETC_Decoder::ETC_R_UNSIGNED;
+		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+			return ETC_Decoder::ETC_R_SIGNED;
+		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+			return ETC_Decoder::ETC_RG_UNSIGNED;
+		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+			return ETC_Decoder::ETC_RG_SIGNED;
+		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+			return ETC_Decoder::ETC_RGB;
+		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+			return ETC_Decoder::ETC_RGB_PUNCHTHROUGH_ALPHA;
+		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+			return ETC_Decoder::ETC_RGBA;
+		default:
+			UNIMPLEMENTED("format: %d", int(format));
+			return ETC_Decoder::ETC_RGBA;
 	}
 }
 
-int GetBCn(const vk::Format& format)
+int GetBCn(const vk::Format &format)
 {
 	switch(format)
 	{
-	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-		return 1;
-	case VK_FORMAT_BC2_UNORM_BLOCK:
-	case VK_FORMAT_BC2_SRGB_BLOCK:
-		return 2;
-	case VK_FORMAT_BC3_UNORM_BLOCK:
-	case VK_FORMAT_BC3_SRGB_BLOCK:
-		return 3;
-	case VK_FORMAT_BC4_UNORM_BLOCK:
-	case VK_FORMAT_BC4_SNORM_BLOCK:
-		return 4;
-	case VK_FORMAT_BC5_UNORM_BLOCK:
-	case VK_FORMAT_BC5_SNORM_BLOCK:
-		return 5;
-	default:
-		UNIMPLEMENTED("format: %d", int(format));
-		return 0;
+		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+			return 1;
+		case VK_FORMAT_BC2_UNORM_BLOCK:
+		case VK_FORMAT_BC2_SRGB_BLOCK:
+			return 2;
+		case VK_FORMAT_BC3_UNORM_BLOCK:
+		case VK_FORMAT_BC3_SRGB_BLOCK:
+			return 3;
+		case VK_FORMAT_BC4_UNORM_BLOCK:
+		case VK_FORMAT_BC4_SNORM_BLOCK:
+			return 4;
+		case VK_FORMAT_BC5_UNORM_BLOCK:
+		case VK_FORMAT_BC5_SNORM_BLOCK:
+			return 5;
+		default:
+			UNIMPLEMENTED("format: %d", int(format));
+			return 0;
 	}
 }
 
 // Returns true for BC1 if we have an RGB format, false for RGBA
 // Returns true for BC4 and BC5 if we have an unsigned format, false for signed
 // Ignored by BC2 and BC3
-bool GetNoAlphaOrUnsigned(const vk::Format& format)
+bool GetNoAlphaOrUnsigned(const vk::Format &format)
 {
 	switch(format)
 	{
-	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-	case VK_FORMAT_BC4_UNORM_BLOCK:
-	case VK_FORMAT_BC5_UNORM_BLOCK:
-		return true;
-	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-	case VK_FORMAT_BC2_UNORM_BLOCK:
-	case VK_FORMAT_BC2_SRGB_BLOCK:
-	case VK_FORMAT_BC3_UNORM_BLOCK:
-	case VK_FORMAT_BC3_SRGB_BLOCK:
-	case VK_FORMAT_BC4_SNORM_BLOCK:
-	case VK_FORMAT_BC5_SNORM_BLOCK:
-		return false;
-	default:
-		UNIMPLEMENTED("format: %d", int(format));
-		return false;
+		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+		case VK_FORMAT_BC4_UNORM_BLOCK:
+		case VK_FORMAT_BC5_UNORM_BLOCK:
+			return true;
+		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+		case VK_FORMAT_BC2_UNORM_BLOCK:
+		case VK_FORMAT_BC2_SRGB_BLOCK:
+		case VK_FORMAT_BC3_UNORM_BLOCK:
+		case VK_FORMAT_BC3_SRGB_BLOCK:
+		case VK_FORMAT_BC4_SNORM_BLOCK:
+		case VK_FORMAT_BC5_SNORM_BLOCK:
+			return false;
+		default:
+			UNIMPLEMENTED("format: %d", int(format));
+			return false;
 	}
 }
 
@@ -112,37 +112,37 @@
 
 namespace vk {
 
-Image::Image(const VkImageCreateInfo* pCreateInfo, void* mem, Device *device) :
-	device(device),
-	flags(pCreateInfo->flags),
-	imageType(pCreateInfo->imageType),
-	format(pCreateInfo->format),
-	extent(pCreateInfo->extent),
-	mipLevels(pCreateInfo->mipLevels),
-	arrayLayers(pCreateInfo->arrayLayers),
-	samples(pCreateInfo->samples),
-	tiling(pCreateInfo->tiling),
-	usage(pCreateInfo->usage)
+Image::Image(const VkImageCreateInfo *pCreateInfo, void *mem, Device *device)
+    : device(device)
+    , flags(pCreateInfo->flags)
+    , imageType(pCreateInfo->imageType)
+    , format(pCreateInfo->format)
+    , extent(pCreateInfo->extent)
+    , mipLevels(pCreateInfo->mipLevels)
+    , arrayLayers(pCreateInfo->arrayLayers)
+    , samples(pCreateInfo->samples)
+    , tiling(pCreateInfo->tiling)
+    , usage(pCreateInfo->usage)
 {
 	if(format.isCompressed())
 	{
 		VkImageCreateInfo compressedImageCreateInfo = *pCreateInfo;
 		compressedImageCreateInfo.format = format.getDecompressedFormat();
-		decompressedImage = new (mem) Image(&compressedImageCreateInfo, nullptr, device);
+		decompressedImage = new(mem) Image(&compressedImageCreateInfo, nullptr, device);
 	}
 
-	const auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+	const auto *nextInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 	for(; nextInfo != nullptr; nextInfo = nextInfo->pNext)
 	{
 		if(nextInfo->sType == VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO)
 		{
-			const auto* externalInfo = reinterpret_cast<const VkExternalMemoryImageCreateInfo*>(nextInfo);
+			const auto *externalInfo = reinterpret_cast<const VkExternalMemoryImageCreateInfo *>(nextInfo);
 			supportedExternalMemoryHandleTypes = externalInfo->handleTypes;
 		}
 	}
 }
 
-void Image::destroy(const VkAllocationCallbacks* pAllocator)
+void Image::destroy(const VkAllocationCallbacks *pAllocator)
 {
 	if(decompressedImage)
 	{
@@ -150,7 +150,7 @@
 	}
 }
 
-size_t Image::ComputeRequiredAllocationSize(const VkImageCreateInfo* pCreateInfo)
+size_t Image::ComputeRequiredAllocationSize(const VkImageCreateInfo *pCreateInfo)
 {
 	return Format(pCreateInfo->format).isCompressed() ? sizeof(Image) : 0;
 }
@@ -165,12 +165,12 @@
 	return memoryRequirements;
 }
 
-bool Image::canBindToMemory(DeviceMemory* pDeviceMemory) const
+bool Image::canBindToMemory(DeviceMemory *pDeviceMemory) const
 {
 	return pDeviceMemory->checkExternalMemoryHandleType(supportedExternalMemoryHandleTypes);
 }
 
-void Image::bind(DeviceMemory* pDeviceMemory, VkDeviceSize pMemoryOffset)
+void Image::bind(DeviceMemory *pDeviceMemory, VkDeviceSize pMemoryOffset)
 {
 	deviceMemory = pDeviceMemory;
 	memoryOffset = pMemoryOffset;
@@ -184,7 +184,7 @@
 #ifdef __ANDROID__
 VkResult Image::prepareForExternalUseANDROID() const
 {
-	void* nativeBuffer = nullptr;
+	void *nativeBuffer = nullptr;
 	VkExtent3D extent = getMipLevelExtent(VK_IMAGE_ASPECT_COLOR_BIT, 0);
 
 	if(GrallocModule::getInstance()->lock(backingMemory.nativeHandle, GRALLOC_USAGE_SW_WRITE_OFTEN, 0, 0, extent.width, extent.height, &nativeBuffer) != 0)
@@ -201,8 +201,8 @@
 	int bufferRowBytes = backingMemory.stride * getFormat().bytes();
 	ASSERT(imageRowBytes <= bufferRowBytes);
 
-	uint8_t* srcBuffer = static_cast<uint8_t*>(deviceMemory->getOffsetPointer(0));
-	uint8_t* dstBuffer = static_cast<uint8_t*>(nativeBuffer);
+	uint8_t *srcBuffer = static_cast<uint8_t *>(deviceMemory->getOffsetPointer(0));
+	uint8_t *dstBuffer = static_cast<uint8_t *>(nativeBuffer);
 	for(uint32_t i = 0; i < extent.height; i++)
 	{
 		memcpy(dstBuffer + (i * bufferRowBytes), srcBuffer + (i * imageRowBytes), imageRowBytes);
@@ -222,15 +222,15 @@
 }
 #endif
 
-void Image::getSubresourceLayout(const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout) const
+void Image::getSubresourceLayout(const VkImageSubresource *pSubresource, VkSubresourceLayout *pLayout) const
 {
 	// By spec, aspectMask has a single bit set.
 	if(!((pSubresource->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
-	      (pSubresource->aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
-	      (pSubresource->aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ||
-	      (pSubresource->aspectMask == VK_IMAGE_ASPECT_PLANE_0_BIT) ||
-	      (pSubresource->aspectMask == VK_IMAGE_ASPECT_PLANE_1_BIT) ||
-	      (pSubresource->aspectMask == VK_IMAGE_ASPECT_PLANE_2_BIT)))
+	     (pSubresource->aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
+	     (pSubresource->aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ||
+	     (pSubresource->aspectMask == VK_IMAGE_ASPECT_PLANE_0_BIT) ||
+	     (pSubresource->aspectMask == VK_IMAGE_ASPECT_PLANE_1_BIT) ||
+	     (pSubresource->aspectMask == VK_IMAGE_ASPECT_PLANE_2_BIT)))
 	{
 		UNSUPPORTED("aspectMask %X", pSubresource->aspectMask);
 	}
@@ -243,27 +243,27 @@
 	pLayout->arrayPitch = getLayerSize(aspect);
 }
 
-void Image::copyTo(Image* dstImage, const VkImageCopy& region) const
+void Image::copyTo(Image *dstImage, const VkImageCopy &region) const
 {
 	// Image copy does not perform any conversion, it simply copies memory from
 	// an image to another image that has the same number of bytes per pixel.
 
 	if(!((region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
-	      (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
-	      (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ||
-	      (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_0_BIT) ||
-	      (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_1_BIT) ||
-	      (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_2_BIT)))
+	     (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
+	     (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ||
+	     (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_0_BIT) ||
+	     (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_1_BIT) ||
+	     (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_2_BIT)))
 	{
 		UNSUPPORTED("srcSubresource.aspectMask %X", region.srcSubresource.aspectMask);
 	}
 
 	if(!((region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
-	      (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
-	      (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ||
-	      (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_0_BIT) ||
-	      (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_1_BIT) ||
-	      (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_2_BIT)))
+	     (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
+	     (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ||
+	     (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_0_BIT) ||
+	     (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_1_BIT) ||
+	     (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_2_BIT)))
 	{
 		UNSUPPORTED("dstSubresource.aspectMask %X", region.dstSubresource.aspectMask);
 	}
@@ -296,8 +296,8 @@
 	int srcBytesPerBlock = srcFormat.bytesPerBlock();
 	ASSERT(srcBytesPerBlock == dstFormat.bytesPerBlock());
 
-	const uint8_t* srcMem = static_cast<const uint8_t*>(getTexelPointer(region.srcOffset, region.srcSubresource));
-	uint8_t* dstMem = static_cast<uint8_t*>(dstImage->getTexelPointer(region.dstOffset, region.dstSubresource));
+	const uint8_t *srcMem = static_cast<const uint8_t *>(getTexelPointer(region.srcOffset, region.srcSubresource));
+	uint8_t *dstMem = static_cast<uint8_t *>(dstImage->getTexelPointer(region.dstOffset, region.dstSubresource));
 
 	int srcRowPitchBytes = rowPitchBytes(srcAspect, region.srcSubresource.mipLevel);
 	int srcSlicePitchBytes = slicePitchBytes(srcAspect, region.srcSubresource.mipLevel);
@@ -309,21 +309,21 @@
 	VkExtent3D copyExtent = imageExtentInBlocks(region.extent, srcAspect);
 
 	bool isSinglePlane = (copyExtent.depth == 1);
-	bool isSingleLine  = (copyExtent.height == 1) && isSinglePlane;
+	bool isSingleLine = (copyExtent.height == 1) && isSinglePlane;
 	// In order to copy multiple lines using a single memcpy call, we
 	// have to make sure that we need to copy the entire line and that
 	// both source and destination lines have the same length in bytes
-	bool isEntireLine  = (region.extent.width == srcExtent.width) &&
-	                     (region.extent.width == dstExtent.width) &&
-	// For non compressed formats, blockWidth is 1. For compressed
-	// formats, rowPitchBytes returns the number of bytes for a row of
-	// blocks, so we have to divide by the block height, which means:
-	// srcRowPitchBytes / srcBlockWidth == dstRowPitchBytes / dstBlockWidth
-	// And, to avoid potential non exact integer division, for example if a
-	// block has 16 bytes and represents 5 lines, we change the equation to:
-	// srcRowPitchBytes * dstBlockWidth == dstRowPitchBytes * srcBlockWidth
-	                     ((srcRowPitchBytes * dstFormat.blockWidth()) ==
-	                      (dstRowPitchBytes * srcFormat.blockWidth()));
+	bool isEntireLine = (region.extent.width == srcExtent.width) &&
+	                    (region.extent.width == dstExtent.width) &&
+	                    // For non compressed formats, blockWidth is 1. For compressed
+	                    // formats, rowPitchBytes returns the number of bytes for a row of
+	                    // blocks, so we have to divide by the block height, which means:
+	                    // srcRowPitchBytes / srcBlockWidth == dstRowPitchBytes / dstBlockWidth
+	                    // And, to avoid potential non exact integer division, for example if a
+	                    // block has 16 bytes and represents 5 lines, we change the equation to:
+	                    // srcRowPitchBytes * dstBlockWidth == dstRowPitchBytes * srcBlockWidth
+	                    ((srcRowPitchBytes * dstFormat.blockWidth()) ==
+	                     (dstRowPitchBytes * srcFormat.blockWidth()));
 	// In order to copy multiple planes using a single memcpy call, we
 	// have to make sure that we need to copy the entire plane and that
 	// both source and destination planes have the same length in bytes
@@ -332,28 +332,28 @@
 	                     (copyExtent.height == dstExtent.height) &&
 	                     (srcSlicePitchBytes == dstSlicePitchBytes);
 
-	if(isSingleLine) // Copy one line
+	if(isSingleLine)  // Copy one line
 	{
 		size_t copySize = copyExtent.width * srcBytesPerBlock;
 		ASSERT((srcMem + copySize) < end());
 		ASSERT((dstMem + copySize) < dstImage->end());
 		memcpy(dstMem, srcMem, copySize);
 	}
-	else if(isEntireLine && isSinglePlane) // Copy one plane
+	else if(isEntireLine && isSinglePlane)  // Copy one plane
 	{
 		size_t copySize = copyExtent.height * srcRowPitchBytes;
 		ASSERT((srcMem + copySize) < end());
 		ASSERT((dstMem + copySize) < dstImage->end());
 		memcpy(dstMem, srcMem, copySize);
 	}
-	else if(isEntirePlane) // Copy multiple planes
+	else if(isEntirePlane)  // Copy multiple planes
 	{
 		size_t copySize = copyExtent.depth * srcSlicePitchBytes;
 		ASSERT((srcMem + copySize) < end());
 		ASSERT((dstMem + copySize) < dstImage->end());
 		memcpy(dstMem, srcMem, copySize);
 	}
-	else if(isEntireLine) // Copy plane by plane
+	else if(isEntireLine)  // Copy plane by plane
 	{
 		size_t copySize = copyExtent.height * srcRowPitchBytes;
 
@@ -364,14 +364,14 @@
 			memcpy(dstMem, srcMem, copySize);
 		}
 	}
-	else // Copy line by line
+	else  // Copy line by line
 	{
 		size_t copySize = copyExtent.width * srcBytesPerBlock;
 
 		for(uint32_t z = 0; z < copyExtent.depth; z++, dstMem += dstSlicePitchBytes, srcMem += srcSlicePitchBytes)
 		{
-			const uint8_t* srcSlice = srcMem;
-			uint8_t* dstSlice = dstMem;
+			const uint8_t *srcSlice = srcMem;
+			uint8_t *dstSlice = dstMem;
 			for(uint32_t y = 0; y < copyExtent.height; y++, dstSlice += dstRowPitchBytes, srcSlice += srcRowPitchBytes)
 			{
 				ASSERT((srcSlice + copySize) < end());
@@ -385,20 +385,20 @@
 	                               region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount });
 }
 
-void Image::copy(Buffer* buffer, const VkBufferImageCopy& region, bool bufferIsSource)
+void Image::copy(Buffer *buffer, const VkBufferImageCopy &region, bool bufferIsSource)
 {
 	switch(region.imageSubresource.aspectMask)
 	{
-	case VK_IMAGE_ASPECT_COLOR_BIT:
-	case VK_IMAGE_ASPECT_DEPTH_BIT:
-	case VK_IMAGE_ASPECT_STENCIL_BIT:
-	case VK_IMAGE_ASPECT_PLANE_0_BIT:
-	case VK_IMAGE_ASPECT_PLANE_1_BIT:
-	case VK_IMAGE_ASPECT_PLANE_2_BIT:
-		break;
-	default:
-		UNSUPPORTED("aspectMask %x", int(region.imageSubresource.aspectMask));
-		break;
+		case VK_IMAGE_ASPECT_COLOR_BIT:
+		case VK_IMAGE_ASPECT_DEPTH_BIT:
+		case VK_IMAGE_ASPECT_STENCIL_BIT:
+		case VK_IMAGE_ASPECT_PLANE_0_BIT:
+		case VK_IMAGE_ASPECT_PLANE_1_BIT:
+		case VK_IMAGE_ASPECT_PLANE_2_BIT:
+			break;
+		default:
+			UNSUPPORTED("aspectMask %x", int(region.imageSubresource.aspectMask));
+			break;
 	}
 
 	auto aspect = static_cast<VkImageAspectFlagBits>(region.imageSubresource.aspectMask);
@@ -410,10 +410,10 @@
 	int bufferRowPitchBytes = bufferExtent.width * bytesPerBlock;
 	int bufferSlicePitchBytes = bufferExtent.height * bufferRowPitchBytes;
 
-	uint8_t* bufferMemory = static_cast<uint8_t*>(buffer->getOffsetPointer(region.bufferOffset));
-	uint8_t* imageMemory = static_cast<uint8_t*>(getTexelPointer(region.imageOffset, region.imageSubresource));
-	uint8_t* srcMemory = bufferIsSource ? bufferMemory : imageMemory;
-	uint8_t* dstMemory = bufferIsSource ? imageMemory : bufferMemory;
+	uint8_t *bufferMemory = static_cast<uint8_t *>(buffer->getOffsetPointer(region.bufferOffset));
+	uint8_t *imageMemory = static_cast<uint8_t *>(getTexelPointer(region.imageOffset, region.imageSubresource));
+	uint8_t *srcMemory = bufferIsSource ? bufferMemory : imageMemory;
+	uint8_t *dstMemory = bufferIsSource ? imageMemory : bufferMemory;
 	int imageRowPitchBytes = rowPitchBytes(aspect, region.imageSubresource.mipLevel);
 	int imageSlicePitchBytes = slicePitchBytes(aspect, region.imageSubresource.mipLevel);
 
@@ -424,11 +424,11 @@
 
 	VkExtent3D mipLevelExtent = getMipLevelExtent(aspect, region.imageSubresource.mipLevel);
 	bool isSinglePlane = (imageExtent.depth == 1);
-	bool isSingleLine  = (imageExtent.height == 1) && isSinglePlane;
-	bool isEntireLine  = (imageExtent.width == mipLevelExtent.width) &&
-						 (imageRowPitchBytes == bufferRowPitchBytes);
+	bool isSingleLine = (imageExtent.height == 1) && isSinglePlane;
+	bool isEntireLine = (imageExtent.width == mipLevelExtent.width) &&
+	                    (imageRowPitchBytes == bufferRowPitchBytes);
 	bool isEntirePlane = isEntireLine && (imageExtent.height == mipLevelExtent.height) &&
-						 (imageSlicePitchBytes == bufferSlicePitchBytes);
+	                     (imageSlicePitchBytes == bufferSlicePitchBytes);
 
 	VkDeviceSize copySize = 0;
 	VkDeviceSize bufferLayerSize = 0;
@@ -444,15 +444,15 @@
 	}
 	else if(isEntirePlane)
 	{
-		copySize = imageExtent.depth * imageSlicePitchBytes; // Copy multiple planes
+		copySize = imageExtent.depth * imageSlicePitchBytes;  // Copy multiple planes
 		bufferLayerSize = copySize;
 	}
-	else if(isEntireLine) // Copy plane by plane
+	else if(isEntireLine)  // Copy plane by plane
 	{
 		copySize = imageExtent.height * imageRowPitchBytes;
 		bufferLayerSize = copySize * imageExtent.depth;
 	}
-	else // Copy line by line
+	else  // Copy line by line
 	{
 		copySize = imageExtent.width * bytesPerBlock;
 		bufferLayerSize = copySize * imageExtent.depth * imageExtent.height;
@@ -470,10 +470,10 @@
 			ASSERT(((bufferIsSource ? srcMemory : dstMemory) + copySize) < buffer->end());
 			memcpy(dstMemory, srcMemory, copySize);
 		}
-		else if(isEntireLine) // Copy plane by plane
+		else if(isEntireLine)  // Copy plane by plane
 		{
-			uint8_t* srcPlaneMemory = srcMemory;
-			uint8_t* dstPlaneMemory = dstMemory;
+			uint8_t *srcPlaneMemory = srcMemory;
+			uint8_t *dstPlaneMemory = dstMemory;
 			for(uint32_t z = 0; z < imageExtent.depth; z++)
 			{
 				ASSERT(((bufferIsSource ? dstPlaneMemory : srcPlaneMemory) + copySize) < end());
@@ -483,14 +483,14 @@
 				dstPlaneMemory += dstSlicePitchBytes;
 			}
 		}
-		else // Copy line by line
+		else  // Copy line by line
 		{
-			uint8_t* srcLayerMemory = srcMemory;
-			uint8_t* dstLayerMemory = dstMemory;
+			uint8_t *srcLayerMemory = srcMemory;
+			uint8_t *dstLayerMemory = dstMemory;
 			for(uint32_t z = 0; z < imageExtent.depth; z++)
 			{
-				uint8_t* srcPlaneMemory = srcLayerMemory;
-				uint8_t* dstPlaneMemory = dstLayerMemory;
+				uint8_t *srcPlaneMemory = srcLayerMemory;
+				uint8_t *dstPlaneMemory = dstLayerMemory;
 				for(uint32_t y = 0; y < imageExtent.height; y++)
 				{
 					ASSERT(((bufferIsSource ? dstPlaneMemory : srcPlaneMemory) + copySize) < end());
@@ -515,24 +515,24 @@
 	}
 }
 
-void Image::copyTo(Buffer* dstBuffer, const VkBufferImageCopy& region)
+void Image::copyTo(Buffer *dstBuffer, const VkBufferImageCopy &region)
 {
 	copy(dstBuffer, region, false);
 }
 
-void Image::copyFrom(Buffer* srcBuffer, const VkBufferImageCopy& region)
+void Image::copyFrom(Buffer *srcBuffer, const VkBufferImageCopy &region)
 {
 	copy(srcBuffer, region, true);
 }
 
-void* Image::getTexelPointer(const VkOffset3D& offset, const VkImageSubresourceLayers& subresource) const
+void *Image::getTexelPointer(const VkOffset3D &offset, const VkImageSubresourceLayers &subresource) const
 {
 	VkImageAspectFlagBits aspect = static_cast<VkImageAspectFlagBits>(subresource.aspectMask);
 	return deviceMemory->getOffsetPointer(texelOffsetBytesInStorage(offset, subresource) +
-	       getMemoryOffset(aspect, subresource.mipLevel, subresource.baseArrayLayer));
+	                                      getMemoryOffset(aspect, subresource.mipLevel, subresource.baseArrayLayer));
 }
 
-VkExtent3D Image::imageExtentInBlocks(const VkExtent3D& extent, VkImageAspectFlagBits aspect) const
+VkExtent3D Image::imageExtentInBlocks(const VkExtent3D &extent, VkImageAspectFlagBits aspect) const
 {
 	VkExtent3D adjustedExtent = extent;
 	Format usedFormat = getFormat(aspect);
@@ -549,7 +549,7 @@
 	return adjustedExtent;
 }
 
-VkOffset3D Image::imageOffsetInBlocks(const VkOffset3D& offset, VkImageAspectFlagBits aspect) const
+VkOffset3D Image::imageOffsetInBlocks(const VkOffset3D &offset, VkImageAspectFlagBits aspect) const
 {
 	VkOffset3D adjustedOffset = offset;
 	Format usedFormat = getFormat(aspect);
@@ -559,7 +559,7 @@
 		int blockWidth = usedFormat.blockWidth();
 		int blockHeight = usedFormat.blockHeight();
 
-		ASSERT(((offset.x % blockWidth) == 0) && ((offset.y % blockHeight) == 0)); // We can't offset within a block
+		ASSERT(((offset.x % blockWidth) == 0) && ((offset.y % blockHeight) == 0));  // We can't offset within a block
 
 		adjustedOffset.x /= blockWidth;
 		adjustedOffset.y /= blockHeight;
@@ -567,7 +567,7 @@
 	return adjustedOffset;
 }
 
-VkExtent2D Image::bufferExtentInBlocks(const VkExtent2D& extent, const VkBufferImageCopy& region) const
+VkExtent2D Image::bufferExtentInBlocks(const VkExtent2D &extent, const VkBufferImageCopy &region) const
 {
 	VkExtent2D adjustedExtent = extent;
 	VkImageAspectFlagBits aspect = static_cast<VkImageAspectFlagBits>(region.imageSubresource.aspectMask);
@@ -603,7 +603,7 @@
 	return (isCube() && !format.isCompressed()) ? 1 : 0;
 }
 
-VkDeviceSize Image::texelOffsetBytesInStorage(const VkOffset3D& offset, const VkImageSubresourceLayers& subresource) const
+VkDeviceSize Image::texelOffsetBytesInStorage(const VkOffset3D &offset, const VkImageSubresourceLayers &subresource) const
 {
 	VkImageAspectFlagBits aspect = static_cast<VkImageAspectFlagBits>(subresource.aspectMask);
 	VkOffset3D adjustedOffset = imageOffsetInBlocks(offset, aspect);
@@ -620,35 +620,35 @@
 	mipLevelExtent.height = extent.height >> mipLevel;
 	mipLevelExtent.depth = extent.depth >> mipLevel;
 
-	if(mipLevelExtent.width  == 0) { mipLevelExtent.width  = 1; }
+	if(mipLevelExtent.width == 0) { mipLevelExtent.width = 1; }
 	if(mipLevelExtent.height == 0) { mipLevelExtent.height = 1; }
-	if(mipLevelExtent.depth  == 0) { mipLevelExtent.depth  = 1; }
+	if(mipLevelExtent.depth == 0) { mipLevelExtent.depth = 1; }
 
 	switch(aspect)
 	{
-	case VK_IMAGE_ASPECT_COLOR_BIT:
-	case VK_IMAGE_ASPECT_DEPTH_BIT:
-	case VK_IMAGE_ASPECT_STENCIL_BIT:
-	case VK_IMAGE_ASPECT_PLANE_0_BIT:  // Vulkan 1.1 Table 31. Plane Format Compatibility Table: plane 0 of all defined formats is full resolution.
-		break;
-	case VK_IMAGE_ASPECT_PLANE_1_BIT:
-	case VK_IMAGE_ASPECT_PLANE_2_BIT:
-		switch(format)
-		{
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-			ASSERT(mipLevelExtent.width % 2 == 0 && mipLevelExtent.height % 2 == 0);  // Vulkan 1.1: "Images in this format must be defined with a width and height that is a multiple of two."
-			// Vulkan 1.1 Table 31. Plane Format Compatibility Table:
-			// Half-resolution U and V planes.
-			mipLevelExtent.width /= 2;
-			mipLevelExtent.height /= 2;
+		case VK_IMAGE_ASPECT_COLOR_BIT:
+		case VK_IMAGE_ASPECT_DEPTH_BIT:
+		case VK_IMAGE_ASPECT_STENCIL_BIT:
+		case VK_IMAGE_ASPECT_PLANE_0_BIT:  // Vulkan 1.1 Table 31. Plane Format Compatibility Table: plane 0 of all defined formats is full resolution.
+			break;
+		case VK_IMAGE_ASPECT_PLANE_1_BIT:
+		case VK_IMAGE_ASPECT_PLANE_2_BIT:
+			switch(format)
+			{
+				case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+				case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+					ASSERT(mipLevelExtent.width % 2 == 0 && mipLevelExtent.height % 2 == 0);  // Vulkan 1.1: "Images in this format must be defined with a width and height that is a multiple of two."
+					// Vulkan 1.1 Table 31. Plane Format Compatibility Table:
+					// Half-resolution U and V planes.
+					mipLevelExtent.width /= 2;
+					mipLevelExtent.height /= 2;
+					break;
+				default:
+					UNSUPPORTED("format %d", int(format));
+			}
 			break;
 		default:
-			UNSUPPORTED("format %d", int(format));
-		}
-		break;
-	default:
-		UNSUPPORTED("aspect %x", int(aspect));
+			UNSUPPORTED("aspect %x", int(aspect));
 	}
 
 	return mipLevelExtent;
@@ -658,7 +658,7 @@
 {
 	// Depth and Stencil pitch should be computed separately
 	ASSERT((aspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) !=
-	                 (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
+	       (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
 
 	return getFormat(aspect).pitchB(getMipLevelExtent(aspect, mipLevel).width, borderSize(), true);
 }
@@ -667,7 +667,7 @@
 {
 	// Depth and Stencil slice should be computed separately
 	ASSERT((aspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) !=
-	                 (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
+	       (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
 
 	VkExtent3D mipLevelExtent = getMipLevelExtent(aspect, mipLevel);
 	Format usedFormat = getFormat(aspect);
@@ -690,47 +690,46 @@
 	return (flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) && (imageType == VK_IMAGE_TYPE_2D);
 }
 
-uint8_t* Image::end() const
+uint8_t *Image::end() const
 {
-	return reinterpret_cast<uint8_t*>(deviceMemory->getOffsetPointer(deviceMemory->getCommittedMemoryInBytes() + 1));
+	return reinterpret_cast<uint8_t *>(deviceMemory->getOffsetPointer(deviceMemory->getCommittedMemoryInBytes() + 1));
 }
 
 VkDeviceSize Image::getMemoryOffset(VkImageAspectFlagBits aspect) const
 {
 	switch(format)
 	{
-	case VK_FORMAT_D16_UNORM_S8_UINT:
-	case VK_FORMAT_D24_UNORM_S8_UINT:
-	case VK_FORMAT_D32_SFLOAT_S8_UINT:
-		if(aspect == VK_IMAGE_ASPECT_STENCIL_BIT)
-		{
-			// Offset by depth buffer to get to stencil buffer
-			return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_DEPTH_BIT);
-		}
-		break;
+		case VK_FORMAT_D16_UNORM_S8_UINT:
+		case VK_FORMAT_D24_UNORM_S8_UINT:
+		case VK_FORMAT_D32_SFLOAT_S8_UINT:
+			if(aspect == VK_IMAGE_ASPECT_STENCIL_BIT)
+			{
+				// Offset by depth buffer to get to stencil buffer
+				return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_DEPTH_BIT);
+			}
+			break;
 
-	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		if(aspect == VK_IMAGE_ASPECT_PLANE_2_BIT)
-		{
-			return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_PLANE_1_BIT)
-			                    + getStorageSize(VK_IMAGE_ASPECT_PLANE_0_BIT);
-		}
-		// Fall through to 2PLANE case:
-	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-		if(aspect == VK_IMAGE_ASPECT_PLANE_1_BIT)
-		{
-			return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_PLANE_0_BIT);
-		}
-		else
-		{
-			ASSERT(aspect == VK_IMAGE_ASPECT_PLANE_0_BIT);
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+			if(aspect == VK_IMAGE_ASPECT_PLANE_2_BIT)
+			{
+				return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_PLANE_1_BIT) + getStorageSize(VK_IMAGE_ASPECT_PLANE_0_BIT);
+			}
+			// Fall through to 2PLANE case:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+			if(aspect == VK_IMAGE_ASPECT_PLANE_1_BIT)
+			{
+				return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_PLANE_0_BIT);
+			}
+			else
+			{
+				ASSERT(aspect == VK_IMAGE_ASPECT_PLANE_0_BIT);
 
-			return memoryOffset;
-		}
-		break;
+				return memoryOffset;
+			}
+			break;
 
-	default:
-		break;
+		default:
+			break;
 	}
 
 	return memoryOffset;
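
Annotation: getMemoryOffset lays the aspects of a combined format out back to back. A worked sketch of the offsets returned by the cases above:

	// VK_FORMAT_D24_UNORM_S8_UINT:
	//   depth   at memoryOffset
	//   stencil at memoryOffset + storage size of the depth aspect
	// VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
	//   plane 0 at memoryOffset
	//   plane 1 at memoryOffset + size(plane 0)
	//   plane 2 at memoryOffset + size(plane 0) + size(plane 1)
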
@@ -805,8 +804,8 @@
 
 	VkDeviceSize storageSize = 0;
 
-	if(aspectMask & VK_IMAGE_ASPECT_COLOR_BIT)   storageSize += getLayerSize(VK_IMAGE_ASPECT_COLOR_BIT);
-	if(aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT)   storageSize += getLayerSize(VK_IMAGE_ASPECT_DEPTH_BIT);
+	if(aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_COLOR_BIT);
+	if(aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_DEPTH_BIT);
 	if(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_STENCIL_BIT);
 	if(aspectMask & VK_IMAGE_ASPECT_PLANE_0_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_PLANE_0_BIT);
 	if(aspectMask & VK_IMAGE_ASPECT_PLANE_1_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_PLANE_1_BIT);
@@ -815,7 +814,7 @@
 	return arrayLayers * storageSize;
 }
 
-const Image* Image::getSampledImage(const vk::Format& imageViewFormat) const
+const Image *Image::getSampledImage(const vk::Format &imageViewFormat) const
 {
 	bool isImageViewCompressed = imageViewFormat.isCompressed();
 	if(decompressedImage && !isImageViewCompressed)
@@ -830,17 +829,17 @@
 	return (decompressedImage && isImageViewCompressed) ? decompressedImage : this;
 }
 
-void Image::blit(Image* dstImage, const VkImageBlit& region, VkFilter filter) const
+void Image::blit(Image *dstImage, const VkImageBlit &region, VkFilter filter) const
 {
 	device->getBlitter()->blit(this, dstImage, region, filter);
 }
 
-void Image::blitToBuffer(VkImageSubresourceLayers subresource, VkOffset3D offset, VkExtent3D extent, uint8_t* dst, int bufferRowPitch, int bufferSlicePitch) const
+void Image::blitToBuffer(VkImageSubresourceLayers subresource, VkOffset3D offset, VkExtent3D extent, uint8_t *dst, int bufferRowPitch, int bufferSlicePitch) const
 {
 	device->getBlitter()->blitToBuffer(this, subresource, offset, extent, dst, bufferRowPitch, bufferSlicePitch);
 }
 
-void Image::resolve(Image* dstImage, const VkImageResolve& region) const
+void Image::resolve(Image *dstImage, const VkImageResolve &region) const
 {
 	VkImageBlit blitRegion;
 
@@ -876,34 +875,32 @@
 	return VK_FORMAT_R32G32B32A32_SFLOAT;
 }
 
-uint32_t Image::getLastLayerIndex(const VkImageSubresourceRange& subresourceRange) const
+uint32_t Image::getLastLayerIndex(const VkImageSubresourceRange &subresourceRange) const
 {
-	return ((subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS) ?
-	        arrayLayers : (subresourceRange.baseArrayLayer + subresourceRange.layerCount)) - 1;
+	return ((subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS) ? arrayLayers : (subresourceRange.baseArrayLayer + subresourceRange.layerCount)) - 1;
 }
 
-uint32_t Image::getLastMipLevel(const VkImageSubresourceRange& subresourceRange) const
+uint32_t Image::getLastMipLevel(const VkImageSubresourceRange &subresourceRange) const
 {
-	return ((subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS) ?
-	        mipLevels : (subresourceRange.baseMipLevel + subresourceRange.levelCount)) - 1;
+	return ((subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS) ? mipLevels : (subresourceRange.baseMipLevel + subresourceRange.levelCount)) - 1;
 }
 
-void Image::clear(void* pixelData, VkFormat pixelFormat, const vk::Format& viewFormat, const VkImageSubresourceRange& subresourceRange, const VkRect2D& renderArea)
+void Image::clear(void *pixelData, VkFormat pixelFormat, const vk::Format &viewFormat, const VkImageSubresourceRange &subresourceRange, const VkRect2D &renderArea)
 {
 	device->getBlitter()->clear(pixelData, pixelFormat, this, viewFormat, subresourceRange, &renderArea);
 }
 
-void Image::clear(const VkClearColorValue& color, const VkImageSubresourceRange& subresourceRange)
+void Image::clear(const VkClearColorValue &color, const VkImageSubresourceRange &subresourceRange)
 {
 	if(!(subresourceRange.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT))
 	{
 		UNIMPLEMENTED("aspectMask");
 	}
 
-	device->getBlitter()->clear((void*)color.float32, getClearFormat(), this, format, subresourceRange);
+	device->getBlitter()->clear((void *)color.float32, getClearFormat(), this, format, subresourceRange);
 }
 
-void Image::clear(const VkClearDepthStencilValue& color, const VkImageSubresourceRange& subresourceRange)
+void Image::clear(const VkClearDepthStencilValue &color, const VkImageSubresourceRange &subresourceRange)
 {
 	if((subresourceRange.aspectMask & ~(VK_IMAGE_ASPECT_DEPTH_BIT |
 	                                    VK_IMAGE_ASPECT_STENCIL_BIT)) != 0)
@@ -915,18 +912,18 @@
 	{
 		VkImageSubresourceRange depthSubresourceRange = subresourceRange;
 		depthSubresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
-		device->getBlitter()->clear((void*)(&color.depth), VK_FORMAT_D32_SFLOAT, this, format, depthSubresourceRange);
+		device->getBlitter()->clear((void *)(&color.depth), VK_FORMAT_D32_SFLOAT, this, format, depthSubresourceRange);
 	}
 
 	if(subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)
 	{
 		VkImageSubresourceRange stencilSubresourceRange = subresourceRange;
 		stencilSubresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
-		device->getBlitter()->clear((void*)(&color.stencil), VK_FORMAT_S8_UINT, this, format, stencilSubresourceRange);
+		device->getBlitter()->clear((void *)(&color.stencil), VK_FORMAT_S8_UINT, this, format, stencilSubresourceRange);
 	}
 }
 
-void Image::clear(const VkClearValue& clearValue, const vk::Format& viewFormat, const VkRect2D& renderArea, const VkImageSubresourceRange& subresourceRange)
+void Image::clear(const VkClearValue &clearValue, const vk::Format &viewFormat, const VkRect2D &renderArea, const VkImageSubresourceRange &subresourceRange)
 {
 	if(!((subresourceRange.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
 	     (subresourceRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT |
@@ -937,7 +934,7 @@
 
 	if(subresourceRange.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT)
 	{
-		clear((void*)(clearValue.color.float32), getClearFormat(), viewFormat, subresourceRange, renderArea);
+		clear((void *)(clearValue.color.float32), getClearFormat(), viewFormat, subresourceRange, renderArea);
 	}
 	else
 	{
@@ -945,59 +942,58 @@
 		{
 			VkImageSubresourceRange depthSubresourceRange = subresourceRange;
 			depthSubresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
-			clear((void*)(&clearValue.depthStencil.depth), VK_FORMAT_D32_SFLOAT, viewFormat, depthSubresourceRange, renderArea);
+			clear((void *)(&clearValue.depthStencil.depth), VK_FORMAT_D32_SFLOAT, viewFormat, depthSubresourceRange, renderArea);
 		}
 
 		if(subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)
 		{
 			VkImageSubresourceRange stencilSubresourceRange = subresourceRange;
 			stencilSubresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
-			clear((void*)(&clearValue.depthStencil.stencil), VK_FORMAT_S8_UINT, viewFormat, stencilSubresourceRange, renderArea);
+			clear((void *)(&clearValue.depthStencil.stencil), VK_FORMAT_S8_UINT, viewFormat, stencilSubresourceRange, renderArea);
 		}
 	}
 }
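
Annotation: for a combined depth/stencil attachment, the else-branch above splits the clear into one blitter pass per requested aspect. A hedged usage sketch (`image` is an existing vk::Image; `width` and `height` stand in for the real framebuffer size):

	VkClearValue clearValue = {};
	clearValue.depthStencil = { 1.0f, 0 };  // depth = 1.0, stencil = 0
	VkImageSubresourceRange range = {
		VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT, 0, 1, 0, 1
	};
	VkRect2D area = { { 0, 0 }, { width, height } };
	image->clear(clearValue, image->getFormat(), area, range);  // issues two clears internally
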
 
-void Image::prepareForSampling(const VkImageSubresourceRange& subresourceRange)
+void Image::prepareForSampling(const VkImageSubresourceRange &subresourceRange)
 {
 	if(decompressedImage)
 	{
 		switch(format)
 		{
-		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-			decodeETC2(subresourceRange);
-			break;
-		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-		case VK_FORMAT_BC2_UNORM_BLOCK:
-		case VK_FORMAT_BC2_SRGB_BLOCK:
-		case VK_FORMAT_BC3_UNORM_BLOCK:
-		case VK_FORMAT_BC3_SRGB_BLOCK:
-		case VK_FORMAT_BC4_UNORM_BLOCK:
-		case VK_FORMAT_BC4_SNORM_BLOCK:
-		case VK_FORMAT_BC5_UNORM_BLOCK:
-		case VK_FORMAT_BC5_SNORM_BLOCK:
-			decodeBC(subresourceRange);
-			break;
-		default:
-			break;
+			case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+			case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+			case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+			case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+			case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+			case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+			case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+			case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+			case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+			case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+				decodeETC2(subresourceRange);
+				break;
+			case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+			case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+			case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+			case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+			case VK_FORMAT_BC2_UNORM_BLOCK:
+			case VK_FORMAT_BC2_SRGB_BLOCK:
+			case VK_FORMAT_BC3_UNORM_BLOCK:
+			case VK_FORMAT_BC3_SRGB_BLOCK:
+			case VK_FORMAT_BC4_UNORM_BLOCK:
+			case VK_FORMAT_BC4_SNORM_BLOCK:
+			case VK_FORMAT_BC5_UNORM_BLOCK:
+			case VK_FORMAT_BC5_SNORM_BLOCK:
+				decodeBC(subresourceRange);
+				break;
+			default:
+				break;
 		}
 	}
 
 	if(isCube() && (arrayLayers >= 6))
 	{
-		VkImageSubresourceLayers subresourceLayers =
-		{
+		VkImageSubresourceLayers subresourceLayers = {
 			subresourceRange.aspectMask,
 			subresourceRange.baseMipLevel,
 			subresourceRange.baseArrayLayer,
@@ -1007,8 +1003,8 @@
 		for(; subresourceLayers.mipLevel <= lastMipLevel; subresourceLayers.mipLevel++)
 		{
 			for(subresourceLayers.baseArrayLayer = 0;
-				subresourceLayers.baseArrayLayer < arrayLayers;
-				subresourceLayers.baseArrayLayer += 6)
+			    subresourceLayers.baseArrayLayer < arrayLayers;
+			    subresourceLayers.baseArrayLayer += 6)
 			{
 				device->getBlitter()->updateBorders(decompressedImage ? decompressedImage : this, subresourceLayers);
 			}
@@ -1016,7 +1012,7 @@
 	}
 }
 
-void Image::decodeETC2(const VkImageSubresourceRange& subresourceRange) const
+void Image::decodeETC2(const VkImageSubresourceRange &subresourceRange) const
 {
 	ASSERT(decompressedImage);
 
@@ -1048,8 +1044,8 @@
 
 			for(int32_t depth = 0; depth < static_cast<int32_t>(mipLevelExtent.depth); depth++)
 			{
-				uint8_t* source = static_cast<uint8_t*>(getTexelPointer({ 0, 0, depth }, subresourceLayers));
-				uint8_t* dest = static_cast<uint8_t*>(decompressedImage->getTexelPointer({ 0, 0, depth }, subresourceLayers));
+				uint8_t *source = static_cast<uint8_t *>(getTexelPointer({ 0, 0, depth }, subresourceLayers));
+				uint8_t *dest = static_cast<uint8_t *>(decompressedImage->getTexelPointer({ 0, 0, depth }, subresourceLayers));
 
 				if(fakeAlpha)
 				{
@@ -1058,13 +1054,13 @@
 				}
 
 				ETC_Decoder::Decode(source, dest, mipLevelExtent.width, mipLevelExtent.height,
-									mipLevelExtent.width, mipLevelExtent.height, pitchB, bytes, inputType);
+				                    mipLevelExtent.width, mipLevelExtent.height, pitchB, bytes, inputType);
 			}
 		}
 	}
 }
 
-void Image::decodeBC(const VkImageSubresourceRange& subresourceRange) const
+void Image::decodeBC(const VkImageSubresourceRange &subresourceRange) const
 {
 	ASSERT(decompressedImage);
 
@@ -1087,8 +1083,8 @@
 
 			for(int32_t depth = 0; depth < static_cast<int32_t>(mipLevelExtent.depth); depth++)
 			{
-				uint8_t* source = static_cast<uint8_t*>(getTexelPointer({ 0, 0, depth }, subresourceLayers));
-				uint8_t* dest = static_cast<uint8_t*>(decompressedImage->getTexelPointer({ 0, 0, depth }, subresourceLayers));
+				uint8_t *source = static_cast<uint8_t *>(getTexelPointer({ 0, 0, depth }, subresourceLayers));
+				uint8_t *dest = static_cast<uint8_t *>(decompressedImage->getTexelPointer({ 0, 0, depth }, subresourceLayers));
 
 				BC_Decoder::Decode(source, dest, mipLevelExtent.width, mipLevelExtent.height,
 				                   mipLevelExtent.width, mipLevelExtent.height, pitchB, bytes, n, noAlphaU);
@@ -1097,4 +1093,4 @@
 	}
 }
 
-} // namespace vk
+}  // namespace vk
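
Annotation: a note on getSampledImage above; the image that actually backs sampling depends on whether the view still treats the data as compressed. Roughly:

	// View format compressed (e.g. an ETC2 view of an ETC2 image):
	//   sampling uses decompressedImage, kept up to date by prepareForSampling().
	// View format uncompressed (the view reinterprets the raw block data):
	//   sampling uses the base image itself.
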
diff --git a/src/Vulkan/VkImage.hpp b/src/Vulkan/VkImage.hpp
index 358834f..d5303ea 100644
--- a/src/Vulkan/VkImage.hpp
+++ b/src/Vulkan/VkImage.hpp
@@ -15,11 +15,11 @@
 #ifndef VK_IMAGE_HPP_
 #define VK_IMAGE_HPP_
 
-#include "VkObject.hpp"
 #include "VkFormat.h"
+#include "VkObject.hpp"
 
 #ifdef __ANDROID__
-#include <vulkan/vk_android_native_buffer.h> // For VkSwapchainImageUsageFlagsANDROID and buffer_handle_t
+#	include <vulkan/vk_android_native_buffer.h>  // For VkSwapchainImageUsageFlagsANDROID and buffer_handle_t
 #endif
 
 namespace vk {
@@ -29,7 +29,8 @@
 class DeviceMemory;
 
 #ifdef __ANDROID__
-struct BackingMemory {
+struct BackingMemory
+{
 	int stride = 0;
 	bool externalMemory = false;
 	buffer_handle_t nativeHandle = nullptr;
@@ -40,101 +41,104 @@
 class Image : public Object<Image, VkImage>
 {
 public:
-	Image(const VkImageCreateInfo* pCreateInfo, void* mem, Device *device);
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	Image(const VkImageCreateInfo *pCreateInfo, void *mem, Device *device);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
 #ifdef __ANDROID__
 	VkResult prepareForExternalUseANDROID() const;
 #endif
 
-	static size_t ComputeRequiredAllocationSize(const VkImageCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkImageCreateInfo *pCreateInfo);
 
 	const VkMemoryRequirements getMemoryRequirements() const;
-	void getSubresourceLayout(const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout) const;
-	void bind(DeviceMemory* pDeviceMemory, VkDeviceSize pMemoryOffset);
-	void copyTo(Image* dstImage, const VkImageCopy& pRegion) const;
-	void copyTo(Buffer* dstBuffer, const VkBufferImageCopy& region);
-	void copyFrom(Buffer* srcBuffer, const VkBufferImageCopy& region);
+	void getSubresourceLayout(const VkImageSubresource *pSubresource, VkSubresourceLayout *pLayout) const;
+	void bind(DeviceMemory *pDeviceMemory, VkDeviceSize pMemoryOffset);
+	void copyTo(Image *dstImage, const VkImageCopy &pRegion) const;
+	void copyTo(Buffer *dstBuffer, const VkBufferImageCopy &region);
+	void copyFrom(Buffer *srcBuffer, const VkBufferImageCopy &region);
 
-	void blit(Image* dstImage, const VkImageBlit& region, VkFilter filter) const;
-	void blitToBuffer(VkImageSubresourceLayers subresource, VkOffset3D offset, VkExtent3D extent, uint8_t* dst, int bufferRowPitch, int bufferSlicePitch) const;
-	void resolve(Image* dstImage, const VkImageResolve& region) const;
-	void clear(const VkClearValue& clearValue, const vk::Format& viewFormat, const VkRect2D& renderArea, const VkImageSubresourceRange& subresourceRange);
-	void clear(const VkClearColorValue& color, const VkImageSubresourceRange& subresourceRange);
-	void clear(const VkClearDepthStencilValue& color, const VkImageSubresourceRange& subresourceRange);
+	void blit(Image *dstImage, const VkImageBlit &region, VkFilter filter) const;
+	void blitToBuffer(VkImageSubresourceLayers subresource, VkOffset3D offset, VkExtent3D extent, uint8_t *dst, int bufferRowPitch, int bufferSlicePitch) const;
+	void resolve(Image *dstImage, const VkImageResolve &region) const;
+	void clear(const VkClearValue &clearValue, const vk::Format &viewFormat, const VkRect2D &renderArea, const VkImageSubresourceRange &subresourceRange);
+	void clear(const VkClearColorValue &color, const VkImageSubresourceRange &subresourceRange);
+	void clear(const VkClearDepthStencilValue &color, const VkImageSubresourceRange &subresourceRange);
 
-	VkImageType              getImageType() const { return imageType; }
-	const Format&            getFormat() const { return format; }
-	Format                   getFormat(VkImageAspectFlagBits aspect) const;
-	uint32_t                 getArrayLayers() const { return arrayLayers; }
-	uint32_t                 getMipLevels() const { return mipLevels; }
-	VkImageUsageFlags        getUsage() const { return usage; }
-	uint32_t                 getLastLayerIndex(const VkImageSubresourceRange& subresourceRange) const;
-	uint32_t                 getLastMipLevel(const VkImageSubresourceRange& subresourceRange) const;
-	VkSampleCountFlagBits    getSampleCountFlagBits() const { return samples; }
-	VkExtent3D               getMipLevelExtent(VkImageAspectFlagBits aspect, uint32_t mipLevel) const;
-	int                      rowPitchBytes(VkImageAspectFlagBits aspect, uint32_t mipLevel) const;
-	int                      slicePitchBytes(VkImageAspectFlagBits aspect, uint32_t mipLevel) const;
-	void*                    getTexelPointer(const VkOffset3D& offset, const VkImageSubresourceLayers& subresource) const;
-	bool                     isCube() const;
-	bool                     is3DSlice() const;
-	uint8_t*                 end() const;
-	VkDeviceSize             getLayerSize(VkImageAspectFlagBits aspect) const;
-	VkDeviceSize             getMipLevelSize(VkImageAspectFlagBits aspect, uint32_t mipLevel) const;
-	bool                     canBindToMemory(DeviceMemory* pDeviceMemory) const;
+	VkImageType getImageType() const { return imageType; }
+	const Format &getFormat() const { return format; }
+	Format getFormat(VkImageAspectFlagBits aspect) const;
+	uint32_t getArrayLayers() const { return arrayLayers; }
+	uint32_t getMipLevels() const { return mipLevels; }
+	VkImageUsageFlags getUsage() const { return usage; }
+	uint32_t getLastLayerIndex(const VkImageSubresourceRange &subresourceRange) const;
+	uint32_t getLastMipLevel(const VkImageSubresourceRange &subresourceRange) const;
+	VkSampleCountFlagBits getSampleCountFlagBits() const { return samples; }
+	VkExtent3D getMipLevelExtent(VkImageAspectFlagBits aspect, uint32_t mipLevel) const;
+	int rowPitchBytes(VkImageAspectFlagBits aspect, uint32_t mipLevel) const;
+	int slicePitchBytes(VkImageAspectFlagBits aspect, uint32_t mipLevel) const;
+	void *getTexelPointer(const VkOffset3D &offset, const VkImageSubresourceLayers &subresource) const;
+	bool isCube() const;
+	bool is3DSlice() const;
+	uint8_t *end() const;
+	VkDeviceSize getLayerSize(VkImageAspectFlagBits aspect) const;
+	VkDeviceSize getMipLevelSize(VkImageAspectFlagBits aspect, uint32_t mipLevel) const;
+	bool canBindToMemory(DeviceMemory *pDeviceMemory) const;
 
-	void                     prepareForSampling(const VkImageSubresourceRange& subresourceRange);
-	const Image*             getSampledImage(const vk::Format& imageViewFormat) const;
+	void prepareForSampling(const VkImageSubresourceRange &subresourceRange);
+	const Image *getSampledImage(const vk::Format &imageViewFormat) const;
 
 #ifdef __ANDROID__
-	void                     setBackingMemory(BackingMemory& bm) { backingMemory = bm; }
-	bool                     hasExternalMemory() const { return backingMemory.externalMemory; }
-	VkDeviceMemory           getExternalMemory() const;
+	void setBackingMemory(BackingMemory &bm)
+	{
+		backingMemory = bm;
+	}
+	bool hasExternalMemory() const { return backingMemory.externalMemory; }
+	VkDeviceMemory getExternalMemory() const;
 #endif
 
 private:
-	void copy(Buffer* buffer, const VkBufferImageCopy& region, bool bufferIsSource);
+	void copy(Buffer *buffer, const VkBufferImageCopy &region, bool bufferIsSource);
 	VkDeviceSize getStorageSize(VkImageAspectFlags flags) const;
 	VkDeviceSize getMultiSampledLevelSize(VkImageAspectFlagBits aspect, uint32_t mipLevel) const;
 	VkDeviceSize getLayerOffset(VkImageAspectFlagBits aspect, uint32_t mipLevel) const;
 	VkDeviceSize getMemoryOffset(VkImageAspectFlagBits aspect, uint32_t mipLevel) const;
 	VkDeviceSize getMemoryOffset(VkImageAspectFlagBits aspect, uint32_t mipLevel, uint32_t layer) const;
-	VkDeviceSize texelOffsetBytesInStorage(const VkOffset3D& offset, const VkImageSubresourceLayers& subresource) const;
+	VkDeviceSize texelOffsetBytesInStorage(const VkOffset3D &offset, const VkImageSubresourceLayers &subresource) const;
 	VkDeviceSize getMemoryOffset(VkImageAspectFlagBits aspect) const;
-	VkExtent3D imageExtentInBlocks(const VkExtent3D& extent, VkImageAspectFlagBits aspect) const;
-	VkOffset3D imageOffsetInBlocks(const VkOffset3D& offset, VkImageAspectFlagBits aspect) const;
-	VkExtent2D bufferExtentInBlocks(const VkExtent2D& extent, const VkBufferImageCopy& region) const;
+	VkExtent3D imageExtentInBlocks(const VkExtent3D &extent, VkImageAspectFlagBits aspect) const;
+	VkOffset3D imageOffsetInBlocks(const VkOffset3D &offset, VkImageAspectFlagBits aspect) const;
+	VkExtent2D bufferExtentInBlocks(const VkExtent2D &extent, const VkBufferImageCopy &region) const;
 	VkFormat getClearFormat() const;
-	void clear(void* pixelData, VkFormat pixelFormat, const vk::Format& viewFormat, const VkImageSubresourceRange& subresourceRange, const VkRect2D& renderArea);
+	void clear(void *pixelData, VkFormat pixelFormat, const vk::Format &viewFormat, const VkImageSubresourceRange &subresourceRange, const VkRect2D &renderArea);
 	int borderSize() const;
-	void decodeETC2(const VkImageSubresourceRange& subresourceRange) const;
-	void decodeBC(const VkImageSubresourceRange& subresourceRange) const;
+	void decodeETC2(const VkImageSubresourceRange &subresourceRange) const;
+	void decodeBC(const VkImageSubresourceRange &subresourceRange) const;
 
-	const Device *const      device = nullptr;
-	DeviceMemory*            deviceMemory = nullptr;
-	VkDeviceSize             memoryOffset = 0;
-	VkImageCreateFlags       flags = 0;
-	VkImageType              imageType = VK_IMAGE_TYPE_2D;
-	Format                   format;
-	VkExtent3D               extent = {0, 0, 0};
-	uint32_t                 mipLevels = 0;
-	uint32_t                 arrayLayers = 0;
-	VkSampleCountFlagBits    samples = VK_SAMPLE_COUNT_1_BIT;
-	VkImageTiling            tiling = VK_IMAGE_TILING_OPTIMAL;
-	VkImageUsageFlags        usage = (VkImageUsageFlags)0;
-	Image*                   decompressedImage = nullptr;
+	const Device *const device = nullptr;
+	DeviceMemory *deviceMemory = nullptr;
+	VkDeviceSize memoryOffset = 0;
+	VkImageCreateFlags flags = 0;
+	VkImageType imageType = VK_IMAGE_TYPE_2D;
+	Format format;
+	VkExtent3D extent = { 0, 0, 0 };
+	uint32_t mipLevels = 0;
+	uint32_t arrayLayers = 0;
+	VkSampleCountFlagBits samples = VK_SAMPLE_COUNT_1_BIT;
+	VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL;
+	VkImageUsageFlags usage = (VkImageUsageFlags)0;
+	Image *decompressedImage = nullptr;
 #ifdef __ANDROID__
-	BackingMemory            backingMemory = {};
+	BackingMemory backingMemory = {};
 #endif
 
 	VkExternalMemoryHandleTypeFlags supportedExternalMemoryHandleTypes = (VkExternalMemoryHandleTypeFlags)0;
 };
 
-static inline Image* Cast(VkImage object)
+static inline Image *Cast(VkImage object)
 {
 	return Image::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_IMAGE_HPP_
+#endif  // VK_IMAGE_HPP_
diff --git a/src/Vulkan/VkImageView.cpp b/src/Vulkan/VkImageView.cpp
index 905e4a0..cc1d558 100644
--- a/src/Vulkan/VkImageView.cpp
+++ b/src/Vulkan/VkImageView.cpp
@@ -36,7 +36,7 @@
 		format.componentCount() < 4 ? VK_COMPONENT_SWIZZLE_ONE : VK_COMPONENT_SWIZZLE_A,
 	};
 
-	return {table[m.r], table[m.g], table[m.b], table[m.a]};
+	return { table[m.r], table[m.g], table[m.b], table[m.a] };
 }
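
Annotation: ResolveComponentMapping substitutes concrete swizzles for VK_COMPONENT_SWIZZLE_IDENTITY based on the format's component count; the visible alpha entry returns ONE when the format has fewer than four components. Assuming the r/g/b table entries analogously fall back to ZERO for channels the format lacks, an all-identity mapping of VK_FORMAT_R8_UNORM (componentCount() == 1) would resolve to { R, ZERO, ZERO, ONE }.
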
 
 VkImageSubresourceRange ResolveRemainingLevelsLayers(VkImageSubresourceRange range, const vk::Image *image)
@@ -56,20 +56,22 @@
 
 std::atomic<uint32_t> ImageView::nextID(1);
 
-ImageView::ImageView(const VkImageViewCreateInfo* pCreateInfo, void* mem, const vk::SamplerYcbcrConversion *ycbcrConversion) :
-	image(vk::Cast(pCreateInfo->image)), viewType(pCreateInfo->viewType), format(pCreateInfo->format),
-	components(ResolveComponentMapping(pCreateInfo->components, format)),
-	subresourceRange(ResolveRemainingLevelsLayers(pCreateInfo->subresourceRange, image)),
-	ycbcrConversion(ycbcrConversion)
+ImageView::ImageView(const VkImageViewCreateInfo *pCreateInfo, void *mem, const vk::SamplerYcbcrConversion *ycbcrConversion)
+    : image(vk::Cast(pCreateInfo->image))
+    , viewType(pCreateInfo->viewType)
+    , format(pCreateInfo->format)
+    , components(ResolveComponentMapping(pCreateInfo->components, format))
+    , subresourceRange(ResolveRemainingLevelsLayers(pCreateInfo->subresourceRange, image))
+    , ycbcrConversion(ycbcrConversion)
 {
 }
 
-size_t ImageView::ComputeRequiredAllocationSize(const VkImageViewCreateInfo* pCreateInfo)
+size_t ImageView::ComputeRequiredAllocationSize(const VkImageViewCreateInfo *pCreateInfo)
 {
 	return 0;
 }
 
-void ImageView::destroy(const VkAllocationCallbacks* pAllocator)
+void ImageView::destroy(const VkAllocationCallbacks *pAllocator)
 {
 }
 
@@ -79,40 +81,40 @@
 
 	switch(viewType)
 	{
-	case VK_IMAGE_VIEW_TYPE_1D:
-		return (imageType == VK_IMAGE_TYPE_1D) &&
-		       (subresourceRange.layerCount == 1);
-	case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
-		return imageType == VK_IMAGE_TYPE_1D;
-	case VK_IMAGE_VIEW_TYPE_2D:
-		return ((imageType == VK_IMAGE_TYPE_2D) ||
-		        ((imageType == VK_IMAGE_TYPE_3D) &&
-		         (imageArrayLayers == 1))) &&
-		       (subresourceRange.layerCount == 1);
-	case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
-		return (imageType == VK_IMAGE_TYPE_2D) ||
-		       ((imageType == VK_IMAGE_TYPE_3D) &&
-		        (imageArrayLayers == 1));
-	case VK_IMAGE_VIEW_TYPE_CUBE:
-		return image->isCube() &&
-		       (imageArrayLayers >= subresourceRange.layerCount) &&
-		       (subresourceRange.layerCount == 6);
-	case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
-		return image->isCube() &&
-		       (imageArrayLayers >= subresourceRange.layerCount) &&
-		       (subresourceRange.layerCount >= 6);
-	case VK_IMAGE_VIEW_TYPE_3D:
-		return (imageType == VK_IMAGE_TYPE_3D) &&
-		       (imageArrayLayers == 1) &&
-		       (subresourceRange.layerCount == 1);
-	default:
-		UNREACHABLE("Unexpected viewType %d", (int)viewType);
+		case VK_IMAGE_VIEW_TYPE_1D:
+			return (imageType == VK_IMAGE_TYPE_1D) &&
+			       (subresourceRange.layerCount == 1);
+		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
+			return imageType == VK_IMAGE_TYPE_1D;
+		case VK_IMAGE_VIEW_TYPE_2D:
+			return ((imageType == VK_IMAGE_TYPE_2D) ||
+			        ((imageType == VK_IMAGE_TYPE_3D) &&
+			         (imageArrayLayers == 1))) &&
+			       (subresourceRange.layerCount == 1);
+		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
+			return (imageType == VK_IMAGE_TYPE_2D) ||
+			       ((imageType == VK_IMAGE_TYPE_3D) &&
+			        (imageArrayLayers == 1));
+		case VK_IMAGE_VIEW_TYPE_CUBE:
+			return image->isCube() &&
+			       (imageArrayLayers >= subresourceRange.layerCount) &&
+			       (subresourceRange.layerCount == 6);
+		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
+			return image->isCube() &&
+			       (imageArrayLayers >= subresourceRange.layerCount) &&
+			       (subresourceRange.layerCount >= 6);
+		case VK_IMAGE_VIEW_TYPE_3D:
+			return (imageType == VK_IMAGE_TYPE_3D) &&
+			       (imageArrayLayers == 1) &&
+			       (subresourceRange.layerCount == 1);
+		default:
+			UNREACHABLE("Unexpected viewType %d", (int)viewType);
 	}
 
 	return false;
 }
 
-void ImageView::clear(const VkClearValue& clearValue, const VkImageAspectFlags aspectMask, const VkRect2D& renderArea)
+void ImageView::clear(const VkClearValue &clearValue, const VkImageAspectFlags aspectMask, const VkRect2D &renderArea)
 {
 	// Note: clearing ignores swizzling, so components is ignored.
 
@@ -131,7 +133,7 @@
 	image->clear(clearValue, format, renderArea, sr);
 }
 
-void ImageView::clear(const VkClearValue& clearValue, const VkImageAspectFlags aspectMask, const VkClearRect& renderArea)
+void ImageView::clear(const VkClearValue &clearValue, const VkImageAspectFlags aspectMask, const VkClearRect &renderArea)
 {
 	// Note: clearing ignores swizzling, so components is ignored.
 
@@ -161,13 +163,13 @@
 	{
 		uint32_t layer = sw::log2i(layerMask);
 		layerMask &= ~(1 << layer);
-		VkClearRect r = {renderArea, layer, 1};
+		VkClearRect r = { renderArea, layer, 1 };
 		r.baseArrayLayer = layer;
 		clear(clearValue, aspectMask, r);
 	}
 }
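
Annotation: the loop above visits each set bit of layerMask exactly once; sw::log2i yields the index of a set bit (assumed here to be floor(log2), i.e. the highest set bit), which is then cleared before the next iteration. A worked trace under that assumption:

	// layerMask = 0b0101 (layers 0 and 2 requested)
	//   iteration 1: layer = 2, layerMask -> 0b0001
	//   iteration 2: layer = 0, layerMask -> 0b0000, loop ends
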
 
-void ImageView::resolve(ImageView* resolveAttachment, int layer)
+void ImageView::resolve(ImageView *resolveAttachment, int layer)
 {
 	if((subresourceRange.levelCount != 1) || (resolveAttachment->subresourceRange.levelCount != 1))
 	{
@@ -175,16 +177,14 @@
 	}
 
 	VkImageCopy region;
-	region.srcSubresource =
-	{
+	region.srcSubresource = {
 		subresourceRange.aspectMask,
 		subresourceRange.baseMipLevel,
 		subresourceRange.baseArrayLayer + layer,
 		1
 	};
 	region.srcOffset = { 0, 0, 0 };
-	region.dstSubresource =
-	{
+	region.dstSubresource = {
 		resolveAttachment->subresourceRange.aspectMask,
 		resolveAttachment->subresourceRange.baseMipLevel,
 		resolveAttachment->subresourceRange.baseArrayLayer + layer,
@@ -197,7 +197,7 @@
 	image->copyTo(resolveAttachment->image, region);
 }
 
-void ImageView::resolve(ImageView* resolveAttachment)
+void ImageView::resolve(ImageView *resolveAttachment)
 {
 	if((subresourceRange.levelCount != 1) || (resolveAttachment->subresourceRange.levelCount != 1))
 	{
@@ -205,16 +205,14 @@
 	}
 
 	VkImageCopy region;
-	region.srcSubresource =
-	{
+	region.srcSubresource = {
 		subresourceRange.aspectMask,
 		subresourceRange.baseMipLevel,
 		subresourceRange.baseArrayLayer,
 		subresourceRange.layerCount
 	};
 	region.srcOffset = { 0, 0, 0 };
-	region.dstSubresource =
-	{
+	region.dstSubresource = {
 		resolveAttachment->subresourceRange.aspectMask,
 		resolveAttachment->subresourceRange.baseMipLevel,
 		resolveAttachment->subresourceRange.baseArrayLayer,
@@ -237,17 +235,17 @@
 	}
 }
 
-const Image* ImageView::getImage(Usage usage) const
+const Image *ImageView::getImage(Usage usage) const
 {
 	switch(usage)
 	{
-	case RAW:
-		return image;
-	case SAMPLING:
-		return image->getSampledImage(format);
-	default:
-		UNIMPLEMENTED("usage %d", int(usage));
-		return nullptr;
+		case RAW:
+			return image;
+		case SAMPLING:
+			return image->getSampledImage(format);
+		default:
+			UNIMPLEMENTED("usage %d", int(usage));
+			return nullptr;
 	}
 }
 
@@ -283,12 +281,11 @@
 	                                subresourceRange.baseMipLevel + mipLevel);
 }
 
-void *ImageView::getOffsetPointer(const VkOffset3D& offset, VkImageAspectFlagBits aspect, uint32_t mipLevel, uint32_t layer, Usage usage) const
+void *ImageView::getOffsetPointer(const VkOffset3D &offset, VkImageAspectFlagBits aspect, uint32_t mipLevel, uint32_t layer, Usage usage) const
 {
 	ASSERT(mipLevel < subresourceRange.levelCount);
 
-	VkImageSubresourceLayers imageSubresourceLayers =
-	{
+	VkImageSubresourceLayers imageSubresourceLayers = {
 		static_cast<VkImageAspectFlags>(aspect),
 		subresourceRange.baseMipLevel + mipLevel,
 		subresourceRange.baseArrayLayer + layer,
diff --git a/src/Vulkan/VkImageView.hpp b/src/Vulkan/VkImageView.hpp
index a922946..73fdcc6 100644
--- a/src/Vulkan/VkImageView.hpp
+++ b/src/Vulkan/VkImageView.hpp
@@ -17,8 +17,8 @@
 
 #include "VkDebug.hpp"
 #include "VkFormat.h"
-#include "VkObject.hpp"
 #include "VkImage.hpp"
+#include "VkObject.hpp"
 
 #include <atomic>
 
@@ -32,18 +32,22 @@
 	// Image usage:
 	// RAW: Use the base image as is
 	// SAMPLING: Image used for texture sampling
-	enum Usage { RAW, SAMPLING };
+	enum Usage
+	{
+		RAW,
+		SAMPLING
+	};
 
-	ImageView(const VkImageViewCreateInfo* pCreateInfo, void* mem, const vk::SamplerYcbcrConversion *ycbcrConversion);
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	ImageView(const VkImageViewCreateInfo *pCreateInfo, void *mem, const vk::SamplerYcbcrConversion *ycbcrConversion);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
-	static size_t ComputeRequiredAllocationSize(const VkImageViewCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkImageViewCreateInfo *pCreateInfo);
 
-	void clear(const VkClearValue& clearValues, VkImageAspectFlags aspectMask, const VkRect2D& renderArea);
-	void clear(const VkClearValue& clearValue, VkImageAspectFlags aspectMask, const VkClearRect& renderArea);
+	void clear(const VkClearValue &clearValues, VkImageAspectFlags aspectMask, const VkRect2D &renderArea);
+	void clear(const VkClearValue &clearValue, VkImageAspectFlags aspectMask, const VkClearRect &renderArea);
 	void clearWithLayerMask(const VkClearValue &clearValue, VkImageAspectFlags aspectMask, const VkRect2D &renderArea, uint32_t layerMask);
-	void resolve(ImageView* resolveAttachment);
-	void resolve(ImageView* resolveAttachment, int layer);
+	void resolve(ImageView *resolveAttachment);
+	void resolve(ImageView *resolveAttachment, int layer);
 	void resolveWithLayerMask(ImageView *resolveAttachment, uint32_t layerMask);
 
 	VkImageViewType getType() const { return viewType; }
@@ -59,15 +63,15 @@
 	{
 		switch(image->getSampleCountFlagBits())
 		{
-		case VK_SAMPLE_COUNT_1_BIT: return 1;
-		case VK_SAMPLE_COUNT_4_BIT: return 4;
-		default:
-			UNIMPLEMENTED("Sample count flags %d", image->getSampleCountFlagBits());
-			return 1;
+			case VK_SAMPLE_COUNT_1_BIT: return 1;
+			case VK_SAMPLE_COUNT_4_BIT: return 4;
+			default:
+				UNIMPLEMENTED("Sample count flags %d", image->getSampleCountFlagBits());
+				return 1;
 		}
 	}
 
-	void *getOffsetPointer(const VkOffset3D& offset, VkImageAspectFlagBits aspect, uint32_t mipLevel, uint32_t layer, Usage usage = RAW) const;
+	void *getOffsetPointer(const VkOffset3D &offset, VkImageAspectFlagBits aspect, uint32_t mipLevel, uint32_t layer, Usage usage = RAW) const;
 	bool hasDepthAspect() const { return (subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != 0; }
 	bool hasStencilAspect() const { return (subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0; }
 
@@ -81,15 +85,15 @@
 
 private:
 	static std::atomic<uint32_t> nextID;
-	friend class BufferView;	// ImageView/BufferView share the ID space above.
+	friend class BufferView;  // ImageView/BufferView share the ID space above.
 
-	bool                          imageTypesMatch(VkImageType imageType) const;
-	const Image*                  getImage(Usage usage) const;
+	bool imageTypesMatch(VkImageType imageType) const;
+	const Image *getImage(Usage usage) const;
 
-	Image *const                  image = nullptr;
-	const VkImageViewType         viewType = VK_IMAGE_VIEW_TYPE_2D;
-	const Format                  format;
-	const VkComponentMapping      components = {};
+	Image *const image = nullptr;
+	const VkImageViewType viewType = VK_IMAGE_VIEW_TYPE_2D;
+	const Format format;
+	const VkComponentMapping components = {};
 	const VkImageSubresourceRange subresourceRange = {};
 
 	const vk::SamplerYcbcrConversion *ycbcrConversion = nullptr;
@@ -99,18 +103,18 @@
 inline VkComponentMapping ResolveIdentityMapping(VkComponentMapping m)
 {
 	return {
-			(m.r == VK_COMPONENT_SWIZZLE_IDENTITY) ? VK_COMPONENT_SWIZZLE_R : m.r,
-			(m.g == VK_COMPONENT_SWIZZLE_IDENTITY) ? VK_COMPONENT_SWIZZLE_G : m.g,
-			(m.b == VK_COMPONENT_SWIZZLE_IDENTITY) ? VK_COMPONENT_SWIZZLE_B : m.b,
-			(m.a == VK_COMPONENT_SWIZZLE_IDENTITY) ? VK_COMPONENT_SWIZZLE_A : m.a,
-		};
+		(m.r == VK_COMPONENT_SWIZZLE_IDENTITY) ? VK_COMPONENT_SWIZZLE_R : m.r,
+		(m.g == VK_COMPONENT_SWIZZLE_IDENTITY) ? VK_COMPONENT_SWIZZLE_G : m.g,
+		(m.b == VK_COMPONENT_SWIZZLE_IDENTITY) ? VK_COMPONENT_SWIZZLE_B : m.b,
+		(m.a == VK_COMPONENT_SWIZZLE_IDENTITY) ? VK_COMPONENT_SWIZZLE_A : m.a,
+	};
 }
 
-static inline ImageView* Cast(VkImageView object)
+static inline ImageView *Cast(VkImageView object)
 {
 	return ImageView::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_IMAGE_VIEW_HPP_
+#endif  // VK_IMAGE_VIEW_HPP_
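
Annotation: a quick illustration of ResolveIdentityMapping above (variables are illustrative, not part of this change):

	VkComponentMapping m = { VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
	                         VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_IDENTITY };
	VkComponentMapping resolved = vk::ResolveIdentityMapping(m);
	// resolved == { R, G, R, A }: IDENTITY becomes the channel's own
	// swizzle; explicit swizzles pass through untouched.
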
diff --git a/src/Vulkan/VkInstance.cpp b/src/Vulkan/VkInstance.cpp
index 235a3eb..46a61be 100644
--- a/src/Vulkan/VkInstance.cpp
+++ b/src/Vulkan/VkInstance.cpp
@@ -17,17 +17,17 @@
 
 namespace vk {
 
-Instance::Instance(const VkInstanceCreateInfo* pCreateInfo, void* mem, VkPhysicalDevice physicalDevice)
-	: physicalDevice(physicalDevice)
+Instance::Instance(const VkInstanceCreateInfo *pCreateInfo, void *mem, VkPhysicalDevice physicalDevice)
+    : physicalDevice(physicalDevice)
 {
 }
 
-void Instance::destroy(const VkAllocationCallbacks* pAllocator)
+void Instance::destroy(const VkAllocationCallbacks *pAllocator)
 {
 	vk::destroy(physicalDevice, pAllocator);
 }
 
-VkResult Instance::getPhysicalDevices(uint32_t *pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices) const
+VkResult Instance::getPhysicalDevices(uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices) const
 {
 	if(!pPhysicalDevices)
 	{
@@ -47,7 +47,7 @@
 }
 
 VkResult Instance::getPhysicalDeviceGroups(uint32_t *pPhysicalDeviceGroupCount,
-                                           VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties) const
+                                           VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties) const
 {
 	if(!pPhysicalDeviceGroupProperties)
 	{
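
Annotation: getPhysicalDevices and getPhysicalDeviceGroups implement the standard Vulkan two-call idiom, where a null output pointer means "return the count". A minimal caller-side sketch through the public API (requires <vector>; error handling elided; `instance` is assumed to be a valid VkInstance):

	#include <vector>

	uint32_t count = 0;
	vkEnumeratePhysicalDevices(instance, &count, nullptr);         // first call: query the count
	std::vector<VkPhysicalDevice> devices(count);
	vkEnumeratePhysicalDevices(instance, &count, devices.data());  // second call: fill the array
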
diff --git a/src/Vulkan/VkInstance.hpp b/src/Vulkan/VkInstance.hpp
index 0234d9c..53d6764 100644
--- a/src/Vulkan/VkInstance.hpp
+++ b/src/Vulkan/VkInstance.hpp
@@ -24,14 +24,14 @@
 public:
 	static constexpr VkSystemAllocationScope GetAllocationScope() { return VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE; }
 
-	Instance(const VkInstanceCreateInfo* pCreateInfo, void* mem, VkPhysicalDevice physicalDevice);
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	Instance(const VkInstanceCreateInfo *pCreateInfo, void *mem, VkPhysicalDevice physicalDevice);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
-	static size_t ComputeRequiredAllocationSize(const VkInstanceCreateInfo*) { return 0; }
+	static size_t ComputeRequiredAllocationSize(const VkInstanceCreateInfo *) { return 0; }
 
-	VkResult getPhysicalDevices(uint32_t *pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices) const;
+	VkResult getPhysicalDevices(uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices) const;
 	VkResult getPhysicalDeviceGroups(uint32_t *pPhysicalDeviceGroupCount,
-                                 VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties) const;
+	                                 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties) const;
 
 private:
 	VkPhysicalDevice physicalDevice = VK_NULL_HANDLE;
@@ -39,11 +39,11 @@
 
 using DispatchableInstance = DispatchableObject<Instance, VkInstance>;
 
-static inline Instance* Cast(VkInstance object)
+static inline Instance *Cast(VkInstance object)
 {
 	return DispatchableInstance::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_INSTANCE_HPP_
+#endif  // VK_INSTANCE_HPP_
diff --git a/src/Vulkan/VkMemory.cpp b/src/Vulkan/VkMemory.cpp
index 128e648..76afb0e 100644
--- a/src/Vulkan/VkMemory.cpp
+++ b/src/Vulkan/VkMemory.cpp
@@ -15,24 +15,22 @@
 #ifndef VK_OBJECT_HPP_
 #define VK_OBJECT_HPP_
 
-#include "VkConfig.h"
 #include "VkMemory.h"
+#include "VkConfig.h"
 #include "System/Memory.hpp"
 
 namespace vk {
 
-void* allocate(size_t count, size_t alignment, const VkAllocationCallbacks* pAllocator, VkSystemAllocationScope allocationScope)
+void *allocate(size_t count, size_t alignment, const VkAllocationCallbacks *pAllocator, VkSystemAllocationScope allocationScope)
 {
-	return pAllocator ?
-		pAllocator->pfnAllocation(pAllocator->pUserData, count, alignment, allocationScope) :
-		sw::allocate(count, alignment);
+	return pAllocator ? pAllocator->pfnAllocation(pAllocator->pUserData, count, alignment, allocationScope) : sw::allocate(count, alignment);
 }
 
-void deallocate(void* ptr, const VkAllocationCallbacks* pAllocator)
+void deallocate(void *ptr, const VkAllocationCallbacks *pAllocator)
 {
 	pAllocator ? pAllocator->pfnFree(pAllocator->pUserData, ptr) : sw::deallocate(ptr);
 }
 
 }  // namespace vk
 
-#endif // VK_OBJECT_HPP_
+#endif  // VK_OBJECT_HPP_
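
Annotation: vk::allocate and vk::deallocate above route through the application's VkAllocationCallbacks when one is provided, and fall back to sw::allocate/sw::deallocate otherwise. A hedged sketch (myAlloc and myFree are hypothetical application callbacks with the standard pfnAllocation/pfnFree signatures):

	VkAllocationCallbacks cb = {};
	cb.pfnAllocation = myAlloc;  // hypothetical
	cb.pfnFree = myFree;         // hypothetical
	void *p = vk::allocate(64, 16, &cb, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);      // -> myAlloc
	vk::deallocate(p, &cb);                                                      // -> myFree
	void *q = vk::allocate(64, 16, nullptr, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);  // -> sw::allocate
	vk::deallocate(q, nullptr);                                                  // -> sw::deallocate
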
diff --git a/src/Vulkan/VkObject.hpp b/src/Vulkan/VkObject.hpp
index 105cc94..0d7010f 100644
--- a/src/Vulkan/VkObject.hpp
+++ b/src/Vulkan/VkObject.hpp
@@ -19,34 +19,34 @@
 #include "VkDebug.hpp"
 #include "VkMemory.h"
 
-#include <new>
 #include <Vulkan/VulkanPlatform.h>
 #include <vulkan/vk_icd.h>
+#include <new>
 
 namespace vk {
 
 template<typename T, typename VkT>
-static inline T* VkTtoT(VkT vkObject)
+static inline T *VkTtoT(VkT vkObject)
 {
-	return static_cast<T*>(static_cast<void*>(vkObject));
+	return static_cast<T *>(static_cast<void *>(vkObject));
 }
 
 template<typename T, typename VkT>
-static inline VkT TtoVkT(T* object)
+static inline VkT TtoVkT(T *object)
 {
 	return { static_cast<uint64_t>(reinterpret_cast<uintptr_t>(object)) };
 }
 
 // For use in the placement new to make it verbose that we're allocating an object using device memory
-static constexpr VkAllocationCallbacks* DEVICE_MEMORY = nullptr;
+static constexpr VkAllocationCallbacks *DEVICE_MEMORY = nullptr;
 
 template<typename T, typename VkT, typename CreateInfo, typename... ExtendedInfo>
-static VkResult Create(const VkAllocationCallbacks* pAllocator, const CreateInfo* pCreateInfo, VkT* outObject, ExtendedInfo... extendedInfo)
+static VkResult Create(const VkAllocationCallbacks *pAllocator, const CreateInfo *pCreateInfo, VkT *outObject, ExtendedInfo... extendedInfo)
 {
 	*outObject = VK_NULL_HANDLE;
 
 	size_t size = T::ComputeRequiredAllocationSize(pCreateInfo);
-	void* memory = nullptr;
+	void *memory = nullptr;
 	if(size)
 	{
 		memory = vk::allocate(size, REQUIRED_MEMORY_ALIGNMENT, pAllocator, T::GetAllocationScope());
@@ -56,14 +56,14 @@
 		}
 	}
 
-	void* objectMemory = vk::allocate(sizeof(T), alignof(T), pAllocator, T::GetAllocationScope());
+	void *objectMemory = vk::allocate(sizeof(T), alignof(T), pAllocator, T::GetAllocationScope());
 	if(!objectMemory)
 	{
 		vk::deallocate(memory, pAllocator);
 		return VK_ERROR_OUT_OF_HOST_MEMORY;
 	}
 
-	auto object = new (objectMemory) T(pCreateInfo, memory, extendedInfo...);
+	auto object = new(objectMemory) T(pCreateInfo, memory, extendedInfo...);
 
 	if(!object)
 	{
@@ -85,10 +85,10 @@
 public:
 	using VkType = VkT;
 
-	void destroy(const VkAllocationCallbacks* pAllocator) {} // Method defined by objects to delete their content, if necessary
+	void destroy(const VkAllocationCallbacks *pAllocator) {}  // Method defined by objects to delete their content, if necessary
 
 	template<typename CreateInfo, typename... ExtendedInfo>
-	static VkResult Create(const VkAllocationCallbacks* pAllocator, const CreateInfo* pCreateInfo, VkT* outObject, ExtendedInfo... extendedInfo)
+	static VkResult Create(const VkAllocationCallbacks *pAllocator, const CreateInfo *pCreateInfo, VkT *outObject, ExtendedInfo... extendedInfo)
 	{
 		return vk::Create<T, VkT, CreateInfo>(pAllocator, pCreateInfo, outObject, extendedInfo...);
 	}
@@ -104,10 +104,10 @@
 	{
 		// The static_cast<T*> is used to make sure the returned pointer points to the
 		// beginning of the object, even if the derived class uses multiple inheritance
-		return vk::TtoVkT<T, VkT>(static_cast<T*>(this));
+		return vk::TtoVkT<T, VkT>(static_cast<T *>(this));
 	}
 
-	static inline T* Cast(VkT vkObject)
+	static inline T *Cast(VkT vkObject)
 	{
 		return vk::VkTtoT<T, VkT>(vkObject);
 	}
@@ -123,40 +123,40 @@
 public:
 	static constexpr VkSystemAllocationScope GetAllocationScope() { return T::GetAllocationScope(); }
 
-	template<typename ...Args>
-	DispatchableObject(Args... args) : object(args...)
+	template<typename... Args>
+	DispatchableObject(Args... args)
+	    : object(args...)
 	{
 	}
 
 	~DispatchableObject() = delete;
 
-	void destroy(const VkAllocationCallbacks* pAllocator)
+	void destroy(const VkAllocationCallbacks *pAllocator)
 	{
 		object.destroy(pAllocator);
 	}
 
-	void operator delete(void* ptr, const VkAllocationCallbacks* pAllocator)
+	void operator delete(void *ptr, const VkAllocationCallbacks *pAllocator)
 	{
 		// Should never happen
 		ASSERT(false);
 	}
 
 	template<typename CreateInfo, typename... ExtendedInfo>
-	static VkResult Create(const VkAllocationCallbacks* pAllocator, const CreateInfo* pCreateInfo, VkT* outObject, ExtendedInfo... extendedInfo)
+	static VkResult Create(const VkAllocationCallbacks *pAllocator, const CreateInfo *pCreateInfo, VkT *outObject, ExtendedInfo... extendedInfo)
 	{
 		return vk::Create<DispatchableObject<T, VkT>, VkT, CreateInfo>(pAllocator, pCreateInfo, outObject, extendedInfo...);
 	}
 
 	template<typename CreateInfo>
-	static size_t ComputeRequiredAllocationSize(const CreateInfo* pCreateInfo)
+	static size_t ComputeRequiredAllocationSize(const CreateInfo *pCreateInfo)
 	{
 		return T::ComputeRequiredAllocationSize(pCreateInfo);
 	}
 
-	static inline T* Cast(VkT vkObject)
+	static inline T *Cast(VkT vkObject)
 	{
-		return (vkObject == VK_NULL_HANDLE) ? nullptr :
-		       &(reinterpret_cast<DispatchableObject<T, VkT>*>(vkObject)->object);
+		return (vkObject == VK_NULL_HANDLE) ? nullptr : &(reinterpret_cast<DispatchableObject<T, VkT> *>(vkObject)->object);
 	}
 
 	operator VkT()
@@ -167,4 +167,4 @@
 
 }  // namespace vk
 
-#endif // VK_OBJECT_HPP_
+#endif  // VK_OBJECT_HPP_
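
Annotation: the handle casts defined in this file are a simple pointer/uint64 round-trip. A worked sketch, assuming the uint64_t-backed non-dispatchable handle types from VulkanPlatform.h:

	// vk::Image *img   = <pointer to an existing image>;
	// VkImage    h     = vk::TtoVkT<vk::Image, VkImage>(img);  // widen pointer to handle
	// vk::Image *again = vk::VkTtoT<vk::Image, VkImage>(h);    // recover the pointer
	// again == img, and vk::Cast(h) is exactly this VkTtoT call.
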
diff --git a/src/Vulkan/VkPhysicalDevice.cpp b/src/Vulkan/VkPhysicalDevice.cpp
index e30b6ac..164a12f 100644
--- a/src/Vulkan/VkPhysicalDevice.cpp
+++ b/src/Vulkan/VkPhysicalDevice.cpp
@@ -15,14 +15,14 @@
 #include "VkPhysicalDevice.hpp"
 
 #include "VkConfig.h"
-#include "Pipeline/SpirvShader.hpp" // sw::SIMD::Width
+#include "Pipeline/SpirvShader.hpp"  // sw::SIMD::Width
 
-#include <limits>
 #include <cstring>
+#include <limits>
 
 namespace vk {
 
-static void setExternalMemoryProperties(VkExternalMemoryHandleTypeFlagBits handleType, VkExternalMemoryProperties* properties)
+static void setExternalMemoryProperties(VkExternalMemoryHandleTypeFlagBits handleType, VkExternalMemoryProperties *properties)
 {
 #if SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
 	if(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
@@ -38,14 +38,13 @@
 	properties->externalMemoryFeatures = 0;
 }
 
-PhysicalDevice::PhysicalDevice(const void*, void* mem)
+PhysicalDevice::PhysicalDevice(const void *, void *mem)
 {
 }
 
-const VkPhysicalDeviceFeatures& PhysicalDevice::getFeatures() const
+const VkPhysicalDeviceFeatures &PhysicalDevice::getFeatures() const
 {
-	static const VkPhysicalDeviceFeatures features
-	{
+	static const VkPhysicalDeviceFeatures features{
 		VK_TRUE,   // robustBufferAccess
 		VK_TRUE,   // fullDrawIndexUint32
 		VK_TRUE,   // imageCubeArray
@@ -106,12 +105,12 @@
 	return features;
 }
 
-void PhysicalDevice::getFeatures(VkPhysicalDeviceSamplerYcbcrConversionFeatures* features) const
+void PhysicalDevice::getFeatures(VkPhysicalDeviceSamplerYcbcrConversionFeatures *features) const
 {
 	features->samplerYcbcrConversion = VK_TRUE;
 }
 
-void PhysicalDevice::getFeatures(VkPhysicalDevice16BitStorageFeatures* features) const
+void PhysicalDevice::getFeatures(VkPhysicalDevice16BitStorageFeatures *features) const
 {
 	features->storageBuffer16BitAccess = VK_FALSE;
 	features->storageInputOutput16 = VK_FALSE;
@@ -119,47 +118,47 @@
 	features->uniformAndStorageBuffer16BitAccess = VK_FALSE;
 }
 
-void PhysicalDevice::getFeatures(VkPhysicalDeviceVariablePointerFeatures* features) const
+void PhysicalDevice::getFeatures(VkPhysicalDeviceVariablePointerFeatures *features) const
 {
 	features->variablePointersStorageBuffer = VK_FALSE;
 	features->variablePointers = VK_FALSE;
 }
 
-void PhysicalDevice::getFeatures(VkPhysicalDevice8BitStorageFeaturesKHR* features) const
+void PhysicalDevice::getFeatures(VkPhysicalDevice8BitStorageFeaturesKHR *features) const
 {
 	features->storageBuffer8BitAccess = VK_FALSE;
 	features->uniformAndStorageBuffer8BitAccess = VK_FALSE;
 	features->storagePushConstant8 = VK_FALSE;
 }
 
-void PhysicalDevice::getFeatures(VkPhysicalDeviceMultiviewFeatures* features) const
+void PhysicalDevice::getFeatures(VkPhysicalDeviceMultiviewFeatures *features) const
 {
 	features->multiview = VK_TRUE;
 	features->multiviewGeometryShader = VK_FALSE;
 	features->multiviewTessellationShader = VK_FALSE;
 }
 
-void PhysicalDevice::getFeatures(VkPhysicalDeviceProtectedMemoryFeatures* features) const
+void PhysicalDevice::getFeatures(VkPhysicalDeviceProtectedMemoryFeatures *features) const
 {
 	features->protectedMemory = VK_FALSE;
 }
 
-void PhysicalDevice::getFeatures(VkPhysicalDeviceShaderDrawParameterFeatures* features) const
+void PhysicalDevice::getFeatures(VkPhysicalDeviceShaderDrawParameterFeatures *features) const
 {
 	features->shaderDrawParameters = VK_FALSE;
 }
 
-void PhysicalDevice::getFeatures(VkPhysicalDeviceLineRasterizationFeaturesEXT* features) const
+void PhysicalDevice::getFeatures(VkPhysicalDeviceLineRasterizationFeaturesEXT *features) const
 {
 	features->rectangularLines = VK_TRUE;
-    features->bresenhamLines = VK_TRUE;
-    features->smoothLines = VK_FALSE;
-    features->stippledRectangularLines = VK_FALSE;
-    features->stippledBresenhamLines = VK_FALSE;
-    features->stippledSmoothLines = VK_FALSE;
+	features->bresenhamLines = VK_TRUE;
+	features->smoothLines = VK_FALSE;
+	features->stippledRectangularLines = VK_FALSE;
+	features->stippledBresenhamLines = VK_FALSE;
+	features->stippledSmoothLines = VK_FALSE;
 }
 
-void PhysicalDevice::getFeatures(VkPhysicalDeviceProvokingVertexFeaturesEXT* features) const
+void PhysicalDevice::getFeatures(VkPhysicalDeviceProvokingVertexFeaturesEXT *features) const
 {
 	features->provokingVertexLast = VK_TRUE;
 }
@@ -169,220 +168,218 @@
 	return VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT;
 }
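
Annotation: getSampleCounts returns a bitmask, so a requested count is tested with a bitwise AND. A small sketch of the check:

	VkSampleCountFlags counts = getSampleCounts();       // VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT
	bool has4x = (counts & VK_SAMPLE_COUNT_4_BIT) != 0;  // true
	bool has8x = (counts & VK_SAMPLE_COUNT_8_BIT) != 0;  // false: 8x is not advertised
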
 
-const VkPhysicalDeviceLimits& PhysicalDevice::getLimits() const
+const VkPhysicalDeviceLimits &PhysicalDevice::getLimits() const
 {
 	VkSampleCountFlags sampleCounts = getSampleCounts();
 
-	static const VkPhysicalDeviceLimits limits =
-	{
-		1 << (vk::MAX_IMAGE_LEVELS_1D - 1), // maxImageDimension1D
-		1 << (vk::MAX_IMAGE_LEVELS_2D - 1), // maxImageDimension2D
-		1 << (vk::MAX_IMAGE_LEVELS_3D - 1), // maxImageDimension3D
-		1 << (vk::MAX_IMAGE_LEVELS_CUBE - 1), // maxImageDimensionCube
-		vk::MAX_IMAGE_ARRAY_LAYERS, // maxImageArrayLayers
-		65536, // maxTexelBufferElements
-		16384, // maxUniformBufferRange
-		(1ul << 27), // maxStorageBufferRange
-		vk::MAX_PUSH_CONSTANT_SIZE, // maxPushConstantsSize
-		4096, // maxMemoryAllocationCount
-		4000, // maxSamplerAllocationCount
-		131072, // bufferImageGranularity
-		0, // sparseAddressSpaceSize (unsupported)
-		MAX_BOUND_DESCRIPTOR_SETS, // maxBoundDescriptorSets
-		16, // maxPerStageDescriptorSamplers
-		14, // maxPerStageDescriptorUniformBuffers
-		16, // maxPerStageDescriptorStorageBuffers
-		16, // maxPerStageDescriptorSampledImages
-		4, // maxPerStageDescriptorStorageImages
-		4, // maxPerStageDescriptorInputAttachments
-		128, // maxPerStageResources
-		96, // maxDescriptorSetSamplers
-		72, // maxDescriptorSetUniformBuffers
-		MAX_DESCRIPTOR_SET_UNIFORM_BUFFERS_DYNAMIC, // maxDescriptorSetUniformBuffersDynamic
-		24, // maxDescriptorSetStorageBuffers
-		MAX_DESCRIPTOR_SET_STORAGE_BUFFERS_DYNAMIC, // maxDescriptorSetStorageBuffersDynamic
-		96, // maxDescriptorSetSampledImages
-		24, // maxDescriptorSetStorageImages
-		4, // maxDescriptorSetInputAttachments
-		16, // maxVertexInputAttributes
-		vk::MAX_VERTEX_INPUT_BINDINGS, // maxVertexInputBindings
-		2047, // maxVertexInputAttributeOffset
-		2048, // maxVertexInputBindingStride
-		sw::MAX_INTERFACE_COMPONENTS, // maxVertexOutputComponents
-		0, // maxTessellationGenerationLevel (unsupported)
-		0, // maxTessellationPatchSize (unsupported)
-		0, // maxTessellationControlPerVertexInputComponents (unsupported)
-		0, // maxTessellationControlPerVertexOutputComponents (unsupported)
-		0, // maxTessellationControlPerPatchOutputComponents (unsupported)
-		0, // maxTessellationControlTotalOutputComponents (unsupported)
-		0, // maxTessellationEvaluationInputComponents (unsupported)
-		0, // maxTessellationEvaluationOutputComponents (unsupported)
-		0, // maxGeometryShaderInvocations (unsupported)
-		0, // maxGeometryInputComponents (unsupported)
-		0, // maxGeometryOutputComponents (unsupported)
-		0, // maxGeometryOutputVertices (unsupported)
-		0, // maxGeometryTotalOutputComponents (unsupported)
-		sw::MAX_INTERFACE_COMPONENTS, // maxFragmentInputComponents
-		4, // maxFragmentOutputAttachments
-		1, // maxFragmentDualSrcAttachments
-		4, // maxFragmentCombinedOutputResources
-		16384, // maxComputeSharedMemorySize
-		{ 65535, 65535, 65535 }, // maxComputeWorkGroupCount[3]
-		128, // maxComputeWorkGroupInvocations
-		{ 128, 128, 64 }, // maxComputeWorkGroupSize[3]
-		vk::SUBPIXEL_PRECISION_BITS, // subPixelPrecisionBits
-		4, // subTexelPrecisionBits
-		4, // mipmapPrecisionBits
-		UINT32_MAX, // maxDrawIndexedIndexValue
-		UINT32_MAX, // maxDrawIndirectCount
-		vk::MAX_SAMPLER_LOD_BIAS, // maxSamplerLodBias
-		16, // maxSamplerAnisotropy
-		16, // maxViewports
-		{ 4096, 4096 }, // maxViewportDimensions[2]
-		{ -8192, 8191 }, // viewportBoundsRange[2]
-		0, // viewportSubPixelBits
-		64, // minMemoryMapAlignment
-		vk::MIN_TEXEL_BUFFER_OFFSET_ALIGNMENT, // minTexelBufferOffsetAlignment
-		vk::MIN_UNIFORM_BUFFER_OFFSET_ALIGNMENT, // minUniformBufferOffsetAlignment
-		vk::MIN_STORAGE_BUFFER_OFFSET_ALIGNMENT, // minStorageBufferOffsetAlignment
-		sw::MIN_TEXEL_OFFSET, // minTexelOffset
-		sw::MAX_TEXEL_OFFSET, // maxTexelOffset
-		sw::MIN_TEXEL_OFFSET, // minTexelGatherOffset
-		sw::MAX_TEXEL_OFFSET, // maxTexelGatherOffset
-		-0.5, // minInterpolationOffset
-		0.5, // maxInterpolationOffset
-		4, // subPixelInterpolationOffsetBits
-		4096, // maxFramebufferWidth
-		4096, // maxFramebufferHeight
-		256, // maxFramebufferLayers
-		sampleCounts, // framebufferColorSampleCounts
-		sampleCounts, // framebufferDepthSampleCounts
-		sampleCounts, // framebufferStencilSampleCounts
-		sampleCounts, // framebufferNoAttachmentsSampleCounts
-		4,  // maxColorAttachments
-		sampleCounts, // sampledImageColorSampleCounts
-		VK_SAMPLE_COUNT_1_BIT, // sampledImageIntegerSampleCounts
-		sampleCounts, // sampledImageDepthSampleCounts
-		sampleCounts, // sampledImageStencilSampleCounts
-		VK_SAMPLE_COUNT_1_BIT, // storageImageSampleCounts (unsupported)
-		1, // maxSampleMaskWords
-		VK_FALSE, // timestampComputeAndGraphics
-		60, // timestampPeriod
-		sw::MAX_CLIP_DISTANCES, // maxClipDistances
-		sw::MAX_CULL_DISTANCES, // maxCullDistances
-		sw::MAX_CLIP_DISTANCES + sw::MAX_CULL_DISTANCES, // maxCombinedClipAndCullDistances
-		2, // discreteQueuePriorities
-		{ 1.0, vk::MAX_POINT_SIZE }, // pointSizeRange[2]
-		{ 1.0, 1.0 }, // lineWidthRange[2] (unsupported)
-		0.0, // pointSizeGranularity (unsupported)
-		0.0, // lineWidthGranularity (unsupported)
-		VK_TRUE,  // strictLines
-		VK_TRUE,  // standardSampleLocations
-		64, // optimalBufferCopyOffsetAlignment
-		64, // optimalBufferCopyRowPitchAlignment
-		256, // nonCoherentAtomSize
+	static const VkPhysicalDeviceLimits limits = {
+		1 << (vk::MAX_IMAGE_LEVELS_1D - 1),               // maxImageDimension1D
+		1 << (vk::MAX_IMAGE_LEVELS_2D - 1),               // maxImageDimension2D
+		1 << (vk::MAX_IMAGE_LEVELS_3D - 1),               // maxImageDimension3D
+		1 << (vk::MAX_IMAGE_LEVELS_CUBE - 1),             // maxImageDimensionCube
+		vk::MAX_IMAGE_ARRAY_LAYERS,                       // maxImageArrayLayers
+		65536,                                            // maxTexelBufferElements
+		16384,                                            // maxUniformBufferRange
+		(1ul << 27),                                      // maxStorageBufferRange
+		vk::MAX_PUSH_CONSTANT_SIZE,                       // maxPushConstantsSize
+		4096,                                             // maxMemoryAllocationCount
+		4000,                                             // maxSamplerAllocationCount
+		131072,                                           // bufferImageGranularity
+		0,                                                // sparseAddressSpaceSize (unsupported)
+		MAX_BOUND_DESCRIPTOR_SETS,                        // maxBoundDescriptorSets
+		16,                                               // maxPerStageDescriptorSamplers
+		14,                                               // maxPerStageDescriptorUniformBuffers
+		16,                                               // maxPerStageDescriptorStorageBuffers
+		16,                                               // maxPerStageDescriptorSampledImages
+		4,                                                // maxPerStageDescriptorStorageImages
+		4,                                                // maxPerStageDescriptorInputAttachments
+		128,                                              // maxPerStageResources
+		96,                                               // maxDescriptorSetSamplers
+		72,                                               // maxDescriptorSetUniformBuffers
+		MAX_DESCRIPTOR_SET_UNIFORM_BUFFERS_DYNAMIC,       // maxDescriptorSetUniformBuffersDynamic
+		24,                                               // maxDescriptorSetStorageBuffers
+		MAX_DESCRIPTOR_SET_STORAGE_BUFFERS_DYNAMIC,       // maxDescriptorSetStorageBuffersDynamic
+		96,                                               // maxDescriptorSetSampledImages
+		24,                                               // maxDescriptorSetStorageImages
+		4,                                                // maxDescriptorSetInputAttachments
+		16,                                               // maxVertexInputAttributes
+		vk::MAX_VERTEX_INPUT_BINDINGS,                    // maxVertexInputBindings
+		2047,                                             // maxVertexInputAttributeOffset
+		2048,                                             // maxVertexInputBindingStride
+		sw::MAX_INTERFACE_COMPONENTS,                     // maxVertexOutputComponents
+		0,                                                // maxTessellationGenerationLevel (unsupported)
+		0,                                                // maxTessellationPatchSize (unsupported)
+		0,                                                // maxTessellationControlPerVertexInputComponents (unsupported)
+		0,                                                // maxTessellationControlPerVertexOutputComponents (unsupported)
+		0,                                                // maxTessellationControlPerPatchOutputComponents (unsupported)
+		0,                                                // maxTessellationControlTotalOutputComponents (unsupported)
+		0,                                                // maxTessellationEvaluationInputComponents (unsupported)
+		0,                                                // maxTessellationEvaluationOutputComponents (unsupported)
+		0,                                                // maxGeometryShaderInvocations (unsupported)
+		0,                                                // maxGeometryInputComponents (unsupported)
+		0,                                                // maxGeometryOutputComponents (unsupported)
+		0,                                                // maxGeometryOutputVertices (unsupported)
+		0,                                                // maxGeometryTotalOutputComponents (unsupported)
+		sw::MAX_INTERFACE_COMPONENTS,                     // maxFragmentInputComponents
+		4,                                                // maxFragmentOutputAttachments
+		1,                                                // maxFragmentDualSrcAttachments
+		4,                                                // maxFragmentCombinedOutputResources
+		16384,                                            // maxComputeSharedMemorySize
+		{ 65535, 65535, 65535 },                          // maxComputeWorkGroupCount[3]
+		128,                                              // maxComputeWorkGroupInvocations
+		{ 128, 128, 64 },                                 // maxComputeWorkGroupSize[3]
+		vk::SUBPIXEL_PRECISION_BITS,                      // subPixelPrecisionBits
+		4,                                                // subTexelPrecisionBits
+		4,                                                // mipmapPrecisionBits
+		UINT32_MAX,                                       // maxDrawIndexedIndexValue
+		UINT32_MAX,                                       // maxDrawIndirectCount
+		vk::MAX_SAMPLER_LOD_BIAS,                         // maxSamplerLodBias
+		16,                                               // maxSamplerAnisotropy
+		16,                                               // maxViewports
+		{ 4096, 4096 },                                   // maxViewportDimensions[2]
+		{ -8192, 8191 },                                  // viewportBoundsRange[2]
+		0,                                                // viewportSubPixelBits
+		64,                                               // minMemoryMapAlignment
+		vk::MIN_TEXEL_BUFFER_OFFSET_ALIGNMENT,            // minTexelBufferOffsetAlignment
+		vk::MIN_UNIFORM_BUFFER_OFFSET_ALIGNMENT,          // minUniformBufferOffsetAlignment
+		vk::MIN_STORAGE_BUFFER_OFFSET_ALIGNMENT,          // minStorageBufferOffsetAlignment
+		sw::MIN_TEXEL_OFFSET,                             // minTexelOffset
+		sw::MAX_TEXEL_OFFSET,                             // maxTexelOffset
+		sw::MIN_TEXEL_OFFSET,                             // minTexelGatherOffset
+		sw::MAX_TEXEL_OFFSET,                             // maxTexelGatherOffset
+		-0.5,                                             // minInterpolationOffset
+		0.5,                                              // maxInterpolationOffset
+		4,                                                // subPixelInterpolationOffsetBits
+		4096,                                             // maxFramebufferWidth
+		4096,                                             // maxFramebufferHeight
+		256,                                              // maxFramebufferLayers
+		sampleCounts,                                     // framebufferColorSampleCounts
+		sampleCounts,                                     // framebufferDepthSampleCounts
+		sampleCounts,                                     // framebufferStencilSampleCounts
+		sampleCounts,                                     // framebufferNoAttachmentsSampleCounts
+		4,                                                // maxColorAttachments
+		sampleCounts,                                     // sampledImageColorSampleCounts
+		VK_SAMPLE_COUNT_1_BIT,                            // sampledImageIntegerSampleCounts
+		sampleCounts,                                     // sampledImageDepthSampleCounts
+		sampleCounts,                                     // sampledImageStencilSampleCounts
+		VK_SAMPLE_COUNT_1_BIT,                            // storageImageSampleCounts (unsupported)
+		1,                                                // maxSampleMaskWords
+		VK_FALSE,                                         // timestampComputeAndGraphics
+		60,                                               // timestampPeriod
+		sw::MAX_CLIP_DISTANCES,                           // maxClipDistances
+		sw::MAX_CULL_DISTANCES,                           // maxCullDistances
+		sw::MAX_CLIP_DISTANCES + sw::MAX_CULL_DISTANCES,  // maxCombinedClipAndCullDistances
+		2,                                                // discreteQueuePriorities
+		{ 1.0, vk::MAX_POINT_SIZE },                      // pointSizeRange[2]
+		{ 1.0, 1.0 },                                     // lineWidthRange[2] (unsupported)
+		0.0,                                              // pointSizeGranularity (unsupported)
+		0.0,                                              // lineWidthGranularity (unsupported)
+		VK_TRUE,                                          // strictLines
+		VK_TRUE,                                          // standardSampleLocations
+		64,                                               // optimalBufferCopyOffsetAlignment
+		64,                                               // optimalBufferCopyRowPitchAlignment
+		256,                                              // nonCoherentAtomSize
 	};
 
 	return limits;
 }
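
These limits reach applications through the core properties query. A minimal consumer-side sketch, assuming a valid physicalDevice handle from vkEnumeratePhysicalDevices:

    VkPhysicalDeviceProperties props{};
    vkGetPhysicalDeviceProperties(physicalDevice, &props);
    const VkPhysicalDeviceLimits &limits = props.limits;
    // 1 << (MAX_IMAGE_LEVELS_2D - 1) as reported above must meet the spec minimum of 4096.
    assert(limits.maxImageDimension2D >= 4096);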
 
-const VkPhysicalDeviceProperties& PhysicalDevice::getProperties() const
+const VkPhysicalDeviceProperties &PhysicalDevice::getProperties() const
 {
-	static const VkPhysicalDeviceProperties properties
-	{
+	static const VkPhysicalDeviceProperties properties{
 		API_VERSION,
 		DRIVER_VERSION,
 		VENDOR_ID,
 		DEVICE_ID,
-		VK_PHYSICAL_DEVICE_TYPE_CPU, // deviceType
-		SWIFTSHADER_DEVICE_NAME, // deviceName
-		SWIFTSHADER_UUID, // pipelineCacheUUID
-		getLimits(), // limits
-		{} // sparseProperties
+		VK_PHYSICAL_DEVICE_TYPE_CPU,  // deviceType
+		SWIFTSHADER_DEVICE_NAME,      // deviceName
+		SWIFTSHADER_UUID,             // pipelineCacheUUID
+		getLimits(),                  // limits
+		{}                            // sparseProperties
 	};
 
 	return properties;
 }
 
-void PhysicalDevice::getProperties(VkPhysicalDeviceIDProperties* properties) const
+void PhysicalDevice::getProperties(VkPhysicalDeviceIDProperties *properties) const
 {
 	memset(properties->deviceUUID, 0, VK_UUID_SIZE);
 	memset(properties->driverUUID, 0, VK_UUID_SIZE);
 	memset(properties->deviceLUID, 0, VK_LUID_SIZE);
 
 	memcpy(properties->deviceUUID, SWIFTSHADER_UUID, VK_UUID_SIZE);
-	*((uint64_t*)properties->driverUUID) = DRIVER_VERSION;
+	*((uint64_t *)properties->driverUUID) = DRIVER_VERSION;
 
 	properties->deviceNodeMask = 0;
 	properties->deviceLUIDValid = VK_FALSE;
 }
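
The cast above type-puns the first eight bytes of the sixteen-byte driverUUID to store DRIVER_VERSION. A memcpy-based sketch expresses the same store without assuming the array's alignment suits a uint64_t write:

    uint64_t driverVersion = DRIVER_VERSION;
    static_assert(sizeof(driverVersion) <= VK_UUID_SIZE, "driverUUID holds VK_UUID_SIZE (16) bytes");
    memcpy(properties->driverUUID, &driverVersion, sizeof(driverVersion));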
 
-void PhysicalDevice::getProperties(VkPhysicalDeviceMaintenance3Properties* properties) const
+void PhysicalDevice::getProperties(VkPhysicalDeviceMaintenance3Properties *properties) const
 {
 	properties->maxMemoryAllocationSize = 1u << 31;
 	properties->maxPerSetDescriptors = 1024;
 }
 
-void PhysicalDevice::getProperties(VkPhysicalDeviceMultiviewProperties* properties) const
+void PhysicalDevice::getProperties(VkPhysicalDeviceMultiviewProperties *properties) const
 {
 	properties->maxMultiviewViewCount = 6;
-	properties->maxMultiviewInstanceIndex = 1u<<27;
+	properties->maxMultiviewInstanceIndex = 1u << 27;
 }
 
-void PhysicalDevice::getProperties(VkPhysicalDevicePointClippingProperties* properties) const
+void PhysicalDevice::getProperties(VkPhysicalDevicePointClippingProperties *properties) const
 {
 	properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
 }
 
-void PhysicalDevice::getProperties(VkPhysicalDeviceProtectedMemoryProperties* properties) const
+void PhysicalDevice::getProperties(VkPhysicalDeviceProtectedMemoryProperties *properties) const
 {
 	properties->protectedNoFault = VK_FALSE;
 }
 
-void PhysicalDevice::getProperties(VkPhysicalDeviceSubgroupProperties* properties) const
+void PhysicalDevice::getProperties(VkPhysicalDeviceSubgroupProperties *properties) const
 {
 	properties->subgroupSize = sw::SIMD::Width;
 	properties->supportedStages = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_COMPUTE_BIT;
 	properties->supportedOperations =
-		VK_SUBGROUP_FEATURE_BASIC_BIT |
-		VK_SUBGROUP_FEATURE_VOTE_BIT |
-		VK_SUBGROUP_FEATURE_ARITHMETIC_BIT |
-		VK_SUBGROUP_FEATURE_BALLOT_BIT |
-		VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
-		VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT;
+	    VK_SUBGROUP_FEATURE_BASIC_BIT |
+	    VK_SUBGROUP_FEATURE_VOTE_BIT |
+	    VK_SUBGROUP_FEATURE_ARITHMETIC_BIT |
+	    VK_SUBGROUP_FEATURE_BALLOT_BIT |
+	    VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
+	    VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT;
 	properties->quadOperationsInAllStages = VK_FALSE;
 }
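
Extended structures like this one are filled through the pNext chain of vkGetPhysicalDeviceProperties2. A consumer-side sketch of the query:

    VkPhysicalDeviceSubgroupProperties subgroup{};
    subgroup.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
    VkPhysicalDeviceProperties2 props2{};
    props2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
    props2.pNext = &subgroup;
    vkGetPhysicalDeviceProperties2(physicalDevice, &props2);
    // subgroup.subgroupSize reports sw::SIMD::Width; ballot/shuffle bits are set as above.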
 
-void PhysicalDevice::getProperties(const VkExternalMemoryHandleTypeFlagBits* handleType, VkExternalImageFormatProperties* properties) const
+void PhysicalDevice::getProperties(const VkExternalMemoryHandleTypeFlagBits *handleType, VkExternalImageFormatProperties *properties) const
 {
 	setExternalMemoryProperties(*handleType, &properties->externalMemoryProperties);
 }
 
-void PhysicalDevice::getProperties(VkSamplerYcbcrConversionImageFormatProperties* properties) const
+void PhysicalDevice::getProperties(VkSamplerYcbcrConversionImageFormatProperties *properties) const
 {
 	properties->combinedImageSamplerDescriptorCount = 1;  // Need only one descriptor for YCbCr sampling.
 }
 
 #ifdef __ANDROID__
-void PhysicalDevice::getProperties(VkPhysicalDevicePresentationPropertiesANDROID* properties) const
+void PhysicalDevice::getProperties(VkPhysicalDevicePresentationPropertiesANDROID *properties) const
 {
 	properties->sharedImage = VK_FALSE;
 }
 #endif
 
-void PhysicalDevice::getProperties(const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties) const
+void PhysicalDevice::getProperties(const VkPhysicalDeviceExternalBufferInfo *pExternalBufferInfo, VkExternalBufferProperties *pExternalBufferProperties) const
 {
 	setExternalMemoryProperties(pExternalBufferInfo->handleType, &pExternalBufferProperties->externalMemoryProperties);
 }
 
-void PhysicalDevice::getProperties(const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties) const
+void PhysicalDevice::getProperties(const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo, VkExternalFenceProperties *pExternalFenceProperties) const
 {
 	pExternalFenceProperties->compatibleHandleTypes = 0;
 	pExternalFenceProperties->exportFromImportedHandleTypes = 0;
 	pExternalFenceProperties->externalFenceFeatures = 0;
 }
 
-void PhysicalDevice::getProperties(const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties) const
+void PhysicalDevice::getProperties(const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo, VkExternalSemaphoreProperties *pExternalSemaphoreProperties) const
 {
 #if SWIFTSHADER_EXTERNAL_SEMAPHORE_OPAQUE_FD
 	if(pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT)
@@ -407,29 +404,29 @@
 	pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
 }
 
-void PhysicalDevice::getProperties(VkPhysicalDeviceDriverPropertiesKHR* properties) const
+void PhysicalDevice::getProperties(VkPhysicalDeviceDriverPropertiesKHR *properties) const
 {
 	properties->driverID = VK_DRIVER_ID_GOOGLE_SWIFTSHADER_KHR;
 	strcpy(properties->driverName, "SwiftShader driver");
 	strcpy(properties->driverInfo, "");
-	properties->conformanceVersion = {1, 1, 3, 3};
+	properties->conformanceVersion = { 1, 1, 3, 3 };
 }
 
-void PhysicalDevice::getProperties(VkPhysicalDeviceLineRasterizationPropertiesEXT* properties) const
+void PhysicalDevice::getProperties(VkPhysicalDeviceLineRasterizationPropertiesEXT *properties) const
 {
 	properties->lineSubPixelPrecisionBits = vk::SUBPIXEL_PRECISION_BITS;
 }
 
-void PhysicalDevice::getProperties(VkPhysicalDeviceProvokingVertexPropertiesEXT* properties) const
+void PhysicalDevice::getProperties(VkPhysicalDeviceProvokingVertexPropertiesEXT *properties) const
 {
 	properties->provokingVertexModePerPipeline = VK_TRUE;
 }
 
-bool PhysicalDevice::hasFeatures(const VkPhysicalDeviceFeatures& requestedFeatures) const
+bool PhysicalDevice::hasFeatures(const VkPhysicalDeviceFeatures &requestedFeatures) const
 {
-	const VkPhysicalDeviceFeatures& supportedFeatures = getFeatures();
-	const VkBool32* supportedFeature = reinterpret_cast<const VkBool32*>(&supportedFeatures);
-	const VkBool32* requestedFeature = reinterpret_cast<const VkBool32*>(&requestedFeatures);
+	const VkPhysicalDeviceFeatures &supportedFeatures = getFeatures();
+	const VkBool32 *supportedFeature = reinterpret_cast<const VkBool32 *>(&supportedFeatures);
+	const VkBool32 *requestedFeature = reinterpret_cast<const VkBool32 *>(&requestedFeatures);
 	constexpr auto featureCount = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
 
 	for(unsigned int i = 0; i < featureCount; i++)
@@ -443,308 +440,308 @@
 	return true;
 }
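
hasFeatures walks VkPhysicalDeviceFeatures as a flat VkBool32 array, which is valid because the struct consists solely of VkBool32 members. A sketch of the call site it serves, assuming the vkCreateDevice entry point validates pEnabledFeatures this way:

    // Sketch: reject a device-creation request that enables unsupported features.
    if(pCreateInfo->pEnabledFeatures)
    {
        if(!vk::Cast(physicalDevice)->hasFeatures(*(pCreateInfo->pEnabledFeatures)))
        {
            return VK_ERROR_FEATURE_NOT_PRESENT;
        }
    }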
 
-void PhysicalDevice::getFormatProperties(Format format, VkFormatProperties* pFormatProperties) const
+void PhysicalDevice::getFormatProperties(Format format, VkFormatProperties *pFormatProperties) const
 {
-	pFormatProperties->linearTilingFeatures = 0; // Unsupported format
-	pFormatProperties->optimalTilingFeatures = 0; // Unsupported format
-	pFormatProperties->bufferFeatures = 0; // Unsupported format
+	pFormatProperties->linearTilingFeatures = 0;   // Unsupported format
+	pFormatProperties->optimalTilingFeatures = 0;  // Unsupported format
+	pFormatProperties->bufferFeatures = 0;         // Unsupported format
 
 	switch(format)
 	{
-		// Formats which can be sampled *and* filtered
-	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-	case VK_FORMAT_R5G6B5_UNORM_PACK16:
-	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8_SRGB:
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8_SRGB:
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8G8B8A8_SRGB:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_B8G8R8A8_SRGB:
-	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-	case VK_FORMAT_BC2_UNORM_BLOCK:
-	case VK_FORMAT_BC2_SRGB_BLOCK:
-	case VK_FORMAT_BC3_UNORM_BLOCK:
-	case VK_FORMAT_BC3_SRGB_BLOCK:
-	case VK_FORMAT_BC4_UNORM_BLOCK:
-	case VK_FORMAT_BC4_SNORM_BLOCK:
-	case VK_FORMAT_BC5_UNORM_BLOCK:
-	case VK_FORMAT_BC5_SNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-		pFormatProperties->optimalTilingFeatures |=
-			VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
-		// Fall through
+			// Formats which can be sampled *and* filtered
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8_SRGB:
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8_SRGB:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+		case VK_FORMAT_BC2_UNORM_BLOCK:
+		case VK_FORMAT_BC2_SRGB_BLOCK:
+		case VK_FORMAT_BC3_UNORM_BLOCK:
+		case VK_FORMAT_BC3_SRGB_BLOCK:
+		case VK_FORMAT_BC4_UNORM_BLOCK:
+		case VK_FORMAT_BC4_SNORM_BLOCK:
+		case VK_FORMAT_BC5_UNORM_BLOCK:
+		case VK_FORMAT_BC5_SNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+			pFormatProperties->optimalTilingFeatures |=
+			    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
+			// Fall through
 
-		// Formats which can be sampled, but don't support filtering
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32_SFLOAT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32_SFLOAT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-	case VK_FORMAT_S8_UINT:
-	case VK_FORMAT_D16_UNORM:
-	case VK_FORMAT_D32_SFLOAT:
-	case VK_FORMAT_D32_SFLOAT_S8_UINT:
-		pFormatProperties->optimalTilingFeatures |=
-			VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
-			VK_FORMAT_FEATURE_BLIT_SRC_BIT |
-			VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
-			VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
-		break;
+			// Formats which can be sampled, but don't support filtering
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+		case VK_FORMAT_S8_UINT:
+		case VK_FORMAT_D16_UNORM:
+		case VK_FORMAT_D32_SFLOAT:
+		case VK_FORMAT_D32_SFLOAT_S8_UINT:
+			pFormatProperties->optimalTilingFeatures |=
+			    VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
+			    VK_FORMAT_FEATURE_BLIT_SRC_BIT |
+			    VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
+			    VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
+			break;
 
-		// YCbCr formats:
-	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-		pFormatProperties->optimalTilingFeatures |=
-			VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
-			VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
-			VK_FORMAT_FEATURE_TRANSFER_DST_BIT |
-			VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT;
-		break;
-	default:
-		break;
+			// YCbCr formats:
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+			pFormatProperties->optimalTilingFeatures |=
+			    VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
+			    VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
+			    VK_FORMAT_FEATURE_TRANSFER_DST_BIT |
+			    VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT;
+			break;
+		default:
+			break;
 	}
 
 	switch(format)
 	{
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32_SINT:
-		pFormatProperties->optimalTilingFeatures |=
-			VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT;
-		pFormatProperties->bufferFeatures |=
-			VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT;
-		// Fall through
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_R32_SFLOAT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32_SFLOAT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-		pFormatProperties->optimalTilingFeatures |=
-			VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
-		// Fall through
-	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		pFormatProperties->bufferFeatures |=
-			VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT;
-		break;
-	default:
-		break;
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32_SINT:
+			pFormatProperties->optimalTilingFeatures |=
+			    VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT;
+			pFormatProperties->bufferFeatures |=
+			    VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT;
+			// Fall through
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+			pFormatProperties->optimalTilingFeatures |=
+			    VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
+			// Fall through
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+			pFormatProperties->bufferFeatures |=
+			    VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT;
+			break;
+		default:
+			break;
 	}
 
 	switch(format)
 	{
-	case VK_FORMAT_R5G6B5_UNORM_PACK16:
-	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_SRGB:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_B8G8R8A8_SRGB:
-	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_R32_SFLOAT:
-	case VK_FORMAT_R32G32_SFLOAT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-		pFormatProperties->optimalTilingFeatures |=
-			VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT;
-		// Fall through
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		pFormatProperties->optimalTilingFeatures |=
-			VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
-			VK_FORMAT_FEATURE_BLIT_DST_BIT;
-		break;
-	case VK_FORMAT_S8_UINT:
-	case VK_FORMAT_D16_UNORM:
-	case VK_FORMAT_D32_SFLOAT: // Note: either VK_FORMAT_D32_SFLOAT or VK_FORMAT_X8_D24_UNORM_PACK32 must be supported
-	case VK_FORMAT_D32_SFLOAT_S8_UINT: // Note: either VK_FORMAT_D24_UNORM_S8_UINT or VK_FORMAT_D32_SFLOAT_S8_UINT must be supported
-		pFormatProperties->optimalTilingFeatures |=
-			VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
-		break;
-	default:
-		break;
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+			pFormatProperties->optimalTilingFeatures |=
+			    VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT;
+			// Fall through
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+			pFormatProperties->optimalTilingFeatures |=
+			    VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
+			    VK_FORMAT_FEATURE_BLIT_DST_BIT;
+			break;
+		case VK_FORMAT_S8_UINT:
+		case VK_FORMAT_D16_UNORM:
+		case VK_FORMAT_D32_SFLOAT:          // Note: either VK_FORMAT_D32_SFLOAT or VK_FORMAT_X8_D24_UNORM_PACK32 must be supported
+		case VK_FORMAT_D32_SFLOAT_S8_UINT:  // Note: either VK_FORMAT_D24_UNORM_S8_UINT or VK_FORMAT_D32_SFLOAT_S8_UINT must be supported
+			pFormatProperties->optimalTilingFeatures |=
+			    VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
+			break;
+		default:
+			break;
 	}
 
 	switch(format)
 	{
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-	case VK_FORMAT_R16_UNORM:
-	case VK_FORMAT_R16_SNORM:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_R16G16_UNORM:
-	case VK_FORMAT_R16G16_SNORM:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R16G16B16A16_UNORM:
-	case VK_FORMAT_R16G16B16A16_SNORM:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32_SFLOAT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32_SFLOAT:
-	case VK_FORMAT_R32G32B32_UINT:
-	case VK_FORMAT_R32G32B32_SINT:
-	case VK_FORMAT_R32G32B32_SFLOAT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-		pFormatProperties->bufferFeatures |=
-			VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT;
-		break;
-	default:
-		break;
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_R16_UNORM:
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R16G16B16A16_UNORM:
+		case VK_FORMAT_R16G16B16A16_SNORM:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R32G32B32_UINT:
+		case VK_FORMAT_R32G32B32_SINT:
+		case VK_FORMAT_R32G32B32_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+			pFormatProperties->bufferFeatures |=
+			    VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT;
+			break;
+		default:
+			break;
 	}
 
 	switch(format)
 	{
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32_SFLOAT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32_SFLOAT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		pFormatProperties->bufferFeatures |=
-			VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
-		break;
-	default:
-		break;
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+			pFormatProperties->bufferFeatures |=
+			    VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
+			break;
+		default:
+			break;
 	}
 
 	if(pFormatProperties->optimalTilingFeatures)
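
The cascaded switches above accumulate feature bits through deliberate fall-through: each tier ORs in its flags and drops into the next. The accumulated result reads back with a single query:

    VkFormatProperties fmt{};
    vkGetPhysicalDeviceFormatProperties(physicalDevice, VK_FORMAT_R8G8B8A8_UNORM, &fmt);
    // Both bits are set for this format by the switches above.
    bool filterable = (fmt.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT) != 0;
    bool blendable = (fmt.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT) != 0;
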
@@ -756,7 +753,7 @@
 
 void PhysicalDevice::getImageFormatProperties(Format format, VkImageType type, VkImageTiling tiling,
                                               VkImageUsageFlags usage, VkImageCreateFlags flags,
-                                              VkImageFormatProperties* pImageFormatProperties) const
+                                              VkImageFormatProperties *pImageFormatProperties) const
 {
 	pImageFormatProperties->sampleCounts = VK_SAMPLE_COUNT_1_BIT;
 	pImageFormatProperties->maxArrayLayers = vk::MAX_IMAGE_ARRAY_LAYERS;
@@ -764,47 +761,47 @@
 
 	switch(type)
 	{
-	case VK_IMAGE_TYPE_1D:
-		pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_1D;
-		pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_1D - 1);
-		pImageFormatProperties->maxExtent.height = 1;
-		break;
-	case VK_IMAGE_TYPE_2D:
-		if(flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
-		{
-			pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_CUBE;
-			pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_CUBE - 1);
-			pImageFormatProperties->maxExtent.height = 1 << (vk::MAX_IMAGE_LEVELS_CUBE - 1);
-		}
-		else
-		{
-			pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_2D;
-			pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_2D - 1);
-			pImageFormatProperties->maxExtent.height = 1 << (vk::MAX_IMAGE_LEVELS_2D - 1);
-
-			VkFormatProperties props;
-			getFormatProperties(format, &props);
-			auto features = tiling == VK_IMAGE_TILING_LINEAR ? props.linearTilingFeatures : props.optimalTilingFeatures;
-			if(features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))
+		case VK_IMAGE_TYPE_1D:
+			pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_1D;
+			pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_1D - 1);
+			pImageFormatProperties->maxExtent.height = 1;
+			break;
+		case VK_IMAGE_TYPE_2D:
+			if(flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
 			{
-				// Only renderable formats make sense for multisample
-				pImageFormatProperties->sampleCounts = getSampleCounts();
+				pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_CUBE;
+				pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_CUBE - 1);
+				pImageFormatProperties->maxExtent.height = 1 << (vk::MAX_IMAGE_LEVELS_CUBE - 1);
 			}
-		}
-		break;
-	case VK_IMAGE_TYPE_3D:
-		pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_3D;
-		pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_3D - 1);
-		pImageFormatProperties->maxExtent.height = 1 << (vk::MAX_IMAGE_LEVELS_3D - 1);
-		pImageFormatProperties->maxExtent.depth = 1 << (vk::MAX_IMAGE_LEVELS_3D - 1);
-		pImageFormatProperties->maxArrayLayers = 1;		// no 3D + layers
-		break;
-	default:
-		UNREACHABLE("VkImageType: %d", int(type));
-		break;
+			else
+			{
+				pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_2D;
+				pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_2D - 1);
+				pImageFormatProperties->maxExtent.height = 1 << (vk::MAX_IMAGE_LEVELS_2D - 1);
+
+				VkFormatProperties props;
+				getFormatProperties(format, &props);
+				auto features = tiling == VK_IMAGE_TILING_LINEAR ? props.linearTilingFeatures : props.optimalTilingFeatures;
+				if(features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))
+				{
+					// Only renderable formats make sense for multisample
+					pImageFormatProperties->sampleCounts = getSampleCounts();
+				}
+			}
+			break;
+		case VK_IMAGE_TYPE_3D:
+			pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_3D;
+			pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_3D - 1);
+			pImageFormatProperties->maxExtent.height = 1 << (vk::MAX_IMAGE_LEVELS_3D - 1);
+			pImageFormatProperties->maxExtent.depth = 1 << (vk::MAX_IMAGE_LEVELS_3D - 1);
+			pImageFormatProperties->maxArrayLayers = 1;  // no 3D + layers
+			break;
+		default:
+			UNREACHABLE("VkImageType: %d", int(type));
+			break;
 	}
 
-	pImageFormatProperties->maxResourceSize = 1u << 31; // Minimum value for maxResourceSize
+	pImageFormatProperties->maxResourceSize = 1u << 31;  // Minimum value for maxResourceSize
 
 	// "Images created with tiling equal to VK_IMAGE_TILING_LINEAR have further restrictions on their limits and capabilities
 	//  compared to images created with tiling equal to VK_IMAGE_TILING_OPTIMAL."
@@ -831,7 +828,7 @@
 }
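
This backs vkGetPhysicalDeviceImageFormatProperties, which also signals whether a combination is supported at all via its VkResult. A consumer-side sketch:

    VkImageFormatProperties imageProps{};
    VkResult result = vkGetPhysicalDeviceImageFormatProperties(
        physicalDevice, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D,
        VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_SAMPLED_BIT, 0, &imageProps);
    if(result == VK_SUCCESS)
    {
        // 2D non-cube: maxExtent is 1 << (MAX_IMAGE_LEVELS_2D - 1) square, and
        // sampleCounts exceeds VK_SAMPLE_COUNT_1_BIT only for renderable formats.
    }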
 
 void PhysicalDevice::getQueueFamilyProperties(uint32_t pQueueFamilyPropertyCount,
-                                              VkQueueFamilyProperties* pQueueFamilyProperties) const
+                                              VkQueueFamilyProperties *pQueueFamilyProperties) const
 {
 	for(uint32_t i = 0; i < pQueueFamilyPropertyCount; i++)
 	{
@@ -840,12 +837,12 @@
 		pQueueFamilyProperties[i].minImageTransferGranularity.depth = 1;
 		pQueueFamilyProperties[i].queueCount = 1;
 		pQueueFamilyProperties[i].queueFlags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT;
-		pQueueFamilyProperties[i].timestampValidBits = 0; // No support for time stamps
+		pQueueFamilyProperties[i].timestampValidBits = 0;  // No support for time stamps
 	}
 }
 
 void PhysicalDevice::getQueueFamilyProperties(uint32_t pQueueFamilyPropertyCount,
-                                              VkQueueFamilyProperties2* pQueueFamilyProperties) const
+                                              VkQueueFamilyProperties2 *pQueueFamilyProperties) const
 {
 	for(uint32_t i = 0; i < pQueueFamilyPropertyCount; i++)
 	{
@@ -854,31 +851,30 @@
 		pQueueFamilyProperties[i].queueFamilyProperties.minImageTransferGranularity.depth = 1;
 		pQueueFamilyProperties[i].queueFamilyProperties.queueCount = 1;
 		pQueueFamilyProperties[i].queueFamilyProperties.queueFlags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT;
-		pQueueFamilyProperties[i].queueFamilyProperties.timestampValidBits = 0; // No support for time stamps
+		pQueueFamilyProperties[i].queueFamilyProperties.timestampValidBits = 0;  // No support for time stamps
 	}
 }
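
Both overloads report identical data for every family; applications retrieve it with the usual two-call enumeration idiom:

    uint32_t familyCount = 0;
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &familyCount, nullptr);
    std::vector<VkQueueFamilyProperties> families(familyCount);
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &familyCount, families.data());
    // One family here: graphics | compute | transfer, timestampValidBits == 0.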
 
-const VkPhysicalDeviceMemoryProperties& PhysicalDevice::getMemoryProperties() const
+const VkPhysicalDeviceMemoryProperties &PhysicalDevice::getMemoryProperties() const
 {
-	static const VkPhysicalDeviceMemoryProperties properties
-	{
-		1, // memoryTypeCount
+	static const VkPhysicalDeviceMemoryProperties properties{
+		1,  // memoryTypeCount
 		{
-			// vk::MEMORY_TYPE_GENERIC_BIT
-			{
-				(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
-				 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
-				 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
-				 VK_MEMORY_PROPERTY_HOST_CACHED_BIT), // propertyFlags
-				0 // heapIndex
-			},
+		    // vk::MEMORY_TYPE_GENERIC_BIT
+		    {
+		        (VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+		         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+		         VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+		         VK_MEMORY_PROPERTY_HOST_CACHED_BIT),  // propertyFlags
+		        0                                      // heapIndex
+		    },
 		},
-		1, // memoryHeapCount
+		1,  // memoryHeapCount
 		{
-			{
-				1ull << 31, // size, FIXME(sugoi): This should be configurable based on available RAM
-				VK_MEMORY_HEAP_DEVICE_LOCAL_BIT // flags
-			},
+		    {
+		        1ull << 31,                      // size, FIXME(sugoi): This should be configurable based on available RAM
+		        VK_MEMORY_HEAP_DEVICE_LOCAL_BIT  // flags
+		    },
 		}
 	};
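
A single DEVICE_LOCAL | HOST_VISIBLE | HOST_COHERENT | HOST_CACHED type in one heap keeps allocation trivial, but the canonical selection loop over these properties still applies. A sketch, with findMemoryType as a hypothetical helper:

    // Hypothetical helper: pick a memory type satisfying both the memoryTypeBits
    // mask from vkGetBufferMemoryRequirements and the requested property flags.
    uint32_t findMemoryType(const VkPhysicalDeviceMemoryProperties &mem,
                            uint32_t memoryTypeBits, VkMemoryPropertyFlags required)
    {
        for(uint32_t i = 0; i < mem.memoryTypeCount; i++)
        {
            if((memoryTypeBits & (1u << i)) && (mem.memoryTypes[i].propertyFlags & required) == required)
            {
                return i;
            }
        }
        return UINT32_MAX;  // no compatible type
    }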
 
diff --git a/src/Vulkan/VkPhysicalDevice.hpp b/src/Vulkan/VkPhysicalDevice.hpp
index c4007cc..8e94466 100644
--- a/src/Vulkan/VkPhysicalDevice.hpp
+++ b/src/Vulkan/VkPhysicalDevice.hpp
@@ -15,11 +15,11 @@
 #ifndef VK_PHYSICAL_DEVICE_HPP_
 #define VK_PHYSICAL_DEVICE_HPP_
 
-#include "VkObject.hpp"
 #include "VkFormat.h"
+#include "VkObject.hpp"
 
 #ifdef VK_USE_PLATFORM_ANDROID_KHR
-#include <vulkan/vk_android_native_buffer.h>
+#	include <vulkan/vk_android_native_buffer.h>
 #endif
 
 namespace vk {
@@ -29,65 +29,65 @@
 public:
 	static constexpr VkSystemAllocationScope GetAllocationScope() { return VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE; }
 
-	PhysicalDevice(const void*, void* mem);
-	void destroy(const VkAllocationCallbacks* pAllocator) {}
+	PhysicalDevice(const void *, void *mem);
+	void destroy(const VkAllocationCallbacks *pAllocator) {}
 
-	static size_t ComputeRequiredAllocationSize(const void*) { return 0; }
+	static size_t ComputeRequiredAllocationSize(const void *) { return 0; }
 
-	const VkPhysicalDeviceFeatures& getFeatures() const;
-	void getFeatures(VkPhysicalDeviceSamplerYcbcrConversionFeatures* features) const;
-	void getFeatures(VkPhysicalDevice16BitStorageFeatures* features) const;
-	void getFeatures(VkPhysicalDeviceVariablePointerFeatures* features) const;
-	void getFeatures(VkPhysicalDevice8BitStorageFeaturesKHR* features) const;
-	void getFeatures(VkPhysicalDeviceMultiviewFeatures* features) const;
-	void getFeatures(VkPhysicalDeviceProtectedMemoryFeatures* features) const;
-	void getFeatures(VkPhysicalDeviceShaderDrawParameterFeatures* features) const;
-	void getFeatures(VkPhysicalDeviceLineRasterizationFeaturesEXT* features) const;
-	void getFeatures(VkPhysicalDeviceProvokingVertexFeaturesEXT* features) const;
-	bool hasFeatures(const VkPhysicalDeviceFeatures& requestedFeatures) const;
+	const VkPhysicalDeviceFeatures &getFeatures() const;
+	void getFeatures(VkPhysicalDeviceSamplerYcbcrConversionFeatures *features) const;
+	void getFeatures(VkPhysicalDevice16BitStorageFeatures *features) const;
+	void getFeatures(VkPhysicalDeviceVariablePointerFeatures *features) const;
+	void getFeatures(VkPhysicalDevice8BitStorageFeaturesKHR *features) const;
+	void getFeatures(VkPhysicalDeviceMultiviewFeatures *features) const;
+	void getFeatures(VkPhysicalDeviceProtectedMemoryFeatures *features) const;
+	void getFeatures(VkPhysicalDeviceShaderDrawParameterFeatures *features) const;
+	void getFeatures(VkPhysicalDeviceLineRasterizationFeaturesEXT *features) const;
+	void getFeatures(VkPhysicalDeviceProvokingVertexFeaturesEXT *features) const;
+	bool hasFeatures(const VkPhysicalDeviceFeatures &requestedFeatures) const;
 
-	const VkPhysicalDeviceProperties& getProperties() const;
-	void getProperties(VkPhysicalDeviceIDProperties* properties) const;
-	void getProperties(VkPhysicalDeviceMaintenance3Properties* properties) const;
-	void getProperties(VkPhysicalDeviceMultiviewProperties* properties) const;
-	void getProperties(VkPhysicalDevicePointClippingProperties* properties) const;
-	void getProperties(VkPhysicalDeviceProtectedMemoryProperties* properties) const;
-	void getProperties(VkPhysicalDeviceSubgroupProperties* properties) const;
-	void getProperties(const VkExternalMemoryHandleTypeFlagBits* handleType, VkExternalImageFormatProperties* properties) const;
-	void getProperties(VkSamplerYcbcrConversionImageFormatProperties* properties) const;
+	const VkPhysicalDeviceProperties &getProperties() const;
+	void getProperties(VkPhysicalDeviceIDProperties *properties) const;
+	void getProperties(VkPhysicalDeviceMaintenance3Properties *properties) const;
+	void getProperties(VkPhysicalDeviceMultiviewProperties *properties) const;
+	void getProperties(VkPhysicalDevicePointClippingProperties *properties) const;
+	void getProperties(VkPhysicalDeviceProtectedMemoryProperties *properties) const;
+	void getProperties(VkPhysicalDeviceSubgroupProperties *properties) const;
+	void getProperties(const VkExternalMemoryHandleTypeFlagBits *handleType, VkExternalImageFormatProperties *properties) const;
+	void getProperties(VkSamplerYcbcrConversionImageFormatProperties *properties) const;
 #ifdef __ANDROID__
-	void getProperties(VkPhysicalDevicePresentationPropertiesANDROID* properties) const;
+	void getProperties(VkPhysicalDevicePresentationPropertiesANDROID *properties) const;
 #endif
-	void getProperties(const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties) const;
-	void getProperties(const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties) const;
-	void getProperties(const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties) const;
-	void getProperties(VkPhysicalDeviceDriverPropertiesKHR* properties) const;
-	void getProperties(VkPhysicalDeviceLineRasterizationPropertiesEXT* properties) const;
-	void getProperties(VkPhysicalDeviceProvokingVertexPropertiesEXT* properties) const;
+	void getProperties(const VkPhysicalDeviceExternalBufferInfo *pExternalBufferInfo, VkExternalBufferProperties *pExternalBufferProperties) const;
+	void getProperties(const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo, VkExternalFenceProperties *pExternalFenceProperties) const;
+	void getProperties(const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo, VkExternalSemaphoreProperties *pExternalSemaphoreProperties) const;
+	void getProperties(VkPhysicalDeviceDriverPropertiesKHR *properties) const;
+	void getProperties(VkPhysicalDeviceLineRasterizationPropertiesEXT *properties) const;
+	void getProperties(VkPhysicalDeviceProvokingVertexPropertiesEXT *properties) const;
 
-	void getFormatProperties(Format format, VkFormatProperties* pFormatProperties) const;
+	void getFormatProperties(Format format, VkFormatProperties *pFormatProperties) const;
 	void getImageFormatProperties(Format format, VkImageType type, VkImageTiling tiling,
 	                              VkImageUsageFlags usage, VkImageCreateFlags flags,
-	                              VkImageFormatProperties* pImageFormatProperties) const;
+	                              VkImageFormatProperties *pImageFormatProperties) const;
 	uint32_t getQueueFamilyPropertyCount() const;
 	void getQueueFamilyProperties(uint32_t pQueueFamilyPropertyCount,
-	                              VkQueueFamilyProperties* pQueueFamilyProperties) const;
+	                              VkQueueFamilyProperties *pQueueFamilyProperties) const;
 	void getQueueFamilyProperties(uint32_t pQueueFamilyPropertyCount,
-	                              VkQueueFamilyProperties2* pQueueFamilyProperties) const;
-	const VkPhysicalDeviceMemoryProperties& getMemoryProperties() const;
+	                              VkQueueFamilyProperties2 *pQueueFamilyProperties) const;
+	const VkPhysicalDeviceMemoryProperties &getMemoryProperties() const;
 
 private:
-	const VkPhysicalDeviceLimits& getLimits() const;
+	const VkPhysicalDeviceLimits &getLimits() const;
 	VkSampleCountFlags getSampleCounts() const;
 };
 
 using DispatchablePhysicalDevice = DispatchableObject<PhysicalDevice, VkPhysicalDevice>;
 
-static inline PhysicalDevice* Cast(VkPhysicalDevice object)
+static inline PhysicalDevice *Cast(VkPhysicalDevice object)
 {
 	return DispatchablePhysicalDevice::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_PHYSICAL_DEVICE_HPP_
+#endif  // VK_PHYSICAL_DEVICE_HPP_
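
The getFeatures/getProperties overload family declared above is selected by sType when the entry points walk an output pNext chain. A hypothetical sketch of that dispatch (the shipped walk lives in the API layer and covers every overload listed):

    for(auto *ext = reinterpret_cast<VkBaseOutStructure *>(pProperties->pNext);
        ext != nullptr; ext = ext->pNext)
    {
        switch(ext->sType)
        {
            case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES:
                physicalDevice->getProperties(reinterpret_cast<VkPhysicalDeviceMultiviewProperties *>(ext));
                break;
            default:
                break;
        }
    }
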
diff --git a/src/Vulkan/VkPipeline.cpp b/src/Vulkan/VkPipeline.cpp
index f35b203..a57867b 100644
--- a/src/Vulkan/VkPipeline.cpp
+++ b/src/Vulkan/VkPipeline.cpp
@@ -17,9 +17,9 @@
 #include "VkDevice.hpp"
 #include "VkPipelineCache.hpp"
 #include "VkPipelineLayout.hpp"
+#include "VkRenderPass.hpp"
 #include "VkShaderModule.hpp"
 #include "VkStringify.hpp"
-#include "VkRenderPass.hpp"
 #include "Pipeline/ComputeProgram.hpp"
 #include "Pipeline/SpirvShader.hpp"
 
@@ -35,63 +35,63 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		return sw::STREAMTYPE_BYTE;
-	case VK_FORMAT_B8G8R8A8_UNORM:
-		return sw::STREAMTYPE_COLOR;
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		return sw::STREAMTYPE_SBYTE;
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		return sw::STREAMTYPE_2_10_10_10_UINT;
-	case VK_FORMAT_R16_UNORM:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16G16_UNORM:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16B16A16_UNORM:
-	case VK_FORMAT_R16G16B16A16_UINT:
-		return sw::STREAMTYPE_USHORT;
-	case VK_FORMAT_R16_SNORM:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16G16_SNORM:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16B16A16_SNORM:
-	case VK_FORMAT_R16G16B16A16_SINT:
-		return sw::STREAMTYPE_SHORT;
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-		return sw::STREAMTYPE_HALF;
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32B32_UINT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-		return sw::STREAMTYPE_UINT;
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32B32_SINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-		return sw::STREAMTYPE_INT;
-	case VK_FORMAT_R32_SFLOAT:
-	case VK_FORMAT_R32G32_SFLOAT:
-	case VK_FORMAT_R32G32B32_SFLOAT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-		return sw::STREAMTYPE_FLOAT;
-	default:
-		UNIMPLEMENTED("format");
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+			return sw::STREAMTYPE_BYTE;
+		case VK_FORMAT_B8G8R8A8_UNORM:
+			return sw::STREAMTYPE_COLOR;
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+			return sw::STREAMTYPE_SBYTE;
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+			return sw::STREAMTYPE_2_10_10_10_UINT;
+		case VK_FORMAT_R16_UNORM:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16B16A16_UNORM:
+		case VK_FORMAT_R16G16B16A16_UINT:
+			return sw::STREAMTYPE_USHORT;
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16B16A16_SNORM:
+		case VK_FORMAT_R16G16B16A16_SINT:
+			return sw::STREAMTYPE_SHORT;
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+			return sw::STREAMTYPE_HALF;
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32B32_UINT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+			return sw::STREAMTYPE_UINT;
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32B32_SINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+			return sw::STREAMTYPE_INT;
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R32G32B32_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+			return sw::STREAMTYPE_FLOAT;
+		default:
+			UNIMPLEMENTED("format");
 	}
 
 	return sw::STREAMTYPE_BYTE;
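
getStreamType translates a Vulkan vertex format into the renderer's stream enum. A short in-file usage sketch, assuming the enclosing function returns sw::StreamType:

    // Sketch: translating one attribute of a pipeline's vertex input state.
    VkVertexInputAttributeDescription attr = { /*location*/ 0, /*binding*/ 0, VK_FORMAT_R32G32B32_SFLOAT, /*offset*/ 0 };
    sw::StreamType streamType = getStreamType(attr.format);  // sw::STREAMTYPE_FLOAT
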
@@ -101,57 +101,57 @@
 {
 	switch(format)
 	{
-	case VK_FORMAT_R8_UNORM:
-	case VK_FORMAT_R8_SNORM:
-	case VK_FORMAT_R8_UINT:
-	case VK_FORMAT_R8_SINT:
-	case VK_FORMAT_R16_UNORM:
-	case VK_FORMAT_R16_SNORM:
-	case VK_FORMAT_R16_UINT:
-	case VK_FORMAT_R16_SINT:
-	case VK_FORMAT_R16_SFLOAT:
-	case VK_FORMAT_R32_UINT:
-	case VK_FORMAT_R32_SINT:
-	case VK_FORMAT_R32_SFLOAT:
-		return 1;
-	case VK_FORMAT_R8G8_UNORM:
-	case VK_FORMAT_R8G8_SNORM:
-	case VK_FORMAT_R8G8_UINT:
-	case VK_FORMAT_R8G8_SINT:
-	case VK_FORMAT_R16G16_UNORM:
-	case VK_FORMAT_R16G16_SNORM:
-	case VK_FORMAT_R16G16_UINT:
-	case VK_FORMAT_R16G16_SINT:
-	case VK_FORMAT_R16G16_SFLOAT:
-	case VK_FORMAT_R32G32_UINT:
-	case VK_FORMAT_R32G32_SINT:
-	case VK_FORMAT_R32G32_SFLOAT:
-		return 2;
-	case VK_FORMAT_R32G32B32_UINT:
-	case VK_FORMAT_R32G32B32_SINT:
-	case VK_FORMAT_R32G32B32_SFLOAT:
-		return 3;
-	case VK_FORMAT_R8G8B8A8_UNORM:
-	case VK_FORMAT_R8G8B8A8_SNORM:
-	case VK_FORMAT_R8G8B8A8_UINT:
-	case VK_FORMAT_R8G8B8A8_SINT:
-	case VK_FORMAT_B8G8R8A8_UNORM:
-	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-	case VK_FORMAT_R16G16B16A16_UNORM:
-	case VK_FORMAT_R16G16B16A16_SNORM:
-	case VK_FORMAT_R16G16B16A16_UINT:
-	case VK_FORMAT_R16G16B16A16_SINT:
-	case VK_FORMAT_R16G16B16A16_SFLOAT:
-	case VK_FORMAT_R32G32B32A32_UINT:
-	case VK_FORMAT_R32G32B32A32_SINT:
-	case VK_FORMAT_R32G32B32A32_SFLOAT:
-		return 4;
-	default:
-		UNIMPLEMENTED("format");
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R16_UNORM:
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32_SFLOAT:
+			return 1;
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_SFLOAT:
+			return 2;
+		case VK_FORMAT_R32G32B32_UINT:
+		case VK_FORMAT_R32G32B32_SINT:
+		case VK_FORMAT_R32G32B32_SFLOAT:
+			return 3;
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_R16G16B16A16_UNORM:
+		case VK_FORMAT_R16G16B16A16_SNORM:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+			return 4;
+		default:
+			UNIMPLEMENTED("format");
 	}
 
 	return 0;
@@ -159,21 +159,21 @@
 
 // preprocessSpirv applies and freezes specializations into constants, and inlines all functions.
 std::vector<uint32_t> preprocessSpirv(
-		std::vector<uint32_t> const &code,
-		VkSpecializationInfo const *specializationInfo)
+    std::vector<uint32_t> const &code,
+    VkSpecializationInfo const *specializationInfo)
 {
-	spvtools::Optimizer opt{SPV_ENV_VULKAN_1_1};
+	spvtools::Optimizer opt{ SPV_ENV_VULKAN_1_1 };
 
-	opt.SetMessageConsumer([](spv_message_level_t level, const char*, const spv_position_t& p, const char* m) {
+	opt.SetMessageConsumer([](spv_message_level_t level, const char *, const spv_position_t &p, const char *m) {
 		switch(level)
 		{
-		case SPV_MSG_FATAL:          vk::warn("SPIR-V FATAL: %d:%d %s\n", int(p.line), int(p.column), m);
-		case SPV_MSG_INTERNAL_ERROR: vk::warn("SPIR-V INTERNAL_ERROR: %d:%d %s\n", int(p.line), int(p.column), m);
-		case SPV_MSG_ERROR:          vk::warn("SPIR-V ERROR: %d:%d %s\n", int(p.line), int(p.column), m);
-		case SPV_MSG_WARNING:        vk::warn("SPIR-V WARNING: %d:%d %s\n", int(p.line), int(p.column), m);
-		case SPV_MSG_INFO:           vk::trace("SPIR-V INFO: %d:%d %s\n", int(p.line), int(p.column), m);
-		case SPV_MSG_DEBUG:          vk::trace("SPIR-V DEBUG: %d:%d %s\n", int(p.line), int(p.column), m);
-		default:                     vk::trace("SPIR-V MESSAGE: %d:%d %s\n", int(p.line), int(p.column), m);
+			case SPV_MSG_FATAL: vk::warn("SPIR-V FATAL: %d:%d %s\n", int(p.line), int(p.column), m);
+			case SPV_MSG_INTERNAL_ERROR: vk::warn("SPIR-V INTERNAL_ERROR: %d:%d %s\n", int(p.line), int(p.column), m);
+			case SPV_MSG_ERROR: vk::warn("SPIR-V ERROR: %d:%d %s\n", int(p.line), int(p.column), m);
+			case SPV_MSG_WARNING: vk::warn("SPIR-V WARNING: %d:%d %s\n", int(p.line), int(p.column), m);
+			case SPV_MSG_INFO: vk::trace("SPIR-V INFO: %d:%d %s\n", int(p.line), int(p.column), m);
+			case SPV_MSG_DEBUG: vk::trace("SPIR-V DEBUG: %d:%d %s\n", int(p.line), int(p.column), m);
+			default: vk::trace("SPIR-V MESSAGE: %d:%d %s\n", int(p.line), int(p.column), m);
 		}
 	});
 
@@ -185,9 +185,9 @@
 		{
 			auto const &e = specializationInfo->pMapEntries[i];
 			auto value_ptr =
-					static_cast<uint32_t const *>(specializationInfo->pData) + e.offset / sizeof(uint32_t);
+			    static_cast<uint32_t const *>(specializationInfo->pData) + e.offset / sizeof(uint32_t);
 			specializations.emplace(e.constantID,
-									std::vector<uint32_t>{value_ptr, value_ptr + e.size / sizeof(uint32_t)});
+			                        std::vector<uint32_t>{ value_ptr, value_ptr + e.size / sizeof(uint32_t) });
 		}
 		opt.RegisterPass(spvtools::CreateSetSpecConstantDefaultValuePass(specializations));
 	}
@@ -198,20 +198,21 @@
 	std::vector<uint32_t> optimized;
 	opt.Run(code.data(), code.size(), &optimized);
 
-	if(false) {
+	if(false)
+	{
 		spvtools::SpirvTools core(SPV_ENV_VULKAN_1_1);
 		std::string preOpt;
 		core.Disassemble(code, &preOpt, SPV_BINARY_TO_TEXT_OPTION_NONE);
 		std::string postOpt;
 		core.Disassemble(optimized, &postOpt, SPV_BINARY_TO_TEXT_OPTION_NONE);
 		std::cout << "PRE-OPT: " << preOpt << std::endl
-		 		<< "POST-OPT: " << postOpt << std::endl;
+		          << "POST-OPT: " << postOpt << std::endl;
 	}
 
 	return optimized;
 }
 
-std::shared_ptr<sw::SpirvShader> createShader(const vk::PipelineCache::SpirvShaderKey& key, const vk::ShaderModule *module, bool robustBufferAccess)
+std::shared_ptr<sw::SpirvShader> createShader(const vk::PipelineCache::SpirvShaderKey &key, const vk::ShaderModule *module, bool robustBufferAccess)
 {
 	auto code = preprocessSpirv(key.getInsns(), key.getSpecializationInfo());
 	ASSERT(code.size() > 0);
@@ -222,10 +223,10 @@
 
 	// TODO(b/119409619): use allocator.
 	return std::make_shared<sw::SpirvShader>(codeSerialID, key.getPipelineStage(), key.getEntryPointName().c_str(),
-		code, key.getRenderPass(), key.getSubpassIndex(), robustBufferAccess);
+	                                         code, key.getRenderPass(), key.getSubpassIndex(), robustBufferAccess);
 }
 
-std::shared_ptr<sw::ComputeProgram> createProgram(const vk::PipelineCache::ComputeProgramKey& key)
+std::shared_ptr<sw::ComputeProgram> createProgram(const vk::PipelineCache::ComputeProgramKey &key)
 {
 	MARL_SCOPED_EVENT("createProgram");
 
@@ -237,25 +238,25 @@
 	return program;
 }
 
-} // anonymous namespace
+}  // anonymous namespace
 
 namespace vk {
 
 Pipeline::Pipeline(PipelineLayout const *layout, const Device *device)
-	: layout(layout),
-	  robustBufferAccess(device->getEnabledFeatures().robustBufferAccess)
+    : layout(layout)
+    , robustBufferAccess(device->getEnabledFeatures().robustBufferAccess)
 {
 }
 
-GraphicsPipeline::GraphicsPipeline(const VkGraphicsPipelineCreateInfo* pCreateInfo, void* mem, const Device *device)
-	: Pipeline(vk::Cast(pCreateInfo->layout), device)
+GraphicsPipeline::GraphicsPipeline(const VkGraphicsPipelineCreateInfo *pCreateInfo, void *mem, const Device *device)
+    : Pipeline(vk::Cast(pCreateInfo->layout), device)
 {
 	context.robustBufferAccess = robustBufferAccess;
 
 	if(((pCreateInfo->flags &
-		~(VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT |
-	      VK_PIPELINE_CREATE_DERIVATIVE_BIT |
-	      VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) != 0) ||
+	     ~(VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT |
+	       VK_PIPELINE_CREATE_DERIVATIVE_BIT |
+	       VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) != 0) ||
 	   (pCreateInfo->pTessellationState != nullptr))
 	{
 		UNIMPLEMENTED("pCreateInfo settings");
@@ -268,25 +269,25 @@
 			VkDynamicState dynamicState = pCreateInfo->pDynamicState->pDynamicStates[i];
 			switch(dynamicState)
 			{
-			case VK_DYNAMIC_STATE_VIEWPORT:
-			case VK_DYNAMIC_STATE_SCISSOR:
-			case VK_DYNAMIC_STATE_LINE_WIDTH:
-			case VK_DYNAMIC_STATE_DEPTH_BIAS:
-			case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
-			case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
-			case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
-			case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
-			case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
-				ASSERT(dynamicState < (sizeof(dynamicStateFlags) * 8));
-				dynamicStateFlags |= (1 << dynamicState);
-				break;
-			default:
-				UNIMPLEMENTED("dynamic state");
+				case VK_DYNAMIC_STATE_VIEWPORT:
+				case VK_DYNAMIC_STATE_SCISSOR:
+				case VK_DYNAMIC_STATE_LINE_WIDTH:
+				case VK_DYNAMIC_STATE_DEPTH_BIAS:
+				case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
+				case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
+				case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
+				case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
+				case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
+					ASSERT(dynamicState < (sizeof(dynamicStateFlags) * 8));
+					dynamicStateFlags |= (1 << dynamicState);
+					break;
+				default:
+					UNIMPLEMENTED("dynamic state");
 			}
 		}
 	}
 
-	const VkPipelineVertexInputStateCreateInfo* vertexInputState = pCreateInfo->pVertexInputState;
+	const VkPipelineVertexInputStateCreateInfo *vertexInputState = pCreateInfo->pVertexInputState;
 	if(vertexInputState->flags != 0)
 	{
 		UNIMPLEMENTED("vertexInputState->flags");
@@ -301,15 +302,15 @@
 	uint32_t instanceStrides[MAX_VERTEX_INPUT_BINDINGS];
 	for(uint32_t i = 0; i < vertexInputState->vertexBindingDescriptionCount; i++)
 	{
-		auto const & desc = vertexInputState->pVertexBindingDescriptions[i];
+		auto const &desc = vertexInputState->pVertexBindingDescriptions[i];
 		vertexStrides[desc.binding] = desc.inputRate == VK_VERTEX_INPUT_RATE_VERTEX ? desc.stride : 0;
 		instanceStrides[desc.binding] = desc.inputRate == VK_VERTEX_INPUT_RATE_INSTANCE ? desc.stride : 0;
 	}
 
 	for(uint32_t i = 0; i < vertexInputState->vertexAttributeDescriptionCount; i++)
 	{
-		auto const & desc = vertexInputState->pVertexAttributeDescriptions[i];
-		sw::Stream& input = context.input[desc.location];
+		auto const &desc = vertexInputState->pVertexAttributeDescriptions[i];
+		sw::Stream &input = context.input[desc.location];
 		input.count = getNumberOfChannels(desc.format);
 		input.type = getStreamType(desc.format);
 		input.normalized = !vk::Format(desc.format).isNonNormalizedInteger();
@@ -319,7 +320,7 @@
 		input.instanceStride = instanceStrides[desc.binding];
 	}
 
-	const VkPipelineInputAssemblyStateCreateInfo* assemblyState = pCreateInfo->pInputAssemblyState;
+	const VkPipelineInputAssemblyStateCreateInfo *assemblyState = pCreateInfo->pInputAssemblyState;
 	if(assemblyState->flags != 0)
 	{
 		UNIMPLEMENTED("pCreateInfo->pInputAssemblyState settings");
@@ -328,12 +329,12 @@
 	primitiveRestartEnable = (assemblyState->primitiveRestartEnable != VK_FALSE);
 	context.topology = assemblyState->topology;
 
-	const VkPipelineViewportStateCreateInfo* viewportState = pCreateInfo->pViewportState;
+	const VkPipelineViewportStateCreateInfo *viewportState = pCreateInfo->pViewportState;
 	if(viewportState)
 	{
 		if((viewportState->flags != 0) ||
-			(viewportState->viewportCount != 1) ||
-			(viewportState->scissorCount != 1))
+		   (viewportState->viewportCount != 1) ||
+		   (viewportState->scissorCount != 1))
 		{
 			UNIMPLEMENTED("pCreateInfo->pViewportState settings");
 		}
@@ -349,7 +350,7 @@
 		}
 	}
 
-	const VkPipelineRasterizationStateCreateInfo* rasterizationState = pCreateInfo->pRasterizationState;
+	const VkPipelineRasterizationStateCreateInfo *rasterizationState = pCreateInfo->pRasterizationState;
 	if((rasterizationState->flags != 0) ||
 	   (rasterizationState->depthClampEnable != VK_FALSE))
 	{
@@ -363,7 +364,7 @@
 	context.depthBias = (rasterizationState->depthBiasEnable != VK_FALSE) ? rasterizationState->depthBiasConstantFactor : 0.0f;
 	context.slopeDepthBias = (rasterizationState->depthBiasEnable != VK_FALSE) ? rasterizationState->depthBiasSlopeFactor : 0.0f;
 
-	const VkBaseInStructure* extensionCreateInfo = reinterpret_cast<const VkBaseInStructure*>(rasterizationState->pNext);
+	const VkBaseInStructure *extensionCreateInfo = reinterpret_cast<const VkBaseInStructure *>(rasterizationState->pNext);
 	while(extensionCreateInfo)
 	{
 		// Casting to a long since some structures, such as
@@ -371,40 +372,40 @@
 		// are not enumerated in the official Vulkan header
 		switch((long)(extensionCreateInfo->sType))
 		{
-		case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
-		{
-			const VkPipelineRasterizationLineStateCreateInfoEXT* lineStateCreateInfo = reinterpret_cast<const VkPipelineRasterizationLineStateCreateInfoEXT*>(extensionCreateInfo);
-			context.lineRasterizationMode = lineStateCreateInfo->lineRasterizationMode;
-		}
-		break;
-		case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT:
-		{
-			const VkPipelineRasterizationProvokingVertexStateCreateInfoEXT* provokingVertexModeCreateInfo =
-				reinterpret_cast<const VkPipelineRasterizationProvokingVertexStateCreateInfoEXT*>(extensionCreateInfo);
-			context.provokingVertexMode = provokingVertexModeCreateInfo->provokingVertexMode;
-		}
-		break;
-		default:
-			WARN("pCreateInfo->pRasterizationState->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+			case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
+			{
+				const VkPipelineRasterizationLineStateCreateInfoEXT *lineStateCreateInfo = reinterpret_cast<const VkPipelineRasterizationLineStateCreateInfoEXT *>(extensionCreateInfo);
+				context.lineRasterizationMode = lineStateCreateInfo->lineRasterizationMode;
+			}
 			break;
+			case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT:
+			{
+				const VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *provokingVertexModeCreateInfo =
+				    reinterpret_cast<const VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *>(extensionCreateInfo);
+				context.provokingVertexMode = provokingVertexModeCreateInfo->provokingVertexMode;
+			}
+			break;
+			default:
+				WARN("pCreateInfo->pRasterizationState->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+				break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
 	}
 
-	const VkPipelineMultisampleStateCreateInfo* multisampleState = pCreateInfo->pMultisampleState;
+	const VkPipelineMultisampleStateCreateInfo *multisampleState = pCreateInfo->pMultisampleState;
 	if(multisampleState)
 	{
 		switch(multisampleState->rasterizationSamples)
 		{
-		case VK_SAMPLE_COUNT_1_BIT:
-			context.sampleCount = 1;
-			break;
-		case VK_SAMPLE_COUNT_4_BIT:
-			context.sampleCount = 4;
-			break;
-		default:
-			UNIMPLEMENTED("Unsupported sample count");
+			case VK_SAMPLE_COUNT_1_BIT:
+				context.sampleCount = 1;
+				break;
+			case VK_SAMPLE_COUNT_4_BIT:
+				context.sampleCount = 4;
+				break;
+			default:
+				UNIMPLEMENTED("Unsupported sample count");
 		}
 
 		if(multisampleState->pSampleMask)
@@ -415,8 +416,8 @@
 		context.alphaToCoverage = (multisampleState->alphaToCoverageEnable == VK_TRUE);
 
 		if((multisampleState->flags != 0) ||
-			(multisampleState->sampleShadingEnable != VK_FALSE) ||
-			(multisampleState->alphaToOneEnable != VK_FALSE))
+		   (multisampleState->sampleShadingEnable != VK_FALSE) ||
+		   (multisampleState->alphaToOneEnable != VK_FALSE))
 		{
 			UNIMPLEMENTED("multisampleState");
 		}
@@ -426,7 +427,7 @@
 		context.sampleCount = 1;
 	}
 
-	const VkPipelineDepthStencilStateCreateInfo* depthStencilState = pCreateInfo->pDepthStencilState;
+	const VkPipelineDepthStencilStateCreateInfo *depthStencilState = pCreateInfo->pDepthStencilState;
 	if(depthStencilState)
 	{
 		if((depthStencilState->flags != 0) ||
@@ -448,7 +449,7 @@
 		}
 	}
 
-	const VkPipelineColorBlendStateCreateInfo* colorBlendState = pCreateInfo->pColorBlendState;
+	const VkPipelineColorBlendStateCreateInfo *colorBlendState = pCreateInfo->pColorBlendState;
 	if(colorBlendState)
 	{
 		if((colorBlendState->flags != 0) ||
@@ -467,66 +468,66 @@
 
 		for(auto i = 0u; i < colorBlendState->attachmentCount; i++)
 		{
-			const VkPipelineColorBlendAttachmentState& attachment = colorBlendState->pAttachments[i];
+			const VkPipelineColorBlendAttachmentState &attachment = colorBlendState->pAttachments[i];
 			context.colorWriteMask[i] = attachment.colorWriteMask;
 
 			context.setBlendState(i, { (attachment.blendEnable == VK_TRUE),
-				attachment.srcColorBlendFactor, attachment.dstColorBlendFactor, attachment.colorBlendOp,
-				attachment.srcAlphaBlendFactor, attachment.dstAlphaBlendFactor, attachment.alphaBlendOp });
+			                           attachment.srcColorBlendFactor, attachment.dstColorBlendFactor, attachment.colorBlendOp,
+			                           attachment.srcAlphaBlendFactor, attachment.dstAlphaBlendFactor, attachment.alphaBlendOp });
 		}
 	}
 
-	context.multiSampleMask = context.sampleMask & ((unsigned) 0xFFFFFFFF >> (32 - context.sampleCount));
+	context.multiSampleMask = context.sampleMask & ((unsigned)0xFFFFFFFF >> (32 - context.sampleCount));
 }
 
-void GraphicsPipeline::destroyPipeline(const VkAllocationCallbacks* pAllocator)
+void GraphicsPipeline::destroyPipeline(const VkAllocationCallbacks *pAllocator)
 {
 	vertexShader.reset();
 	fragmentShader.reset();
 }
 
-size_t GraphicsPipeline::ComputeRequiredAllocationSize(const VkGraphicsPipelineCreateInfo* pCreateInfo)
+size_t GraphicsPipeline::ComputeRequiredAllocationSize(const VkGraphicsPipelineCreateInfo *pCreateInfo)
 {
 	return 0;
 }
 
-void GraphicsPipeline::setShader(const VkShaderStageFlagBits& stage, const std::shared_ptr<sw::SpirvShader> spirvShader)
+void GraphicsPipeline::setShader(const VkShaderStageFlagBits &stage, const std::shared_ptr<sw::SpirvShader> spirvShader)
 {
 	switch(stage)
 	{
-	case VK_SHADER_STAGE_VERTEX_BIT:
-		ASSERT(vertexShader.get() == nullptr);
-		vertexShader = spirvShader;
-		context.vertexShader = vertexShader.get();
-		break;
+		case VK_SHADER_STAGE_VERTEX_BIT:
+			ASSERT(vertexShader.get() == nullptr);
+			vertexShader = spirvShader;
+			context.vertexShader = vertexShader.get();
+			break;
 
-	case VK_SHADER_STAGE_FRAGMENT_BIT:
-		ASSERT(fragmentShader.get() == nullptr);
-		fragmentShader = spirvShader;
-		context.pixelShader = fragmentShader.get();
-		break;
+		case VK_SHADER_STAGE_FRAGMENT_BIT:
+			ASSERT(fragmentShader.get() == nullptr);
+			fragmentShader = spirvShader;
+			context.pixelShader = fragmentShader.get();
+			break;
 
-	default:
-		UNSUPPORTED("Unsupported stage");
-		break;
+		default:
+			UNSUPPORTED("Unsupported stage");
+			break;
 	}
 }
 
-const std::shared_ptr<sw::SpirvShader> GraphicsPipeline::getShader(const VkShaderStageFlagBits& stage) const
+const std::shared_ptr<sw::SpirvShader> GraphicsPipeline::getShader(const VkShaderStageFlagBits &stage) const
 {
 	switch(stage)
 	{
-	case VK_SHADER_STAGE_VERTEX_BIT:
-		return vertexShader;
-	case VK_SHADER_STAGE_FRAGMENT_BIT:
-		return fragmentShader;
-	default:
-		UNSUPPORTED("Unsupported stage");
-		return fragmentShader;
+		case VK_SHADER_STAGE_VERTEX_BIT:
+			return vertexShader;
+		case VK_SHADER_STAGE_FRAGMENT_BIT:
+			return fragmentShader;
+		default:
+			UNSUPPORTED("Unsupported stage");
+			return fragmentShader;
 	}
 }
 
-void GraphicsPipeline::compileShaders(const VkAllocationCallbacks* pAllocator, const VkGraphicsPipelineCreateInfo* pCreateInfo, PipelineCache* pPipelineCache)
+void GraphicsPipeline::compileShaders(const VkAllocationCallbacks *pAllocator, const VkGraphicsPipelineCreateInfo *pCreateInfo, PipelineCache *pPipelineCache)
 {
 	for(auto pStage = pCreateInfo->pStages; pStage != pCreateInfo->pStages + pCreateInfo->stageCount; pStage++)
 	{
@@ -543,10 +544,10 @@
 
 		if(pPipelineCache)
 		{
-			PipelineCache& pipelineCache = *pPipelineCache;
+			PipelineCache &pipelineCache = *pPipelineCache;
 			{
 				std::unique_lock<std::mutex> lock(pipelineCache.getShaderMutex());
-				const std::shared_ptr<sw::SpirvShader>* spirvShader = pipelineCache[key];
+				const std::shared_ptr<sw::SpirvShader> *spirvShader = pipelineCache[key];
 				if(!spirvShader)
 				{
 					auto shader = createShader(key, module, robustBufferAccess);
@@ -571,41 +572,41 @@
 {
 	switch(context.topology)
 	{
-	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
-		return vertexCount;
-	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
-		return vertexCount / 2;
-	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
-		return std::max<uint32_t>(vertexCount, 1) - 1;
-	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
-		return vertexCount / 3;
-	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
-		return std::max<uint32_t>(vertexCount, 2) - 2;
-	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
-		return std::max<uint32_t>(vertexCount, 2) - 2;
-	default:
-		UNIMPLEMENTED("context.topology %d", int(context.topology));
+		case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
+			return vertexCount;
+		case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
+			return vertexCount / 2;
+		case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
+			return std::max<uint32_t>(vertexCount, 1) - 1;
+		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
+			return vertexCount / 3;
+		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
+			return std::max<uint32_t>(vertexCount, 2) - 2;
+		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
+			return std::max<uint32_t>(vertexCount, 2) - 2;
+		default:
+			UNIMPLEMENTED("context.topology %d", int(context.topology));
 	}
 
 	return 0;
 }
 
-const sw::Context& GraphicsPipeline::getContext() const
+const sw::Context &GraphicsPipeline::getContext() const
 {
 	return context;
 }
 
-const VkRect2D& GraphicsPipeline::getScissor() const
+const VkRect2D &GraphicsPipeline::getScissor() const
 {
 	return scissor;
 }
 
-const VkViewport& GraphicsPipeline::getViewport() const
+const VkViewport &GraphicsPipeline::getViewport() const
 {
 	return viewport;
 }
 
-const sw::Color<float>& GraphicsPipeline::getBlendConstants() const
+const sw::Color<float> &GraphicsPipeline::getBlendConstants() const
 {
 	return blendConstants;
 }
@@ -615,23 +616,23 @@
 	return (dynamicStateFlags & (1 << dynamicState)) != 0;
 }
 
-ComputePipeline::ComputePipeline(const VkComputePipelineCreateInfo* pCreateInfo, void* mem, const Device *device)
-	: Pipeline(vk::Cast(pCreateInfo->layout), device)
+ComputePipeline::ComputePipeline(const VkComputePipelineCreateInfo *pCreateInfo, void *mem, const Device *device)
+    : Pipeline(vk::Cast(pCreateInfo->layout), device)
 {
 }
 
-void ComputePipeline::destroyPipeline(const VkAllocationCallbacks* pAllocator)
+void ComputePipeline::destroyPipeline(const VkAllocationCallbacks *pAllocator)
 {
 	shader.reset();
 	program.reset();
 }
 
-size_t ComputePipeline::ComputeRequiredAllocationSize(const VkComputePipelineCreateInfo* pCreateInfo)
+size_t ComputePipeline::ComputeRequiredAllocationSize(const VkComputePipelineCreateInfo *pCreateInfo)
 {
 	return 0;
 }
 
-void ComputePipeline::compileShaders(const VkAllocationCallbacks* pAllocator, const VkComputePipelineCreateInfo* pCreateInfo, PipelineCache* pPipelineCache)
+void ComputePipeline::compileShaders(const VkAllocationCallbacks *pAllocator, const VkComputePipelineCreateInfo *pCreateInfo, PipelineCache *pPipelineCache)
 {
 	auto &stage = pCreateInfo->stage;
 	const ShaderModule *module = vk::Cast(stage.module);
@@ -640,13 +641,13 @@
 	ASSERT(program.get() == nullptr);
 
 	const PipelineCache::SpirvShaderKey shaderKey(
-		stage.stage, stage.pName, module->getCode(), nullptr, 0, stage.pSpecializationInfo);
+	    stage.stage, stage.pName, module->getCode(), nullptr, 0, stage.pSpecializationInfo);
 	if(pPipelineCache)
 	{
-		PipelineCache& pipelineCache = *pPipelineCache;
+		PipelineCache &pipelineCache = *pPipelineCache;
 		{
 			std::unique_lock<std::mutex> lock(pipelineCache.getShaderMutex());
-			const std::shared_ptr<sw::SpirvShader>* spirvShader = pipelineCache[shaderKey];
+			const std::shared_ptr<sw::SpirvShader> *spirvShader = pipelineCache[shaderKey];
 			if(!spirvShader)
 			{
 				shader = createShader(shaderKey, module, robustBufferAccess);
@@ -661,7 +662,7 @@
 		{
 			const PipelineCache::ComputeProgramKey programKey(shader.get(), layout);
 			std::unique_lock<std::mutex> lock(pipelineCache.getProgramMutex());
-			const std::shared_ptr<sw::ComputeProgram>* computeProgram = pipelineCache[programKey];
+			const std::shared_ptr<sw::ComputeProgram> *computeProgram = pipelineCache[programKey];
 			if(!computeProgram)
 			{
 				program = createProgram(programKey);
@@ -682,16 +683,16 @@
 }
 
 void ComputePipeline::run(uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ,
-	uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ,
-	vk::DescriptorSet::Bindings const &descriptorSets,
-	vk::DescriptorSet::DynamicOffsets const &descriptorDynamicOffsets,
-	sw::PushConstantStorage const &pushConstants)
+                          uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ,
+                          vk::DescriptorSet::Bindings const &descriptorSets,
+                          vk::DescriptorSet::DynamicOffsets const &descriptorDynamicOffsets,
+                          sw::PushConstantStorage const &pushConstants)
 {
 	ASSERT_OR_RETURN(program != nullptr);
 	program->run(
-		descriptorSets, descriptorDynamicOffsets, pushConstants,
-		baseGroupX, baseGroupY, baseGroupZ,
-		groupCountX, groupCountY, groupCountZ);
+	    descriptorSets, descriptorDynamicOffsets, pushConstants,
+	    baseGroupX, baseGroupY, baseGroupZ,
+	    groupCountX, groupCountY, groupCountZ);
 }
 
 }  // namespace vk
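
A behavioral quirk is visible in the preprocessSpirv hunk above: none of the cases in the message-consumer switch ends in break or return, so a FATAL message falls through and is logged again at every lower severity down to the default. The reformat preserves this faithfully, since clang-format only touches layout. A minimal sketch of the same consumer with explicit breaks, assuming the fall-through is unintentional rather than a deliberate cascade (this fragment drops into the surrounding function and reuses its vk::warn/vk::trace helpers):

	opt.SetMessageConsumer([](spv_message_level_t level, const char *, const spv_position_t &p, const char *m) {
		switch(level)
		{
			case SPV_MSG_FATAL: vk::warn("SPIR-V FATAL: %d:%d %s\n", int(p.line), int(p.column), m); break;
			case SPV_MSG_INTERNAL_ERROR: vk::warn("SPIR-V INTERNAL_ERROR: %d:%d %s\n", int(p.line), int(p.column), m); break;
			case SPV_MSG_ERROR: vk::warn("SPIR-V ERROR: %d:%d %s\n", int(p.line), int(p.column), m); break;
			case SPV_MSG_WARNING: vk::warn("SPIR-V WARNING: %d:%d %s\n", int(p.line), int(p.column), m); break;
			case SPV_MSG_INFO: vk::trace("SPIR-V INFO: %d:%d %s\n", int(p.line), int(p.column), m); break;
			case SPV_MSG_DEBUG: vk::trace("SPIR-V DEBUG: %d:%d %s\n", int(p.line), int(p.column), m); break;
			default: vk::trace("SPIR-V MESSAGE: %d:%d %s\n", int(p.line), int(p.column), m); break;
		}
	});
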
diff --git a/src/Vulkan/VkPipeline.hpp b/src/Vulkan/VkPipeline.hpp
index 1d50226..ad52c8e 100644
--- a/src/Vulkan/VkPipeline.hpp
+++ b/src/Vulkan/VkPipeline.hpp
@@ -16,9 +16,9 @@
 #define VK_PIPELINE_HPP_
 
 #include "VkObject.hpp"
+#include "Device/Renderer.hpp"
 #include "Vulkan/VkDescriptorSet.hpp"
 #include "Vulkan/VkPipelineCache.hpp"
-#include "Device/Renderer.hpp"
 #include <memory>
 
 namespace sw {
@@ -46,22 +46,25 @@
 		return vk::TtoVkT<Pipeline, VkPipeline>(this);
 	}
 
-	static inline Pipeline* Cast(VkPipeline object)
+	static inline Pipeline *Cast(VkPipeline object)
 	{
 		return vk::VkTtoT<Pipeline, VkPipeline>(object);
 	}
 
-	void destroy(const VkAllocationCallbacks* pAllocator)
+	void destroy(const VkAllocationCallbacks *pAllocator)
 	{
 		destroyPipeline(pAllocator);
 	}
 
-	virtual void destroyPipeline(const VkAllocationCallbacks* pAllocator) = 0;
+	virtual void destroyPipeline(const VkAllocationCallbacks *pAllocator) = 0;
 #ifndef NDEBUG
 	virtual VkPipelineBindPoint bindPoint() const = 0;
 #endif
 
-	PipelineLayout const * getLayout() const { return layout; }
+	PipelineLayout const *getLayout() const
+	{
+		return layout;
+	}
 
 protected:
 	PipelineLayout const *layout = nullptr;
@@ -72,10 +75,10 @@
 class GraphicsPipeline : public Pipeline, public ObjectBase<GraphicsPipeline, VkPipeline>
 {
 public:
-	GraphicsPipeline(const VkGraphicsPipelineCreateInfo* pCreateInfo, void* mem, const Device *device);
+	GraphicsPipeline(const VkGraphicsPipelineCreateInfo *pCreateInfo, void *mem, const Device *device);
 	virtual ~GraphicsPipeline() = default;
 
-	void destroyPipeline(const VkAllocationCallbacks* pAllocator) override;
+	void destroyPipeline(const VkAllocationCallbacks *pAllocator) override;
 
 #ifndef NDEBUG
 	VkPipelineBindPoint bindPoint() const override
@@ -84,21 +87,21 @@
 	}
 #endif
 
-	static size_t ComputeRequiredAllocationSize(const VkGraphicsPipelineCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkGraphicsPipelineCreateInfo *pCreateInfo);
 
-	void compileShaders(const VkAllocationCallbacks* pAllocator, const VkGraphicsPipelineCreateInfo* pCreateInfo, PipelineCache* pipelineCache);
+	void compileShaders(const VkAllocationCallbacks *pAllocator, const VkGraphicsPipelineCreateInfo *pCreateInfo, PipelineCache *pipelineCache);
 
 	uint32_t computePrimitiveCount(uint32_t vertexCount) const;
-	const sw::Context& getContext() const;
-	const VkRect2D& getScissor() const;
-	const VkViewport& getViewport() const;
-	const sw::Color<float>& getBlendConstants() const;
+	const sw::Context &getContext() const;
+	const VkRect2D &getScissor() const;
+	const VkViewport &getViewport() const;
+	const sw::Color<float> &getBlendConstants() const;
 	bool hasDynamicState(VkDynamicState dynamicState) const;
 	bool hasPrimitiveRestartEnable() const { return primitiveRestartEnable; }
 
 private:
-	void setShader(const VkShaderStageFlagBits& stage, const std::shared_ptr<sw::SpirvShader> spirvShader);
-	const std::shared_ptr<sw::SpirvShader> getShader(const VkShaderStageFlagBits& stage) const;
+	void setShader(const VkShaderStageFlagBits &stage, const std::shared_ptr<sw::SpirvShader> spirvShader);
+	const std::shared_ptr<sw::SpirvShader> getShader(const VkShaderStageFlagBits &stage) const;
 	std::shared_ptr<sw::SpirvShader> vertexShader;
 	std::shared_ptr<sw::SpirvShader> fragmentShader;
 
@@ -113,10 +116,10 @@
 class ComputePipeline : public Pipeline, public ObjectBase<ComputePipeline, VkPipeline>
 {
 public:
-	ComputePipeline(const VkComputePipelineCreateInfo* pCreateInfo, void* mem, const Device *device);
+	ComputePipeline(const VkComputePipelineCreateInfo *pCreateInfo, void *mem, const Device *device);
 	virtual ~ComputePipeline() = default;
 
-	void destroyPipeline(const VkAllocationCallbacks* pAllocator) override;
+	void destroyPipeline(const VkAllocationCallbacks *pAllocator) override;
 
 #ifndef NDEBUG
 	VkPipelineBindPoint bindPoint() const override
@@ -125,26 +128,26 @@
 	}
 #endif
 
-	static size_t ComputeRequiredAllocationSize(const VkComputePipelineCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkComputePipelineCreateInfo *pCreateInfo);
 
-	void compileShaders(const VkAllocationCallbacks* pAllocator, const VkComputePipelineCreateInfo* pCreateInfo, PipelineCache* pipelineCache);
+	void compileShaders(const VkAllocationCallbacks *pAllocator, const VkComputePipelineCreateInfo *pCreateInfo, PipelineCache *pipelineCache);
 
 	void run(uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ,
-			uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ,
-		vk::DescriptorSet::Bindings const &descriptorSets,
-		vk::DescriptorSet::DynamicOffsets const &descriptorDynamicOffsets,
-		sw::PushConstantStorage const &pushConstants);
+	         uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ,
+	         vk::DescriptorSet::Bindings const &descriptorSets,
+	         vk::DescriptorSet::DynamicOffsets const &descriptorDynamicOffsets,
+	         sw::PushConstantStorage const &pushConstants);
 
 protected:
 	std::shared_ptr<sw::SpirvShader> shader;
 	std::shared_ptr<sw::ComputeProgram> program;
 };
 
-static inline Pipeline* Cast(VkPipeline object)
+static inline Pipeline *Cast(VkPipeline object)
 {
 	return Pipeline::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_PIPELINE_HPP_
+#endif  // VK_PIPELINE_HPP_
diff --git a/src/Vulkan/VkPipelineCache.cpp b/src/Vulkan/VkPipelineCache.cpp
index 40fa5c3..9ee5723 100644
--- a/src/Vulkan/VkPipelineCache.cpp
+++ b/src/Vulkan/VkPipelineCache.cpp
@@ -17,12 +17,12 @@
 
 namespace vk {
 
-PipelineCache::SpirvShaderKey::SpecializationInfo::SpecializationInfo(const VkSpecializationInfo* specializationInfo)
+PipelineCache::SpirvShaderKey::SpecializationInfo::SpecializationInfo(const VkSpecializationInfo *specializationInfo)
 {
 	if(specializationInfo)
 	{
-		auto ptr = reinterpret_cast<VkSpecializationInfo*>(
-			allocate(sizeof(VkSpecializationInfo), REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY));
+		auto ptr = reinterpret_cast<VkSpecializationInfo *>(
+		    allocate(sizeof(VkSpecializationInfo), REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY));
 
 		info = std::shared_ptr<VkSpecializationInfo>(ptr, Deleter());
 
@@ -30,8 +30,8 @@
 		if(specializationInfo->mapEntryCount > 0)
 		{
 			size_t entriesSize = specializationInfo->mapEntryCount * sizeof(VkSpecializationMapEntry);
-			VkSpecializationMapEntry* mapEntries = reinterpret_cast<VkSpecializationMapEntry*>(
-				allocate(entriesSize, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY));
+			VkSpecializationMapEntry *mapEntries = reinterpret_cast<VkSpecializationMapEntry *>(
+			    allocate(entriesSize, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY));
 			memcpy(mapEntries, specializationInfo->pMapEntries, entriesSize);
 			info->pMapEntries = mapEntries;
 		}
@@ -39,7 +39,7 @@
 		info->dataSize = specializationInfo->dataSize;
 		if(specializationInfo->dataSize > 0)
 		{
-			void* data = allocate(specializationInfo->dataSize, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY);
+			void *data = allocate(specializationInfo->dataSize, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY);
 			memcpy(data, specializationInfo->pData, specializationInfo->dataSize);
 			info->pData = data;
 		}
@@ -50,17 +50,17 @@
 	}
 }
 
-void PipelineCache::SpirvShaderKey::SpecializationInfo::Deleter::operator() (VkSpecializationInfo* info) const
+void PipelineCache::SpirvShaderKey::SpecializationInfo::Deleter::operator()(VkSpecializationInfo *info) const
 {
 	if(info)
 	{
-		deallocate(const_cast<VkSpecializationMapEntry*>(info->pMapEntries), DEVICE_MEMORY);
-		deallocate(const_cast<void*>(info->pData), DEVICE_MEMORY);
+		deallocate(const_cast<VkSpecializationMapEntry *>(info->pMapEntries), DEVICE_MEMORY);
+		deallocate(const_cast<void *>(info->pData), DEVICE_MEMORY);
 		deallocate(info, DEVICE_MEMORY);
 	}
 }
 
-bool PipelineCache::SpirvShaderKey::SpecializationInfo::operator<(const SpecializationInfo& specializationInfo) const
+bool PipelineCache::SpirvShaderKey::SpecializationInfo::operator<(const SpecializationInfo &specializationInfo) const
 {
 	if(info && specializationInfo.info)
 	{
@@ -97,17 +97,17 @@
 }
 
 PipelineCache::SpirvShaderKey::SpirvShaderKey(const VkShaderStageFlagBits pipelineStage,
-	                                          const std::string& entryPointName,
-	                                          const std::vector<uint32_t>& insns,
-	                                          const vk::RenderPass *renderPass,
-	                                          const uint32_t subpassIndex,
-	                                          const VkSpecializationInfo* specializationInfo) :
-	pipelineStage(pipelineStage),
-	entryPointName(entryPointName),
-	insns(insns),
-	renderPass(renderPass),
-	subpassIndex(subpassIndex),
-	specializationInfo(specializationInfo)
+                                              const std::string &entryPointName,
+                                              const std::vector<uint32_t> &insns,
+                                              const vk::RenderPass *renderPass,
+                                              const uint32_t subpassIndex,
+                                              const VkSpecializationInfo *specializationInfo)
+    : pipelineStage(pipelineStage)
+    , entryPointName(entryPointName)
+    , insns(insns)
+    , renderPass(renderPass)
+    , subpassIndex(subpassIndex)
+    , specializationInfo(specializationInfo)
 {
 }
 
@@ -153,10 +153,11 @@
 	return (specializationInfo < other.specializationInfo);
 }
 
-PipelineCache::PipelineCache(const VkPipelineCacheCreateInfo* pCreateInfo, void* mem) :
-	dataSize(ComputeRequiredAllocationSize(pCreateInfo)), data(reinterpret_cast<uint8_t*>(mem))
+PipelineCache::PipelineCache(const VkPipelineCacheCreateInfo *pCreateInfo, void *mem)
+    : dataSize(ComputeRequiredAllocationSize(pCreateInfo))
+    , data(reinterpret_cast<uint8_t *>(mem))
 {
-	CacheHeader* header = reinterpret_cast<CacheHeader*>(mem);
+	CacheHeader *header = reinterpret_cast<CacheHeader *>(mem);
 	header->headerLength = sizeof(CacheHeader);
 	header->headerVersion = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
 	header->vendorID = VENDOR_ID;
@@ -175,17 +176,17 @@
 	computePrograms.clear();
 }
 
-void PipelineCache::destroy(const VkAllocationCallbacks* pAllocator)
+void PipelineCache::destroy(const VkAllocationCallbacks *pAllocator)
 {
 	vk::deallocate(data, pAllocator);
 }
 
-size_t PipelineCache::ComputeRequiredAllocationSize(const VkPipelineCacheCreateInfo* pCreateInfo)
+size_t PipelineCache::ComputeRequiredAllocationSize(const VkPipelineCacheCreateInfo *pCreateInfo)
 {
 	return pCreateInfo->initialDataSize + sizeof(CacheHeader);
 }
 
-VkResult PipelineCache::getData(size_t* pDataSize, void* pData)
+VkResult PipelineCache::getData(size_t *pDataSize, void *pData)
 {
 	if(!pData)
 	{
@@ -207,11 +208,11 @@
 	return VK_SUCCESS;
 }
 
-VkResult PipelineCache::merge(uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches)
+VkResult PipelineCache::merge(uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches)
 {
 	for(uint32_t i = 0; i < srcCacheCount; i++)
 	{
-		PipelineCache* srcCache = Cast(pSrcCaches[i]);
+		PipelineCache *srcCache = Cast(pSrcCaches[i]);
 
 		{
 			std::unique_lock<std::mutex> lock(spirvShadersMutex);
@@ -227,24 +228,24 @@
 	return VK_SUCCESS;
 }
 
-const std::shared_ptr<sw::SpirvShader>* PipelineCache::operator[](const PipelineCache::SpirvShaderKey& key) const
+const std::shared_ptr<sw::SpirvShader> *PipelineCache::operator[](const PipelineCache::SpirvShaderKey &key) const
 {
 	auto it = spirvShaders.find(key);
 	return (it != spirvShaders.end()) ? &(it->second) : nullptr;
 }
 
-void PipelineCache::insert(const PipelineCache::SpirvShaderKey& key, const std::shared_ptr<sw::SpirvShader> &shader)
+void PipelineCache::insert(const PipelineCache::SpirvShaderKey &key, const std::shared_ptr<sw::SpirvShader> &shader)
 {
 	spirvShaders[key] = shader;
 }
 
-const std::shared_ptr<sw::ComputeProgram>* PipelineCache::operator[](const PipelineCache::ComputeProgramKey& key) const
+const std::shared_ptr<sw::ComputeProgram> *PipelineCache::operator[](const PipelineCache::ComputeProgramKey &key) const
 {
 	auto it = computePrograms.find(key);
 	return (it != computePrograms.end()) ? &(it->second) : nullptr;
 }
 
-void PipelineCache::insert(const PipelineCache::ComputeProgramKey& key, const std::shared_ptr<sw::ComputeProgram> &computeProgram)
+void PipelineCache::insert(const PipelineCache::ComputeProgramKey &key, const std::shared_ptr<sw::ComputeProgram> &computeProgram)
 {
 	computePrograms[key] = computeProgram;
 }
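
The compileShaders hunks above follow a lookup-or-insert pattern against the cache: take the per-map mutex, probe operator[], and only build and insert on a miss, so concurrent pipeline creation reuses one shader or program per key. A distilled, self-contained sketch of that pattern with hypothetical names (getOrBuild, Key, Value, build); a sketch of the idiom, not SwiftShader's actual helper:

	#include <map>
	#include <memory>
	#include <mutex>

	template<typename Key, typename Value, typename Build>
	std::shared_ptr<Value> getOrBuild(std::mutex &mutex,
	                                  std::map<Key, std::shared_ptr<Value>> &cache,
	                                  const Key &key, Build &&build)
	{
		std::unique_lock<std::mutex> lock(mutex);
		auto it = cache.find(key);
		if(it != cache.end())
		{
			return it->second;  // Cache hit: reuse the previously built object.
		}
		auto value = build();  // Cache miss: build once while holding the lock.
		cache[key] = value;
		return value;
	}

As in the hunks above, the build runs under the lock, trading some contention during first-time compilation for the guarantee that a given key is only ever built once.
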
diff --git a/src/Vulkan/VkPipelineCache.hpp b/src/Vulkan/VkPipelineCache.hpp
index efe5563..42b4df6 100644
--- a/src/Vulkan/VkPipelineCache.hpp
+++ b/src/Vulkan/VkPipelineCache.hpp
@@ -40,46 +40,46 @@
 class PipelineCache : public Object<PipelineCache, VkPipelineCache>
 {
 public:
-	PipelineCache(const VkPipelineCacheCreateInfo* pCreateInfo, void* mem);
+	PipelineCache(const VkPipelineCacheCreateInfo *pCreateInfo, void *mem);
 	virtual ~PipelineCache();
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
-	static size_t ComputeRequiredAllocationSize(const VkPipelineCacheCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkPipelineCacheCreateInfo *pCreateInfo);
 
-	VkResult getData(size_t* pDataSize, void* pData);
-	VkResult merge(uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches);
+	VkResult getData(size_t *pDataSize, void *pData);
+	VkResult merge(uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches);
 
 	struct SpirvShaderKey
 	{
 		struct SpecializationInfo
 		{
-			SpecializationInfo(const VkSpecializationInfo* specializationInfo);
+			SpecializationInfo(const VkSpecializationInfo *specializationInfo);
 
-			bool operator<(const SpecializationInfo& specializationInfo) const;
+			bool operator<(const SpecializationInfo &specializationInfo) const;
 
-			const VkSpecializationInfo* get() const { return info.get(); }
+			const VkSpecializationInfo *get() const { return info.get(); }
 
 		private:
 			struct Deleter
 			{
-				void operator()(VkSpecializationInfo*) const;
+				void operator()(VkSpecializationInfo *) const;
 			};
 
 			std::shared_ptr<VkSpecializationInfo> info;
 		};
 
 		SpirvShaderKey(const VkShaderStageFlagBits pipelineStage,
-		               const std::string& entryPointName,
-		               const std::vector<uint32_t>& insns,
+		               const std::string &entryPointName,
+		               const std::vector<uint32_t> &insns,
 		               const vk::RenderPass *renderPass,
 		               const uint32_t subpassIndex,
-		               const VkSpecializationInfo* specializationInfo);
+		               const VkSpecializationInfo *specializationInfo);
 
 		bool operator<(const SpirvShaderKey &other) const;
 
-		const VkShaderStageFlagBits& getPipelineStage() const { return pipelineStage; }
-		const std::string& getEntryPointName() const { return entryPointName; }
-		const std::vector<uint32_t>& getInsns() const { return insns; }
+		const VkShaderStageFlagBits &getPipelineStage() const { return pipelineStage; }
+		const std::string &getEntryPointName() const { return entryPointName; }
+		const std::vector<uint32_t> &getInsns() const { return insns; }
 		const vk::RenderPass *getRenderPass() const { return renderPass; }
 		uint32_t getSubpassIndex() const { return subpassIndex; }
 		const VkSpecializationInfo *getSpecializationInfo() const { return specializationInfo.get(); }
@@ -93,14 +93,15 @@
 		const SpecializationInfo specializationInfo;
 	};
 
-	std::mutex& getShaderMutex() { return spirvShadersMutex; }
-	const std::shared_ptr<sw::SpirvShader>* operator[](const PipelineCache::SpirvShaderKey& key) const;
-	void insert(const PipelineCache::SpirvShaderKey& key, const std::shared_ptr<sw::SpirvShader> &shader);
+	std::mutex &getShaderMutex() { return spirvShadersMutex; }
+	const std::shared_ptr<sw::SpirvShader> *operator[](const PipelineCache::SpirvShaderKey &key) const;
+	void insert(const PipelineCache::SpirvShaderKey &key, const std::shared_ptr<sw::SpirvShader> &shader);
 
 	struct ComputeProgramKey
 	{
-		ComputeProgramKey(const sw::SpirvShader* shader, const vk::PipelineLayout* layout) :
-			shader(shader), layout(layout)
+		ComputeProgramKey(const sw::SpirvShader *shader, const vk::PipelineLayout *layout)
+		    : shader(shader)
+		    , layout(layout)
 		{}
 
 		bool operator<(const ComputeProgramKey &other) const
@@ -108,17 +109,17 @@
 			return std::tie(shader, layout) < std::tie(other.shader, other.layout);
 		}
 
-		const sw::SpirvShader* getShader() const { return shader; }
-		const vk::PipelineLayout* getLayout() const { return layout; }
+		const sw::SpirvShader *getShader() const { return shader; }
+		const vk::PipelineLayout *getLayout() const { return layout; }
 
 	private:
-		const sw::SpirvShader* shader;
-		const vk::PipelineLayout* layout;
+		const sw::SpirvShader *shader;
+		const vk::PipelineLayout *layout;
 	};
 
-	std::mutex& getProgramMutex() { return computeProgramsMutex; }
-	const std::shared_ptr<sw::ComputeProgram>* operator[](const PipelineCache::ComputeProgramKey& key) const;
-	void insert(const PipelineCache::ComputeProgramKey& key, const std::shared_ptr<sw::ComputeProgram> &computeProgram);
+	std::mutex &getProgramMutex() { return computeProgramsMutex; }
+	const std::shared_ptr<sw::ComputeProgram> *operator[](const PipelineCache::ComputeProgramKey &key) const;
+	void insert(const PipelineCache::ComputeProgramKey &key, const std::shared_ptr<sw::ComputeProgram> &computeProgram);
 
 private:
 	struct CacheHeader
@@ -127,11 +128,11 @@
 		uint32_t headerVersion;
 		uint32_t vendorID;
 		uint32_t deviceID;
-		uint8_t  pipelineCacheUUID[VK_UUID_SIZE];
+		uint8_t pipelineCacheUUID[VK_UUID_SIZE];
 	};
 
 	size_t dataSize = 0;
-	uint8_t* data   = nullptr;
+	uint8_t *data = nullptr;
 
 	std::mutex spirvShadersMutex;
 	std::map<SpirvShaderKey, std::shared_ptr<sw::SpirvShader>> spirvShaders;
@@ -140,11 +141,11 @@
 	std::map<ComputeProgramKey, std::shared_ptr<sw::ComputeProgram>> computePrograms;
 };
 
-static inline PipelineCache* Cast(VkPipelineCache object)
+static inline PipelineCache *Cast(VkPipelineCache object)
 {
 	return PipelineCache::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_PIPELINE_CACHE_HPP_
+#endif  // VK_PIPELINE_CACHE_HPP_
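
ComputeProgramKey's operator< above relies on the std::tie idiom: packing the members into a tuple of references yields a lexicographic strict weak ordering for free, which is exactly what std::map requires of its key type. The same idiom in isolation, with a hypothetical two-field key:

	#include <tuple>

	struct Key
	{
		const void *first;
		const void *second;

		bool operator<(const Key &other) const
		{
			// Lexicographic compare: 'first' decides, 'second' breaks ties.
			return std::tie(first, second) < std::tie(other.first, other.second);
		}
	};
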
diff --git a/src/Vulkan/VkPipelineLayout.cpp b/src/Vulkan/VkPipelineLayout.cpp
index 4394c7e..dfd106c 100644
--- a/src/Vulkan/VkPipelineLayout.cpp
+++ b/src/Vulkan/VkPipelineLayout.cpp
@@ -17,13 +17,14 @@
 
 namespace vk {
 
-PipelineLayout::PipelineLayout(const VkPipelineLayoutCreateInfo* pCreateInfo, void* mem)
-	: setLayoutCount(pCreateInfo->setLayoutCount), pushConstantRangeCount(pCreateInfo->pushConstantRangeCount)
+PipelineLayout::PipelineLayout(const VkPipelineLayoutCreateInfo *pCreateInfo, void *mem)
+    : setLayoutCount(pCreateInfo->setLayoutCount)
+    , pushConstantRangeCount(pCreateInfo->pushConstantRangeCount)
 {
-	char* hostMem = reinterpret_cast<char*>(mem);
+	char *hostMem = reinterpret_cast<char *>(mem);
 
-	size_t setLayoutsSize = pCreateInfo->setLayoutCount * sizeof(DescriptorSetLayout*);
-	setLayouts = reinterpret_cast<DescriptorSetLayout**>(hostMem);
+	size_t setLayoutsSize = pCreateInfo->setLayoutCount * sizeof(DescriptorSetLayout *);
+	setLayouts = reinterpret_cast<DescriptorSetLayout **>(hostMem);
 	for(uint32_t i = 0; i < pCreateInfo->setLayoutCount; i++)
 	{
 		setLayouts[i] = vk::Cast(pCreateInfo->pSetLayouts[i]);
@@ -31,11 +32,11 @@
 	hostMem += setLayoutsSize;
 
 	size_t pushConstantRangesSize = pCreateInfo->pushConstantRangeCount * sizeof(VkPushConstantRange);
-	pushConstantRanges = reinterpret_cast<VkPushConstantRange*>(hostMem);
+	pushConstantRanges = reinterpret_cast<VkPushConstantRange *>(hostMem);
 	memcpy(pushConstantRanges, pCreateInfo->pPushConstantRanges, pushConstantRangesSize);
 	hostMem += pushConstantRangesSize;
 
-	dynamicOffsetBases = reinterpret_cast<uint32_t*>(hostMem);
+	dynamicOffsetBases = reinterpret_cast<uint32_t *>(hostMem);
 	uint32_t dynamicOffsetBase = 0;
 	for(uint32_t i = 0; i < setLayoutCount; i++)
 	{
@@ -46,16 +47,16 @@
 	}
 }
 
-void PipelineLayout::destroy(const VkAllocationCallbacks* pAllocator)
+void PipelineLayout::destroy(const VkAllocationCallbacks *pAllocator)
 {
-	vk::deallocate(setLayouts, pAllocator); // pushConstantRanges are in the same allocation
+	vk::deallocate(setLayouts, pAllocator);  // pushConstantRanges are in the same allocation
 }
 
-size_t PipelineLayout::ComputeRequiredAllocationSize(const VkPipelineLayoutCreateInfo* pCreateInfo)
+size_t PipelineLayout::ComputeRequiredAllocationSize(const VkPipelineLayoutCreateInfo *pCreateInfo)
 {
-	return (pCreateInfo->setLayoutCount * sizeof(DescriptorSetLayout*)) +
+	return (pCreateInfo->setLayoutCount * sizeof(DescriptorSetLayout *)) +
 	       (pCreateInfo->pushConstantRangeCount * sizeof(VkPushConstantRange)) +
-		   (pCreateInfo->setLayoutCount * sizeof(uint32_t)); // dynamicOffsetBases
+	       (pCreateInfo->setLayoutCount * sizeof(uint32_t));  // dynamicOffsetBases
 }
 
 size_t PipelineLayout::getNumDescriptorSets() const
@@ -63,7 +64,7 @@
 	return setLayoutCount;
 }
 
-DescriptorSetLayout const* PipelineLayout::getDescriptorSetLayout(size_t descriptorSet) const
+DescriptorSetLayout const *PipelineLayout::getDescriptorSetLayout(size_t descriptorSet) const
 {
 	ASSERT(descriptorSet < setLayoutCount);
 	return setLayouts[descriptorSet];
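
ComputeRequiredAllocationSize and the PipelineLayout constructor above are two halves of one contract: the size function adds up every trailing array (set layouts, push-constant ranges, dynamic offset bases), and the constructor carves those arrays out of the single mem block in the same order by bumping a char pointer. A minimal sketch of the pairing with hypothetical types A and B, assuming the single-allocation layout shown above:

	#include <cstddef>
	#include <cstdint>

	struct A { uint32_t v; };
	struct B { uint32_t v; };

	// Size half of the contract: add up every trailing array.
	size_t requiredSize(uint32_t aCount, uint32_t bCount)
	{
		return aCount * sizeof(A) + bCount * sizeof(B);
	}

	// Carve half of the contract: slice the same arrays, in the same order,
	// out of the single block allocated with requiredSize().
	void carve(void *mem, uint32_t aCount, A *&as, B *&bs)
	{
		char *hostMem = reinterpret_cast<char *>(mem);
		as = reinterpret_cast<A *>(hostMem);
		hostMem += aCount * sizeof(A);
		bs = reinterpret_cast<B *>(hostMem);  // Assumes B's alignment is satisfied at this offset.
	}
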
diff --git a/src/Vulkan/VkPipelineLayout.hpp b/src/Vulkan/VkPipelineLayout.hpp
index 7821396..a1b4ac8 100644
--- a/src/Vulkan/VkPipelineLayout.hpp
+++ b/src/Vulkan/VkPipelineLayout.hpp
@@ -22,31 +22,31 @@
 class PipelineLayout : public Object<PipelineLayout, VkPipelineLayout>
 {
 public:
-	PipelineLayout(const VkPipelineLayoutCreateInfo* pCreateInfo, void* mem);
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	PipelineLayout(const VkPipelineLayoutCreateInfo *pCreateInfo, void *mem);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
-	static size_t ComputeRequiredAllocationSize(const VkPipelineLayoutCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkPipelineLayoutCreateInfo *pCreateInfo);
 
 	size_t getNumDescriptorSets() const;
-	DescriptorSetLayout const* getDescriptorSetLayout(size_t descriptorSet) const;
+	DescriptorSetLayout const *getDescriptorSetLayout(size_t descriptorSet) const;
 
 	// Returns the starting index into the pipeline's dynamic offsets array for
 	// the given descriptor set.
 	uint32_t getDynamicOffsetBase(size_t descriptorSet) const;
 
 private:
-	uint32_t              setLayoutCount = 0;
-	DescriptorSetLayout** setLayouts = nullptr;
-	uint32_t              pushConstantRangeCount = 0;
-	VkPushConstantRange*  pushConstantRanges = nullptr;
-	uint32_t*             dynamicOffsetBases = nullptr; // Base offset per set layout.
+	uint32_t setLayoutCount = 0;
+	DescriptorSetLayout **setLayouts = nullptr;
+	uint32_t pushConstantRangeCount = 0;
+	VkPushConstantRange *pushConstantRanges = nullptr;
+	uint32_t *dynamicOffsetBases = nullptr;  // Base offset per set layout.
 };
 
-static inline PipelineLayout* Cast(VkPipelineLayout object)
+static inline PipelineLayout *Cast(VkPipelineLayout object)
 {
 	return PipelineLayout::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_PIPELINE_LAYOUT_HPP_
+#endif  // VK_PIPELINE_LAYOUT_HPP_
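
The dynamicOffsetBases member above backs the getDynamicOffsetBase contract documented in this header: each set's base is the running total of dynamic descriptors in the sets before it (the accumulating loop body itself is elided from the hunks in VkPipelineLayout.cpp, so the per-set count is inferred from that comment). A small worked sketch under that assumption, with a hypothetical dynamicCounts input:

	#include <cstdint>

	int main()
	{
		// Hypothetical layout: three descriptor sets with 2, 0 and 3 dynamic
		// descriptors. The running total before each set is its base, so the
		// bases come out as {0, 2, 2}: set 2's dynamic offsets start at index 2.
		uint32_t dynamicCounts[] = { 2, 0, 3 };
		uint32_t bases[3];
		uint32_t base = 0;
		for(uint32_t i = 0; i < 3; i++)
		{
			bases[i] = base;
			base += dynamicCounts[i];
		}
		return 0;
	}
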
diff --git a/src/Vulkan/VkPromotedExtensions.cpp b/src/Vulkan/VkPromotedExtensions.cpp
index a48f173..b9e6e7a 100644
--- a/src/Vulkan/VkPromotedExtensions.cpp
+++ b/src/Vulkan/VkPromotedExtensions.cpp
@@ -42,38 +42,37 @@
 
 #include <Vulkan/VulkanPlatform.h>
 
-extern "C"
-{
+extern "C" {
 
 // VK_KHR_bind_memory2
-VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos)
+VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos)
 {
 	return vkBindBufferMemory2(device, bindInfoCount, pBindInfos);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos)
+VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo *pBindInfos)
 {
 	return vkBindImageMemory2(device, bindInfoCount, pBindInfos);
 }
 
 // VK_KHR_descriptor_update_template
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
 {
 	return vkCreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplateKHR(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplateKHR(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks *pAllocator)
 {
 	vkDestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData)
+VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData)
 {
 	vkUpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate, pData);
 }
 
 // VK_KHR_device_group
-VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeaturesKHR(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeaturesKHR(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
 {
 	vkGetDeviceGroupPeerMemoryFeatures(device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures);
 }
@@ -89,77 +88,77 @@
 }
 
 // VK_KHR_device_group_creation
-VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroupsKHR(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties)
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroupsKHR(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
 {
 	return vkEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
 }
 
 // VK_KHR_external_fence_capabilities
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFencePropertiesKHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFencePropertiesKHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo, VkExternalFenceProperties *pExternalFenceProperties)
 {
 	vkGetPhysicalDeviceExternalFenceProperties(physicalDevice, pExternalFenceInfo, pExternalFenceProperties);
 }
 
 // VK_KHR_external_memory_capabilities
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferPropertiesKHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferPropertiesKHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo *pExternalBufferInfo, VkExternalBufferProperties *pExternalBufferProperties)
 {
 	vkGetPhysicalDeviceExternalBufferProperties(physicalDevice, pExternalBufferInfo, pExternalBufferProperties);
 }
 
 // VK_KHR_external_semaphore_capabilities
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo, VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
 {
 	vkGetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties);
 }
 
 // VK_KHR_get_memory_requirements2
-VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements)
+VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo, VkMemoryRequirements2 *pMemoryRequirements)
 {
 	vkGetImageMemoryRequirements2(device, pInfo, pMemoryRequirements);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements)
+VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2 *pInfo, VkMemoryRequirements2 *pMemoryRequirements)
 {
 	vkGetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2KHR(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
+VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2KHR(VkDevice device, const VkImageSparseMemoryRequirementsInfo2 *pInfo, uint32_t *pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
 {
 	vkGetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
 }
 
 // VK_KHR_get_physical_device_properties2
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2KHR(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2KHR(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2 *pFeatures)
 {
 	vkGetPhysicalDeviceFeatures2(physicalDevice, pFeatures);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2KHR(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2KHR(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2 *pProperties)
 {
 	vkGetPhysicalDeviceProperties2(physicalDevice, pProperties);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2KHR(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2KHR(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2 *pFormatProperties)
 {
 	vkGetPhysicalDeviceFormatProperties2(physicalDevice, format, pFormatProperties);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, VkImageFormatProperties2 *pImageFormatProperties)
 {
 	return vkGetPhysicalDeviceImageFormatProperties2(physicalDevice, pImageFormatInfo, pImageFormatProperties);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2 *pQueueFamilyProperties)
 {
 	vkGetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2KHR(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2KHR(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
 {
 	vkGetPhysicalDeviceMemoryProperties2(physicalDevice, pMemoryProperties);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2 *pFormatInfo, uint32_t *pPropertyCount, VkSparseImageFormatProperties2 *pProperties)
 {
 	vkGetPhysicalDeviceSparseImageFormatProperties2(physicalDevice, pFormatInfo, pPropertyCount, pProperties);
 }
@@ -171,20 +170,19 @@
 }
 
 // VK_KHR_maintenance3
-VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupportKHR(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport)
+VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupportKHR(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo, VkDescriptorSetLayoutSupport *pSupport)
 {
 	vkGetDescriptorSetLayoutSupport(device, pCreateInfo, pSupport);
 }
 
 // VK_KHR_sampler_ycbcr_conversion
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion)
 {
 	return vkCreateSamplerYcbcrConversion(device, pCreateInfo, pAllocator, pYcbcrConversion);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversionKHR(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversionKHR(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks *pAllocator)
 {
 	vkDestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator);
 }
-
 }
\ No newline at end of file
diff --git a/src/Vulkan/VkQueryPool.cpp b/src/Vulkan/VkQueryPool.cpp
index f31aa8b..8dbfb26 100644
--- a/src/Vulkan/VkQueryPool.cpp
+++ b/src/Vulkan/VkQueryPool.cpp
@@ -20,7 +20,12 @@
 
 namespace vk {
 
-Query::Query() : finished(marl::Event::Mode::Manual), state(UNAVAILABLE), type(INVALID_TYPE), value(0) {}
+Query::Query()
+    : finished(marl::Event::Mode::Manual)
+    , state(UNAVAILABLE)
+    , type(INVALID_TYPE)
+    , value(0)
+{}
 
 void Query::reset()
 {
@@ -82,9 +87,10 @@
 	value += v;
 }
 
-QueryPool::QueryPool(const VkQueryPoolCreateInfo* pCreateInfo, void* mem) :
-	pool(reinterpret_cast<Query*>(mem)), type(pCreateInfo->queryType),
-	count(pCreateInfo->queryCount)
+QueryPool::QueryPool(const VkQueryPoolCreateInfo *pCreateInfo, void *mem)
+    : pool(reinterpret_cast<Query *>(mem))
+    , type(pCreateInfo->queryType)
+    , count(pCreateInfo->queryCount)
 {
 	// According to the Vulkan spec, section 34.1. Features:
 	// "pipelineStatisticsQuery specifies whether the pipeline statistics
@@ -100,22 +106,22 @@
 	// Construct all queries
 	for(uint32_t i = 0; i < count; i++)
 	{
-		new (&pool[i]) Query();
+		new(&pool[i]) Query();
 	}
 }
 
-void QueryPool::destroy(const VkAllocationCallbacks* pAllocator)
+void QueryPool::destroy(const VkAllocationCallbacks *pAllocator)
 {
 	vk::deallocate(pool, pAllocator);
 }
 
-size_t QueryPool::ComputeRequiredAllocationSize(const VkQueryPoolCreateInfo* pCreateInfo)
+size_t QueryPool::ComputeRequiredAllocationSize(const VkQueryPoolCreateInfo *pCreateInfo)
 {
 	return sizeof(Query) * pCreateInfo->queryCount;
 }
 
 VkResult QueryPool::getResults(uint32_t firstQuery, uint32_t queryCount, size_t dataSize,
-                               void* pData, VkDeviceSize stride, VkQueryResultFlags flags) const
+                               void *pData, VkDeviceSize stride, VkQueryResultFlags flags) const
 {
 	// dataSize must be large enough to contain the result of each query
 	ASSERT(static_cast<size_t>(stride * queryCount) <= dataSize);
@@ -124,7 +130,7 @@
 	ASSERT((firstQuery + queryCount) <= count);
 
 	VkResult result = VK_SUCCESS;
-	uint8_t* data = static_cast<uint8_t*>(pData);
+	uint8_t *data = static_cast<uint8_t *>(pData);
 	for(uint32_t i = firstQuery; i < (firstQuery + queryCount); i++, data += stride)
 	{
 		// If VK_QUERY_RESULT_WAIT_BIT and VK_QUERY_RESULT_PARTIAL_BIT are both not set
@@ -134,7 +140,7 @@
 		// queries if VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set.
 		auto &query = pool[i];
 
-		if(flags & VK_QUERY_RESULT_WAIT_BIT) // Must wait for query to finish
+		if(flags & VK_QUERY_RESULT_WAIT_BIT)  // Must wait for query to finish
 		{
 			query.wait();
 		}
@@ -145,29 +151,29 @@
 		if(current.state == Query::ACTIVE)
 		{
 			result = VK_NOT_READY;
-			writeResult = (flags & VK_QUERY_RESULT_PARTIAL_BIT); // Allow writing partial results
+			writeResult = (flags & VK_QUERY_RESULT_PARTIAL_BIT);  // Allow writing partial results
 		}
 
 		if(flags & VK_QUERY_RESULT_64_BIT)
 		{
-			uint64_t* result64 = reinterpret_cast<uint64_t*>(data);
+			uint64_t *result64 = reinterpret_cast<uint64_t *>(data);
 			if(writeResult)
 			{
 				result64[0] = current.value;
 			}
-			if(flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) // Output query availablity
+			if(flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)  // Output query availability
 			{
 				result64[1] = current.state;
 			}
 		}
 		else
 		{
-			uint32_t* result32 = reinterpret_cast<uint32_t*>(data);
+			uint32_t *result32 = reinterpret_cast<uint32_t *>(data);
 			if(writeResult)
 			{
 				result32[0] = static_cast<uint32_t>(current.value);
 			}
-			if(flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) // Output query availablity
+			if(flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)  // Output query availability
 			{
 				result32[1] = current.state;
 			}
@@ -213,7 +219,9 @@
 	ASSERT(type == VK_QUERY_TYPE_TIMESTAMP);
 
 	pool[query].set(std::chrono::time_point_cast<std::chrono::nanoseconds>(
-		std::chrono::system_clock::now()).time_since_epoch().count());
+	                    std::chrono::system_clock::now())
+	                    .time_since_epoch()
+	                    .count());
 }
 
 }  // namespace vk
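
For reference, the flag handling in getResults() corresponds one-to-one with what a caller sees through vkGetQueryPoolResults(). A minimal caller-side sketch, assuming valid device and queryPool handles and two recorded queries (not part of this change):

	uint64_t results[4] = {};  // per query: { value, availability }
	VkResult res = vkGetQueryPoolResults(
	    device, queryPool,
	    0, 2,  // firstQuery, queryCount
	    sizeof(results), results,
	    2 * sizeof(uint64_t),  // stride: one value plus one availability word
	    VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WITH_AVAILABILITY_BIT);
	// res is VK_NOT_READY while any query is still active, because neither
	// VK_QUERY_RESULT_WAIT_BIT nor VK_QUERY_RESULT_PARTIAL_BIT is set here.
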
diff --git a/src/Vulkan/VkQueryPool.hpp b/src/Vulkan/VkQueryPool.hpp
index 6e887b1..9c7d246 100644
--- a/src/Vulkan/VkQueryPool.hpp
+++ b/src/Vulkan/VkQueryPool.hpp
@@ -42,8 +42,8 @@
 
 	struct Data
 	{
-		State state;   // The current query state.
-		int64_t value; // The current query value.
+		State state;    // The current query state.
+		int64_t value;  // The current query value.
 	};
 
 	// reset() sets the state of the Query to UNAVAILABLE, sets the type to
@@ -92,32 +92,32 @@
 class QueryPool : public Object<QueryPool, VkQueryPool>
 {
 public:
-	QueryPool(const VkQueryPoolCreateInfo* pCreateInfo, void* mem);
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	QueryPool(const VkQueryPoolCreateInfo *pCreateInfo, void *mem);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
-	static size_t ComputeRequiredAllocationSize(const VkQueryPoolCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkQueryPoolCreateInfo *pCreateInfo);
 
 	VkResult getResults(uint32_t firstQuery, uint32_t queryCount, size_t dataSize,
-		                void* pData, VkDeviceSize stride, VkQueryResultFlags flags) const;
+	                    void *pData, VkDeviceSize stride, VkQueryResultFlags flags) const;
 	void begin(uint32_t query, VkQueryControlFlags flags);
 	void end(uint32_t query);
 	void reset(uint32_t firstQuery, uint32_t queryCount);
 
 	void writeTimestamp(uint32_t query);
 
-	inline Query* getQuery(uint32_t query) const { return &(pool[query]); }
+	inline Query *getQuery(uint32_t query) const { return &(pool[query]); }
 
 private:
-	Query* pool;
+	Query *pool;
 	VkQueryType type;
 	uint32_t count;
 };
 
-static inline QueryPool* Cast(VkQueryPool object)
+static inline QueryPool *Cast(VkQueryPool object)
 {
 	return QueryPool::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_QUERY_POOL_HPP_
+#endif  // VK_QUERY_POOL_HPP_
diff --git a/src/Vulkan/VkQueue.cpp b/src/Vulkan/VkQueue.cpp
index 36fb0a8..55ce531 100644
--- a/src/Vulkan/VkQueue.cpp
+++ b/src/Vulkan/VkQueue.cpp
@@ -12,12 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include "VkQueue.hpp"
 #include "VkCommandBuffer.hpp"
 #include "VkFence.hpp"
-#include "VkQueue.hpp"
 #include "VkSemaphore.hpp"
-#include "WSI/VkSwapchainKHR.hpp"
 #include "Device/Renderer.hpp"
+#include "WSI/VkSwapchainKHR.hpp"
 
 #include "marl/defer.h"
 #include "marl/scheduler.h"
@@ -28,7 +28,7 @@
 
 namespace {
 
-VkSubmitInfo* DeepCopySubmitInfo(uint32_t submitCount, const VkSubmitInfo* pSubmits)
+VkSubmitInfo *DeepCopySubmitInfo(uint32_t submitCount, const VkSubmitInfo *pSubmits)
 {
 	size_t submitSize = sizeof(VkSubmitInfo) * submitCount;
 	size_t totalSize = submitSize;
@@ -40,32 +40,32 @@
 		totalSize += pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer);
 	}
 
-	uint8_t* mem = static_cast<uint8_t*>(
-		vk::allocate(totalSize, vk::REQUIRED_MEMORY_ALIGNMENT, vk::DEVICE_MEMORY, vk::Fence::GetAllocationScope()));
+	uint8_t *mem = static_cast<uint8_t *>(
+	    vk::allocate(totalSize, vk::REQUIRED_MEMORY_ALIGNMENT, vk::DEVICE_MEMORY, vk::Fence::GetAllocationScope()));
 
-	auto submits = new (mem) VkSubmitInfo[submitCount];
+	auto submits = new(mem) VkSubmitInfo[submitCount];
 	memcpy(mem, pSubmits, submitSize);
 	mem += submitSize;
 
 	for(uint32_t i = 0; i < submitCount; i++)
 	{
 		size_t size = pSubmits[i].waitSemaphoreCount * sizeof(VkSemaphore);
-		submits[i].pWaitSemaphores = reinterpret_cast<const VkSemaphore*>(mem);
+		submits[i].pWaitSemaphores = reinterpret_cast<const VkSemaphore *>(mem);
 		memcpy(mem, pSubmits[i].pWaitSemaphores, size);
 		mem += size;
 
 		size = pSubmits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags);
-		submits[i].pWaitDstStageMask = reinterpret_cast<const VkPipelineStageFlags*>(mem);
+		submits[i].pWaitDstStageMask = reinterpret_cast<const VkPipelineStageFlags *>(mem);
 		memcpy(mem, pSubmits[i].pWaitDstStageMask, size);
 		mem += size;
 
 		size = pSubmits[i].signalSemaphoreCount * sizeof(VkSemaphore);
-		submits[i].pSignalSemaphores = reinterpret_cast<const VkSemaphore*>(mem);
+		submits[i].pSignalSemaphores = reinterpret_cast<const VkSemaphore *>(mem);
 		memcpy(mem, pSubmits[i].pSignalSemaphores, size);
 		mem += size;
 
 		size = pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer);
-		submits[i].pCommandBuffers = reinterpret_cast<const VkCommandBuffer*>(mem);
+		submits[i].pCommandBuffers = reinterpret_cast<const VkCommandBuffer *>(mem);
 		memcpy(mem, pSubmits[i].pCommandBuffers, size);
 		mem += size;
 	}
@@ -77,7 +77,8 @@
 
 namespace vk {
 
-Queue::Queue(Device* device, marl::Scheduler *scheduler) : device(device)
+Queue::Queue(Device *device, marl::Scheduler *scheduler)
+    : device(device)
 {
 	queueThread = std::thread(&Queue::taskLoop, this, scheduler);
 }
@@ -94,7 +95,7 @@
 	garbageCollect();
 }
 
-VkResult Queue::submit(uint32_t submitCount, const VkSubmitInfo* pSubmits, Fence* fence)
+VkResult Queue::submit(uint32_t submitCount, const VkSubmitInfo *pSubmits, Fence *fence)
 {
 	garbageCollect();
 
@@ -113,7 +114,7 @@
 	return VK_SUCCESS;
 }
 
-void Queue::submitQueue(const Task& task)
+void Queue::submitQueue(const Task &task)
 {
 	if(renderer == nullptr)
 	{
@@ -122,7 +123,7 @@
 
 	for(uint32_t i = 0; i < task.submitCount; i++)
 	{
-		auto& submitInfo = task.pSubmits[i];
+		auto &submitInfo = task.pSubmits[i];
 		for(uint32_t j = 0; j < submitInfo.waitSemaphoreCount; j++)
 		{
 			vk::Cast(submitInfo.pWaitSemaphores[j])->wait(submitInfo.pWaitDstStageMask[j]);
@@ -158,7 +159,7 @@
 	}
 }
 
-void Queue::taskLoop(marl::Scheduler* scheduler)
+void Queue::taskLoop(marl::Scheduler *scheduler)
 {
 	marl::Thread::setName("Queue<%p>", this);
 	scheduler->bind();
@@ -170,15 +171,15 @@
 
 		switch(task.type)
 		{
-		case Task::KILL_THREAD:
-			ASSERT_MSG(pending.count() == 0, "queue has remaining work!");
-			return;
-		case Task::SUBMIT_QUEUE:
-			submitQueue(task);
-			break;
-		default:
-			UNIMPLEMENTED("task.type %d", static_cast<int>(task.type));
-			break;
+			case Task::KILL_THREAD:
+				ASSERT_MSG(pending.count() == 0, "queue has remaining work!");
+				return;
+			case Task::SUBMIT_QUEUE:
+				submitQueue(task);
+				break;
+			default:
+				UNIMPLEMENTED("task.type %d", static_cast<int>(task.type));
+				break;
 		}
 	}
 }
@@ -211,7 +212,7 @@
 }
 
 #ifndef __ANDROID__
-VkResult Queue::present(const VkPresentInfoKHR* presentInfo)
+VkResult Queue::present(const VkPresentInfoKHR *presentInfo)
 {
 	// This is a hack to deal with screen tearing for now.
 	// Need to correctly implement threading using VkSemaphore
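
DeepCopySubmitInfo() above depends on a single allocation carved up by a moving cursor, so the whole copy is later released with one deallocate() call. The shape of that pattern, reduced to a hypothetical one-array struct (sketch only; names are illustrative):

	#include <cstdint>
	#include <cstdlib>
	#include <cstring>
	#include <new>

	struct Blob
	{
		const uint32_t *a;
		std::size_t aCount;
	};

	Blob *deepCopy(const Blob &src)
	{
		std::size_t totalSize = sizeof(Blob) + src.aCount * sizeof(uint32_t);
		uint8_t *mem = static_cast<uint8_t *>(std::malloc(totalSize));

		Blob *copy = new(mem) Blob(src);  // header goes first
		mem += sizeof(Blob);

		std::memcpy(mem, src.a, src.aCount * sizeof(uint32_t));  // then the array
		copy->a = reinterpret_cast<const uint32_t *>(mem);

		return copy;  // std::free(copy) releases header and array together
	}
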
diff --git a/src/Vulkan/VkQueue.hpp b/src/Vulkan/VkQueue.hpp
index e5c600e..022a73d 100644
--- a/src/Vulkan/VkQueue.hpp
+++ b/src/Vulkan/VkQueue.hpp
@@ -17,12 +17,14 @@
 
 #include "VkObject.hpp"
 #include "Device/Renderer.hpp"
-#include <thread>
 #include <vulkan/vk_icd.h>
+#include <thread>
 
 #include "System/Synchronization.hpp"
 
-namespace marl { class Scheduler; }
+namespace marl {
+class Scheduler;
+}
 
 namespace sw {
 
@@ -41,7 +43,7 @@
 	VK_LOADER_DATA loaderData = { ICD_LOADER_MAGIC };
 
 public:
-	Queue(Device* device, marl::Scheduler *scheduler);
+	Queue(Device *device, marl::Scheduler *scheduler);
 	~Queue();
 
 	operator VkQueue()
@@ -49,39 +51,43 @@
 		return reinterpret_cast<VkQueue>(this);
 	}
 
-	VkResult submit(uint32_t submitCount, const VkSubmitInfo* pSubmits, Fence* fence);
+	VkResult submit(uint32_t submitCount, const VkSubmitInfo *pSubmits, Fence *fence);
 	VkResult waitIdle();
 #ifndef __ANDROID__
-	VkResult present(const VkPresentInfoKHR* presentInfo);
+	VkResult present(const VkPresentInfoKHR *presentInfo);
 #endif
 
 private:
 	struct Task
 	{
 		uint32_t submitCount = 0;
-		VkSubmitInfo* pSubmits = nullptr;
-		sw::TaskEvents* events = nullptr;
+		VkSubmitInfo *pSubmits = nullptr;
+		sw::TaskEvents *events = nullptr;
 
-		enum Type { KILL_THREAD, SUBMIT_QUEUE };
+		enum Type
+		{
+			KILL_THREAD,
+			SUBMIT_QUEUE
+		};
 		Type type = SUBMIT_QUEUE;
 	};
 
-	void taskLoop(marl::Scheduler* scheduler);
+	void taskLoop(marl::Scheduler *scheduler);
 	void garbageCollect();
-	void submitQueue(const Task& task);
+	void submitQueue(const Task &task);
 
-	Device* device;
+	Device *device;
 	std::unique_ptr<sw::Renderer> renderer;
 	sw::Chan<Task> pending;
-	sw::Chan<VkSubmitInfo*> toDelete;
+	sw::Chan<VkSubmitInfo *> toDelete;
 	std::thread queueThread;
 };
 
-static inline Queue* Cast(VkQueue object)
+static inline Queue *Cast(VkQueue object)
 {
-	return reinterpret_cast<Queue*>(object);
+	return reinterpret_cast<Queue *>(object);
 }
 
 }  // namespace vk
 
-#endif // VK_QUEUE_HPP_
+#endif  // VK_QUEUE_HPP_
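
The Task/Chan plumbing above is a single-consumer work loop: submissions are pushed onto a channel and one dedicated thread drains it until it sees the KILL_THREAD sentinel. Restated with the standard library, since sw::Chan is SwiftShader-internal (a minimal sketch, not the actual implementation):

	#include <condition_variable>
	#include <mutex>
	#include <queue>

	struct Worker
	{
		struct Task
		{
			enum Type
			{
				KILL_THREAD,
				SUBMIT_QUEUE
			};
			Type type = SUBMIT_QUEUE;
		};

		void put(Task task)
		{
			{
				std::lock_guard<std::mutex> lock(mutex);
				tasks.push(task);
			}
			added.notify_one();
		}

		void taskLoop()
		{
			for(;;)
			{
				std::unique_lock<std::mutex> lock(mutex);
				added.wait(lock, [this] { return !tasks.empty(); });
				Task task = tasks.front();
				tasks.pop();

				if(task.type == Task::KILL_THREAD)
				{
					return;  // mirrors the sentinel handling in Queue::taskLoop()
				}
				// ... process the SUBMIT_QUEUE task ...
			}
		}

		std::mutex mutex;
		std::condition_variable added;
		std::queue<Task> tasks;
	};
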
diff --git a/src/Vulkan/VkRenderPass.cpp b/src/Vulkan/VkRenderPass.cpp
index 3fc4d90..914c1d7 100644
--- a/src/Vulkan/VkRenderPass.cpp
+++ b/src/Vulkan/VkRenderPass.cpp
@@ -18,18 +18,18 @@
 
 namespace vk {
 
-RenderPass::RenderPass(const VkRenderPassCreateInfo* pCreateInfo, void* mem) :
-	attachmentCount(pCreateInfo->attachmentCount),
-	subpassCount(pCreateInfo->subpassCount),
-	dependencyCount(pCreateInfo->dependencyCount)
+RenderPass::RenderPass(const VkRenderPassCreateInfo *pCreateInfo, void *mem)
+    : attachmentCount(pCreateInfo->attachmentCount)
+    , subpassCount(pCreateInfo->subpassCount)
+    , dependencyCount(pCreateInfo->dependencyCount)
 {
-	char* hostMemory = reinterpret_cast<char*>(mem);
+	char *hostMemory = reinterpret_cast<char *>(mem);
 
 	// subpassCount must be greater than 0
 	ASSERT(pCreateInfo->subpassCount > 0);
 
 	size_t subpassesSize = pCreateInfo->subpassCount * sizeof(VkSubpassDescription);
-	subpasses = reinterpret_cast<VkSubpassDescription*>(hostMemory);
+	subpasses = reinterpret_cast<VkSubpassDescription *>(hostMemory);
 	memcpy(subpasses, pCreateInfo->pSubpasses, subpassesSize);
 	hostMemory += subpassesSize;
 	uint32_t *masks = reinterpret_cast<uint32_t *>(hostMemory);
@@ -38,12 +38,12 @@
 	if(pCreateInfo->attachmentCount > 0)
 	{
 		size_t attachmentSize = pCreateInfo->attachmentCount * sizeof(VkAttachmentDescription);
-		attachments = reinterpret_cast<VkAttachmentDescription*>(hostMemory);
+		attachments = reinterpret_cast<VkAttachmentDescription *>(hostMemory);
 		memcpy(attachments, pCreateInfo->pAttachments, attachmentSize);
 		hostMemory += attachmentSize;
 
 		size_t firstUseSize = pCreateInfo->attachmentCount * sizeof(int);
-		attachmentFirstUse = reinterpret_cast<int*>(hostMemory);
+		attachmentFirstUse = reinterpret_cast<int *>(hostMemory);
 		hostMemory += firstUseSize;
 
 		attachmentViewMasks = reinterpret_cast<uint32_t *>(hostMemory);
@@ -55,29 +55,29 @@
 		}
 	}
 
-	const VkBaseInStructure* extensionCreateInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+	const VkBaseInStructure *extensionCreateInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 	while(extensionCreateInfo)
 	{
 		switch(extensionCreateInfo->sType)
 		{
-		case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
-		{
-			// Renderpass uses multiview if this structure is present AND some subpass specifies
-			// a nonzero view mask
-			auto const *multiviewCreateInfo = reinterpret_cast<VkRenderPassMultiviewCreateInfo const *>(extensionCreateInfo);
-			for(auto i = 0u; i < pCreateInfo->subpassCount; i++)
+			case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
 			{
-				masks[i] = multiviewCreateInfo->pViewMasks[i];
-				// This is now a multiview renderpass, so make the masks available
-				if(masks[i])
-					viewMasks = masks;
-			}
+				// Renderpass uses multiview if this structure is present AND some subpass specifies
+				// a nonzero view mask
+				auto const *multiviewCreateInfo = reinterpret_cast<VkRenderPassMultiviewCreateInfo const *>(extensionCreateInfo);
+				for(auto i = 0u; i < pCreateInfo->subpassCount; i++)
+				{
+					masks[i] = multiviewCreateInfo->pViewMasks[i];
+					// This is now a multiview renderpass, so make the masks available
+					if(masks[i])
+						viewMasks = masks;
+				}
 
-			break;
-		}
-		default:
-			WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
-			break;
+				break;
+			}
+			default:
+				WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+				break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
@@ -86,7 +86,7 @@
 	// Deep copy subpasses
 	for(uint32_t i = 0; i < pCreateInfo->subpassCount; ++i)
 	{
-		const auto& subpass = pCreateInfo->pSubpasses[i];
+		const auto &subpass = pCreateInfo->pSubpasses[i];
 		subpasses[i].pInputAttachments = nullptr;
 		subpasses[i].pColorAttachments = nullptr;
 		subpasses[i].pResolveAttachments = nullptr;
@@ -96,8 +96,8 @@
 		if(subpass.inputAttachmentCount > 0)
 		{
 			size_t inputAttachmentsSize = subpass.inputAttachmentCount * sizeof(VkAttachmentReference);
-			subpasses[i].pInputAttachments = reinterpret_cast<VkAttachmentReference*>(hostMemory);
-			memcpy(const_cast<VkAttachmentReference*>(subpasses[i].pInputAttachments),
+			subpasses[i].pInputAttachments = reinterpret_cast<VkAttachmentReference *>(hostMemory);
+			memcpy(const_cast<VkAttachmentReference *>(subpasses[i].pInputAttachments),
 			       pCreateInfo->pSubpasses[i].pInputAttachments, inputAttachmentsSize);
 			hostMemory += inputAttachmentsSize;
 
@@ -111,15 +111,15 @@
 		if(subpass.colorAttachmentCount > 0)
 		{
 			size_t colorAttachmentsSize = subpass.colorAttachmentCount * sizeof(VkAttachmentReference);
-			subpasses[i].pColorAttachments = reinterpret_cast<VkAttachmentReference*>(hostMemory);
-			memcpy(const_cast<VkAttachmentReference*>(subpasses[i].pColorAttachments),
+			subpasses[i].pColorAttachments = reinterpret_cast<VkAttachmentReference *>(hostMemory);
+			memcpy(const_cast<VkAttachmentReference *>(subpasses[i].pColorAttachments),
 			       subpass.pColorAttachments, colorAttachmentsSize);
 			hostMemory += colorAttachmentsSize;
 
 			if(subpass.pResolveAttachments)
 			{
-				subpasses[i].pResolveAttachments = reinterpret_cast<VkAttachmentReference*>(hostMemory);
-				memcpy(const_cast<VkAttachmentReference*>(subpasses[i].pResolveAttachments),
+				subpasses[i].pResolveAttachments = reinterpret_cast<VkAttachmentReference *>(hostMemory);
+				memcpy(const_cast<VkAttachmentReference *>(subpasses[i].pResolveAttachments),
 				       subpass.pResolveAttachments, colorAttachmentsSize);
 				hostMemory += colorAttachmentsSize;
 			}
@@ -129,16 +129,16 @@
 				if(subpass.pColorAttachments[j].attachment != VK_ATTACHMENT_UNUSED)
 					MarkFirstUse(subpass.pColorAttachments[j].attachment, i);
 				if(subpass.pResolveAttachments &&
-					subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED)
+				   subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED)
 					MarkFirstUse(subpass.pResolveAttachments[j].attachment, i);
 			}
 		}
 
 		if(subpass.pDepthStencilAttachment)
 		{
-			subpasses[i].pDepthStencilAttachment = reinterpret_cast<VkAttachmentReference*>(hostMemory);
-			memcpy(const_cast<VkAttachmentReference*>(subpasses[i].pDepthStencilAttachment),
-				subpass.pDepthStencilAttachment, sizeof(VkAttachmentReference));
+			subpasses[i].pDepthStencilAttachment = reinterpret_cast<VkAttachmentReference *>(hostMemory);
+			memcpy(const_cast<VkAttachmentReference *>(subpasses[i].pDepthStencilAttachment),
+			       subpass.pDepthStencilAttachment, sizeof(VkAttachmentReference));
 			hostMemory += sizeof(VkAttachmentReference);
 
 			if(subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)
@@ -148,8 +148,8 @@
 		if(subpass.preserveAttachmentCount > 0)
 		{
 			size_t preserveAttachmentSize = subpass.preserveAttachmentCount * sizeof(uint32_t);
-			subpasses[i].pPreserveAttachments = reinterpret_cast<uint32_t*>(hostMemory);
-			memcpy(const_cast<uint32_t*>(subpasses[i].pPreserveAttachments),
+			subpasses[i].pPreserveAttachments = reinterpret_cast<uint32_t *>(hostMemory);
+			memcpy(const_cast<uint32_t *>(subpasses[i].pPreserveAttachments),
 			       pCreateInfo->pSubpasses[i].pPreserveAttachments, preserveAttachmentSize);
 			hostMemory += preserveAttachmentSize;
 
@@ -164,25 +164,24 @@
 	if(pCreateInfo->dependencyCount > 0)
 	{
 		size_t dependenciesSize = pCreateInfo->dependencyCount * sizeof(VkSubpassDependency);
-		dependencies = reinterpret_cast<VkSubpassDependency*>(hostMemory);
+		dependencies = reinterpret_cast<VkSubpassDependency *>(hostMemory);
 		memcpy(dependencies, pCreateInfo->pDependencies, dependenciesSize);
 	}
 }
 
-void RenderPass::destroy(const VkAllocationCallbacks* pAllocator)
+void RenderPass::destroy(const VkAllocationCallbacks *pAllocator)
 {
-	vk::deallocate(subpasses, pAllocator); // attachments and dependencies are in the same allocation
+	vk::deallocate(subpasses, pAllocator);  // attachments and dependencies are in the same allocation
 }
 
-size_t RenderPass::ComputeRequiredAllocationSize(const VkRenderPassCreateInfo* pCreateInfo)
+size_t RenderPass::ComputeRequiredAllocationSize(const VkRenderPassCreateInfo *pCreateInfo)
 {
-	size_t attachmentSize = pCreateInfo->attachmentCount * sizeof(VkAttachmentDescription)
-			+ pCreateInfo->attachmentCount * sizeof(int)			// first use
-			+ pCreateInfo->attachmentCount * sizeof(uint32_t);		// union of subpass view masks, per attachment
+	size_t attachmentSize = pCreateInfo->attachmentCount * sizeof(VkAttachmentDescription) + pCreateInfo->attachmentCount * sizeof(int)  // first use
+	                        + pCreateInfo->attachmentCount * sizeof(uint32_t);                                                           // union of subpass view masks, per attachment
 	size_t subpassesSize = 0;
 	for(uint32_t i = 0; i < pCreateInfo->subpassCount; ++i)
 	{
-		const auto& subpass = pCreateInfo->pSubpasses[i];
+		const auto &subpass = pCreateInfo->pSubpasses[i];
 		uint32_t nbAttachments = subpass.inputAttachmentCount + subpass.colorAttachmentCount;
 		if(subpass.pResolveAttachments)
 		{
@@ -195,14 +194,14 @@
 		subpassesSize += sizeof(VkSubpassDescription) +
 		                 sizeof(VkAttachmentReference) * nbAttachments +
 		                 sizeof(uint32_t) * subpass.preserveAttachmentCount +
-		                 sizeof(uint32_t);			// view mask
+		                 sizeof(uint32_t);  // view mask
 	}
 	size_t dependenciesSize = pCreateInfo->dependencyCount * sizeof(VkSubpassDependency);
 
 	return attachmentSize + subpassesSize + dependenciesSize;
 }
 
-void RenderPass::getRenderAreaGranularity(VkExtent2D* pGranularity) const
+void RenderPass::getRenderAreaGranularity(VkExtent2D *pGranularity) const
 {
 	pGranularity->width = 1;
 	pGranularity->height = 1;
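
As a worked example of the sizing above: for one subpass with a single color attachment, two attachments in total, no resolve or depth/stencil attachment and no dependencies, the allocation breaks down as (illustrative only):

	attachmentSize   = 2 * (sizeof(VkAttachmentDescription) + sizeof(int) + sizeof(uint32_t))
	subpassesSize    = sizeof(VkSubpassDescription)
	                 + 1 * sizeof(VkAttachmentReference)  // the color attachment
	                 + sizeof(uint32_t)                   // view mask
	dependenciesSize = 0

The constructor then carves the subpass descriptions, per-subpass view masks, attachment descriptions, first-use indices and per-attachment view masks out of this one block, in that order.
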
diff --git a/src/Vulkan/VkRenderPass.hpp b/src/Vulkan/VkRenderPass.hpp
index 70cf2a8..2c2fe85 100644
--- a/src/Vulkan/VkRenderPass.hpp
+++ b/src/Vulkan/VkRenderPass.hpp
@@ -24,12 +24,12 @@
 class RenderPass : public Object<RenderPass, VkRenderPass>
 {
 public:
-	RenderPass(const VkRenderPassCreateInfo* pCreateInfo, void* mem);
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	RenderPass(const VkRenderPassCreateInfo *pCreateInfo, void *mem);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
-	static size_t ComputeRequiredAllocationSize(const VkRenderPassCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkRenderPassCreateInfo *pCreateInfo);
 
-	void getRenderAreaGranularity(VkExtent2D* pGranularity) const;
+	void getRenderAreaGranularity(VkExtent2D *pGranularity) const;
 
 	uint32_t getAttachmentCount() const
 	{
@@ -46,7 +46,7 @@
 		return subpassCount;
 	}
 
-	VkSubpassDescription const& getSubpass(uint32_t subpassIndex) const
+	VkSubpassDescription const &getSubpass(uint32_t subpassIndex) const
 	{
 		return subpasses[subpassIndex];
 	}
@@ -82,24 +82,24 @@
 	}
 
 private:
-	uint32_t                 attachmentCount = 0;
-	VkAttachmentDescription* attachments = nullptr;
-	uint32_t                 subpassCount = 0;
-	VkSubpassDescription*    subpasses = nullptr;
-	uint32_t                 dependencyCount = 0;
-	VkSubpassDependency*     dependencies = nullptr;
-	int*                     attachmentFirstUse = nullptr;
-	uint32_t*                viewMasks = nullptr;
-	uint32_t*                attachmentViewMasks = nullptr;
+	uint32_t attachmentCount = 0;
+	VkAttachmentDescription *attachments = nullptr;
+	uint32_t subpassCount = 0;
+	VkSubpassDescription *subpasses = nullptr;
+	uint32_t dependencyCount = 0;
+	VkSubpassDependency *dependencies = nullptr;
+	int *attachmentFirstUse = nullptr;
+	uint32_t *viewMasks = nullptr;
+	uint32_t *attachmentViewMasks = nullptr;
 
 	void MarkFirstUse(int attachment, int subpass);
 };
 
-static inline RenderPass* Cast(VkRenderPass object)
+static inline RenderPass *Cast(VkRenderPass object)
 {
 	return RenderPass::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_RENDER_PASS_HPP_
\ No newline at end of file
+#endif  // VK_RENDER_PASS_HPP_
\ No newline at end of file
diff --git a/src/Vulkan/VkSampler.cpp b/src/Vulkan/VkSampler.cpp
index c19515b..df62cc2 100644
--- a/src/Vulkan/VkSampler.cpp
+++ b/src/Vulkan/VkSampler.cpp
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-
 #include "VkSampler.hpp"
 
 namespace vk {
diff --git a/src/Vulkan/VkSampler.hpp b/src/Vulkan/VkSampler.hpp
index 042a7c7..5a6ecf0 100644
--- a/src/Vulkan/VkSampler.hpp
+++ b/src/Vulkan/VkSampler.hpp
@@ -27,27 +27,27 @@
 class Sampler : public Object<Sampler, VkSampler>
 {
 public:
-	Sampler(const VkSamplerCreateInfo* pCreateInfo, void* mem, const vk::SamplerYcbcrConversion *ycbcrConversion) :
-		magFilter(pCreateInfo->magFilter),
-		minFilter(pCreateInfo->minFilter),
-		mipmapMode(pCreateInfo->mipmapMode),
-		addressModeU(pCreateInfo->addressModeU),
-		addressModeV(pCreateInfo->addressModeV),
-		addressModeW(pCreateInfo->addressModeW),
-		mipLodBias(pCreateInfo->mipLodBias),
-		anisotropyEnable(pCreateInfo->anisotropyEnable),
-		maxAnisotropy(pCreateInfo->maxAnisotropy),
-		compareEnable(pCreateInfo->compareEnable),
-		compareOp(pCreateInfo->compareOp),
-		minLod(ClampLod(pCreateInfo->minLod)),
-		maxLod(ClampLod(pCreateInfo->maxLod)),
-		borderColor(pCreateInfo->borderColor),
-		unnormalizedCoordinates(pCreateInfo->unnormalizedCoordinates),
-		ycbcrConversion(ycbcrConversion)
+	Sampler(const VkSamplerCreateInfo *pCreateInfo, void *mem, const vk::SamplerYcbcrConversion *ycbcrConversion)
+	    : magFilter(pCreateInfo->magFilter)
+	    , minFilter(pCreateInfo->minFilter)
+	    , mipmapMode(pCreateInfo->mipmapMode)
+	    , addressModeU(pCreateInfo->addressModeU)
+	    , addressModeV(pCreateInfo->addressModeV)
+	    , addressModeW(pCreateInfo->addressModeW)
+	    , mipLodBias(pCreateInfo->mipLodBias)
+	    , anisotropyEnable(pCreateInfo->anisotropyEnable)
+	    , maxAnisotropy(pCreateInfo->maxAnisotropy)
+	    , compareEnable(pCreateInfo->compareEnable)
+	    , compareOp(pCreateInfo->compareOp)
+	    , minLod(ClampLod(pCreateInfo->minLod))
+	    , maxLod(ClampLod(pCreateInfo->maxLod))
+	    , borderColor(pCreateInfo->borderColor)
+	    , unnormalizedCoordinates(pCreateInfo->unnormalizedCoordinates)
+	    , ycbcrConversion(ycbcrConversion)
 	{
 	}
 
-	static size_t ComputeRequiredAllocationSize(const VkSamplerCreateInfo* pCreateInfo)
+	static size_t ComputeRequiredAllocationSize(const VkSamplerCreateInfo *pCreateInfo)
 	{
 		return 0;
 	}
@@ -58,22 +58,22 @@
 		return sw::clamp(lod, 0.0f, (float)(sw::MAX_TEXTURE_LOD));
 	}
 
-	const uint32_t             id = nextID++;
-	const VkFilter             magFilter = VK_FILTER_NEAREST;
-	const VkFilter             minFilter = VK_FILTER_NEAREST;
-	const VkSamplerMipmapMode  mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
+	const uint32_t id = nextID++;
+	const VkFilter magFilter = VK_FILTER_NEAREST;
+	const VkFilter minFilter = VK_FILTER_NEAREST;
+	const VkSamplerMipmapMode mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
 	const VkSamplerAddressMode addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
 	const VkSamplerAddressMode addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
 	const VkSamplerAddressMode addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
-	const float                mipLodBias = 0.0f;
-	const VkBool32             anisotropyEnable = VK_FALSE;
-	const float                maxAnisotropy = 0.0f;
-	const VkBool32             compareEnable = VK_FALSE;
-	const VkCompareOp          compareOp = VK_COMPARE_OP_NEVER;
-	const float                minLod = 0.0f;
-	const float                maxLod = 0.0f;
-	const VkBorderColor        borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
-	const VkBool32             unnormalizedCoordinates = VK_FALSE;
+	const float mipLodBias = 0.0f;
+	const VkBool32 anisotropyEnable = VK_FALSE;
+	const float maxAnisotropy = 0.0f;
+	const VkBool32 compareEnable = VK_FALSE;
+	const VkCompareOp compareOp = VK_COMPARE_OP_NEVER;
+	const float minLod = 0.0f;
+	const float maxLod = 0.0f;
+	const VkBorderColor borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
+	const VkBool32 unnormalizedCoordinates = VK_FALSE;
 
 	const vk::SamplerYcbcrConversion *ycbcrConversion = nullptr;
 
@@ -84,45 +84,45 @@
 class SamplerYcbcrConversion : public Object<SamplerYcbcrConversion, VkSamplerYcbcrConversion>
 {
 public:
-	SamplerYcbcrConversion(const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, void* mem) :
-		format(pCreateInfo->format),
-		ycbcrModel(pCreateInfo->ycbcrModel),
-		ycbcrRange(pCreateInfo->ycbcrRange),
-		components(ResolveIdentityMapping(pCreateInfo->components)),
-		xChromaOffset(pCreateInfo->xChromaOffset),
-		yChromaOffset(pCreateInfo->yChromaOffset),
-		chromaFilter(pCreateInfo->chromaFilter),
-		forceExplicitReconstruction(pCreateInfo->forceExplicitReconstruction)
+	SamplerYcbcrConversion(const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, void *mem)
+	    : format(pCreateInfo->format)
+	    , ycbcrModel(pCreateInfo->ycbcrModel)
+	    , ycbcrRange(pCreateInfo->ycbcrRange)
+	    , components(ResolveIdentityMapping(pCreateInfo->components))
+	    , xChromaOffset(pCreateInfo->xChromaOffset)
+	    , yChromaOffset(pCreateInfo->yChromaOffset)
+	    , chromaFilter(pCreateInfo->chromaFilter)
+	    , forceExplicitReconstruction(pCreateInfo->forceExplicitReconstruction)
 	{
 	}
 
 	~SamplerYcbcrConversion() = default;
 
-	static size_t ComputeRequiredAllocationSize(const VkSamplerYcbcrConversionCreateInfo* pCreateInfo)
+	static size_t ComputeRequiredAllocationSize(const VkSamplerYcbcrConversionCreateInfo *pCreateInfo)
 	{
 		return 0;
 	}
 
-	const VkFormat                      format = VK_FORMAT_UNDEFINED;
+	const VkFormat format = VK_FORMAT_UNDEFINED;
 	const VkSamplerYcbcrModelConversion ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
-	const VkSamplerYcbcrRange           ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
-	const VkComponentMapping            components = {VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
-	const VkChromaLocation              xChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN;
-	const VkChromaLocation              yChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN;
-	const VkFilter                      chromaFilter = VK_FILTER_NEAREST;
-	const VkBool32                      forceExplicitReconstruction = VK_FALSE;
+	const VkSamplerYcbcrRange ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
+	const VkComponentMapping components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A };
+	const VkChromaLocation xChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN;
+	const VkChromaLocation yChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN;
+	const VkFilter chromaFilter = VK_FILTER_NEAREST;
+	const VkBool32 forceExplicitReconstruction = VK_FALSE;
 };
 
-static inline Sampler* Cast(VkSampler object)
+static inline Sampler *Cast(VkSampler object)
 {
 	return Sampler::Cast(object);
 }
 
-static inline SamplerYcbcrConversion* Cast(VkSamplerYcbcrConversion object)
+static inline SamplerYcbcrConversion *Cast(VkSamplerYcbcrConversion object)
 {
 	return SamplerYcbcrConversion::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_SAMPLER_HPP_
\ No newline at end of file
+#endif  // VK_SAMPLER_HPP_
\ No newline at end of file
diff --git a/src/Vulkan/VkSemaphore.cpp b/src/Vulkan/VkSemaphore.cpp
index e921e4e..c46e399 100644
--- a/src/Vulkan/VkSemaphore.cpp
+++ b/src/Vulkan/VkSemaphore.cpp
@@ -17,15 +17,15 @@
 #include "VkConfig.h"
 
 #if SWIFTSHADER_EXTERNAL_SEMAPHORE_OPAQUE_FD
-#  if defined(__linux__) || defined(__ANDROID__)
-#    include "VkSemaphoreExternalLinux.hpp"
-#  else
-#    error "Missing VK_KHR_external_semaphore_fd implementation for this platform!"
-#  endif
+#	if defined(__linux__) || defined(__ANDROID__)
+#		include "VkSemaphoreExternalLinux.hpp"
+#	else
+#		error "Missing VK_KHR_external_semaphore_fd implementation for this platform!"
+#	endif
 #elif VK_USE_PLATFORM_FUCHSIA
-#include "VkSemaphoreExternalFuchsia.hpp"
+#	include "VkSemaphoreExternalFuchsia.hpp"
 #else
-#include "VkSemaphoreExternalNone.hpp"
+#	include "VkSemaphoreExternalNone.hpp"
 #endif
 
 #include "marl/blockingcall.h"
@@ -44,14 +44,15 @@
 public:
 	// Create a new instance. The external instance will be allocated only
 	// if the pCreateInfo->pNext chain indicates it needs to be exported.
-	Impl(const VkSemaphoreCreateInfo* pCreateInfo) {
+	Impl(const VkSemaphoreCreateInfo *pCreateInfo)
+	{
 		bool exportSemaphore = false;
-		for(const auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
-			 nextInfo != nullptr; nextInfo = nextInfo->pNext)
+		for(const auto *nextInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
+		    nextInfo != nullptr; nextInfo = nextInfo->pNext)
 		{
 			if(nextInfo->sType == VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO)
 			{
-				const auto* exportInfo = reinterpret_cast<const VkExportSemaphoreCreateInfo *>(nextInfo);
+				const auto *exportInfo = reinterpret_cast<const VkExportSemaphoreCreateInfo *>(nextInfo);
 				if(exportInfo->handleTypes != External::kExternalSemaphoreHandleType)
 				{
 					UNIMPLEMENTED("exportInfo->handleTypes");
@@ -68,7 +69,8 @@
 		}
 	}
 
-	~Impl() {
+	~Impl()
+	{
 		deallocateExternal();
 	}
 
@@ -88,7 +90,7 @@
 	// a platform-specific external->importXXX(...) method.
 	void allocateExternalNoInit()
 	{
-		external = new (externalStorage) External();
+		external = new(externalStorage) External();
 	}
 
 	void wait()
@@ -102,7 +104,7 @@
 				// call, it is assumed that this is negligible
 				// compared with the actual semaphore wait()
 				// operation.
-				marl::blocking_call([this](){
+				marl::blocking_call([this]() {
 					external->wait();
 				});
 			}
@@ -144,7 +146,7 @@
 	{
 		// Wait on the marl condition variable only.
 		std::unique_lock<std::mutex> lock(mutex);
-		condition.wait(lock, [this]{ return this->signaled; });
+		condition.wait(lock, [this] { return this->signaled; });
 		signaled = false;  // Vulkan requires resetting after waiting.
 	}
 
@@ -165,7 +167,7 @@
 	bool signaled = false;
 
 	// Optional external semaphore data might be referenced and stored here.
-	External* external = nullptr;
+	External *external = nullptr;
 
 	// Set to true if |external| comes from a temporary import.
 	bool temporaryImport = false;
@@ -173,18 +175,18 @@
 	alignas(External) char externalStorage[sizeof(External)];
 };
 
-Semaphore::Semaphore(const VkSemaphoreCreateInfo* pCreateInfo, void* mem)
+Semaphore::Semaphore(const VkSemaphoreCreateInfo *pCreateInfo, void *mem)
 {
-	impl = new (mem) Impl(pCreateInfo);
+	impl = new(mem) Impl(pCreateInfo);
 }
 
-void Semaphore::destroy(const VkAllocationCallbacks* pAllocator)
+void Semaphore::destroy(const VkAllocationCallbacks *pAllocator)
 {
 	impl->~Impl();
 	vk::deallocate(impl, pAllocator);
 }
 
-size_t Semaphore::ComputeRequiredAllocationSize(const VkSemaphoreCreateInfo* pCreateInfo)
+size_t Semaphore::ComputeRequiredAllocationSize(const VkSemaphoreCreateInfo *pCreateInfo)
 {
 	return sizeof(Semaphore::Impl);
 }
@@ -219,7 +221,7 @@
 	return result;
 }
 
-VkResult Semaphore::exportFd(int* pFd) const
+VkResult Semaphore::exportFd(int *pFd) const
 {
 	std::unique_lock<std::mutex> lock(impl->mutex);
 	if(!impl->external)
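
The loop in Impl() above is the standard Vulkan pNext-chain walk, here looking for VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO. The same walk generalizes to any structure type; a minimal helper (hypothetical, not part of this change):

	#include <vulkan/vulkan_core.h>

	const VkBaseInStructure *FindInChain(const void *pNext, VkStructureType sType)
	{
		for(const auto *s = reinterpret_cast<const VkBaseInStructure *>(pNext);
		    s != nullptr; s = s->pNext)
		{
			if(s->sType == sType)
			{
				return s;
			}
		}
		return nullptr;
	}

With such a helper, the export detection reduces to a single FindInChain(pCreateInfo->pNext, VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO) lookup.
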
diff --git a/src/Vulkan/VkSemaphore.hpp b/src/Vulkan/VkSemaphore.hpp
index 742fc8a..257c9f5 100644
--- a/src/Vulkan/VkSemaphore.hpp
+++ b/src/Vulkan/VkSemaphore.hpp
@@ -19,7 +19,7 @@
 #include "VkObject.hpp"
 
 #if VK_USE_PLATFORM_FUCHSIA
-#include <zircon/types.h>
+#	include <zircon/types.h>
 #endif
 
 namespace vk {
@@ -27,14 +27,14 @@
 class Semaphore : public Object<Semaphore, VkSemaphore>
 {
 public:
-	Semaphore(const VkSemaphoreCreateInfo* pCreateInfo, void* mem);
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	Semaphore(const VkSemaphoreCreateInfo *pCreateInfo, void *mem);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
-	static size_t ComputeRequiredAllocationSize(const VkSemaphoreCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkSemaphoreCreateInfo *pCreateInfo);
 
 	void wait();
 
-	void wait(const VkPipelineStageFlags& flag)
+	void wait(const VkPipelineStageFlags &flag)
 	{
 		// NOTE: not sure what else to do here?
 		wait();
@@ -44,7 +44,7 @@
 
 #if SWIFTSHADER_EXTERNAL_SEMAPHORE_OPAQUE_FD
 	VkResult importFd(int fd, bool temporaryImport);
-	VkResult exportFd(int* pFd) const;
+	VkResult exportFd(int *pFd) const;
 #endif
 
 #if VK_USE_PLATFORM_FUCHSIA
@@ -55,14 +55,14 @@
 private:
 	class External;
 	class Impl;
-	Impl* impl = nullptr;
+	Impl *impl = nullptr;
 };
 
-static inline Semaphore* Cast(VkSemaphore object)
+static inline Semaphore *Cast(VkSemaphore object)
 {
 	return Semaphore::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_SEMAPHORE_HPP_
+#endif  // VK_SEMAPHORE_HPP_
diff --git a/src/Vulkan/VkSemaphoreExternalFuchsia.hpp b/src/Vulkan/VkSemaphoreExternalFuchsia.hpp
index d42787a..926687c 100644
--- a/src/Vulkan/VkSemaphoreExternalFuchsia.hpp
+++ b/src/Vulkan/VkSemaphoreExternalFuchsia.hpp
@@ -26,11 +26,12 @@
 
 namespace vk {
 
-class Semaphore::External {
+class Semaphore::External
+{
 public:
 	// The type of external semaphore handle types supported by this implementation.
 	static const VkExternalSemaphoreHandleTypeFlags kExternalSemaphoreHandleType =
-			VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA;
+	    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA;
 
 	// Default constructor. Note that one should call either init() or
 	// importFd() before any call to wait() or signal().
@@ -56,7 +57,7 @@
 		handle = new_handle;
 	}
 
-	VkResult exportHandle(zx_handle_t* pHandle) const
+	VkResult exportHandle(zx_handle_t *pHandle) const
 	{
 		zx_handle_t new_handle = ZX_HANDLE_INVALID;
 		zx_status_t status = zx_handle_duplicate(handle, ZX_RIGHT_SAME_RIGHTS, &new_handle);
@@ -73,7 +74,7 @@
 	{
 		zx_signals_t observed = 0;
 		zx_status_t status = zx_object_wait_one(
-				handle, ZX_EVENT_SIGNALED, ZX_TIME_INFINITE, &observed);
+		    handle, ZX_EVENT_SIGNALED, ZX_TIME_INFINITE, &observed);
 		if(status != ZX_OK)
 		{
 			ABORT("zx_object_wait_one() returned %d", status);
@@ -94,7 +95,7 @@
 	{
 		zx_signals_t observed = 0;
 		zx_status_t status = zx_object_wait_one(
-				handle, ZX_EVENT_SIGNALED, zx_clock_get_monotonic(), &observed);
+		    handle, ZX_EVENT_SIGNALED, zx_clock_get_monotonic(), &observed);
 		if(status != ZX_OK)
 		{
 			ABORT("zx_object_wait_one() returned %d", status);
diff --git a/src/Vulkan/VkSemaphoreExternalLinux.hpp b/src/Vulkan/VkSemaphoreExternalLinux.hpp
index 47b18c1..36bac26 100644
--- a/src/Vulkan/VkSemaphoreExternalLinux.hpp
+++ b/src/Vulkan/VkSemaphoreExternalLinux.hpp
@@ -129,7 +129,8 @@
 
 namespace vk {
 
-class Semaphore::External {
+class Semaphore::External
+{
 public:
 	// The type of external semaphore handle types supported by this implementation.
 	static const VkExternalSemaphoreHandleTypeFlags kExternalSemaphoreHandleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
@@ -169,7 +170,7 @@
 	// Export the current semaphore as a duplicated file descriptor to the same
 	// region. This can be consumed by importFd() running in a different
 	// process.
-	VkResult exportFd(int* pFd) const
+	VkResult exportFd(int *pFd) const
 	{
 		int fd = memfd.exportFd();
 		if(fd < 0)
@@ -215,7 +216,7 @@
 	void mapRegion(size_t size, bool needInitialization)
 	{
 		// Map the region into memory and point the semaphore to it.
-		void* addr = memfd.mapReadWrite(0, size);
+		void *addr = memfd.mapReadWrite(0, size);
 		if(!addr)
 		{
 			ABORT("mmap() failed: %s", strerror(errno));
@@ -223,7 +224,7 @@
 		semaphore = reinterpret_cast<SharedSemaphore *>(addr);
 		if(needInitialization)
 		{
-			new (semaphore) SharedSemaphore();
+			new(semaphore) SharedSemaphore();
 		}
 		else
 		{
@@ -232,7 +233,7 @@
 	}
 
 	LinuxMemFd memfd;
-	SharedSemaphore* semaphore = nullptr;
+	SharedSemaphore *semaphore = nullptr;
 };
 
 }  // namespace vk
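
mapRegion() above captures the usual pattern for process-shared state in a mapped region: the creating process runs the constructor in place with placement new, and importing processes only reinterpret the already-initialized bytes. Reduced to a hypothetical struct (sketch only):

	#include <new>

	struct SharedState
	{
		int futexWord = 0;  // stand-in for the real shared fields
	};

	SharedState *attach(void *mapping, bool needInitialization)
	{
		if(needInitialization)
		{
			// Creator: construct once, in place, inside the shared mapping.
			return new(mapping) SharedState();
		}
		// Importer: the creator already constructed the object here.
		return static_cast<SharedState *>(mapping);
	}
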
diff --git a/src/Vulkan/VkSemaphoreExternalNone.hpp b/src/Vulkan/VkSemaphoreExternalNone.hpp
index 1b64003..9592e3f 100644
--- a/src/Vulkan/VkSemaphoreExternalNone.hpp
+++ b/src/Vulkan/VkSemaphoreExternalNone.hpp
@@ -18,7 +18,8 @@
 namespace vk {
 
 // Empty external semaphore implementation.
-class Semaphore::External {
+class Semaphore::External
+{
 public:
 	// The type of external semaphore handle types supported by this implementation.
 	static const VkExternalSemaphoreHandleTypeFlags kExternalSemaphoreHandleType = 0;
diff --git a/src/Vulkan/VkShaderModule.cpp b/src/Vulkan/VkShaderModule.cpp
index 78a5ed4..337e188 100644
--- a/src/Vulkan/VkShaderModule.cpp
+++ b/src/Vulkan/VkShaderModule.cpp
@@ -18,21 +18,22 @@
 
 namespace vk {
 
-std::atomic<uint32_t> ShaderModule::serialCounter(1);    // Start at 1, 0 is invalid shader.
+std::atomic<uint32_t> ShaderModule::serialCounter(1);  // Start at 1, 0 is invalid shader.
 
-ShaderModule::ShaderModule(const VkShaderModuleCreateInfo* pCreateInfo, void* mem)
-	: serialID(nextSerialID()), code(reinterpret_cast<uint32_t*>(mem))
+ShaderModule::ShaderModule(const VkShaderModuleCreateInfo *pCreateInfo, void *mem)
+    : serialID(nextSerialID())
+    , code(reinterpret_cast<uint32_t *>(mem))
 {
 	memcpy(code, pCreateInfo->pCode, pCreateInfo->codeSize);
 	wordCount = static_cast<uint32_t>(pCreateInfo->codeSize / sizeof(uint32_t));
 }
 
-void ShaderModule::destroy(const VkAllocationCallbacks* pAllocator)
+void ShaderModule::destroy(const VkAllocationCallbacks *pAllocator)
 {
 	vk::deallocate(code, pAllocator);
 }
 
-size_t ShaderModule::ComputeRequiredAllocationSize(const VkShaderModuleCreateInfo* pCreateInfo)
+size_t ShaderModule::ComputeRequiredAllocationSize(const VkShaderModuleCreateInfo *pCreateInfo)
 {
 	return pCreateInfo->codeSize;
 }
diff --git a/src/Vulkan/VkShaderModule.hpp b/src/Vulkan/VkShaderModule.hpp
index c7e5ff9..19dc131 100644
--- a/src/Vulkan/VkShaderModule.hpp
+++ b/src/Vulkan/VkShaderModule.hpp
@@ -20,20 +20,22 @@
 #include <atomic>
 #include <vector>
 
-namespace rr { class Routine; }
+namespace rr {
+class Routine;
+}
 
 namespace vk {
 
 class ShaderModule : public Object<ShaderModule, VkShaderModule>
 {
 public:
-	ShaderModule(const VkShaderModuleCreateInfo* pCreateInfo, void* mem);
-	void destroy(const VkAllocationCallbacks* pAllocator);
+	ShaderModule(const VkShaderModuleCreateInfo *pCreateInfo, void *mem);
+	void destroy(const VkAllocationCallbacks *pAllocator);
 
-	static size_t ComputeRequiredAllocationSize(const VkShaderModuleCreateInfo* pCreateInfo);
+	static size_t ComputeRequiredAllocationSize(const VkShaderModuleCreateInfo *pCreateInfo);
 	// TODO: reconsider boundary of ShaderModule class; try to avoid 'expose the
 	// guts' operations, and this copy.
-	std::vector<uint32_t> getCode() const { return std::vector<uint32_t>{ code, code + wordCount };}
+	std::vector<uint32_t> getCode() const { return std::vector<uint32_t>{ code, code + wordCount }; }
 
 	uint32_t getSerialID() const { return serialID; }
 	static uint32_t nextSerialID() { return serialCounter++; }
@@ -42,15 +44,15 @@
 	const uint32_t serialID;
 	static std::atomic<uint32_t> serialCounter;
 
-	uint32_t* code = nullptr;
+	uint32_t *code = nullptr;
 	uint32_t wordCount = 0;
 };
 
-static inline ShaderModule* Cast(VkShaderModule object)
+static inline ShaderModule *Cast(VkShaderModule object)
 {
 	return ShaderModule::Cast(object);
 }
 
 }  // namespace vk
 
-#endif // VK_SHADER_MODULE_HPP_
+#endif  // VK_SHADER_MODULE_HPP_
diff --git a/src/Vulkan/VkStringify.cpp b/src/Vulkan/VkStringify.cpp
index b7c0b63..f274724 100644
--- a/src/Vulkan/VkStringify.cpp
+++ b/src/Vulkan/VkStringify.cpp
@@ -33,7 +33,7 @@
 	// becomes out of date, then this function will throw a warning if someone
 	// tries to stringify that enum value.
 	static const std::map<VkStructureType, const char *> strings = {
-#define INSERT_ELEMENT(p) std::make_pair(p, #p)
+#	define INSERT_ELEMENT(p) std::make_pair(p, #    p)
 		INSERT_ELEMENT(VK_STRUCTURE_TYPE_APPLICATION_INFO),
 		INSERT_ELEMENT(VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO),
 		INSERT_ELEMENT(VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO),
@@ -454,7 +454,7 @@
 		INSERT_ELEMENT(VK_STRUCTURE_TYPE_END_RANGE),
 		INSERT_ELEMENT(VK_STRUCTURE_TYPE_RANGE_SIZE),
 		INSERT_ELEMENT(VK_STRUCTURE_TYPE_MAX_ENUM)
-#undef INSERT_ELEMENT
+#	undef INSERT_ELEMENT
 	};
 	auto it = strings.find(value);
 	if(it != strings.end())
@@ -466,9 +466,9 @@
 		WARN("Stringify(VkStructureType v) is out of date. Please update it to match vulkan/vulkan_core.h");
 		return std::to_string(value);
 	}
-#else // if not debug:
+#else  // if not debug:
 	return std::to_string(value);
 #endif
 }
 
-}
+}  // namespace vk
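
The INSERT_ELEMENT table above is the common macro-generated enum-to-string map; the idiom in isolation, with a hypothetical enum (sketch only):

	#include <map>

	enum Color
	{
		RED,
		GREEN
	};

	#define INSERT_ELEMENT(p) std::make_pair(p, #p)
	static const std::map<Color, const char *> colorNames = {
		INSERT_ELEMENT(RED),
		INSERT_ELEMENT(GREEN)
	};
	#undef INSERT_ELEMENT

	// colorNames.at(RED) yields "RED". Stringify() above uses find() instead,
	// so values missing from the table fall back to std::to_string().
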
diff --git a/src/Vulkan/libVulkan.cpp b/src/Vulkan/libVulkan.cpp
index ae9a9b8..40d3412 100644
--- a/src/Vulkan/libVulkan.cpp
+++ b/src/Vulkan/libVulkan.cpp
@@ -37,33 +37,33 @@
 #include "VkPipelineLayout.hpp"
 #include "VkQueryPool.hpp"
 #include "VkQueue.hpp"
+#include "VkRenderPass.hpp"
 #include "VkSampler.hpp"
 #include "VkSemaphore.hpp"
 #include "VkShaderModule.hpp"
 #include "VkStringify.hpp"
-#include "VkRenderPass.hpp"
 
 #if defined(VK_USE_PLATFORM_METAL_EXT) || defined(VK_USE_PLATFORM_MACOS_MVK)
-#include "WSI/MetalSurface.h"
+#	include "WSI/MetalSurface.h"
 #endif
 
 #ifdef VK_USE_PLATFORM_XCB_KHR
-#include "WSI/XcbSurfaceKHR.hpp"
+#	include "WSI/XcbSurfaceKHR.hpp"
 #endif
 
 #ifdef VK_USE_PLATFORM_XLIB_KHR
-#include "WSI/XlibSurfaceKHR.hpp"
+#	include "WSI/XlibSurfaceKHR.hpp"
 #endif
 
 #ifdef VK_USE_PLATFORM_WIN32_KHR
-#include "WSI/Win32SurfaceKHR.hpp"
+#	include "WSI/Win32SurfaceKHR.hpp"
 #endif
 
 #ifdef __ANDROID__
-#include <android/log.h>
-#include "System/GrallocAndroid.hpp"
-#include <sync/sync.h>
-#include "commit.h"
+#	include "commit.h"
+#	include "System/GrallocAndroid.hpp"
+#	include <android/log.h>
+#	include <sync/sync.h>
 #endif
 
 #include "WSI/VkSwapchainKHR.hpp"
@@ -77,11 +77,10 @@
 
 #include <algorithm>
 #include <cstring>
-#include <string>
 #include <map>
+#include <string>
 
-namespace
-{
+namespace {
 
 // Enable commit_id.py and #include commit.h for other platforms.
 #if defined(__ANDROID__) && defined(ENABLE_BUILD_VERSION_OUTPUT)
@@ -92,8 +91,7 @@
 }
 #endif  // __ANDROID__ && ENABLE_BUILD_VERSION_OUTPUT
 
-
-bool HasExtensionProperty(const char* extensionName, const VkExtensionProperties* extensionProperties, uint32_t extensionPropertiesCount)
+bool HasExtensionProperty(const char *extensionName, const VkExtensionProperties *extensionProperties, uint32_t extensionPropertiesCount)
 {
 	for(uint32_t j = 0; j < extensionPropertiesCount; ++j)
 	{
@@ -111,14 +109,14 @@
 void setReactorDefaultConfig()
 {
 	auto cfg = rr::Config::Edit()
-		.set(rr::Optimization::Level::Default)
-		.clearOptimizationPasses()
-		.add(rr::Optimization::Pass::ScalarReplAggregates)
-		.add(rr::Optimization::Pass::SCCP)
-		.add(rr::Optimization::Pass::CFGSimplification)
-		.add(rr::Optimization::Pass::EarlyCSEPass)
-		.add(rr::Optimization::Pass::CFGSimplification)
-		.add(rr::Optimization::Pass::InstructionCombining);
+	               .set(rr::Optimization::Level::Default)
+	               .clearOptimizationPasses()
+	               .add(rr::Optimization::Pass::ScalarReplAggregates)
+	               .add(rr::Optimization::Pass::SCCP)
+	               .add(rr::Optimization::Pass::CFGSimplification)
+	               .add(rr::Optimization::Pass::EarlyCSEPass)
+	               .add(rr::Optimization::Pass::CFGSimplification)
+	               .add(rr::Optimization::Pass::InstructionCombining);
 
 	rr::Nucleus::adjustDefaultConfig(cfg);
 }
@@ -166,25 +164,23 @@
 	(void)doOnce;
 }
 
-}
+}  // namespace
 
-extern "C"
-{
-VK_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(VkInstance instance, const char* pName)
+extern "C" {
+VK_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
 {
 	TRACE("(VkInstance instance = %p, const char* pName = %p)", instance, pName);
 
 	return vk::GetInstanceProcAddr(vk::Cast(instance), pName);
 }
 
-VK_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pSupportedVersion)
+VK_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
 {
 	*pSupportedVersion = 3;
 	return VK_SUCCESS;
 }
 
-static const VkExtensionProperties instanceExtensionProperties[] =
-{
+static const VkExtensionProperties instanceExtensionProperties[] = {
 	{ VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME, VK_KHR_DEVICE_GROUP_CREATION_SPEC_VERSION },
 	{ VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME, VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION },
 	{ VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME, VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION },
@@ -200,25 +196,24 @@
 	{ VK_KHR_XLIB_SURFACE_EXTENSION_NAME, VK_KHR_XLIB_SURFACE_SPEC_VERSION },
 #endif
 #ifdef VK_USE_PLATFORM_MACOS_MVK
-    { VK_MVK_MACOS_SURFACE_EXTENSION_NAME, VK_MVK_MACOS_SURFACE_SPEC_VERSION },
+	{ VK_MVK_MACOS_SURFACE_EXTENSION_NAME, VK_MVK_MACOS_SURFACE_SPEC_VERSION },
 #endif
 #ifdef VK_USE_PLATFORM_METAL_EXT
-    { VK_EXT_METAL_SURFACE_EXTENSION_NAME, VK_EXT_METAL_SURFACE_SPEC_VERSION },
+	{ VK_EXT_METAL_SURFACE_EXTENSION_NAME, VK_EXT_METAL_SURFACE_SPEC_VERSION },
 #endif
 #ifdef VK_USE_PLATFORM_WIN32_KHR
 	{ VK_KHR_WIN32_SURFACE_EXTENSION_NAME, VK_KHR_WIN32_SURFACE_SPEC_VERSION },
 #endif
 };
 
-static const VkExtensionProperties deviceExtensionProperties[] =
-{
+static const VkExtensionProperties deviceExtensionProperties[] = {
 	{ VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME, VK_KHR_DRIVER_PROPERTIES_SPEC_VERSION },
 	// Vulkan 1.1 promoted extensions
 	{ VK_KHR_16BIT_STORAGE_EXTENSION_NAME, VK_KHR_16BIT_STORAGE_SPEC_VERSION },
 	{ VK_KHR_BIND_MEMORY_2_EXTENSION_NAME, VK_KHR_BIND_MEMORY_2_SPEC_VERSION },
 	{ VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME, VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION },
 	{ VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME, VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION },
-	{ VK_KHR_DEVICE_GROUP_EXTENSION_NAME,  VK_KHR_DEVICE_GROUP_SPEC_VERSION },
+	{ VK_KHR_DEVICE_GROUP_EXTENSION_NAME, VK_KHR_DEVICE_GROUP_SPEC_VERSION },
 	{ VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME, VK_KHR_EXTERNAL_FENCE_SPEC_VERSION },
 	{ VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME, VK_KHR_EXTERNAL_MEMORY_SPEC_VERSION },
 	{ VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME, VK_KHR_EXTERNAL_SEMAPHORE_SPEC_VERSION },
@@ -260,10 +255,10 @@
 	{ VK_EXT_PROVOKING_VERTEX_EXTENSION_NAME, VK_EXT_PROVOKING_VERTEX_SPEC_VERSION },
 };
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance)
 {
 	TRACE("(const VkInstanceCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkInstance* pInstance = %p)",
-			pCreateInfo, pAllocator, pInstance);
+	      pCreateInfo, pAllocator, pInstance);
 
 	initializeLibrary();
 
@@ -283,19 +278,19 @@
 
 	if(pCreateInfo->pNext)
 	{
-		const VkBaseInStructure* createInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+		const VkBaseInStructure *createInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 		switch(createInfo->sType)
 		{
-		case VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO:
-			// According to the Vulkan spec, section 2.7.2. Implicit Valid Usage:
-			// "The values VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO and
-			//  VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO are reserved for
-			//  internal use by the loader, and do not have corresponding
-			//  Vulkan structures in this Specification."
-			break;
-		default:
-			WARN("pCreateInfo->pNext sType = %s", vk::Stringify(createInfo->sType).c_str());
-			break;
+			case VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO:
+				// According to the Vulkan spec, section 2.7.2. Implicit Valid Usage:
+				// "The values VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO and
+				//  VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO are reserved for
+				//  internal use by the loader, and do not have corresponding
+				//  Vulkan structures in this Specification."
+				break;
+			default:
+				WARN("pCreateInfo->pNext sType = %s", vk::Stringify(createInfo->sType).c_str());
+				break;
 		}
 	}
 
@@ -318,41 +313,41 @@
 	return result;
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkInstance instance = %p, const VkAllocationCallbacks* pAllocator = %p)", instance, pAllocator);
 
 	vk::destroy(instance, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices)
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices)
 {
 	TRACE("(VkInstance instance = %p, uint32_t* pPhysicalDeviceCount = %p, VkPhysicalDevice* pPhysicalDevices = %p)",
-		    instance, pPhysicalDeviceCount, pPhysicalDevices);
+	      instance, pPhysicalDeviceCount, pPhysicalDevices);
 
 	return vk::Cast(instance)->getPhysicalDevices(pPhysicalDeviceCount, pPhysicalDevices);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures *pFeatures)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, VkPhysicalDeviceFeatures* pFeatures = %p)",
-			physicalDevice, pFeatures);
+	      physicalDevice, pFeatures);
 
 	*pFeatures = vk::Cast(physicalDevice)->getFeatures();
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties *pFormatProperties)
 {
 	TRACE("GetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice = %p, VkFormat format = %d, VkFormatProperties* pFormatProperties = %p)",
-			physicalDevice, (int)format, pFormatProperties);
+	      physicalDevice, (int)format, pFormatProperties);
 
 	vk::Cast(physicalDevice)->getFormatProperties(format, pFormatProperties);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties *pImageFormatProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, VkFormat format = %d, VkImageType type = %d, VkImageTiling tiling = %d, VkImageUsageFlags usage = %d, VkImageCreateFlags flags = %d, VkImageFormatProperties* pImageFormatProperties = %p)",
-			physicalDevice, (int)format, (int)type, (int)tiling, usage, flags, pImageFormatProperties);
+	      physicalDevice, (int)format, (int)type, (int)tiling, usage, flags, pImageFormatProperties);
 
 	// "If the combination of parameters to vkGetPhysicalDeviceImageFormatProperties is not supported by the implementation
 	//  for use in vkCreateImage, then all members of VkImageFormatProperties will be filled with zero."
@@ -364,17 +359,17 @@
 	VkFormatFeatureFlags features;
 	switch(tiling)
 	{
-	case VK_IMAGE_TILING_LINEAR:
-		features = properties.linearTilingFeatures;
-		break;
+		case VK_IMAGE_TILING_LINEAR:
+			features = properties.linearTilingFeatures;
+			break;
 
-	case VK_IMAGE_TILING_OPTIMAL:
-		features = properties.optimalTilingFeatures;
-		break;
+		case VK_IMAGE_TILING_OPTIMAL:
+			features = properties.optimalTilingFeatures;
+			break;
 
-	default:
-		UNIMPLEMENTED("tiling");
-		features = 0;
+		default:
+			UNIMPLEMENTED("tiling");
+			features = 0;
 	}
 
 	if(features == 0)
@@ -419,13 +414,13 @@
 	}
 
 	auto allRecognizedUsageBits = VK_IMAGE_USAGE_SAMPLED_BIT |
-			VK_IMAGE_USAGE_STORAGE_BIT |
-			VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
-			VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
-			VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
-			VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
-			VK_IMAGE_USAGE_TRANSFER_DST_BIT |
-			VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
+	                              VK_IMAGE_USAGE_STORAGE_BIT |
+	                              VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+	                              VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
+	                              VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
+	                              VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+	                              VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+	                              VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
 	ASSERT(!(usage & ~(allRecognizedUsageBits)));
 
 	// "Images created with tiling equal to VK_IMAGE_TILING_LINEAR have further restrictions on their limits and capabilities
@@ -458,15 +453,15 @@
 	return VK_SUCCESS;
 }
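
How an application would typically consume the query implemented above, checking support before committing to vkCreateImage (a sketch; the physicalDevice handle is assumed to have been enumerated already):

VkImageFormatProperties imageFormatProperties = {};
VkResult result = vkGetPhysicalDeviceImageFormatProperties(
    physicalDevice, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D,
    VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_SAMPLED_BIT, 0, &imageFormatProperties);
if(result == VK_ERROR_FORMAT_NOT_SUPPORTED)
{
	// Per the spec quote in the implementation, imageFormatProperties is
	// all zeros here; fall back to another format instead of creating the image.
}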
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties *pProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, VkPhysicalDeviceProperties* pProperties = %p)",
-		    physicalDevice, pProperties);
+	      physicalDevice, pProperties);
 
 	*pProperties = vk::Cast(physicalDevice)->getProperties();
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties *pQueueFamilyProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, uint32_t* pQueueFamilyPropertyCount = %p, VkQueueFamilyProperties* pQueueFamilyProperties = %p))", physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);
 
@@ -480,36 +475,36 @@
 	}
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, VkPhysicalDeviceMemoryProperties* pMemoryProperties = %p)", physicalDevice, pMemoryProperties);
 
 	*pMemoryProperties = vk::Cast(physicalDevice)->getMemoryProperties();
 }
 
-VK_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char* pName)
+VK_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *pName)
 {
 	TRACE("(VkInstance instance = %p, const char* pName = %p)", instance, pName);
 
 	return vk::GetInstanceProcAddr(vk::Cast(instance), pName);
 }
 
-VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char* pName)
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char *pName)
 {
 	TRACE("(VkDevice device = %p, const char* pName = %p)", device, pName);
 
 	return vk::GetDeviceProcAddr(vk::Cast(device), pName);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDevice *pDevice)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, const VkDeviceCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkDevice* pDevice = %p)",
-		physicalDevice, pCreateInfo, pAllocator, pDevice);
+	      physicalDevice, pCreateInfo, pAllocator, pDevice);
 
 	if(pCreateInfo->enabledLayerCount)
 	{
 		// "The ppEnabledLayerNames and enabledLayerCount members of VkDeviceCreateInfo are deprecated and their values must be ignored by implementations."
-		UNIMPLEMENTED("pCreateInfo->enabledLayerCount");   // TODO(b/119321052): UNIMPLEMENTED() should be used only for features that must still be implemented. Use a more informational macro here.
+		UNIMPLEMENTED("pCreateInfo->enabledLayerCount");  // TODO(b/119321052): UNIMPLEMENTED() should be used only for features that must still be implemented. Use a more informational macro here.
 	}
 
 	uint32_t extensionPropertiesCount = sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0]);
@@ -521,7 +516,7 @@
 		}
 	}
 
-	const VkBaseInStructure* extensionCreateInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+	const VkBaseInStructure *extensionCreateInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 
 	const VkPhysicalDeviceFeatures *enabledFeatures = pCreateInfo->pEnabledFeatures;
 
@@ -532,25 +527,25 @@
 		// are not enumerated in the official Vulkan header
 		switch((long)(extensionCreateInfo->sType))
 		{
-		case VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO:
-			// According to the Vulkan spec, section 2.7.2. Implicit Valid Usage:
-			// "The values VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO and
-			//  VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO are reserved for
-			//  internal use by the loader, and do not have corresponding
-			//  Vulkan structures in this Specification."
-			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
+			case VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO:
+				// According to the Vulkan spec, section 2.7.2. Implicit Valid Usage:
+				// "The values VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO and
+				//  VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO are reserved for
+				//  internal use by the loader, and do not have corresponding
+				//  Vulkan structures in this Specification."
+				break;
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
 			{
-				ASSERT(!pCreateInfo->pEnabledFeatures);   // "If the pNext chain includes a VkPhysicalDeviceFeatures2 structure, then pEnabledFeatures must be NULL"
+				ASSERT(!pCreateInfo->pEnabledFeatures);  // "If the pNext chain includes a VkPhysicalDeviceFeatures2 structure, then pEnabledFeatures must be NULL"
 
-				const VkPhysicalDeviceFeatures2* physicalDeviceFeatures2 = reinterpret_cast<const VkPhysicalDeviceFeatures2*>(extensionCreateInfo);
+				const VkPhysicalDeviceFeatures2 *physicalDeviceFeatures2 = reinterpret_cast<const VkPhysicalDeviceFeatures2 *>(extensionCreateInfo);
 
 				enabledFeatures = &physicalDeviceFeatures2->features;
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES:
 			{
-				const VkPhysicalDeviceSamplerYcbcrConversionFeatures* samplerYcbcrConversionFeatures = reinterpret_cast<const VkPhysicalDeviceSamplerYcbcrConversionFeatures*>(extensionCreateInfo);
+				const VkPhysicalDeviceSamplerYcbcrConversionFeatures *samplerYcbcrConversionFeatures = reinterpret_cast<const VkPhysicalDeviceSamplerYcbcrConversionFeatures *>(extensionCreateInfo);
 
 				// YCbCr conversion is supported.
 				// samplerYcbcrConversionFeatures->samplerYcbcrConversion can be VK_TRUE or VK_FALSE.
@@ -559,9 +554,9 @@
 				(void)samplerYcbcrConversionFeatures->samplerYcbcrConversion;
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES:
 			{
-				const VkPhysicalDevice16BitStorageFeatures* storage16BitFeatures = reinterpret_cast<const VkPhysicalDevice16BitStorageFeatures*>(extensionCreateInfo);
+				const VkPhysicalDevice16BitStorageFeatures *storage16BitFeatures = reinterpret_cast<const VkPhysicalDevice16BitStorageFeatures *>(extensionCreateInfo);
 
 				if(storage16BitFeatures->storageBuffer16BitAccess == VK_TRUE ||
 				   storage16BitFeatures->uniformAndStorageBuffer16BitAccess == VK_TRUE ||
@@ -572,9 +567,9 @@
 				}
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES:
 			{
-				const VkPhysicalDeviceVariablePointerFeatures* variablePointerFeatures = reinterpret_cast<const VkPhysicalDeviceVariablePointerFeatures*>(extensionCreateInfo);
+				const VkPhysicalDeviceVariablePointerFeatures *variablePointerFeatures = reinterpret_cast<const VkPhysicalDeviceVariablePointerFeatures *>(extensionCreateInfo);
 
 				if(variablePointerFeatures->variablePointersStorageBuffer == VK_TRUE ||
 				   variablePointerFeatures->variablePointers == VK_TRUE)
@@ -583,9 +578,9 @@
 				}
 			}
 			break;
-		case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO:
+			case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO:
 			{
-				const VkDeviceGroupDeviceCreateInfo* groupDeviceCreateInfo = reinterpret_cast<const VkDeviceGroupDeviceCreateInfo*>(extensionCreateInfo);
+				const VkDeviceGroupDeviceCreateInfo *groupDeviceCreateInfo = reinterpret_cast<const VkDeviceGroupDeviceCreateInfo *>(extensionCreateInfo);
 
 				if((groupDeviceCreateInfo->physicalDeviceCount != 1) ||
 				   (groupDeviceCreateInfo->pPhysicalDevices[0] != physicalDevice))
@@ -594,20 +589,20 @@
 				}
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES:
 			{
-				const VkPhysicalDeviceMultiviewFeatures* multiviewFeatures = reinterpret_cast<const VkPhysicalDeviceMultiviewFeatures*>(extensionCreateInfo);
+				const VkPhysicalDeviceMultiviewFeatures *multiviewFeatures = reinterpret_cast<const VkPhysicalDeviceMultiviewFeatures *>(extensionCreateInfo);
 
 				if(multiviewFeatures->multiviewGeometryShader ||
-				    multiviewFeatures->multiviewTessellationShader)
+				   multiviewFeatures->multiviewTessellationShader)
 				{
 					return VK_ERROR_FEATURE_NOT_PRESENT;
 				}
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
 			{
-				const VkPhysicalDeviceShaderDrawParametersFeatures* shaderDrawParametersFeatures = reinterpret_cast<const VkPhysicalDeviceShaderDrawParametersFeatures*>(extensionCreateInfo);
+				const VkPhysicalDeviceShaderDrawParametersFeatures *shaderDrawParametersFeatures = reinterpret_cast<const VkPhysicalDeviceShaderDrawParametersFeatures *>(extensionCreateInfo);
 
 				if(shaderDrawParametersFeatures->shaderDrawParameters)
 				{
@@ -615,9 +610,9 @@
 				}
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
 			{
-				const VkPhysicalDeviceLineRasterizationFeaturesEXT* lineRasterizationFeatures = reinterpret_cast<const VkPhysicalDeviceLineRasterizationFeaturesEXT*>(extensionCreateInfo);
+				const VkPhysicalDeviceLineRasterizationFeaturesEXT *lineRasterizationFeatures = reinterpret_cast<const VkPhysicalDeviceLineRasterizationFeaturesEXT *>(extensionCreateInfo);
 				if((lineRasterizationFeatures->smoothLines == VK_TRUE) ||
 				   (lineRasterizationFeatures->stippledBresenhamLines == VK_TRUE) ||
 				   (lineRasterizationFeatures->stippledRectangularLines == VK_TRUE) ||
@@ -627,9 +622,9 @@
 				}
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
 			{
-				const VkPhysicalDeviceProvokingVertexFeaturesEXT* provokingVertexFeatures = reinterpret_cast<const VkPhysicalDeviceProvokingVertexFeaturesEXT*>(extensionCreateInfo);
+				const VkPhysicalDeviceProvokingVertexFeaturesEXT *provokingVertexFeatures = reinterpret_cast<const VkPhysicalDeviceProvokingVertexFeaturesEXT *>(extensionCreateInfo);
 
 				// Provoking vertex is supported.
 				// provokingVertexFeatures->provokingVertexLast can be VK_TRUE or VK_FALSE.
@@ -638,10 +633,10 @@
 				(void)provokingVertexFeatures->provokingVertexLast;
 			}
 			break;
-		default:
-			// "the [driver] must skip over, without processing (other than reading the sType and pNext members) any structures in the chain with sType values not defined by [supported extensions]"
-			WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
-			break;
+			default:
+				// "the [driver] must skip over, without processing (other than reading the sType and pNext members) any structures in the chain with sType values not defined by [supported extensions]"
+				WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+				break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
@@ -661,13 +656,13 @@
 
 	for(uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
 	{
-		const VkDeviceQueueCreateInfo& queueCreateInfo = pCreateInfo->pQueueCreateInfos[i];
+		const VkDeviceQueueCreateInfo &queueCreateInfo = pCreateInfo->pQueueCreateInfos[i];
 		if(queueCreateInfo.flags)
 		{
 			UNIMPLEMENTED("queueCreateInfo.flags");
 		}
 
-		auto extInfo = reinterpret_cast<VkBaseInStructure const*>(queueCreateInfo.pNext);
+		auto extInfo = reinterpret_cast<VkBaseInStructure const *>(queueCreateInfo.pNext);
 		while(extInfo)
 		{
 			WARN("pCreateInfo->pQueueCreateInfos[%d].pNext sType = %s", i, vk::Stringify(extInfo->sType).c_str());
@@ -675,21 +670,21 @@
 		}
 
 		ASSERT(queueCreateInfo.queueFamilyIndex < queueFamilyPropertyCount);
-		(void)queueFamilyPropertyCount; // Silence unused variable warning
+		(void)queueFamilyPropertyCount;  // Silence unused variable warning
 	}
 
 	auto scheduler = getOrCreateScheduler();
 	return vk::DispatchableDevice::Create(pAllocator, pCreateInfo, pDevice, vk::Cast(physicalDevice), enabledFeatures, scheduler);
 }
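
The VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 case above enforces the rule that a chained VkPhysicalDeviceFeatures2 replaces pEnabledFeatures. From the application side, the chain looks roughly like this (a sketch; queue setup and the vkCreateDevice call itself are elided):

VkPhysicalDeviceFeatures2 features2 = {};
features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
features2.features.robustBufferAccess = VK_TRUE;  // a feature every implementation must support

VkDeviceCreateInfo deviceInfo = {};
deviceInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
deviceInfo.pNext = &features2;
deviceInfo.pEnabledFeatures = nullptr;  // must be null when Features2 is chained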
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, const VkAllocationCallbacks* pAllocator = %p)", device, pAllocator);
 
 	vk::destroy(device, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties)
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pPropertyCount, VkExtensionProperties *pProperties)
 {
 	TRACE("(const char* pLayerName = %p, uint32_t* pPropertyCount = %p, VkExtensionProperties* pProperties = %p)",
 	      pLayerName, pPropertyCount, pProperties);
@@ -712,7 +707,7 @@
 	return (toCopy < extensionPropertiesCount) ? VK_INCOMPLETE : VK_SUCCESS;
 }
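
The pPropertyCount in/out parameter and the VK_INCOMPLETE return implement the standard two-call enumeration idiom. Application-side (a sketch, assuming <vector> is available):

#include <vector>

uint32_t count = 0;
vkEnumerateInstanceExtensionProperties(nullptr, &count, nullptr);  // first call: query count

std::vector<VkExtensionProperties> extensions(count);
VkResult result = vkEnumerateInstanceExtensionProperties(nullptr, &count, extensions.data());
// VK_SUCCESS: all entries were copied; VK_INCOMPLETE: the buffer was too small.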
 
-VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties)
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName, uint32_t *pPropertyCount, VkExtensionProperties *pProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, const char* pLayerName, uint32_t* pPropertyCount = %p, VkExtensionProperties* pProperties = %p)", physicalDevice, pPropertyCount, pProperties);
 
@@ -734,7 +729,7 @@
 	return (toCopy < extensionPropertiesCount) ? VK_INCOMPLETE : VK_SUCCESS;
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t* pPropertyCount, VkLayerProperties* pProperties)
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pPropertyCount, VkLayerProperties *pProperties)
 {
 	TRACE("(uint32_t* pPropertyCount = %p, VkLayerProperties* pProperties = %p)", pPropertyCount, pProperties);
 
@@ -747,7 +742,7 @@
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties)
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, VkLayerProperties *pProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, uint32_t* pPropertyCount = %p, VkLayerProperties* pProperties = %p)", physicalDevice, pPropertyCount, pProperties);
 
@@ -760,18 +755,18 @@
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue)
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue)
 {
 	TRACE("(VkDevice device = %p, uint32_t queueFamilyIndex = %d, uint32_t queueIndex = %d, VkQueue* pQueue = %p)",
-		    device, queueFamilyIndex, queueIndex, pQueue);
+	      device, queueFamilyIndex, queueIndex, pQueue);
 
 	*pQueue = vk::Cast(device)->getQueue(queueFamilyIndex, queueIndex);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence)
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence)
 {
 	TRACE("(VkQueue queue = %p, uint32_t submitCount = %d, const VkSubmitInfo* pSubmits = %p, VkFence fence = %p)",
-	      queue, submitCount, pSubmits, static_cast<void*>(fence));
+	      queue, submitCount, pSubmits, static_cast<void *>(fence));
 
 	return vk::Cast(queue)->submit(submitCount, pSubmits, vk::Cast(fence));
 }
@@ -790,50 +785,50 @@
 	return vk::Cast(device)->waitIdle();
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory)
+VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo, const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory)
 {
 	TRACE("(VkDevice device = %p, const VkMemoryAllocateInfo* pAllocateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkDeviceMemory* pMemory = %p)",
-		    device, pAllocateInfo, pAllocator, pMemory);
+	      device, pAllocateInfo, pAllocator, pMemory);
 
-	const VkBaseInStructure* allocationInfo = reinterpret_cast<const VkBaseInStructure*>(pAllocateInfo->pNext);
+	const VkBaseInStructure *allocationInfo = reinterpret_cast<const VkBaseInStructure *>(pAllocateInfo->pNext);
 	while(allocationInfo)
 	{
 		switch(allocationInfo->sType)
 		{
-		case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
-			// This can safely be ignored, as the Vulkan spec mentions:
-			// "If the pNext chain includes a VkMemoryDedicatedAllocateInfo structure, then that structure
-			//  includes a handle of the sole buffer or image resource that the memory *can* be bound to."
-			break;
-		case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO:
-			// This extension controls on which physical devices the memory gets allocated.
-			// SwiftShader only has a single physical device, so this extension does nothing in this case.
-			break;
+			case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
+				// This can safely be ignored, as the Vulkan spec mentions:
+				// "If the pNext chain includes a VkMemoryDedicatedAllocateInfo structure, then that structure
+				//  includes a handle of the sole buffer or image resource that the memory *can* be bound to."
+				break;
+			case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO:
+				// This extension controls on which physical devices the memory gets allocated.
+				// SwiftShader only has a single physical device, so this extension does nothing in this case.
+				break;
 #if SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
-		case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
-		{
-			auto* importInfo = reinterpret_cast<const VkImportMemoryFdInfoKHR *>(allocationInfo);
-			if(importInfo->handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
+			case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
 			{
-				UNSUPPORTED("importInfo->handleType %u", importInfo->handleType);
-				return VK_ERROR_INVALID_EXTERNAL_HANDLE;
+				auto *importInfo = reinterpret_cast<const VkImportMemoryFdInfoKHR *>(allocationInfo);
+				if(importInfo->handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
+				{
+					UNSUPPORTED("importInfo->handleType %u", importInfo->handleType);
+					return VK_ERROR_INVALID_EXTERNAL_HANDLE;
+				}
+				break;
 			}
-			break;
-		}
-		case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
-		{
-			auto* exportInfo = reinterpret_cast<const VkExportMemoryAllocateInfo *>(allocationInfo);
-			if(exportInfo->handleTypes != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
+			case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
 			{
-				UNSUPPORTED("exportInfo->handleTypes %u", exportInfo->handleTypes);
-				return VK_ERROR_INVALID_EXTERNAL_HANDLE;
+				auto *exportInfo = reinterpret_cast<const VkExportMemoryAllocateInfo *>(allocationInfo);
+				if(exportInfo->handleTypes != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
+				{
+					UNSUPPORTED("exportInfo->handleTypes %u", exportInfo->handleTypes);
+					return VK_ERROR_INVALID_EXTERNAL_HANDLE;
+				}
+				break;
 			}
-			break;
-		}
 #endif  // SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
-		default:
-			WARN("pAllocateInfo->pNext sType = %s", vk::Stringify(allocationInfo->sType).c_str());
-			break;
+			default:
+				WARN("pAllocateInfo->pNext sType = %s", vk::Stringify(allocationInfo->sType).c_str());
+				break;
 		}
 
 		allocationInfo = allocationInfo->pNext;
@@ -856,19 +851,19 @@
 	return result;
 }
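
An allocation that opts into opaque-FD export exercises the VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO case above. A sketch; device, allocationSize, and memoryTypeIndex are assumed to come from the usual requirements queries:

VkExportMemoryAllocateInfo exportInfo = {};
exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO;
exportInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;

VkMemoryAllocateInfo allocateInfo = {};
allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocateInfo.pNext = &exportInfo;
allocateInfo.allocationSize = allocationSize;
allocateInfo.memoryTypeIndex = memoryTypeIndex;

VkDeviceMemory memory = VK_NULL_HANDLE;
vkAllocateMemory(device, &allocateInfo, nullptr, &memory);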
 
-VKAPI_ATTR void VKAPI_CALL vkFreeMemory(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkFreeMemory(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkDeviceMemory memory = %p, const VkAllocationCallbacks* pAllocator = %p)",
-		    device, static_cast<void*>(memory), pAllocator);
+	      device, static_cast<void *>(memory), pAllocator);
 
 	vk::destroy(memory, pAllocator);
 }
 
 #if SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
-VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdKHR(VkDevice device, const VkMemoryGetFdInfoKHR* getFdInfo, int* pFd)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdKHR(VkDevice device, const VkMemoryGetFdInfoKHR *getFdInfo, int *pFd)
 {
 	TRACE("(VkDevice device = %p, const VkMemoryGetFdInfoKHR* getFdInfo = %p, int* pFd = %p",
-		  device, getFdInfo, pFd);
+	      device, getFdInfo, pFd);
 
 	if(getFdInfo->handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
 	{
@@ -878,10 +873,10 @@
 	return vk::Cast(getFdInfo->memory)->exportFd(pFd);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdPropertiesKHR(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdPropertiesKHR(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR *pMemoryFdProperties)
 {
 	TRACE("(VkDevice device = %p, VkExternalMemoryHandleTypeFlagBits handleType = %x, int fd = %d, VkMemoryFdPropertiesKHR* pMemoryFdProperties = %p)",
-		  device, handleType, fd, pMemoryFdProperties);
+	      device, handleType, fd, pMemoryFdProperties);
 
 	if(handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
 	{
@@ -894,8 +889,8 @@
 		return VK_ERROR_INVALID_EXTERNAL_HANDLE;
 	}
 
-	const VkPhysicalDeviceMemoryProperties& memoryProperties =
-			vk::Cast(device)->getPhysicalDevice()->getMemoryProperties();
+	const VkPhysicalDeviceMemoryProperties &memoryProperties =
+	    vk::Cast(device)->getPhysicalDevice()->getMemoryProperties();
 
 	// All SwiftShader memory types support this!
 	pMemoryFdProperties->memoryTypeBits = (1U << memoryProperties.memoryTypeCount) - 1U;
@@ -904,50 +899,50 @@
 }
 #endif  // SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
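
Pairing the two entry points above: an exported FD can be re-imported by chaining VkImportMemoryFdInfoKHR into a second allocation, which is exactly the import case handled in vkAllocateMemory. A sketch; 'memory', 'allocationSize', and 'memoryTypeIndex' carry over from the export example above:

VkMemoryGetFdInfoKHR getFdInfo = {};
getFdInfo.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
getFdInfo.memory = memory;
getFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;

int fd = -1;
vkGetMemoryFdKHR(device, &getFdInfo, &fd);

VkImportMemoryFdInfoKHR importInfo = {};
importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
importInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
importInfo.fd = fd;  // ownership of fd transfers to the driver on success

VkMemoryAllocateInfo importAllocateInfo = {};
importAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
importAllocateInfo.pNext = &importInfo;
importAllocateInfo.allocationSize = allocationSize;  // must match the exported allocation
importAllocateInfo.memoryTypeIndex = memoryTypeIndex;

VkDeviceMemory imported = VK_NULL_HANDLE;
vkAllocateMemory(device, &importAllocateInfo, nullptr, &imported);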
 
-VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData)
+VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void **ppData)
 {
 	TRACE("(VkDevice device = %p, VkDeviceMemory memory = %p, VkDeviceSize offset = %d, VkDeviceSize size = %d, VkMemoryMapFlags flags = %d, void** ppData = %p)",
-		    device, static_cast<void*>(memory), int(offset), int(size), flags, ppData);
+	      device, static_cast<void *>(memory), int(offset), int(size), flags, ppData);
 
 	return vk::Cast(memory)->map(offset, size, ppData);
 }
 
 VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory memory)
 {
-	TRACE("(VkDevice device = %p, VkDeviceMemory memory = %p)", device, static_cast<void*>(memory));
+	TRACE("(VkDevice device = %p, VkDeviceMemory memory = %p)", device, static_cast<void *>(memory));
 
 	// Noop, memory will be released when the DeviceMemory object is released
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges)
+VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange *pMemoryRanges)
 {
 	TRACE("(VkDevice device = %p, uint32_t memoryRangeCount = %d, const VkMappedMemoryRange* pMemoryRanges = %p)",
-		    device, memoryRangeCount, pMemoryRanges);
+	      device, memoryRangeCount, pMemoryRanges);
 
 	// Noop, host and device memory are the same to SwiftShader
 
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges)
+VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange *pMemoryRanges)
 {
 	TRACE("(VkDevice device = %p, uint32_t memoryRangeCount = %d, const VkMappedMemoryRange* pMemoryRanges = %p)",
-		    device, memoryRangeCount, pMemoryRanges);
+	      device, memoryRangeCount, pMemoryRanges);
 
 	// Noop, host and device memory are the same to SwiftShader
 
 	return VK_SUCCESS;
 }
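
Even though flushes are no-ops here, portable code still performs the full map/write/flush sequence, since non-coherent implementations require it. A sketch; srcData and srcSize are assumed inputs:

#include <cstring>

void *mapped = nullptr;
vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, &mapped);
memcpy(mapped, srcData, srcSize);

VkMappedMemoryRange range = {};
range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
range.memory = memory;
range.offset = 0;
range.size = VK_WHOLE_SIZE;
vkFlushMappedMemoryRanges(device, 1, &range);  // no-op on SwiftShader, required elsewhere

vkUnmapMemory(device, memory);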
 
-VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment(VkDevice pDevice, VkDeviceMemory pMemory, VkDeviceSize* pCommittedMemoryInBytes)
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment(VkDevice pDevice, VkDeviceMemory pMemory, VkDeviceSize *pCommittedMemoryInBytes)
 {
 	TRACE("(VkDevice device = %p, VkDeviceMemory memory = %p, VkDeviceSize* pCommittedMemoryInBytes = %p)",
-	      pDevice, static_cast<void*>(pMemory), pCommittedMemoryInBytes);
+	      pDevice, static_cast<void *>(pMemory), pCommittedMemoryInBytes);
 
 	auto memory = vk::Cast(pMemory);
 
 #if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
-	const auto& memoryProperties = vk::Cast(pDevice)->getPhysicalDevice()->getMemoryProperties();
+	const auto &memoryProperties = vk::Cast(pDevice)->getPhysicalDevice()->getMemoryProperties();
 	uint32_t typeIndex = memory->getMemoryTypeIndex();
 	ASSERT(typeIndex < memoryProperties.memoryTypeCount);
 	ASSERT(memoryProperties.memoryTypes[typeIndex].propertyFlags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT);
@@ -959,7 +954,7 @@
 VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset)
 {
 	TRACE("(VkDevice device = %p, VkBuffer buffer = %p, VkDeviceMemory memory = %p, VkDeviceSize memoryOffset = %d)",
-		    device, static_cast<void*>(buffer), static_cast<void*>(memory), int(memoryOffset));
+	      device, static_cast<void *>(buffer), static_cast<void *>(memory), int(memoryOffset));
 
 	if(!vk::Cast(buffer)->canBindToMemory(vk::Cast(memory)))
 	{
@@ -973,7 +968,7 @@
 VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset)
 {
 	TRACE("(VkDevice device = %p, VkImage image = %p, VkDeviceMemory memory = %p, VkDeviceSize memoryOffset = %d)",
-		    device, static_cast<void*>(image), static_cast<void*>(memory), int(memoryOffset));
+	      device, static_cast<void *>(image), static_cast<void *>(memory), int(memoryOffset));
 
 	if(!vk::Cast(image)->canBindToMemory(vk::Cast(memory)))
 	{
@@ -984,54 +979,54 @@
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements)
+VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements)
 {
 	TRACE("(VkDevice device = %p, VkBuffer buffer = %p, VkMemoryRequirements* pMemoryRequirements = %p)",
-		    device, static_cast<void*>(buffer), pMemoryRequirements);
+	      device, static_cast<void *>(buffer), pMemoryRequirements);
 
 	*pMemoryRequirements = vk::Cast(buffer)->getMemoryRequirements();
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements)
+VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements)
 {
 	TRACE("(VkDevice device = %p, VkImage image = %p, VkMemoryRequirements* pMemoryRequirements = %p)",
-		    device, static_cast<void*>(image), pMemoryRequirements);
+	      device, static_cast<void *>(image), pMemoryRequirements);
 
 	*pMemoryRequirements = vk::Cast(image)->getMemoryRequirements();
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
+VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t *pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
 {
 	TRACE("(VkDevice device = %p, VkImage image = %p, uint32_t* pSparseMemoryRequirementCount = %p, VkSparseImageMemoryRequirements* pSparseMemoryRequirements = %p)",
-	        device, static_cast<void*>(image), pSparseMemoryRequirementCount, pSparseMemoryRequirements);
+	      device, static_cast<void *>(image), pSparseMemoryRequirementCount, pSparseMemoryRequirements);
 
 	// The 'sparseBinding' feature is not supported, so images cannot be created with the VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT flag.
 	// "If the image was not created with VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT then pSparseMemoryRequirementCount will be set to zero and pSparseMemoryRequirements will not be written to."
 	*pSparseMemoryRequirementCount = 0;
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t *pPropertyCount, VkSparseImageFormatProperties *pProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, VkFormat format = %d, VkImageType type = %d, VkSampleCountFlagBits samples = %d, VkImageUsageFlags usage = %d, VkImageTiling tiling = %d, uint32_t* pPropertyCount = %p, VkSparseImageFormatProperties* pProperties = %p)",
-			physicalDevice, format, type, samples, usage, tiling, pPropertyCount, pProperties);
+	      physicalDevice, format, type, samples, usage, tiling, pPropertyCount, pProperties);
 
 	// We do not support sparse images.
 	*pPropertyCount = 0;
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence)
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence)
 {
 	TRACE("()");
 	UNIMPLEMENTED("vkQueueBindSparse");
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence)
 {
 	TRACE("(VkDevice device = %p, const VkFenceCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkFence* pFence = %p)",
-		    device, pCreateInfo, pAllocator, pFence);
+	      device, pCreateInfo, pAllocator, pFence);
 
-	auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+	auto *nextInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 	while(nextInfo)
 	{
 		WARN("pCreateInfo->pNext sType = %s", vk::Stringify(nextInfo->sType).c_str());
@@ -1041,15 +1036,15 @@
 	return vk::Fence::Create(pAllocator, pCreateInfo, pFence);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkFence fence = %p, const VkAllocationCallbacks* pAllocator = %p)",
-		    device, static_cast<void*>(fence), pAllocator);
+	      device, static_cast<void *>(fence), pAllocator);
 
 	vk::destroy(fence, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences)
+VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences)
 {
 	TRACE("(VkDevice device = %p, uint32_t fenceCount = %d, const VkFence* pFences = %p)",
 	      device, fenceCount, pFences);
@@ -1064,20 +1059,20 @@
 
 VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence)
 {
-	TRACE("(VkDevice device = %p, VkFence fence = %p)", device, static_cast<void*>(fence));
+	TRACE("(VkDevice device = %p, VkFence fence = %p)", device, static_cast<void *>(fence));
 
 	return vk::Cast(fence)->getStatus();
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout)
+VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout)
 {
 	TRACE("(VkDevice device = %p, uint32_t fenceCount = %d, const VkFence* pFences = %p, VkBool32 waitAll = %d, uint64_t timeout = %d)",
-		device, int(fenceCount), pFences, int(waitAll), int(timeout));
+	      device, int(fenceCount), pFences, int(waitAll), int(timeout));
 
 	return vk::Cast(device)->waitForFences(fenceCount, pFences, waitAll, timeout);
 }
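
The fence entry points above combine into the usual submit/wait/reset lifecycle. A sketch; device and queue are assumed valid, and <cstdint> provides UINT64_MAX:

#include <cstdint>

VkFenceCreateInfo fenceInfo = {};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;  // starts unsignaled

VkFence fence = VK_NULL_HANDLE;
vkCreateFence(device, &fenceInfo, nullptr, &fence);

vkQueueSubmit(queue, 0, nullptr, fence);  // a fence-only submit is valid
vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
vkResetFences(device, 1, &fence);  // back to unsignaled for reuse
vkDestroyFence(device, fence, nullptr);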
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore)
 {
 	TRACE("(VkDevice device = %p, const VkSemaphoreCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkSemaphore* pSemaphore = %p)",
 	      device, pCreateInfo, pAllocator, pSemaphore);
@@ -1090,19 +1085,19 @@
 	return vk::Semaphore::Create(pAllocator, pCreateInfo, pSemaphore);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkSemaphore semaphore = %p, const VkAllocationCallbacks* pAllocator = %p)",
-	      device, static_cast<void*>(semaphore), pAllocator);
+	      device, static_cast<void *>(semaphore), pAllocator);
 
 	vk::destroy(semaphore, pAllocator);
 }
 
 #if SWIFTSHADER_EXTERNAL_SEMAPHORE_OPAQUE_FD
-VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR *pGetFdInfo, int *pFd)
 {
 	TRACE("(VkDevice device = %p, const VkSemaphoreGetFdInfoKHR* pGetFdInfo = %p, int* pFd = %p)",
-	      device, static_cast<const void*>(pGetFdInfo), static_cast<void*>(pFd));
+	      device, static_cast<const void *>(pGetFdInfo), static_cast<void *>(pFd));
 
 	if(pGetFdInfo->handleType != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT)
 	{
@@ -1112,10 +1107,10 @@
 	return vk::Cast(pGetFdInfo->semaphore)->exportFd(pFd);
 }
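
Together with vkImportSemaphoreFdKHR below, this enables an opaque-FD semaphore handoff. A sketch; exportSemaphore must have been created with a VkExportSemaphoreCreateInfo chain, and importSemaphore is the receiving handle:

VkSemaphoreGetFdInfoKHR semaphoreFdInfo = {};
semaphoreFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
semaphoreFdInfo.semaphore = exportSemaphore;
semaphoreFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;

int fd = -1;
vkGetSemaphoreFdKHR(device, &semaphoreFdInfo, &fd);

VkImportSemaphoreFdInfoKHR importInfo = {};
importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
importInfo.semaphore = importSemaphore;
importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
importInfo.fd = fd;  // the driver takes ownership of fd on success
vkImportSemaphoreFdKHR(device, &importInfo);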
 
-VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreInfo)
+VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreInfo)
 {
 	TRACE("(VkDevice device = %p, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreInfo = %p",
-	      device, static_cast<const void*>(pImportSemaphoreInfo));
+	      device, static_cast<const void *>(pImportSemaphoreInfo));
 
 	if(pImportSemaphoreInfo->handleType != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT)
 	{
@@ -1129,8 +1124,8 @@
 
 #if VK_USE_PLATFORM_FUCHSIA
 VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreZirconHandleFUCHSIA(
-	VkDevice                                        device,
-	const VkImportSemaphoreZirconHandleInfoFUCHSIA* pImportSemaphoreZirconHandleInfo)
+    VkDevice device,
+    const VkImportSemaphoreZirconHandleInfoFUCHSIA *pImportSemaphoreZirconHandleInfo)
 {
 	TRACE("(VkDevice device = %p, const VkImportSemaphoreZirconHandleInfoFUCHSIA* pImportSemaphoreZirconHandleInfo = %p)",
 	      device, pImportSemaphoreZirconHandleInfo);
@@ -1141,18 +1136,16 @@
 	}
 	bool temporaryImport = (pImportSemaphoreZirconHandleInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) != 0;
 
-	return vk::Cast(pImportSemaphoreZirconHandleInfo->semaphore)->importHandle(
-			pImportSemaphoreZirconHandleInfo->handle,
-			temporaryImport);
+	return vk::Cast(pImportSemaphoreZirconHandleInfo->semaphore)->importHandle(pImportSemaphoreZirconHandleInfo->handle, temporaryImport);
 }
 
 VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreZirconHandleFUCHSIA(
-	VkDevice                                     device,
-	const VkSemaphoreGetZirconHandleInfoFUCHSIA* pGetZirconHandleInfo,
-	zx_handle_t*                                 pZirconHandle)
+    VkDevice device,
+    const VkSemaphoreGetZirconHandleInfoFUCHSIA *pGetZirconHandleInfo,
+    zx_handle_t *pZirconHandle)
 {
 	TRACE("(VkDevice device = %p, const VkSemaphoreGetZirconHandleInfoFUCHSIA* pGetZirconHandleInfo = %p, zx_handle_t* pZirconHandle = %p)",
-	      device, static_cast<const void*>(pGetZirconHandleInfo), static_cast<void*>(pZirconHandle));
+	      device, static_cast<const void *>(pGetZirconHandleInfo), static_cast<void *>(pZirconHandle));
 
 	if(pGetZirconHandleInfo->handleType != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA)
 	{
@@ -1163,7 +1156,7 @@
 }
 #endif  // VK_USE_PLATFORM_FUCHSIA
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent)
 {
 	TRACE("(VkDevice device = %p, const VkEventCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkEvent* pEvent = %p)",
 	      device, pCreateInfo, pAllocator, pEvent);
@@ -1173,7 +1166,7 @@
 		UNIMPLEMENTED("pCreateInfo->flags");
 	}
 
-	auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pCreateInfo->pNext);
+	auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pCreateInfo->pNext);
 	while(extInfo)
 	{
 		WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
@@ -1183,24 +1176,24 @@
 	return vk::Event::Create(pAllocator, pCreateInfo, pEvent);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkEvent event = %p, const VkAllocationCallbacks* pAllocator = %p)",
-	      device, static_cast<void*>(event), pAllocator);
+	      device, static_cast<void *>(event), pAllocator);
 
 	vk::destroy(event, pAllocator);
 }
 
 VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus(VkDevice device, VkEvent event)
 {
-	TRACE("(VkDevice device = %p, VkEvent event = %p)", device, static_cast<void*>(event));
+	TRACE("(VkDevice device = %p, VkEvent event = %p)", device, static_cast<void *>(event));
 
 	return vk::Cast(event)->getStatus();
 }
 
 VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event)
 {
-	TRACE("(VkDevice device = %p, VkEvent event = %p)", device, static_cast<void*>(event));
+	TRACE("(VkDevice device = %p, VkEvent event = %p)", device, static_cast<void *>(event));
 
 	vk::Cast(event)->signal();
 
@@ -1209,14 +1202,14 @@
 
 VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent(VkDevice device, VkEvent event)
 {
-	TRACE("(VkDevice device = %p, VkEvent event = %p)", device, static_cast<void*>(event));
+	TRACE("(VkDevice device = %p, VkEvent event = %p)", device, static_cast<void *>(event));
 
 	vk::Cast(event)->reset();
 
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool)
 {
 	TRACE("(VkDevice device = %p, const VkQueryPoolCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkQueryPool* pQueryPool = %p)",
 	      device, pCreateInfo, pAllocator, pQueryPool);
@@ -1226,7 +1219,7 @@
 		UNIMPLEMENTED("pCreateInfo->flags");
 	}
 
-	auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pCreateInfo->pNext);
+	auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pCreateInfo->pNext);
 	while(extInfo)
 	{
 		WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
@@ -1236,38 +1229,38 @@
 	return vk::QueryPool::Create(pAllocator, pCreateInfo, pQueryPool);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkQueryPool queryPool = %p, const VkAllocationCallbacks* pAllocator = %p)",
-	      device, static_cast<void*>(queryPool), pAllocator);
+	      device, static_cast<void *>(queryPool), pAllocator);
 
 	vk::destroy(queryPool, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags)
 {
 	TRACE("(VkDevice device = %p, VkQueryPool queryPool = %p, uint32_t firstQuery = %d, uint32_t queryCount = %d, size_t dataSize = %d, void* pData = %p, VkDeviceSize stride = %d, VkQueryResultFlags flags = %d)",
-	      device, static_cast<void*>(queryPool), int(firstQuery), int(queryCount), int(dataSize), pData, int(stride), flags);
+	      device, static_cast<void *>(queryPool), int(firstQuery), int(queryCount), int(dataSize), pData, int(stride), flags);
 
 	return vk::Cast(queryPool)->getResults(firstQuery, queryCount, dataSize, pData, stride, flags);
 }
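
A typical readback through this entry point, requesting 64-bit results and blocking until they are available. A sketch; queryPool is assumed to hold two finished queries:

#include <cstdint>

uint64_t results[2] = {};
vkGetQueryPoolResults(device, queryPool, 0, 2,
                      sizeof(results), results, sizeof(uint64_t),
                      VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
// stride (sizeof(uint64_t)) is the byte distance between consecutive
// queries' results within the destination buffer.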
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer)
 {
 	TRACE("(VkDevice device = %p, const VkBufferCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkBuffer* pBuffer = %p)",
-		    device, pCreateInfo, pAllocator, pBuffer);
+	      device, pCreateInfo, pAllocator, pBuffer);
 
-	auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+	auto *nextInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 	while(nextInfo)
 	{
 		switch(nextInfo->sType)
 		{
-		case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO:
-			// Do nothing. Should be handled by vk::Buffer::Create().
-			break;
-		default:
-			WARN("pCreateInfo->pNext sType = %s", vk::Stringify(nextInfo->sType).c_str());
-			break;
+			case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO:
+				// Do nothing. Should be handled by vk::Buffer::Create().
+				break;
+			default:
+				WARN("pCreateInfo->pNext sType = %s", vk::Stringify(nextInfo->sType).c_str());
+				break;
 		}
 		nextInfo = nextInfo->pNext;
 	}
@@ -1275,25 +1268,25 @@
 	return vk::Buffer::Create(pAllocator, pCreateInfo, pBuffer);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkBuffer buffer = %p, const VkAllocationCallbacks* pAllocator = %p)",
-		    device, static_cast<void*>(buffer), pAllocator);
+	      device, static_cast<void *>(buffer), pAllocator);
 
 	vk::destroy(buffer, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkBufferView *pView)
 {
 	TRACE("(VkDevice device = %p, const VkBufferViewCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkBufferView* pView = %p)",
-	        device, pCreateInfo, pAllocator, pView);
+	      device, pCreateInfo, pAllocator, pView);
 
 	if(pCreateInfo->flags)
 	{
 		UNIMPLEMENTED("pCreateInfo->flags");
 	}
 
-	auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pCreateInfo->pNext);
+	auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pCreateInfo->pNext);
 	while(extInfo)
 	{
 		WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
@@ -1303,20 +1296,20 @@
 	return vk::BufferView::Create(pAllocator, pCreateInfo, pView);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkBufferView bufferView = %p, const VkAllocationCallbacks* pAllocator = %p)",
-	        device, static_cast<void*>(bufferView), pAllocator);
+	      device, static_cast<void *>(bufferView), pAllocator);
 
 	vk::destroy(bufferView, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImage *pImage)
 {
 	TRACE("(VkDevice device = %p, const VkImageCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkImage* pImage = %p)",
-		    device, pCreateInfo, pAllocator, pImage);
+	      device, pCreateInfo, pAllocator, pImage);
 
-	const VkBaseInStructure* extensionCreateInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+	const VkBaseInStructure *extensionCreateInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 
 #ifdef __ANDROID__
 	vk::BackingMemory backmem;
@@ -1328,31 +1321,31 @@
 		switch((long)(extensionCreateInfo->sType))
 		{
 #ifdef __ANDROID__
-		case VK_STRUCTURE_TYPE_SWAPCHAIN_IMAGE_CREATE_INFO_ANDROID:
-		{
-			const VkSwapchainImageCreateInfoANDROID* swapImageCreateInfo = reinterpret_cast<const VkSwapchainImageCreateInfoANDROID*>(extensionCreateInfo);
-			backmem.androidUsage = swapImageCreateInfo->usage;
-		}
-		break;
-		case VK_STRUCTURE_TYPE_NATIVE_BUFFER_ANDROID:
-		{
-			const VkNativeBufferANDROID* nativeBufferInfo = reinterpret_cast<const VkNativeBufferANDROID*>(extensionCreateInfo);
-			backmem.nativeHandle = nativeBufferInfo->handle;
-			backmem.stride = nativeBufferInfo->stride;
-			swapchainImage = true;
-		}
-		break;
+			case VK_STRUCTURE_TYPE_SWAPCHAIN_IMAGE_CREATE_INFO_ANDROID:
+			{
+				const VkSwapchainImageCreateInfoANDROID *swapImageCreateInfo = reinterpret_cast<const VkSwapchainImageCreateInfoANDROID *>(extensionCreateInfo);
+				backmem.androidUsage = swapImageCreateInfo->usage;
+			}
+			break;
+			case VK_STRUCTURE_TYPE_NATIVE_BUFFER_ANDROID:
+			{
+				const VkNativeBufferANDROID *nativeBufferInfo = reinterpret_cast<const VkNativeBufferANDROID *>(extensionCreateInfo);
+				backmem.nativeHandle = nativeBufferInfo->handle;
+				backmem.stride = nativeBufferInfo->stride;
+				swapchainImage = true;
+			}
+			break;
 #endif
-		case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO:
-			// Do nothing. Should be handled by vk::Image::Create()
-			break;
-		case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
-			/* Do nothing. We don't actually need the swapchain handle yet; we'll do all the work in vkBindImageMemory2. */
-			break;
-		default:
-			// "the [driver] must skip over, without processing (other than reading the sType and pNext members) any structures in the chain with sType values not defined by [supported extensions]"
-			WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
-			break;
+			case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO:
+				// Do nothing. Should be handled by vk::Image::Create()
+				break;
+			case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
+				// Do nothing. We don't actually need the swapchain handle yet; we'll do all the work in vkBindImageMemory2.
+				break;
+			default:
+				// "the [driver] must skip over, without processing (other than reading the sType and pNext members) any structures in the chain with sType values not defined by [supported extensions]"
+				WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+				break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
@@ -1368,7 +1361,7 @@
 			return result;
 		}
 
-		vk::Image* image = vk::Cast(*pImage);
+		vk::Image *image = vk::Cast(*pImage);
 		VkMemoryRequirements memRequirements = image->getMemoryRequirements();
 
 		VkMemoryAllocateInfo allocInfo = {};
@@ -1393,13 +1386,13 @@
 	return result;
 }
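
Creating an image that participates in opaque-FD external memory exercises the VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO case handled above. A sketch with arbitrary example dimensions:

VkExternalMemoryImageCreateInfo externalInfo = {};
externalInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
externalInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;

VkImageCreateInfo imageInfo = {};
imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageInfo.pNext = &externalInfo;
imageInfo.imageType = VK_IMAGE_TYPE_2D;
imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imageInfo.extent = { 256, 256, 1 };
imageInfo.mipLevels = 1;
imageInfo.arrayLayers = 1;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

VkImage image = VK_NULL_HANDLE;
vkCreateImage(device, &imageInfo, nullptr, &image);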
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkImage image = %p, const VkAllocationCallbacks* pAllocator = %p)",
-		    device, static_cast<void*>(image), pAllocator);
+	      device, static_cast<void *>(image), pAllocator);
 
 #ifdef __ANDROID__
-	vk::Image* img = vk::Cast(image);
+	vk::Image *img = vk::Cast(image);
 	if(img && img->hasExternalMemory())
 	{
 		vk::destroy(img->getExternalMemory(), pAllocator);
@@ -1409,54 +1402,54 @@
 	vk::destroy(image, pAllocator);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout)
+VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource, VkSubresourceLayout *pLayout)
 {
 	TRACE("(VkDevice device = %p, VkImage image = %p, const VkImageSubresource* pSubresource = %p, VkSubresourceLayout* pLayout = %p)",
-	        device, static_cast<void*>(image), pSubresource, pLayout);
+	      device, static_cast<void *>(image), pSubresource, pLayout);
 
 	vk::Cast(image)->getSubresourceLayout(pSubresource, pLayout);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImageView *pView)
 {
 	TRACE("(VkDevice device = %p, const VkImageViewCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkImageView* pView = %p)",
-		    device, pCreateInfo, pAllocator, pView);
+	      device, pCreateInfo, pAllocator, pView);
 
 	if(pCreateInfo->flags)
 	{
 		UNIMPLEMENTED("pCreateInfo->flags");
 	}
 
-	const VkBaseInStructure* extensionCreateInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+	const VkBaseInStructure *extensionCreateInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 	const vk::SamplerYcbcrConversion *ycbcrConversion = nullptr;
 
 	while(extensionCreateInfo)
 	{
 		switch(extensionCreateInfo->sType)
 		{
-		case VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR:
-		{
-			const VkImageViewUsageCreateInfo* multiviewCreateInfo = reinterpret_cast<const VkImageViewUsageCreateInfo*>(extensionCreateInfo);
-			ASSERT(!(~vk::Cast(pCreateInfo->image)->getUsage() & multiviewCreateInfo->usage));
-		}
-		break;
-		case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
-		{
-			const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo = reinterpret_cast<const VkSamplerYcbcrConversionInfo*>(extensionCreateInfo);
-			ycbcrConversion = vk::Cast(samplerYcbcrConversionInfo->conversion);
-
-			if(ycbcrConversion)
+			case VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR:
 			{
-				ASSERT((pCreateInfo->components.r == VK_COMPONENT_SWIZZLE_IDENTITY) &&
-				       (pCreateInfo->components.g == VK_COMPONENT_SWIZZLE_IDENTITY) &&
-				       (pCreateInfo->components.b == VK_COMPONENT_SWIZZLE_IDENTITY) &&
-				       (pCreateInfo->components.a == VK_COMPONENT_SWIZZLE_IDENTITY));
+				const VkImageViewUsageCreateInfo *multiviewCreateInfo = reinterpret_cast<const VkImageViewUsageCreateInfo *>(extensionCreateInfo);
+				ASSERT(!(~vk::Cast(pCreateInfo->image)->getUsage() & multiviewCreateInfo->usage));
 			}
-		}
-		break;
-		default:
-			WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
 			break;
+			case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
+			{
+				const VkSamplerYcbcrConversionInfo *samplerYcbcrConversionInfo = reinterpret_cast<const VkSamplerYcbcrConversionInfo *>(extensionCreateInfo);
+				ycbcrConversion = vk::Cast(samplerYcbcrConversionInfo->conversion);
+
+				if(ycbcrConversion)
+				{
+					ASSERT((pCreateInfo->components.r == VK_COMPONENT_SWIZZLE_IDENTITY) &&
+					       (pCreateInfo->components.g == VK_COMPONENT_SWIZZLE_IDENTITY) &&
+					       (pCreateInfo->components.b == VK_COMPONENT_SWIZZLE_IDENTITY) &&
+					       (pCreateInfo->components.a == VK_COMPONENT_SWIZZLE_IDENTITY));
+				}
+			}
+			break;
+			default:
+				WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+				break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
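
For the Y'CbCr assertion in the hunk above, a hypothetical application-side call that satisfies it: when a VkSamplerYcbcrConversionInfo is chained into VkImageViewCreateInfo, all four component swizzles must be identity. The `device`, `image`, and `ycbcrConversion` handles are assumed to already exist, and the format is illustrative:

// Hypothetical usage (handles and format are assumptions, not from this file).
VkSamplerYcbcrConversionInfo conversionInfo = {};
conversionInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO;
conversionInfo.conversion = ycbcrConversion;  // from vkCreateSamplerYcbcrConversion

VkImageViewCreateInfo viewInfo = {};
viewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
viewInfo.pNext = &conversionInfo;
viewInfo.image = image;
viewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
viewInfo.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;  // a multi-planar format
// All four swizzles must be identity, or the ASSERT above fires.
viewInfo.components = { VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
	                    VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY };
viewInfo.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };

VkImageView view = VK_NULL_HANDLE;
vkCreateImageView(device, &viewInfo, nullptr, &view);
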
@@ -1465,25 +1458,25 @@
 	return vk::ImageView::Create(pAllocator, pCreateInfo, pView, ycbcrConversion);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkImageView imageView = %p, const VkAllocationCallbacks* pAllocator = %p)",
-	      device, static_cast<void*>(imageView), pAllocator);
+	      device, static_cast<void *>(imageView), pAllocator);
 
 	vk::destroy(imageView, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule)
 {
 	TRACE("(VkDevice device = %p, const VkShaderModuleCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkShaderModule* pShaderModule = %p)",
-		    device, pCreateInfo, pAllocator, pShaderModule);
+	      device, pCreateInfo, pAllocator, pShaderModule);
 
 	if(pCreateInfo->flags)
 	{
 		UNIMPLEMENTED("pCreateInfo->flags");
 	}
 
-	auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+	auto *nextInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 	while(nextInfo)
 	{
 		WARN("pCreateInfo->pNext sType = %s", vk::Stringify(nextInfo->sType).c_str());
@@ -1493,25 +1486,25 @@
 	return vk::ShaderModule::Create(pAllocator, pCreateInfo, pShaderModule);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkShaderModule shaderModule = %p, const VkAllocationCallbacks* pAllocator = %p)",
-		    device, static_cast<void*>(shaderModule), pAllocator);
+	      device, static_cast<void *>(shaderModule), pAllocator);
 
 	vk::destroy(shaderModule, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache)
 {
 	TRACE("(VkDevice device = %p, const VkPipelineCacheCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkPipelineCache* pPipelineCache = %p)",
-	        device, pCreateInfo, pAllocator, pPipelineCache);
+	      device, pCreateInfo, pAllocator, pPipelineCache);
 
 	if(pCreateInfo->flags)
 	{
 		UNIMPLEMENTED("pCreateInfo->flags");
 	}
 
-	auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pCreateInfo->pNext);
+	auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pCreateInfo->pNext);
 	while(extInfo)
 	{
 		WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
@@ -1521,34 +1514,34 @@
 	return vk::PipelineCache::Create(pAllocator, pCreateInfo, pPipelineCache);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkPipelineCache pipelineCache = %p, const VkAllocationCallbacks* pAllocator = %p)",
-	        device, static_cast<void*>(pipelineCache), pAllocator);
+	      device, static_cast<void *>(pipelineCache), pAllocator);
 
 	vk::destroy(pipelineCache, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData)
 {
 	TRACE("(VkDevice device = %p, VkPipelineCache pipelineCache = %p, size_t* pDataSize = %p, void* pData = %p)",
-	        device, static_cast<void*>(pipelineCache), pDataSize, pData);
+	      device, static_cast<void *>(pipelineCache), pDataSize, pData);
 
 	return vk::Cast(pipelineCache)->getData(pDataSize, pData);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches)
+VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches)
 {
 	TRACE("(VkDevice device = %p, VkPipelineCache dstCache = %p, uint32_t srcCacheCount = %d, const VkPipelineCache* pSrcCaches = %p)",
-	        device, static_cast<void*>(dstCache), int(srcCacheCount), pSrcCaches);
+	      device, static_cast<void *>(dstCache), int(srcCacheCount), pSrcCaches);
 
 	return vk::Cast(dstCache)->merge(srcCacheCount, pSrcCaches);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines)
 {
 	TRACE("(VkDevice device = %p, VkPipelineCache pipelineCache = %p, uint32_t createInfoCount = %d, const VkGraphicsPipelineCreateInfo* pCreateInfos = %p, const VkAllocationCallbacks* pAllocator = %p, VkPipeline* pPipelines = %p)",
-		    device, static_cast<void*>(pipelineCache), int(createInfoCount), pCreateInfos, pAllocator, pPipelines);
+	      device, static_cast<void *>(pipelineCache), int(createInfoCount), pCreateInfos, pAllocator, pPipelines);
 
 	VkResult errorResult = VK_SUCCESS;
 	for(uint32_t i = 0; i < createInfoCount; i++)
@@ -1557,7 +1550,7 @@
 
 		if(result == VK_SUCCESS)
 		{
-			static_cast<vk::GraphicsPipeline*>(vk::Cast(pPipelines[i]))->compileShaders(pAllocator, &pCreateInfos[i], vk::Cast(pipelineCache));
+			static_cast<vk::GraphicsPipeline *>(vk::Cast(pPipelines[i]))->compileShaders(pAllocator, &pCreateInfos[i], vk::Cast(pipelineCache));
 		}
 		else
 		{
@@ -1577,10 +1570,10 @@
 	return errorResult;
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines)
 {
 	TRACE("(VkDevice device = %p, VkPipelineCache pipelineCache = %p, uint32_t createInfoCount = %d, const VkComputePipelineCreateInfo* pCreateInfos = %p, const VkAllocationCallbacks* pAllocator = %p, VkPipeline* pPipelines = %p)",
-		device, static_cast<void*>(pipelineCache), int(createInfoCount), pCreateInfos, pAllocator, pPipelines);
+	      device, static_cast<void *>(pipelineCache), int(createInfoCount), pCreateInfos, pAllocator, pPipelines);
 
 	VkResult errorResult = VK_SUCCESS;
 	for(uint32_t i = 0; i < createInfoCount; i++)
@@ -1589,7 +1582,7 @@
 
 		if(result == VK_SUCCESS)
 		{
-			static_cast<vk::ComputePipeline*>(vk::Cast(pPipelines[i]))->compileShaders(pAllocator, &pCreateInfos[i], vk::Cast(pipelineCache));
+			static_cast<vk::ComputePipeline *>(vk::Cast(pPipelines[i]))->compileShaders(pAllocator, &pCreateInfos[i], vk::Cast(pipelineCache));
 		}
 		else
 		{
@@ -1609,25 +1602,25 @@
 	return errorResult;
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkPipeline pipeline = %p, const VkAllocationCallbacks* pAllocator = %p)",
-		    device, static_cast<void*>(pipeline), pAllocator);
+	      device, static_cast<void *>(pipeline), pAllocator);
 
 	vk::destroy(pipeline, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout)
 {
 	TRACE("(VkDevice device = %p, const VkPipelineLayoutCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkPipelineLayout* pPipelineLayout = %p)",
-		    device, pCreateInfo, pAllocator, pPipelineLayout);
+	      device, pCreateInfo, pAllocator, pPipelineLayout);
 
 	if(pCreateInfo->flags)
 	{
 		UNIMPLEMENTED("pCreateInfo->flags");
 	}
 
-	auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+	auto *nextInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 	while(nextInfo)
 	{
 		WARN("pCreateInfo->pNext sType = %s", vk::Stringify(nextInfo->sType).c_str());
@@ -1637,40 +1630,40 @@
 	return vk::PipelineLayout::Create(pAllocator, pCreateInfo, pPipelineLayout);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkPipelineLayout pipelineLayout = %p, const VkAllocationCallbacks* pAllocator = %p)",
-		    device, static_cast<void*>(pipelineLayout), pAllocator);
+	      device, static_cast<void *>(pipelineLayout), pAllocator);
 
 	vk::destroy(pipelineLayout, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSampler *pSampler)
 {
 	TRACE("(VkDevice device = %p, const VkSamplerCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkSampler* pSampler = %p)",
-		    device, pCreateInfo, pAllocator, pSampler);
+	      device, pCreateInfo, pAllocator, pSampler);
 
 	if(pCreateInfo->flags)
 	{
 		UNIMPLEMENTED("pCreateInfo->flags");
 	}
 
-	const VkBaseInStructure* extensionCreateInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+	const VkBaseInStructure *extensionCreateInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 	const vk::SamplerYcbcrConversion *ycbcrConversion = nullptr;
 
 	while(extensionCreateInfo)
 	{
 		switch(extensionCreateInfo->sType)
 		{
-		case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
+			case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
 			{
-				const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo = reinterpret_cast<const VkSamplerYcbcrConversionInfo*>(extensionCreateInfo);
+				const VkSamplerYcbcrConversionInfo *samplerYcbcrConversionInfo = reinterpret_cast<const VkSamplerYcbcrConversionInfo *>(extensionCreateInfo);
 				ycbcrConversion = vk::Cast(samplerYcbcrConversionInfo->conversion);
 			}
 			break;
-		default:
-			WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
-			break;
+			default:
+				WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+				break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
@@ -1679,31 +1672,31 @@
 	return vk::Sampler::Create(pAllocator, pCreateInfo, pSampler, ycbcrConversion);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkSampler sampler = %p, const VkAllocationCallbacks* pAllocator = %p)",
-		    device, static_cast<void*>(sampler), pAllocator);
+	      device, static_cast<void *>(sampler), pAllocator);
 
 	vk::destroy(sampler, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout)
 {
 	TRACE("(VkDevice device = %p, const VkDescriptorSetLayoutCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkDescriptorSetLayout* pSetLayout = %p)",
 	      device, pCreateInfo, pAllocator, pSetLayout);
 
-	const VkBaseInStructure* extensionCreateInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+	const VkBaseInStructure *extensionCreateInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 
 	while(extensionCreateInfo)
 	{
 		switch(extensionCreateInfo->sType)
 		{
-		case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT:
-			ASSERT(!vk::Cast(device)->hasExtension(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME));
-			break;
-		default:
-			WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
-			break;
+			case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT:
+				ASSERT(!vk::Cast(device)->hasExtension(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME));
+				break;
+			default:
+				WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+				break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
@@ -1712,20 +1705,20 @@
 	return vk::DescriptorSetLayout::Create(pAllocator, pCreateInfo, pSetLayout);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkDescriptorSetLayout descriptorSetLayout = %p, const VkAllocationCallbacks* pAllocator = %p)",
-	      device, static_cast<void*>(descriptorSetLayout), pAllocator);
+	      device, static_cast<void *>(descriptorSetLayout), pAllocator);
 
 	vk::destroy(descriptorSetLayout, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool)
 {
 	TRACE("(VkDevice device = %p, const VkDescriptorPoolCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkDescriptorPool* pDescriptorPool = %p)",
 	      device, pCreateInfo, pAllocator, pDescriptorPool);
 
-	auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pCreateInfo->pNext);
+	auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pCreateInfo->pNext);
 	while(extInfo)
 	{
 		WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
@@ -1735,10 +1728,10 @@
 	return vk::DescriptorPool::Create(pAllocator, pCreateInfo, pDescriptorPool);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkDescriptorPool descriptorPool = %p, const VkAllocationCallbacks* pAllocator = %p)",
-	      device, static_cast<void*>(descriptorPool), pAllocator);
+	      device, static_cast<void *>(descriptorPool), pAllocator);
 
 	vk::destroy(descriptorPool, pAllocator);
 }
@@ -1746,7 +1739,7 @@
 VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
 {
 	TRACE("(VkDevice device = %p, VkDescriptorPool descriptorPool = %p, VkDescriptorPoolResetFlags flags = 0x%x)",
-		device, static_cast<void*>(descriptorPool), int(flags));
+	      device, static_cast<void *>(descriptorPool), int(flags));
 
 	if(flags)
 	{
@@ -1756,33 +1749,32 @@
 	return vk::Cast(descriptorPool)->reset();
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets)
+VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets)
 {
 	TRACE("(VkDevice device = %p, const VkDescriptorSetAllocateInfo* pAllocateInfo = %p, VkDescriptorSet* pDescriptorSets = %p)",
-		device, pAllocateInfo, pDescriptorSets);
+	      device, pAllocateInfo, pDescriptorSets);
 
-	auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pAllocateInfo->pNext);
+	auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pAllocateInfo->pNext);
 	while(extInfo)
 	{
 		WARN("pAllocateInfo->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
 		extInfo = extInfo->pNext;
 	}
 
-	return vk::Cast(pAllocateInfo->descriptorPool)->allocateSets(
-		pAllocateInfo->descriptorSetCount, pAllocateInfo->pSetLayouts, pDescriptorSets);
+	return vk::Cast(pAllocateInfo->descriptorPool)->allocateSets(pAllocateInfo->descriptorSetCount, pAllocateInfo->pSetLayouts, pDescriptorSets);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets)
+VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet *pDescriptorSets)
 {
 	TRACE("(VkDevice device = %p, VkDescriptorPool descriptorPool = %p, uint32_t descriptorSetCount = %d, const VkDescriptorSet* pDescriptorSets = %p)",
-		device, static_cast<void*>(descriptorPool), descriptorSetCount, pDescriptorSets);
+	      device, static_cast<void *>(descriptorPool), descriptorSetCount, pDescriptorSets);
 
 	vk::Cast(descriptorPool)->freeSets(descriptorSetCount, pDescriptorSets);
 
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies)
+VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies)
 {
 	TRACE("(VkDevice device = %p, uint32_t descriptorWriteCount = %d, const VkWriteDescriptorSet* pDescriptorWrites = %p, uint32_t descriptorCopyCount = %d, const VkCopyDescriptorSet* pDescriptorCopies = %p)",
 	      device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
@@ -1790,17 +1782,17 @@
 	vk::Cast(device)->updateDescriptorSets(descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer)
 {
 	TRACE("(VkDevice device = %p, const VkFramebufferCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkFramebuffer* pFramebuffer = %p)",
-		    device, pCreateInfo, pAllocator, pFramebuffer);
+	      device, pCreateInfo, pAllocator, pFramebuffer);
 
 	if(pCreateInfo->flags)
 	{
 		UNIMPLEMENTED("pCreateInfo->flags");
 	}
 
-	auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+	auto *nextInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 	while(nextInfo)
 	{
 		WARN("pCreateInfo->pNext sType = %s", vk::Stringify(nextInfo->sType).c_str());
@@ -1810,101 +1802,101 @@
 	return vk::Framebuffer::Create(pAllocator, pCreateInfo, pFramebuffer);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkFramebuffer framebuffer = %p, const VkAllocationCallbacks* pAllocator = %p)",
-		device, static_cast<void*>(framebuffer), pAllocator);
+	      device, static_cast<void *>(framebuffer), pAllocator);
 
 	vk::destroy(framebuffer, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass)
 {
 	TRACE("(VkDevice device = %p, const VkRenderPassCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkRenderPass* pRenderPass = %p)",
-		    device, pCreateInfo, pAllocator, pRenderPass);
+	      device, pCreateInfo, pAllocator, pRenderPass);
 
 	if(pCreateInfo->flags)
 	{
 		UNIMPLEMENTED("pCreateInfo->flags");
 	}
 
-	const VkBaseInStructure* extensionCreateInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+	const VkBaseInStructure *extensionCreateInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 
 	while(extensionCreateInfo)
 	{
 		switch(extensionCreateInfo->sType)
 		{
-		case VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO:
-		{
-			const VkRenderPassInputAttachmentAspectCreateInfo* inputAttachmentAspectCreateInfo = reinterpret_cast<const VkRenderPassInputAttachmentAspectCreateInfo*>(extensionCreateInfo);
-
-			for(uint32_t i = 0; i < inputAttachmentAspectCreateInfo->aspectReferenceCount; i++)
+			case VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO:
 			{
-				const VkInputAttachmentAspectReference& aspectReference = inputAttachmentAspectCreateInfo->pAspectReferences[i];
-				ASSERT(aspectReference.subpass < pCreateInfo->subpassCount);
-				const VkSubpassDescription& subpassDescription = pCreateInfo->pSubpasses[aspectReference.subpass];
-				ASSERT(aspectReference.inputAttachmentIndex < subpassDescription.inputAttachmentCount);
-				const VkAttachmentReference& attachmentReference = subpassDescription.pInputAttachments[aspectReference.inputAttachmentIndex];
-				if(attachmentReference.attachment != VK_ATTACHMENT_UNUSED)
+				const VkRenderPassInputAttachmentAspectCreateInfo *inputAttachmentAspectCreateInfo = reinterpret_cast<const VkRenderPassInputAttachmentAspectCreateInfo *>(extensionCreateInfo);
+
+				for(uint32_t i = 0; i < inputAttachmentAspectCreateInfo->aspectReferenceCount; i++)
 				{
-					// If the pNext chain includes an instance of VkRenderPassInputAttachmentAspectCreateInfo, for any
-					// element of the pInputAttachments member of any element of pSubpasses where the attachment member
-					// is not VK_ATTACHMENT_UNUSED, the aspectMask member of the corresponding element of
-					// VkRenderPassInputAttachmentAspectCreateInfo::pAspectReferences must only include aspects that are
-					// present in images of the format specified by the element of pAttachments at attachment
-					vk::Format format(pCreateInfo->pAttachments[attachmentReference.attachment].format);
-					bool isDepth = format.isDepth();
-					bool isStencil = format.isStencil();
-					ASSERT(!(aspectReference.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) || (!isDepth && !isStencil));
-					ASSERT(!(aspectReference.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) || isDepth);
-					ASSERT(!(aspectReference.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) || isStencil);
+					const VkInputAttachmentAspectReference &aspectReference = inputAttachmentAspectCreateInfo->pAspectReferences[i];
+					ASSERT(aspectReference.subpass < pCreateInfo->subpassCount);
+					const VkSubpassDescription &subpassDescription = pCreateInfo->pSubpasses[aspectReference.subpass];
+					ASSERT(aspectReference.inputAttachmentIndex < subpassDescription.inputAttachmentCount);
+					const VkAttachmentReference &attachmentReference = subpassDescription.pInputAttachments[aspectReference.inputAttachmentIndex];
+					if(attachmentReference.attachment != VK_ATTACHMENT_UNUSED)
+					{
+						// If the pNext chain includes an instance of VkRenderPassInputAttachmentAspectCreateInfo, for any
+						// element of the pInputAttachments member of any element of pSubpasses where the attachment member
+						// is not VK_ATTACHMENT_UNUSED, the aspectMask member of the corresponding element of
+						// VkRenderPassInputAttachmentAspectCreateInfo::pAspectReferences must only include aspects that are
+						// present in images of the format specified by the element of pAttachments at attachment
+						vk::Format format(pCreateInfo->pAttachments[attachmentReference.attachment].format);
+						bool isDepth = format.isDepth();
+						bool isStencil = format.isStencil();
+						ASSERT(!(aspectReference.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) || (!isDepth && !isStencil));
+						ASSERT(!(aspectReference.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) || isDepth);
+						ASSERT(!(aspectReference.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) || isStencil);
+					}
 				}
 			}
-		}
-		break;
-		case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
-		{
-			const VkRenderPassMultiviewCreateInfo* multiviewCreateInfo = reinterpret_cast<const VkRenderPassMultiviewCreateInfo*>(extensionCreateInfo);
-			ASSERT((multiviewCreateInfo->subpassCount == 0) || (multiviewCreateInfo->subpassCount == pCreateInfo->subpassCount));
-			ASSERT((multiviewCreateInfo->dependencyCount == 0) || (multiviewCreateInfo->dependencyCount == pCreateInfo->dependencyCount));
-
-			bool zeroMask = (multiviewCreateInfo->pViewMasks[0] == 0);
-			for(uint32_t i = 1; i < multiviewCreateInfo->subpassCount; i++)
+			break;
+			case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
 			{
-				ASSERT((multiviewCreateInfo->pViewMasks[i] == 0) == zeroMask);
-			}
+				const VkRenderPassMultiviewCreateInfo *multiviewCreateInfo = reinterpret_cast<const VkRenderPassMultiviewCreateInfo *>(extensionCreateInfo);
+				ASSERT((multiviewCreateInfo->subpassCount == 0) || (multiviewCreateInfo->subpassCount == pCreateInfo->subpassCount));
+				ASSERT((multiviewCreateInfo->dependencyCount == 0) || (multiviewCreateInfo->dependencyCount == pCreateInfo->dependencyCount));
 
-			if(zeroMask)
-			{
-				ASSERT(multiviewCreateInfo->correlationMaskCount == 0);
-			}
-
-			for(uint32_t i = 0; i < multiviewCreateInfo->dependencyCount; i++)
-			{
-				const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
-				if(multiviewCreateInfo->pViewOffsets[i] != 0)
+				bool zeroMask = (multiviewCreateInfo->pViewMasks[0] == 0);
+				for(uint32_t i = 1; i < multiviewCreateInfo->subpassCount; i++)
 				{
-					ASSERT(dependency.srcSubpass != dependency.dstSubpass);
-					ASSERT(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT);
+					ASSERT((multiviewCreateInfo->pViewMasks[i] == 0) == zeroMask);
 				}
+
 				if(zeroMask)
 				{
-					ASSERT(!(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT));
+					ASSERT(multiviewCreateInfo->correlationMaskCount == 0);
 				}
-			}
 
-			// If the pNext chain includes an instance of VkRenderPassMultiviewCreateInfo,
-			// each element of its pViewMask member must not include a bit at a position
-			// greater than the value of VkPhysicalDeviceLimits::maxFramebufferLayers
-			// pViewMask is a 32 bit value. If maxFramebufferLayers > 32, it's impossible
-			// for pViewMask to contain a bit at an illegal position
-			// Note: Verify pViewMask values instead if we hit this assert
-			ASSERT(vk::Cast(device)->getPhysicalDevice()->getProperties().limits.maxFramebufferLayers >= 32);
-		}
-		break;
-		default:
-			WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+				for(uint32_t i = 0; i < multiviewCreateInfo->dependencyCount; i++)
+				{
+					const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
+					if(multiviewCreateInfo->pViewOffsets[i] != 0)
+					{
+						ASSERT(dependency.srcSubpass != dependency.dstSubpass);
+						ASSERT(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT);
+					}
+					if(zeroMask)
+					{
+						ASSERT(!(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT));
+					}
+				}
+
+				// If the pNext chain includes an instance of VkRenderPassMultiviewCreateInfo,
+				// each element of its pViewMask member must not include a bit at a position
+				// greater than the value of VkPhysicalDeviceLimits::maxFramebufferLayers
+				// pViewMask is a 32-bit value. If maxFramebufferLayers >= 32, it's impossible
+				// for pViewMask to contain a bit at an illegal position
+				// Note: Verify pViewMask values instead if we hit this assert
+				ASSERT(vk::Cast(device)->getPhysicalDevice()->getProperties().limits.maxFramebufferLayers >= 32);
+			}
 			break;
+			default:
+				WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+				break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
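
The multiview comment in this hunk hinges on simple arithmetic: each pViewMasks entry is a uint32_t, so bit positions run 0..31. A small sketch of that reasoning, with a hypothetical device limit:

// Sketch of the comment's reasoning (the limit value is illustrative).
const uint32_t kViewMaskBits = 32;     // width of a pViewMasks element
uint32_t maxFramebufferLayers = 2048;  // hypothetical VkPhysicalDeviceLimits value
// The highest settable bit position is kViewMaskBits - 1 == 31. When the
// device reports maxFramebufferLayers >= 32, no view mask can name an
// out-of-range layer, so the per-mask check can be skipped entirely; this
// is exactly what the ASSERT above relies on.
bool perMaskCheckNeeded = maxFramebufferLayers < kViewMaskBits;  // false here
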
@@ -1913,28 +1905,28 @@
 	return vk::RenderPass::Create(pAllocator, pCreateInfo, pRenderPass);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkRenderPass renderPass = %p, const VkAllocationCallbacks* pAllocator = %p)",
-		    device, static_cast<void*>(renderPass), pAllocator);
+	      device, static_cast<void *>(renderPass), pAllocator);
 
 	vk::destroy(renderPass, pAllocator);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity)
+VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity(VkDevice device, VkRenderPass renderPass, VkExtent2D *pGranularity)
 {
 	TRACE("(VkDevice device = %p, VkRenderPass renderPass = %p, VkExtent2D* pGranularity = %p)",
-	      device, static_cast<void*>(renderPass), pGranularity);
+	      device, static_cast<void *>(renderPass), pGranularity);
 
 	vk::Cast(renderPass)->getRenderAreaGranularity(pGranularity);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool)
 {
 	TRACE("(VkDevice device = %p, const VkCommandPoolCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkCommandPool* pCommandPool = %p)",
-		    device, pCreateInfo, pAllocator, pCommandPool);
+	      device, pCreateInfo, pAllocator, pCommandPool);
 
-	auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+	auto *nextInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 	while(nextInfo)
 	{
 		WARN("pCreateInfo->pNext sType = %s", vk::Stringify(nextInfo->sType).c_str());
@@ -1944,10 +1936,10 @@
 	return vk::CommandPool::Create(pAllocator, pCreateInfo, pCommandPool);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkCommandPool commandPool = %p, const VkAllocationCallbacks* pAllocator = %p)",
-		    device, static_cast<void*>(commandPool), pAllocator);
+	      device, static_cast<void *>(commandPool), pAllocator);
 
 	vk::destroy(commandPool, pAllocator);
 }
@@ -1955,41 +1947,40 @@
 VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags)
 {
 	TRACE("(VkDevice device = %p, VkCommandPool commandPool = %p, VkCommandPoolResetFlags flags = %d)",
-		device, static_cast<void*>(commandPool), int(flags));
+	      device, static_cast<void *>(commandPool), int(flags));
 
 	return vk::Cast(commandPool)->reset(flags);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers)
+VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo, VkCommandBuffer *pCommandBuffers)
 {
 	TRACE("(VkDevice device = %p, const VkCommandBufferAllocateInfo* pAllocateInfo = %p, VkCommandBuffer* pCommandBuffers = %p)",
-		    device, pAllocateInfo, pCommandBuffers);
+	      device, pAllocateInfo, pCommandBuffers);
 
-	auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pAllocateInfo->pNext);
+	auto *nextInfo = reinterpret_cast<const VkBaseInStructure *>(pAllocateInfo->pNext);
 	while(nextInfo)
 	{
 		WARN("pAllocateInfo->pNext sType = %s", vk::Stringify(nextInfo->sType).c_str());
 		nextInfo = nextInfo->pNext;
 	}
 
-	return vk::Cast(pAllocateInfo->commandPool)->allocateCommandBuffers(
-		pAllocateInfo->level, pAllocateInfo->commandBufferCount, pCommandBuffers);
+	return vk::Cast(pAllocateInfo->commandPool)->allocateCommandBuffers(pAllocateInfo->level, pAllocateInfo->commandBufferCount, pCommandBuffers);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers)
+VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers)
 {
 	TRACE("(VkDevice device = %p, VkCommandPool commandPool = %p, uint32_t commandBufferCount = %d, const VkCommandBuffer* pCommandBuffers = %p)",
-		    device, static_cast<void*>(commandPool), int(commandBufferCount), pCommandBuffers);
+	      device, static_cast<void *>(commandPool), int(commandBufferCount), pCommandBuffers);
 
 	vk::Cast(commandPool)->freeCommandBuffers(commandBufferCount, pCommandBuffers);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo)
+VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, const VkCommandBufferBeginInfo* pBeginInfo = %p)",
-		    commandBuffer, pBeginInfo);
+	      commandBuffer, pBeginInfo);
 
-	auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pBeginInfo->pNext);
+	auto *nextInfo = reinterpret_cast<const VkBaseInStructure *>(pBeginInfo->pNext);
 	while(nextInfo)
 	{
 		WARN("pBeginInfo->pNext sType = %s", vk::Stringify(nextInfo->sType).c_str());
@@ -2016,12 +2007,12 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkPipelineBindPoint pipelineBindPoint = %d, VkPipeline pipeline = %p)",
-		    commandBuffer, int(pipelineBindPoint), static_cast<void*>(pipeline));
+	      commandBuffer, int(pipelineBindPoint), static_cast<void *>(pipeline));
 
 	vk::Cast(commandBuffer)->bindPipeline(pipelineBindPoint, vk::Cast(pipeline));
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports)
+VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, uint32_t firstViewport = %d, uint32_t viewportCount = %d, const VkViewport* pViewports = %p)",
 	      commandBuffer, int(firstViewport), int(viewportCount), pViewports);
@@ -2029,7 +2020,7 @@
 	vk::Cast(commandBuffer)->setViewport(firstViewport, viewportCount, pViewports);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors)
+VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, uint32_t firstScissor = %d, uint32_t scissorCount = %d, const VkRect2D* pScissors = %p)",
 	      commandBuffer, int(firstScissor), int(scissorCount), pScissors);
@@ -2092,10 +2083,10 @@
 	vk::Cast(commandBuffer)->setStencilReference(faceMask, reference);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets)
+VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkPipelineBindPoint pipelineBindPoint = %d, VkPipelineLayout layout = %p, uint32_t firstSet = %d, uint32_t descriptorSetCount = %d, const VkDescriptorSet* pDescriptorSets = %p, uint32_t dynamicOffsetCount = %d, const uint32_t* pDynamicOffsets = %p)",
-	      commandBuffer, int(pipelineBindPoint), static_cast<void*>(layout), int(firstSet), int(descriptorSetCount), pDescriptorSets, int(dynamicOffsetCount), pDynamicOffsets);
+	      commandBuffer, int(pipelineBindPoint), static_cast<void *>(layout), int(firstSet), int(descriptorSetCount), pDescriptorSets, int(dynamicOffsetCount), pDynamicOffsets);
 
 	vk::Cast(commandBuffer)->bindDescriptorSets(pipelineBindPoint, vk::Cast(layout), firstSet, descriptorSetCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
 }
@@ -2103,15 +2094,15 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer buffer = %p, VkDeviceSize offset = %d, VkIndexType indexType = %d)",
-	      commandBuffer, static_cast<void*>(buffer), int(offset), int(indexType));
+	      commandBuffer, static_cast<void *>(buffer), int(offset), int(indexType));
 
 	vk::Cast(commandBuffer)->bindIndexBuffer(vk::Cast(buffer), offset, indexType);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets)
+VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, uint32_t firstBinding = %d, uint32_t bindingCount = %d, const VkBuffer* pBuffers = %p, const VkDeviceSize* pOffsets = %p)",
-		    commandBuffer, int(firstBinding), int(bindingCount), pBuffers, pOffsets);
+	      commandBuffer, int(firstBinding), int(bindingCount), pBuffers, pOffsets);
 
 	vk::Cast(commandBuffer)->bindVertexBuffers(firstBinding, bindingCount, pBuffers, pOffsets);
 }
@@ -2119,7 +2110,7 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, uint32_t vertexCount = %d, uint32_t instanceCount = %d, uint32_t firstVertex = %d, uint32_t firstInstance = %d)",
-		    commandBuffer, int(vertexCount), int(instanceCount), int(firstVertex), int(firstInstance));
+	      commandBuffer, int(vertexCount), int(instanceCount), int(firstVertex), int(firstInstance));
 
 	vk::Cast(commandBuffer)->draw(vertexCount, instanceCount, firstVertex, firstInstance);
 }
@@ -2127,7 +2118,7 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, uint32_t indexCount = %d, uint32_t instanceCount = %d, uint32_t firstIndex = %d, int32_t vertexOffset = %d, uint32_t firstInstance = %d)",
-		    commandBuffer, int(indexCount), int(instanceCount), int(firstIndex), int(vertexOffset), int(firstInstance));
+	      commandBuffer, int(indexCount), int(instanceCount), int(firstIndex), int(vertexOffset), int(firstInstance));
 
 	vk::Cast(commandBuffer)->drawIndexed(indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
 }
@@ -2135,7 +2126,7 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer buffer = %p, VkDeviceSize offset = %d, uint32_t drawCount = %d, uint32_t stride = %d)",
-		    commandBuffer, static_cast<void*>(buffer), int(offset), int(drawCount), int(stride));
+	      commandBuffer, static_cast<void *>(buffer), int(offset), int(drawCount), int(stride));
 
 	vk::Cast(commandBuffer)->drawIndirect(vk::Cast(buffer), offset, drawCount, stride);
 }
@@ -2143,7 +2134,7 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer buffer = %p, VkDeviceSize offset = %d, uint32_t drawCount = %d, uint32_t stride = %d)",
-		    commandBuffer, static_cast<void*>(buffer), int(offset), int(drawCount), int(stride));
+	      commandBuffer, static_cast<void *>(buffer), int(offset), int(drawCount), int(stride));
 
 	vk::Cast(commandBuffer)->drawIndexedIndirect(vk::Cast(buffer), offset, drawCount, stride);
 }
@@ -2159,55 +2150,55 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer buffer = %p, VkDeviceSize offset = %d)",
-	      commandBuffer, static_cast<void*>(buffer), int(offset));
+	      commandBuffer, static_cast<void *>(buffer), int(offset));
 
 	vk::Cast(commandBuffer)->dispatchIndirect(vk::Cast(buffer), offset);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions)
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy *pRegions)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer srcBuffer = %p, VkBuffer dstBuffer = %p, uint32_t regionCount = %d, const VkBufferCopy* pRegions = %p)",
-	      commandBuffer, static_cast<void*>(srcBuffer), static_cast<void*>(dstBuffer), int(regionCount), pRegions);
+	      commandBuffer, static_cast<void *>(srcBuffer), static_cast<void *>(dstBuffer), int(regionCount), pRegions);
 
 	vk::Cast(commandBuffer)->copyBuffer(vk::Cast(srcBuffer), vk::Cast(dstBuffer), regionCount, pRegions);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions)
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkImage srcImage = %p, VkImageLayout srcImageLayout = %d, VkImage dstImage = %p, VkImageLayout dstImageLayout = %d, uint32_t regionCount = %d, const VkImageCopy* pRegions = %p)",
-	      commandBuffer, static_cast<void*>(srcImage), srcImageLayout, static_cast<void*>(dstImage), dstImageLayout, int(regionCount), pRegions);
+	      commandBuffer, static_cast<void *>(srcImage), srcImageLayout, static_cast<void *>(dstImage), dstImageLayout, int(regionCount), pRegions);
 
 	vk::Cast(commandBuffer)->copyImage(vk::Cast(srcImage), srcImageLayout, vk::Cast(dstImage), dstImageLayout, regionCount, pRegions);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter)
+VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkImage srcImage = %p, VkImageLayout srcImageLayout = %d, VkImage dstImage = %p, VkImageLayout dstImageLayout = %d, uint32_t regionCount = %d, const VkImageBlit* pRegions = %p, VkFilter filter = %d)",
-	      commandBuffer, static_cast<void*>(srcImage), srcImageLayout, static_cast<void*>(dstImage), dstImageLayout, int(regionCount), pRegions, filter);
+	      commandBuffer, static_cast<void *>(srcImage), srcImageLayout, static_cast<void *>(dstImage), dstImageLayout, int(regionCount), pRegions, filter);
 
 	vk::Cast(commandBuffer)->blitImage(vk::Cast(srcImage), srcImageLayout, vk::Cast(dstImage), dstImageLayout, regionCount, pRegions, filter);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions)
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy *pRegions)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer srcBuffer = %p, VkImage dstImage = %p, VkImageLayout dstImageLayout = %d, uint32_t regionCount = %d, const VkBufferImageCopy* pRegions = %p)",
-	      commandBuffer, static_cast<void*>(srcBuffer), static_cast<void*>(dstImage), dstImageLayout, int(regionCount), pRegions);
+	      commandBuffer, static_cast<void *>(srcBuffer), static_cast<void *>(dstImage), dstImageLayout, int(regionCount), pRegions);
 
 	vk::Cast(commandBuffer)->copyBufferToImage(vk::Cast(srcBuffer), vk::Cast(dstImage), dstImageLayout, regionCount, pRegions);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions)
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkImage srcImage = %p, VkImageLayout srcImageLayout = %d, VkBuffer dstBuffer = %p, uint32_t regionCount = %d, const VkBufferImageCopy* pRegions = %p)",
-		    commandBuffer, static_cast<void*>(srcImage), int(srcImageLayout), static_cast<void*>(dstBuffer), int(regionCount), pRegions);
+	      commandBuffer, static_cast<void *>(srcImage), int(srcImageLayout), static_cast<void *>(dstBuffer), int(regionCount), pRegions);
 
 	vk::Cast(commandBuffer)->copyImageToBuffer(vk::Cast(srcImage), srcImageLayout, vk::Cast(dstBuffer), regionCount, pRegions);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData)
+VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer dstBuffer = %p, VkDeviceSize dstOffset = %d, VkDeviceSize dataSize = %d, const void* pData = %p)",
-	      commandBuffer, static_cast<void*>(dstBuffer), int(dstOffset), int(dataSize), pData);
+	      commandBuffer, static_cast<void *>(dstBuffer), int(dstOffset), int(dataSize), pData);
 
 	vk::Cast(commandBuffer)->updateBuffer(vk::Cast(dstBuffer), dstOffset, dataSize, pData);
 }
@@ -2215,39 +2206,39 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkBuffer dstBuffer = %p, VkDeviceSize dstOffset = %d, VkDeviceSize size = %d, uint32_t data = %d)",
-	      commandBuffer, static_cast<void*>(dstBuffer), int(dstOffset), int(size), data);
+	      commandBuffer, static_cast<void *>(dstBuffer), int(dstOffset), int(size), data);
 
 	vk::Cast(commandBuffer)->fillBuffer(vk::Cast(dstBuffer), dstOffset, size, data);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges)
+VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue *pColor, uint32_t rangeCount, const VkImageSubresourceRange *pRanges)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkImage image = %p, VkImageLayout imageLayout = %d, const VkClearColorValue* pColor = %p, uint32_t rangeCount = %d, const VkImageSubresourceRange* pRanges = %p)",
-	      commandBuffer, static_cast<void*>(image), int(imageLayout), pColor, int(rangeCount), pRanges);
+	      commandBuffer, static_cast<void *>(image), int(imageLayout), pColor, int(rangeCount), pRanges);
 
 	vk::Cast(commandBuffer)->clearColorImage(vk::Cast(image), imageLayout, pColor, rangeCount, pRanges);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges)
+VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange *pRanges)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkImage image = %p, VkImageLayout imageLayout = %d, const VkClearDepthStencilValue* pDepthStencil = %p, uint32_t rangeCount = %d, const VkImageSubresourceRange* pRanges = %p)",
-	      commandBuffer, static_cast<void*>(image), int(imageLayout), pDepthStencil, int(rangeCount), pRanges);
+	      commandBuffer, static_cast<void *>(image), int(imageLayout), pDepthStencil, int(rangeCount), pRanges);
 
 	vk::Cast(commandBuffer)->clearDepthStencilImage(vk::Cast(image), imageLayout, pDepthStencil, rangeCount, pRanges);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects)
+VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment *pAttachments, uint32_t rectCount, const VkClearRect *pRects)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, uint32_t attachmentCount = %d, const VkClearAttachment* pAttachments = %p, uint32_t rectCount = %d, const VkClearRect* pRects = %p)",
-	      commandBuffer, int(attachmentCount), pAttachments, int(rectCount),  pRects);
+	      commandBuffer, int(attachmentCount), pAttachments, int(rectCount), pRects);
 
 	vk::Cast(commandBuffer)->clearAttachments(attachmentCount, pAttachments, rectCount, pRects);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions)
+VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkImage srcImage = %p, VkImageLayout srcImageLayout = %d, VkImage dstImage = %p, VkImageLayout dstImageLayout = %d, uint32_t regionCount = %d, const VkImageResolve* pRegions = %p)",
-	      commandBuffer, static_cast<void*>(srcImage), int(srcImageLayout), static_cast<void*>(dstImage), int(dstImageLayout), regionCount, pRegions);
+	      commandBuffer, static_cast<void *>(srcImage), int(srcImageLayout), static_cast<void *>(dstImage), int(dstImageLayout), regionCount, pRegions);
 
 	vk::Cast(commandBuffer)->resolveImage(vk::Cast(srcImage), srcImageLayout, vk::Cast(dstImage), dstImageLayout, regionCount, pRegions);
 }
@@ -2255,7 +2246,7 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkEvent event = %p, VkPipelineStageFlags stageMask = %d)",
-	      commandBuffer, static_cast<void*>(event), int(stageMask));
+	      commandBuffer, static_cast<void *>(event), int(stageMask));
 
 	vk::Cast(commandBuffer)->setEvent(vk::Cast(event), stageMask);
 }
@@ -2263,12 +2254,12 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkEvent event = %p, VkPipelineStageFlags stageMask = %d)",
-	      commandBuffer, static_cast<void*>(event), int(stageMask));
+	      commandBuffer, static_cast<void *>(event), int(stageMask));
 
 	vk::Cast(commandBuffer)->resetEvent(vk::Cast(event), stageMask);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers)
+VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, uint32_t eventCount = %d, const VkEvent* pEvents = %p, VkPipelineStageFlags srcStageMask = 0x%x, VkPipelineStageFlags dstStageMask = 0x%x, uint32_t memoryBarrierCount = %d, const VkMemoryBarrier* pMemoryBarriers = %p, uint32_t bufferMemoryBarrierCount = %d, const VkBufferMemoryBarrier* pBufferMemoryBarriers = %p, uint32_t imageMemoryBarrierCount = %d, const VkImageMemoryBarrier* pImageMemoryBarriers = %p)",
 	      commandBuffer, int(eventCount), pEvents, int(srcStageMask), int(dstStageMask), int(memoryBarrierCount), pMemoryBarriers, int(bufferMemoryBarrierCount), pBufferMemoryBarriers, int(imageMemoryBarrierCount), pImageMemoryBarriers);
@@ -2276,22 +2267,20 @@
 	vk::Cast(commandBuffer)->waitEvents(eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers)
+VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers)
 {
-	TRACE("(VkCommandBuffer commandBuffer = %p, VkPipelineStageFlags srcStageMask = 0x%x, VkPipelineStageFlags dstStageMask = 0x%x, VkDependencyFlags dependencyFlags = %d, uint32_t memoryBarrierCount = %d, onst VkMemoryBarrier* pMemoryBarriers = %p,"
-	      " uint32_t bufferMemoryBarrierCount = %d, const VkBufferMemoryBarrier* pBufferMemoryBarriers = %p, uint32_t imageMemoryBarrierCount = %d, const VkImageMemoryBarrier* pImageMemoryBarriers = %p)",
-	      commandBuffer, int(srcStageMask), int(dstStageMask), dependencyFlags, int(memoryBarrierCount), pMemoryBarriers, int(bufferMemoryBarrierCount), pBufferMemoryBarriers, int(imageMemoryBarrierCount), pImageMemoryBarriers);
+	TRACE(
+	    "(VkCommandBuffer commandBuffer = %p, VkPipelineStageFlags srcStageMask = 0x%x, VkPipelineStageFlags dstStageMask = 0x%x, VkDependencyFlags dependencyFlags = %d, uint32_t memoryBarrierCount = %d, const VkMemoryBarrier* pMemoryBarriers = %p,"
+	    " uint32_t bufferMemoryBarrierCount = %d, const VkBufferMemoryBarrier* pBufferMemoryBarriers = %p, uint32_t imageMemoryBarrierCount = %d, const VkImageMemoryBarrier* pImageMemoryBarriers = %p)",
+	    commandBuffer, int(srcStageMask), int(dstStageMask), dependencyFlags, int(memoryBarrierCount), pMemoryBarriers, int(bufferMemoryBarrierCount), pBufferMemoryBarriers, int(imageMemoryBarrierCount), pImageMemoryBarriers);
 
-	vk::Cast(commandBuffer)->pipelineBarrier(srcStageMask, dstStageMask, dependencyFlags,
-	                                         memoryBarrierCount, pMemoryBarriers,
-	                                         bufferMemoryBarrierCount, pBufferMemoryBarriers,
-	                                         imageMemoryBarrierCount, pImageMemoryBarriers);
+	vk::Cast(commandBuffer)->pipelineBarrier(srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
 }
 
 VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkQueryPool queryPool = %p, uint32_t query = %d, VkQueryControlFlags flags = %d)",
-	      commandBuffer, static_cast<void*>(queryPool), query, int(flags));
+	      commandBuffer, static_cast<void *>(queryPool), query, int(flags));
 
 	vk::Cast(commandBuffer)->beginQuery(vk::Cast(queryPool), query, flags);
 }
@@ -2299,7 +2288,7 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkQueryPool queryPool = %p, uint32_t query = %d)",
-	      commandBuffer, static_cast<void*>(queryPool), int(query));
+	      commandBuffer, static_cast<void *>(queryPool), int(query));
 
 	vk::Cast(commandBuffer)->endQuery(vk::Cast(queryPool), query);
 }
@@ -2307,7 +2296,7 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkQueryPool queryPool = %p, uint32_t firstQuery = %d, uint32_t queryCount = %d)",
-	      commandBuffer, static_cast<void*>(queryPool), int(firstQuery), int(queryCount));
+	      commandBuffer, static_cast<void *>(queryPool), int(firstQuery), int(queryCount));
 
 	vk::Cast(commandBuffer)->resetQueryPool(vk::Cast(queryPool), firstQuery, queryCount);
 }
@@ -2315,7 +2304,7 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkPipelineStageFlagBits pipelineStage = %d, VkQueryPool queryPool = %p, uint32_t query = %d)",
-	      commandBuffer, int(pipelineStage), static_cast<void*>(queryPool), int(query));
+	      commandBuffer, int(pipelineStage), static_cast<void *>(queryPool), int(query));
 
 	vk::Cast(commandBuffer)->writeTimestamp(pipelineStage, vk::Cast(queryPool), query);
 }
@@ -2323,45 +2312,43 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkQueryPool queryPool = %p, uint32_t firstQuery = %d, uint32_t queryCount = %d, VkBuffer dstBuffer = %p, VkDeviceSize dstOffset = %d, VkDeviceSize stride = %d, VkQueryResultFlags flags = %d)",
-	      commandBuffer, static_cast<void*>(queryPool), int(firstQuery), int(queryCount), static_cast<void*>(dstBuffer), int(dstOffset), int(stride), int(flags));
+	      commandBuffer, static_cast<void *>(queryPool), int(firstQuery), int(queryCount), static_cast<void *>(dstBuffer), int(dstOffset), int(stride), int(flags));
 
 	vk::Cast(commandBuffer)->copyQueryPoolResults(vk::Cast(queryPool), firstQuery, queryCount, vk::Cast(dstBuffer), dstOffset, stride, flags);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues)
+VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void *pValues)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, VkPipelineLayout layout = %p, VkShaderStageFlags stageFlags = %d, uint32_t offset = %d, uint32_t size = %d, const void* pValues = %p)",
-	      commandBuffer, static_cast<void*>(layout), stageFlags, offset, size, pValues);
+	      commandBuffer, static_cast<void *>(layout), stageFlags, offset, size, pValues);
 
 	vk::Cast(commandBuffer)->pushConstants(vk::Cast(layout), stageFlags, offset, size, pValues);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents)
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, const VkRenderPassBeginInfo* pRenderPassBegin = %p, VkSubpassContents contents = %d)",
 	      commandBuffer, pRenderPassBegin, contents);
 
-	const VkBaseInStructure* renderPassBeginInfo = reinterpret_cast<const VkBaseInStructure*>(pRenderPassBegin->pNext);
+	const VkBaseInStructure *renderPassBeginInfo = reinterpret_cast<const VkBaseInStructure *>(pRenderPassBegin->pNext);
 	while(renderPassBeginInfo)
 	{
 		switch(renderPassBeginInfo->sType)
 		{
-		case VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO:
-			// This extension controls which render area is used on which physical device,
-			// in order to distribute rendering between multiple physical devices.
-			// SwiftShader only has a single physical device, so this extension does nothing in this case.
-			break;
-		default:
-			WARN("pRenderPassBegin->pNext sType = %s", vk::Stringify(renderPassBeginInfo->sType).c_str());
-			break;
+			case VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO:
+				// This extension controls which render area is used on which physical device,
+				// in order to distribute rendering between multiple physical devices.
+				// SwiftShader only has a single physical device, so this extension does nothing in this case.
+				break;
+			default:
+				WARN("pRenderPassBegin->pNext sType = %s", vk::Stringify(renderPassBeginInfo->sType).c_str());
+				break;
 		}
 
 		renderPassBeginInfo = renderPassBeginInfo->pNext;
 	}
 
-	vk::Cast(commandBuffer)->beginRenderPass(vk::Cast(pRenderPassBegin->renderPass), vk::Cast(pRenderPassBegin->framebuffer),
-	                                         pRenderPassBegin->renderArea, pRenderPassBegin->clearValueCount,
-	                                         pRenderPassBegin->pClearValues, contents);
+	vk::Cast(commandBuffer)->beginRenderPass(vk::Cast(pRenderPassBegin->renderPass), vk::Cast(pRenderPassBegin->framebuffer), pRenderPassBegin->renderArea, pRenderPassBegin->clearValueCount, pRenderPassBegin->pClearValues, contents);
 }
 
 VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
@@ -2379,7 +2366,7 @@
 	vk::Cast(commandBuffer)->endRenderPass();
 }
 
-VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers)
+VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, uint32_t commandBufferCount = %d, const VkCommandBuffer* pCommandBuffers = %p)",
 	      commandBuffer, commandBufferCount, pCommandBuffers);
@@ -2387,21 +2374,21 @@
 	vk::Cast(commandBuffer)->executeCommands(commandBufferCount, pCommandBuffers);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceVersion(uint32_t* pApiVersion)
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceVersion(uint32_t *pApiVersion)
 {
 	TRACE("(uint32_t* pApiVersion = %p)", pApiVersion);
 	*pApiVersion = vk::API_VERSION;
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos)
+VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos)
 {
 	TRACE("(VkDevice device = %p, uint32_t bindInfoCount = %d, const VkBindBufferMemoryInfo* pBindInfos = %p)",
 	      device, bindInfoCount, pBindInfos);
 
 	for(uint32_t i = 0; i < bindInfoCount; i++)
 	{
-		auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pBindInfos[i].pNext);
+		auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pBindInfos[i].pNext);
 		while(extInfo)
 		{
 			WARN("pBindInfos[%d].pNext sType = %s", i, vk::Stringify(extInfo->sType).c_str());
@@ -2423,7 +2410,7 @@
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos)
+VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo *pBindInfos)
 {
 	TRACE("(VkDevice device = %p, uint32_t bindInfoCount = %d, const VkBindImageMemoryInfo* pBindInfos = %p)",
 	      device, bindInfoCount, pBindInfos);
@@ -2447,12 +2434,12 @@
 		{
 			switch(extInfo->sType)
 			{
-			case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO:
-				/* Do nothing */
-				break;
+				case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO:
+					/* Do nothing */
+					break;
 
 #ifndef __ANDROID__
-			case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR:
+				case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR:
 				{
 					auto swapchainInfo = reinterpret_cast<VkBindImageMemorySwapchainInfoKHR const *>(extInfo);
 					memory = vk::Cast(swapchainInfo->swapchain)->getImage(swapchainInfo->imageIndex).getImageMemory();
@@ -2461,9 +2448,9 @@
 				break;
 #endif
 
-			default:
-				WARN("pBindInfos[%d].pNext sType = %s", i, vk::Stringify(extInfo->sType).c_str());
-				break;
+				default:
+					WARN("pBindInfos[%d].pNext sType = %s", i, vk::Stringify(extInfo->sType).c_str());
+					break;
 			}
 			extInfo = extInfo->pNext;
 		}
@@ -2474,13 +2461,13 @@
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeatures(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeatures(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
 {
 	TRACE("(VkDevice device = %p, uint32_t heapIndex = %d, uint32_t localDeviceIndex = %d, uint32_t remoteDeviceIndex = %d, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures = %p)",
 	      device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures);
 
-	ASSERT(localDeviceIndex != remoteDeviceIndex); // "localDeviceIndex must not equal remoteDeviceIndex"
-	UNREACHABLE("remoteDeviceIndex: %d", int(remoteDeviceIndex));   // Only one physical device is supported, and since the device indexes can't be equal, this should never be called.
+	ASSERT(localDeviceIndex != remoteDeviceIndex);                 // "localDeviceIndex must not equal remoteDeviceIndex"
+	UNREACHABLE("remoteDeviceIndex: %d", int(remoteDeviceIndex));  // Only one physical device is supported, and since the device indexes can't be equal, this should never be called.
 }
 
 VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
@@ -2493,12 +2480,12 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBase(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, baseGroupX = %u, baseGroupY = %u, baseGroupZ = %u, groupCountX = %u, groupCountY = %u, groupCountZ = %u)",
-			commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ);
+	      commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ);
 
 	vk::Cast(commandBuffer)->dispatchBase(baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties)
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
 {
 	TRACE("VkInstance instance = %p, uint32_t* pPhysicalDeviceGroupCount = %p, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties = %p",
 	      instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
@@ -2506,32 +2493,32 @@
 	return vk::Cast(instance)->getPhysicalDeviceGroups(pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements)
+VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo, VkMemoryRequirements2 *pMemoryRequirements)
 {
 	TRACE("(VkDevice device = %p, const VkImageMemoryRequirementsInfo2* pInfo = %p, VkMemoryRequirements2* pMemoryRequirements = %p)",
 	      device, pInfo, pMemoryRequirements);
 
-	auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pInfo->pNext);
+	auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pInfo->pNext);
 	while(extInfo)
 	{
 		WARN("pInfo->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
 		extInfo = extInfo->pNext;
 	}
 
-	VkBaseOutStructure* extensionRequirements = reinterpret_cast<VkBaseOutStructure*>(pMemoryRequirements->pNext);
+	VkBaseOutStructure *extensionRequirements = reinterpret_cast<VkBaseOutStructure *>(pMemoryRequirements->pNext);
 	while(extensionRequirements)
 	{
 		switch(extensionRequirements->sType)
 		{
-		case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
-		{
-			auto requirements = reinterpret_cast<VkMemoryDedicatedRequirements*>(extensionRequirements);
-			vk::Cast(device)->getRequirements(requirements);
-		}
-		break;
-		default:
-			WARN("pMemoryRequirements->pNext sType = %s", vk::Stringify(extensionRequirements->sType).c_str());
+			case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
+			{
+				auto requirements = reinterpret_cast<VkMemoryDedicatedRequirements *>(extensionRequirements);
+				vk::Cast(device)->getRequirements(requirements);
+			}
 			break;
+			default:
+				WARN("pMemoryRequirements->pNext sType = %s", vk::Stringify(extensionRequirements->sType).c_str());
+				break;
 		}
 
 		extensionRequirements = extensionRequirements->pNext;
@@ -2540,32 +2527,32 @@
 	vkGetImageMemoryRequirements(device, pInfo->image, &(pMemoryRequirements->memoryRequirements));
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements)
+VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2 *pInfo, VkMemoryRequirements2 *pMemoryRequirements)
 {
 	TRACE("(VkDevice device = %p, const VkBufferMemoryRequirementsInfo2* pInfo = %p, VkMemoryRequirements2* pMemoryRequirements = %p)",
 	      device, pInfo, pMemoryRequirements);
 
-	auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pInfo->pNext);
+	auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pInfo->pNext);
 	while(extInfo)
 	{
 		WARN("pInfo->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
 		extInfo = extInfo->pNext;
 	}
 
-	VkBaseOutStructure* extensionRequirements = reinterpret_cast<VkBaseOutStructure*>(pMemoryRequirements->pNext);
+	VkBaseOutStructure *extensionRequirements = reinterpret_cast<VkBaseOutStructure *>(pMemoryRequirements->pNext);
 	while(extensionRequirements)
 	{
 		switch(extensionRequirements->sType)
 		{
-		case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
+			case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
 			{
-				auto requirements = reinterpret_cast<VkMemoryDedicatedRequirements*>(extensionRequirements);
+				auto requirements = reinterpret_cast<VkMemoryDedicatedRequirements *>(extensionRequirements);
 				vk::Cast(device)->getRequirements(requirements);
 			}
 			break;
-		default:
-			WARN("pMemoryRequirements->pNext sType = %s", vk::Stringify(extensionRequirements->sType).c_str());
-			break;
+			default:
+				WARN("pMemoryRequirements->pNext sType = %s", vk::Stringify(extensionRequirements->sType).c_str());
+				break;
 		}
 
 		extensionRequirements = extensionRequirements->pNext;
@@ -2574,19 +2561,19 @@
 	vkGetBufferMemoryRequirements(device, pInfo->buffer, &(pMemoryRequirements->memoryRequirements));
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
+VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2(VkDevice device, const VkImageSparseMemoryRequirementsInfo2 *pInfo, uint32_t *pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
 {
 	TRACE("(VkDevice device = %p, const VkImageSparseMemoryRequirementsInfo2* pInfo = %p, uint32_t* pSparseMemoryRequirementCount = %p, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements = %p)",
 	      device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
 
-	auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pInfo->pNext);
+	auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pInfo->pNext);
 	while(extInfo)
 	{
 		WARN("pInfo->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
 		extInfo = extInfo->pNext;
 	}
 
-	auto extensionRequirements = reinterpret_cast<VkBaseInStructure const*>(pSparseMemoryRequirements->pNext);
+	auto extensionRequirements = reinterpret_cast<VkBaseInStructure const *>(pSparseMemoryRequirements->pNext);
 	while(extensionRequirements)
 	{
 		WARN("pSparseMemoryRequirements->pNext sType = %s", vk::Stringify(extensionRequirements->sType).c_str());
@@ -2598,88 +2585,88 @@
 	*pSparseMemoryRequirementCount = 0;
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2 *pFeatures)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, VkPhysicalDeviceFeatures2* pFeatures = %p)", physicalDevice, pFeatures);
 
-	VkBaseOutStructure* extensionFeatures = reinterpret_cast<VkBaseOutStructure*>(pFeatures->pNext);
+	VkBaseOutStructure *extensionFeatures = reinterpret_cast<VkBaseOutStructure *>(pFeatures->pNext);
 	while(extensionFeatures)
 	{
 		switch((long)(extensionFeatures->sType))
 		{
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES:
 			{
-				auto features = reinterpret_cast<VkPhysicalDeviceSamplerYcbcrConversionFeatures*>(extensionFeatures);
+				auto features = reinterpret_cast<VkPhysicalDeviceSamplerYcbcrConversionFeatures *>(extensionFeatures);
 				vk::Cast(physicalDevice)->getFeatures(features);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES:
 			{
-				auto features = reinterpret_cast<VkPhysicalDevice16BitStorageFeatures*>(extensionFeatures);
+				auto features = reinterpret_cast<VkPhysicalDevice16BitStorageFeatures *>(extensionFeatures);
 				vk::Cast(physicalDevice)->getFeatures(features);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES:
 			{
-				auto features = reinterpret_cast<VkPhysicalDeviceVariablePointerFeatures*>(extensionFeatures);
+				auto features = reinterpret_cast<VkPhysicalDeviceVariablePointerFeatures *>(extensionFeatures);
 				vk::Cast(physicalDevice)->getFeatures(features);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR:
 			{
-				auto features = reinterpret_cast<VkPhysicalDevice8BitStorageFeaturesKHR*>(extensionFeatures);
+				auto features = reinterpret_cast<VkPhysicalDevice8BitStorageFeaturesKHR *>(extensionFeatures);
 				vk::Cast(physicalDevice)->getFeatures(features);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES:
 			{
-				auto features = reinterpret_cast<VkPhysicalDeviceMultiviewFeatures*>(extensionFeatures);
+				auto features = reinterpret_cast<VkPhysicalDeviceMultiviewFeatures *>(extensionFeatures);
 				vk::Cast(physicalDevice)->getFeatures(features);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
 			{
-				auto features = reinterpret_cast<VkPhysicalDeviceProtectedMemoryFeatures*>(extensionFeatures);
+				auto features = reinterpret_cast<VkPhysicalDeviceProtectedMemoryFeatures *>(extensionFeatures);
 				vk::Cast(physicalDevice)->getFeatures(features);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES:
 			{
-				auto features = reinterpret_cast<VkPhysicalDeviceShaderDrawParameterFeatures*>(extensionFeatures);
+				auto features = reinterpret_cast<VkPhysicalDeviceShaderDrawParameterFeatures *>(extensionFeatures);
 				vk::Cast(physicalDevice)->getFeatures(features);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
 			{
-				auto features = reinterpret_cast<VkPhysicalDeviceLineRasterizationFeaturesEXT*>(extensionFeatures);
+				auto features = reinterpret_cast<VkPhysicalDeviceLineRasterizationFeaturesEXT *>(extensionFeatures);
 				vk::Cast(physicalDevice)->getFeatures(features);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
 			{
-				auto features = reinterpret_cast<VkPhysicalDeviceProvokingVertexFeaturesEXT*>(extensionFeatures);
+				auto features = reinterpret_cast<VkPhysicalDeviceProvokingVertexFeaturesEXT *>(extensionFeatures);
 				vk::Cast(physicalDevice)->getFeatures(features);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
-			ASSERT(!HasExtensionProperty(VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME, deviceExtensionProperties,
-										 sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
-			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT:
-			ASSERT(!HasExtensionProperty(VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME, deviceExtensionProperties,
-										 sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
-			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR:
-			ASSERT(!HasExtensionProperty(VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME, deviceExtensionProperties,
-										 sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
-			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR:
-			ASSERT(!HasExtensionProperty(VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME, deviceExtensionProperties,
-										 sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
-			break;
-		default:
-			WARN("pFeatures->pNext sType = %s", vk::Stringify(extensionFeatures->sType).c_str());
-			break;
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
+				ASSERT(!HasExtensionProperty(VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME, deviceExtensionProperties,
+				                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
+				break;
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT:
+				ASSERT(!HasExtensionProperty(VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME, deviceExtensionProperties,
+				                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
+				break;
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR:
+				ASSERT(!HasExtensionProperty(VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME, deviceExtensionProperties,
+				                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
+				break;
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR:
+				ASSERT(!HasExtensionProperty(VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME, deviceExtensionProperties,
+				                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
+				break;
+			default:
+				WARN("pFeatures->pNext sType = %s", vk::Stringify(extensionFeatures->sType).c_str());
+				break;
 		}
 
 		extensionFeatures = extensionFeatures->pNext;
@@ -2688,11 +2675,11 @@
 	vkGetPhysicalDeviceFeatures(physicalDevice, &(pFeatures->features));
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2 *pProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, VkPhysicalDeviceProperties2* pProperties = %p)", physicalDevice, pProperties);
 
-	VkBaseOutStructure* extensionProperties = reinterpret_cast<VkBaseOutStructure*>(pProperties->pNext);
+	VkBaseOutStructure *extensionProperties = reinterpret_cast<VkBaseOutStructure *>(pProperties->pNext);
 	while(extensionProperties)
 	{
 		// Casting to a long since some structures, such as
@@ -2701,81 +2688,81 @@
 		// are not enumerated in the official Vulkan header
 		switch((long)(extensionProperties->sType))
 		{
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES:
 			{
-				auto properties = reinterpret_cast<VkPhysicalDeviceIDProperties*>(extensionProperties);
+				auto properties = reinterpret_cast<VkPhysicalDeviceIDProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES:
 			{
-				auto properties = reinterpret_cast<VkPhysicalDeviceMaintenance3Properties*>(extensionProperties);
+				auto properties = reinterpret_cast<VkPhysicalDeviceMaintenance3Properties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES:
 			{
-				auto properties = reinterpret_cast<VkPhysicalDeviceMultiviewProperties*>(extensionProperties);
+				auto properties = reinterpret_cast<VkPhysicalDeviceMultiviewProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES:
 			{
-				auto properties = reinterpret_cast<VkPhysicalDevicePointClippingProperties*>(extensionProperties);
+				auto properties = reinterpret_cast<VkPhysicalDevicePointClippingProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES:
 			{
-				auto properties = reinterpret_cast<VkPhysicalDeviceProtectedMemoryProperties*>(extensionProperties);
+				auto properties = reinterpret_cast<VkPhysicalDeviceProtectedMemoryProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES:
 			{
-				auto properties = reinterpret_cast<VkPhysicalDeviceSubgroupProperties*>(extensionProperties);
+				auto properties = reinterpret_cast<VkPhysicalDeviceSubgroupProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT:
-			// Explicitly ignored, since VK_EXT_sample_locations is not supported
-			ASSERT(!HasExtensionProperty(VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME, deviceExtensionProperties,
-			                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
-			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT:
-			ASSERT(!HasExtensionProperty(VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME, deviceExtensionProperties,
-			                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
-			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT:
+				// Explicitly ignored, since VK_EXT_sample_locations is not supported
+				ASSERT(!HasExtensionProperty(VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME, deviceExtensionProperties,
+				                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
+				break;
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT:
+				ASSERT(!HasExtensionProperty(VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME, deviceExtensionProperties,
+				                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
+				break;
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR:
 			{
-				auto properties = reinterpret_cast<VkPhysicalDeviceDriverPropertiesKHR*>(extensionProperties);
+				auto properties = reinterpret_cast<VkPhysicalDeviceDriverPropertiesKHR *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
 #ifdef __ANDROID__
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENTATION_PROPERTIES_ANDROID:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENTATION_PROPERTIES_ANDROID:
 			{
-				auto properties = reinterpret_cast<VkPhysicalDevicePresentationPropertiesANDROID*>(extensionProperties);
+				auto properties = reinterpret_cast<VkPhysicalDevicePresentationPropertiesANDROID *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
 #endif
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
 			{
-				auto properties = reinterpret_cast<VkPhysicalDeviceLineRasterizationPropertiesEXT*>(extensionProperties);
+				auto properties = reinterpret_cast<VkPhysicalDeviceLineRasterizationPropertiesEXT *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_PROPERTIES_EXT:
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_PROPERTIES_EXT:
 			{
-				auto properties = reinterpret_cast<VkPhysicalDeviceProvokingVertexPropertiesEXT*>(extensionProperties);
+				auto properties = reinterpret_cast<VkPhysicalDeviceProvokingVertexPropertiesEXT *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-		default:
-			// "the [driver] must skip over, without processing (other than reading the sType and pNext members) any structures in the chain with sType values not defined by [supported extenions]"
-			WARN("pProperties->pNext sType = %s", vk::Stringify(extensionProperties->sType).c_str());
-			break;
+			default:
+				// "the [driver] must skip over, without processing (other than reading the sType and pNext members) any structures in the chain with sType values not defined by [supported extensions]"
+				WARN("pProperties->pNext sType = %s", vk::Stringify(extensionProperties->sType).c_str());
+				break;
 		}
 
 		extensionProperties = extensionProperties->pNext;
@@ -2784,12 +2771,12 @@
 	vkGetPhysicalDeviceProperties(physicalDevice, &(pProperties->properties));
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2 *pFormatProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, VkFormat format = %d, VkFormatProperties2* pFormatProperties = %p)",
-		    physicalDevice, format, pFormatProperties);
+	      physicalDevice, format, pFormatProperties);
 
-	auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pFormatProperties->pNext);
+	auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pFormatProperties->pNext);
 	while(extInfo)
 	{
 		WARN("pFormatProperties->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
@@ -2799,103 +2786,103 @@
 	vkGetPhysicalDeviceFormatProperties(physicalDevice, format, &(pFormatProperties->formatProperties));
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, VkImageFormatProperties2 *pImageFormatProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo = %p, VkImageFormatProperties2* pImageFormatProperties = %p)",
-		    physicalDevice, pImageFormatInfo, pImageFormatProperties);
+	      physicalDevice, pImageFormatInfo, pImageFormatProperties);
 
-	const VkBaseInStructure* extensionFormatInfo = reinterpret_cast<const VkBaseInStructure*>(pImageFormatInfo->pNext);
+	const VkBaseInStructure *extensionFormatInfo = reinterpret_cast<const VkBaseInStructure *>(pImageFormatInfo->pNext);
 
-	const VkExternalMemoryHandleTypeFlagBits* handleType = nullptr;
+	const VkExternalMemoryHandleTypeFlagBits *handleType = nullptr;
 	while(extensionFormatInfo)
 	{
 		switch(extensionFormatInfo->sType)
 		{
-		case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR:
-		{
-			// Explicitly ignored, since VK_KHR_image_format_list is not supported
-			ASSERT(!HasExtensionProperty(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME, deviceExtensionProperties,
-			                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
-		}
-		break;
-		case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO_EXT:
-		{
-			// Explicitly ignored, since VK_EXT_separate_stencil_usage is not supported
-			ASSERT(!HasExtensionProperty(VK_EXT_SEPARATE_STENCIL_USAGE_EXTENSION_NAME, deviceExtensionProperties,
-			                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
-		}
-		break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO:
-		{
-			const VkPhysicalDeviceExternalImageFormatInfo* imageFormatInfo = reinterpret_cast<const VkPhysicalDeviceExternalImageFormatInfo*>(extensionFormatInfo);
-			handleType = &(imageFormatInfo->handleType);
-		}
-		break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT:
-		{
-			// Explicitly ignored, since VK_EXT_image_drm_format_modifier is not supported
-			ASSERT(!HasExtensionProperty(VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME, deviceExtensionProperties,
-			                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
-		}
-		break;
-		default:
-			WARN("pImageFormatInfo->pNext sType = %s", vk::Stringify(extensionFormatInfo->sType).c_str());
+			case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR:
+			{
+				// Explicitly ignored, since VK_KHR_image_format_list is not supported
+				ASSERT(!HasExtensionProperty(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME, deviceExtensionProperties,
+				                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
+			}
 			break;
+			case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO_EXT:
+			{
+				// Explicitly ignored, since VK_EXT_separate_stencil_usage is not supported
+				ASSERT(!HasExtensionProperty(VK_EXT_SEPARATE_STENCIL_USAGE_EXTENSION_NAME, deviceExtensionProperties,
+				                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
+			}
+			break;
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO:
+			{
+				const VkPhysicalDeviceExternalImageFormatInfo *imageFormatInfo = reinterpret_cast<const VkPhysicalDeviceExternalImageFormatInfo *>(extensionFormatInfo);
+				handleType = &(imageFormatInfo->handleType);
+			}
+			break;
+			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT:
+			{
+				// Explicitly ignored, since VK_EXT_image_drm_format_modifier is not supported
+				ASSERT(!HasExtensionProperty(VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME, deviceExtensionProperties,
+				                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
+			}
+			break;
+			default:
+				WARN("pImageFormatInfo->pNext sType = %s", vk::Stringify(extensionFormatInfo->sType).c_str());
+				break;
 		}
 
 		extensionFormatInfo = extensionFormatInfo->pNext;
 	}
 
-	VkBaseOutStructure* extensionProperties = reinterpret_cast<VkBaseOutStructure*>(pImageFormatProperties->pNext);
+	VkBaseOutStructure *extensionProperties = reinterpret_cast<VkBaseOutStructure *>(pImageFormatProperties->pNext);
 
 	while(extensionProperties)
 	{
 		switch(extensionProperties->sType)
 		{
-		case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES:
-		{
-			auto properties = reinterpret_cast<VkExternalImageFormatProperties*>(extensionProperties);
-			vk::Cast(physicalDevice)->getProperties(handleType, properties);
-		}
-		break;
-		case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES:
-		{
-			auto properties = reinterpret_cast<VkSamplerYcbcrConversionImageFormatProperties*>(extensionProperties);
-			vk::Cast(physicalDevice)->getProperties(properties);
-		}
-		break;
-		case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD:
-		{
-			// Explicitly ignored, since VK_AMD_texture_gather_bias_lod is not supported
-			ASSERT(!HasExtensionProperty(VK_AMD_TEXTURE_GATHER_BIAS_LOD_EXTENSION_NAME, deviceExtensionProperties,
-			                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
-		}
-		break;
-		default:
-			WARN("pImageFormatProperties->pNext sType = %s", vk::Stringify(extensionProperties->sType).c_str());
+			case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES:
+			{
+				auto properties = reinterpret_cast<VkExternalImageFormatProperties *>(extensionProperties);
+				vk::Cast(physicalDevice)->getProperties(handleType, properties);
+			}
 			break;
+			case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES:
+			{
+				auto properties = reinterpret_cast<VkSamplerYcbcrConversionImageFormatProperties *>(extensionProperties);
+				vk::Cast(physicalDevice)->getProperties(properties);
+			}
+			break;
+			case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD:
+			{
+				// Explicitly ignored, since VK_AMD_texture_gather_bias_lod is not supported
+				ASSERT(!HasExtensionProperty(VK_AMD_TEXTURE_GATHER_BIAS_LOD_EXTENSION_NAME, deviceExtensionProperties,
+				                             sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0])));
+			}
+			break;
+			default:
+				WARN("pImageFormatProperties->pNext sType = %s", vk::Stringify(extensionProperties->sType).c_str());
+				break;
 		}
 
 		extensionProperties = extensionProperties->pNext;
 	}
 
 	return vkGetPhysicalDeviceImageFormatProperties(physicalDevice,
-		                                            pImageFormatInfo->format,
-		                                            pImageFormatInfo->type,
-		                                            pImageFormatInfo->tiling,
-		                                            pImageFormatInfo->usage,
-		                                            pImageFormatInfo->flags,
-		                                            &(pImageFormatProperties->imageFormatProperties));
+	                                                pImageFormatInfo->format,
+	                                                pImageFormatInfo->type,
+	                                                pImageFormatInfo->tiling,
+	                                                pImageFormatInfo->usage,
+	                                                pImageFormatInfo->flags,
+	                                                &(pImageFormatProperties->imageFormatProperties));
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2 *pQueueFamilyProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, uint32_t* pQueueFamilyPropertyCount = %p, VkQueueFamilyProperties2* pQueueFamilyProperties = %p)",
-		physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);
+	      physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);
 
 	if(pQueueFamilyProperties)
 	{
-		auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pQueueFamilyProperties->pNext);
+		auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pQueueFamilyProperties->pNext);
 		while(extInfo)
 		{
 			WARN("pQueueFamilyProperties->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
@@ -2913,11 +2900,11 @@
 	}
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, VkPhysicalDeviceMemoryProperties2* pMemoryProperties = %p)", physicalDevice, pMemoryProperties);
 
-	auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pMemoryProperties->pNext);
+	auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pMemoryProperties->pNext);
 	while(extInfo)
 	{
 		WARN("pMemoryProperties->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
@@ -2927,14 +2914,14 @@
 	vkGetPhysicalDeviceMemoryProperties(physicalDevice, &(pMemoryProperties->memoryProperties));
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2 *pFormatInfo, uint32_t *pPropertyCount, VkSparseImageFormatProperties2 *pProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo = %p, uint32_t* pPropertyCount = %p, VkSparseImageFormatProperties2* pProperties = %p)",
-	     physicalDevice, pFormatInfo, pPropertyCount, pProperties);
+	      physicalDevice, pFormatInfo, pPropertyCount, pProperties);
 
 	if(pProperties)
 	{
-		auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pProperties->pNext);
+		auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pProperties->pNext);
 		while(extInfo)
 		{
 			WARN("pProperties->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
@@ -2949,17 +2936,17 @@
 VKAPI_ATTR void VKAPI_CALL vkTrimCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags)
 {
 	TRACE("(VkDevice device = %p, VkCommandPool commandPool = %p, VkCommandPoolTrimFlags flags = %d)",
-	      device, static_cast<void*>(commandPool), flags);
+	      device, static_cast<void *>(commandPool), flags);
 
 	vk::Cast(commandPool)->trim(flags);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue)
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue)
 {
 	TRACE("(VkDevice device = %p, const VkDeviceQueueInfo2* pQueueInfo = %p, VkQueue* pQueue = %p)",
 	      device, pQueueInfo, pQueue);
 
-	auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pQueueInfo->pNext);
+	auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pQueueInfo->pNext);
 	while(extInfo)
 	{
 		WARN("pQueueInfo->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
@@ -2981,12 +2968,12 @@
 	}
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion)
 {
 	TRACE("(VkDevice device = %p, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkSamplerYcbcrConversion* pYcbcrConversion = %p)",
-		    device, pCreateInfo, pAllocator, pYcbcrConversion);
+	      device, pCreateInfo, pAllocator, pYcbcrConversion);
 
-	auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pCreateInfo->pNext);
+	auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pCreateInfo->pNext);
 	while(extInfo)
 	{
 		WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
@@ -2996,15 +2983,15 @@
 	return vk::SamplerYcbcrConversion::Create(pAllocator, pCreateInfo, pYcbcrConversion);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversion(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversion(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkSamplerYcbcrConversion ycbcrConversion = %p, const VkAllocationCallbacks* pAllocator = %p)",
-	      device, static_cast<void*>(ycbcrConversion), pAllocator);
+	      device, static_cast<void *>(ycbcrConversion), pAllocator);
 
 	vk::destroy(ycbcrConversion, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
 {
 	TRACE("(VkDevice device = %p, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate = %p)",
 	      device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
@@ -3014,7 +3001,7 @@
 		UNIMPLEMENTED("pCreateInfo->flags || (pCreateInfo->templateType != VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET)");
 	}
 
-	auto extInfo = reinterpret_cast<VkBaseInStructure const*>(pCreateInfo->pNext);
+	auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pCreateInfo->pNext);
 	while(extInfo)
 	{
 		WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
@@ -3024,23 +3011,23 @@
 	return vk::DescriptorUpdateTemplate::Create(pAllocator, pCreateInfo, pDescriptorUpdateTemplate);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkDescriptorUpdateTemplate descriptorUpdateTemplate = %p, const VkAllocationCallbacks* pAllocator = %p)",
-	      device, static_cast<void*>(descriptorUpdateTemplate), pAllocator);
+	      device, static_cast<void *>(descriptorUpdateTemplate), pAllocator);
 
 	vk::destroy(descriptorUpdateTemplate, pAllocator);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData)
+VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData)
 {
 	TRACE("(VkDevice device = %p, VkDescriptorSet descriptorSet = %p, VkDescriptorUpdateTemplate descriptorUpdateTemplate = %p, const void* pData = %p)",
-	      device, static_cast<void*>(descriptorSet), static_cast<void*>(descriptorUpdateTemplate), pData);
+	      device, static_cast<void *>(descriptorSet), static_cast<void *>(descriptorUpdateTemplate), pData);
 
 	vk::Cast(descriptorUpdateTemplate)->updateDescriptorSet(vk::Cast(device), descriptorSet, pData);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferProperties(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferProperties(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo *pExternalBufferInfo, VkExternalBufferProperties *pExternalBufferProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo = %p, VkExternalBufferProperties* pExternalBufferProperties = %p)",
 	      physicalDevice, pExternalBufferInfo, pExternalBufferProperties);
@@ -3048,7 +3035,7 @@
 	vk::Cast(physicalDevice)->getProperties(pExternalBufferInfo, pExternalBufferProperties);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFenceProperties(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFenceProperties(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo, VkExternalFenceProperties *pExternalFenceProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo = %p, VkExternalFenceProperties* pExternalFenceProperties = %p)",
 	      physicalDevice, pExternalFenceInfo, pExternalFenceProperties);
@@ -3056,7 +3043,7 @@
 	vk::Cast(physicalDevice)->getProperties(pExternalFenceInfo, pExternalFenceProperties);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphoreProperties(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties)
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphoreProperties(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo, VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo = %p, VkExternalSemaphoreProperties* pExternalSemaphoreProperties = %p)",
 	      physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties);
@@ -3064,10 +3051,10 @@
 	vk::Cast(physicalDevice)->getProperties(pExternalSemaphoreInfo, pExternalSemaphoreProperties);
 }
 
-VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupport(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport)
+VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupport(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo, VkDescriptorSetLayoutSupport *pSupport)
 {
 	TRACE("(VkDevice device = %p, const VkDescriptorSetLayoutCreateInfo* pCreateInfo = %p, VkDescriptorSetLayoutSupport* pSupport = %p)",
-	        device, pCreateInfo, pSupport);
+	      device, pCreateInfo, pSupport);
 
 	vk::Cast(device)->getDescriptorSetLayoutSupport(pCreateInfo, pSupport);
 }
@@ -3075,72 +3062,72 @@
 VKAPI_ATTR void VKAPI_CALL vkCmdSetLineStippleEXT(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern)
 {
 	TRACE("(VkCommandBuffer commandBuffer = %p, uint32_t lineStippleFactor = %u, uint16_t lineStipplePattern = %u",
-			commandBuffer, lineStippleFactor, lineStipplePattern);
+	      commandBuffer, lineStippleFactor, lineStipplePattern);
 
 	UNIMPLEMENTED("Line stipple not supported");
 }
 
 #ifdef VK_USE_PLATFORM_XCB_KHR
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface)
 {
 	TRACE("(VkInstance instance = %p, VkXcbSurfaceCreateInfoKHR* pCreateInfo = %p, VkAllocationCallbacks* pAllocator = %p, VkSurface* pSurface = %p)",
-			instance, pCreateInfo, pAllocator, pSurface);
+	      instance, pCreateInfo, pAllocator, pSurface);
 
 	return vk::XcbSurfaceKHR::Create(pAllocator, pCreateInfo, pSurface);
 }
 
-VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id)
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t *connection, xcb_visualid_t visual_id)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, uint32_t queueFamilyIndex = %d, xcb_connection_t* connection = %p, xcb_visualid_t visual_id = %d)",
-		physicalDevice, int(queueFamilyIndex), connection, int(visual_id));
+	      physicalDevice, int(queueFamilyIndex), connection, int(visual_id));
 
 	return VK_TRUE;
 }
 #endif
 
 #ifdef VK_USE_PLATFORM_XLIB_KHR
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface)
 {
 	TRACE("(VkInstance instance = %p, VkXlibSurfaceCreateInfoKHR* pCreateInfo = %p, VkAllocationCallbacks* pAllocator = %p, VkSurface* pSurface = %p)",
-			instance, pCreateInfo, pAllocator, pSurface);
+	      instance, pCreateInfo, pAllocator, pSurface);
 
 	return vk::XlibSurfaceKHR::Create(pAllocator, pCreateInfo, pSurface);
 }
 
-VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display* dpy, VisualID visualID)
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display *dpy, VisualID visualID)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, uint32_t queueFamilyIndex = %d, Display* dpy = %p, VisualID visualID = %lu)",
-		  physicalDevice, int(queueFamilyIndex), dpy, visualID);
+	      physicalDevice, int(queueFamilyIndex), dpy, visualID);
 
 	return VK_TRUE;
 }
 #endif
 
 #ifdef VK_USE_PLATFORM_MACOS_MVK
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface)
 {
-    TRACE("(VkInstance instance = %p, VkMacOSSurfaceCreateInfoMVK* pCreateInfo = %p, VkAllocationCallbacks* pAllocator = %p, VkSurface* pSurface = %p)",
-          instance, pCreateInfo, pAllocator, pSurface);
+	TRACE("(VkInstance instance = %p, VkMacOSSurfaceCreateInfoMVK* pCreateInfo = %p, VkAllocationCallbacks* pAllocator = %p, VkSurface* pSurface = %p)",
+	      instance, pCreateInfo, pAllocator, pSurface);
 
-    return vk::MacOSSurfaceMVK::Create(pAllocator, pCreateInfo, pSurface);
+	return vk::MacOSSurfaceMVK::Create(pAllocator, pCreateInfo, pSurface);
 }
 #endif
 
 #ifdef VK_USE_PLATFORM_METAL_EXT
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateMetalSurfaceEXT(VkInstance instance, const VkMetalSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateMetalSurfaceEXT(VkInstance instance, const VkMetalSurfaceCreateInfoEXT *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface)
 {
-    TRACE("(VkInstance instance = %p, VkMetalSurfaceCreateInfoEXT* pCreateInfo = %p, VkAllocationCallbacks* pAllocator = %p, VkSurface* pSurface = %p)",
-          instance, pCreateInfo, pAllocator, pSurface);
+	TRACE("(VkInstance instance = %p, VkMetalSurfaceCreateInfoEXT* pCreateInfo = %p, VkAllocationCallbacks* pAllocator = %p, VkSurface* pSurface = %p)",
+	      instance, pCreateInfo, pAllocator, pSurface);
 
-    return vk::MetalSurfaceEXT::Create(pAllocator, pCreateInfo, pSurface);
+	return vk::MetalSurfaceEXT::Create(pAllocator, pCreateInfo, pSurface);
 }
 #endif
 
 #ifdef VK_USE_PLATFORM_WIN32_KHR
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface)
 {
 	TRACE("(VkInstance instance = %p, VkWin32SurfaceCreateInfoKHR* pCreateInfo = %p, VkAllocationCallbacks* pAllocator = %p, VkSurface* pSurface = %p)",
-			instance, pCreateInfo, pAllocator, pSurface);
+	      instance, pCreateInfo, pAllocator, pSurface);
 
 	return vk::Win32SurfaceKHR::Create(pAllocator, pCreateInfo, pSurface);
 }
@@ -3148,42 +3135,42 @@
 VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, uint32_t queueFamilyIndex = %d)",
-		physicalDevice, queueFamilyIndex);
+	      physicalDevice, queueFamilyIndex);
 	return VK_TRUE;
 }
 #endif
 
 #ifndef __ANDROID__
-VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator)
 {
-    TRACE("(VkInstance instance = %p, VkSurfaceKHR surface = %p, const VkAllocationCallbacks* pAllocator = %p)",
-            instance, static_cast<void*>(surface), pAllocator);
+	TRACE("(VkInstance instance = %p, VkSurfaceKHR surface = %p, const VkAllocationCallbacks* pAllocator = %p)",
+	      instance, static_cast<void *>(surface), pAllocator);
 
-    vk::destroy(surface, pAllocator);
+	vk::destroy(surface, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32 *pSupported)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, uint32_t queueFamilyIndex = %d, VkSurface surface = %p, VKBool32* pSupported = %p)",
-			physicalDevice, int(queueFamilyIndex), static_cast<void*>(surface), pSupported);
+	      physicalDevice, int(queueFamilyIndex), static_cast<void *>(surface), pSupported);
 
-	*pSupported =  VK_TRUE;
+	*pSupported = VK_TRUE;
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR *pSurfaceCapabilities)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, VkSurfaceKHR surface = %p, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities = %p)",
-			physicalDevice, static_cast<void*>(surface), pSurfaceCapabilities);
+	      physicalDevice, static_cast<void *>(surface), pSurfaceCapabilities);
 
 	vk::Cast(surface)->getSurfaceCapabilities(pSurfaceCapabilities);
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pSurfaceFormatCount, VkSurfaceFormatKHR *pSurfaceFormats)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, VkSurfaceKHR surface = %p. uint32_t* pSurfaceFormatCount = %p, VkSurfaceFormatKHR* pSurfaceFormats = %p)",
-			physicalDevice, static_cast<void*>(surface), pSurfaceFormatCount, pSurfaceFormats);
+	      physicalDevice, static_cast<void *>(surface), pSurfaceFormatCount, pSurfaceFormats);
 
 	if(!pSurfaceFormats)
 	{
@@ -3194,10 +3181,10 @@
 	return vk::Cast(surface)->getSurfaceFormats(pSurfaceFormatCount, pSurfaceFormats);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pPresentModeCount, VkPresentModeKHR *pPresentModes)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, VkSurfaceKHR surface = %p uint32_t* pPresentModeCount = %p, VkPresentModeKHR* pPresentModes = %p)",
-			physicalDevice, static_cast<void*>(surface), pPresentModeCount, pPresentModes);
+	      physicalDevice, static_cast<void *>(surface), pPresentModeCount, pPresentModes);
 
 	if(!pPresentModes)
 	{
@@ -3208,10 +3195,10 @@
 	return vk::Cast(surface)->getPresentModes(pPresentModeCount, pPresentModes);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain)
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain)
 {
 	TRACE("(VkDevice device = %p, const VkSwapchainCreateInfoKHR* pCreateInfo = %p, const VkAllocationCallbacks* pAllocator = %p, VkSwapchainKHR* pSwapchain = %p)",
-			device, pCreateInfo, pAllocator, pSwapchain);
+	      device, pCreateInfo, pAllocator, pSwapchain);
 
 	if(pCreateInfo->oldSwapchain)
 	{
@@ -3244,18 +3231,18 @@
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator)
+VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator)
 {
 	TRACE("(VkDevice device = %p, VkSwapchainKHR swapchain = %p, const VkAllocationCallbacks* pAllocator = %p)",
-			device, static_cast<void*>(swapchain), pAllocator);
+	      device, static_cast<void *>(swapchain), pAllocator);
 
 	vk::destroy(swapchain, pAllocator);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages)
 {
 	TRACE("(VkDevice device = %p, VkSwapchainKHR swapchain = %p, uint32_t* pSwapchainImageCount = %p, VkImage* pSwapchainImages = %p)",
-			device, static_cast<void*>(swapchain), pSwapchainImageCount, pSwapchainImages);
+	      device, static_cast<void *>(swapchain), pSwapchainImageCount, pSwapchainImages);
 
 	if(!pSwapchainImages)
 	{
@@ -3266,18 +3253,18 @@
 	return vk::Cast(swapchain)->getImages(pSwapchainImageCount, pSwapchainImages);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex)
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex)
 {
 	TRACE("(VkDevice device = %p, VkSwapchainKHR swapchain = %p, uint64_t timeout = %d, VkSemaphore semaphore = %p, VkFence fence = %p, uint32_t* pImageIndex = %p)",
-			device, static_cast<void*>(swapchain), int(timeout), static_cast<void*>(semaphore), static_cast<void*>(fence), pImageIndex);
+	      device, static_cast<void *>(swapchain), int(timeout), static_cast<void *>(semaphore), static_cast<void *>(fence), pImageIndex);
 
 	return vk::Cast(swapchain)->getNextImage(timeout, vk::Cast(semaphore), vk::Cast(fence), pImageIndex);
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR* pPresentInfo)
+VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo)
 {
 	TRACE("(VkQueue queue = %p, const VkPresentInfoKHR* pPresentInfo = %p)",
-			queue, pPresentInfo);
+	      queue, pPresentInfo);
 
 	return vk::Cast(queue)->present(pPresentInfo);
 }
@@ -3285,7 +3272,7 @@
 VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo, uint32_t *pImageIndex)
 {
 	TRACE("(VkDevice device = %p, const VkAcquireNextImageInfoKHR *pAcquireInfo = %p, uint32_t *pImageIndex = %p",
-			device, pAcquireInfo, pImageIndex);
+	      device, pAcquireInfo, pImageIndex);
 
 	return vk::Cast(pAcquireInfo->swapchain)->getNextImage(pAcquireInfo->timeout, vk::Cast(pAcquireInfo->semaphore), vk::Cast(pAcquireInfo->fence), pImageIndex);
 }
@@ -3293,7 +3280,7 @@
 VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupPresentCapabilitiesKHR(VkDevice device, VkDeviceGroupPresentCapabilitiesKHR *pDeviceGroupPresentCapabilities)
 {
 	TRACE("(VkDevice device = %p, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities = %p)",
-			device, pDeviceGroupPresentCapabilities);
+	      device, pDeviceGroupPresentCapabilities);
 
 	for(int i = 0; i < VK_MAX_DEVICE_GROUP_SIZE; i++)
 	{
@@ -3310,29 +3297,28 @@
 VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHR(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR *pModes)
 {
 	TRACE("(VkDevice device = %p, VkSurfaceKHR surface = %p, VkDeviceGroupPresentModeFlagsKHR *pModes = %p)",
-			device, static_cast<void*>(surface), pModes);
+	      device, static_cast<void *>(surface), pModes);
 
 	*pModes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pRectCount, VkRect2D *pRects)
 {
 	TRACE("(VkPhysicalDevice physicalDevice = %p, VkSurfaceKHR surface = %p, uint32_t* pRectCount = %p, VkRect2D* pRects = %p)",
-			physicalDevice, static_cast<void*>(surface), pRectCount, pRects);
+	      physicalDevice, static_cast<void *>(surface), pRectCount, pRects);
 
 	return vk::Cast(surface)->getPresentRectangles(pRectCount, pRects);
 }
 
-
-#endif    // ! __ANDROID__
+#endif  // ! __ANDROID__
 
 #ifdef __ANDROID__
 
-VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainGrallocUsage2ANDROID(VkDevice device, VkFormat format, VkImageUsageFlags imageUsage, VkSwapchainImageUsageFlagsANDROID swapchainUsage, uint64_t* grallocConsumerUsage, uint64_t* grallocProducerUsage)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainGrallocUsage2ANDROID(VkDevice device, VkFormat format, VkImageUsageFlags imageUsage, VkSwapchainImageUsageFlagsANDROID swapchainUsage, uint64_t *grallocConsumerUsage, uint64_t *grallocProducerUsage)
 {
 	TRACE("(VkDevice device = %p, VkFormat format = %d, VkImageUsageFlags imageUsage = %d, VkSwapchainImageUsageFlagsANDROID swapchainUsage = %d, uint64_t* grallocConsumerUsage = %p, uin64_t* grallocProducerUsage = %p)",
-			device, format, imageUsage, swapchainUsage, grallocConsumerUsage, grallocProducerUsage);
+	      device, format, imageUsage, swapchainUsage, grallocConsumerUsage, grallocProducerUsage);
 
 	*grallocConsumerUsage = 0;
 	*grallocProducerUsage = GRALLOC1_PRODUCER_USAGE_CPU_WRITE_OFTEN;
@@ -3340,10 +3326,10 @@
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainGrallocUsageANDROID(VkDevice device, VkFormat format, VkImageUsageFlags imageUsage, int* grallocUsage)
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainGrallocUsageANDROID(VkDevice device, VkFormat format, VkImageUsageFlags imageUsage, int *grallocUsage)
 {
 	TRACE("(VkDevice device = %p, VkFormat format = %d, VkImageUsageFlags imageUsage = %d, int* grallocUsage = %p)",
-			device, format, imageUsage, grallocUsage);
+	      device, format, imageUsage, grallocUsage);
 
 	*grallocUsage = GRALLOC_USAGE_SW_WRITE_OFTEN;
 
@@ -3353,7 +3339,7 @@
 VKAPI_ATTR VkResult VKAPI_CALL vkAcquireImageANDROID(VkDevice device, VkImage image, int nativeFenceFd, VkSemaphore semaphore, VkFence fence)
 {
 	TRACE("(VkDevice device = %p, VkImage image = %p, int nativeFenceFd = %d, VkSemaphore semaphore = %p, VkFence fence = %p)",
-			device, static_cast<void*>(image), nativeFenceFd, static_cast<void*>(semaphore), static_cast<void*>(fence));
+	      device, static_cast<void *>(image), nativeFenceFd, static_cast<void *>(semaphore), static_cast<void *>(fence));
 
 	if(nativeFenceFd >= 0)
 	{
@@ -3374,10 +3360,10 @@
 	return VK_SUCCESS;
 }
 
-VKAPI_ATTR VkResult VKAPI_CALL vkQueueSignalReleaseImageANDROID(VkQueue queue, uint32_t waitSemaphoreCount, const VkSemaphore* pWaitSemaphores, VkImage image, int* pNativeFenceFd)
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueSignalReleaseImageANDROID(VkQueue queue, uint32_t waitSemaphoreCount, const VkSemaphore *pWaitSemaphores, VkImage image, int *pNativeFenceFd)
 {
 	TRACE("(VkQueue queue = %p, uint32_t waitSemaphoreCount = %d, const VkSemaphore* pWaitSemaphores = %p, VkImage image = %p, int* pNativeFenceFd = %p)",
-			queue, waitSemaphoreCount, pWaitSemaphores, static_cast<void*>(image), pNativeFenceFd);
+	      queue, waitSemaphoreCount, pWaitSemaphores, static_cast<void *>(image), pNativeFenceFd);
 
 	// This is a hack to deal with screen tearing for now.
 	// Need to correctly implement threading using VkSemaphore
@@ -3388,6 +3374,5 @@
 
 	return vk::Cast(image)->prepareForExternalUseANDROID();
 }
-#endif // __ANDROID__
-
+#endif  // __ANDROID__
 }
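
The mechanical edits above all fall out of a handful of clang-format options: right-aligned pointers ("const void *pData"), preprocessor directives indented after the hash, indented case labels, two spaces before trailing comments, and continuation arguments aligned under the opening parenthesis. The fragment below is a minimal sketch of such a configuration; the keys are real clang-format options, but the values are inferred from this diff rather than copied from SwiftShader's actual .clang-format file, which may differ.

	# Hypothetical .clang-format fragment inferred from this diff;
	# the project's real configuration may differ.
	UseTab: ForIndentation            # tabs for indentation, spaces for alignment
	PointerAlignment: Right           # "const void *pData"
	SpaceBeforeParens: Never          # "if(...)", "switch(...)"
	IndentPPDirectives: AfterHash     # "#	include", "#	ifdef"
	IndentCaseLabels: true            # "case DLL_PROCESS_ATTACH:" indented
	SpacesBeforeTrailingComments: 2   # "#endif  // __ANDROID__"
	AlignAfterOpenBracket: Align      # TRACE(...) continuations under the "("
	ColumnLimit: 0                    # long declarations left unwrapped
	BreakBeforeBraces: Custom
	BraceWrapping:
	  AfterFunction: true             # "{" on its own line after declarations
	  AfterControlStatement: true     # likewise after if/switch headers

Running "clang-format -i" over the sources with a configuration along these lines regenerates edits of exactly the shape seen in this change.
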
diff --git a/src/Vulkan/main.cpp b/src/Vulkan/main.cpp
index 9a75f05..7037d1a 100644
--- a/src/Vulkan/main.cpp
+++ b/src/Vulkan/main.cpp
@@ -15,32 +15,32 @@
 // main.cpp: DLL entry point.
 
 #if defined(_WIN32)
-#include "resource.h"
-#include <windows.h>
+#	include "resource.h"
+#	include <windows.h>
 
-#ifdef DEBUGGER_WAIT_DIALOG
+#	ifdef DEBUGGER_WAIT_DIALOG
 static INT_PTR CALLBACK DebuggerWaitDialogProc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
 {
 	RECT rect;
 
 	switch(uMsg)
 	{
-	case WM_INITDIALOG:
-		GetWindowRect(GetDesktopWindow(), &rect);
-		SetWindowPos(hwnd, HWND_TOP, rect.right / 2, rect.bottom / 2, 0, 0, SWP_NOSIZE);
-		SetTimer(hwnd, 1, 100, NULL);
-		return TRUE;
-	case WM_COMMAND:
-		if(LOWORD(wParam) == IDCANCEL)
-		{
-			EndDialog(hwnd, 0);
-		}
-		break;
-	case WM_TIMER:
-		if(IsDebuggerPresent())
-		{
-			EndDialog(hwnd, 0);
-		}
+		case WM_INITDIALOG:
+			GetWindowRect(GetDesktopWindow(), &rect);
+			SetWindowPos(hwnd, HWND_TOP, rect.right / 2, rect.bottom / 2, 0, 0, SWP_NOSIZE);
+			SetTimer(hwnd, 1, 100, NULL);
+			return TRUE;
+		case WM_COMMAND:
+			if(LOWORD(wParam) == IDCANCEL)
+			{
+				EndDialog(hwnd, 0);
+			}
+			break;
+		case WM_TIMER:
+			if(IsDebuggerPresent())
+			{
+				EndDialog(hwnd, 0);
+			}
 	}
 
 	return FALSE;
@@ -51,18 +51,18 @@
 	if(!IsDebuggerPresent())
 	{
 		HRSRC dialog = FindResource(instance, MAKEINTRESOURCE(IDD_DIALOG1), RT_DIALOG);
-		DLGTEMPLATE *dialogTemplate = (DLGTEMPLATE*)LoadResource(instance, dialog);
+		DLGTEMPLATE *dialogTemplate = (DLGTEMPLATE *)LoadResource(instance, dialog);
 		DialogBoxIndirect(instance, dialogTemplate, NULL, DebuggerWaitDialogProc);
 	}
 }
-#endif
+#	endif
 
 extern "C" BOOL WINAPI DllMain(HINSTANCE instance, DWORD reason, LPVOID reserved)
 {
 	switch(reason)
 	{
-	case DLL_PROCESS_ATTACH:
-		#ifdef DEBUGGER_WAIT_DIALOG
+		case DLL_PROCESS_ATTACH:
+#	ifdef DEBUGGER_WAIT_DIALOG
 		{
 			char disable_debugger_wait_dialog[] = "0";
 			GetEnvironmentVariable("SWIFTSHADER_DISABLE_DEBUGGER_WAIT_DIALOG", disable_debugger_wait_dialog, sizeof(disable_debugger_wait_dialog));
@@ -72,13 +72,13 @@
 				WaitForDebugger(instance);
 			}
 		}
-		#endif
+#	endif
 		break;
-	case DLL_THREAD_ATTACH:
-	case DLL_THREAD_DETACH:
-	case DLL_PROCESS_DETACH:
-	default:
-		break;
+		case DLL_THREAD_ATTACH:
+		case DLL_THREAD_DETACH:
+		case DLL_PROCESS_DETACH:
+		default:
+			break;
 	}
 
 	return TRUE;