Remove spaces after control statement keywords

Spaces are useful to separate independent constructs, but can cause
confusion when added between dependent ones. For example "a*b [i]"
is hard for humans to read correctly at a glance. "a*b[i]" is better,
and "a * b[i]" is the easiest to understand immediately.

Control statements are no different. "if (a)if (b)x;" is hard to parse.
"if (a) if (b) x;" is better, but "if(a) if(b) x;" leaves no confusion
of what belongs where.

This recommendation also follows the 'zero one infinity' rule of thumb:
https://en.wikipedia.org/wiki/Zero_one_infinity_rule
Whether we write "a + b" or "a  +  b", they are equally readable, and
the additional spaces may help with alignment of surrounding
expressions. "for  (int i : c)" on the other hand makes the keyword
unintentionally even more dissociated from its header than
"for (int i : c)" already does.

The argument that the space helps set it apart from function calls seems
moot when practically every editor supports keyword highlighting,
function names are typically longer than 2-3 characters, and function
calls are not followed by curly brackets (which, while optional for
singular statements, are still recommended for reasons other than this
one).

Bug: b/144825072
Change-Id: I3432fadae8e5604123f5c537097323504fecbc8c
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/39588
Tested-by: Nicolas Capens <nicolascapens@google.com>
Kokoro-Presubmit: kokoro <noreply+kokoro@google.com>
Reviewed-by: Ben Clayton <bclayton@google.com>
diff --git a/src/Device/Blitter.cpp b/src/Device/Blitter.cpp
index 1391188..3103616 100644
--- a/src/Device/Blitter.cpp
+++ b/src/Device/Blitter.cpp
@@ -48,14 +48,14 @@
 	}
 
 	float *pPixel = static_cast<float *>(pixel);
-	if (viewFormat.isUnsignedNormalized())
+	if(viewFormat.isUnsignedNormalized())
 	{
 		pPixel[0] = sw::clamp(pPixel[0], 0.0f, 1.0f);
 		pPixel[1] = sw::clamp(pPixel[1], 0.0f, 1.0f);
 		pPixel[2] = sw::clamp(pPixel[2], 0.0f, 1.0f);
 		pPixel[3] = sw::clamp(pPixel[3], 0.0f, 1.0f);
 	}
-	else if (viewFormat.isSignedNormalized())
+	else if(viewFormat.isSignedNormalized())
 	{
 		pPixel[0] = sw::clamp(pPixel[0], -1.0f, 1.0f);
 		pPixel[1] = sw::clamp(pPixel[1], -1.0f, 1.0f);
@@ -119,12 +119,12 @@
 			0, 0, // sWidth, sHeight
 		};
 
-		if (renderArea && dest->is3DSlice())
+		if(renderArea && dest->is3DSlice())
 		{
 			// Reinterpret layers as depth slices
 			subresLayers.baseArrayLayer = 0;
 			subresLayers.layerCount = 1;
-			for (uint32_t depth = subresourceRange.baseArrayLayer; depth <= lastLayer; depth++)
+			for(uint32_t depth = subresourceRange.baseArrayLayer; depth <= lastLayer; depth++)
 			{
 				data.dest = dest->getTexelPointer({0, 0, static_cast<int32_t>(depth)}, subresLayers);
 				blitRoutine(&data);
diff --git a/src/Device/Context.cpp b/src/Device/Context.cpp
index e41ce74..03987f4 100644
--- a/src/Device/Context.cpp
+++ b/src/Device/Context.cpp
@@ -242,9 +242,9 @@
 bool Context::allTargetsColorClamp() const
 {
 	// TODO: remove all of this and support VkPhysicalDeviceFeatures::independentBlend instead
-	for (int i = 0; i < RENDERTARGETS; i++)
+	for(int i = 0; i < RENDERTARGETS; i++)
 	{
-		if (renderTarget[i] && renderTarget[i]->getFormat().isFloatFormat())
+		if(renderTarget[i] && renderTarget[i]->getFormat().isFloatFormat())
 		{
 			return false;
 		}
@@ -371,7 +371,7 @@
 {
 	ASSERT((index >= 0) && (index < RENDERTARGETS));
 
-	switch (blendState[index].blendOperationAlpha)
+	switch(blendState[index].blendOperationAlpha)
 	{
 	case VK_BLEND_OP_ADD:
 	case VK_BLEND_OP_SUBTRACT:
@@ -392,7 +392,7 @@
 {
 	ASSERT((index >= 0) && (index < RENDERTARGETS));
 
-	switch (blendState[index].blendOperationAlpha)
+	switch(blendState[index].blendOperationAlpha)
 	{
 	case VK_BLEND_OP_ADD:
 	case VK_BLEND_OP_SUBTRACT:
@@ -413,12 +413,12 @@
 {
 	ASSERT((index >= 0) && (index < RENDERTARGETS));
 
-	switch (blendState[index].blendOperationAlpha)
+	switch(blendState[index].blendOperationAlpha)
 	{
 	case VK_BLEND_OP_ADD:
-		if (sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+		if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
 		{
-			if (destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
 			{
 				return VK_BLEND_OP_ZERO_EXT;
 			}
@@ -427,9 +427,9 @@
 				return VK_BLEND_OP_DST_EXT;
 			}
 		}
-		else if (sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ONE)
+		else if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ONE)
 		{
-			if (destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
 			{
 				return VK_BLEND_OP_SRC_EXT;
 			}
@@ -440,7 +440,7 @@
 		}
 		else
 		{
-			if (destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
 			{
 				return VK_BLEND_OP_SRC_EXT;
 			}
@@ -450,13 +450,13 @@
 			}
 		}
 	case VK_BLEND_OP_SUBTRACT:
-		if (sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO && allTargetsColorClamp())
+		if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO && allTargetsColorClamp())
 		{
 			return VK_BLEND_OP_ZERO_EXT;   // Negative, clamped to zero
 		}
-		else if (sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ONE)
+		else if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ONE)
 		{
-			if (destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
 			{
 				return VK_BLEND_OP_SRC_EXT;
 			}
@@ -467,7 +467,7 @@
 		}
 		else
 		{
-			if (destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
 			{
 				return VK_BLEND_OP_SRC_EXT;
 			}
@@ -477,9 +477,9 @@
 			}
 		}
 	case VK_BLEND_OP_REVERSE_SUBTRACT:
-		if (sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+		if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
 		{
-			if (destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
 			{
 				return VK_BLEND_OP_ZERO_EXT;
 			}
@@ -488,9 +488,9 @@
 				return VK_BLEND_OP_DST_EXT;
 			}
 		}
-		else if (sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ONE)
+		else if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ONE)
 		{
-			if (destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO && allTargetsColorClamp())
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO && allTargetsColorClamp())
 			{
 				return VK_BLEND_OP_ZERO_EXT;   // Negative, clamped to zero
 			}
@@ -501,7 +501,7 @@
 		}
 		else
 		{
-			if (destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO && allTargetsColorClamp())
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO && allTargetsColorClamp())
 			{
 				return VK_BLEND_OP_ZERO_EXT;   // Negative, clamped to zero
 			}
@@ -537,9 +537,9 @@
 
 bool Context::colorWriteActive() const
 {
-	for (int i = 0; i < RENDERTARGETS; i++)
+	for(int i = 0; i < RENDERTARGETS; i++)
 	{
-		if (colorWriteActive(i))
+		if(colorWriteActive(i))
 		{
 			return true;
 		}
diff --git a/src/Device/QuadRasterizer.cpp b/src/Device/QuadRasterizer.cpp
index 5b5c499..e589e15 100644
--- a/src/Device/QuadRasterizer.cpp
+++ b/src/Device/QuadRasterizer.cpp
@@ -143,28 +143,28 @@
 				Dw = *Pointer<Float4>(primitive + OFFSET(Primitive,w.C), 16) + yyyy * *Pointer<Float4>(primitive + OFFSET(Primitive,w.B), 16);
 			}
 
-			if (spirvShader)
+			if(spirvShader)
 			{
-				for (int interpolant = 0; interpolant < MAX_INTERFACE_COMPONENTS; interpolant++)
+				for(int interpolant = 0; interpolant < MAX_INTERFACE_COMPONENTS; interpolant++)
 				{
-					if (spirvShader->inputs[interpolant].Type == SpirvShader::ATTRIBTYPE_UNUSED)
+					if(spirvShader->inputs[interpolant].Type == SpirvShader::ATTRIBTYPE_UNUSED)
 						continue;
 
 					Dv[interpolant] = *Pointer<Float4>(primitive + OFFSET(Primitive, V[interpolant].C), 16);
-					if (!spirvShader->inputs[interpolant].Flat)
+					if(!spirvShader->inputs[interpolant].Flat)
 					{
 						Dv[interpolant] +=
 								yyyy * *Pointer<Float4>(primitive + OFFSET(Primitive, V[interpolant].B), 16);
 					}
 				}
 
-				for (unsigned int i = 0; i < state.numClipDistances; i++)
+				for(unsigned int i = 0; i < state.numClipDistances; i++)
 				{
 					DclipDistance[i] = *Pointer<Float4>(primitive + OFFSET(Primitive, clipDistance[i].C), 16) +
 								yyyy * *Pointer<Float4>(primitive + OFFSET(Primitive, clipDistance[i].B), 16);
 				}
 
-				for (unsigned int i = 0; i < state.numCullDistances; i++)
+				for(unsigned int i = 0; i < state.numCullDistances; i++)
 				{
 					DcullDistance[i] = *Pointer<Float4>(primitive + OFFSET(Primitive, cullDistance[i].C), 16) +
 								yyyy * *Pointer<Float4>(primitive + OFFSET(Primitive, cullDistance[i].B), 16);
@@ -190,7 +190,7 @@
 
 				for(unsigned int q = 0; q < state.multiSample; q++)
 				{
-					if (state.multiSampleMask & (1<<q))
+					if(state.multiSampleMask & (1<<q))
 					{
 						unsigned int i = state.multiSampledBresenham ? 0 : q;
 						Short4 mask = CmpGT(xxxx, xLeft[i]) & CmpGT(xRight[i], xxxx);
diff --git a/src/Device/Renderer.cpp b/src/Device/Renderer.cpp
index 383330f..80a2662 100644
--- a/src/Device/Renderer.cpp
+++ b/src/Device/Renderer.cpp
@@ -421,7 +421,7 @@
 		events = nullptr;
 	}
 
-	if (occlusionQuery != nullptr)
+	if(occlusionQuery != nullptr)
 	{
 		for(int cluster = 0; cluster < MaxClusterCount; cluster++)
 		{
@@ -450,14 +450,14 @@
 		ticket.done();
 	});
 
-	for (unsigned int batchId = 0; batchId < numBatches; batchId++)
+	for(unsigned int batchId = 0; batchId < numBatches; batchId++)
 	{
 		auto batch = draw->batchDataPool->borrow();
 		batch->id = batchId;
 		batch->firstPrimitive = batch->id * numPrimitivesPerBatch;
 		batch->numPrimitives = std::min(batch->firstPrimitive + numPrimitivesPerBatch, numPrimitives) - batch->firstPrimitive;
 
-		for (int cluster = 0; cluster < MaxClusterCount; cluster++)
+		for(int cluster = 0; cluster < MaxClusterCount; cluster++)
 		{
 			batch->clusterTickets[cluster] = std::move(clusterQueues[cluster].take());
 		}
@@ -466,18 +466,18 @@
 
 			processVertices(draw.get(), batch.get());
 
-			if (!draw->setupState.rasterizerDiscard)
+			if(!draw->setupState.rasterizerDiscard)
 			{
 				processPrimitives(draw.get(), batch.get());
 
-				if (batch->numVisible > 0)
+				if(batch->numVisible > 0)
 				{
 					processPixels(draw, batch, finally);
 					return;
 				}
 			}
 
-			for (int cluster = 0; cluster < MaxClusterCount; cluster++)
+			for(int cluster = 0; cluster < MaxClusterCount; cluster++)
 			{
 				batch->clusterTickets[cluster].done();
 			}
@@ -506,7 +506,7 @@
 	vertexTask.primitiveStart = batch->firstPrimitive;
 	// We're only using batch compaction for points, not lines
 	vertexTask.vertexCount = batch->numPrimitives * ((draw->topology == VK_PRIMITIVE_TOPOLOGY_POINT_LIST) ? 1 : 3);
-	if (vertexTask.vertexCache.drawCall != draw->id)
+	if(vertexTask.vertexCache.drawCall != draw->id)
 	{
 		vertexTask.vertexCache.clear();
 		vertexTask.vertexCache.drawCall = draw->id;
@@ -534,7 +534,7 @@
 		std::shared_ptr<marl::Finally> finally;
 	};
 	auto data = std::make_shared<Data>(draw, batch, finally);
-	for (int cluster = 0; cluster < MaxClusterCount; cluster++)
+	for(int cluster = 0; cluster < MaxClusterCount; cluster++)
 	{
 		batch->clusterTickets[cluster].onCall([data, cluster]
 		{
@@ -601,7 +601,7 @@
 	}
 
 	// setBatchIndices() takes care of the point case, since it's different due to the compaction
-	if (topology != VK_PRIMITIVE_TOPOLOGY_POINT_LIST)
+	if(topology != VK_PRIMITIVE_TOPOLOGY_POINT_LIST)
 	{
 		// Repeat the last index to allow for SIMD width overrun.
 		triangleIndicesOut[triangleCount][0] = triangleIndicesOut[triangleCount - 1][2];
@@ -1178,7 +1178,7 @@
 	for(uint32_t i = 0; i < vk::MAX_VERTEX_INPUT_BINDINGS; i++)
 	{
 		auto &attrib = inputs[i];
-		if (attrib.count && attrib.instanceStride && (attrib.instanceStride < attrib.robustnessSize))
+		if(attrib.count && attrib.instanceStride && (attrib.instanceStride < attrib.robustnessSize))
 		{
 			// Under the casts: attrib.buffer += attrib.instanceStride
 			attrib.buffer = (void const *)((uintptr_t)attrib.buffer + attrib.instanceStride);
diff --git a/src/Device/SetupProcessor.cpp b/src/Device/SetupProcessor.cpp
index df55e1a..e5057b9 100644
--- a/src/Device/SetupProcessor.cpp
+++ b/src/Device/SetupProcessor.cpp
@@ -84,9 +84,9 @@
 	state.numClipDistances = context->vertexShader->getNumOutputClipDistances();
 	state.numCullDistances = context->vertexShader->getNumOutputCullDistances();
 
-	if (context->pixelShader)
+	if(context->pixelShader)
 	{
-		for (int interpolant = 0; interpolant < MAX_INTERFACE_COMPONENTS; interpolant++)
+		for(int interpolant = 0; interpolant < MAX_INTERFACE_COMPONENTS; interpolant++)
 		{
 			state.gradient[interpolant] = context->pixelShader->inputs[interpolant];
 		}
diff --git a/src/Pipeline/ComputeProgram.cpp b/src/Pipeline/ComputeProgram.cpp
index dd45d17..1d6d0a3 100644
--- a/src/Pipeline/ComputeProgram.cpp
+++ b/src/Pipeline/ComputeProgram.cpp
@@ -58,7 +58,7 @@
 	routine->setInputBuiltin(shader, spv::BuiltInNumWorkgroups, [&](const SpirvShader::BuiltinMapping& builtin, Array<SIMD::Float>& value)
 	{
 		auto numWorkgroups = *Pointer<Int4>(data + OFFSET(Data, numWorkgroups));
-		for (uint32_t component = 0; component < builtin.SizeInComponents; component++)
+		for(uint32_t component = 0; component < builtin.SizeInComponents; component++)
 		{
 			value[builtin.FirstComponent + component] =
 				As<SIMD::Float>(SIMD::Int(Extract(numWorkgroups, component)));
@@ -67,7 +67,7 @@
 
 	routine->setInputBuiltin(shader, spv::BuiltInWorkgroupId, [&](const SpirvShader::BuiltinMapping& builtin, Array<SIMD::Float>& value)
 	{
-		for (uint32_t component = 0; component < builtin.SizeInComponents; component++)
+		for(uint32_t component = 0; component < builtin.SizeInComponents; component++)
 		{
 			value[builtin.FirstComponent + component] =
 				As<SIMD::Float>(SIMD::Int(workgroupID[component]));
@@ -77,7 +77,7 @@
 	routine->setInputBuiltin(shader, spv::BuiltInWorkgroupSize, [&](const SpirvShader::BuiltinMapping& builtin, Array<SIMD::Float>& value)
 	{
 		auto workgroupSize = *Pointer<Int4>(data + OFFSET(Data, workgroupSize));
-		for (uint32_t component = 0; component < builtin.SizeInComponents; component++)
+		for(uint32_t component = 0; component < builtin.SizeInComponents; component++)
 		{
 			value[builtin.FirstComponent + component] =
 				As<SIMD::Float>(SIMD::Int(Extract(workgroupSize, component)));
@@ -134,7 +134,7 @@
 
 	routine->setInputBuiltin(shader, spv::BuiltInLocalInvocationId, [&](const SpirvShader::BuiltinMapping& builtin, Array<SIMD::Float>& value)
 	{
-		for (uint32_t component = 0; component < builtin.SizeInComponents; component++)
+		for(uint32_t component = 0; component < builtin.SizeInComponents; component++)
 		{
 			value[builtin.FirstComponent + component] =
 				As<SIMD::Float>(localInvocationID[component]);
@@ -148,7 +148,7 @@
 		wgID = Insert(wgID, workgroupID[Y], Y);
 		wgID = Insert(wgID, workgroupID[Z], Z);
 		auto localBase = workgroupSize * wgID;
-		for (uint32_t component = 0; component < builtin.SizeInComponents; component++)
+		for(uint32_t component = 0; component < builtin.SizeInComponents; component++)
 		{
 			auto globalInvocationID = SIMD::Int(Extract(localBase, component)) + localInvocationID[component];
 			value[builtin.FirstComponent + component] = As<SIMD::Float>(globalInvocationID);
@@ -228,7 +228,7 @@
 
 	auto groupCount = groupCountX * groupCountY * groupCountZ;
 
-	for (uint32_t batchID = 0; batchID < batchCount && batchID < groupCount; batchID++)
+	for(uint32_t batchID = 0; batchID < batchCount && batchID < groupCount; batchID++)
 	{
 		wg.add(1);
 		marl::schedule([=, &data]
@@ -236,7 +236,7 @@
 			defer(wg.done());
 			std::vector<uint8_t> workgroupMemory(shader->workgroupMemory.size());
 
-			for (uint32_t groupIndex = batchID; groupIndex < groupCount; groupIndex += batchCount)
+			for(uint32_t groupIndex = batchID; groupIndex < groupCount; groupIndex += batchCount)
 			{
 				auto modulo = groupIndex;
 				auto groupOffsetZ = modulo / (groupCountX * groupCountY);
@@ -253,7 +253,7 @@
 				using Coroutine = std::unique_ptr<rr::Stream<SpirvShader::YieldResult>>;
 				std::queue<Coroutine> coroutines;
 
-				if (modes.ContainsControlBarriers)
+				if(modes.ContainsControlBarriers)
 				{
 					// Make a function call per subgroup so each subgroup
 					// can yield, bringing all subgroups to the barrier
@@ -270,13 +270,13 @@
 					coroutines.push(std::move(coroutine));
 				}
 
-				while (coroutines.size() > 0)
+				while(coroutines.size() > 0)
 				{
 					auto coroutine = std::move(coroutines.front());
 					coroutines.pop();
 
 					SpirvShader::YieldResult result;
-					if (coroutine->await(result))
+					if(coroutine->await(result))
 					{
 						// TODO: Consider result (when the enum is more than 1 entry).
 						coroutines.push(std::move(coroutine));
diff --git a/src/Pipeline/Constants.cpp b/src/Pipeline/Constants.cpp
index c38507d..6eaf92d 100644
--- a/src/Pipeline/Constants.cpp
+++ b/src/Pipeline/Constants.cpp
@@ -246,7 +246,7 @@
 		mask565Q[i] = word4((i & 0x1 ? 0x001F : 0) | (i & 0x2 ? 0x07E0 : 0) | (i & 0x4 ? 0xF800 : 0));
 	}
 
-	for (int i = 0; i < 16; i++)
+	for(int i = 0; i < 16; i++)
 	{
 		mask5551Q[i] = word4((i & 0x1 ? 0x001F : 0) | (i & 0x2 ? 0x03E0 : 0) | (i & 0x4 ? 0x7C00 : 0) | (i & 8 ? 0x8000 : 0));
 	}
@@ -264,7 +264,7 @@
 		maskD01X[i][3] =  -(i >> 1 & 1);
 	}
 
-	for (int i = 0; i < 16; i++)
+	for(int i = 0; i < 16; i++)
 	{
 		mask10Q[i][0] = mask10Q[i][1] =
 				(i & 0x1 ? 0x3FF : 0) |
diff --git a/src/Pipeline/PixelProgram.cpp b/src/Pipeline/PixelProgram.cpp
index 81c137e..332f82e 100644
--- a/src/Pipeline/PixelProgram.cpp
+++ b/src/Pipeline/PixelProgram.cpp
@@ -109,7 +109,7 @@
 	routine.constants = *Pointer<Pointer<Byte>>(data + OFFSET(DrawData, constants));
 
 	auto it = spirvShader->inputBuiltins.find(spv::BuiltInFrontFacing);
-	if (it != spirvShader->inputBuiltins.end())
+	if(it != spirvShader->inputBuiltins.end())
 	{
 		ASSERT(it->second.SizeInComponents == 1);
 		auto frontFacing = Int4(*Pointer<Int>(primitive + OFFSET(Primitive, clockwiseMask)));
@@ -117,13 +117,13 @@
 	}
 
 	it = spirvShader->inputBuiltins.find(spv::BuiltInSampleMask);
-	if (it != spirvShader->inputBuiltins.end())
+	if(it != spirvShader->inputBuiltins.end())
 	{
 		static_assert(SIMD::Width == 4, "Expects SIMD width to be 4");
 		Int4 laneBits = Int4(1, 2, 4, 8);
 
 		Int4 inputSampleMask = Int4(1) & CmpNEQ(Int4(cMask[0]) & laneBits, Int4(0));
-		for (auto i = 1u; i < state.multiSample; i++)
+		for(auto i = 1u; i < state.multiSample; i++)
 		{
 			inputSampleMask |= Int4(1 << i) & CmpNEQ(Int4(cMask[i]) & laneBits, Int4(0));
 		}
@@ -131,7 +131,7 @@
 		routine.getVariable(it->second.Id)[it->second.FirstComponent] = As<Float4>(inputSampleMask);
 		// Sample mask input is an array, as the spec contemplates MSAA levels higher than 32.
 		// Fill any non-zero indices with 0.
-		for (auto i = 1u; i < it->second.SizeInComponents; i++)
+		for(auto i = 1u; i < it->second.SizeInComponents; i++)
 			routine.getVariable(it->second.Id)[it->second.FirstComponent + i] = Float4(0);
 	}
 
@@ -156,25 +156,25 @@
 
 	if(spirvShader->getModes().ContainsKill)
 	{
-		for (auto i = 0u; i < state.multiSample; i++)
+		for(auto i = 0u; i < state.multiSample; i++)
 		{
 			cMask[i] &= ~routine.killMask;
 		}
 	}
 
 	it = spirvShader->outputBuiltins.find(spv::BuiltInSampleMask);
-	if (it != spirvShader->outputBuiltins.end())
+	if(it != spirvShader->outputBuiltins.end())
 	{
 		auto outputSampleMask = As<SIMD::Int>(routine.getVariable(it->second.Id)[it->second.FirstComponent]);
 
-		for (auto i = 0u; i < state.multiSample; i++)
+		for(auto i = 0u; i < state.multiSample; i++)
 		{
 			cMask[i] &= SignMask(CmpNEQ(outputSampleMask & SIMD::Int(1<<i), SIMD::Int(0)));
 		}
 	}
 
 	it = spirvShader->outputBuiltins.find(spv::BuiltInFragDepth);
-	if (it != spirvShader->outputBuiltins.end())
+	if(it != spirvShader->outputBuiltins.end())
 	{
 		oDepth = Min(Max(routine.getVariable(it->second.Id)[it->second.FirstComponent], Float4(0.0f)), Float4(1.0f));
 	}
diff --git a/src/Pipeline/PixelRoutine.cpp b/src/Pipeline/PixelRoutine.cpp
index db57ae4..6b06146 100644
--- a/src/Pipeline/PixelRoutine.cpp
+++ b/src/Pipeline/PixelRoutine.cpp
@@ -33,7 +33,7 @@
 	  routine(pipelineLayout),
 	  descriptorSets(descriptorSets)
 {
-	if (spirvShader)
+	if(spirvShader)
 	{
 		spirvShader->emitProlog(&routine);
 
@@ -137,14 +137,14 @@
 			}
 		}
 
-		if (spirvShader)
+		if(spirvShader)
 		{
-			for (int interpolant = 0; interpolant < MAX_INTERFACE_COMPONENTS; interpolant++)
+			for(int interpolant = 0; interpolant < MAX_INTERFACE_COMPONENTS; interpolant++)
 			{
 				auto const &input = spirvShader->inputs[interpolant];
-				if (input.Type != SpirvShader::ATTRIBTYPE_UNUSED)
+				if(input.Type != SpirvShader::ATTRIBTYPE_UNUSED)
 				{
-					if (input.Centroid && state.multiSample > 1)
+					if(input.Centroid && state.multiSample > 1)
 					{
 						routine.inputs[interpolant] =
 								interpolateCentroid(XXXX, YYYY, rhwCentroid,
@@ -163,14 +163,14 @@
 
 			setBuiltins(x, y, z, w, cMask);
 
-			for (uint32_t i = 0; i < state.numClipDistances; i++)
+			for(uint32_t i = 0; i < state.numClipDistances; i++)
 			{
 				auto distance = interpolate(xxxx, DclipDistance[i], rhw,
 											primitive + OFFSET(Primitive, clipDistance[i]),
 											false, true, false);
 
 				auto clipMask = SignMask(CmpGE(distance, SIMD::Float(0)));
-				for (auto ms = 0u; ms < state.multiSample; ms++)
+				for(auto ms = 0u; ms < state.multiSample; ms++)
 				{
 					// TODO: Fragments discarded by clipping do not exist at
 					// all -- they should not be counted in queries or have
@@ -179,12 +179,12 @@
 					cMask[ms] &= clipMask;
 				}
 
-				if (spirvShader->getUsedCapabilities().ClipDistance)
+				if(spirvShader->getUsedCapabilities().ClipDistance)
 				{
 					auto it = spirvShader->inputBuiltins.find(spv::BuiltInClipDistance);
 					if(it != spirvShader->inputBuiltins.end())
 					{
-						if (i < it->second.SizeInComponents)
+						if(i < it->second.SizeInComponents)
 						{
 							routine.getVariable(it->second.Id)[it->second.FirstComponent + i] = distance;
 						}
@@ -192,14 +192,14 @@
 				}
 			}
 
-			if (spirvShader->getUsedCapabilities().CullDistance)
+			if(spirvShader->getUsedCapabilities().CullDistance)
 			{
 				auto it = spirvShader->inputBuiltins.find(spv::BuiltInCullDistance);
 				if(it != spirvShader->inputBuiltins.end())
 				{
-					for (uint32_t i = 0; i < state.numCullDistances; i++)
+					for(uint32_t i = 0; i < state.numCullDistances; i++)
 					{
-						if (i < it->second.SizeInComponents)
+						if(i < it->second.SizeInComponents)
 						{
 							routine.getVariable(it->second.Id)[it->second.FirstComponent + i] =
 									interpolate(xxxx, DcullDistance[i], rhw,
@@ -213,7 +213,7 @@
 
 		Bool alphaPass = true;
 
-		if (spirvShader)
+		if(spirvShader)
 		{
 			bool earlyFragTests = (spirvShader && spirvShader->getModes().EarlyFragmentTests);
 			applyShader(cMask, earlyFragTests ? sMask : cMask, earlyDepthTest ? zMask : cMask);
@@ -544,7 +544,7 @@
 		return true;
 	}
 
-	if (state.depthFormat == VK_FORMAT_D16_UNORM)
+	if(state.depthFormat == VK_FORMAT_D16_UNORM)
 		return depthTest16(zBuffer, q, x, z, sMask, zMask, cMask);
 	else
 		return depthTest32F(zBuffer, q, x, z, sMask, zMask, cMask);
@@ -647,7 +647,7 @@
 		return;
 	}
 
-	if (state.depthFormat == VK_FORMAT_D16_UNORM)
+	if(state.depthFormat == VK_FORMAT_D16_UNORM)
 		writeDepth16(zBuffer, q, x, z, zMask);
 	else
 		writeDepth32F(zBuffer, q, x, z, zMask);
@@ -1639,7 +1639,7 @@
 
 			Int2 value = *Pointer<Int2>(buffer, 16);
 			Int2 mergedMask = *Pointer<Int2>(constants + OFFSET(Constants, maskD01Q) + xMask * 8);
-			if (rgbaWriteMask != 0xF)
+			if(rgbaWriteMask != 0xF)
 			{
 				mergedMask &= *Pointer<Int2>(constants + OFFSET(Constants, mask10Q[rgbaWriteMask][0]));
 			}
@@ -1649,7 +1649,7 @@
 
 			value = *Pointer<Int2>(buffer, 16);
 			mergedMask = *Pointer<Int2>(constants + OFFSET(Constants, maskD23Q) + xMask * 8);
-			if (rgbaWriteMask != 0xF)
+			if(rgbaWriteMask != 0xF)
 			{
 				mergedMask &= *Pointer<Int2>(constants + OFFSET(Constants, mask10Q[rgbaWriteMask][0]));
 			}
@@ -2564,7 +2564,7 @@
 		}
 		break;
 	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		if ((rgbaWriteMask & 0x0000000F) != 0x0)
+		if((rgbaWriteMask & 0x0000000F) != 0x0)
 		{
 			Int2 mergedMask, packedCol, value;
 			Int4 packed = ((As<Int4>(oC.w) & Int4(0x3)) << 30) |
@@ -2575,7 +2575,7 @@
 			buffer += 4 * x;
 			value = *Pointer<Int2>(buffer, 16);
 			mergedMask = *Pointer<Int2>(constants + OFFSET(Constants, maskD01Q) + xMask * 8);
-			if (rgbaWriteMask != 0xF)
+			if(rgbaWriteMask != 0xF)
 			{
 				mergedMask &= *Pointer<Int2>(constants + OFFSET(Constants, mask10Q[rgbaWriteMask][0]));
 			}
@@ -2585,7 +2585,7 @@
 
 			value = *Pointer<Int2>(buffer, 16);
 			mergedMask = *Pointer<Int2>(constants + OFFSET(Constants, maskD23Q) + xMask * 8);
-			if (rgbaWriteMask != 0xF)
+			if(rgbaWriteMask != 0xF)
 			{
 				mergedMask &= *Pointer<Int2>(constants + OFFSET(Constants, mask10Q[rgbaWriteMask][0]));
 			}
diff --git a/src/Pipeline/SamplerCore.cpp b/src/Pipeline/SamplerCore.cpp
index 86228a5..b2c2268 100644
--- a/src/Pipeline/SamplerCore.cpp
+++ b/src/Pipeline/SamplerCore.cpp
@@ -33,7 +33,7 @@
 	case VK_COMPONENT_SWIZZLE_A:    f = c.w; break;
 	case VK_COMPONENT_SWIZZLE_ZERO: f = sw::Float4(0.0f, 0.0f, 0.0f, 0.0f); break;
 	case VK_COMPONENT_SWIZZLE_ONE:
-		if (integer)
+		if(integer)
 		{
 			f = rr::As<sw::Float4>(sw::Int4(1, 1, 1, 1));
 		}
@@ -155,9 +155,9 @@
 	{
 		c = sampleFloatFilter(texture, uuuu, vvvv, wwww, qqqq, offset, cubeArrayCoord, sampleId, lod, anisotropy, uDelta, vDelta, function);
 
-		if (!hasFloatTexture() && !hasUnnormalizedIntegerTexture() && !state.compareEnable)
+		if(!hasFloatTexture() && !hasUnnormalizedIntegerTexture() && !state.compareEnable)
 		{
-			switch (state.textureFormat)
+			switch(state.textureFormat)
 			{
 			case VK_FORMAT_R5G6B5_UNORM_PACK16:
 				c.x *= Float4(1.0f / 0xF800);
@@ -200,7 +200,7 @@
 				c.w *= Float4(1.0f / 0xFF00u);
 				break;
 			default:
-				for (int component = 0; component < textureComponentCount(); component++)
+				for(int component = 0; component < textureComponentCount(); component++)
 				{
 					c[component] *= Float4(hasUnsignedTextureComponent(component) ? 1.0f / 0xFFFF : 1.0f / 0x7FFF);
 				}
@@ -211,7 +211,7 @@
 	{
 		Vector4s cs = sampleFilter(texture, uuuu, vvvv, wwww, offset, cubeArrayCoord, sampleId, lod, anisotropy, uDelta, vDelta, function);
 
-		switch (state.textureFormat)
+		switch(state.textureFormat)
 		{
 		case VK_FORMAT_R5G6B5_UNORM_PACK16:
 			c.x = Float4(As<UShort4>(cs.x)) * Float4(1.0f / 0xF800);
@@ -1646,7 +1646,7 @@
 	}
 	else ASSERT(false);
 
-	if (state.textureFormat.isSRGBformat())
+	if(state.textureFormat.isSRGBformat())
 	{
 		for(int i = 0; i < textureComponentCount(); i++)
 		{
@@ -1830,7 +1830,7 @@
 
 	if(hasFloatTexture() || has32bitIntegerTextureComponents())
 	{
-		switch (state.textureFormat)
+		switch(state.textureFormat)
 		{
 		case VK_FORMAT_R16_SFLOAT:
 			t0 = Int4(*Pointer<UShort4>(buffer + index[0] * 2));
diff --git a/src/Pipeline/SetupRoutine.cpp b/src/Pipeline/SetupRoutine.cpp
index 9288565..d3307df 100644
--- a/src/Pipeline/SetupRoutine.cpp
+++ b/src/Pipeline/SetupRoutine.cpp
@@ -444,9 +444,9 @@
 			*Pointer<Float4>(primitive + OFFSET(Primitive,z.C), 16) = C;
 		}
 
-		for (int interpolant = 0; interpolant < MAX_INTERFACE_COMPONENTS; interpolant++)
+		for(int interpolant = 0; interpolant < MAX_INTERFACE_COMPONENTS; interpolant++)
 		{
-			if (state.gradient[interpolant].Type != SpirvShader::ATTRIBTYPE_UNUSED)
+			if(state.gradient[interpolant].Type != SpirvShader::ATTRIBTYPE_UNUSED)
 			{
 				setupGradient(primitive, tri, w012, M, v0, v1, v2,
 						OFFSET(Vertex, v[interpolant]),
@@ -456,7 +456,7 @@
 			}
 		}
 
-		for (unsigned int i = 0; i < state.numClipDistances; i++)
+		for(unsigned int i = 0; i < state.numClipDistances; i++)
 		{
 			setupGradient(primitive, tri, w012, M, v0, v1, v2,
 					OFFSET(Vertex, clipDistance[i]),
@@ -464,7 +464,7 @@
 					false, true);
 		}
 
-		for (unsigned int i = 0; i < state.numCullDistances; i++)
+		for(unsigned int i = 0; i < state.numCullDistances; i++)
 		{
 			setupGradient(primitive, tri, w012, M, v0, v1, v2,
 					OFFSET(Vertex, cullDistance[i]),
diff --git a/src/Pipeline/ShaderCore.cpp b/src/Pipeline/ShaderCore.cpp
index 10502fd..65b2084 100644
--- a/src/Pipeline/ShaderCore.cpp
+++ b/src/Pipeline/ShaderCore.cpp
@@ -873,14 +873,14 @@
 
 Pointer& Pointer::operator += (int i)
 {
-	for (int el = 0; el < SIMD::Width; el++) { staticOffsets[el] += i; }
+	for(int el = 0; el < SIMD::Width; el++) { staticOffsets[el] += i; }
 	return *this;
 }
 
 Pointer& Pointer::operator *= (int i)
 {
-	for (int el = 0; el < SIMD::Width; el++) { staticOffsets[el] *= i; }
-	if (hasDynamicOffsets)
+	for(int el = 0; el < SIMD::Width; el++) { staticOffsets[el] *= i; }
+	if(hasDynamicOffsets)
 	{
 		dynamicOffsets *= SIMD::Int(i);
 	}
@@ -900,12 +900,12 @@
 {
 	ASSERT(accessSize > 0);
 
-	if (isStaticallyInBounds(accessSize, robustness))
+	if(isStaticallyInBounds(accessSize, robustness))
 	{
 		return SIMD::Int(0xffffffff);
 	}
 
-	if (!hasDynamicOffsets && !hasDynamicLimit)
+	if(!hasDynamicOffsets && !hasDynamicLimit)
 	{
 		// Common fast paths.
 		static_assert(SIMD::Width == 4, "Expects SIMD::Width to be 4");
@@ -921,14 +921,14 @@
 
 bool Pointer::isStaticallyInBounds(unsigned int accessSize, OutOfBoundsBehavior robustness) const
 {
-	if (hasDynamicOffsets)
+	if(hasDynamicOffsets)
 	{
 		return false;
 	}
 
-	if (hasDynamicLimit)
+	if(hasDynamicLimit)
 	{
-		if (hasStaticEqualOffsets() || hasStaticSequentialOffsets(accessSize))
+		if(hasStaticEqualOffsets() || hasStaticSequentialOffsets(accessSize))
 		{
 			switch(robustness)
 			{
@@ -944,9 +944,9 @@
 		}
 	}
 
-	for (int i = 0; i < SIMD::Width; i++)
+	for(int i = 0; i < SIMD::Width; i++)
 	{
-		if (staticOffsets[i] + accessSize - 1 >= staticLimit)
+		if(staticOffsets[i] + accessSize - 1 >= staticLimit)
 		{
 			return false;
 		}
@@ -964,7 +964,7 @@
 // (N+0*step, N+1*step, N+2*step, N+3*step)
 rr::Bool Pointer::hasSequentialOffsets(unsigned int step) const
 {
-	if (hasDynamicOffsets)
+	if(hasDynamicOffsets)
 	{
 		auto o = offsets();
 		static_assert(SIMD::Width == 4, "Expects SIMD::Width to be 4");
@@ -977,13 +977,13 @@
 // sequential (N+0*step, N+1*step, N+2*step, N+3*step)
 bool Pointer::hasStaticSequentialOffsets(unsigned int step) const
 {
-	if (hasDynamicOffsets)
+	if(hasDynamicOffsets)
 	{
 		return false;
 	}
-	for (int i = 1; i < SIMD::Width; i++)
+	for(int i = 1; i < SIMD::Width; i++)
 	{
-		if (staticOffsets[i-1] + int32_t(step) != staticOffsets[i]) { return false; }
+		if(staticOffsets[i-1] + int32_t(step) != staticOffsets[i]) { return false; }
 	}
 	return true;
 }
@@ -991,7 +991,7 @@
 // Returns true if all offsets are equal (N, N, N, N)
 rr::Bool Pointer::hasEqualOffsets() const
 {
-	if (hasDynamicOffsets)
+	if(hasDynamicOffsets)
 	{
 		auto o = offsets();
 		static_assert(SIMD::Width == 4, "Expects SIMD::Width to be 4");
@@ -1004,13 +1004,13 @@
 // (N, N, N, N)
 bool Pointer::hasStaticEqualOffsets() const
 {
-	if (hasDynamicOffsets)
+	if(hasDynamicOffsets)
 	{
 		return false;
 	}
-	for (int i = 1; i < SIMD::Width; i++)
+	for(int i = 1; i < SIMD::Width; i++)
 	{
-		if (staticOffsets[i-1] != staticOffsets[i]) { return false; }
+		if(staticOffsets[i-1] != staticOffsets[i]) { return false; }
 	}
 	return true;
 }
diff --git a/src/Pipeline/ShaderCore.hpp b/src/Pipeline/ShaderCore.hpp
index fb056ff..4026d27 100644
--- a/src/Pipeline/ShaderCore.hpp
+++ b/src/Pipeline/ShaderCore.hpp
@@ -287,17 +287,17 @@
 {
 	using EL = typename Element<T>::type;
 
-	if (isStaticallyInBounds(sizeof(float), robustness))
+	if(isStaticallyInBounds(sizeof(float), robustness))
 	{
 		// All elements are statically known to be in-bounds.
 		// We can avoid costly conditional on masks.
 
-		if (hasStaticSequentialOffsets(sizeof(float)))
+		if(hasStaticSequentialOffsets(sizeof(float)))
 		{
 			// Offsets are sequential. Perform regular load.
 			return rr::Load(rr::Pointer<T>(base + staticOffsets[0]), alignment, atomic, order);
 		}
-		if (hasStaticEqualOffsets())
+		if(hasStaticEqualOffsets())
 		{
 			// Load one, replicate.
 			return T(*rr::Pointer<EL>(base + staticOffsets[0], alignment));
@@ -320,9 +320,9 @@
 
 	auto offs = offsets();
 
-	if (!atomic && order == std::memory_order_relaxed)
+	if(!atomic && order == std::memory_order_relaxed)
 	{
-		if (hasStaticEqualOffsets())
+		if(hasStaticEqualOffsets())
 		{
 			// Load one, replicate.
 			// Be careful of the case where the post-bounds-check mask
@@ -349,7 +349,7 @@
 			break;
 		}
 
-		if (hasStaticSequentialOffsets(sizeof(float)))
+		if(hasStaticSequentialOffsets(sizeof(float)))
 		{
 			return rr::MaskedLoad(rr::Pointer<T>(base + staticOffsets[0]), mask, alignment, zeroMaskedLanes);
 		}
@@ -376,7 +376,7 @@
 		{
 			// Divergent offsets or masked lanes.
 			out = T(0);
-			for (int i = 0; i < SIMD::Width; i++)
+			for(int i = 0; i < SIMD::Width; i++)
 			{
 				If(Extract(mask, i) != 0)
 				{
@@ -409,11 +409,11 @@
 		break;
 	}
 
-	if (!atomic && order == std::memory_order_relaxed)
+	if(!atomic && order == std::memory_order_relaxed)
 	{
-		if (hasStaticEqualOffsets())
+		if(hasStaticEqualOffsets())
 		{
-			If (AnyTrue(mask))
+			If(AnyTrue(mask))
 			{
 				// All equal. One of these writes will win -- elect the winning lane.
 				auto v0111 = SIMD::Int(0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
@@ -426,9 +426,9 @@
 				*rr::Pointer<EL>(base + staticOffsets[0], alignment) = As<EL>(scalarVal);
 			}
 		}
-		else if (hasStaticSequentialOffsets(sizeof(float)))
+		else if(hasStaticSequentialOffsets(sizeof(float)))
 		{
-			if (isStaticallyInBounds(sizeof(float), robustness))
+			if(isStaticallyInBounds(sizeof(float), robustness))
 			{
 				// Pointer has no elements OOB, and the store is not atomic.
 				// Perform a RMW.
@@ -458,7 +458,7 @@
 		Else
 		{
 			// Divergent offsets or masked lanes.
-			for (int i = 0; i < SIMD::Width; i++)
+			for(int i = 0; i < SIMD::Width; i++)
 			{
 				If(Extract(mask, i) != 0)
 				{
diff --git a/src/Pipeline/SpirvShader.cpp b/src/Pipeline/SpirvShader.cpp
index 4dc26d5..11b3770 100644
--- a/src/Pipeline/SpirvShader.cpp
+++ b/src/Pipeline/SpirvShader.cpp
@@ -37,12 +37,12 @@
 {
 	ASSERT(insns.size() > 0);
 
-	if (renderPass)
+	if(renderPass)
 	{
 		// capture formats of any input attachments present
 		auto subpass = renderPass->getSubpass(subpassIndex);
 		inputAttachmentFormats.reserve(subpass.inputAttachmentCount);
-		for (auto i = 0u; i < subpass.inputAttachmentCount; i++)
+		for(auto i = 0u; i < subpass.inputAttachmentCount; i++)
 		{
 			auto attachmentIndex = subpass.pInputAttachments[i].attachment;
 			inputAttachmentFormats.push_back(attachmentIndex != VK_ATTACHMENT_UNUSED
@@ -57,11 +57,11 @@
 	Block::ID currentBlock;
 	InsnIterator blockStart;
 
-	for (auto insn : *this)
+	for(auto insn : *this)
 	{
 		spv::Op opcode = insn.opcode();
 
-		switch (opcode)
+		switch(opcode)
 		{
 		case spv::OpEntryPoint:
 		{
@@ -69,7 +69,7 @@
 			auto id = Function::ID(insn.word(2));
 			auto name = insn.string(3);
 			auto stage = executionModelToStage(executionModel);
-			if (stage == pipelineStage && strcmp(name, entryPointName) == 0)
+			if(stage == pipelineStage && strcmp(name, entryPointName) == 0)
 			{
 				ASSERT_MSG(entryPoint == 0, "Duplicate entry point with name '%s' and stage %d", name, int(stage));
 				entryPoint = id;
@@ -105,7 +105,7 @@
 				break;
 			}
 
-			if (decoration == spv::DecorationCentroid)
+			if(decoration == spv::DecorationCentroid)
 				modes.NeedsCentroid = true;
 			break;
 		}
@@ -118,12 +118,12 @@
 			uint32_t value = insn.wordCount() > 4 ? insn.word(4) : 0;
 
 			auto &d = memberDecorations[targetId];
-			if (memberIndex >= d.size())
+			if(memberIndex >= d.size())
 				d.resize(memberIndex + 1);    // on demand; exact size would require another pass...
 
 			d[memberIndex].Apply(decoration, value);
 
-			if (decoration == spv::DecorationCentroid)
+			if(decoration == spv::DecorationCentroid)
 				modes.NeedsCentroid = true;
 			break;
 		}
@@ -139,7 +139,7 @@
 			uint32_t group = insn.word(1);
 			auto const &groupDecorations = decorations[group];
 			auto const &descriptorGroupDecorations = descriptorDecorations[group];
-			for (auto i = 2u; i < insn.wordCount(); i++)
+			for(auto i = 2u; i < insn.wordCount(); i++)
 			{
 				// Remaining operands are targets to apply the group to.
 				uint32_t target = insn.word(i);
@@ -153,12 +153,12 @@
 		case spv::OpGroupMemberDecorate:
 		{
 			auto const &srcDecorations = decorations[insn.word(1)];
-			for (auto i = 2u; i < insn.wordCount(); i += 2)
+			for(auto i = 2u; i < insn.wordCount(); i += 2)
 			{
 				// remaining operands are pairs of <id>, literal for members to apply to.
 				auto &d = memberDecorations[insn.word(i)];
 				auto memberIndex = insn.word(i + 1);
-				if (memberIndex >= d.size())
+				if(memberIndex >= d.size())
 					d.resize(memberIndex + 1);    // on demand resize, see above...
 				d[memberIndex].Apply(srcDecorations);
 			}
@@ -191,7 +191,7 @@
 			functions[currentFunction].blocks[currentBlock] = Block(blockStart, blockEnd);
 			currentBlock = Block::ID(0);
 
-			if (opcode == spv::OpKill)
+			if(opcode == spv::OpKill)
 			{
 				modes.ContainsKill = true;
 			}
@@ -233,7 +233,7 @@
 			ASSERT(getType(typeId).definition.opcode() == spv::OpTypePointer);
 			ASSERT(getType(typeId).storageClass == storageClass);
 
-			switch (storageClass)
+			switch(storageClass)
 			{
 			case spv::StorageClassInput:
 			case spv::StorageClassOutput:
@@ -298,7 +298,7 @@
 			// OpConstantNull forms a constant of arbitrary type, all zeros.
 			auto &object = CreateConstant(insn);
 			auto &objectTy = getType(object.type);
-			for (auto i = 0u; i < objectTy.sizeInComponents; i++)
+			for(auto i = 0u; i < objectTy.sizeInComponents; i++)
 			{
 				object.constantValue[i] = 0;
 			}
@@ -309,11 +309,11 @@
 		{
 			auto &object = CreateConstant(insn);
 			auto offset = 0u;
-			for (auto i = 0u; i < insn.wordCount() - 3; i++)
+			for(auto i = 0u; i < insn.wordCount() - 3; i++)
 			{
 				auto &constituent = getObject(insn.word(i + 3));
 				auto &constituentTy = getType(constituent.type);
-				for (auto j = 0u; j < constituentTy.sizeInComponents; j++)
+				for(auto j = 0u; j < constituentTy.sizeInComponents; j++)
 				{
 					object.constantValue[offset++] = constituent.constantValue[j];
 				}
@@ -321,7 +321,7 @@
 
 			auto objectId = Object::ID(insn.word(2));
 			auto decorationsIt = decorations.find(objectId);
-			if (decorationsIt != decorations.end() &&
+			if(decorationsIt != decorations.end() &&
 				decorationsIt->second.BuiltIn == spv::BuiltInWorkgroupSize)
 			{
 				// https://www.khronos.org/registry/vulkan/specs/1.1/html/vkspec.html#interfaces-builtin-variables :
@@ -346,7 +346,7 @@
 		case spv::OpCapability:
 		{
 			auto capability = static_cast<spv::Capability>(insn.word(1));
-			switch (capability)
+			switch(capability)
 			{
 			case spv::CapabilityMatrix: capabilities.Matrix = true; break;
 			case spv::CapabilityShader: capabilities.Shader = true; break;
@@ -388,9 +388,9 @@
 			function.result = Type::ID(insn.word(1));
 			function.type = Type::ID(insn.word(4));
 			// Scan forward to find the function's label.
-			for (auto it = insn; it != end() && function.entry == 0; it++)
+			for(auto it = insn; it != end() && function.entry == 0; it++)
 			{
-				switch (it.opcode())
+				switch(it.opcode())
 				{
 				case spv::OpFunction:
 				case spv::OpFunctionParameter:
@@ -415,7 +415,7 @@
 			// We will only support the GLSL 450 extended instruction set, so no point in tracking the ID we assign it.
 			// Valid shaders will not attempt to import any other instruction sets.
 			auto ext = insn.string(2);
-			if (0 != strcmp("GLSL.std.450", ext))
+			if(0 != strcmp("GLSL.std.450", ext))
 			{
 				UNSUPPORTED("SPIR-V Extension: %s", ext);
 			}
@@ -473,7 +473,7 @@
 
 				DefineResult(insn);
 
-				if (opcode == spv::OpAccessChain || opcode == spv::OpInBoundsAccessChain)
+				if(opcode == spv::OpAccessChain || opcode == spv::OpInBoundsAccessChain)
 				{
 					Decorations dd{};
 					ApplyDecorationsForAccessChain(&dd, &descriptorDecorations[resultId], pointerId, insn.wordCount() - 4, insn.wordPointer(4));
@@ -668,12 +668,12 @@
 			auto ext = insn.string(1);
 			// Part of core SPIR-V 1.3. Vulkan 1.1 implementations must also accept the pre-1.3
 			// extension per Appendix A, `Vulkan Environment for SPIR-V`.
-			if (!strcmp(ext, "SPV_KHR_storage_buffer_storage_class")) break;
-			if (!strcmp(ext, "SPV_KHR_shader_draw_parameters")) break;
-			if (!strcmp(ext, "SPV_KHR_16bit_storage")) break;
-			if (!strcmp(ext, "SPV_KHR_variable_pointers")) break;
-			if (!strcmp(ext, "SPV_KHR_device_group")) break;
-			if (!strcmp(ext, "SPV_KHR_multiview")) break;
+			if(!strcmp(ext, "SPV_KHR_storage_buffer_storage_class")) break;
+			if(!strcmp(ext, "SPV_KHR_shader_draw_parameters")) break;
+			if(!strcmp(ext, "SPV_KHR_16bit_storage")) break;
+			if(!strcmp(ext, "SPV_KHR_variable_pointers")) break;
+			if(!strcmp(ext, "SPV_KHR_device_group")) break;
+			if(!strcmp(ext, "SPV_KHR_multiview")) break;
 			UNSUPPORTED("SPIR-V Extension: %s", ext);
 			break;
 		}
@@ -684,7 +684,7 @@
 	}
 
 	ASSERT_MSG(entryPoint != 0, "Entry point '%s' not found", entryPointName);
-	for (auto &it : functions)
+	for(auto &it : functions)
 	{
 		it.second.AssignBlockFields();
 	}
@@ -700,16 +700,16 @@
 
 	// A structure is a builtin block if it has a builtin
 	// member. All members of such a structure are builtins.
-	switch (insn.opcode())
+	switch(insn.opcode())
 	{
 	case spv::OpTypeStruct:
 	{
 		auto d = memberDecorations.find(resultId);
-		if (d != memberDecorations.end())
+		if(d != memberDecorations.end())
 		{
-			for (auto &m : d->second)
+			for(auto &m : d->second)
 			{
-				if (m.HasBuiltIn)
+				if(m.HasBuiltIn)
 				{
 					type.isBuiltInBlock = true;
 					break;
@@ -767,7 +767,7 @@
 	ASSERT(object.opcode() == spv::OpVariable);
 	Object::ID resultId = object.definition.word(2);
 
-	if (objectTy.isBuiltInBlock)
+	if(objectTy.isBuiltInBlock)
 	{
 		// walk the builtin block, registering each of its members separately.
 		auto m = memberDecorations.find(objectTy.element);
@@ -775,11 +775,11 @@
 		auto &structType = pointeeTy.definition;
 		auto offset = 0u;
 		auto word = 2u;
-		for (auto &member : m->second)
+		for(auto &member : m->second)
 		{
 			auto &memberType = getType(structType.word(word));
 
-			if (member.HasBuiltIn)
+			if(member.HasBuiltIn)
 			{
 				builtinInterface[member.BuiltIn] = {resultId, offset, memberType.sizeInComponents};
 			}
@@ -791,7 +791,7 @@
 	}
 
 	auto d = decorations.find(resultId);
-	if (d != decorations.end() && d->second.HasBuiltIn)
+	if(d != decorations.end() && d->second.HasBuiltIn)
 	{
 		builtinInterface[d->second.BuiltIn] = {resultId, 0, pointeeTy.sizeInComponents};
 	}
@@ -817,7 +817,7 @@
 void SpirvShader::ProcessExecutionMode(InsnIterator insn)
 {
 	auto mode = static_cast<spv::ExecutionMode>(insn.word(2));
-	switch (mode)
+	switch(mode)
 	{
 	case spv::ExecutionModeEarlyFragmentTests:
 		modes.EarlyFragmentTests = true;
@@ -852,7 +852,7 @@
 	// Types are always built from the bottom up (with the exception of forward ptrs, which
 	// don't appear in Vulkan shaders. Therefore, we can always assume our component parts have
 	// already been described (and so their sizes determined)
-	switch (insn.opcode())
+	switch(insn.opcode())
 	{
 	case spv::OpTypeVoid:
 	case spv::OpTypeSampler:
@@ -887,7 +887,7 @@
 	case spv::OpTypeStruct:
 	{
 		uint32_t size = 0;
-		for (uint32_t i = 2u; i < insn.wordCount(); i++)
+		for(uint32_t i = 2u; i < insn.wordCount(); i++)
 		{
 			size += getType(insn.word(i)).sizeInComponents;
 		}
@@ -927,14 +927,14 @@
 	case spv::OpTypePointer:
 		return VisitInterfaceInner(obj.definition.word(3), d, f);
 	case spv::OpTypeMatrix:
-		for (auto i = 0u; i < obj.definition.word(3); i++, d.Location++)
+		for(auto i = 0u; i < obj.definition.word(3); i++, d.Location++)
 		{
 			// consumes same components of N consecutive locations
 			VisitInterfaceInner(obj.definition.word(2), d, f);
 		}
 		return d.Location;
 	case spv::OpTypeVector:
-		for (auto i = 0u; i < obj.definition.word(3); i++, d.Component++)
+		for(auto i = 0u; i < obj.definition.word(3); i++, d.Component++)
 		{
 			// consumes N consecutive components in the same location
 			VisitInterfaceInner(obj.definition.word(2), d, f);
@@ -952,7 +952,7 @@
 	case spv::OpTypeStruct:
 	{
 		// iterate over members, which may themselves have Location/Component decorations
-		for (auto i = 0u; i < obj.definition.wordCount() - 2; i++)
+		for(auto i = 0u; i < obj.definition.wordCount() - 2; i++)
 		{
 			ApplyDecorationsForIdMember(&d, id, i);
 			d.Location = VisitInterfaceInner(obj.definition.word(i + 2), d, f);
@@ -963,7 +963,7 @@
 	case spv::OpTypeArray:
 	{
 		auto arraySize = GetConstScalarInt(obj.definition.word(3));
-		for (auto i = 0u; i < arraySize; i++)
+		for(auto i = 0u; i < arraySize; i++)
 		{
 			d.Location = VisitInterfaceInner(obj.definition.word(2), d, f);
 		}
@@ -993,11 +993,11 @@
 	ApplyDecorationsForId(d, baseObject.type);
 	auto typeId = getType(baseObject.type).element;
 
-	for (auto i = 0u; i < numIndexes; i++)
+	for(auto i = 0u; i < numIndexes; i++)
 	{
 		ApplyDecorationsForId(d, typeId);
 		auto & type = getType(typeId);
-		switch (type.opcode())
+		switch(type.opcode())
 		{
 		case spv::OpTypeStruct:
 		{
@@ -1008,7 +1008,7 @@
 		}
 		case spv::OpTypeArray:
 		case spv::OpTypeRuntimeArray:
-			if (dd->InputAttachmentIndex >= 0)
+			if(dd->InputAttachmentIndex >= 0)
 			{
 				dd->InputAttachmentIndex += GetConstScalarInt(indexIds[i]);
 			}
@@ -1037,10 +1037,10 @@
 	ApplyDecorationsForId(&d, baseObject.type);
 
 	uint32_t arrayIndex = 0;
-	if (baseObject.kind == Object::Kind::DescriptorSet)
+	if(baseObject.kind == Object::Kind::DescriptorSet)
 	{
 		auto type = getType(typeId).definition.opcode();
-		if (type == spv::OpTypeArray || type == spv::OpTypeRuntimeArray)
+		if(type == spv::OpTypeArray || type == spv::OpTypeRuntimeArray)
 		{
 			ASSERT(getObject(indexIds[0]).kind == Object::Kind::Constant);
 			arrayIndex = GetConstScalarInt(indexIds[0]);
@@ -1055,12 +1055,12 @@
 
 	int constantOffset = 0;
 
-	for (auto i = 0u; i < numIndexes; i++)
+	for(auto i = 0u; i < numIndexes; i++)
 	{
 		auto & type = getType(typeId);
 		ApplyDecorationsForId(&d, typeId);
 
-		switch (type.definition.opcode())
+		switch(type.definition.opcode())
 		{
 		case spv::OpTypeStruct:
 		{
@@ -1077,7 +1077,7 @@
 			// TODO: b/127950082: Check bounds.
 			ASSERT(d.HasArrayStride);
 			auto & obj = getObject(indexIds[i]);
-			if (obj.kind == Object::Kind::Constant)
+			if(obj.kind == Object::Kind::Constant)
 			{
 				constantOffset += d.ArrayStride * GetConstScalarInt(indexIds[i]);
 			}
@@ -1095,7 +1095,7 @@
 			d.InsideMatrix = true;
 			auto columnStride = (d.HasRowMajor && d.RowMajor) ? static_cast<int32_t>(sizeof(float)) : d.MatrixStride;
 			auto & obj = getObject(indexIds[i]);
-			if (obj.kind == Object::Kind::Constant)
+			if(obj.kind == Object::Kind::Constant)
 			{
 				constantOffset += columnStride * GetConstScalarInt(indexIds[i]);
 			}
@@ -1110,7 +1110,7 @@
 		{
 			auto elemStride = (d.InsideMatrix && d.HasRowMajor && d.RowMajor) ? d.MatrixStride : static_cast<int32_t>(sizeof(float));
 			auto & obj = getObject(indexIds[i]);
-			if (obj.kind == Object::Kind::Constant)
+			if(obj.kind == Object::Kind::Constant)
 			{
 				constantOffset += elemStride * GetConstScalarInt(indexIds[i]);
 			}
@@ -1141,7 +1141,7 @@
 
 	int constantOffset = 0;
 
-	for (auto i = 0u; i < numIndexes; i++)
+	for(auto i = 0u; i < numIndexes; i++)
 	{
 		auto & type = getType(typeId);
 		switch(type.opcode())
@@ -1150,7 +1150,7 @@
 		{
 			int memberIndex = GetConstScalarInt(indexIds[i]);
 			int offsetIntoStruct = 0;
-			for (auto j = 0; j < memberIndex; j++) {
+			for(auto j = 0; j < memberIndex; j++) {
 				auto memberType = type.definition.word(2u + j);
 				offsetIntoStruct += getType(memberType).sizeInComponents * sizeof(float);
 			}
@@ -1165,11 +1165,11 @@
 		case spv::OpTypeRuntimeArray:
 		{
 			// TODO: b/127950082: Check bounds.
-			if (getType(baseObject.type).storageClass == spv::StorageClassUniformConstant)
+			if(getType(baseObject.type).storageClass == spv::StorageClassUniformConstant)
 			{
 				// indexing into an array of descriptors.
 				auto &obj = getObject(indexIds[i]);
-				if (obj.kind != Object::Kind::Constant)
+				if(obj.kind != Object::Kind::Constant)
 				{
 					UNSUPPORTED("SPIR-V SampledImageArrayDynamicIndexing Capability");
 				}
@@ -1185,7 +1185,7 @@
 			{
 				auto stride = getType(type.element).sizeInComponents * static_cast<uint32_t>(sizeof(float));
 				auto & obj = getObject(indexIds[i]);
-				if (obj.kind == Object::Kind::Constant)
+				if(obj.kind == Object::Kind::Constant)
 				{
 					ptr += stride * GetConstScalarInt(indexIds[i]);
 				}
@@ -1203,7 +1203,7 @@
 		}
 	}
 
-	if (constantOffset != 0)
+	if(constantOffset != 0)
 	{
 		ptr += constantOffset;
 	}
@@ -1214,7 +1214,7 @@
 {
 	uint32_t componentOffset = 0;
 
-	for (auto i = 0u; i < numIndexes; i++)
+	for(auto i = 0u; i < numIndexes; i++)
 	{
 		auto & type = getType(typeId);
 		switch(type.opcode())
@@ -1223,7 +1223,7 @@
 		{
 			int memberIndex = indexes[i];
 			int offsetIntoStruct = 0;
-			for (auto j = 0; j < memberIndex; j++) {
+			for(auto j = 0; j < memberIndex; j++) {
 				auto memberType = type.definition.word(2u + j);
 				offsetIntoStruct += getType(memberType).sizeInComponents;
 			}
@@ -1253,7 +1253,7 @@
 
 void SpirvShader::Decorations::Apply(spv::Decoration decoration, uint32_t arg)
 {
-	switch (decoration)
+	switch(decoration)
 	{
 	case spv::DecorationLocation:
 		HasLocation = true;
@@ -1313,43 +1313,43 @@
 void SpirvShader::Decorations::Apply(const sw::SpirvShader::Decorations &src)
 {
 	// Apply a decoration group to this set of decorations
-	if (src.HasBuiltIn)
+	if(src.HasBuiltIn)
 	{
 		HasBuiltIn = true;
 		BuiltIn = src.BuiltIn;
 	}
 
-	if (src.HasLocation)
+	if(src.HasLocation)
 	{
 		HasLocation = true;
 		Location = src.Location;
 	}
 
-	if (src.HasComponent)
+	if(src.HasComponent)
 	{
 		HasComponent = true;
 		Component = src.Component;
 	}
 
-	if (src.HasOffset)
+	if(src.HasOffset)
 	{
 		HasOffset = true;
 		Offset = src.Offset;
 	}
 
-	if (src.HasArrayStride)
+	if(src.HasArrayStride)
 	{
 		HasArrayStride = true;
 		ArrayStride = src.ArrayStride;
 	}
 
-	if (src.HasMatrixStride)
+	if(src.HasMatrixStride)
 	{
 		HasMatrixStride = true;
 		MatrixStride = src.MatrixStride;
 	}
 
-	if (src.HasRowMajor)
+	if(src.HasRowMajor)
 	{
 		HasRowMajor = true;
 		RowMajor = src.RowMajor;
@@ -1376,7 +1376,7 @@
 		Binding = src.Binding;
 	}
 
-	if (src.InputAttachmentIndex >= 0)
+	if(src.InputAttachmentIndex >= 0)
 	{
 		InputAttachmentIndex = src.InputAttachmentIndex;
 	}
@@ -1385,14 +1385,14 @@
 void SpirvShader::ApplyDecorationsForId(Decorations *d, TypeOrObjectID id) const
 {
 	auto it = decorations.find(id);
-	if (it != decorations.end())
+	if(it != decorations.end())
 		d->Apply(it->second);
 }
 
 void SpirvShader::ApplyDecorationsForIdMember(Decorations *d, Type::ID id, uint32_t member) const
 {
 	auto it = memberDecorations.find(id);
-	if (it != memberDecorations.end() && member < it->second.size())
+	if(it != memberDecorations.end() && member < it->second.size())
 	{
 		d->Apply(it->second[member]);
 	}
@@ -1405,7 +1405,7 @@
 	auto &object = defs[resultId];
 	object.type = typeId;
 
-	switch (getType(typeId).opcode())
+	switch(getType(typeId).opcode())
 	{
 	case spv::OpTypePointer:
 	case spv::OpTypeImage:
@@ -1455,9 +1455,9 @@
 
 void SpirvShader::emitProlog(SpirvRoutine *routine) const
 {
-	for (auto insn : *this)
+	for(auto insn : *this)
 	{
-		switch (insn.opcode())
+		switch(insn.opcode())
 		{
 		case spv::OpVariable:
 		{
@@ -1511,9 +1511,9 @@
 
 	// Emit everything up to the first label
 	// TODO: Separate out dispatch of block from non-block instructions?
-	for (auto insn : *this)
+	for(auto insn : *this)
 	{
-		if (insn.opcode() == spv::OpLabel)
+		if(insn.opcode() == spv::OpLabel)
 		{
 			break;
 		}
@@ -1526,10 +1526,10 @@
 
 void SpirvShader::EmitInstructions(InsnIterator begin, InsnIterator end, EmitState *state) const
 {
-	for (auto insn = begin; insn != end; insn++)
+	for(auto insn = begin; insn != end; insn++)
 	{
 		auto res = EmitInstruction(insn, state);
-		switch (res)
+		switch(res)
 		{
 		case EmitResult::Continue:
 			continue;
@@ -1546,7 +1546,7 @@
 {
 	auto opcode = insn.opcode();
 
-	switch (opcode)
+	switch(opcode)
 	{
 	case spv::OpTypeVoid:
 	case spv::OpTypeInt:
@@ -1945,14 +1945,14 @@
 	auto &dst = state->createIntermediate(insn.word(2), type.sizeInComponents);
 	auto offset = 0u;
 
-	for (auto i = 0u; i < insn.wordCount() - 3; i++)
+	for(auto i = 0u; i < insn.wordCount() - 3; i++)
 	{
 		Object::ID srcObjectId = insn.word(3u + i);
 		auto & srcObject = getObject(srcObjectId);
 		auto & srcObjectTy = getType(srcObject.type);
 		GenericValue srcObjectAccess(this, state, srcObjectId);
 
-		for (auto j = 0u; j < srcObjectTy.sizeInComponents; j++)
+		for(auto j = 0u; j < srcObjectTy.sizeInComponents; j++)
 		{
 			dst.move(offset++, srcObjectAccess.Float(j));
 		}
@@ -1974,17 +1974,17 @@
 	GenericValue newPartObjectAccess(this, state, insn.word(3));
 
 	// old components before
-	for (auto i = 0u; i < firstNewComponent; i++)
+	for(auto i = 0u; i < firstNewComponent; i++)
 	{
 		dst.move(i, srcObjectAccess.Float(i));
 	}
 	// new part
-	for (auto i = 0u; i < newPartObjectTy.sizeInComponents; i++)
+	for(auto i = 0u; i < newPartObjectTy.sizeInComponents; i++)
 	{
 		dst.move(firstNewComponent + i, newPartObjectAccess.Float(i));
 	}
 	// old components after
-	for (auto i = firstNewComponent + newPartObjectTy.sizeInComponents; i < type.sizeInComponents; i++)
+	for(auto i = firstNewComponent + newPartObjectTy.sizeInComponents; i < type.sizeInComponents; i++)
 	{
 		dst.move(i, srcObjectAccess.Float(i));
 	}
@@ -2001,7 +2001,7 @@
 	auto firstComponent = WalkLiteralAccessChain(compositeTypeId, insn.wordCount() - 4, insn.wordPointer(4));
 
 	GenericValue compositeObjectAccess(this, state, insn.word(3));
-	for (auto i = 0u; i < type.sizeInComponents; i++)
+	for(auto i = 0u; i < type.sizeInComponents; i++)
 	{
 		dst.move(i, compositeObjectAccess.Float(firstComponent + i));
 	}
@@ -2021,16 +2021,16 @@
 	GenericValue firstHalfAccess(this, state, insn.word(3));
 	GenericValue secondHalfAccess(this, state, insn.word(4));
 
-	for (auto i = 0u; i < type.sizeInComponents; i++)
+	for(auto i = 0u; i < type.sizeInComponents; i++)
 	{
 		auto selector = insn.word(5 + i);
-		if (selector == static_cast<uint32_t>(-1))
+		if(selector == static_cast<uint32_t>(-1))
 		{
 			// Undefined value. Until we decide to do real undef values, zero is as good
 			// a value as any
 			dst.move(i, RValue<SIMD::Float>(0.0f));
 		}
-		else if (selector < firstHalfType.sizeInComponents)
+		else if(selector < firstHalfType.sizeInComponents)
 		{
 			dst.move(i, firstHalfAccess.Float(selector));
 		}
@@ -2054,7 +2054,7 @@
 
 	SIMD::UInt v = SIMD::UInt(0);
 
-	for (auto i = 0u; i < srcType.sizeInComponents; i++)
+	for(auto i = 0u; i < srcType.sizeInComponents; i++)
 	{
 		v |= CmpEQ(index.UInt(0), SIMD::UInt(i)) & src.UInt(i);
 	}
@@ -2072,7 +2072,7 @@
 	GenericValue component(this, state, insn.word(4));
 	GenericValue index(this, state, insn.word(5));
 
-	for (auto i = 0u; i < type.sizeInComponents; i++)
+	for(auto i = 0u; i < type.sizeInComponents; i++)
 	{
 		SIMD::UInt mask = CmpEQ(SIMD::UInt(i), index.UInt(0));
 		dst.move(i, (src.UInt(i) & ~mask) | (component.UInt(0) & mask));
@@ -2089,7 +2089,7 @@
 	auto lhs = GenericValue(this, state, insn.word(4));
 	auto rhs = GenericValue(this, state, insn.word(5));
 
-	for (auto i = 0u; i < type.sizeInComponents; i++)
+	for(auto i = 0u; i < type.sizeInComponents; i++)
 	{
 		auto sel = cond.Int(condIsScalar ? 0 : i);
 		dst.move(i, (sel & lhs.Int(i)) | (~sel & rhs.Int(i)));   // TODO: IfThenElse()
@@ -2108,7 +2108,7 @@
 
 	SIMD::UInt result = src.UInt(0);
 
-	for (auto i = 1u; i < srcType.sizeInComponents; i++)
+	for(auto i = 1u; i < srcType.sizeInComponents; i++)
 	{
 		result |= src.UInt(i);
 	}
@@ -2127,7 +2127,7 @@
 
 	SIMD::UInt result = src.UInt(0);
 
-	for (auto i = 1u; i < srcType.sizeInComponents; i++)
+	for(auto i = 1u; i < srcType.sizeInComponents; i++)
 	{
 		result &= src.UInt(i);
 	}
@@ -2151,14 +2151,14 @@
 
 	SIMD::UInt x(0);
 	auto mask = state->activeLaneMask() & state->storesAndAtomicsMask();
-	for (int j = 0; j < SIMD::Width; j++)
+	for(int j = 0; j < SIMD::Width; j++)
 	{
 		If(Extract(mask, j) != 0)
 		{
 			auto offset = Extract(ptrOffsets, j);
 			auto laneValue = Extract(value, j);
 			UInt v;
-			switch (insn.opcode())
+			switch(insn.opcode())
 			{
 			case spv::OpAtomicIAdd:
 			case spv::OpAtomicIIncrement:
@@ -2223,7 +2223,7 @@
 
 	SIMD::UInt x(0);
 	auto mask = state->activeLaneMask() & state->storesAndAtomicsMask();
-	for (int j = 0; j < SIMD::Width; j++)
+	for(int j = 0; j < SIMD::Width; j++)
 	{
 		If(Extract(mask, j) != 0)
 		{
@@ -2244,7 +2244,7 @@
 	auto ty = getType(insn.word(1));
 	auto &dst = state->createIntermediate(insn.word(2), ty.sizeInComponents);
 	auto src = GenericValue(this, state, insn.word(3));
-	for (uint32_t i = 0; i < ty.sizeInComponents; i++)
+	for(uint32_t i = 0; i < ty.sizeInComponents; i++)
 	{
 		dst.move(i, src.Int(i));
 	}
@@ -2294,16 +2294,16 @@
 
 void SpirvShader::emitEpilog(SpirvRoutine *routine) const
 {
-	for (auto insn : *this)
+	for(auto insn : *this)
 	{
-		switch (insn.opcode())
+		switch(insn.opcode())
 		{
 		case spv::OpVariable:
 		{
 			Object::ID resultId = insn.word(2);
 			auto &object = getObject(resultId);
 			auto &objectTy = getType(object.type);
-			if (object.kind == Object::Kind::InterfaceVariable && objectTy.storageClass == spv::StorageClassOutput)
+			if(object.kind == Object::Kind::InterfaceVariable && objectTy.storageClass == spv::StorageClassOutput)
 			{
 				auto &dst = routine->getVariable(resultId);
 				int offset = 0;
@@ -2329,7 +2329,7 @@
 
 VkShaderStageFlagBits SpirvShader::executionModelToStage(spv::ExecutionModel model)
 {
-	switch (model)
+	switch(model)
 	{
 	case spv::ExecutionModelVertex:                 return VK_SHADER_STAGE_VERTEX_BIT;
 	// case spv::ExecutionModelTessellationControl:    return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
diff --git a/src/Pipeline/SpirvShader.hpp b/src/Pipeline/SpirvShader.hpp
index 154ffd7..e3d3af7 100644
--- a/src/Pipeline/SpirvShader.hpp
+++ b/src/Pipeline/SpirvShader.hpp
@@ -516,7 +516,7 @@
 	// outputted by this shader.
 	unsigned int getNumOutputClipDistances() const
 	{
-		if (getUsedCapabilities().ClipDistance)
+		if(getUsedCapabilities().ClipDistance)
 		{
 			auto it = outputBuiltins.find(spv::BuiltInClipDistance);
 			if(it != outputBuiltins.end())
@@ -531,7 +531,7 @@
 	// outputted by this shader.
 	unsigned int getNumOutputCullDistances() const
 	{
-		if (getUsedCapabilities().CullDistance)
+		if(getUsedCapabilities().CullDistance)
 		{
 			auto it = outputBuiltins.find(spv::BuiltInCullDistance);
 			if(it != outputBuiltins.end())
@@ -916,7 +916,7 @@
 
 		RValue<SIMD::Float> Float(uint32_t i) const
 		{
-			if (intermediate)
+			if(intermediate)
 			{
 				return intermediate->Float(i);
 			}
@@ -930,7 +930,7 @@
 
 		RValue<SIMD::Int> Int(uint32_t i) const
 		{
-			if (intermediate)
+			if(intermediate)
 			{
 				return intermediate->Int(i);
 			}
@@ -940,7 +940,7 @@
 
 		RValue<SIMD::UInt> UInt(uint32_t i) const
 		{
-			if (intermediate)
+			if(intermediate)
 			{
 				return intermediate->UInt(i);
 			}
@@ -1162,7 +1162,7 @@
 	inline void setInputBuiltin(SpirvShader const *shader, spv::BuiltIn id, F&& f)
 	{
 		auto it = shader->inputBuiltins.find(id);
-		if (it != shader->inputBuiltins.end())
+		if(it != shader->inputBuiltins.end())
 		{
 			const auto& builtin = it->second;
 			f(builtin, getVariable(builtin.Id));
diff --git a/src/Pipeline/SpirvShaderArithmetic.cpp b/src/Pipeline/SpirvShaderArithmetic.cpp
index 5d9b980..218df0e 100644
--- a/src/Pipeline/SpirvShaderArithmetic.cpp
+++ b/src/Pipeline/SpirvShaderArithmetic.cpp
@@ -27,7 +27,7 @@
 	auto lhs = GenericValue(this, state, insn.word(3));
 	auto rhs = GenericValue(this, state, insn.word(4));
 
-	for (auto i = 0u; i < type.sizeInComponents; i++)
+	for(auto i = 0u; i < type.sizeInComponents; i++)
 	{
 		dst.move(i, lhs.Float(i) * rhs.Float(0));
 	}
@@ -43,10 +43,10 @@
 	auto rhs = GenericValue(this, state, insn.word(4));
 	auto rhsType = getType(rhs.type);
 
-	for (auto i = 0u; i < type.sizeInComponents; i++)
+	for(auto i = 0u; i < type.sizeInComponents; i++)
 	{
 		SIMD::Float v = lhs.Float(i) * rhs.Float(0);
-		for (auto j = 1u; j < rhsType.sizeInComponents; j++)
+		for(auto j = 1u; j < rhsType.sizeInComponents; j++)
 		{
 			v += lhs.Float(i + type.sizeInComponents * j) * rhs.Float(j);
 		}
@@ -64,10 +64,10 @@
 	auto rhs = GenericValue(this, state, insn.word(4));
 	auto lhsType = getType(lhs.type);
 
-	for (auto i = 0u; i < type.sizeInComponents; i++)
+	for(auto i = 0u; i < type.sizeInComponents; i++)
 	{
 		SIMD::Float v = lhs.Float(0) * rhs.Float(i * lhsType.sizeInComponents);
-		for (auto j = 1u; j < lhsType.sizeInComponents; j++)
+		for(auto j = 1u; j < lhsType.sizeInComponents; j++)
 		{
 			v += lhs.Float(j) * rhs.Float(i * lhsType.sizeInComponents + j);
 		}
@@ -88,12 +88,12 @@
 	auto numRows = getType(type.definition.word(2)).definition.word(3);
 	auto numAdds = getType(getObject(insn.word(3)).type).definition.word(3);
 
-	for (auto row = 0u; row < numRows; row++)
+	for(auto row = 0u; row < numRows; row++)
 	{
-		for (auto col = 0u; col < numColumns; col++)
+		for(auto col = 0u; col < numColumns; col++)
 		{
 			SIMD::Float v = SIMD::Float(0);
-			for (auto i = 0u; i < numAdds; i++)
+			for(auto i = 0u; i < numAdds; i++)
 			{
 				v += lhs.Float(i * numRows + row) * rhs.Float(col * numAdds + i);
 			}
@@ -122,9 +122,9 @@
 	auto numRows = lhsType.definition.word(3);
 	auto numCols = rhsType.definition.word(3);
 
-	for (auto col = 0u; col < numCols; col++)
+	for(auto col = 0u; col < numCols; col++)
 	{
-		for (auto row = 0u; row < numRows; row++)
+		for(auto row = 0u; row < numRows; row++)
 		{
 			dst.move(col * numRows + row, lhs.Float(row) * rhs.Float(col));
 		}
@@ -142,9 +142,9 @@
 	auto numCols = type.definition.word(3);
 	auto numRows = getType(type.definition.word(2)).sizeInComponents;
 
-	for (auto col = 0u; col < numCols; col++)
+	for(auto col = 0u; col < numCols; col++)
 	{
-		for (auto row = 0u; row < numRows; row++)
+		for(auto row = 0u; row < numRows; row++)
 		{
 			dst.move(col * numRows + row, mat.Float(row * numCols + col));
 		}
@@ -159,9 +159,9 @@
 	auto &dst = state->createIntermediate(insn.word(2), type.sizeInComponents);
 	auto src = GenericValue(this, state, insn.word(3));
 
-	for (auto i = 0u; i < type.sizeInComponents; i++)
+	for(auto i = 0u; i < type.sizeInComponents; i++)
 	{
-		switch (insn.opcode())
+		switch(insn.opcode())
 		{
 		case spv::OpNot:
 		case spv::OpLogicalNot:		// logical not == bitwise not due to all-bits boolean representation
@@ -186,7 +186,7 @@
 			auto one = SIMD::UInt(1);
 			auto v = src.UInt(i);
 			SIMD::UInt out = (v >> offset) & Bitmask32(count);
-			if (insn.opcode() == spv::OpBitFieldSExtract)
+			if(insn.opcode() == spv::OpBitFieldSExtract)
 			{
 				auto sign = out & NthBit32(count - one);
 				auto sext = ~(sign - one);
@@ -324,9 +324,9 @@
 	auto lhs = GenericValue(this, state, insn.word(3));
 	auto rhs = GenericValue(this, state, insn.word(4));
 
-	for (auto i = 0u; i < lhsType.sizeInComponents; i++)
+	for(auto i = 0u; i < lhsType.sizeInComponents; i++)
 	{
-		switch (insn.opcode())
+		switch(insn.opcode())
 		{
 		case spv::OpIAdd:
 			dst.move(i, lhs.Int(i) + rhs.Int(i));
@@ -536,7 +536,7 @@
 {
 	SIMD::Float d = x.Float(0) * y.Float(0);
 
-	for (auto i = 1u; i < numComponents; i++)
+	for(auto i = 1u; i < numComponents; i++)
 	{
 		d += x.Float(i) * y.Float(i);
 	}
diff --git a/src/Pipeline/SpirvShaderControlFlow.cpp b/src/Pipeline/SpirvShaderControlFlow.cpp
index 117e8f8..e7b1fd7 100644
--- a/src/Pipeline/SpirvShaderControlFlow.cpp
+++ b/src/Pipeline/SpirvShaderControlFlow.cpp
@@ -31,19 +31,19 @@
 
 	// Walk the instructions to find the last two of the block.
 	InsnIterator insns[2];
-	for (auto insn : *this)
+	for(auto insn : *this)
 	{
 		insns[0] = insns[1];
 		insns[1] = insn;
 	}
 
-	switch (insns[1].opcode())
+	switch(insns[1].opcode())
 	{
 		case spv::OpBranch:
 			branchInstruction = insns[1];
 			outs.emplace(Block::ID(branchInstruction.word(1)));
 
-			switch (insns[0].opcode())
+			switch(insns[0].opcode())
 			{
 				case spv::OpLoopMerge:
 					kind = Loop;
@@ -63,7 +63,7 @@
 			outs.emplace(Block::ID(branchInstruction.word(2)));
 			outs.emplace(Block::ID(branchInstruction.word(3)));
 
-			switch (insns[0].opcode())
+			switch(insns[0].opcode())
 			{
 				case spv::OpSelectionMerge:
 					kind = StructuredBranchConditional;
@@ -87,12 +87,12 @@
 		case spv::OpSwitch:
 			branchInstruction = insns[1];
 			outs.emplace(Block::ID(branchInstruction.word(2)));
-			for (uint32_t w = 4; w < branchInstruction.wordCount(); w += 2)
+			for(uint32_t w = 4; w < branchInstruction.wordCount(); w += 2)
 			{
 				outs.emplace(Block::ID(branchInstruction.word(w)));
 			}
 
-			switch (insns[0].opcode())
+			switch(insns[0].opcode())
 			{
 				case spv::OpSelectionMerge:
 					kind = StructuredSwitch;
@@ -113,10 +113,10 @@
 
 void SpirvShader::Function::TraverseReachableBlocks(Block::ID id, SpirvShader::Block::Set& reachable) const
 {
-	if (reachable.count(id) == 0)
+	if(reachable.count(id) == 0)
 	{
 		reachable.emplace(id);
-		for (auto out : getBlock(id).outs)
+		for(auto out : getBlock(id).outs)
 		{
 			TraverseReachableBlocks(out, reachable);
 		}
@@ -128,20 +128,20 @@
 	Block::Set reachable;
 	TraverseReachableBlocks(entry, reachable);
 
-	for (auto &it : blocks)
+	for(auto &it : blocks)
 	{
 		auto &blockId = it.first;
 		auto &block = it.second;
-		if (reachable.count(blockId) > 0)
+		if(reachable.count(blockId) > 0)
 		{
-			for (auto &outId : it.second.outs)
+			for(auto &outId : it.second.outs)
 			{
 				auto outIt = blocks.find(outId);
 				ASSERT_MSG(outIt != blocks.end(), "Block %d has a non-existent out %d", blockId.value(), outId.value());
 				auto &out = outIt->second;
 				out.ins.emplace(blockId);
 			}
-			if (block.kind == Block::Loop)
+			if(block.kind == Block::Loop)
 			{
 				auto mergeIt = blocks.find(block.mergeBlock);
 				ASSERT_MSG(mergeIt != blocks.end(), "Loop block %d has a non-existent merge block %d", blockId.value(), block.mergeBlock.value());
@@ -154,9 +154,9 @@
 void SpirvShader::Function::ForeachBlockDependency(Block::ID blockId, std::function<void(Block::ID)> f) const
 {
 	auto block = getBlock(blockId);
-	for (auto dep : block.ins)
+	for(auto dep : block.ins)
 	{
-		if (block.kind != Block::Loop ||                 // if not a loop...
+		if(block.kind != Block::Loop ||                 // if not a loop...
 			!ExistsPath(blockId, dep, block.mergeBlock)) // or a loop and not a loop back edge
 		{
 			f(dep);
@@ -173,14 +173,14 @@
 	std::queue<Block::ID> pending;
 	pending.emplace(from);
 
-	while (pending.size() > 0)
+	while(pending.size() > 0)
 	{
 		auto id = pending.front();
 		pending.pop();
-		for (auto out : getBlock(id).outs)
+		for(auto out : getBlock(id).outs)
 		{
-			if (seen.count(out) != 0) { continue; }
-			if (out == to) { return true; }
+			if(seen.count(out) != 0) { continue; }
+			if(out == to) { return true; }
 			pending.emplace(out);
 		}
 		seen.emplace(id);
@@ -198,7 +198,7 @@
 {
 	auto edge = Block::Edge{from, to};
 	auto it = edgeActiveLaneMasks.find(edge);
-	if (it == edgeActiveLaneMasks.end())
+	if(it == edgeActiveLaneMasks.end())
 	{
 		edgeActiveLaneMasks.emplace(edge, mask);
 	}
@@ -226,12 +226,12 @@
 	std::deque<Block::ID> pending;
 	state->pending = &pending;
 	pending.push_front(id);
-	while (pending.size() > 0)
+	while(pending.size() > 0)
 	{
 		auto id = pending.front();
 
 		auto const &block = function.getBlock(id);
-		if (id == ignore)
+		if(id == ignore)
 		{
 			pending.pop_front();
 			continue;
@@ -241,14 +241,14 @@
 		auto depsDone = true;
 		function.ForeachBlockDependency(id, [&](Block::ID dep)
 		{
-			if (state->visited.count(dep) == 0)
+			if(state->visited.count(dep) == 0)
 			{
 				state->pending->push_front(dep);
 				depsDone = false;
 			}
 		});
 
-		if (!depsDone)
+		if(!depsDone)
 		{
 			continue;
 		}
@@ -257,7 +257,7 @@
 
 		state->block = id;
 
-		switch (block.kind)
+		switch(block.kind)
 		{
 			case Block::Simple:
 			case Block::StructuredBranchConditional:
@@ -285,16 +285,16 @@
 	auto blockId = state->block;
 	auto block = function.getBlock(blockId);
 
-	if (!state->visited.emplace(blockId).second)
+	if(!state->visited.emplace(blockId).second)
 	{
 		return; // Already generated this block.
 	}
 
-	if (blockId != function.entry)
+	if(blockId != function.entry)
 	{
 		// Set the activeLaneMask.
 		SIMD::Int activeLaneMask(0);
-		for (auto in : block.ins)
+		for(auto in : block.ins)
 		{
 			auto inMask = GetActiveLaneMaskEdge(state, in, blockId);
 			activeLaneMask |= inMask;
@@ -304,9 +304,9 @@
 
 	EmitInstructions(block.begin(), block.end(), state);
 
-	for (auto out : block.outs)
+	for(auto out : block.outs)
 	{
-		if (state->visited.count(out) == 0)
+		if(state->visited.count(out) == 0)
 		{
 			state->pending->push_back(out);
 		}
@@ -321,7 +321,7 @@
 	auto mergeBlockId = block.mergeBlock;
 	auto &mergeBlock = function.getBlock(mergeBlockId);
 
-	if (!state->visited.emplace(blockId).second)
+	if(!state->visited.emplace(blockId).second)
 	{
 		return; // Already emitted this loop.
 	}
@@ -333,9 +333,9 @@
 
 	// incomingBlocks are block ins that are not back-edges.
 	std::unordered_set<Block::ID> incomingBlocks;
-	for (auto in : block.ins)
+	for(auto in : block.ins)
 	{
-		if (loopBlocks.count(in) == 0)
+		if(loopBlocks.count(in) == 0)
 		{
 			incomingBlocks.emplace(in);
 		}
@@ -343,9 +343,9 @@
 
 	// Emit the loop phi instructions, and initialize them with a value from
 	// the incoming blocks.
-	for (auto insn = block.begin(); insn != block.mergeInstruction; insn++)
+	for(auto insn = block.begin(); insn != block.mergeInstruction; insn++)
 	{
-		if (insn.opcode() == spv::OpPhi)
+		if(insn.opcode() == spv::OpPhi)
 		{
 			StorePhi(blockId, insn, state, incomingBlocks);
 		}
@@ -354,7 +354,7 @@
 	// loopActiveLaneMask is the mask of lanes that are continuing to loop.
 	// This is initialized with the incoming active lane masks.
 	SIMD::Int loopActiveLaneMask = SIMD::Int(0);
-	for (auto in : incomingBlocks)
+	for(auto in : incomingBlocks)
 	{
 		loopActiveLaneMask |= GetActiveLaneMaskEdge(state, in, blockId);
 	}
@@ -362,7 +362,7 @@
 	// mergeActiveLaneMasks contains edge lane masks for the merge block.
 	// This is the union of all edge masks across all iterations of the loop.
 	std::unordered_map<Block::ID, SIMD::Int> mergeActiveLaneMasks;
-	for (auto in : function.getBlock(mergeBlockId).ins)
+	for(auto in : function.getBlock(mergeBlockId).ins)
 	{
 		mergeActiveLaneMasks.emplace(in, SIMD::Int(0));
 	}
@@ -379,9 +379,9 @@
 	state->setActiveLaneMask(loopActiveLaneMask);
 
 	// Emit the non-phi loop header block's instructions.
-	for (auto insn = block.begin(); insn != block.end(); insn++)
+	for(auto insn = block.begin(); insn != block.end(); insn++)
 	{
-		if (insn.opcode() == spv::OpPhi)
+		if(insn.opcode() == spv::OpPhi)
 		{
 			LoadPhi(insn, state);
 		}
@@ -393,7 +393,7 @@
 
 	// Emit all blocks between the loop header and the merge block, but
 	// don't emit the merge block yet.
-	for (auto out : block.outs)
+	for(auto out : block.outs)
 	{
 		EmitBlocks(out, state, mergeBlockId);
 	}
@@ -403,29 +403,29 @@
 
 	// Rebuild the loopActiveLaneMask from the loop back edges.
 	loopActiveLaneMask = SIMD::Int(0);
-	for (auto in : block.ins)
+	for(auto in : block.ins)
 	{
-		if (function.ExistsPath(blockId, in, mergeBlockId))
+		if(function.ExistsPath(blockId, in, mergeBlockId))
 		{
 			loopActiveLaneMask |= GetActiveLaneMaskEdge(state, in, blockId);
 		}
 	}
 
 	// Add active lanes to the merge lane mask.
-	for (auto in : function.getBlock(mergeBlockId).ins)
+	for(auto in : function.getBlock(mergeBlockId).ins)
 	{
 		auto edge = Block::Edge{in, mergeBlockId};
 		auto it = state->edgeActiveLaneMasks.find(edge);
-		if (it != state->edgeActiveLaneMasks.end())
+		if(it != state->edgeActiveLaneMasks.end())
 		{
 			mergeActiveLaneMasks[in] |= it->second;
 		}
 	}
 
 	// Update loop phi values.
-	for (auto insn = block.begin(); insn != block.mergeInstruction; insn++)
+	for(auto insn = block.begin(); insn != block.mergeInstruction; insn++)
 	{
-		if (insn.opcode() == spv::OpPhi)
+		if(insn.opcode() == spv::OpPhi)
 		{
 			StorePhi(blockId, insn, state, loopBlocks);
 		}
@@ -438,10 +438,10 @@
 	// Consider the following:
 	//
 	//     int phi_source = 0;
-	//     for (uint i = 0; i < 4; i++)
+	//     for(uint i = 0; i < 4; i++)
 	//     {
 	//         phi_source = 0;
-	//         if (gl_GlobalInvocationID.x % 4 == i) // divergent control flow
+	//         if(gl_GlobalInvocationID.x % 4 == i) // divergent control flow
 	//         {
 	//             phi_source = 42; // single lane assignment.
 	//             break; // activeLaneMask for [loop->merge] is active for a single lane.
@@ -455,9 +455,9 @@
 	// only have a single lane assigned. However by 'phi' value in the merge
 	// block needs to be assigned the union of all the per-lane assignments
 	// of phi_source when that lane exited the loop.
-	for (auto insn = mergeBlock.begin(); insn != mergeBlock.end(); insn++)
+	for(auto insn = mergeBlock.begin(); insn != mergeBlock.end(); insn++)
 	{
-		if (insn.opcode() == spv::OpPhi)
+		if(insn.opcode() == spv::OpPhi)
 		{
 			StorePhi(mergeBlockId, insn, state, loopBlocks);
 		}
@@ -471,7 +471,7 @@
 	// Continue emitting from the merge block.
 	Nucleus::setInsertBlock(mergeBasicBlock);
 	state->pending->push_back(mergeBlockId);
-	for (auto it : mergeActiveLaneMasks)
+	for(auto it : mergeActiveLaneMasks)
 	{
 		state->addActiveLaneMaskEdge(it.first, mergeBlockId, it.second);
 	}
@@ -525,7 +525,7 @@
 	// Gather up the case label matches and calculate defaultLaneMask.
 	std::vector<RValue<SIMD::Int>> caseLabelMatches;
 	caseLabelMatches.reserve(numCases);
-	for (uint32_t i = 0; i < numCases; i++)
+	for(uint32_t i = 0; i < numCases; i++)
 	{
 		auto label = block.branchInstruction.word(i * 2 + 3);
 		auto caseBlockId = Block::ID(block.branchInstruction.word(i * 2 + 4));
@@ -573,22 +573,22 @@
 	ASSERT(function.blocks.size() == 1);
 	spv::Op wrapOpKill[] = { spv::OpLabel, spv::OpKill };
 
-	for (auto block : function.blocks)
+	for(auto block : function.blocks)
 	{
 		int insnNumber = 0;
-		for (auto blockInsn : block.second)
+		for(auto blockInsn : block.second)
 		{
-			if (insnNumber > 1)
+			if(insnNumber > 1)
 			{
 				UNIMPLEMENTED("Function block number of instructions: %d", insnNumber);
 				return EmitResult::Continue;
 			}
-			if (blockInsn.opcode() != wrapOpKill[insnNumber++])
+			if(blockInsn.opcode() != wrapOpKill[insnNumber++])
 			{
 				UNIMPLEMENTED("Function block instruction %d : %s", insnNumber - 1, OpcodeName(blockInsn.opcode()).c_str());
 				return EmitResult::Continue;
 			}
-			if (blockInsn.opcode() == spv::OpKill)
+			if(blockInsn.opcode() == spv::OpKill)
 			{
 				EmitInstruction(blockInsn, state);
 			}
@@ -606,7 +606,7 @@
 	// just always emit the full fence.
 	Fence(semantics);
 
-	switch (executionScope)
+	switch(executionScope)
 	{
 	case spv::ScopeWorkgroup:
 		Yield(YieldResult::ControlBarrier);
@@ -626,7 +626,7 @@
 {
 	auto &function = getFunction(state->function);
 	auto currentBlock = function.getBlock(state->block);
-	if (!currentBlock.isLoopMerge)
+	if(!currentBlock.isLoopMerge)
 	{
 		// If this is a loop merge block, then don't attempt to update the
 		// phi values from the ins. EmitLoop() has had to take special care
@@ -664,12 +664,12 @@
 	ASSERT(storageIt != state->routine->phis.end());
 	auto &storage = storageIt->second;
 
-	for (uint32_t w = 3; w < insn.wordCount(); w += 2)
+	for(uint32_t w = 3; w < insn.wordCount(); w += 2)
 	{
 		auto varId = Object::ID(insn.word(w + 0));
 		auto blockId = Block::ID(insn.word(w + 1));
 
-		if (filter.count(blockId) == 0)
+		if(filter.count(blockId) == 0)
 		{
 			continue;
 		}
@@ -677,7 +677,7 @@
 		auto mask = GetActiveLaneMaskEdge(state, blockId, currentBlock);
 		auto in = GenericValue(this, state, varId);
 
-		for (uint32_t i = 0; i < type.sizeInComponents; i++)
+		for(uint32_t i = 0; i < type.sizeInComponents; i++)
 		{
 			storage[i] = As<SIMD::Float>((As<SIMD::Int>(storage[i]) & ~mask) | (in.Int(i) & mask));
 		}
@@ -686,7 +686,7 @@
 
 void SpirvShader::Fence(spv::MemorySemanticsMask semantics) const
 {
-	if (semantics == spv::MemorySemanticsMaskNone)
+	if(semantics == spv::MemorySemanticsMaskNone)
 	{
 		return; //no-op
 	}
diff --git a/src/Pipeline/SpirvShaderGLSLstd450.cpp b/src/Pipeline/SpirvShaderGLSLstd450.cpp
index 858765f..50ae6a5 100644
--- a/src/Pipeline/SpirvShaderGLSLstd450.cpp
+++ b/src/Pipeline/SpirvShaderGLSLstd450.cpp
@@ -32,12 +32,12 @@
 	auto &dst = state->createIntermediate(insn.word(2), type.sizeInComponents);
 	auto extInstIndex = static_cast<GLSLstd450>(insn.word(4));
 
-	switch (extInstIndex)
+	switch(extInstIndex)
 	{
 	case GLSLstd450FAbs:
 	{
 		auto src = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Abs(src.Float(i)));
 		}
@@ -46,7 +46,7 @@
 	case GLSLstd450SAbs:
 	{
 		auto src = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Abs(src.Int(i)));
 		}
@@ -64,7 +64,7 @@
 	case GLSLstd450Floor:
 	{
 		auto src = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Floor(src.Float(i)));
 		}
@@ -73,7 +73,7 @@
 	case GLSLstd450Trunc:
 	{
 		auto src = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Trunc(src.Float(i)));
 		}
@@ -82,7 +82,7 @@
 	case GLSLstd450Ceil:
 	{
 		auto src = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Ceil(src.Float(i)));
 		}
@@ -91,7 +91,7 @@
 	case GLSLstd450Fract:
 	{
 		auto src = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Frac(src.Float(i)));
 		}
@@ -100,7 +100,7 @@
 	case GLSLstd450Round:
 	{
 		auto src = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Round(src.Float(i)));
 		}
@@ -109,7 +109,7 @@
 	case GLSLstd450RoundEven:
 	{
 		auto src = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			auto x = Round(src.Float(i));
 			// dst = round(src) + ((round(src) < src) * 2 - 1) * (fract(src) == 0.5) * isOdd(round(src));
@@ -122,7 +122,7 @@
 	{
 		auto lhs = GenericValue(this, state, insn.word(5));
 		auto rhs = GenericValue(this, state, insn.word(6));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Min(lhs.Float(i), rhs.Float(i)));
 		}
@@ -132,7 +132,7 @@
 	{
 		auto lhs = GenericValue(this, state, insn.word(5));
 		auto rhs = GenericValue(this, state, insn.word(6));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Max(lhs.Float(i), rhs.Float(i)));
 		}
@@ -142,7 +142,7 @@
 	{
 		auto lhs = GenericValue(this, state, insn.word(5));
 		auto rhs = GenericValue(this, state, insn.word(6));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Min(lhs.Int(i), rhs.Int(i)));
 		}
@@ -152,7 +152,7 @@
 	{
 		auto lhs = GenericValue(this, state, insn.word(5));
 		auto rhs = GenericValue(this, state, insn.word(6));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Max(lhs.Int(i), rhs.Int(i)));
 		}
@@ -162,7 +162,7 @@
 	{
 		auto lhs = GenericValue(this, state, insn.word(5));
 		auto rhs = GenericValue(this, state, insn.word(6));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Min(lhs.UInt(i), rhs.UInt(i)));
 		}
@@ -172,7 +172,7 @@
 	{
 		auto lhs = GenericValue(this, state, insn.word(5));
 		auto rhs = GenericValue(this, state, insn.word(6));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Max(lhs.UInt(i), rhs.UInt(i)));
 		}
@@ -182,7 +182,7 @@
 	{
 		auto edge = GenericValue(this, state, insn.word(5));
 		auto x = GenericValue(this, state, insn.word(6));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, CmpNLT(x.Float(i), edge.Float(i)) & As<SIMD::Int>(SIMD::Float(1.0f)));
 		}
@@ -193,7 +193,7 @@
 		auto edge0 = GenericValue(this, state, insn.word(5));
 		auto edge1 = GenericValue(this, state, insn.word(6));
 		auto x = GenericValue(this, state, insn.word(7));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			auto tx = Min(Max((x.Float(i) - edge0.Float(i)) /
 					(edge1.Float(i) - edge0.Float(i)), SIMD::Float(0.0f)), SIMD::Float(1.0f));
@@ -206,7 +206,7 @@
 		auto x = GenericValue(this, state, insn.word(5));
 		auto y = GenericValue(this, state, insn.word(6));
 		auto a = GenericValue(this, state, insn.word(7));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, a.Float(i) * (y.Float(i) - x.Float(i)) + x.Float(i));
 		}
@@ -217,7 +217,7 @@
 		auto x = GenericValue(this, state, insn.word(5));
 		auto minVal = GenericValue(this, state, insn.word(6));
 		auto maxVal = GenericValue(this, state, insn.word(7));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Min(Max(x.Float(i), minVal.Float(i)), maxVal.Float(i)));
 		}
@@ -228,7 +228,7 @@
 		auto x = GenericValue(this, state, insn.word(5));
 		auto minVal = GenericValue(this, state, insn.word(6));
 		auto maxVal = GenericValue(this, state, insn.word(7));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Min(Max(x.Int(i), minVal.Int(i)), maxVal.Int(i)));
 		}
@@ -239,7 +239,7 @@
 		auto x = GenericValue(this, state, insn.word(5));
 		auto minVal = GenericValue(this, state, insn.word(6));
 		auto maxVal = GenericValue(this, state, insn.word(7));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Min(Max(x.UInt(i), minVal.UInt(i)), maxVal.UInt(i)));
 		}
@@ -248,7 +248,7 @@
 	case GLSLstd450FSign:
 	{
 		auto src = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			auto neg = As<SIMD::Int>(CmpLT(src.Float(i), SIMD::Float(-0.0f))) & As<SIMD::Int>(SIMD::Float(-1.0f));
 			auto pos = As<SIMD::Int>(CmpNLE(src.Float(i), SIMD::Float(+0.0f))) & As<SIMD::Int>(SIMD::Float(1.0f));
@@ -259,7 +259,7 @@
 	case GLSLstd450SSign:
 	{
 		auto src = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			auto neg = CmpLT(src.Int(i), SIMD::Int(0)) & SIMD::Int(-1);
 			auto pos = CmpNLE(src.Int(i), SIMD::Int(0)) & SIMD::Int(1);
@@ -274,7 +274,7 @@
 
 		SIMD::Float d = Dot(type.sizeInComponents, I, N);
 
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, I.Float(i) - SIMD::Float(2.0f) * d * N.Float(i));
 		}
@@ -291,7 +291,7 @@
 		SIMD::Int pos = CmpNLT(k, SIMD::Float(0.0f));
 		SIMD::Float t = (eta.Float(0) * d + Sqrt(k));
 
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, pos & As<SIMD::Int>(eta.Float(0) * I.Float(i) - t * N.Float(i)));
 		}
@@ -306,7 +306,7 @@
 		SIMD::Float d = Dot(type.sizeInComponents, I, Nref);
 		SIMD::Int neg = CmpLT(d, SIMD::Float(0.0f));
 
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			auto n = N.Float(i);
 			dst.move(i, (neg & As<SIMD::Int>(n)) | (~neg & As<SIMD::Int>(-n)));
@@ -327,7 +327,7 @@
 		SIMD::Float d = Dot(getType(getObject(insn.word(5)).type).sizeInComponents, x, x);
 		SIMD::Float invLength = SIMD::Float(1.0f) / Sqrt(d);
 
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, invLength * x.Float(i));
 		}
@@ -342,7 +342,7 @@
 		// sqrt(dot(p0-p1, p0-p1))
 		SIMD::Float d = (p0.Float(0) - p1.Float(0)) * (p0.Float(0) - p1.Float(0));
 
-		for (auto i = 1u; i < p0Type.sizeInComponents; i++)
+		for(auto i = 1u; i < p0Type.sizeInComponents; i++)
 		{
 			d += (p0.Float(i) - p1.Float(i)) * (p0.Float(i) - p1.Float(i));
 		}
@@ -363,13 +363,13 @@
 		// - Eliminate lane masking and assume interleaving.
 		auto robustness = OutOfBoundsBehavior::UndefinedBehavior;
 
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			SIMD::Float whole, frac;
 			std::tie(whole, frac) = Modf(val.Float(i));
 			dst.move(i, frac);
 			auto p = ptr + (i * sizeof(float));
-			if (interleavedByLane) { p = InterleaveByLane(p); }
+			if(interleavedByLane) { p = InterleaveByLane(p); }
 			p.Store(whole, robustness, state->activeLaneMask());
 		}
 		break;
@@ -379,7 +379,7 @@
 		auto val = GenericValue(this, state, insn.word(5));
 		auto valTy = getType(val.type);
 
-		for (auto i = 0u; i < valTy.sizeInComponents; i++)
+		for(auto i = 0u; i < valTy.sizeInComponents; i++)
 		{
 			SIMD::Float whole, frac;
 			std::tie(whole, frac) = Modf(val.Float(i));
@@ -482,7 +482,7 @@
 		auto a = GenericValue(this, state, insn.word(5));
 		auto b = GenericValue(this, state, insn.word(6));
 		auto c = GenericValue(this, state, insn.word(7));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, FMA(a.Float(i), b.Float(i), c.Float(i)));
 		}
@@ -501,7 +501,7 @@
 		// - Eliminate lane masking and assume interleaving.
 		auto robustness = OutOfBoundsBehavior::UndefinedBehavior;
 
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			SIMD::Float significand;
 			SIMD::Int exponent;
@@ -510,7 +510,7 @@
 			dst.move(i, significand);
 
 			auto p = ptr + (i * sizeof(float));
-			if (interleavedByLane) { p = InterleaveByLane(p); }
+			if(interleavedByLane) { p = InterleaveByLane(p); }
 			p.Store(exponent, robustness, state->activeLaneMask());
 		}
 		break;
@@ -519,7 +519,7 @@
 	{
 		auto val = GenericValue(this, state, insn.word(5));
 		auto numComponents = getType(val.type).sizeInComponents;
-		for (auto i = 0u; i < numComponents; i++)
+		for(auto i = 0u; i < numComponents; i++)
 		{
 			auto significandAndExponent = Frexp(val.Float(i));
 			dst.move(i, significandAndExponent.first);
@@ -531,7 +531,7 @@
 	{
 		auto significand = GenericValue(this, state, insn.word(5));
 		auto exponent = GenericValue(this, state, insn.word(6));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			// Assumes IEEE 754
 			auto in = significand.Float(i);
@@ -564,7 +564,7 @@
 	case GLSLstd450Radians:
 	{
 		auto degrees = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, degrees.Float(i) * SIMD::Float(PI / 180.0f));
 		}
@@ -573,7 +573,7 @@
 	case GLSLstd450Degrees:
 	{
 		auto radians = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, radians.Float(i) * SIMD::Float(180.0f / PI));
 		}
@@ -582,7 +582,7 @@
 	case GLSLstd450Sin:
 	{
 		auto radians = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Sin(radians.Float(i)));
 		}
@@ -591,7 +591,7 @@
 	case GLSLstd450Cos:
 	{
 		auto radians = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Cos(radians.Float(i)));
 		}
@@ -600,7 +600,7 @@
 	case GLSLstd450Tan:
 	{
 		auto radians = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Tan(radians.Float(i)));
 		}
@@ -609,7 +609,7 @@
 	case GLSLstd450Asin:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Asin(val.Float(i)));
 		}
@@ -618,7 +618,7 @@
 	case GLSLstd450Acos:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Acos(val.Float(i)));
 		}
@@ -627,7 +627,7 @@
 	case GLSLstd450Atan:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Atan(val.Float(i)));
 		}
@@ -636,7 +636,7 @@
 	case GLSLstd450Sinh:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Sinh(val.Float(i)));
 		}
@@ -645,7 +645,7 @@
 	case GLSLstd450Cosh:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Cosh(val.Float(i)));
 		}
@@ -654,7 +654,7 @@
 	case GLSLstd450Tanh:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Tanh(val.Float(i)));
 		}
@@ -663,7 +663,7 @@
 	case GLSLstd450Asinh:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Asinh(val.Float(i)));
 		}
@@ -672,7 +672,7 @@
 	case GLSLstd450Acosh:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Acosh(val.Float(i)));
 		}
@@ -681,7 +681,7 @@
 	case GLSLstd450Atanh:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Atanh(val.Float(i)));
 		}
@@ -691,7 +691,7 @@
 	{
 		auto x = GenericValue(this, state, insn.word(5));
 		auto y = GenericValue(this, state, insn.word(6));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Atan2(x.Float(i), y.Float(i)));
 		}
@@ -701,7 +701,7 @@
 	{
 		auto x = GenericValue(this, state, insn.word(5));
 		auto y = GenericValue(this, state, insn.word(6));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Pow(x.Float(i), y.Float(i)));
 		}
@@ -710,7 +710,7 @@
 	case GLSLstd450Exp:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Exp(val.Float(i)));
 		}
@@ -719,7 +719,7 @@
 	case GLSLstd450Log:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Log(val.Float(i)));
 		}
@@ -728,7 +728,7 @@
 	case GLSLstd450Exp2:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Exp2(val.Float(i)));
 		}
@@ -737,7 +737,7 @@
 	case GLSLstd450Log2:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Log2(val.Float(i)));
 		}
@@ -746,7 +746,7 @@
 	case GLSLstd450Sqrt:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, Sqrt(val.Float(i)));
 		}
@@ -757,16 +757,16 @@
 		auto val = GenericValue(this, state, insn.word(5));
 		Decorations d;
 		ApplyDecorationsForId(&d, insn.word(5));
-		if (d.RelaxedPrecision)
+		if(d.RelaxedPrecision)
 		{
-			for (auto i = 0u; i < type.sizeInComponents; i++)
+			for(auto i = 0u; i < type.sizeInComponents; i++)
 			{
 				dst.move(i, RcpSqrt_pp(val.Float(i)));
 			}
 		}
 		else
 		{
-			for (auto i = 0u; i < type.sizeInComponents; i++)
+			for(auto i = 0u; i < type.sizeInComponents; i++)
 			{
 				dst.move(i, SIMD::Float(1.0f) / Sqrt(val.Float(i)));
 			}
@@ -777,7 +777,7 @@
 	{
 		auto mat = GenericValue(this, state, insn.word(5));
 		auto numComponents = getType(mat.type).sizeInComponents;
-		switch (numComponents)
+		switch(numComponents)
 		{
 		case 4: // 2x2
 			dst.move(0, Determinant(
@@ -806,14 +806,14 @@
 	{
 		auto mat = GenericValue(this, state, insn.word(5));
 		auto numComponents = getType(mat.type).sizeInComponents;
-		switch (numComponents)
+		switch(numComponents)
 		{
 		case 4: // 2x2
 		{
 			auto inv = MatrixInverse(
 				mat.Float(0), mat.Float(1),
 				mat.Float(2), mat.Float(3));
-			for (uint32_t i = 0; i < inv.size(); i++)
+			for(uint32_t i = 0; i < inv.size(); i++)
 			{
 				dst.move(i, inv[i]);
 			}
@@ -825,7 +825,7 @@
 				mat.Float(0), mat.Float(1), mat.Float(2),
 				mat.Float(3), mat.Float(4), mat.Float(5),
 				mat.Float(6), mat.Float(7), mat.Float(8));
-			for (uint32_t i = 0; i < inv.size(); i++)
+			for(uint32_t i = 0; i < inv.size(); i++)
 			{
 				dst.move(i, inv[i]);
 			}
@@ -838,7 +838,7 @@
 				mat.Float(4),  mat.Float(5),  mat.Float(6),  mat.Float(7),
 				mat.Float(8),  mat.Float(9),  mat.Float(10), mat.Float(11),
 				mat.Float(12), mat.Float(13), mat.Float(14), mat.Float(15));
-			for (uint32_t i = 0; i < inv.size(); i++)
+			for(uint32_t i = 0; i < inv.size(); i++)
 			{
 				dst.move(i, inv[i]);
 			}
@@ -867,7 +867,7 @@
 	case GLSLstd450FindILsb:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			auto v = val.UInt(i);
 			dst.move(i, Cttz(v, true) | CmpEQ(v, SIMD::UInt(0)));
@@ -877,7 +877,7 @@
 	case GLSLstd450FindSMsb:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			auto v = val.UInt(i) ^ As<SIMD::UInt>(CmpLT(val.Int(i), SIMD::Int(0)));
 			dst.move(i, SIMD::UInt(31) - Ctlz(v, false));
@@ -887,7 +887,7 @@
 	case GLSLstd450FindUMsb:
 	{
 		auto val = GenericValue(this, state, insn.word(5));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, SIMD::UInt(31) - Ctlz(val.UInt(i), false));
 		}
@@ -912,7 +912,7 @@
 	{
 		auto x = GenericValue(this, state, insn.word(5));
 		auto y = GenericValue(this, state, insn.word(6));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, NMin(x.Float(i), y.Float(i)));
 		}
@@ -922,7 +922,7 @@
 	{
 		auto x = GenericValue(this, state, insn.word(5));
 		auto y = GenericValue(this, state, insn.word(6));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, NMax(x.Float(i), y.Float(i)));
 		}
@@ -933,7 +933,7 @@
 		auto x = GenericValue(this, state, insn.word(5));
 		auto minVal = GenericValue(this, state, insn.word(6));
 		auto maxVal = GenericValue(this, state, insn.word(7));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			auto clamp = NMin(NMax(x.Float(i), minVal.Float(i)), maxVal.Float(i));
 			dst.move(i, clamp);
diff --git a/src/Pipeline/SpirvShaderGroup.cpp b/src/Pipeline/SpirvShaderGroup.cpp
index 2ef7db6..cc46949 100644
--- a/src/Pipeline/SpirvShaderGroup.cpp
+++ b/src/Pipeline/SpirvShaderGroup.cpp
@@ -35,12 +35,12 @@
 	{
 		SpirvShader::GenericValue value(shader, state, insn.word(5));
 		auto &type = shader->getType(SpirvShader::Type::ID(insn.word(1)));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			auto mask = As<SIMD::UInt>(state->activeLaneMask());
 			SIMD::UInt v_uint = (value.UInt(i) & mask) | (As<SIMD::UInt>(identity) & ~mask);
 			TYPE v = As<TYPE>(v_uint);
-			switch (spv::GroupOperation(insn.word(4)))
+			switch(spv::GroupOperation(insn.word(4)))
 			{
 			case spv::GroupOperationReduce:
 			{
@@ -87,7 +87,7 @@
 
 	auto &dst = state->createIntermediate(resultId, type.sizeInComponents);
 
-	switch (insn.opcode())
+	switch(insn.opcode())
 	{
 	case spv::OpGroupNonUniformElect:
 	{
@@ -122,11 +122,11 @@
 		auto res = SIMD::UInt(0xffffffff);
 		SIMD::UInt active = As<SIMD::UInt>(state->activeLaneMask());
 		SIMD::UInt inactive = ~active;
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			SIMD::UInt v = value.UInt(i) & active;
 			SIMD::UInt filled = v;
-			for (int j = 0; j < SIMD::Width - 1; j++)
+			for(int j = 0; j < SIMD::Width - 1; j++)
 			{
 				filled |= filled.yzwx & inactive; // Populate inactive 'holes' with a live value
 			}
@@ -142,7 +142,7 @@
 		auto id = SIMD::Int(GetConstScalarInt(insn.word(5)));
 		GenericValue value(this, state, valueId);
 		auto mask = CmpEQ(id, SIMD::Int(0, 1, 2, 3));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, OrAll(value.Int(i) & mask));
 		}
@@ -160,7 +160,7 @@
 		//   elect = active & ~(active.Oxyz | active.OOxy | active.OOOx)
 		auto v0111 = SIMD::Int(0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
 		auto elect = active & ~(v0111 & (active.xxyz | active.xxxy | active.xxxx));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			dst.move(i, OrAll(value.Int(i) & elect));
 		}
@@ -215,7 +215,7 @@
 		ASSERT(type.sizeInComponents == 1);
 		ASSERT(getType(getObject(valueId).type).sizeInComponents == 4);
 		GenericValue value(this, state, valueId);
-		switch (operation)
+		switch(operation)
 		{
 		case spv::GroupOperationReduce:
 			dst.move(0, CountBits(value.UInt(0) & SIMD::UInt(15)));
@@ -260,7 +260,7 @@
 		auto y = CmpEQ(SIMD::Int(1), id.Int(0));
 		auto z = CmpEQ(SIMD::Int(2), id.Int(0));
 		auto w = CmpEQ(SIMD::Int(3), id.Int(0));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			SIMD::Int v = value.Int(i);
 			dst.move(i, (x & v.xxxx) | (y & v.yyyy) | (z & v.zzzz) | (w & v.wwww));
@@ -276,7 +276,7 @@
 		auto y = CmpEQ(SIMD::Int(1), SIMD::Int(0, 1, 2, 3) ^ mask.Int(0));
 		auto z = CmpEQ(SIMD::Int(2), SIMD::Int(0, 1, 2, 3) ^ mask.Int(0));
 		auto w = CmpEQ(SIMD::Int(3), SIMD::Int(0, 1, 2, 3) ^ mask.Int(0));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			SIMD::Int v = value.Int(i);
 			dst.move(i, (x & v.xxxx) | (y & v.yyyy) | (z & v.zzzz) | (w & v.wwww));
@@ -292,7 +292,7 @@
 		auto d1 = CmpEQ(SIMD::Int(1), delta.Int(0));
 		auto d2 = CmpEQ(SIMD::Int(2), delta.Int(0));
 		auto d3 = CmpEQ(SIMD::Int(3), delta.Int(0));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			SIMD::Int v = value.Int(i);
 			dst.move(i, (d0 & v.xyzw) | (d1 & v.xxyz) | (d2 & v.xxxy) | (d3 & v.xxxx));
@@ -308,7 +308,7 @@
 		auto d1 = CmpEQ(SIMD::Int(1), delta.Int(0));
 		auto d2 = CmpEQ(SIMD::Int(2), delta.Int(0));
 		auto d3 = CmpEQ(SIMD::Int(3), delta.Int(0));
-		for (auto i = 0u; i < type.sizeInComponents; i++)
+		for(auto i = 0u; i < type.sizeInComponents; i++)
 		{
 			SIMD::Int v = value.Int(i);
 			dst.move(i, (d0 & v.xyzw) | (d1 & v.yzww) | (d2 & v.zwww) | (d3 & v.wwww));
diff --git a/src/Pipeline/SpirvShaderImage.cpp b/src/Pipeline/SpirvShaderImage.cpp
index f2828f8..5e7edcf 100644
--- a/src/Pipeline/SpirvShaderImage.cpp
+++ b/src/Pipeline/SpirvShaderImage.cpp
@@ -25,7 +25,7 @@
 
 VkFormat SpirvFormatToVulkanFormat(spv::ImageFormat format)
 {
-	switch (format)
+	switch(format)
 	{
 	case spv::ImageFormatRgba32f: return VK_FORMAT_R32G32B32A32_SFLOAT;
 	case spv::ImageFormatRgba32i: return VK_FORMAT_R32G32B32A32_SINT;
@@ -259,7 +259,7 @@
 			in[i] = dyValue.Float(j);
 		}
 	}
-	else if (instruction.samplerMethod == Fetch)
+	else if(instruction.samplerMethod == Fetch)
 	{
 		// The instruction didn't provide a lod operand, but the sampler's Fetch
 		// function requires one to be present. If no lod is supplied, the default
@@ -302,7 +302,7 @@
 	Array<SIMD::Float> out(4);
 	Call<ImageSampler>(cache.function, texture, sampler, &in[0], &out[0], state->routine->constants);
 
-	for (auto i = 0u; i < resultType.sizeInComponents; i++) { result.move(i, out[i]); }
+	for(auto i = 0u; i < resultType.sizeInComponents; i++) { result.move(i, out[i]); }
 
 	return EmitResult::Continue;
 }
@@ -357,7 +357,7 @@
 	Pointer<Int> extent;
 	Int arrayLayers;
 
-	switch (bindingLayout.descriptorType)
+	switch(bindingLayout.descriptorType)
 	{
 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
@@ -380,26 +380,26 @@
 
 	auto dimensions = resultTy.sizeInComponents - (isArrayed ? 1 : 0);
 	std::vector<Int> out;
-	if (lodId != 0)
+	if(lodId != 0)
 	{
 		auto lodVal = GenericValue(this, state, lodId);
 		ASSERT(getType(lodVal.type).sizeInComponents == 1);
 		auto lod = lodVal.Int(0);
 		auto one = SIMD::Int(1);
-		for (uint32_t i = 0; i < dimensions; i++)
+		for(uint32_t i = 0; i < dimensions; i++)
 		{
 			dst.move(i, Max(SIMD::Int(extent[i]) >> lod, one));
 		}
 	}
 	else
 	{
-		for (uint32_t i = 0; i < dimensions; i++)
+		for(uint32_t i = 0; i < dimensions; i++)
 		{
 			dst.move(i, SIMD::Int(extent[i]));
 		}
 	}
 
-	if (isArrayed)
+	if(isArrayed)
 	{
 		auto numElements = isCubeMap ? (arrayLayers / 6) : RValue<Int>(arrayLayers);
 		dst.move(dimensions, SIMD::Int(numElements));
@@ -419,7 +419,7 @@
 
 	Pointer<Byte> descriptor = state->getPointer(imageId).base;
 	Int mipLevels = 0;
-	switch (bindingLayout.descriptorType)
+	switch(bindingLayout.descriptorType)
 	{
 	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
 	case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
@@ -453,7 +453,7 @@
 
 	Pointer<Byte> descriptor = state->getPointer(imageId).base;
 	Int sampleCount = 0;
-	switch (bindingLayout.descriptorType)
+	switch(bindingLayout.descriptorType)
 	{
 	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
 		sampleCount = *Pointer<Int>(descriptor + OFFSET(vk::StorageImageDescriptor, sampleCount)); // uint32_t
@@ -483,12 +483,12 @@
 	SIMD::Int u = coordinate.Int(0);
 	SIMD::Int v = SIMD::Int(0);
 
-	if (getType(coordinate.type).sizeInComponents > 1)
+	if(getType(coordinate.type).sizeInComponents > 1)
 	{
 		v = coordinate.Int(1);
 	}
 
-	if (dim == spv::DimSubpassData)
+	if(dim == spv::DimSubpassData)
 	{
 		u += routine->windowSpacePosition[0];
 		v += routine->windowSpacePosition[1];
@@ -507,26 +507,26 @@
 										: OFFSET(vk::StorageImageDescriptor, samplePitchBytes))));
 
 	ptr += u * SIMD::Int(texelSize);
-	if (dims > 1)
+	if(dims > 1)
 	{
 		ptr += v * rowPitch;
 	}
-	if (dims > 2)
+	if(dims > 2)
 	{
 		ptr += coordinate.Int(2) * slicePitch;
 	}
-	if (isArrayed)
+	if(isArrayed)
 	{
 		ptr += coordinate.Int(dims) * slicePitch;
 	}
 
-	if (dim == spv::DimSubpassData)
+	if(dim == spv::DimSubpassData)
 	{
 		// Multiview input attachment access is to the layer corresponding to the current view
 		ptr += SIMD::Int(routine->viewID) * slicePitch;
 	}
 
-	if (sampleId.value())
+	if(sampleId.value())
 	{
 		GenericValue sample(this, state, sampleId);
 		ptr += sample.Int(0) * samplePitch;
@@ -545,11 +545,11 @@
 
 	Object::ID sampleId = 0;
 
-	if (insn.wordCount() > 5)
+	if(insn.wordCount() > 5)
 	{
 		int operand = 6;
 		auto imageOperands = insn.word(5);
-		if (imageOperands & spv::ImageOperandsSampleMask)
+		if(imageOperands & spv::ImageOperandsSampleMask)
 		{
 			sampleId = insn.word(operand++);
 			imageOperands &= ~spv::ImageOperandsSampleMask;
@@ -576,7 +576,7 @@
 	auto useStencilAspect = (vkFormat == VK_FORMAT_D32_SFLOAT_S8_UINT &&
 			getType(imageType.definition.word(2)).opcode() == spv::OpTypeInt);
 
-	if (useStencilAspect)
+	if(useStencilAspect)
 	{
 		vkFormat = VK_FORMAT_S8_UINT;
 	}
@@ -604,7 +604,7 @@
 	// Round up texel size: for formats smaller than 32 bits per texel, we will emit a bunch
 	// of (overlapping) 32b loads here, and each lane will pick out what it needs from the low bits.
 	// TODO: specialize for small formats?
-	for (auto i = 0; i < (texelSize + 3)/4; i++)
+	for(auto i = 0; i < (texelSize + 3)/4; i++)
 	{
 		packed[i] = texelPtr.Load<SIMD::Int>(robustness, state->activeLaneMask(), false, std::memory_order_relaxed, std::min(texelSize, 4));
 		texelPtr += sizeof(float);
@@ -850,7 +850,7 @@
 	auto numPackedElements = 0u;
 	int texelSize = 0;
 	auto format = static_cast<spv::ImageFormat>(imageType.definition.word(8));
-	switch (format)
+	switch(format)
 	{
 	case spv::ImageFormatRgba32f:
 	case spv::ImageFormatRgba32i:
@@ -957,7 +957,7 @@
 	// SPIR-V 1.4: "If the coordinates are outside the image, the memory location that is accessed is undefined."
 	auto robustness = OutOfBoundsBehavior::UndefinedValue;
 
-	for (auto i = 0u; i < numPackedElements; i++)
+	for(auto i = 0u; i < numPackedElements; i++)
 	{
 		texelPtr.Store(packed[i], robustness, state->activeLaneMask());
 		texelPtr += sizeof(float);
diff --git a/src/Pipeline/SpirvShaderMemory.cpp b/src/Pipeline/SpirvShaderMemory.cpp
index 806fb78..c172d3c 100644
--- a/src/Pipeline/SpirvShaderMemory.cpp
+++ b/src/Pipeline/SpirvShaderMemory.cpp
@@ -60,7 +60,7 @@
 	VisitMemoryObject(pointerId, [&](const MemoryElement& el)
 	{
 		auto p = ptr + el.offset;
-		if (interleavedByLane) { p = InterleaveByLane(p); }  // TODO: Interleave once, then add offset?
+		if(interleavedByLane) { p = InterleaveByLane(p); }  // TODO: Interleave once, then add offset?
 		dst.move(el.index, p.Load<SIMD::Float>(robustness, state->activeLaneMask(), atomic, memoryOrder));
 	});
 
@@ -92,19 +92,19 @@
 	auto robustness = state->getOutOfBoundsBehavior(pointerTy.storageClass);
 
 	SIMD::Int mask = state->activeLaneMask();
-	if (!StoresInHelperInvocation(pointerTy.storageClass))
+	if(!StoresInHelperInvocation(pointerTy.storageClass))
 	{
 		mask = mask & state->storesAndAtomicsMask();
 	}
 
-	if (object.kind == Object::Kind::Constant)
+	if(object.kind == Object::Kind::Constant)
 	{
 		// Constant source data.
 		const uint32_t *src = object.constantValue.get();
 		VisitMemoryObject(pointerId, [&](const MemoryElement& el)
 		{
 			auto p = ptr + el.offset;
-			if (interleavedByLane) { p = InterleaveByLane(p); }
+			if(interleavedByLane) { p = InterleaveByLane(p); }
 			p.Store(SIMD::Int(src[el.index]), robustness, mask, atomic, memoryOrder);
 		});
 	}
@@ -115,7 +115,7 @@
 		VisitMemoryObject(pointerId, [&](const MemoryElement& el)
 		{
 			auto p = ptr + el.offset;
-			if (interleavedByLane) { p = InterleaveByLane(p); }
+			if(interleavedByLane) { p = InterleaveByLane(p); }
 			p.Store(src.Float(el.index), robustness, mask, atomic, memoryOrder);
 		});
 	}
@@ -130,7 +130,7 @@
 	auto &object = getObject(resultId);
 	auto &objectTy = getType(object.type);
 
-	switch (objectTy.storageClass)
+	switch(objectTy.storageClass)
 	{
 	case spv::StorageClassOutput:
 	case spv::StorageClassPrivate:
@@ -153,7 +153,7 @@
 	}
 	case spv::StorageClassInput:
 	{
-		if (object.kind == Object::Kind::InterfaceVariable)
+		if(object.kind == Object::Kind::InterfaceVariable)
 		{
 			auto &dst = routine->getVariable(resultId);
 			int offset = 0;
@@ -178,7 +178,7 @@
 
 		uint32_t arrayIndex = 0;  // TODO(b/129523279)
 		auto setLayout = routine->pipelineLayout->getDescriptorSetLayout(d.DescriptorSet);
-		if (setLayout->hasBinding(d.Binding))
+		if(setLayout->hasBinding(d.Binding))
 		{
 			uint32_t bindingOffset = static_cast<uint32_t>(setLayout->getBindingOffset(d.Binding, arrayIndex));
 			Pointer<Byte> set = routine->descriptorSets[d.DescriptorSet];  // DescriptorSet*
@@ -203,7 +203,7 @@
 		// Note: the module may contain descriptor set references that are not suitable for this implementation -- using a set index higher than the number
 		// of descriptor set binding points we support. As long as the selected entrypoint doesn't actually touch the out of range binding points, this
 		// is valid. In this case make the value nullptr to make it easier to diagnose an attempt to dereference it.
-		if (d.DescriptorSet < vk::MAX_BOUND_DESCRIPTOR_SETS)
+		if(d.DescriptorSet < vk::MAX_BOUND_DESCRIPTOR_SETS)
 		{
 			state->createPointer(resultId, SIMD::Pointer(routine->descriptorSets[d.DescriptorSet], size));
 		}
@@ -223,14 +223,14 @@
 		break;
 	}
 
-	if (insn.wordCount() > 4)
+	if(insn.wordCount() > 4)
 	{
 		Object::ID initializerId = insn.word(4);
-		if (getObject(initializerId).kind != Object::Kind::Constant)
+		if(getObject(initializerId).kind != Object::Kind::Constant)
 		{
 			UNIMPLEMENTED("Non-constant initializers not yet implemented");
 		}
-		switch (objectTy.storageClass)
+		switch(objectTy.storageClass)
 		{
 		case spv::StorageClassOutput:
 		case spv::StorageClassPrivate:
@@ -242,7 +242,7 @@
 			VisitMemoryObject(resultId, [&](const MemoryElement& el)
 			{
 				auto p = ptr + el.offset;
-				if (interleavedByLane) { p = InterleaveByLane(p); }
+				if(interleavedByLane) { p = InterleaveByLane(p); }
 				auto robustness = OutOfBoundsBehavior::UndefinedBehavior;  // Local variables are always within bounds.
 				p.Store(initialValue.Float(el.index), robustness, state->activeLaneMask());
 			});
@@ -282,8 +282,8 @@
 
 		auto dst = dstPtr + dstOffset;
 		auto src = srcPtr + srcOffset;
-		if (dstInterleavedByLane) { dst = InterleaveByLane(dst); }
-		if (srcInterleavedByLane) { src = InterleaveByLane(src); }
+		if(dstInterleavedByLane) { dst = InterleaveByLane(dst); }
+		if(srcInterleavedByLane) { src = InterleaveByLane(src); }
 
 		// TODO(b/131224163): Optimize based on src/dst storage classes.
 		auto robustness = OutOfBoundsBehavior::RobustBufferAccess;
@@ -308,13 +308,13 @@
 	ApplyDecorationsForId(&d, id);
 	auto const &type = getType(id);
 
-	if (d.HasOffset)
+	if(d.HasOffset)
 	{
 		offset += d.Offset;
 		d.HasOffset = false;
 	}
 
-	switch (type.opcode())
+	switch(type.opcode())
 	{
 	case spv::OpTypePointer:
 		VisitMemoryObjectInner(type.definition.word(3), d, index, offset, f);
@@ -327,7 +327,7 @@
 	case spv::OpTypeVector:
 	{
 		auto elemStride = (d.InsideMatrix && d.HasRowMajor && d.RowMajor) ? d.MatrixStride : static_cast<int32_t>(sizeof(float));
-		for (auto i = 0u; i < type.definition.word(3); i++)
+		for(auto i = 0u; i < type.definition.word(3); i++)
 		{
 			VisitMemoryObjectInner(type.definition.word(2), d, index, offset + elemStride * i, f);
 		}
@@ -337,7 +337,7 @@
 	{
 		auto columnStride = (d.HasRowMajor && d.RowMajor) ? static_cast<int32_t>(sizeof(float)) : d.MatrixStride;
 		d.InsideMatrix = true;
-		for (auto i = 0u; i < type.definition.word(3); i++)
+		for(auto i = 0u; i < type.definition.word(3); i++)
 		{
 			ASSERT(d.HasMatrixStride);
 			VisitMemoryObjectInner(type.definition.word(2), d, index, offset + columnStride * i, f);
@@ -345,7 +345,7 @@
 		break;
 	}
 	case spv::OpTypeStruct:
-		for (auto i = 0u; i < type.definition.wordCount() - 2; i++)
+		for(auto i = 0u; i < type.definition.wordCount() - 2; i++)
 		{
 			ApplyDecorationsForIdMember(&d, id, i);
 			VisitMemoryObjectInner(type.definition.word(i + 2), d, index, offset, f);
@@ -354,7 +354,7 @@
 	case spv::OpTypeArray:
 	{
 		auto arraySize = GetConstScalarInt(type.definition.word(3));
-		for (auto i = 0u; i < arraySize; i++)
+		for(auto i = 0u; i < arraySize; i++)
 		{
 			ASSERT(d.HasArrayStride);
 			VisitMemoryObjectInner(type.definition.word(2), d, index, offset + i * d.ArrayStride, f);
@@ -370,7 +370,7 @@
 {
 	auto typeId = getObject(id).type;
 	auto const & type = getType(typeId);
-	if (IsExplicitLayout(type.storageClass))
+	if(IsExplicitLayout(type.storageClass))
 	{
 		Decorations d{};
 		ApplyDecorationsForId(&d, id);
@@ -381,7 +381,7 @@
 	{
 		// Objects without explicit layout are tightly packed.
 		auto &elType = getType(type.element);
-		for (auto index = 0u; index < elType.sizeInComponents; index++)
+		for(auto index = 0u; index < elType.sizeInComponents; index++)
 		{
 			auto offset = static_cast<uint32_t>(index * sizeof(float));
 			f({index, offset, elType});
@@ -393,7 +393,7 @@
 {
 	auto routine = state->routine;
 	auto &object = getObject(id);
-	switch (object.kind)
+	switch(object.kind)
 	{
 		case Object::Kind::Pointer:
 		case Object::Kind::InterfaceVariable:
@@ -414,7 +414,7 @@
 			Pointer<Byte> descriptor = set.base + bindingOffset; // BufferDescriptor*
 			Pointer<Byte> data = *Pointer<Pointer<Byte>>(descriptor + OFFSET(vk::BufferDescriptor, ptr)); // void*
 			Int size = *Pointer<Int>(descriptor + OFFSET(vk::BufferDescriptor, sizeInBytes));
-			if (setLayout->isBindingDynamic(d.Binding))
+			if(setLayout->isBindingDynamic(d.Binding))
 			{
 				uint32_t dynamicBindingIndex =
 					routine->pipelineLayout->getDynamicOffsetBase(d.DescriptorSet) +
@@ -444,7 +444,7 @@
 		spv::MemorySemanticsAcquireReleaseMask |
 		spv::MemorySemanticsSequentiallyConsistentMask
 	);
-	switch (control)
+	switch(control)
 	{
 	case spv::MemorySemanticsMaskNone:                   return std::memory_order_relaxed;
 	case spv::MemorySemanticsAcquireMask:                return std::memory_order_acquire;
@@ -461,7 +461,7 @@
 
 bool SpirvShader::StoresInHelperInvocation(spv::StorageClass storageClass)
 {
-	switch (storageClass)
+	switch(storageClass)
 	{
 	case spv::StorageClassUniform:
 	case spv::StorageClassStorageBuffer:
@@ -474,7 +474,7 @@
 
 bool SpirvShader::IsExplicitLayout(spv::StorageClass storageClass)
 {
-	switch (storageClass)
+	switch(storageClass)
 	{
 	case spv::StorageClassUniform:
 	case spv::StorageClassStorageBuffer:
@@ -497,7 +497,7 @@
 
 bool SpirvShader::IsStorageInterleavedByLane(spv::StorageClass storageClass)
 {
-	switch (storageClass)
+	switch(storageClass)
 	{
 	case spv::StorageClassUniform:
 	case spv::StorageClassStorageBuffer:
diff --git a/src/Pipeline/SpirvShaderSampling.cpp b/src/Pipeline/SpirvShaderSampling.cpp
index de26e00..1d9694a 100644
--- a/src/Pipeline/SpirvShaderSampling.cpp
+++ b/src/Pipeline/SpirvShaderSampling.cpp
@@ -129,7 +129,7 @@
 			uvw[i] = in[i];
 		}
 
-		if (instruction.isDref())
+		if(instruction.isDref())
 		{
 			q = in[i];
 			i++;
diff --git a/src/Pipeline/SpirvShaderSpec.cpp b/src/Pipeline/SpirvShaderSpec.cpp
index 4654754..2d3c402 100644
--- a/src/Pipeline/SpirvShaderSpec.cpp
+++ b/src/Pipeline/SpirvShaderSpec.cpp
@@ -22,7 +22,7 @@
 {
 	auto opcode = static_cast<spv::Op>(insn.word(3));
 
-	switch (opcode)
+	switch(opcode)
 	{
 	case spv::OpIAdd:
 	case spv::OpISub:
@@ -73,7 +73,7 @@
 		auto const &left = getObject(insn.word(5));
 		auto const &right = getObject(insn.word(6));
 
-		for (auto i = 0u; i < getType(result.type).sizeInComponents; i++)
+		for(auto i = 0u; i < getType(result.type).sizeInComponents; i++)
 		{
 			auto sel = cond.constantValue[condIsScalar ? 0 : i];
 			result.constantValue[i] = sel ? left.constantValue[i] : right.constantValue[i];
@@ -87,7 +87,7 @@
 		auto const &compositeObject = getObject(insn.word(4));
 		auto firstComponent = WalkLiteralAccessChain(compositeObject.type, insn.wordCount() - 5, insn.wordPointer(5));
 
-		for (auto i = 0u; i < getType(result.type).sizeInComponents; i++)
+		for(auto i = 0u; i < getType(result.type).sizeInComponents; i++)
 		{
 			result.constantValue[i] = compositeObject.constantValue[firstComponent + i];
 		}
@@ -102,17 +102,17 @@
 		auto firstNewComponent = WalkLiteralAccessChain(result.type, insn.wordCount() - 6, insn.wordPointer(6));
 
 		// old components before
-		for (auto i = 0u; i < firstNewComponent; i++)
+		for(auto i = 0u; i < firstNewComponent; i++)
 		{
 			result.constantValue[i] = oldObject.constantValue[i];
 		}
 		// new part
-		for (auto i = 0u; i < getType(newPart.type).sizeInComponents; i++)
+		for(auto i = 0u; i < getType(newPart.type).sizeInComponents; i++)
 		{
 			result.constantValue[firstNewComponent + i] = newPart.constantValue[i];
 		}
 		// old components after
-		for (auto i = firstNewComponent + getType(newPart.type).sizeInComponents; i < getType(result.type).sizeInComponents; i++)
+		for(auto i = firstNewComponent + getType(newPart.type).sizeInComponents; i < getType(result.type).sizeInComponents; i++)
 		{
 			result.constantValue[i] = oldObject.constantValue[i];
 		}
@@ -125,15 +125,15 @@
 		auto const &firstHalf = getObject(insn.word(4));
 		auto const &secondHalf = getObject(insn.word(5));
 
-		for (auto i = 0u; i < getType(result.type).sizeInComponents; i++)
+		for(auto i = 0u; i < getType(result.type).sizeInComponents; i++)
 		{
 			auto selector = insn.word(6 + i);
-			if (selector == static_cast<uint32_t>(-1))
+			if(selector == static_cast<uint32_t>(-1))
 			{
 				// Undefined value, we'll use zero
 				result.constantValue[i] = 0;
 			}
-			else if (selector < getType(firstHalf.type).sizeInComponents)
+			else if(selector < getType(firstHalf.type).sizeInComponents)
 			{
 				result.constantValue[i] = firstHalf.constantValue[selector];
 			}
@@ -161,12 +161,12 @@
 	auto const &lhs = getObject(insn.word(4));
 	auto size = getType(lhs.type).sizeInComponents;
 
-	for (auto i = 0u; i < size; i++)
+	for(auto i = 0u; i < size; i++)
 	{
 		auto &v = result.constantValue[i];
 		auto l = lhs.constantValue[i];
 
-		switch (opcode)
+		switch(opcode)
 		{
 		case spv::OpSConvert:
 		case spv::OpFConvert:
@@ -212,13 +212,13 @@
 	auto const &rhs = getObject(insn.word(5));
 	auto size = getType(lhs.type).sizeInComponents;
 
-	for (auto i = 0u; i < size; i++)
+	for(auto i = 0u; i < size; i++)
 	{
 		auto &v = result.constantValue[i];
 		auto l = lhs.constantValue[i];
 		auto r = rhs.constantValue[i];
 
-		switch (opcode)
+		switch(opcode)
 		{
 		case spv::OpIAdd:
 			v = l + r;
@@ -236,21 +236,21 @@
 			v = (r == 0) ? 0 : l % r;
 			break;
 		case spv::OpSDiv:
-			if (r == 0) r = UINT32_MAX;
-			if (l == static_cast<uint32_t>(INT32_MIN)) l = UINT32_MAX;
+			if(r == 0) r = UINT32_MAX;
+			if(l == static_cast<uint32_t>(INT32_MIN)) l = UINT32_MAX;
 			v = static_cast<int32_t>(l) / static_cast<int32_t>(r);
 			break;
 		case spv::OpSRem:
-			if (r == 0) r = UINT32_MAX;
-			if (l == static_cast<uint32_t>(INT32_MIN)) l = UINT32_MAX;
+			if(r == 0) r = UINT32_MAX;
+			if(l == static_cast<uint32_t>(INT32_MIN)) l = UINT32_MAX;
 			v = static_cast<int32_t>(l) % static_cast<int32_t>(r);
 			break;
 		case spv::OpSMod:
-			if (r == 0) r = UINT32_MAX;
-			if (l == static_cast<uint32_t>(INT32_MIN)) l = UINT32_MAX;
+			if(r == 0) r = UINT32_MAX;
+			if(l == static_cast<uint32_t>(INT32_MIN)) l = UINT32_MAX;
 			// Test if a signed-multiply would be negative.
 			v = static_cast<int32_t>(l) % static_cast<int32_t>(r);
-			if ((v & 0x80000000) != (r & 0x80000000))
+			if((v & 0x80000000) != (r & 0x80000000))
 				v += r;
 			break;
 		case spv::OpShiftRightLogical:
diff --git a/src/Pipeline/VertexProgram.cpp b/src/Pipeline/VertexProgram.cpp
index 90886a5..b2a731a 100644
--- a/src/Pipeline/VertexProgram.cpp
+++ b/src/Pipeline/VertexProgram.cpp
@@ -66,7 +66,7 @@
 void VertexProgram::program(Pointer<UInt> &batch, UInt& vertexCount)
 {
 	auto it = spirvShader->inputBuiltins.find(spv::BuiltInVertexIndex);
-	if (it != spirvShader->inputBuiltins.end())
+	if(it != spirvShader->inputBuiltins.end())
 	{
 		assert(it->second.SizeInComponents == 1);
 
diff --git a/src/Pipeline/VertexRoutine.cpp b/src/Pipeline/VertexRoutine.cpp
index 8bf0218..85c395a 100644
--- a/src/Pipeline/VertexRoutine.cpp
+++ b/src/Pipeline/VertexRoutine.cpp
@@ -149,10 +149,10 @@
 	cullMask = Int(15);
 
 	auto it = spirvShader->outputBuiltins.find(spv::BuiltInCullDistance);
-	if (it != spirvShader->outputBuiltins.end())
+	if(it != spirvShader->outputBuiltins.end())
 	{
 		auto count = spirvShader->getNumOutputCullDistances();
-		for (uint32_t i = 0; i < count; i++)
+		for(uint32_t i = 0; i < count; i++)
 		{
 			auto const &distance = routine.getVariable(it->second.Id)[it->second.FirstComponent + i];
 			auto mask = SignMask(CmpGE(distance, SIMD::Float(0)));
@@ -178,7 +178,7 @@
 	Pointer<Byte> source3 = buffer + offsets.w;
 
 	UInt4 zero(0);
-	if (robustBufferAccess)
+	if(robustBufferAccess)
 	{
 		// TODO(b/141124876): Optimize for wide-vector gather operations.
 		UInt4 limits = offsets + UInt4(stream.bytesPerAttrib());
diff --git a/src/Reactor/Coroutine.hpp b/src/Reactor/Coroutine.hpp
index 211d68b..8bd601e 100644
--- a/src/Reactor/Coroutine.hpp
+++ b/src/Reactor/Coroutine.hpp
@@ -89,7 +89,7 @@
 //       Yield(Int(1));
 //       Int current = 1;
 //       Int next = 1;
-//       While (true) {
+//       While(true) {
 //           Yield(next);
 //           auto tmp = current + next;
 //           current = next;
@@ -101,7 +101,7 @@
 //   auto s = coroutine();
 //
 //   // Grab the first 20 yielded values and print them.
-//   for (int i = 0; i < 20; i++)
+//   for(int i = 0; i < 20; i++)
 //   {
 //       int val = 0;
 //       s->await(val);
diff --git a/src/Reactor/DebugAndroid.cpp b/src/Reactor/DebugAndroid.cpp
index c511fc3..2a6569c 100644
--- a/src/Reactor/DebugAndroid.cpp
+++ b/src/Reactor/DebugAndroid.cpp
@@ -31,7 +31,7 @@
 	{
 		ALOGE("Waiting for debugger: gdbserver :${PORT} --attach %u. Look for thread %u", getpid(), gettid());
 		volatile int waiting = 1;
-		while (waiting) {
+		while(waiting) {
 			sleep(1);
 		}
 	}
diff --git a/src/Reactor/DebugAndroid.hpp b/src/Reactor/DebugAndroid.hpp
index eced194..bb7451e 100644
--- a/src/Reactor/DebugAndroid.hpp
+++ b/src/Reactor/DebugAndroid.hpp
@@ -49,7 +49,7 @@
 void AndroidEnterDebugger();
 
 #define ASSERT(E) do { \
-		if (!(E)) { \
+		if(!(E)) { \
 			ALOGE("badness: assertion_failed %s in %s at %s:%d", #E,	\
 				  __FUNCTION__, __FILE__, __LINE__);					\
 			AndroidEnterDebugger();										\
diff --git a/src/Reactor/EmulatedReactor.cpp b/src/Reactor/EmulatedReactor.cpp
index efdb5a3..3740224 100644
--- a/src/Reactor/EmulatedReactor.cpp
+++ b/src/Reactor/EmulatedReactor.cpp
@@ -49,7 +49,7 @@
 	Pointer<Byte> baseBytePtr = base;
 
 	out = T(0);
-	for (int i = 0; i < 4; i++)
+	for(int i = 0; i < 4; i++)
 	{
 		If(Extract(mask, i) != 0)
 		{
@@ -72,7 +72,7 @@
 
 	Pointer<Byte> baseBytePtr = base;
 
-	for (int i = 0; i < 4; i++)
+	for(int i = 0; i < 4; i++)
 	{
 		If(Extract(mask, i) != 0)
 		{
diff --git a/src/Reactor/ExecutableMemory.cpp b/src/Reactor/ExecutableMemory.cpp
index 8d5ec55..b6cee9c 100644
--- a/src/Reactor/ExecutableMemory.cpp
+++ b/src/Reactor/ExecutableMemory.cpp
@@ -38,7 +38,7 @@
 #undef allocate
 #undef deallocate
 
-#if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined (_M_X64)) && !defined(__x86__)
+#if(defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)) && !defined(__x86__)
 #define __x86__
 #endif
 
@@ -91,7 +91,7 @@
 #if defined(_WIN32)
 DWORD permissionsToProtectMode(int permissions)
 {
-	switch (permissions) {
+	switch(permissions) {
 		case PERMISSION_READ:
 		  return PAGE_READONLY;
 		case PERMISSION_EXECUTE:
@@ -111,15 +111,15 @@
 int permissionsToMmapProt(int permissions)
 {
 	int result = 0;
-	if (permissions & PERMISSION_READ)
+	if(permissions & PERMISSION_READ)
 	{
 		result |= PROT_READ;
 	}
-	if (permissions & PERMISSION_WRITE)
+	if(permissions & PERMISSION_WRITE)
 	{
 		result |= PROT_WRITE;
 	}
-	if (permissions & PERMISSION_EXECUTE)
+	if(permissions & PERMISSION_EXECUTE)
 	{
 		result |= PROT_EXEC;
 	}
@@ -177,15 +177,15 @@
 #if defined(__Fuchsia__)
 zx_vm_option_t permissionsToZxVmOptions(int permissions) {
 	zx_vm_option_t result = 0;
-	if (permissions & PERMISSION_READ)
+	if(permissions & PERMISSION_READ)
 	{
 		result |= ZX_VM_PERM_READ;
 	}
-	if (permissions & PERMISSION_WRITE)
+	if(permissions & PERMISSION_WRITE)
 	{
 		result |= ZX_VM_PERM_WRITE;
 	}
-	if (permissions & PERMISSION_EXECUTE)
+	if(permissions & PERMISSION_EXECUTE)
 	{
 		result |= ZX_VM_PERM_EXECUTE;
 	}
@@ -277,10 +277,10 @@
 		}
 	#elif defined(__Fuchsia__)
 		zx_handle_t vmo;
-		if (zx_vmo_create(length, 0, &vmo) != ZX_OK) {
+		if(zx_vmo_create(length, 0, &vmo) != ZX_OK) {
 			return nullptr;
 		}
-		if (need_exec &&
+		if(need_exec &&
 		    zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo) != ZX_OK)
 		{
 			return nullptr;
@@ -290,7 +290,7 @@
 			zx_vmar_root_self(), permissionsToZxVmOptions(permissions), 0, vmo,
 			0, length, &reservation);
 		zx_handle_close(vmo);
-		if (status != ZX_OK) {
+		if(status != ZX_OK) {
 			return nullptr;
 		}
 
@@ -327,7 +327,7 @@
 
 void protectMemoryPages(void *memory, size_t bytes, int permissions)
 {
-	if (bytes == 0)
+	if(bytes == 0)
 		return;
 	bytes = roundUp(bytes, memoryPageSize());
 
diff --git a/src/Reactor/LLVMReactor.cpp b/src/Reactor/LLVMReactor.cpp
index c76ff55..4b6bb77 100644
--- a/src/Reactor/LLVMReactor.cpp
+++ b/src/Reactor/LLVMReactor.cpp
@@ -153,7 +153,7 @@
 {
 	std::unique_lock<std::mutex> lock(mutex);
 	auto it = map.find(key);
-	if (it != map.end())
+	if(it != map.end())
 	{
 		return it->second;
 	}
@@ -244,9 +244,9 @@
 #endif
 
 	std::vector<std::string> mattrs;
-	for (auto &feature : features)
+	for(auto &feature : features)
 	{
-		if (feature.second) { mattrs.push_back(feature.first()); }
+		if(feature.second) { mattrs.push_back(feature.first()); }
 	}
 
 	const char* march = nullptr;
@@ -289,7 +289,7 @@
 
 llvm::CodeGenOpt::Level JITGlobals::toLLVM(rr::Optimization::Level level)
 {
-	switch (level)
+	switch(level)
 	{
 		case rr::Optimization::Level::None:       return ::llvm::CodeGenOpt::None;
 		case rr::Optimization::Level::Less:       return ::llvm::CodeGenOpt::Less;
@@ -333,7 +333,7 @@
 			purpose == llvm::SectionMemoryManager::AllocationPurpose::Code;
 		void* addr = rr::allocateMemoryPages(
 			numBytes, flagsToPermissions(flags), need_exec);
-		if (!addr)
+		if(!addr)
 			return llvm::sys::MemoryBlock();
 		return llvm::sys::MemoryBlock(addr, numBytes);
 	}
@@ -362,15 +362,15 @@
 private:
 	int flagsToPermissions(unsigned flags) {
 		int result = 0;
-		if (flags & llvm::sys::Memory::MF_READ)
+		if(flags & llvm::sys::Memory::MF_READ)
 		{
 			result |= rr::PERMISSION_READ;
 		}
-		if (flags & llvm::sys::Memory::MF_WRITE)
+		if(flags & llvm::sys::Memory::MF_WRITE)
 		{
 			result |= rr::PERMISSION_WRITE;
 		}
-		if (flags & llvm::sys::Memory::MF_EXEC)
+		if(flags & llvm::sys::Memory::MF_EXEC)
 		{
 			result |= rr::PERMISSION_EXECUTE;
 		}
@@ -402,7 +402,7 @@
 			session,
 			[&](const std::string &name) {
 				void *func = rr::resolveExternalSymbol(name.c_str());
-				if (func != nullptr)
+				if(func != nullptr)
 				{
 					return llvm::JITSymbol(
 						reinterpret_cast<uintptr_t>(func), llvm::JITSymbolFlags::Absolute);
@@ -410,7 +410,7 @@
 				return objLayer.findSymbol(name, true);
 			},
 			[](llvm::Error err) {
-				if (err)
+				if(err)
 				{
 					// TODO: Log the symbol resolution errors.
 					return;
@@ -438,7 +438,7 @@
 		addresses(count)
 	{
 		std::vector<std::string> mangledNames(count);
-		for (size_t i = 0; i < count; i++)
+		for(size_t i = 0; i < count; i++)
 		{
 			auto func = funcs[i];
 			static size_t numEmittedFunctions = 0;
@@ -461,7 +461,7 @@
 		llvm::cantFail(compileLayer.addModule(moduleKey, std::move(module)));
 
 		// Resolve the function addresses.
-		for (size_t i = 0; i < count; i++)
+		for(size_t i = 0; i < count; i++)
 		{
 			auto symbol = compileLayer.findSymbolIn(moduleKey, mangledNames[i], false);
 			if(auto address = symbol.getAddress())
@@ -502,7 +502,7 @@
 	{
 
 #ifdef ENABLE_RR_DEBUG_INFO
-		if (debugInfo != nullptr)
+		if(debugInfo != nullptr)
 		{
 			return; // Don't optimize if we're generating debug info.
 		}
@@ -702,7 +702,7 @@
 	unsigned numBits = ty->getScalarSizeInBits();
 
 	llvm::Value *max, *min, *extX, *extY;
-	if (isSigned)
+	if(isSigned)
 	{
 		max = llvm::ConstantInt::get(extTy, (1LL << (numBits - 1)) - 1, true);
 		min = llvm::ConstantInt::get(extTy, (-1LL << (numBits - 1)), true);
@@ -739,7 +739,7 @@
 {
 	llvm::Type *ty = x->getType();
 	llvm::Constant *one;
-	if (llvm::VectorType *vectorTy = llvm::dyn_cast<llvm::VectorType>(ty))
+	if(llvm::VectorType *vectorTy = llvm::dyn_cast<llvm::VectorType>(ty))
 	{
 		one = llvm::ConstantVector::getSplat(
 			vectorTy->getNumElements(),
@@ -797,7 +797,7 @@
 
 	llvm::SmallVector<uint32_t, 16> evenIdx;
 	llvm::SmallVector<uint32_t, 16> oddIdx;
-	for (uint64_t i = 0, n = ty->getNumElements(); i < n; i += 2)
+	for(uint64_t i = 0, n = ty->getNumElements(); i < n; i += 2)
 	{
 		evenIdx.push_back(i);
 		oddIdx.push_back(i + 1);
@@ -819,7 +819,7 @@
 	uint64_t truncNumBits = dstElemTy->getIntegerBitWidth();
 	ASSERT_MSG(truncNumBits < 64, "shift 64 must be handled separately. truncNumBits: %d", int(truncNumBits));
 	llvm::Constant *max, *min;
-	if (isSigned)
+	if(isSigned)
 	{
 		max = llvm::ConstantInt::get(srcTy, (1LL << (truncNumBits - 1)) - 1, true);
 		min = llvm::ConstantInt::get(srcTy, (-1LL << (truncNumBits - 1)), true);
@@ -852,7 +852,7 @@
 
 	llvm::Value *ret = jit->builder->CreateZExt(
 		jit->builder->CreateExtractElement(cmp, static_cast<uint64_t>(0)), retTy);
-	for (uint64_t i = 1, n = ty->getNumElements(); i < n; ++i)
+	for(uint64_t i = 1, n = ty->getNumElements(); i < n; ++i)
 	{
 		llvm::Value *elem = jit->builder->CreateZExt(
 			jit->builder->CreateExtractElement(cmp, i), retTy);
@@ -869,7 +869,7 @@
 
 	llvm::Value *ret = jit->builder->CreateZExt(
 		jit->builder->CreateExtractElement(cmp, static_cast<uint64_t>(0)), retTy);
-	for (uint64_t i = 1, n = ty->getNumElements(); i < n; ++i)
+	for(uint64_t i = 1, n = ty->getNumElements(); i < n; ++i)
 	{
 		llvm::Value *elem = jit->builder->CreateZExt(
 			jit->builder->CreateExtractElement(cmp, i), retTy);
@@ -879,7 +879,7 @@
 }
 #endif  // !defined(__i386__) && !defined(__x86_64__)
 
-#if (LLVM_VERSION_MAJOR >= 8) || (!defined(__i386__) && !defined(__x86_64__))
+#if(LLVM_VERSION_MAJOR >= 8) || (!defined(__i386__) && !defined(__x86_64__))
 llvm::Value *lowerPUADDSAT(llvm::Value *x, llvm::Value *y)
 {
 	#if LLVM_VERSION_MAJOR >= 8
@@ -923,7 +923,7 @@
 	llvm::VectorType *extTy = llvm::VectorType::getExtendedElementVectorType(ty);
 
 	llvm::Value *extX, *extY;
-	if (sext)
+	if(sext)
 	{
 		extX = jit->builder->CreateSExt(x, extTy);
 		extY = jit->builder->CreateSExt(y, extTy);
@@ -1051,12 +1051,12 @@
 static uint32_t sync_fetch_and_op(uint32_t volatile *ptr, uint32_t val, F f)
 {
 	// Build an arbitrary op out of looped CAS
-	for (;;)
+	for(;;)
 	{
 		uint32_t expected = *ptr;
 		uint32_t desired = f(expected, val);
 
-		if (expected == __sync_val_compare_and_swap_4(ptr, expected, desired))
+		if(expected == __sync_val_compare_and_swap_4(ptr, expected, desired))
 			return expected;
 	}
 }
@@ -1068,7 +1068,7 @@
 	{
 		static void load(size_t size, void *ptr, void *ret, llvm::AtomicOrdering ordering)
 		{
-			switch (size)
+			switch(size)
 			{
 				case 1: atomicLoad<uint8_t>(ptr, ret, ordering); break;
 				case 2: atomicLoad<uint16_t>(ptr, ret, ordering); break;
@@ -1080,7 +1080,7 @@
 		}
 		static void store(size_t size, void *ptr, void *ret, llvm::AtomicOrdering ordering)
 		{
-			switch (size)
+			switch(size)
 			{
 				case 1: atomicStore<uint8_t>(ptr, ret, ordering); break;
 				case 2: atomicStore<uint16_t>(ptr, ret, ordering); break;
@@ -1211,7 +1211,7 @@
 	// Trim off any underscores from the start of the symbol. LLVM likes
 	// to append these on macOS.
 	const char* trimmed = name;
-	while (trimmed[0] == '_') { trimmed++; }
+	while(trimmed[0] == '_') { trimmed++; }
 
 	auto it = resolver.functions.find(trimmed);
 	// Missing functions will likely make the module fail in exciting non-obvious ways.
@@ -1396,7 +1396,7 @@
 	}
 
 #ifdef ENABLE_RR_DEBUG_INFO
-	if (jit->debugInfo != nullptr)
+	if(jit->debugInfo != nullptr)
 	{
 		jit->debugInfo->Finalize();
 	}
@@ -1696,11 +1696,11 @@
 			auto elTy = T(type);
 			ASSERT(V(ptr)->getType()->getContainedType(0) == elTy);
 
-			if (!atomic)
+			if(!atomic)
 			{
 				return V(jit->builder->CreateAlignedLoad(V(ptr), alignment, isVolatile));
 			}
-			else if (elTy->isIntegerTy() || elTy->isPointerTy())
+			else if(elTy->isIntegerTy() || elTy->isPointerTy())
 			{
 				// Integers and pointers can be atomically loaded by setting
 				// the ordering constraint on the load instruction.
@@ -1708,7 +1708,7 @@
 				load->setAtomic(atomicOrdering(atomic, memoryOrder));
 				return V(load);
 			}
-			else if (elTy->isFloatTy() || elTy->isDoubleTy())
+			else if(elTy->isFloatTy() || elTy->isDoubleTy())
 			{
 				// LLVM claims to support atomic loads of float types as
 				// above, but certain backends cannot deal with this.
@@ -1780,18 +1780,18 @@
 			auto elTy = T(type);
 			ASSERT(V(ptr)->getType()->getContainedType(0) == elTy);
 
-			if (!atomic)
+			if(!atomic)
 			{
 				jit->builder->CreateAlignedStore(V(value), V(ptr), alignment, isVolatile);
 			}
-			else if (elTy->isIntegerTy() || elTy->isPointerTy())
+			else if(elTy->isIntegerTy() || elTy->isPointerTy())
 			{
 				// Integers and pointers can be atomically stored by setting
 				// the ordering constraint on the store instruction.
 				auto store = jit->builder->CreateAlignedStore(V(value), V(ptr), alignment, isVolatile);
 				store->setAtomic(atomicOrdering(atomic, memoryOrder));
 			}
-			else if (elTy->isFloatTy() || elTy->isDoubleTy())
+			else if(elTy->isFloatTy() || elTy->isDoubleTy())
 			{
 				// LLVM claims to support atomic stores of float types as
 				// above, but certain backends cannot deal with this.
@@ -3888,7 +3888,7 @@
 	auto funcTy = ::llvm::FunctionType::get(T(Float::getType()), ::llvm::ArrayRef<llvm::Type*>(T(Float::getType())), false);
 	auto func = jit->module->getOrInsertFunction(name, funcTy);
 	llvm::Value *out = ::llvm::UndefValue::get(T(Float4::getType()));
-	for (uint64_t i = 0; i < 4; i++)
+	for(uint64_t i = 0; i < 4; i++)
 	{
 		auto el = jit->builder->CreateCall(func, V(Nucleus::createExtractElement(v.value, Float::getType(), i)));
 		out = V(Nucleus::createInsertElement(V(out), V(el), i));
@@ -3949,7 +3949,7 @@
 	auto funcTy = ::llvm::FunctionType::get(T(Float::getType()), paramTys, false);
 	auto func = jit->module->getOrInsertFunction("atan2f", funcTy);
 	llvm::Value *out = ::llvm::UndefValue::get(T(Float4::getType()));
-	for (uint64_t i = 0; i < 4; i++)
+	for(uint64_t i = 0; i < 4; i++)
 	{
 		auto el = jit->builder->CreateCall2(func, ARGS(
 				V(Nucleus::createExtractElement(x.value, Float::getType(), i)),
@@ -4057,14 +4057,14 @@
 Value* Call(RValue<Pointer<Byte>> fptr, Type* retTy, std::initializer_list<Value*> args, std::initializer_list<Type*> argTys)
 {
 	::llvm::SmallVector<::llvm::Type*, 8> paramTys;
-	for (auto ty : argTys) { paramTys.push_back(T(ty)); }
+	for(auto ty : argTys) { paramTys.push_back(T(ty)); }
 	auto funcTy = ::llvm::FunctionType::get(T(retTy), paramTys, false);
 
 	auto funcPtrTy = funcTy->getPointerTo();
 	auto funcPtr = jit->builder->CreatePointerCast(V(fptr.value), funcPtrTy);
 
 	::llvm::SmallVector<::llvm::Value*, 8> arguments;
-	for (auto arg : args) { arguments.push_back(V(arg)); }
+	for(auto arg : args) { arguments.push_back(V(arg)); }
 	return V(jit->builder->CreateCall(funcPtr, arguments));
 }
 
@@ -4558,7 +4558,7 @@
 {
 	std::vector<Value*> elements;
 	elements.reserve(n);
-	for (int i = 0; i < n; i++)
+	for(int i = 0; i < n; i++)
 	{
 		auto el = V(jit->builder->CreateExtractElement(V(vec), i));
 		elements.push_back(el);
@@ -4573,9 +4573,9 @@
 	auto intTy = ::llvm::Type::getIntNTy(jit->context, sizeof(int) * 8); // Natural integer width.
 	std::vector<Value*> elements;
 	elements.reserve(vals.size());
-	for (auto v : vals)
+	for(auto v : vals)
 	{
-		if (isSigned)
+		if(isSigned)
 		{
 			elements.push_back(V(jit->builder->CreateSExt(V(v), intTy)));
 		}
@@ -4593,7 +4593,7 @@
 	auto doubleTy = ::llvm::Type::getDoubleTy(jit->context);
 	std::vector<Value*> elements;
 	elements.reserve(vals.size());
-	for (auto v : vals)
+	for(auto v : vals)
 	{
 		elements.push_back(V(jit->builder->CreateFPExt(V(v), doubleTy)));
 	}
@@ -4628,14 +4628,14 @@
 
 	// Build the printf format message string.
 	std::string str;
-	if (file != nullptr) { str += (line > 0) ? "%s:%d " : "%s "; }
-	if (function != nullptr) { str += "%s "; }
+	if(file != nullptr) { str += (line > 0) ? "%s:%d " : "%s "; }
+	if(function != nullptr) { str += "%s "; }
 	str += fmt;
 
 	// Perform subsitution on all '{n}' bracketed indices in the format
 	// message.
 	int i = 0;
-	for (const PrintValue& arg : args)
+	for(const PrintValue& arg : args)
 	{
 		str = replace(str, "{" + std::to_string(i++) + "}", arg.format);
 	}
@@ -4646,23 +4646,23 @@
 	vals.push_back(jit->builder->CreateGlobalStringPtr(str));
 
 	// Add optional file, line and function info if provided.
-	if (file != nullptr)
+	if(file != nullptr)
 	{
 		vals.push_back(jit->builder->CreateGlobalStringPtr(file));
-		if (line > 0)
+		if(line > 0)
 		{
 			vals.push_back(::llvm::ConstantInt::get(intTy, line));
 		}
 	}
-	if (function != nullptr)
+	if(function != nullptr)
 	{
 		vals.push_back(jit->builder->CreateGlobalStringPtr(function));
 	}
 
 	// Add all format arguments.
-	for (const PrintValue& arg : args)
+	for(const PrintValue& arg : args)
 	{
-		for (auto val : arg.values)
+		for(auto val : arg.values)
 		{
 			vals.push_back(V(val));
 		}
@@ -4683,7 +4683,7 @@
 void EmitDebugLocation()
 {
 #ifdef ENABLE_RR_DEBUG_INFO
-	if (jit->debugInfo != nullptr)
+	if(jit->debugInfo != nullptr)
 	{
 		jit->debugInfo->EmitLocation();
 	}
@@ -4693,7 +4693,7 @@
 void EmitDebugVariable(Value* value)
 {
 #ifdef ENABLE_RR_DEBUG_INFO
-	if (jit->debugInfo != nullptr)
+	if(jit->debugInfo != nullptr)
 	{
 		jit->debugInfo->EmitVariable(value);
 	}
@@ -4703,7 +4703,7 @@
 void FlushDebug()
 {
 #ifdef ENABLE_RR_DEBUG_INFO
-	if (jit->debugInfo != nullptr)
+	if(jit->debugInfo != nullptr)
 	{
 		jit->debugInfo->Flush();
 	}
@@ -4761,7 +4761,7 @@
 	//
 	//    bool coroutine_await(CoroutineHandle* handle, YieldType* out)
 	//    {
-	//        if (llvm.coro.done(handle))
+	//        if(llvm.coro.done(handle))
 	//        {
 	//            return false;
 	//        }
@@ -4823,7 +4823,7 @@
 	//
 	//    end:
 	//        SuspendAction action = llvm.coro.suspend(none, true /* final */);  // <-- RESUME POINT
-	//        switch (action)
+	//        switch(action)
 	//        {
 	//        case SuspendActionResume:
 	//            UNREACHABLE(); // Illegal to resume after final suspend.
@@ -4916,7 +4916,7 @@
 
 void Nucleus::yield(Value* val)
 {
-	if (jit->coroutine.id == nullptr)
+	if(jit->coroutine.id == nullptr)
 	{
 		// First call to yield().
 		// Promote the function to a full coroutine.
@@ -4927,7 +4927,7 @@
 	//      promise = val;
 	//
 	//      auto action = llvm.coro.suspend(none, false /* final */); // <-- RESUME POINT
-	//      switch (action)
+	//      switch(action)
 	//      {
 	//      case SuspendActionResume:
 	//          goto resume;
@@ -4969,7 +4969,7 @@
 std::shared_ptr<Routine> Nucleus::acquireCoroutine(const char *name, const Config::Edit &cfgEdit /* = Config::Edit::None */)
 {
 	bool isCoroutine = jit->coroutine.id != nullptr;
-	if (isCoroutine)
+	if(isCoroutine)
 	{
 		jit->builder->CreateBr(jit->coroutine.endBlock);
 	}
@@ -4988,7 +4988,7 @@
 	}
 
 #ifdef ENABLE_RR_DEBUG_INFO
-	if (jit->debugInfo != nullptr)
+	if(jit->debugInfo != nullptr)
 	{
 		jit->debugInfo->Finalize();
 	}
@@ -5001,7 +5001,7 @@
 		jit->module->print(file, 0);
 	}
 
-	if (isCoroutine)
+	if(isCoroutine)
 	{
 		// Run manadory coroutine transforms.
 		llvm::legacy::PassManager pm;
diff --git a/src/Reactor/LLVMReactorDebugInfo.cpp b/src/Reactor/LLVMReactorDebugInfo.cpp
index ee090cc..db97080 100644
--- a/src/Reactor/LLVMReactorDebugInfo.cpp
+++ b/src/Reactor/LLVMReactorDebugInfo.cpp
@@ -104,7 +104,7 @@
 
 void DebugInfo::Finalize()
 {
-	while (diScope.size() > 0)
+	while(diScope.size() > 0)
 	{
 		emitPending(diScope.back(), builder);
 		diScope.pop_back();
@@ -120,12 +120,12 @@
 
 #ifdef ENABLE_RR_EMIT_PRINT_LOCATION
 	static Location lastLocation;
-	if (backtrace.size() == 0)
+	if(backtrace.size() == 0)
 	{
 		return;
 	}
 	Location currLocation = backtrace[backtrace.size() - 1];
-	if (currLocation != lastLocation)
+	if(currLocation != lastLocation)
 	{
 		rr::Print("rr> {0} [{1}:{2}]\n", currLocation.function.name.c_str(), currLocation.function.file.c_str(), currLocation.line);
 		lastLocation = std::move(currLocation);
@@ -142,7 +142,7 @@
 {
 	auto shrink = [this](size_t newsize)
 	{
-		while (diScope.size() > newsize)
+		while(diScope.size() > newsize)
 		{
 			auto &scope = diScope.back();
 			LOG("- STACK(%d): di: %p, location: %s:%d",
@@ -154,18 +154,18 @@
 		}
 	};
 
-	if (backtrace.size() < diScope.size())
+	if(backtrace.size() < diScope.size())
 	{
 		shrink(backtrace.size());
 	}
 
-	for (size_t i = 0; i < diScope.size(); i++)
+	for(size_t i = 0; i < diScope.size(); i++)
 	{
 		auto &scope = diScope[i];
 		auto const &oldLocation = scope.location;
 		auto const &newLocation = backtrace[i];
 
-		if (oldLocation.function != newLocation.function)
+		if(oldLocation.function != newLocation.function)
 		{
 			LOG("  STACK(%d): Changed function %s -> %s", int(i),
 				oldLocation.function.name.c_str(), newLocation.function.name.c_str());
@@ -173,7 +173,7 @@
 			break;
 		}
 
-		if (oldLocation.line > newLocation.line)
+		if(oldLocation.line > newLocation.line)
 		{
 			// Create a new di block to shadow all the variables in the loop.
 			auto file = getOrCreateFile(newLocation.function.file.c_str());
@@ -189,7 +189,7 @@
 		scope.location = newLocation;
 	}
 
-	while (backtrace.size() > diScope.size())
+	while(backtrace.size() > diScope.size())
 	{
 		auto i = diScope.size();
 		auto location = backtrace[i];
@@ -223,7 +223,7 @@
 
 llvm::DILocation* DebugInfo::getLocation(const Backtrace &backtrace, size_t i)
 {
-	if (backtrace.size() == 0) { return nullptr; }
+	if(backtrace.size() == 0) { return nullptr; }
 	assert(backtrace.size() == diScope.size());
 	return llvm::DILocation::get(
 		*context,
@@ -239,18 +239,18 @@
 	auto const& backtrace = getCallerBacktrace();
 	syncScope(backtrace);
 
-	for (int i = backtrace.size() - 1; i >= 0; i--)
+	for(int i = backtrace.size() - 1; i >= 0; i--)
 	{
 		auto const &location = backtrace[i];
 		auto tokens = getOrParseFileTokens(location.function.file.c_str());
 		auto tokIt = tokens->find(location.line);
-		if (tokIt == tokens->end())
+		if(tokIt == tokens->end())
 		{
 			break;
 		}
 		auto token = tokIt->second;
 		auto name = token.identifier;
-		if (token.kind == Token::Return)
+		if(token.kind == Token::Return)
 		{
 			// This is a:
 			//
@@ -269,7 +269,7 @@
 		}
 
 		auto &scope = diScope[i];
-		if (scope.pending.location != location)
+		if(scope.pending.location != location)
 		{
 			emitPending(scope, builder);
 		}
@@ -278,7 +278,7 @@
 		auto block = builder->GetInsertBlock();
 
 		auto insertAfter = block->size() > 0 ? &block->back() : nullptr;
-		while (insertAfter != nullptr && insertAfter->isTerminator())
+		while(insertAfter != nullptr && insertAfter->isTerminator())
 		{
 			insertAfter = insertAfter->getPrevNode();
 		}
@@ -292,7 +292,7 @@
 		scope.pending.insertAfter = insertAfter;
 		scope.pending.scope = scope.di;
 
-		if (token.kind == Token::Return)
+		if(token.kind == Token::Return)
 		{
 			// Insert a noop instruction so the debugger can inspect the
 			// return value before the function scope closes.
@@ -308,12 +308,12 @@
 void DebugInfo::emitPending(Scope &scope, IRBuilder *builder)
 {
 	auto const &pending = scope.pending;
-	if (pending.value == nullptr)
+	if(pending.value == nullptr)
 	{
 		return;
 	}
 
-	if (!scope.symbols.emplace(pending.name).second)
+	if(!scope.symbols.emplace(pending.name).second)
 	{
 		return;
 	}
@@ -326,7 +326,7 @@
 	auto value = pending.value;
 
 	IRBuilder::InsertPointGuard guard(*builder);
-	if (pending.insertAfter != nullptr)
+	if(pending.insertAfter != nullptr)
 	{
 		builder->SetInsertPoint(pending.block, ++pending.insertAfter->getIterator());
 	}
@@ -336,7 +336,7 @@
 	}
 	builder->SetCurrentDebugLocation(pending.diLocation);
 
-	if (!isAlloca)
+	if(!isAlloca)
 	{
 		// While insertDbgValueIntrinsic should be enough to declare a
 		// variable with no storage, variables of RValues can share the same
@@ -362,9 +362,9 @@
 	auto diVar = diBuilder->createAutoVariable(scope.di, pending.name, diFile, pending.location.line, diType);
 
 	auto di = diBuilder->insertDeclare(value, diVar, diBuilder->createExpression(), pending.diLocation, pending.block);
-	if (pending.insertAfter != nullptr) { di->moveAfter(pending.insertAfter); }
+	if(pending.insertAfter != nullptr) { di->moveAfter(pending.insertAfter); }
 
-	if (pending.addNopOnNextLine)
+	if(pending.addNopOnNextLine)
 	{
 		builder->SetCurrentDebugLocation(llvm::DILocation::get(
 			*context,
@@ -451,9 +451,9 @@
 	// Note that bs::stacktrace() effectively returns a vector of addresses; bs::frame construction is where
 	// the heavy lifting is done: resolving the function name, file and line number.
 	namespace bs = boost::stacktrace;
-	for (bs::frame frame : bs::stacktrace())
+	for(bs::frame frame : bs::stacktrace())
 	{
-		if (shouldSkipFile(frame.source_file()))
+		if(shouldSkipFile(frame.source_file()))
 		{
 			continue;
 		}
@@ -464,7 +464,7 @@
 		location.line = frame.source_line();
 		locations.push_back(location);
 
-		if (limit > 0 && locations.size() >= limit)
+		if(limit > 0 && locations.size() >= limit)
 		{
 			break;
 		}
@@ -478,7 +478,7 @@
 llvm::DIType *DebugInfo::getOrCreateType(llvm::Type* type)
 {
 	auto it = diTypes.find(type);
-	if (it != diTypes.end()) { return it->second; }
+	if(it != diTypes.end()) { return it->second; }
 
 	if(type->isPointerTy())
 	{
@@ -496,7 +496,7 @@
 llvm::DIFile *DebugInfo::getOrCreateFile(const char* path)
 {
 	auto it = diFiles.find(path);
-	if (it != diFiles.end()) { return it->second; }
+	if(it != diFiles.end()) { return it->second; }
 	auto dirAndName = splitPath(path);
 	auto file = diBuilder->createFile(dirAndName.second, dirAndName.first);
 	diFiles.emplace(path, file);
@@ -508,7 +508,7 @@
 	static std::regex reLocalDecl(
 		"^" // line start
 		"\\s*" // initial whitespace
-		"(?:For\\s*\\(\\s*)?" // optional 'For ('
+		"(?:For\\s*\\(\\s*)?" // optional 'For('
 		"((?:\\w+(?:<[^>]+>)?)(?:::\\w+(?:<[^>]+>)?)*)" // type (match group 1)
 		"\\s+" // whitespace between type and name
 		"(\\w+)" // identifier (match group 2)
@@ -516,7 +516,7 @@
 		"(\\[.*\\])?"); // optional array suffix (match group 3)
 
 	auto it = fileTokens.find(path);
-	if (it != fileTokens.end())
+	if(it != fileTokens.end())
 	{
 		return it->second.get();
 	}
@@ -526,16 +526,16 @@
 	std::ifstream file(path);
 	std::string line;
 	int lineCount = 0;
-	while (std::getline(file, line))
+	while(std::getline(file, line))
 	{
 		lineCount++;
 		std::smatch match;
-		if (std::regex_search(line, match, reLocalDecl) && match.size() > 3)
+		if(std::regex_search(line, match, reLocalDecl) && match.size() > 3)
 		{
 			bool isArray = match.str(3) != "";
-			if (!isArray) // Cannot deal with C-arrays of values.
+			if(!isArray) // Cannot deal with C-arrays of values.
 			{
-				if (match.str(1) == "return")
+				if(match.str(1) == "return")
 				{
 					(*tokens)[lineCount] = Token{Token::Return};
 				}
diff --git a/src/Reactor/Optimizer.cpp b/src/Reactor/Optimizer.cpp
index 7cc3540..8b74f82 100644
--- a/src/Reactor/Optimizer.cpp
+++ b/src/Reactor/Optimizer.cpp
@@ -381,7 +381,7 @@
 			Ice::Operand *storeValue = nullptr;
 			bool unmatchedLoads = false;
 
-			for (auto& loadStoreInst : getLoadStoreInsts(singleBasicBlock))
+			for(auto& loadStoreInst : getLoadStoreInsts(singleBasicBlock))
 			{
 				Ice::Inst* inst = loadStoreInst.inst;
 
diff --git a/src/Reactor/Print.hpp b/src/Reactor/Print.hpp
index ca06f4e..ff4ad81 100644
--- a/src/Reactor/Print.hpp
+++ b/src/Reactor/Print.hpp
@@ -56,7 +56,7 @@
 	static std::vector<Value*> val(const T* list, int count) {
 		std::vector<Value*> values;
 		values.reserve(count);
-		for (int i = 0; i < count; i++)
+		for(int i = 0; i < count; i++)
 		{
 			auto v = val(list[i]);
 			values.insert(values.end(), v.begin(), v.end());
@@ -70,9 +70,9 @@
 	static std::string fmt(const T* list, int count)
 	{
 		std::string out = "[";
-		for (int i = 0; i < count; i++)
+		for(int i = 0; i < count; i++)
 		{
-			if (i > 0) { out += ", "; }
+			if(i > 0) { out += ", "; }
 			out += fmt(list[i]);
 		}
 		return out + "]";
@@ -149,7 +149,7 @@
 	{
 		std::vector< std::vector<Value*> > lists = {val(v)...};
 		std::vector<Value*> joined;
-		for (const auto& list : lists)
+		for(const auto& list : lists)
 		{
 			joined.insert(joined.end(), list.begin(), list.end());
 		}
diff --git a/src/Reactor/Reactor.cpp b/src/Reactor/Reactor.cpp
index 7767c0f..c088e7b 100644
--- a/src/Reactor/Reactor.cpp
+++ b/src/Reactor/Reactor.cpp
@@ -31,8 +31,8 @@
 							UnaryPredicate pred)
 {
 	ForwardIterator result = first;
-	while (first!=last) {
-		if (!pred(*first)) {
+	while(first!=last) {
+		if(!pred(*first)) {
 			*result = std::move(*first);
 			++result;
 		}
@@ -49,7 +49,7 @@
 
 Config Config::Edit::apply(const Config &cfg) const
 {
-	if (this == &None) { return cfg; }
+	if(this == &None) { return cfg; }
 
 	auto level = optLevelChanged ? optLevel : cfg.optimization.getLevel();
 	auto passes = cfg.optimization.getPasses();
@@ -60,9 +60,9 @@
 template <typename T>
 void rr::Config::Edit::apply(const std::vector<std::pair<ListEdit, T>> & edits, std::vector<T>& list) const
 {
-	for (auto & edit : edits)
+	for(auto & edit : edits)
 	{
-		switch (edit.first)
+		switch(edit.first)
 		{
 		case ListEdit::Add:
 			list.push_back(edit.second);
diff --git a/src/Reactor/Reactor.hpp b/src/Reactor/Reactor.hpp
index 42cbb7c..fc55c51 100644
--- a/src/Reactor/Reactor.hpp
+++ b/src/Reactor/Reactor.hpp
@@ -3458,7 +3458,7 @@
 	BasicBlock *end__ = Nucleus::createBasicBlock();    \
 	Nucleus::createCondBr((cond).value, end__, body__); \
 	Nucleus::setInsertBlock(end__);                     \
-} do {} while (false) // Require a semi-colon at the end of the Until()
+} do {} while(false) // Require a semi-colon at the end of the Until()
 
 enum {IF_BLOCK__, ELSE_CLAUSE__, ELSE_BLOCK__, IFELSE_NUM__};
 
diff --git a/src/Reactor/ReactorUnitTests.cpp b/src/Reactor/ReactorUnitTests.cpp
index afcb6e3..8ffeb7c 100644
--- a/src/Reactor/ReactorUnitTests.cpp
+++ b/src/Reactor/ReactorUnitTests.cpp
@@ -1177,7 +1177,7 @@
 	{
 		Pointer<Byte> ptrA = function.Arg<0>();
 		Pointer<Byte> ptrB = function.Arg<1>();
-		If (ptrA == ptrB)
+		If(ptrA == ptrB)
 		{
 			Return(1);
 		}
@@ -1213,7 +1213,7 @@
 		Return(Float(a) + b);
 	}
 
-	if (auto routine = function("one"))
+	if(auto routine = function("one"))
 	{
 		float result = routine(1, 2.f);
 		EXPECT_EQ(result, 3.f);
@@ -1232,7 +1232,7 @@
 		Return(Float(a) + b + Float(c) + d);
 	}
 
-	if (auto routine = function("one"))
+	if(auto routine = function("one"))
 	{
 		float result = routine(1, 2.f, 3, 4.f);
 		EXPECT_EQ(result, 10.f);
@@ -1252,7 +1252,7 @@
 		Return(Float(a) + b + Float(c) + d + Float(e));
 	}
 
-	if (auto routine = function("one"))
+	if(auto routine = function("one"))
 	{
 		float result = routine(1, 2.f, 3, 4.f, 5);
 		EXPECT_EQ(result, 15.f);
@@ -1277,7 +1277,7 @@
 		Return(Float(a) + b + Float(c) + d + Float(e) + f + Float(g) + h + Float(i) + j);
 	}
 
-	if (auto routine = function("one"))
+	if(auto routine = function("one"))
 	{
 		float result = routine(1, 2.f, 3, 4.f, 5, 6.f, 7, 8.f, 9, 10.f);
 		EXPECT_EQ(result, 55.f);
@@ -1785,7 +1785,7 @@
 			PtrInt base;
 			base.i = 0x10000;
 
-			for (int i = 0; i < 5; i++)
+			for(int i = 0; i < 5; i++)
 			{
 				PtrInt reference;
 				reference.p = &base.p[i];
@@ -1805,7 +1805,7 @@
 
 TEST(ReactorUnitTests, Coroutines_Fibonacci)
 {
-	if (!rr::Caps.CoroutinesSupported)
+	if(!rr::Caps.CoroutinesSupported)
 	{
 		SUCCEED() << "Coroutines not supported";
 		return;
@@ -1817,7 +1817,7 @@
 		Yield(Int(1));
 		Int current = 1;
 		Int next = 1;
-		While (true) {
+		While(true) {
 			Yield(next);
 			auto tmp = current + next;
 			current = next;
@@ -1836,7 +1836,7 @@
 
 	auto count = sizeof(expected) / sizeof(expected[0]);
 
-	for (size_t i = 0; i < count; i++)
+	for(size_t i = 0; i < count; i++)
 	{
 		int out = 0;
 		EXPECT_EQ(coroutine->await(out), true);
@@ -1846,7 +1846,7 @@
 
 TEST(ReactorUnitTests, Coroutines_Parameters)
 {
-	if (!rr::Caps.CoroutinesSupported)
+	if(!rr::Caps.CoroutinesSupported)
 	{
 		SUCCEED() << "Coroutines not supported";
 		return;
@@ -1903,7 +1903,7 @@
 
 		auto routine = function("one");
 
-		for (auto&& v : GetParam().testValues)
+		for(auto&& v : GetParam().testValues)
 		{
 			SCOPED_TRACE(v);
 			EXPECT_FLOAT_EQ(routine(v), GetParam().refFunc(v));
@@ -1924,7 +1924,7 @@
 
 		auto routine = function("one");
 
-		for (auto&& v : GetParam().testValues)
+		for(auto&& v : GetParam().testValues)
 		{
 			SCOPED_TRACE(v);
 			float4_value result = invokeRoutine(routine, float4_value{ v });
@@ -1951,7 +1951,7 @@
 
 		auto routine = function("one");
 
-		for (auto&& v : GetParam().testValues)
+		for(auto&& v : GetParam().testValues)
 		{
 			SCOPED_TRACE(v);
 			float4_value result = invokeRoutine(routine, float4_value{ v.first }, float4_value{ v.second });
@@ -2013,7 +2013,7 @@
 		auto callable = (uint32_t(*)(uint32_t))routine->getEntry();
 
 
-		for (uint32_t i = 0; i < 31; ++i) {
+		for(uint32_t i = 0; i < 31; ++i) {
 			uint32_t result = callable(1 << i);
 			EXPECT_EQ(result, 31 - i);
 		}
@@ -2037,7 +2037,7 @@
 
 		uint32_t x[4];
 
-		for (uint32_t i = 0; i < 31; ++i) {
+		for(uint32_t i = 0; i < 31; ++i) {
 			callable(x, 1 << i);
 			EXPECT_EQ(x[0], 31 - i);
 			EXPECT_EQ(x[1], 31 - i);
@@ -2070,7 +2070,7 @@
 		auto callable = (uint32_t(*)(uint32_t))routine->getEntry();
 
 
-		for (uint32_t i = 0; i < 31; ++i) {
+		for(uint32_t i = 0; i < 31; ++i) {
 			uint32_t result = callable(1 << i);
 			EXPECT_EQ(result, i);
 		}
@@ -2094,7 +2094,7 @@
 
 		uint32_t x[4];
 
-		for (uint32_t i = 0; i < 31; ++i) {
+		for(uint32_t i = 0; i < 31; ++i) {
 			callable(x, 1 << i);
 			EXPECT_EQ(x[0], i);
 			EXPECT_EQ(x[1], i);
diff --git a/src/Reactor/Routine.hpp b/src/Reactor/Routine.hpp
index 922d3ab..f8083b1 100644
--- a/src/Reactor/Routine.hpp
+++ b/src/Reactor/Routine.hpp
@@ -41,7 +41,7 @@
 	explicit RoutineT(const std::shared_ptr<Routine>& routine)
 		: routine(routine)
 	{
-		if (routine)
+		if(routine)
 		{
 			callable = reinterpret_cast<CallableType>(const_cast<void*>(routine->getEntry(0)));
 		}
diff --git a/src/Reactor/SubzeroReactor.cpp b/src/Reactor/SubzeroReactor.cpp
index 4b09748..6201ab1 100644
--- a/src/Reactor/SubzeroReactor.cpp
+++ b/src/Reactor/SubzeroReactor.cpp
@@ -89,7 +89,7 @@
 
 static Ice::OptLevel toIce(rr::Optimization::Level level)
 {
-	switch (level)
+	switch(level)
 	{
 		// Note that Opt_0 and Opt_1 are not implemented by Subzero
 		case rr::Optimization::Level::None:       return Ice::Opt_m1;
@@ -592,7 +592,7 @@
 	static llvm::raw_os_ostream cout(std::cout);
 	static llvm::raw_os_ostream cerr(std::cerr);
 
-	if (subzeroEmitTextAsm)
+	if(subzeroEmitTextAsm)
 	{
 		// Decorate text asm with liveness info
 		Flags.setDecorateAsm(true);
@@ -648,7 +648,7 @@
 
 std::shared_ptr<Routine> Nucleus::acquireRoutine(const char *name, const Config::Edit &cfgEdit /* = Config::Edit::None */)
 {
-	if (subzeroDumpEnabled)
+	if(subzeroDumpEnabled)
 	{
 		// Output dump strings immediately, rather than once buffer is full. Useful for debugging.
 		context->getStrDump().SetUnbuffered();
@@ -678,7 +678,7 @@
 
 	::context->emitFileHeader();
 
-	if (subzeroEmitTextAsm)
+	if(subzeroEmitTextAsm)
 	{
 		::function->emit();
 	}
@@ -3529,7 +3529,7 @@
 
 RValue<Pointer<Byte>> ConstantPointer(void const * ptr)
 {
-	if (sizeof(void*) == 8)
+	if(sizeof(void*) == 8)
 	{
 		return RValue<Pointer<Byte>>(V(::context->getConstantInt64(reinterpret_cast<intptr_t>(ptr))));
 	}
@@ -3549,12 +3549,12 @@
 Value* Call(RValue<Pointer<Byte>> fptr, Type* retTy, std::initializer_list<Value*> args, std::initializer_list<Type*> argTys)
 {
 	Ice::Variable *ret = nullptr;
-	if (retTy != nullptr)
+	if(retTy != nullptr)
 	{
 		ret = ::function->makeVariable(T(retTy));
 	}
 	auto call = Ice::InstCall::create(::function, args.size(), ret, V(fptr.value), false);
-	for (auto arg : args)
+	for(auto arg : args)
 	{
 		call->addArg(V(arg));
 	}
@@ -3696,7 +3696,7 @@
 
 RValue<UInt> Ctlz(RValue<UInt> x, bool isZeroUndef)
 {
-	if (emulateIntrinsics)
+	if(emulateIntrinsics)
 	{
 		UNIMPLEMENTED("Subzero Ctlz()"); return UInt(0);
 	}
@@ -3715,7 +3715,7 @@
 
 RValue<UInt4> Ctlz(RValue<UInt4> x, bool isZeroUndef)
 {
-	if (emulateIntrinsics)
+	if(emulateIntrinsics)
 	{
 		UNIMPLEMENTED("Subzero Ctlz()"); return UInt4(0);
 	}
@@ -3733,7 +3733,7 @@
 
 RValue<UInt> Cttz(RValue<UInt> x, bool isZeroUndef)
 {
-	if (emulateIntrinsics)
+	if(emulateIntrinsics)
 	{
 		UNIMPLEMENTED("Subzero Cttz()"); return UInt(0);
 	}
@@ -3752,7 +3752,7 @@
 
 RValue<UInt4> Cttz(RValue<UInt4> x, bool isZeroUndef)
 {
-	if (emulateIntrinsics)
+	if(emulateIntrinsics)
 	{
 		UNIMPLEMENTED("Subzero Cttz()"); return UInt4(0);
 	}
diff --git a/src/System/DebugAndroid.cpp b/src/System/DebugAndroid.cpp
index c511fc3..2a6569c 100644
--- a/src/System/DebugAndroid.cpp
+++ b/src/System/DebugAndroid.cpp
@@ -31,7 +31,7 @@
 	{
 		ALOGE("Waiting for debugger: gdbserver :${PORT} --attach %u. Look for thread %u", getpid(), gettid());
 		volatile int waiting = 1;
-		while (waiting) {
+		while(waiting) {
 			sleep(1);
 		}
 	}
diff --git a/src/System/DebugAndroid.hpp b/src/System/DebugAndroid.hpp
index eced194..bb7451e 100644
--- a/src/System/DebugAndroid.hpp
+++ b/src/System/DebugAndroid.hpp
@@ -49,7 +49,7 @@
 void AndroidEnterDebugger();
 
 #define ASSERT(E) do { \
-		if (!(E)) { \
+		if(!(E)) { \
 			ALOGE("badness: assertion_failed %s in %s at %s:%d", #E,	\
 				  __FUNCTION__, __FILE__, __LINE__);					\
 			AndroidEnterDebugger();										\
diff --git a/src/System/GrallocAndroid.cpp b/src/System/GrallocAndroid.cpp
index c877e9933..7b1b4ee 100644
--- a/src/System/GrallocAndroid.cpp
+++ b/src/System/GrallocAndroid.cpp
@@ -89,7 +89,7 @@
 		{
 			int32_t fenceFd = -1;
 			int error = m_gralloc1_unlock(m_gralloc1_device, handle, &fenceFd);
-			if (!error)
+			if(!error)
 			{
 				sync_wait(fenceFd, -1);
 				close(fenceFd);
diff --git a/src/System/Linux/MemFd.cpp b/src/System/Linux/MemFd.cpp
index a8f99e2..268e678 100644
--- a/src/System/Linux/MemFd.cpp
+++ b/src/System/Linux/MemFd.cpp
@@ -50,7 +50,7 @@
 
 int LinuxMemFd::exportFd() const
 {
-	if (fd_ < 0)
+	if(fd_ < 0)
 	{
 		return fd_;
 	}
@@ -70,13 +70,13 @@
 	// In the event of no system call this returns -1 with errno set
 	// as ENOSYS.
 	fd_ = syscall(__NR_memfd_create, name, MFD_CLOEXEC);
-	if (fd_ < 0)
+	if(fd_ < 0)
 	{
 		TRACE("memfd_create() returned %d: %s", errno, strerror(errno));
 		return false;
 	}
 	// Ensure there is enough space.
-	if (size > 0 && ::ftruncate(fd_, size) < 0)
+	if(size > 0 && ::ftruncate(fd_, size) < 0)
 	{
 		TRACE("ftruncate() %lld returned %d: %s", (long long)size, errno, strerror(errno));
 		close();
@@ -88,12 +88,12 @@
 
 void LinuxMemFd::close()
 {
-	if (fd_ >= 0)
+	if(fd_ >= 0)
 	{
 		// WARNING: Never retry on close() failure, even with EINTR, see
 		// https://lwn.net/Articles/576478/ for example.
 		int ret = ::close(fd_);
-		if (ret < 0) {
+		if(ret < 0) {
 			TRACE("LinuxMemFd::close() failed with: %s", strerror(errno));
 			assert(false);
 		}
diff --git a/src/System/Memory.cpp b/src/System/Memory.cpp
index e637a55..0ed36e0 100644
--- a/src/System/Memory.cpp
+++ b/src/System/Memory.cpp
@@ -36,7 +36,7 @@
 #undef allocate
 #undef deallocate
 
-#if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined (_M_X64)) && !defined(__x86__)
+#if(defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)) && !defined(__x86__)
 #define __x86__
 #endif
 
diff --git a/src/System/Synchronization.hpp b/src/System/Synchronization.hpp
index 1a8c585..e1b9e1f 100644
--- a/src/System/Synchronization.hpp
+++ b/src/System/Synchronization.hpp
@@ -166,7 +166,7 @@
 std::pair<T, bool> Chan<T>::tryTake()
 {
 	std::unique_lock<std::mutex> lock(mutex);
-	if (queue.size() == 0)
+	if(queue.size() == 0)
 	{
 		return std::make_pair(T{}, false);
 	}
diff --git a/src/Vulkan/Debug/Context.cpp b/src/Vulkan/Debug/Context.cpp
index ce844eb..aa62304 100644
--- a/src/Vulkan/Debug/Context.cpp
+++ b/src/Vulkan/Debug/Context.cpp
@@ -100,7 +100,7 @@
 void Broadcaster::modify(F&& f)
 {
 	std::unique_lock<std::recursive_mutex> lock(mutex);
-	if (listenersInUse > 0)
+	if(listenersInUse > 0)
 	{
 		// The listeners map is current being iterated over.
 		// Make a copy before making the edit.
diff --git a/src/Vulkan/VkBuffer.cpp b/src/Vulkan/VkBuffer.cpp
index ce14bf3..ad24a83 100644
--- a/src/Vulkan/VkBuffer.cpp
+++ b/src/Vulkan/VkBuffer.cpp
@@ -32,9 +32,9 @@
 	}
 
 	const auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
-	for (; nextInfo != nullptr; nextInfo = nextInfo->pNext)
+	for(; nextInfo != nullptr; nextInfo = nextInfo->pNext)
 	{
-		if (nextInfo->sType == VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO)
+		if(nextInfo->sType == VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO)
 		{
 			const auto* externalInfo = reinterpret_cast<const VkExternalMemoryBufferCreateInfo*>(nextInfo);
 			supportedExternalMemoryHandleTypes = externalInfo->handleTypes;
diff --git a/src/Vulkan/VkBufferView.cpp b/src/Vulkan/VkBufferView.cpp
index 085acfe..7b007e2 100644
--- a/src/Vulkan/VkBufferView.cpp
+++ b/src/Vulkan/VkBufferView.cpp
@@ -21,7 +21,7 @@
 BufferView::BufferView(const VkBufferViewCreateInfo* pCreateInfo, void* mem) :
     buffer(vk::Cast(pCreateInfo->buffer)), format(pCreateInfo->format), offset(pCreateInfo->offset)
 {
-    if (pCreateInfo->range == VK_WHOLE_SIZE)
+    if(pCreateInfo->range == VK_WHOLE_SIZE)
     {
         range = buffer->getSize() - offset;
     }
diff --git a/src/Vulkan/VkCommandBuffer.cpp b/src/Vulkan/VkCommandBuffer.cpp
index e025b4b..08178cd 100644
--- a/src/Vulkan/VkCommandBuffer.cpp
+++ b/src/Vulkan/VkCommandBuffer.cpp
@@ -515,7 +515,7 @@
 		executionState.renderer->setBlendConstant(pipeline->hasDynamicState(VK_DYNAMIC_STATE_BLEND_CONSTANTS) ?
 		                                          executionState.dynamicState.blendConstants : pipeline->getBlendConstants());
 
-		if (pipeline->hasDynamicState(VK_DYNAMIC_STATE_DEPTH_BIAS))
+		if(pipeline->hasDynamicState(VK_DYNAMIC_STATE_DEPTH_BIAS))
 		{
 			// If the depth bias clamping feature is not enabled, depthBiasClamp must be 0.0
 			ASSERT(executionState.dynamicState.depthBiasClamp == 0.0f);
@@ -523,7 +523,7 @@
 			context.depthBias = executionState.dynamicState.depthBiasConstantFactor;
 			context.slopeDepthBias = executionState.dynamicState.depthBiasSlopeFactor;
 		}
-		if (pipeline->hasDynamicState(VK_DYNAMIC_STATE_DEPTH_BOUNDS) && context.depthBoundsTestEnable)
+		if(pipeline->hasDynamicState(VK_DYNAMIC_STATE_DEPTH_BOUNDS) && context.depthBoundsTestEnable)
 		{
 			// Unless the VK_EXT_depth_range_unrestricted extension is enabled minDepthBounds and maxDepthBounds must be between 0.0 and 1.0, inclusive
 			ASSERT(executionState.dynamicState.minDepthBounds >= 0.0f &&
@@ -533,17 +533,17 @@
 
 			UNIMPLEMENTED("depthBoundsTestEnable");
 		}
-		if (pipeline->hasDynamicState(VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK) && context.stencilEnable)
+		if(pipeline->hasDynamicState(VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK) && context.stencilEnable)
 		{
 			context.frontStencil.compareMask = executionState.dynamicState.compareMask[0];
 			context.backStencil.compareMask = executionState.dynamicState.compareMask[1];
 		}
-		if (pipeline->hasDynamicState(VK_DYNAMIC_STATE_STENCIL_WRITE_MASK) && context.stencilEnable)
+		if(pipeline->hasDynamicState(VK_DYNAMIC_STATE_STENCIL_WRITE_MASK) && context.stencilEnable)
 		{
 			context.frontStencil.writeMask = executionState.dynamicState.writeMask[0];
 			context.backStencil.writeMask = executionState.dynamicState.writeMask[1];
 		}
-		if (pipeline->hasDynamicState(VK_DYNAMIC_STATE_STENCIL_REFERENCE) && context.stencilEnable)
+		if(pipeline->hasDynamicState(VK_DYNAMIC_STATE_STENCIL_REFERENCE) && context.stencilEnable)
 		{
 			context.frontStencil.reference = executionState.dynamicState.reference[0];
 			context.backStencil.reference = executionState.dynamicState.reference[1];
@@ -554,13 +554,13 @@
 		context.occlusionEnabled = executionState.renderer->hasOcclusionQuery();
 
 		std::vector<std::pair<uint32_t, void *>> indexBuffers;
-		if (indexed)
+		if(indexed)
 		{
 			void *indexBuffer = executionState.indexBufferBinding.buffer->getOffsetPointer(
 					executionState.indexBufferBinding.offset + first * bytesPerIndex(executionState));
-			if (pipeline->hasPrimitiveRestartEnable())
+			if(pipeline->hasPrimitiveRestartEnable())
 			{
-				switch (executionState.indexType)
+				switch(executionState.indexType)
 				{
 				case VK_INDEX_TYPE_UINT16:
 					processPrimitiveRestart(static_cast<uint16_t *>(indexBuffer), count, pipeline, indexBuffers);
@@ -582,16 +582,16 @@
 			indexBuffers.push_back({pipeline->computePrimitiveCount(count), nullptr});
 		}
 
-		for (uint32_t instance = firstInstance; instance != firstInstance + instanceCount; instance++)
+		for(uint32_t instance = firstInstance; instance != firstInstance + instanceCount; instance++)
 		{
 			// FIXME: reconsider instances/views nesting.
 			auto viewMask = executionState.renderPass->getViewMask(executionState.subpassIndex);
-			while (viewMask)
+			while(viewMask)
 			{
 				int viewID = sw::log2i(viewMask);
 				viewMask &= ~(1 << viewID);
 
-				for (auto indexBuffer : indexBuffers)
+				for(auto indexBuffer : indexBuffers)
 				{
 					executionState.renderer->draw(&context, executionState.indexType, indexBuffer.first, vertexOffset,
 												  executionState.events, instance, viewID, indexBuffer.second,
@@ -660,7 +660,7 @@
 
 	void play(vk::CommandBuffer::ExecutionState& executionState) override
 	{
-		for (auto drawId = 0u; drawId < drawCount; drawId++)
+		for(auto drawId = 0u; drawId < drawCount; drawId++)
 		{
 			auto cmd = reinterpret_cast<VkDrawIndirectCommand const *>(buffer->getOffsetPointer(offset + drawId * stride));
 			draw(executionState, false, cmd->vertexCount, cmd->instanceCount, 0, cmd->firstVertex, cmd->firstInstance);
@@ -686,7 +686,7 @@
 
 	void play(vk::CommandBuffer::ExecutionState& executionState) override
 	{
-		for (auto drawId = 0u; drawId < drawCount; drawId++)
+		for(auto drawId = 0u; drawId < drawCount; drawId++)
 		{
 			auto cmd = reinterpret_cast<VkDrawIndexedIndirectCommand const *>(buffer->getOffsetPointer(offset + drawId * stride));
 			draw(executionState, true, cmd->indexCount, cmd->instanceCount, cmd->firstIndex, cmd->vertexOffset, cmd->firstInstance);
@@ -1022,7 +1022,7 @@
 		: pipelineBindPoint(pipelineBindPoint), pipelineLayout(pipelineLayout), set(set), descriptorSet(descriptorSet),
 		  dynamicOffsetCount(dynamicOffsetCount)
 	{
-		for (uint32_t i = 0; i < dynamicOffsetCount; i++)
+		for(uint32_t i = 0; i < dynamicOffsetCount; i++)
 		{
 			this->dynamicOffsets[i] = dynamicOffsets[i];
 		}
@@ -1036,7 +1036,7 @@
 		ASSERT_OR_RETURN(dynamicOffsetBase + dynamicOffsetCount <= vk::MAX_DESCRIPTOR_SET_COMBINED_BUFFERS_DYNAMIC);
 
 		pipelineState.descriptorSets[set] = descriptorSet;
-		for (uint32_t i = 0; i < dynamicOffsetCount; i++)
+		for(uint32_t i = 0; i < dynamicOffsetCount; i++)
 		{
 			pipelineState.descriptorDynamicOffsets[dynamicOffsetBase + i] = dynamicOffsets[i];
 		}
@@ -1152,7 +1152,7 @@
 
 	void play(vk::CommandBuffer::ExecutionState& executionState) override
 	{
-		if (stage & ~(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT))
+		if(stage & ~(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT))
 		{
 			// The `top of pipe` and `draw indirect` stages are handled in command buffer processing so a timestamp write
 			// done in those stages can just be done here without any additional synchronization.
@@ -1691,7 +1691,7 @@
 	for(uint32_t i = 0; i < MAX_VERTEX_INPUT_BINDINGS; i++)
 	{
 		auto &attrib = context.input[i];
-		if (attrib.count)
+		if(attrib.count)
 		{
 			const auto &vertexInput = vertexInputBindings[attrib.binding];
 			VkDeviceSize offset = attrib.offset + vertexInput.offset +
@@ -1713,24 +1713,24 @@
 
 	auto const & subpass = renderPass->getSubpass(subpassIndex);
 
-	for (auto i = 0u; i < subpass.colorAttachmentCount; i++)
+	for(auto i = 0u; i < subpass.colorAttachmentCount; i++)
 	{
 		auto attachmentReference = subpass.pColorAttachments[i];
-		if (attachmentReference.attachment != VK_ATTACHMENT_UNUSED)
+		if(attachmentReference.attachment != VK_ATTACHMENT_UNUSED)
 		{
 			context.renderTarget[i] = renderPassFramebuffer->getAttachment(attachmentReference.attachment);
 		}
 	}
 
 	auto attachmentReference = subpass.pDepthStencilAttachment;
-	if (attachmentReference && attachmentReference->attachment != VK_ATTACHMENT_UNUSED)
+	if(attachmentReference && attachmentReference->attachment != VK_ATTACHMENT_UNUSED)
 	{
 		auto attachment = renderPassFramebuffer->getAttachment(attachmentReference->attachment);
-		if (attachment->hasDepthAspect())
+		if(attachment->hasDepthAspect())
 		{
 			context.depthBuffer = attachment;
 		}
-		if (attachment->hasStencilAspect())
+		if(attachment->hasStencilAspect())
 		{
 			context.stencilBuffer = attachment;
 		}
diff --git a/src/Vulkan/VkDebug.cpp b/src/Vulkan/VkDebug.cpp
index 7b7cf85..02213da 100644
--- a/src/Vulkan/VkDebug.cpp
+++ b/src/Vulkan/VkDebug.cpp
@@ -37,12 +37,12 @@
 	static bool checked = false;
 	static bool res = false;
 
-	if (!checked)
+	if(!checked)
 	{
 		// If a debugger is attached then we're already being ptraced and ptrace
 		// will return a non-zero value.
 		checked = true;
-		if (ptrace(PTRACE_TRACEME, 0, 1, 0) != 0)
+		if(ptrace(PTRACE_TRACEME, 0, 1, 0) != 0)
 		{
 			res = true;
 		}
@@ -144,7 +144,7 @@
 	va_list vararg;
 	va_start(vararg, format);
 
-	if (IsUnderDebugger() && !asserted.exchange(true))
+	if(IsUnderDebugger() && !asserted.exchange(true))
 	{
 		// Abort after tracing and printing to stderr
 		tracev(format, vararg);
@@ -156,7 +156,7 @@
 
 		::abort();
 	}
-	else if (!asserted)
+	else if(!asserted)
 	{
 		tracev(format, vararg);
 		va_end(vararg);
diff --git a/src/Vulkan/VkDescriptorSetLayout.cpp b/src/Vulkan/VkDescriptorSetLayout.cpp
index 73e4d58..8342b44 100644
--- a/src/Vulkan/VkDescriptorSetLayout.cpp
+++ b/src/Vulkan/VkDescriptorSetLayout.cpp
@@ -212,9 +212,9 @@
 uint32_t DescriptorSetLayout::getDynamicDescriptorCount() const
 {
 	uint32_t count = 0;
-	for (size_t i = 0; i < bindingCount; i++)
+	for(size_t i = 0; i < bindingCount; i++)
 	{
-		if (isDynamic(bindings[i].descriptorType))
+		if(isDynamic(bindings[i].descriptorType))
 		{
 			count += bindings[i].descriptorCount;
 		}
@@ -228,9 +228,9 @@
 	ASSERT(isDynamic(bindings[n].descriptorType));
 
 	uint32_t index = 0;
-	for (uint32_t i = 0; i < n; i++)
+	for(uint32_t i = 0; i < n; i++)
 	{
-		if (isDynamic(bindings[i].descriptorType))
+		if(isDynamic(bindings[i].descriptorType))
 		{
 			index += bindings[i].descriptorCount;
 		}
@@ -270,7 +270,7 @@
 
 	ASSERT(reinterpret_cast<intptr_t>(memToWrite) % 16 == 0);  // Each descriptor must be 16-byte aligned.
 
-	if (entry.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
+	if(entry.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
 	{
 		SampledImageDescriptor *imageSampler = reinterpret_cast<SampledImageDescriptor*>(memToWrite);
 
@@ -279,18 +279,18 @@
 			auto update = reinterpret_cast<VkDescriptorImageInfo const *>(src + entry.offset + entry.stride * i);
 			// "All consecutive bindings updated via a single VkWriteDescriptorSet structure, except those with a
 			//  descriptorCount of zero, must all either use immutable samplers or must all not use immutable samplers."
-			if (!binding.pImmutableSamplers)
+			if(!binding.pImmutableSamplers)
 			{
 				imageSampler[i].updateSampler(update->sampler);
 			}
 			imageSampler[i].device = device;
 		}
 	}
-	else if (entry.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER)
+	else if(entry.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER)
 	{
 		SampledImageDescriptor *imageSampler = reinterpret_cast<SampledImageDescriptor*>(memToWrite);
 
-		for (uint32_t i = 0; i < entry.descriptorCount; i++)
+		for(uint32_t i = 0; i < entry.descriptorCount; i++)
 		{
 			auto update = reinterpret_cast<VkBufferView const *>(src + entry.offset + entry.stride * i);
 			auto bufferView = vk::Cast(*update);
@@ -322,7 +322,7 @@
 			mipmap.onePitchP[1] = mipmap.onePitchP[3] = static_cast<short>(numElements);
 		}
 	}
-	else if (entry.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
+	else if(entry.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
 	         entry.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE)
 	{
 		SampledImageDescriptor *imageSampler = reinterpret_cast<SampledImageDescriptor*>(memToWrite);
@@ -430,7 +430,7 @@
 			}
 		}
 	}
-	else if (entry.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
+	else if(entry.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
 	         entry.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
 	{
 		auto descriptor = reinterpret_cast<StorageImageDescriptor *>(memToWrite);
@@ -449,7 +449,7 @@
 			descriptor[i].sampleCount = imageView->getSampleCount();
 			descriptor[i].sizeInBytes = static_cast<int>(imageView->getImageSizeInBytes());
 
-			if (imageView->getFormat().isStencil())
+			if(imageView->getFormat().isStencil())
 			{
 				descriptor[i].stencilPtr = imageView->getOffsetPointer({0, 0, 0}, VK_IMAGE_ASPECT_STENCIL_BIT, 0, 0);
 				descriptor[i].stencilRowPitchBytes = imageView->rowPitchBytes(VK_IMAGE_ASPECT_STENCIL_BIT, 0);
@@ -460,10 +460,10 @@
 			}
 		}
 	}
-	else if (entry.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
+	else if(entry.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
 	{
 		auto descriptor = reinterpret_cast<StorageImageDescriptor *>(memToWrite);
-		for (uint32_t i = 0; i < entry.descriptorCount; i++)
+		for(uint32_t i = 0; i < entry.descriptorCount; i++)
 		{
 			auto update = reinterpret_cast<VkBufferView const *>(src + entry.offset + entry.stride * i);
 			auto bufferView = vk::Cast(*update);
@@ -477,13 +477,13 @@
 			descriptor[i].sizeInBytes = bufferView->getRangeInBytes();
 		}
 	}
-	else if (entry.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
+	else if(entry.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
 	         entry.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
 	         entry.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
 	         entry.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
 	{
 		auto descriptor = reinterpret_cast<BufferDescriptor *>(memToWrite);
-		for (uint32_t i = 0; i < entry.descriptorCount; i++)
+		for(uint32_t i = 0; i < entry.descriptorCount; i++)
 		{
 			auto update = reinterpret_cast<VkDescriptorBufferInfo const *>(src + entry.offset + entry.stride * i);
 			auto buffer = vk::Cast(update->buffer);
@@ -591,7 +591,7 @@
 	e.descriptorCount = writeDescriptorSet.descriptorCount;
 	e.offset = 0;
 	void const *ptr = nullptr;
-	switch (writeDescriptorSet.descriptorType)
+	switch(writeDescriptorSet.descriptorType)
 	{
 	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
 	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
diff --git a/src/Vulkan/VkDevice.cpp b/src/Vulkan/VkDevice.cpp
index a8fcd9c..55add6e 100644
--- a/src/Vulkan/VkDevice.cpp
+++ b/src/Vulkan/VkDevice.cpp
@@ -189,7 +189,7 @@
 		{
 			return any.isSignalled() ? VK_SUCCESS : VK_TIMEOUT;
 		}
-		else if (infiniteTimeout)
+		else if(infiniteTimeout)
 		{
 			any.wait();
 			return VK_SUCCESS;
diff --git a/src/Vulkan/VkDeviceMemory.cpp b/src/Vulkan/VkDeviceMemory.cpp
index faaa4b5..2c1991a 100644
--- a/src/Vulkan/VkDeviceMemory.cpp
+++ b/src/Vulkan/VkDeviceMemory.cpp
@@ -67,7 +67,7 @@
 static bool parseCreateInfo(const VkMemoryAllocateInfo* pAllocateInfo,
 							ExternalMemoryTraits* pTraits)
 {
-	if (T::supportsAllocateInfo(pAllocateInfo))
+	if(T::supportsAllocateInfo(pAllocateInfo))
 	{
 		pTraits->typeFlagBit = T::typeFlagBit;
 		pTraits->instanceSize = sizeof(T);
@@ -99,7 +99,7 @@
 	VkResult allocate(size_t size, void** pBuffer) override
 	{
 		void* buffer = vk::allocate(size, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY);
-		if (!buffer)
+		if(!buffer)
 			return VK_ERROR_OUT_OF_DEVICE_MEMORY;
 
 		*pBuffer = buffer;
@@ -133,7 +133,7 @@
 					   ExternalMemoryTraits*       pTraits)
 {
 #if SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
-	if (parseCreateInfo<OpaqueFdExternalMemory>(pAllocateInfo, pTraits))
+	if(parseCreateInfo<OpaqueFdExternalMemory>(pAllocateInfo, pTraits))
 	{
 		return;
 	}
@@ -154,7 +154,7 @@
 
 void DeviceMemory::destroy(const VkAllocationCallbacks* pAllocator)
 {
-	if (buffer)
+	if(buffer)
 	{
 		external->deallocate(buffer, size);
 		buffer = nullptr;
@@ -173,7 +173,7 @@
 VkResult DeviceMemory::allocate()
 {
 	VkResult result = VK_SUCCESS;
-	if (!buffer)
+	if(!buffer)
 	{
 		result = external->allocate(size, &buffer);
 	}
@@ -202,14 +202,14 @@
 bool DeviceMemory::checkExternalMemoryHandleType(
 		VkExternalMemoryHandleTypeFlags supportedHandleTypes) const
 {
-	if (!supportedHandleTypes)
+	if(!supportedHandleTypes)
 	{
 		// This image or buffer does not need to be stored on external
 		// memory, so this check should always pass.
 		return true;
 	}
 	VkExternalMemoryHandleTypeFlagBits handle_type_bit = external->getFlagBit();
-	if (!handle_type_bit)
+	if(!handle_type_bit)
 	{
 		// This device memory is not external and can accomodate
 		// any image or buffer as well.
diff --git a/src/Vulkan/VkDeviceMemoryExternalLinux.hpp b/src/Vulkan/VkDeviceMemoryExternalLinux.hpp
index 620037d..a9f0d0d 100644
--- a/src/Vulkan/VkDeviceMemoryExternalLinux.hpp
+++ b/src/Vulkan/VkDeviceMemoryExternalLinux.hpp
@@ -37,15 +37,15 @@
 		AllocateInfo(const VkMemoryAllocateInfo* pAllocateInfo)
 		{
 			const auto* createInfo = reinterpret_cast<const VkBaseInStructure*>(pAllocateInfo->pNext);
-			while (createInfo)
+			while(createInfo)
 			{
-				switch (createInfo->sType)
+				switch(createInfo->sType)
 				{
 				case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
 					{
 						const auto* importInfo = reinterpret_cast<const VkImportMemoryFdInfoKHR*>(createInfo);
 
-						if (importInfo->handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
+						if(importInfo->handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
 						{
 							UNIMPLEMENTED("importInfo->handleType");
 						}
@@ -57,7 +57,7 @@
 					{
 						const auto* exportInfo = reinterpret_cast<const VkExportMemoryAllocateInfo*>(createInfo);
 
-						if (exportInfo->handleTypes != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
+						if(exportInfo->handleTypes != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
 						{
 							UNIMPLEMENTED("exportInfo->handleTypes");
 						}
@@ -93,10 +93,10 @@
 
 	VkResult allocate(size_t size, void** pBuffer) override
 	{
-		if (allocateInfo.importFd)
+		if(allocateInfo.importFd)
 		{
 			memfd.importFd(allocateInfo.fd);
-			if (!memfd.isValid())
+			if(!memfd.isValid())
 			{
 				return VK_ERROR_INVALID_EXTERNAL_HANDLE;
 			}
@@ -107,14 +107,14 @@
 			static int counter = 0;
 			char name[40];
 			snprintf(name, sizeof(name), "SwiftShader.Memory.%d", ++counter);
-			if (!memfd.allocate(name, size))
+			if(!memfd.allocate(name, size))
 			{
 				TRACE("memfd.allocate() returned %s", strerror(errno));
 				return VK_ERROR_OUT_OF_DEVICE_MEMORY;
 			}
 		}
 		void* addr = memfd.mapReadWrite(0, size);
-		if (!addr)
+		if(!addr)
 		{
 			return VK_ERROR_MEMORY_MAP_FAILED;
 		}
@@ -135,7 +135,7 @@
 	VkResult exportFd(int* pFd) const override
 	{
 		int fd = memfd.exportFd();
-		if (fd < 0)
+		if(fd < 0)
 		{
 			return VK_ERROR_INVALID_EXTERNAL_HANDLE;
 		}
diff --git a/src/Vulkan/VkFence.hpp b/src/Vulkan/VkFence.hpp
index af8becd..170798b 100644
--- a/src/Vulkan/VkFence.hpp
+++ b/src/Vulkan/VkFence.hpp
@@ -69,7 +69,7 @@
 	void finish() override
 	{
 		ASSERT(!event.isSignalled());
-		if (wg.done())
+		if(wg.done())
 		{
 			event.signal();
 		}
diff --git a/src/Vulkan/VkFormat.cpp b/src/Vulkan/VkFormat.cpp
index dc7a71e..efb3929 100644
--- a/src/Vulkan/VkFormat.cpp
+++ b/src/Vulkan/VkFormat.cpp
@@ -144,8 +144,8 @@
 	// isDepth / isStencil etc to check for their aspect
 
 	VkImageAspectFlags aspects = 0;
-	if (isDepth()) aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
-	if (isStencil()) aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
+	if(isDepth()) aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
+	if(isStencil()) aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
 
 	// YCbCr formats
 	switch(format)
@@ -162,7 +162,7 @@
 	}
 
 	// Anything else is "color".
-	if (!aspects) aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
+	if(!aspects) aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
 	return aspects;
 }
 
diff --git a/src/Vulkan/VkFramebuffer.cpp b/src/Vulkan/VkFramebuffer.cpp
index 33be291..96804c1 100644
--- a/src/Vulkan/VkFramebuffer.cpp
+++ b/src/Vulkan/VkFramebuffer.cpp
@@ -41,22 +41,22 @@
 	ASSERT(attachmentCount == renderPass->getAttachmentCount());
 
 	const uint32_t count = std::min(clearValueCount, attachmentCount);
-	for (uint32_t i = 0; i < count; i++)
+	for(uint32_t i = 0; i < count; i++)
 	{
 		const VkAttachmentDescription attachment = renderPass->getAttachment(i);
 
 		VkImageAspectFlags aspectMask = Format(attachment.format).getAspects();
-		if (attachment.loadOp != VK_ATTACHMENT_LOAD_OP_CLEAR)
+		if(attachment.loadOp != VK_ATTACHMENT_LOAD_OP_CLEAR)
 			aspectMask &= VK_IMAGE_ASPECT_STENCIL_BIT;
-		if (attachment.stencilLoadOp != VK_ATTACHMENT_LOAD_OP_CLEAR)
+		if(attachment.stencilLoadOp != VK_ATTACHMENT_LOAD_OP_CLEAR)
 			aspectMask &= ~VK_IMAGE_ASPECT_STENCIL_BIT;
 
-		if (!aspectMask || !renderPass->isAttachmentUsed(i))
+		if(!aspectMask || !renderPass->isAttachmentUsed(i))
 		{
 			continue;
 		}
 
-		if (renderPass->isMultiView())
+		if(renderPass->isMultiView())
 		{
 			attachments[i]->clearWithLayerMask(pClearValues[i], aspectMask, renderArea,
 											   renderPass->getAttachmentViewMask(i));
@@ -72,17 +72,17 @@
 {
 	VkSubpassDescription subpass = renderPass->getSubpass(subpassIndex);
 
-	if (attachment.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT)
+	if(attachment.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT)
 	{
 		ASSERT(attachment.colorAttachment < subpass.colorAttachmentCount);
 		uint32_t attachmentIndex = subpass.pColorAttachments[attachment.colorAttachment].attachment;
 
-		if (attachmentIndex != VK_ATTACHMENT_UNUSED)
+		if(attachmentIndex != VK_ATTACHMENT_UNUSED)
 		{
 			ASSERT(attachmentIndex < attachmentCount);
 			ImageView *imageView = attachments[attachmentIndex];
 
-			if (renderPass->isMultiView())
+			if(renderPass->isMultiView())
 			{
 				imageView->clearWithLayerMask(attachment.clearValue, attachment.aspectMask, rect.rect,
 											  renderPass->getViewMask(subpassIndex));
@@ -93,16 +93,16 @@
 			}
 		}
 	}
-	else if (attachment.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))
+	else if(attachment.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))
 	{
 		uint32_t attachmentIndex = subpass.pDepthStencilAttachment->attachment;
 
-		if (attachmentIndex != VK_ATTACHMENT_UNUSED)
+		if(attachmentIndex != VK_ATTACHMENT_UNUSED)
 		{
 			ASSERT(attachmentIndex < attachmentCount);
 			ImageView *imageView = attachments[attachmentIndex];
 
-			if (renderPass->isMultiView())
+			if(renderPass->isMultiView())
 			{
 				imageView->clearWithLayerMask(attachment.clearValue, attachment.aspectMask, rect.rect,
 											  renderPass->getViewMask(subpassIndex));
@@ -131,7 +131,7 @@
 			if(resolveAttachment != VK_ATTACHMENT_UNUSED)
 			{
 				ImageView *imageView = attachments[subpass.pColorAttachments[i].attachment];
-				if (renderPass->isMultiView())
+				if(renderPass->isMultiView())
 				{
 					imageView->resolveWithLayerMask(attachments[resolveAttachment],
 													renderPass->getViewMask(subpassIndex));
diff --git a/src/Vulkan/VkGetProcAddress.cpp b/src/Vulkan/VkGetProcAddress.cpp
index 46d19cd..fd870c7 100644
--- a/src/Vulkan/VkGetProcAddress.cpp
+++ b/src/Vulkan/VkGetProcAddress.cpp
@@ -464,7 +464,7 @@
 
 	int OpenDevice(const hw_module_t *module, const char *id, hw_device_t **device)
 	{
-		if (strcmp(id, HWVULKAN_DEVICE_0) != 0) return -ENOENT;
+		if(strcmp(id, HWVULKAN_DEVICE_0) != 0) return -ENOENT;
 		*device = &hal_device.common;
 		return 0;
 	}
diff --git a/src/Vulkan/VkImage.cpp b/src/Vulkan/VkImage.cpp
index 3e78447..9cde705 100644
--- a/src/Vulkan/VkImage.cpp
+++ b/src/Vulkan/VkImage.cpp
@@ -132,9 +132,9 @@
 	}
 
 	const auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
-	for (; nextInfo != nullptr; nextInfo = nextInfo->pNext)
+	for(; nextInfo != nullptr; nextInfo = nextInfo->pNext)
 	{
-		if (nextInfo->sType == VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO)
+		if(nextInfo->sType == VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO)
 		{
 			const auto* externalInfo = reinterpret_cast<const VkExternalMemoryImageCreateInfo*>(nextInfo);
 			supportedExternalMemoryHandleTypes = externalInfo->handleTypes;
@@ -225,7 +225,7 @@
 void Image::getSubresourceLayout(const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout) const
 {
 	// By spec, aspectMask has a single bit set.
-	if (!((pSubresource->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
+	if(!((pSubresource->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
 	      (pSubresource->aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
 	      (pSubresource->aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ||
 	      (pSubresource->aspectMask == VK_IMAGE_ASPECT_PLANE_0_BIT) ||
@@ -248,7 +248,7 @@
 	// Image copy does not perform any conversion, it simply copies memory from
 	// an image to another image that has the same number of bytes per pixel.
 
-	if (!((region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
+	if(!((region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
 	      (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
 	      (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ||
 	      (region.srcSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_0_BIT) ||
@@ -258,7 +258,7 @@
 		UNSUPPORTED("srcSubresource.aspectMask %X", region.srcSubresource.aspectMask);
 	}
 
-	if (!((region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
+	if(!((region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
 	      (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
 	      (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ||
 	      (region.dstSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_0_BIT) ||
diff --git a/src/Vulkan/VkImageView.cpp b/src/Vulkan/VkImageView.cpp
index 27c2f38..905e4a0 100644
--- a/src/Vulkan/VkImageView.cpp
+++ b/src/Vulkan/VkImageView.cpp
@@ -157,7 +157,7 @@
 
 void ImageView::clearWithLayerMask(const VkClearValue &clearValue, VkImageAspectFlags aspectMask, const VkRect2D &renderArea, uint32_t layerMask)
 {
-	while (layerMask)
+	while(layerMask)
 	{
 		uint32_t layer = sw::log2i(layerMask);
 		layerMask &= ~(1 << layer);
@@ -229,7 +229,7 @@
 
 void ImageView::resolveWithLayerMask(ImageView *resolveAttachment, uint32_t layerMask)
 {
-	while (layerMask)
+	while(layerMask)
 	{
 		int layer = sw::log2i(layerMask);
 		layerMask &= ~(1 << layer);
diff --git a/src/Vulkan/VkImageView.hpp b/src/Vulkan/VkImageView.hpp
index 86adedd..a922946 100644
--- a/src/Vulkan/VkImageView.hpp
+++ b/src/Vulkan/VkImageView.hpp
@@ -57,7 +57,7 @@
 
 	int getSampleCount() const
 	{
-		switch (image->getSampleCountFlagBits())
+		switch(image->getSampleCountFlagBits())
 		{
 		case VK_SAMPLE_COUNT_1_BIT: return 1;
 		case VK_SAMPLE_COUNT_4_BIT: return 4;
diff --git a/src/Vulkan/VkInstance.cpp b/src/Vulkan/VkInstance.cpp
index 70dee28..235a3eb 100644
--- a/src/Vulkan/VkInstance.cpp
+++ b/src/Vulkan/VkInstance.cpp
@@ -29,13 +29,13 @@
 
 VkResult Instance::getPhysicalDevices(uint32_t *pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices) const
 {
-	if (!pPhysicalDevices)
+	if(!pPhysicalDevices)
 	{
 		*pPhysicalDeviceCount = 1;
 		return VK_SUCCESS;
 	}
 
-	if (*pPhysicalDeviceCount < 1)
+	if(*pPhysicalDeviceCount < 1)
 	{
 		return VK_INCOMPLETE;
 	}
@@ -49,13 +49,13 @@
 VkResult Instance::getPhysicalDeviceGroups(uint32_t *pPhysicalDeviceGroupCount,
                                            VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties) const
 {
-	if (!pPhysicalDeviceGroupProperties)
+	if(!pPhysicalDeviceGroupProperties)
 	{
 		*pPhysicalDeviceGroupCount = 1;
 		return VK_SUCCESS;
 	}
 
-	if (*pPhysicalDeviceGroupCount < 1)
+	if(*pPhysicalDeviceGroupCount < 1)
 	{
 		return VK_INCOMPLETE;
 	}
diff --git a/src/Vulkan/VkPhysicalDevice.cpp b/src/Vulkan/VkPhysicalDevice.cpp
index 0d1fd76..e30b6ac 100644
--- a/src/Vulkan/VkPhysicalDevice.cpp
+++ b/src/Vulkan/VkPhysicalDevice.cpp
@@ -25,7 +25,7 @@
 static void setExternalMemoryProperties(VkExternalMemoryHandleTypeFlagBits handleType, VkExternalMemoryProperties* properties)
 {
 #if SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
-	if (handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
+	if(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
 	{
 		properties->compatibleHandleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
 		properties->exportFromImportedHandleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
@@ -385,7 +385,7 @@
 void PhysicalDevice::getProperties(const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties) const
 {
 #if SWIFTSHADER_EXTERNAL_SEMAPHORE_OPAQUE_FD
-	if (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT)
+	if(pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT)
 	{
 		pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
 		pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
@@ -394,7 +394,7 @@
 	}
 #endif
 #if VK_USE_PLATFORM_FUCHSIA
-	if (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA)
+	if(pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA)
 	{
 		pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA;
 		pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA;
@@ -785,7 +785,7 @@
 			VkFormatProperties props;
 			getFormatProperties(format, &props);
 			auto features = tiling == VK_IMAGE_TILING_LINEAR ? props.linearTilingFeatures : props.optimalTilingFeatures;
-			if (features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))
+			if(features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))
 			{
 				// Only renderable formats make sense for multisample
 				pImageFormatProperties->sampleCounts = getSampleCounts();
diff --git a/src/Vulkan/VkPipeline.cpp b/src/Vulkan/VkPipeline.cpp
index f72d229..f35b203 100644
--- a/src/Vulkan/VkPipeline.cpp
+++ b/src/Vulkan/VkPipeline.cpp
@@ -165,7 +165,7 @@
 	spvtools::Optimizer opt{SPV_ENV_VULKAN_1_1};
 
 	opt.SetMessageConsumer([](spv_message_level_t level, const char*, const spv_position_t& p, const char* m) {
-		switch (level)
+		switch(level)
 		{
 		case SPV_MSG_FATAL:          vk::warn("SPIR-V FATAL: %d:%d %s\n", int(p.line), int(p.column), m);
 		case SPV_MSG_INTERNAL_ERROR: vk::warn("SPIR-V INTERNAL_ERROR: %d:%d %s\n", int(p.line), int(p.column), m);
@@ -178,10 +178,10 @@
 	});
 
 	// If the pipeline uses specialization, apply the specializations before freezing
-	if (specializationInfo)
+	if(specializationInfo)
 	{
 		std::unordered_map<uint32_t, std::vector<uint32_t>> specializations;
-		for (auto i = 0u; i < specializationInfo->mapEntryCount; ++i)
+		for(auto i = 0u; i < specializationInfo->mapEntryCount; ++i)
 		{
 			auto const &e = specializationInfo->pMapEntries[i];
 			auto value_ptr =
@@ -198,7 +198,7 @@
 	std::vector<uint32_t> optimized;
 	opt.Run(code.data(), code.size(), &optimized);
 
-	if (false) {
+	if(false) {
 		spvtools::SpirvTools core(SPV_ENV_VULKAN_1_1);
 		std::string preOpt;
 		core.Disassemble(code, &preOpt, SPV_BINARY_TO_TEXT_OPTION_NONE);
@@ -395,7 +395,7 @@
 	const VkPipelineMultisampleStateCreateInfo* multisampleState = pCreateInfo->pMultisampleState;
 	if(multisampleState)
 	{
-		switch (multisampleState->rasterizationSamples)
+		switch(multisampleState->rasterizationSamples)
 		{
 		case VK_SAMPLE_COUNT_1_BIT:
 			context.sampleCount = 1;
@@ -407,7 +407,7 @@
 			UNIMPLEMENTED("Unsupported sample count");
 		}
 
-		if (multisampleState->pSampleMask)
+		if(multisampleState->pSampleMask)
 		{
 			context.sampleMask = multisampleState->pSampleMask[0];
 		}
@@ -465,7 +465,7 @@
 			blendConstants.a = colorBlendState->blendConstants[3];
 		}
 
-		for (auto i = 0u; i < colorBlendState->attachmentCount; i++)
+		for(auto i = 0u; i < colorBlendState->attachmentCount; i++)
 		{
 			const VkPipelineColorBlendAttachmentState& attachment = colorBlendState->pAttachments[i];
 			context.colorWriteMask[i] = attachment.colorWriteMask;
@@ -528,9 +528,9 @@
 
 void GraphicsPipeline::compileShaders(const VkAllocationCallbacks* pAllocator, const VkGraphicsPipelineCreateInfo* pCreateInfo, PipelineCache* pPipelineCache)
 {
-	for (auto pStage = pCreateInfo->pStages; pStage != pCreateInfo->pStages + pCreateInfo->stageCount; pStage++)
+	for(auto pStage = pCreateInfo->pStages; pStage != pCreateInfo->pStages + pCreateInfo->stageCount; pStage++)
 	{
-		if (pStage->flags != 0)
+		if(pStage->flags != 0)
 		{
 			UNIMPLEMENTED("pStage->flags");
 		}
diff --git a/src/Vulkan/VkPipelineLayout.cpp b/src/Vulkan/VkPipelineLayout.cpp
index 6d42c34..4394c7e 100644
--- a/src/Vulkan/VkPipelineLayout.cpp
+++ b/src/Vulkan/VkPipelineLayout.cpp
@@ -37,7 +37,7 @@
 
 	dynamicOffsetBases = reinterpret_cast<uint32_t*>(hostMem);
 	uint32_t dynamicOffsetBase = 0;
-	for (uint32_t i = 0; i < setLayoutCount; i++)
+	for(uint32_t i = 0; i < setLayoutCount; i++)
 	{
 		uint32_t dynamicDescriptorCount = setLayouts[i]->getDynamicDescriptorCount();
 		ASSERT_OR_RETURN((dynamicOffsetBase + dynamicDescriptorCount) <= MAX_DESCRIPTOR_SET_COMBINED_BUFFERS_DYNAMIC);
diff --git a/src/Vulkan/VkQueryPool.cpp b/src/Vulkan/VkQueryPool.cpp
index 6c4f975..f31aa8b 100644
--- a/src/Vulkan/VkQueryPool.cpp
+++ b/src/Vulkan/VkQueryPool.cpp
@@ -46,7 +46,7 @@
 
 void Query::finish()
 {
-	if (wg.done())
+	if(wg.done())
 	{
 		auto prevState = state.exchange(FINISHED);
 		ASSERT(prevState == ACTIVE);
diff --git a/src/Vulkan/VkQueue.cpp b/src/Vulkan/VkQueue.cpp
index 4c5f798..36fb0a8 100644
--- a/src/Vulkan/VkQueue.cpp
+++ b/src/Vulkan/VkQueue.cpp
@@ -115,7 +115,7 @@
 
 void Queue::submitQueue(const Task& task)
 {
-	if (renderer == nullptr)
+	if(renderer == nullptr)
 	{
 		renderer.reset(new sw::Renderer(device));
 	}
@@ -144,7 +144,7 @@
 		}
 	}
 
-	if (task.pSubmits)
+	if(task.pSubmits)
 	{
 		toDelete.put(task.pSubmits);
 	}
@@ -202,10 +202,10 @@
 
 void Queue::garbageCollect()
 {
-	while (true)
+	while(true)
 	{
 		auto v = toDelete.tryTake();
-		if (!v.second) { break; }
+		if(!v.second) { break; }
 		vk::deallocate(v.first, DEVICE_MEMORY);
 	}
 }
@@ -226,11 +226,11 @@
 	for(uint32_t i = 0; i < presentInfo->swapchainCount; i++)
 	{
 		VkResult res = vk::Cast(presentInfo->pSwapchains[i])->present(presentInfo->pImageIndices[i]);
-		if (presentInfo->pResults != nullptr)
+		if(presentInfo->pResults != nullptr)
 		{
 			presentInfo->pResults[i] = res;
 		}
-		if (res != VK_SUCCESS)
+		if(res != VK_SUCCESS)
 			result = res;
 	}
 
diff --git a/src/Vulkan/VkRenderPass.cpp b/src/Vulkan/VkRenderPass.cpp
index 27de7a6..3fc4d90 100644
--- a/src/Vulkan/VkRenderPass.cpp
+++ b/src/Vulkan/VkRenderPass.cpp
@@ -48,7 +48,7 @@
 
 		attachmentViewMasks = reinterpret_cast<uint32_t *>(hostMemory);
 		hostMemory += pCreateInfo->attachmentCount * sizeof(uint32_t);
-		for (auto i = 0u; i < pCreateInfo->attachmentCount; i++)
+		for(auto i = 0u; i < pCreateInfo->attachmentCount; i++)
 		{
 			attachmentFirstUse[i] = -1;
 			attachmentViewMasks[i] = 0;
@@ -56,20 +56,20 @@
 	}
 
 	const VkBaseInStructure* extensionCreateInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
-	while (extensionCreateInfo)
+	while(extensionCreateInfo)
 	{
-		switch (extensionCreateInfo->sType)
+		switch(extensionCreateInfo->sType)
 		{
 		case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
 		{
 			// Renderpass uses multiview if this structure is present AND some subpass specifies
 			// a nonzero view mask
 			auto const *multiviewCreateInfo = reinterpret_cast<VkRenderPassMultiviewCreateInfo const *>(extensionCreateInfo);
-			for (auto i = 0u; i < pCreateInfo->subpassCount; i++)
+			for(auto i = 0u; i < pCreateInfo->subpassCount; i++)
 			{
 				masks[i] = multiviewCreateInfo->pViewMasks[i];
 				// This is now a multiview renderpass, so make the masks available
-				if (masks[i])
+				if(masks[i])
 					viewMasks = masks;
 			}
 
@@ -101,9 +101,9 @@
 			       pCreateInfo->pSubpasses[i].pInputAttachments, inputAttachmentsSize);
 			hostMemory += inputAttachmentsSize;
 
-			for (auto j = 0u; j < subpasses[i].inputAttachmentCount; j++)
+			for(auto j = 0u; j < subpasses[i].inputAttachmentCount; j++)
 			{
-				if (subpass.pInputAttachments[j].attachment != VK_ATTACHMENT_UNUSED)
+				if(subpass.pInputAttachments[j].attachment != VK_ATTACHMENT_UNUSED)
 					MarkFirstUse(subpass.pInputAttachments[j].attachment, i);
 			}
 		}
@@ -124,11 +124,11 @@
 				hostMemory += colorAttachmentsSize;
 			}
 
-			for (auto j = 0u; j < subpasses[i].colorAttachmentCount; j++)
+			for(auto j = 0u; j < subpasses[i].colorAttachmentCount; j++)
 			{
-				if (subpass.pColorAttachments[j].attachment != VK_ATTACHMENT_UNUSED)
+				if(subpass.pColorAttachments[j].attachment != VK_ATTACHMENT_UNUSED)
 					MarkFirstUse(subpass.pColorAttachments[j].attachment, i);
-				if (subpass.pResolveAttachments &&
+				if(subpass.pResolveAttachments &&
 					subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED)
 					MarkFirstUse(subpass.pResolveAttachments[j].attachment, i);
 			}
@@ -141,7 +141,7 @@
 				subpass.pDepthStencilAttachment, sizeof(VkAttachmentReference));
 			hostMemory += sizeof(VkAttachmentReference);
 
-			if (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)
+			if(subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)
 				MarkFirstUse(subpass.pDepthStencilAttachment->attachment, i);
 		}
 
@@ -153,9 +153,9 @@
 			       pCreateInfo->pSubpasses[i].pPreserveAttachments, preserveAttachmentSize);
 			hostMemory += preserveAttachmentSize;
 
-			for (auto j = 0u; j < subpasses[i].preserveAttachmentCount; j++)
+			for(auto j = 0u; j < subpasses[i].preserveAttachmentCount; j++)
 			{
-				if (subpass.pPreserveAttachments[j] != VK_ATTACHMENT_UNUSED)
+				if(subpass.pPreserveAttachments[j] != VK_ATTACHMENT_UNUSED)
 					MarkFirstUse(subpass.pPreserveAttachments[j], i);
 			}
 		}
@@ -213,10 +213,10 @@
 	// FIXME: we may not actually need to track attachmentFirstUse if we're going to eagerly
 	//  clear attachments at the start of the renderpass; can use attachmentViewMasks always instead.
 
-	if (attachmentFirstUse[attachment] == -1)
+	if(attachmentFirstUse[attachment] == -1)
 		attachmentFirstUse[attachment] = subpass;
 
-	if (isMultiView())
+	if(isMultiView())
 		attachmentViewMasks[attachment] |= viewMasks[subpass];
 }
 
diff --git a/src/Vulkan/VkSemaphore.cpp b/src/Vulkan/VkSemaphore.cpp
index eb7231e..e921e4e 100644
--- a/src/Vulkan/VkSemaphore.cpp
+++ b/src/Vulkan/VkSemaphore.cpp
@@ -46,13 +46,13 @@
 	// the pCreateInfo->pNext chain indicates it needs to be exported.
 	Impl(const VkSemaphoreCreateInfo* pCreateInfo) {
 		bool exportSemaphore = false;
-		for (const auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
+		for(const auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
 			 nextInfo != nullptr; nextInfo = nextInfo->pNext)
 		{
-			if (nextInfo->sType == VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO)
+			if(nextInfo->sType == VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO)
 			{
 				const auto* exportInfo = reinterpret_cast<const VkExportSemaphoreCreateInfo *>(nextInfo);
-				if (exportInfo->handleTypes != External::kExternalSemaphoreHandleType)
+				if(exportInfo->handleTypes != External::kExternalSemaphoreHandleType)
 				{
 					UNIMPLEMENTED("exportInfo->handleTypes");
 				}
@@ -61,7 +61,7 @@
 			}
 		}
 
-		if (exportSemaphore)
+		if(exportSemaphore)
 		{
 			allocateExternalNoInit();
 			external->init();
@@ -75,7 +75,7 @@
 	// Deallocate the External semaphore if any.
 	void deallocateExternal()
 	{
-		if (external)
+		if(external)
 		{
 			external->~External();
 			external = nullptr;
@@ -93,9 +93,9 @@
 
 	void wait()
 	{
-		if (external)
+		if(external)
 		{
-			if (!external->tryWait())
+			if(!external->tryWait())
 			{
 				// Dispatch the external wait to a background thread.
 				// Even if this creates a new thread on each
@@ -110,7 +110,7 @@
 			// If the import was temporary, reset the semaphore to its
 			// permanent state by getting rid of |external|.
 			// See "6.4.5. Importing Semaphore Payloads" in Vulkan 1.1 spec.
-			if (temporaryImport)
+			if(temporaryImport)
 			{
 				deallocateExternal();
 				temporaryImport = false;
@@ -124,7 +124,7 @@
 
 	void signal()
 	{
-		if (external)
+		if(external)
 		{
 			// Assumes that signalling an external semaphore is non-blocking,
 			// so it can be performed directly either from a fiber or thread.
@@ -152,7 +152,7 @@
 	{
 		// Signal the marl condition variable only.
 		std::unique_lock<std::mutex> lock(mutex);
-		if (!signaled)
+		if(!signaled)
 		{
 			signaled = true;
 			condition.notify_one();
@@ -203,12 +203,12 @@
 VkResult Semaphore::importFd(int fd, bool temporaryImport)
 {
 	std::unique_lock<std::mutex> lock(impl->mutex);
-	if (!impl->external)
+	if(!impl->external)
 	{
 		impl->allocateExternalNoInit();
 	}
 	VkResult result = impl->external->importFd(fd);
-	if (result != VK_SUCCESS)
+	if(result != VK_SUCCESS)
 	{
 		impl->deallocateExternal();
 	}
@@ -222,7 +222,7 @@
 VkResult Semaphore::exportFd(int* pFd) const
 {
 	std::unique_lock<std::mutex> lock(impl->mutex);
-	if (!impl->external)
+	if(!impl->external)
 	{
 		TRACE("Cannot export non-external semaphore");
 		return VK_ERROR_INVALID_EXTERNAL_HANDLE;
@@ -235,7 +235,7 @@
 VkResult Semaphore::importHandle(zx_handle_t handle, bool temporaryImport)
 {
 	std::unique_lock<std::mutex> lock(impl->mutex);
-	if (!impl->external)
+	if(!impl->external)
 	{
 		impl->allocateExternalNoInit();
 	}
@@ -248,7 +248,7 @@
 VkResult Semaphore::exportHandle(zx_handle_t *pHandle) const
 {
 	std::unique_lock<std::mutex> lock(impl->mutex);
-	if (!impl->external)
+	if(!impl->external)
 	{
 		TRACE("Cannot export non-external semaphore");
 		return VK_ERROR_INVALID_EXTERNAL_HANDLE;
diff --git a/src/Vulkan/VkSemaphoreExternalFuchsia.hpp b/src/Vulkan/VkSemaphoreExternalFuchsia.hpp
index 4da9d39..d42787a 100644
--- a/src/Vulkan/VkSemaphoreExternalFuchsia.hpp
+++ b/src/Vulkan/VkSemaphoreExternalFuchsia.hpp
@@ -44,7 +44,7 @@
 	void init()
 	{
 		zx_status_t status = zx_event_create(0, &handle);
-		if (status != ZX_OK)
+		if(status != ZX_OK)
 		{
 			ABORT("zx_event_create() returned %d", status);
 		}
@@ -60,7 +60,7 @@
 	{
 		zx_handle_t new_handle = ZX_HANDLE_INVALID;
 		zx_status_t status = zx_handle_duplicate(handle, ZX_RIGHT_SAME_RIGHTS, &new_handle);
-		if (status != ZX_OK)
+		if(status != ZX_OK)
 		{
 			TRACE("zx_handle_duplicate() returned %d", status);
 			return VK_ERROR_INVALID_EXTERNAL_HANDLE;
@@ -74,17 +74,17 @@
 		zx_signals_t observed = 0;
 		zx_status_t status = zx_object_wait_one(
 				handle, ZX_EVENT_SIGNALED, ZX_TIME_INFINITE, &observed);
-		if (status != ZX_OK)
+		if(status != ZX_OK)
 		{
 			ABORT("zx_object_wait_one() returned %d", status);
 		}
-		if (observed != ZX_EVENT_SIGNALED)
+		if(observed != ZX_EVENT_SIGNALED)
 		{
 			ABORT("zx_object_wait_one() returned observed %x (%x expected)", observed, ZX_EVENT_SIGNALED);
 		}
 		// Need to unsignal the event now, as required by the Vulkan spec.
 		status = zx_object_signal(handle, ZX_EVENT_SIGNALED, 0);
-		if (status != ZX_OK)
+		if(status != ZX_OK)
 		{
 			ABORT("zx_object_signal() returned %d", status);
 		}
@@ -95,17 +95,17 @@
 		zx_signals_t observed = 0;
 		zx_status_t status = zx_object_wait_one(
 				handle, ZX_EVENT_SIGNALED, zx_clock_get_monotonic(), &observed);
-		if (status != ZX_OK)
+		if(status != ZX_OK)
 		{
 			ABORT("zx_object_wait_one() returned %d", status);
 		}
-		if (observed != ZX_EVENT_SIGNALED)
+		if(observed != ZX_EVENT_SIGNALED)
 		{
 			return false;
 		}
 		// Need to unsignal the event now, as required by the Vulkan spec.
 		status = zx_object_signal(handle, ZX_EVENT_SIGNALED, 0);
-		if (status != ZX_OK)
+		if(status != ZX_OK)
 		{
 			ABORT("zx_object_signal() returned %d", status);
 		}
@@ -115,7 +115,7 @@
 	void signal()
 	{
 		zx_status_t status = zx_object_signal(handle, 0, ZX_EVENT_SIGNALED);
-		if (status != ZX_OK)
+		if(status != ZX_OK)
 		{
 			ABORT("zx_object_signal() returned %d", status);
 		}
diff --git a/src/Vulkan/VkSemaphoreExternalLinux.hpp b/src/Vulkan/VkSemaphoreExternalLinux.hpp
index 9ba6c65..47b18c1 100644
--- a/src/Vulkan/VkSemaphoreExternalLinux.hpp
+++ b/src/Vulkan/VkSemaphoreExternalLinux.hpp
@@ -85,7 +85,7 @@
 	void wait()
 	{
 		pthread_mutex_lock(&mutex);
-		while (!signaled)
+		while(!signaled)
 		{
 			pthread_cond_wait(&cond, &mutex);
 		}
@@ -104,7 +104,7 @@
 	{
 		pthread_mutex_lock(&mutex);
 		bool result = signaled;
-		if (result)
+		if(result)
 		{
 			signaled = false;
 		}
@@ -150,7 +150,7 @@
 		static int counter = 0;
 		char name[40];
 		snprintf(name, sizeof(name), "SwiftShader.Semaphore.%d", ++counter);
-		if (!memfd.allocate(name, size))
+		if(!memfd.allocate(name, size))
 		{
 			ABORT("memfd.allocate() returned %s", strerror(errno));
 		}
@@ -172,7 +172,7 @@
 	VkResult exportFd(int* pFd) const
 	{
 		int fd = memfd.exportFd();
-		if (fd < 0)
+		if(fd < 0)
 		{
 			return VK_ERROR_INVALID_EXTERNAL_HANDLE;
 		}
@@ -199,9 +199,9 @@
 	// Unmap the semaphore if needed and close its file descriptor.
 	void close()
 	{
-		if (semaphore)
+		if(semaphore)
 		{
-			if (semaphore->deref())
+			if(semaphore->deref())
 			{
 				semaphore->~SharedSemaphore();
 			}
@@ -216,12 +216,12 @@
 	{
 		// Map the region into memory and point the semaphore to it.
 		void* addr = memfd.mapReadWrite(0, size);
-		if (!addr)
+		if(!addr)
 		{
 			ABORT("mmap() failed: %s", strerror(errno));
 		}
 		semaphore = reinterpret_cast<SharedSemaphore *>(addr);
-		if (needInitialization)
+		if(needInitialization)
 		{
 			new (semaphore) SharedSemaphore();
 		}
diff --git a/src/Vulkan/VkStringify.cpp b/src/Vulkan/VkStringify.cpp
index e7ac2d4..b7c0b63 100644
--- a/src/Vulkan/VkStringify.cpp
+++ b/src/Vulkan/VkStringify.cpp
@@ -457,7 +457,7 @@
 #undef INSERT_ELEMENT
 	};
 	auto it = strings.find(value);
-	if (it != strings.end())
+	if(it != strings.end())
 	{
 		return std::string(it->second);
 	}
diff --git a/src/Vulkan/libVulkan.cpp b/src/Vulkan/libVulkan.cpp
index a01f392..ae9a9b8 100644
--- a/src/Vulkan/libVulkan.cpp
+++ b/src/Vulkan/libVulkan.cpp
@@ -138,7 +138,7 @@
 	static std::weak_ptr<marl::Scheduler> schedulerWeak;
 	std::unique_lock<std::mutex> lock(mutex);
 	auto scheduler = schedulerWeak.lock();
-	if (!scheduler)
+	if(!scheduler)
 	{
 		scheduler = std::make_shared<marl::Scheduler>();
 		scheduler->setThreadInitializer([] {
@@ -273,9 +273,9 @@
 	}
 
 	uint32_t extensionPropertiesCount = sizeof(instanceExtensionProperties) / sizeof(instanceExtensionProperties[0]);
-	for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; ++i)
+	for(uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; ++i)
 	{
-		if (!HasExtensionProperty(pCreateInfo->ppEnabledExtensionNames[i], instanceExtensionProperties, extensionPropertiesCount))
+		if(!HasExtensionProperty(pCreateInfo->ppEnabledExtensionNames[i], instanceExtensionProperties, extensionPropertiesCount))
 		{
 			return VK_ERROR_EXTENSION_NOT_PRESENT;
 		}
@@ -362,7 +362,7 @@
 	vk::Cast(physicalDevice)->getFormatProperties(format, &properties);
 
 	VkFormatFeatureFlags features;
-	switch (tiling)
+	switch(tiling)
 	{
 	case VK_IMAGE_TILING_LINEAR:
 		features = properties.linearTilingFeatures;
@@ -377,43 +377,43 @@
 		features = 0;
 	}
 
-	if (features == 0)
+	if(features == 0)
 	{
 		return VK_ERROR_FORMAT_NOT_SUPPORTED;
 	}
 
 	// Check for usage conflict with features
-	if ((usage & VK_IMAGE_USAGE_SAMPLED_BIT) && !(features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT))
+	if((usage & VK_IMAGE_USAGE_SAMPLED_BIT) && !(features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT))
 	{
 		return VK_ERROR_FORMAT_NOT_SUPPORTED;
 	}
 
-	if ((usage & VK_IMAGE_USAGE_STORAGE_BIT) && !(features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
+	if((usage & VK_IMAGE_USAGE_STORAGE_BIT) && !(features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
 	{
 		return VK_ERROR_FORMAT_NOT_SUPPORTED;
 	}
 
-	if ((usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) && !(features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
+	if((usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) && !(features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
 	{
 		return VK_ERROR_FORMAT_NOT_SUPPORTED;
 	}
 
-	if ((usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) && !(features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))
+	if((usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) && !(features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))
 	{
 		return VK_ERROR_FORMAT_NOT_SUPPORTED;
 	}
 
-	if ((usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) && !(features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)))
+	if((usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) && !(features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)))
 	{
 		return VK_ERROR_FORMAT_NOT_SUPPORTED;
 	}
 
-	if ((usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) && !(features & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT))
+	if((usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) && !(features & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT))
 	{
 		return VK_ERROR_FORMAT_NOT_SUPPORTED;
 	}
 
-	if ((usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) && !(features & VK_FORMAT_FEATURE_TRANSFER_DST_BIT))
+	if((usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) && !(features & VK_FORMAT_FEATURE_TRANSFER_DST_BIT))
 	{
 		return VK_ERROR_FORMAT_NOT_SUPPORTED;
 	}
@@ -513,9 +513,9 @@
 	}
 
 	uint32_t extensionPropertiesCount = sizeof(deviceExtensionProperties) / sizeof(deviceExtensionProperties[0]);
-	for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; ++i)
+	for(uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; ++i)
 	{
-		if (!HasExtensionProperty(pCreateInfo->ppEnabledExtensionNames[i], deviceExtensionProperties, extensionPropertiesCount))
+		if(!HasExtensionProperty(pCreateInfo->ppEnabledExtensionNames[i], deviceExtensionProperties, extensionPropertiesCount))
 		{
 			return VK_ERROR_EXTENSION_NOT_PRESENT;
 		}
@@ -598,7 +598,7 @@
 			{
 				const VkPhysicalDeviceMultiviewFeatures* multiviewFeatures = reinterpret_cast<const VkPhysicalDeviceMultiviewFeatures*>(extensionCreateInfo);
 
-				if (multiviewFeatures->multiviewGeometryShader ||
+				if(multiviewFeatures->multiviewGeometryShader ||
 				    multiviewFeatures->multiviewTessellationShader)
 				{
 					return VK_ERROR_FEATURE_NOT_PRESENT;
@@ -609,7 +609,7 @@
 			{
 				const VkPhysicalDeviceShaderDrawParametersFeatures* shaderDrawParametersFeatures = reinterpret_cast<const VkPhysicalDeviceShaderDrawParametersFeatures*>(extensionCreateInfo);
 
-				if (shaderDrawParametersFeatures->shaderDrawParameters)
+				if(shaderDrawParametersFeatures->shaderDrawParameters)
 				{
 					return VK_ERROR_FEATURE_NOT_PRESENT;
 				}
@@ -813,7 +813,7 @@
 		case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
 		{
 			auto* importInfo = reinterpret_cast<const VkImportMemoryFdInfoKHR *>(allocationInfo);
-			if (importInfo->handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
+			if(importInfo->handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
 			{
 				UNSUPPORTED("importInfo->handleType %u", importInfo->handleType);
 				return VK_ERROR_INVALID_EXTERNAL_HANDLE;
@@ -823,7 +823,7 @@
 		case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
 		{
 			auto* exportInfo = reinterpret_cast<const VkExportMemoryAllocateInfo *>(allocationInfo);
-			if (exportInfo->handleTypes != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
+			if(exportInfo->handleTypes != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
 			{
 				UNSUPPORTED("exportInfo->handleTypes %u", exportInfo->handleTypes);
 				return VK_ERROR_INVALID_EXTERNAL_HANDLE;
@@ -870,7 +870,7 @@
 	TRACE("(VkDevice device = %p, const VkMemoryGetFdInfoKHR* getFdInfo = %p, int* pFd = %p",
 		  device, getFdInfo, pFd);
 
-	if (getFdInfo->handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
+	if(getFdInfo->handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
 	{
 		UNSUPPORTED("pGetFdInfo->handleType %u", getFdInfo->handleType);
 		return VK_ERROR_INVALID_EXTERNAL_HANDLE;
@@ -883,13 +883,13 @@
 	TRACE("(VkDevice device = %p, VkExternalMemoryHandleTypeFlagBits handleType = %x, int fd = %d, VkMemoryFdPropertiesKHR* pMemoryFdProperties = %p)",
 		  device, handleType, fd, pMemoryFdProperties);
 
-	if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
+	if(handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
 	{
 		UNSUPPORTED("handleType %u", handleType);
 		return VK_ERROR_INVALID_EXTERNAL_HANDLE;
 	}
 
-	if (fd < 0)
+	if(fd < 0)
 	{
 		return VK_ERROR_INVALID_EXTERNAL_HANDLE;
 	}
@@ -961,7 +961,7 @@
 	TRACE("(VkDevice device = %p, VkBuffer buffer = %p, VkDeviceMemory memory = %p, VkDeviceSize memoryOffset = %d)",
 		    device, static_cast<void*>(buffer), static_cast<void*>(memory), int(memoryOffset));
 
-	if (!vk::Cast(buffer)->canBindToMemory(vk::Cast(memory)))
+	if(!vk::Cast(buffer)->canBindToMemory(vk::Cast(memory)))
 	{
 		UNSUPPORTED("vkBindBufferMemory with invalid external memory");
 		return VK_ERROR_INVALID_EXTERNAL_HANDLE;
@@ -975,7 +975,7 @@
 	TRACE("(VkDevice device = %p, VkImage image = %p, VkDeviceMemory memory = %p, VkDeviceSize memoryOffset = %d)",
 		    device, static_cast<void*>(image), static_cast<void*>(memory), int(memoryOffset));
 
-	if (!vk::Cast(image)->canBindToMemory(vk::Cast(memory)))
+	if(!vk::Cast(image)->canBindToMemory(vk::Cast(memory)))
 	{
 		UNSUPPORTED("vkBindImageMemory with invalid external memory");
 		return VK_ERROR_INVALID_EXTERNAL_HANDLE;
@@ -1104,7 +1104,7 @@
 	TRACE("(VkDevice device = %p, const VkSemaphoreGetFdInfoKHR* pGetFdInfo = %p, int* pFd = %p)",
 	      device, static_cast<const void*>(pGetFdInfo), static_cast<void*>(pFd));
 
-	if (pGetFdInfo->handleType != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT)
+	if(pGetFdInfo->handleType != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT)
 	{
 		UNIMPLEMENTED("pGetFdInfo->handleType");
 	}
@@ -1117,7 +1117,7 @@
 	TRACE("(VkDevice device = %p, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreInfo = %p",
 	      device, static_cast<const void*>(pImportSemaphoreInfo));
 
-	if (pImportSemaphoreInfo->handleType != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT)
+	if(pImportSemaphoreInfo->handleType != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT)
 	{
 		UNIMPLEMENTED("pImportSemaphoreInfo->handleType");
 	}
@@ -1135,7 +1135,7 @@
 	TRACE("(VkDevice device = %p, const VkImportSemaphoreZirconHandleInfoFUCHSIA* pImportSemaphoreZirconHandleInfo = %p)",
 	      device, pImportSemaphoreZirconHandleInfo);
 
-	if (pImportSemaphoreZirconHandleInfo->handleType != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA)
+	if(pImportSemaphoreZirconHandleInfo->handleType != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA)
 	{
 		UNIMPLEMENTED("pImportSemaphoreZirconHandleInfo->handleType");
 	}
@@ -1154,7 +1154,7 @@
 	TRACE("(VkDevice device = %p, const VkSemaphoreGetZirconHandleInfoFUCHSIA* pGetZirconHandleInfo = %p, zx_handle_t* pZirconHandle = %p)",
 	      device, static_cast<const void*>(pGetZirconHandleInfo), static_cast<void*>(pZirconHandle));
 
-	if (pGetZirconHandleInfo->handleType != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA)
+	if(pGetZirconHandleInfo->handleType != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA)
 	{
 		UNIMPLEMENTED("pGetZirconHandleInfo->handleType");
 	}
@@ -1258,9 +1258,9 @@
 		    device, pCreateInfo, pAllocator, pBuffer);
 
 	auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
-	while (nextInfo)
+	while(nextInfo)
 	{
-		switch (nextInfo->sType)
+		switch(nextInfo->sType)
 		{
 		case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO:
 			// Do nothing. Should be handled by vk::Buffer::Create().
@@ -1361,9 +1361,9 @@
 	VkResult result = vk::Image::Create(pAllocator, pCreateInfo, pImage, vk::Cast(device));
 
 #ifdef __ANDROID__
-	if (swapchainImage)
+	if(swapchainImage)
 	{
-		if (result != VK_SUCCESS)
+		if(result != VK_SUCCESS)
 		{
 			return result;
 		}
@@ -2408,14 +2408,14 @@
 			extInfo = extInfo->pNext;
 		}
 
-		if (!vk::Cast(pBindInfos[i].buffer)->canBindToMemory(vk::Cast(pBindInfos[i].memory)))
+		if(!vk::Cast(pBindInfos[i].buffer)->canBindToMemory(vk::Cast(pBindInfos[i].memory)))
 		{
 			UNSUPPORTED("vkBindBufferMemory2 with invalid external memory");
 			return VK_ERROR_INVALID_EXTERNAL_HANDLE;
 		}
 	}
 
-	for (uint32_t i = 0; i < bindInfoCount; i++)
+	for(uint32_t i = 0; i < bindInfoCount; i++)
 	{
 		vk::Cast(pBindInfos[i].buffer)->bind(vk::Cast(pBindInfos[i].memory), pBindInfos[i].memoryOffset);
 	}
@@ -2430,7 +2430,7 @@
 
 	for(uint32_t i = 0; i < bindInfoCount; i++)
 	{
-		if (!vk::Cast(pBindInfos[i].image)->canBindToMemory(vk::Cast(pBindInfos[i].memory)))
+		if(!vk::Cast(pBindInfos[i].image)->canBindToMemory(vk::Cast(pBindInfos[i].memory)))
 		{
 			UNSUPPORTED("vkBindImageMemory2 with invalid external memory");
 			return VK_ERROR_OUT_OF_DEVICE_MEMORY;
@@ -2443,9 +2443,9 @@
 		VkDeviceSize offset = pBindInfos[i].memoryOffset;
 
 		auto extInfo = reinterpret_cast<VkBaseInStructure const *>(pBindInfos[i].pNext);
-		while (extInfo)
+		while(extInfo)
 		{
-			switch (extInfo->sType)
+			switch(extInfo->sType)
 			{
 			case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO:
 				/* Do nothing */
@@ -3295,7 +3295,7 @@
 	TRACE("(VkDevice device = %p, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities = %p)",
 			device, pDeviceGroupPresentCapabilities);
 
-	for (int i = 0; i < VK_MAX_DEVICE_GROUP_SIZE; i++)
+	for(int i = 0; i < VK_MAX_DEVICE_GROUP_SIZE; i++)
 	{
 		// The only real physical device in the presentation group is device 0,
 		// and it can present to itself.
diff --git a/src/WSI/MetalSurface.mm b/src/WSI/MetalSurface.mm
index 6aec803..c31248a 100644
--- a/src/WSI/MetalSurface.mm
+++ b/src/WSI/MetalSurface.mm
@@ -36,7 +36,7 @@
         {
             UNREACHABLE("MetalLayer::init(): not called from main thread");
         }
-        if ([obj isKindOfClass: [CAMetalLayer class]])
+        if([obj isKindOfClass: [CAMetalLayer class]])
         {
             layer = (CAMetalLayer*)[obj retain];
             layer.framebufferOnly = false;
@@ -147,7 +147,7 @@
             VkExtent2D windowExtent = metalLayer->getExtent();
             VkExtent3D extent = image->getImage()->getMipLevelExtent(VK_IMAGE_ASPECT_COLOR_BIT, 0);
 
-            if (windowExtent.width != extent.width || windowExtent.height != extent.height)
+            if(windowExtent.width != extent.width || windowExtent.height != extent.height)
             {
                 return VK_ERROR_OUT_OF_DATE_KHR;
             }
diff --git a/src/WSI/VkSurfaceKHR.cpp b/src/WSI/VkSurfaceKHR.cpp
index 438d559..c1b0d25 100644
--- a/src/WSI/VkSurfaceKHR.cpp
+++ b/src/WSI/VkSurfaceKHR.cpp
@@ -196,13 +196,13 @@
 
 VkResult SurfaceKHR::getPresentRectangles(uint32_t *pRectCount, VkRect2D *pRects) const
 {
-	if (!pRects)
+	if(!pRects)
 	{
 		*pRectCount = 1;
 		return VK_SUCCESS;
 	}
 
-	if (*pRectCount < 1)
+	if(*pRectCount < 1)
 	{
 		return VK_INCOMPLETE;
 	}
diff --git a/src/WSI/Win32SurfaceKHR.cpp b/src/WSI/Win32SurfaceKHR.cpp
index 7ce07aa..27ab614 100644
--- a/src/WSI/Win32SurfaceKHR.cpp
+++ b/src/WSI/Win32SurfaceKHR.cpp
@@ -91,7 +91,7 @@
 
 	VkExtent3D extent = image->getImage()->getMipLevelExtent(VK_IMAGE_ASPECT_COLOR_BIT, 0);
 
-	if (windowExtent.width != extent.width || windowExtent.height != extent.height)
+	if(windowExtent.width != extent.width || windowExtent.height != extent.height)
 	{
 		return VK_ERROR_OUT_OF_DATE_KHR;
 	}
@@ -110,19 +110,19 @@
 void Win32SurfaceKHR::lazyCreateFrameBuffer()
 {
 	auto currWindowExtent = getWindowSize(hwnd);
-	if (currWindowExtent.width == windowExtent.width && currWindowExtent.height == windowExtent.height)
+	if(currWindowExtent.width == windowExtent.width && currWindowExtent.height == windowExtent.height)
 	{
 		return;
 	}
 
 	windowExtent = currWindowExtent;
 
-	if (framebuffer)
+	if(framebuffer)
 	{
 		destroyFrameBuffer();
 	}
 
-	if (windowExtent.width == 0 || windowExtent.height == 0)
+	if(windowExtent.width == 0 || windowExtent.height == 0)
 	{
 		return;
 	}
diff --git a/src/WSI/XcbSurfaceKHR.cpp b/src/WSI/XcbSurfaceKHR.cpp
index db68358..3379e86 100644
--- a/src/WSI/XcbSurfaceKHR.cpp
+++ b/src/WSI/XcbSurfaceKHR.cpp
@@ -67,12 +67,12 @@
 	{
 		static auto exports = []
 		{
-			if (getProcAddress(RTLD_DEFAULT, "xcb_create_gc"))
+			if(getProcAddress(RTLD_DEFAULT, "xcb_create_gc"))
 			{
 				return std::unique_ptr<LibXcbExports>(new LibXcbExports(RTLD_DEFAULT));
 			}
 
-			if (auto lib = loadLibrary("libxcb.so.1"))
+			if(auto lib = loadLibrary("libxcb.so.1"))
 			{
 				return std::unique_ptr<LibXcbExports>(new LibXcbExports(lib));
 			}
@@ -149,7 +149,7 @@
 		free(geom);
 		VkExtent3D extent = image->getImage()->getMipLevelExtent(VK_IMAGE_ASPECT_COLOR_BIT, 0);
 
-		if (windowExtent.width != extent.width || windowExtent.height != extent.height)
+		if(windowExtent.width != extent.width || windowExtent.height != extent.height)
 		{
 			return VK_ERROR_OUT_OF_DATE_KHR;
 		}
diff --git a/src/WSI/XlibSurfaceKHR.cpp b/src/WSI/XlibSurfaceKHR.cpp
index b3a0275..ffa60e2 100644
--- a/src/WSI/XlibSurfaceKHR.cpp
+++ b/src/WSI/XlibSurfaceKHR.cpp
@@ -96,7 +96,7 @@
 			VkExtent2D windowExtent = {static_cast<uint32_t>(attr.width), static_cast<uint32_t>(attr.height)};
 			VkExtent3D extent = image->getImage()->getMipLevelExtent(VK_IMAGE_ASPECT_COLOR_BIT, 0);
 
-			if (windowExtent.width != extent.width || windowExtent.height != extent.height)
+			if(windowExtent.width != extent.width || windowExtent.height != extent.height)
 			{
 				return VK_ERROR_OUT_OF_DATE_KHR;
 			}