Format switch statements consistently

Each non-fallthrough case should end with a break (or return). If a
scope is needed because the case defines local variables, that scope's
closing brace should come before the break.
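
For instance (the identifiers are invented for illustration):

case 0:
    {
        int count = CountItems();  // local variable, hence the scope
        Process(count);
    }       // the scope closes here...
    break;  // ...and the break follows, outside of it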

Keeping the break at the end of the case, rather than buried inside a
nested block, avoids bugs such as:

switch(i)
{
case 0:
    if(c)
    {
        // lots
        // of
        // code

        // Easy to misread as always breaking: if the if(c) above goes
        // unnoticed, so does the conditional fallthrough to case 1.
        break;
    }

case 1:
    // ...
}
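
If the fallthrough above is unintended, following the rule moves the
break to the end of the case, where it cannot be overlooked (a sketch
of the corrected shape, not code from this change):

switch(i)
{
case 0:
    if(c)
    {
        // lots
        // of
        // code
    }
    break;

case 1:
    // ...
}

If the fallthrough is intentional, it should instead be marked
explicitly, e.g. with a // [[fallthrough]] comment as in the hunks
below, so the reader does not have to hunt for a buried break.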

A scope introduced for case-local variables should also be indented
relative to its case label. This makes it easier to spot where each
case ends and where the switch itself ends, and is achieved by setting
IndentCaseBlocks to true.

Lastly, the case labels themselves should not be indented. Like goto
labels, they merely mark where in the code to jump to, and the code
that follows is already indented within the switch block. This
corresponds to setting IndentCaseLabels to false.
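
Combined, the two settings yield layouts like the following sketch,
loosely modeled on the BC_Decoder.cpp hunks below (the identifiers are
illustrative):

switch(n)
{
case 1:  // label aligned with the switch (IndentCaseLabels: false)
    {    // case block indented one level (IndentCaseBlocks: true)
        const Block *block = reinterpret_cast<const Block *>(src);
        decode(block, dst);
    }
    break;
case 2:
    decodeOther(src, dst);
    break;
default:
    return;
}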

Bug: b/144825072
Change-Id: I9a130d1d234795f53b5872e411f1315f56a0e908
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/39551
Commit-Queue: Nicolas Capens <nicolascapens@google.com>
Tested-by: Nicolas Capens <nicolascapens@google.com>
Kokoro-Result: kokoro <noreply+kokoro@google.com>
Reviewed-by: Alexis Hétu <sugoi@google.com>
diff --git a/.clang-format b/.clang-format
index d5c422b..5a7a390 100644
--- a/.clang-format
+++ b/.clang-format
@@ -71,8 +71,8 @@
   - Regex:           '.*'
     Priority:        5
 IncludeIsMainRegex: '([-_](test|unittest))?$'
-IndentCaseBlocks: false
-IndentCaseLabels: true
+IndentCaseBlocks: true
+IndentCaseLabels: false
 IndentPPDirectives: AfterHash
 IndentWidth:     4
 IndentWrappedFunctionNames: false
diff --git a/src/Device/BC_Decoder.cpp b/src/Device/BC_Decoder.cpp
index 3a0c220..b2a7241 100644
--- a/src/Device/BC_Decoder.cpp
+++ b/src/Device/BC_Decoder.cpp
@@ -950,35 +950,35 @@
 		{
 			switch(desc.type)
 			{
-				case Mode:
-					modeDesc = desc.modeDesc;
-					ASSERT(modeDesc.number == mode);
+			case Mode:
+				modeDesc = desc.modeDesc;
+				ASSERT(modeDesc.number == mode);
 
-					e[0].size[0] = e[0].size[1] = e[0].size[2] = modeDesc.endpointBits;
-					for(int i = 0; i < RGBfChannels; i++)
+				e[0].size[0] = e[0].size[1] = e[0].size[2] = modeDesc.endpointBits;
+				for(int i = 0; i < RGBfChannels; i++)
+				{
+					if(modeDesc.hasDelta)
 					{
-						if(modeDesc.hasDelta)
-						{
-							e[1].size[i] = e[2].size[i] = e[3].size[i] = modeDesc.deltaBits.channel[i];
-						}
-						else
-						{
-							e[1].size[i] = e[2].size[i] = e[3].size[i] = modeDesc.endpointBits;
-						}
+						e[1].size[i] = e[2].size[i] = e[3].size[i] = modeDesc.deltaBits.channel[i];
 					}
-					break;
-				case Partition:
-					partition |= data.consumeBits(desc.MSB, desc.LSB);
-					break;
-				case EP0:
-				case EP1:
-				case EP2:
-				case EP3:
-					e[desc.type].channel[desc.channel] |= data.consumeBits(desc.MSB, desc.LSB);
-					break;
-				default:
-					ASSERT_MSG(false, "Unexpected enum value: %d", (int)desc.type);
-					return;
+					else
+					{
+						e[1].size[i] = e[2].size[i] = e[3].size[i] = modeDesc.endpointBits;
+					}
+				}
+				break;
+			case Partition:
+				partition |= data.consumeBits(desc.MSB, desc.LSB);
+				break;
+			case EP0:
+			case EP1:
+			case EP2:
+			case EP3:
+				e[desc.type].channel[desc.channel] |= data.consumeBits(desc.MSB, desc.LSB);
+				break;
+			default:
+				ASSERT_MSG(false, "Unexpected enum value: %d", (int)desc.type);
+				return;
 			}
 		}
 
@@ -1532,17 +1532,17 @@
 
 				switch(Get(mode.Rotation()))
 				{
-					default:
-						break;
-					case 1:
-						std::swap(output.a, output.rgb.r);
-						break;
-					case 2:
-						std::swap(output.a, output.rgb.g);
-						break;
-					case 3:
-						std::swap(output.a, output.rgb.b);
-						break;
+				default:
+					break;
+				case 1:
+					std::swap(output.a, output.rgb.r);
+					break;
+				case 2:
+					std::swap(output.a, output.rgb.g);
+					break;
+				case 3:
+					std::swap(output.a, output.rgb.b);
+					break;
 				}
 
 				auto out = reinterpret_cast<Color *>(dst + sizeof(Color) * x + dstPitch * y);
@@ -1555,12 +1555,12 @@
 	{
 		switch(mode.NS)
 		{
-			default:
-				return 0;
-			case 2:
-				return PartitionTable2[partitionIdx][texelIndex];
-			case 3:
-				return PartitionTable3[partitionIdx][texelIndex];
+		default:
+			return 0;
+		case 2:
+			return PartitionTable2[partitionIdx][texelIndex];
+		case 3:
+			return PartitionTable3[partitionIdx][texelIndex];
 		}
 	}
 
@@ -1574,12 +1574,12 @@
 		// of partition here.
 		switch(subsetIdx)
 		{
-			default:
-				return 0;
-			case 1:
-				return mode.NS == 2 ? AnchorTable2[partitionIdx] : AnchorTable3a[partitionIdx];
-			case 2:
-				return AnchorTable3b[partitionIdx];
+		default:
+			return 0;
+		case 1:
+			return mode.NS == 2 ? AnchorTable2[partitionIdx] : AnchorTable3a[partitionIdx];
+		case 2:
+			return AnchorTable3b[partitionIdx];
 		}
 	}
 
@@ -1643,7 +1643,7 @@
 
 	switch(n)
 	{
-		case 1:  // BC1
+	case 1:  // BC1
 		{
 			const BC_color *color = reinterpret_cast<const BC_color *>(src);
 			for(int y = 0; y < h; y += BlockHeight, dst += dy)
@@ -1656,7 +1656,7 @@
 			}
 		}
 		break;
-		case 2:  // BC2
+	case 2:  // BC2
 		{
 			const BC_alpha *alpha = reinterpret_cast<const BC_alpha *>(src);
 			const BC_color *color = reinterpret_cast<const BC_color *>(src + 8);
@@ -1671,7 +1671,7 @@
 			}
 		}
 		break;
-		case 3:  // BC3
+	case 3:  // BC3
 		{
 			const BC_channel *alpha = reinterpret_cast<const BC_channel *>(src);
 			const BC_color *color = reinterpret_cast<const BC_color *>(src + 8);
@@ -1686,7 +1686,7 @@
 			}
 		}
 		break;
-		case 4:  // BC4
+	case 4:  // BC4
 		{
 			const BC_channel *red = reinterpret_cast<const BC_channel *>(src);
 			for(int y = 0; y < h; y += BlockHeight, dst += dy)
@@ -1699,7 +1699,7 @@
 			}
 		}
 		break;
-		case 5:  // BC5
+	case 5:  // BC5
 		{
 			const BC_channel *red = reinterpret_cast<const BC_channel *>(src);
 			const BC_channel *green = reinterpret_cast<const BC_channel *>(src + 8);
@@ -1714,7 +1714,7 @@
 			}
 		}
 		break;
-		case 6:  // BC6H
+	case 6:  // BC6H
 		{
 			const BC6H::Block *block = reinterpret_cast<const BC6H::Block *>(src);
 			for(int y = 0; y < h; y += BlockHeight, dst += dy)
@@ -1727,7 +1727,7 @@
 			}
 		}
 		break;
-		case 7:  // BC7
+	case 7:  // BC7
 		{
 			const BC7::Block *block = reinterpret_cast<const BC7::Block *>(src);
 			for(int y = 0; y < h; y += BlockHeight, dst += dy)
@@ -1740,8 +1740,8 @@
 			}
 		}
 		break;
-		default:
-			return false;
+	default:
+		return false;
 	}
 
 	return true;
diff --git a/src/Device/Blitter.cpp b/src/Device/Blitter.cpp
index abd5a91..1a70606 100644
--- a/src/Device/Blitter.cpp
+++ b/src/Device/Blitter.cpp
@@ -196,46 +196,46 @@
 	VkImageAspectFlagBits aspect = static_cast<VkImageAspectFlagBits>(subresourceRange.aspectMask);
 	switch(viewFormat)
 	{
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-			packed = ((uint16_t)(31 * c.b + 0.5f) << 0) |
-			         ((uint16_t)(63 * c.g + 0.5f) << 5) |
-			         ((uint16_t)(31 * c.r + 0.5f) << 11);
-			break;
-		case VK_FORMAT_B5G6R5_UNORM_PACK16:
-			packed = ((uint16_t)(31 * c.r + 0.5f) << 0) |
-			         ((uint16_t)(63 * c.g + 0.5f) << 5) |
-			         ((uint16_t)(31 * c.b + 0.5f) << 11);
-			break;
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-			packed = ((uint32_t)(255 * c.a + 0.5f) << 24) |
-			         ((uint32_t)(255 * c.b + 0.5f) << 16) |
-			         ((uint32_t)(255 * c.g + 0.5f) << 8) |
-			         ((uint32_t)(255 * c.r + 0.5f) << 0);
-			break;
-		case VK_FORMAT_B8G8R8A8_UNORM:
-			packed = ((uint32_t)(255 * c.a + 0.5f) << 24) |
-			         ((uint32_t)(255 * c.r + 0.5f) << 16) |
-			         ((uint32_t)(255 * c.g + 0.5f) << 8) |
-			         ((uint32_t)(255 * c.b + 0.5f) << 0);
-			break;
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-			packed = R11G11B10F(c.rgb);
-			break;
-		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-			packed = RGB9E5(c.rgb);
-			break;
-		case VK_FORMAT_D32_SFLOAT:
-			ASSERT(clearFormat == VK_FORMAT_D32_SFLOAT);
-			packed = c.d_as_u32;  // float reinterpreted as uint32
-			break;
-		case VK_FORMAT_S8_UINT:
-			ASSERT(clearFormat == VK_FORMAT_S8_UINT);
-			packed = static_cast<uint8_t>(c.s);
-			break;
-		default:
-			return false;
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		packed = ((uint16_t)(31 * c.b + 0.5f) << 0) |
+		         ((uint16_t)(63 * c.g + 0.5f) << 5) |
+		         ((uint16_t)(31 * c.r + 0.5f) << 11);
+		break;
+	case VK_FORMAT_B5G6R5_UNORM_PACK16:
+		packed = ((uint16_t)(31 * c.r + 0.5f) << 0) |
+		         ((uint16_t)(63 * c.g + 0.5f) << 5) |
+		         ((uint16_t)(31 * c.b + 0.5f) << 11);
+		break;
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+		packed = ((uint32_t)(255 * c.a + 0.5f) << 24) |
+		         ((uint32_t)(255 * c.b + 0.5f) << 16) |
+		         ((uint32_t)(255 * c.g + 0.5f) << 8) |
+		         ((uint32_t)(255 * c.r + 0.5f) << 0);
+		break;
+	case VK_FORMAT_B8G8R8A8_UNORM:
+		packed = ((uint32_t)(255 * c.a + 0.5f) << 24) |
+		         ((uint32_t)(255 * c.r + 0.5f) << 16) |
+		         ((uint32_t)(255 * c.g + 0.5f) << 8) |
+		         ((uint32_t)(255 * c.b + 0.5f) << 0);
+		break;
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		packed = R11G11B10F(c.rgb);
+		break;
+	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+		packed = RGB9E5(c.rgb);
+		break;
+	case VK_FORMAT_D32_SFLOAT:
+		ASSERT(clearFormat == VK_FORMAT_D32_SFLOAT);
+		packed = c.d_as_u32;  // float reinterpreted as uint32
+		break;
+	case VK_FORMAT_S8_UINT:
+		ASSERT(clearFormat == VK_FORMAT_S8_UINT);
+		packed = static_cast<uint8_t>(c.s);
+		break;
+	default:
+		return false;
 	}
 
 	VkImageSubresource subres = {
@@ -281,32 +281,32 @@
 
 					switch(viewFormat.bytes())
 					{
-						case 4:
-							for(uint32_t i = 0; i < area.extent.height; i++)
-							{
-								ASSERT(d < dest->end());
-								sw::clear((uint32_t *)d, packed, area.extent.width);
-								d += rowPitchBytes;
-							}
-							break;
-						case 2:
-							for(uint32_t i = 0; i < area.extent.height; i++)
-							{
-								ASSERT(d < dest->end());
-								sw::clear((uint16_t *)d, static_cast<uint16_t>(packed), area.extent.width);
-								d += rowPitchBytes;
-							}
-							break;
-						case 1:
-							for(uint32_t i = 0; i < area.extent.height; i++)
-							{
-								ASSERT(d < dest->end());
-								memset(d, packed, area.extent.width);
-								d += rowPitchBytes;
-							}
-							break;
-						default:
-							assert(false);
+					case 4:
+						for(uint32_t i = 0; i < area.extent.height; i++)
+						{
+							ASSERT(d < dest->end());
+							sw::clear((uint32_t *)d, packed, area.extent.width);
+							d += rowPitchBytes;
+						}
+						break;
+					case 2:
+						for(uint32_t i = 0; i < area.extent.height; i++)
+						{
+							ASSERT(d < dest->end());
+							sw::clear((uint16_t *)d, static_cast<uint16_t>(packed), area.extent.width);
+							d += rowPitchBytes;
+						}
+						break;
+					case 1:
+						for(uint32_t i = 0; i < area.extent.height; i++)
+						{
+							ASSERT(d < dest->end());
+							memset(d, packed, area.extent.width);
+							d += rowPitchBytes;
+						}
+						break;
+					default:
+						assert(false);
 					}
 
 					slice += slicePitchBytes;
@@ -325,186 +325,186 @@
 
 	switch(state.sourceFormat)
 	{
-		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-			c.w = Float(Int(*Pointer<Byte>(element)) & Int(0xF));
-			c.x = Float((Int(*Pointer<Byte>(element)) >> 4) & Int(0xF));
-			c.y = Float(Int(*Pointer<Byte>(element + 1)) & Int(0xF));
-			c.z = Float((Int(*Pointer<Byte>(element + 1)) >> 4) & Int(0xF));
-			break;
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8_SNORM:
-			c.x = Float(Int(*Pointer<SByte>(element)));
-			c.w = float(0x7F);
-			break;
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_SRGB:
-			c.x = Float(Int(*Pointer<Byte>(element)));
-			c.w = float(0xFF);
-			break;
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_SNORM:
-			c.x = Float(Int(*Pointer<Short>(element)));
-			c.w = float(0x7FFF);
-			break;
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16_UINT:
-			c.x = Float(Int(*Pointer<UShort>(element)));
-			c.w = float(0xFFFF);
-			break;
-		case VK_FORMAT_R32_SINT:
-			c.x = Float(*Pointer<Int>(element));
-			c.w = float(0x7FFFFFFF);
-			break;
-		case VK_FORMAT_R32_UINT:
-			c.x = Float(*Pointer<UInt>(element));
-			c.w = float(0xFFFFFFFF);
-			break;
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-			c = Float4(*Pointer<Byte4>(element)).zyxw;
-			break;
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-			c = Float4(*Pointer<SByte4>(element));
-			break;
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-			c = Float4(*Pointer<Byte4>(element));
-			break;
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-			c = Float4(*Pointer<Short4>(element));
-			break;
-		case VK_FORMAT_R16G16B16A16_UNORM:
-		case VK_FORMAT_R16G16B16A16_UINT:
-			c = Float4(*Pointer<UShort4>(element));
-			break;
-		case VK_FORMAT_R32G32B32A32_SINT:
-			c = Float4(*Pointer<Int4>(element));
-			break;
-		case VK_FORMAT_R32G32B32A32_UINT:
-			c = Float4(*Pointer<UInt4>(element));
-			break;
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8_SNORM:
-			c.x = Float(Int(*Pointer<SByte>(element + 0)));
-			c.y = Float(Int(*Pointer<SByte>(element + 1)));
-			c.w = float(0x7F);
-			break;
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_SRGB:
-			c.x = Float(Int(*Pointer<Byte>(element + 0)));
-			c.y = Float(Int(*Pointer<Byte>(element + 1)));
-			c.w = float(0xFF);
-			break;
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_SNORM:
-			c.x = Float(Int(*Pointer<Short>(element + 0)));
-			c.y = Float(Int(*Pointer<Short>(element + 2)));
-			c.w = float(0x7FFF);
-			break;
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16_UINT:
-			c.x = Float(Int(*Pointer<UShort>(element + 0)));
-			c.y = Float(Int(*Pointer<UShort>(element + 2)));
-			c.w = float(0xFFFF);
-			break;
-		case VK_FORMAT_R32G32_SINT:
-			c.x = Float(*Pointer<Int>(element + 0));
-			c.y = Float(*Pointer<Int>(element + 4));
-			c.w = float(0x7FFFFFFF);
-			break;
-		case VK_FORMAT_R32G32_UINT:
-			c.x = Float(*Pointer<UInt>(element + 0));
-			c.y = Float(*Pointer<UInt>(element + 4));
-			c.w = float(0xFFFFFFFF);
-			break;
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-			c = *Pointer<Float4>(element);
-			break;
-		case VK_FORMAT_R32G32_SFLOAT:
-			c.x = *Pointer<Float>(element + 0);
-			c.y = *Pointer<Float>(element + 4);
-			break;
-		case VK_FORMAT_R32_SFLOAT:
-			c.x = *Pointer<Float>(element);
-			break;
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-			c.w = Float(*Pointer<Half>(element + 6));
-		case VK_FORMAT_R16G16B16_SFLOAT:
-			c.z = Float(*Pointer<Half>(element + 4));
-		case VK_FORMAT_R16G16_SFLOAT:
-			c.y = Float(*Pointer<Half>(element + 2));
-		case VK_FORMAT_R16_SFLOAT:
-			c.x = Float(*Pointer<Half>(element));
-			break;
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-			c = r11g11b10Unpack(*Pointer<UInt>(element));
-			break;
-		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-			// This type contains a common 5 bit exponent (E) and a 9 bit the mantissa for R, G and B.
-			c.x = Float(*Pointer<UInt>(element) & UInt(0x000001FF));          // R's mantissa (bits 0-8)
-			c.y = Float((*Pointer<UInt>(element) & UInt(0x0003FE00)) >> 9);   // G's mantissa (bits 9-17)
-			c.z = Float((*Pointer<UInt>(element) & UInt(0x07FC0000)) >> 18);  // B's mantissa (bits 18-26)
-			c *= Float4(
-			    // 2^E, using the exponent (bits 27-31) and treating it as an unsigned integer value
-			    Float(UInt(1) << ((*Pointer<UInt>(element) & UInt(0xF8000000)) >> 27)) *
-			    // Since the 9 bit mantissa values currently stored in RGB were converted straight
-			    // from int to float (in the [0, 1<<9] range instead of the [0, 1] range), they
-			    // are (1 << 9) times too high.
-			    // Also, the exponent has 5 bits and we compute the exponent bias of floating point
-			    // formats using "2^(k-1) - 1", so, in this case, the exponent bias is 2^(5-1)-1 = 15
-			    // Exponent bias (15) + number of mantissa bits per component (9) = 24
-			    Float(1.0f / (1 << 24)));
-			c.w = 1.0f;
-			break;
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-			c.x = Float(Int((*Pointer<UShort>(element) & UShort(0xF800)) >> UShort(11)));
-			c.y = Float(Int((*Pointer<UShort>(element) & UShort(0x07E0)) >> UShort(5)));
-			c.z = Float(Int(*Pointer<UShort>(element) & UShort(0x001F)));
-			break;
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-			c.w = Float(Int((*Pointer<UShort>(element) & UShort(0x8000)) >> UShort(15)));
-			c.x = Float(Int((*Pointer<UShort>(element) & UShort(0x7C00)) >> UShort(10)));
-			c.y = Float(Int((*Pointer<UShort>(element) & UShort(0x03E0)) >> UShort(5)));
-			c.z = Float(Int(*Pointer<UShort>(element) & UShort(0x001F)));
-			break;
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-			c.x = Float(Int((*Pointer<UInt>(element) & UInt(0x000003FF))));
-			c.y = Float(Int((*Pointer<UInt>(element) & UInt(0x000FFC00)) >> 10));
-			c.z = Float(Int((*Pointer<UInt>(element) & UInt(0x3FF00000)) >> 20));
-			c.w = Float(Int((*Pointer<UInt>(element) & UInt(0xC0000000)) >> 30));
-			break;
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-			c.z = Float(Int((*Pointer<UInt>(element) & UInt(0x000003FF))));
-			c.y = Float(Int((*Pointer<UInt>(element) & UInt(0x000FFC00)) >> 10));
-			c.x = Float(Int((*Pointer<UInt>(element) & UInt(0x3FF00000)) >> 20));
-			c.w = Float(Int((*Pointer<UInt>(element) & UInt(0xC0000000)) >> 30));
-			break;
-		case VK_FORMAT_D16_UNORM:
-			c.x = Float(Int((*Pointer<UShort>(element))));
-			break;
-		case VK_FORMAT_X8_D24_UNORM_PACK32:
-			c.x = Float(Int((*Pointer<UInt>(element) & UInt(0xFFFFFF00)) >> 8));
-			break;
-		case VK_FORMAT_D32_SFLOAT:
-			c.x = *Pointer<Float>(element);
-			break;
-		case VK_FORMAT_S8_UINT:
-			c.x = Float(Int(*Pointer<Byte>(element)));
-			break;
-		default:
-			UNSUPPORTED("Blitter source format %d", (int)state.sourceFormat);
+	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+		c.w = Float(Int(*Pointer<Byte>(element)) & Int(0xF));
+		c.x = Float((Int(*Pointer<Byte>(element)) >> 4) & Int(0xF));
+		c.y = Float(Int(*Pointer<Byte>(element + 1)) & Int(0xF));
+		c.z = Float((Int(*Pointer<Byte>(element + 1)) >> 4) & Int(0xF));
+		break;
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8_SNORM:
+		c.x = Float(Int(*Pointer<SByte>(element)));
+		c.w = float(0x7F);
+		break;
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_SRGB:
+		c.x = Float(Int(*Pointer<Byte>(element)));
+		c.w = float(0xFF);
+		break;
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_SNORM:
+		c.x = Float(Int(*Pointer<Short>(element)));
+		c.w = float(0x7FFF);
+		break;
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16_UINT:
+		c.x = Float(Int(*Pointer<UShort>(element)));
+		c.w = float(0xFFFF);
+		break;
+	case VK_FORMAT_R32_SINT:
+		c.x = Float(*Pointer<Int>(element));
+		c.w = float(0x7FFFFFFF);
+		break;
+	case VK_FORMAT_R32_UINT:
+		c.x = Float(*Pointer<UInt>(element));
+		c.w = float(0xFFFFFFFF);
+		break;
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+		c = Float4(*Pointer<Byte4>(element)).zyxw;
+		break;
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+		c = Float4(*Pointer<SByte4>(element));
+		break;
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+		c = Float4(*Pointer<Byte4>(element));
+		break;
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+		c = Float4(*Pointer<Short4>(element));
+		break;
+	case VK_FORMAT_R16G16B16A16_UNORM:
+	case VK_FORMAT_R16G16B16A16_UINT:
+		c = Float4(*Pointer<UShort4>(element));
+		break;
+	case VK_FORMAT_R32G32B32A32_SINT:
+		c = Float4(*Pointer<Int4>(element));
+		break;
+	case VK_FORMAT_R32G32B32A32_UINT:
+		c = Float4(*Pointer<UInt4>(element));
+		break;
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8_SNORM:
+		c.x = Float(Int(*Pointer<SByte>(element + 0)));
+		c.y = Float(Int(*Pointer<SByte>(element + 1)));
+		c.w = float(0x7F);
+		break;
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_SRGB:
+		c.x = Float(Int(*Pointer<Byte>(element + 0)));
+		c.y = Float(Int(*Pointer<Byte>(element + 1)));
+		c.w = float(0xFF);
+		break;
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_SNORM:
+		c.x = Float(Int(*Pointer<Short>(element + 0)));
+		c.y = Float(Int(*Pointer<Short>(element + 2)));
+		c.w = float(0x7FFF);
+		break;
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16_UINT:
+		c.x = Float(Int(*Pointer<UShort>(element + 0)));
+		c.y = Float(Int(*Pointer<UShort>(element + 2)));
+		c.w = float(0xFFFF);
+		break;
+	case VK_FORMAT_R32G32_SINT:
+		c.x = Float(*Pointer<Int>(element + 0));
+		c.y = Float(*Pointer<Int>(element + 4));
+		c.w = float(0x7FFFFFFF);
+		break;
+	case VK_FORMAT_R32G32_UINT:
+		c.x = Float(*Pointer<UInt>(element + 0));
+		c.y = Float(*Pointer<UInt>(element + 4));
+		c.w = float(0xFFFFFFFF);
+		break;
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+		c = *Pointer<Float4>(element);
+		break;
+	case VK_FORMAT_R32G32_SFLOAT:
+		c.x = *Pointer<Float>(element + 0);
+		c.y = *Pointer<Float>(element + 4);
+		break;
+	case VK_FORMAT_R32_SFLOAT:
+		c.x = *Pointer<Float>(element);
+		break;
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+		c.w = Float(*Pointer<Half>(element + 6));
+	case VK_FORMAT_R16G16B16_SFLOAT:
+		c.z = Float(*Pointer<Half>(element + 4));
+	case VK_FORMAT_R16G16_SFLOAT:
+		c.y = Float(*Pointer<Half>(element + 2));
+	case VK_FORMAT_R16_SFLOAT:
+		c.x = Float(*Pointer<Half>(element));
+		break;
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		c = r11g11b10Unpack(*Pointer<UInt>(element));
+		break;
+	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+		// This type contains a common 5 bit exponent (E) and a 9 bit the mantissa for R, G and B.
+		c.x = Float(*Pointer<UInt>(element) & UInt(0x000001FF));          // R's mantissa (bits 0-8)
+		c.y = Float((*Pointer<UInt>(element) & UInt(0x0003FE00)) >> 9);   // G's mantissa (bits 9-17)
+		c.z = Float((*Pointer<UInt>(element) & UInt(0x07FC0000)) >> 18);  // B's mantissa (bits 18-26)
+		c *= Float4(
+		    // 2^E, using the exponent (bits 27-31) and treating it as an unsigned integer value
+		    Float(UInt(1) << ((*Pointer<UInt>(element) & UInt(0xF8000000)) >> 27)) *
+		    // Since the 9 bit mantissa values currently stored in RGB were converted straight
+		    // from int to float (in the [0, 1<<9] range instead of the [0, 1] range), they
+		    // are (1 << 9) times too high.
+		    // Also, the exponent has 5 bits and we compute the exponent bias of floating point
+		    // formats using "2^(k-1) - 1", so, in this case, the exponent bias is 2^(5-1)-1 = 15
+		    // Exponent bias (15) + number of mantissa bits per component (9) = 24
+		    Float(1.0f / (1 << 24)));
+		c.w = 1.0f;
+		break;
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		c.x = Float(Int((*Pointer<UShort>(element) & UShort(0xF800)) >> UShort(11)));
+		c.y = Float(Int((*Pointer<UShort>(element) & UShort(0x07E0)) >> UShort(5)));
+		c.z = Float(Int(*Pointer<UShort>(element) & UShort(0x001F)));
+		break;
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		c.w = Float(Int((*Pointer<UShort>(element) & UShort(0x8000)) >> UShort(15)));
+		c.x = Float(Int((*Pointer<UShort>(element) & UShort(0x7C00)) >> UShort(10)));
+		c.y = Float(Int((*Pointer<UShort>(element) & UShort(0x03E0)) >> UShort(5)));
+		c.z = Float(Int(*Pointer<UShort>(element) & UShort(0x001F)));
+		break;
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		c.x = Float(Int((*Pointer<UInt>(element) & UInt(0x000003FF))));
+		c.y = Float(Int((*Pointer<UInt>(element) & UInt(0x000FFC00)) >> 10));
+		c.z = Float(Int((*Pointer<UInt>(element) & UInt(0x3FF00000)) >> 20));
+		c.w = Float(Int((*Pointer<UInt>(element) & UInt(0xC0000000)) >> 30));
+		break;
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		c.z = Float(Int((*Pointer<UInt>(element) & UInt(0x000003FF))));
+		c.y = Float(Int((*Pointer<UInt>(element) & UInt(0x000FFC00)) >> 10));
+		c.x = Float(Int((*Pointer<UInt>(element) & UInt(0x3FF00000)) >> 20));
+		c.w = Float(Int((*Pointer<UInt>(element) & UInt(0xC0000000)) >> 30));
+		break;
+	case VK_FORMAT_D16_UNORM:
+		c.x = Float(Int((*Pointer<UShort>(element))));
+		break;
+	case VK_FORMAT_X8_D24_UNORM_PACK32:
+		c.x = Float(Int((*Pointer<UInt>(element) & UInt(0xFFFFFF00)) >> 8));
+		break;
+	case VK_FORMAT_D32_SFLOAT:
+		c.x = *Pointer<Float>(element);
+		break;
+	case VK_FORMAT_S8_UINT:
+		c.x = Float(Int(*Pointer<Byte>(element)));
+		break;
+	default:
+		UNSUPPORTED("Blitter source format %d", (int)state.sourceFormat);
 	}
 
 	return c;
@@ -520,150 +520,150 @@
 
 	switch(state.destFormat)
 	{
-		case VK_FORMAT_R4G4_UNORM_PACK8:
-			if(writeR | writeG)
+	case VK_FORMAT_R4G4_UNORM_PACK8:
+		if(writeR | writeG)
+		{
+			if(!writeR)
 			{
-				if(!writeR)
-				{
-					*Pointer<Byte>(element) = (Byte(RoundInt(Float(c.y))) & Byte(0xF)) |
-					                          (*Pointer<Byte>(element) & Byte(0xF0));
-				}
-				else if(!writeG)
-				{
-					*Pointer<Byte>(element) = (*Pointer<Byte>(element) & Byte(0xF)) |
-					                          (Byte(RoundInt(Float(c.x))) << Byte(4));
-				}
-				else
-				{
-					*Pointer<Byte>(element) = (Byte(RoundInt(Float(c.y))) & Byte(0xF)) |
-					                          (Byte(RoundInt(Float(c.x))) << Byte(4));
-				}
+				*Pointer<Byte>(element) = (Byte(RoundInt(Float(c.y))) & Byte(0xF)) |
+				                          (*Pointer<Byte>(element) & Byte(0xF0));
 			}
-			break;
-		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
-			if(writeR || writeG || writeB || writeA)
+			else if(!writeG)
 			{
-				*Pointer<UShort>(element) = (writeR ? ((UShort(RoundInt(Float(c.x))) & UShort(0xF)) << UShort(12)) : (*Pointer<UShort>(element) & UShort(0x000F))) |
-				                            (writeG ? ((UShort(RoundInt(Float(c.y))) & UShort(0xF)) << UShort(8)) : (*Pointer<UShort>(element) & UShort(0x00F0))) |
-				                            (writeB ? ((UShort(RoundInt(Float(c.z))) & UShort(0xF)) << UShort(4)) : (*Pointer<UShort>(element) & UShort(0x0F00))) |
-				                            (writeA ? (UShort(RoundInt(Float(c.w))) & UShort(0xF)) : (*Pointer<UShort>(element) & UShort(0xF000)));
-			}
-			break;
-		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-			if(writeRGBA)
-			{
-				*Pointer<UShort>(element) = UShort(RoundInt(Float(c.w)) & Int(0xF)) |
-				                            UShort((RoundInt(Float(c.x)) & Int(0xF)) << 4) |
-				                            UShort((RoundInt(Float(c.y)) & Int(0xF)) << 8) |
-				                            UShort((RoundInt(Float(c.z)) & Int(0xF)) << 12);
+				*Pointer<Byte>(element) = (*Pointer<Byte>(element) & Byte(0xF)) |
+				                          (Byte(RoundInt(Float(c.x))) << Byte(4));
 			}
 			else
 			{
-				unsigned short mask = (writeA ? 0x000F : 0x0000) |
-				                      (writeR ? 0x00F0 : 0x0000) |
-				                      (writeG ? 0x0F00 : 0x0000) |
-				                      (writeB ? 0xF000 : 0x0000);
-				unsigned short unmask = ~mask;
-				*Pointer<UShort>(element) = (*Pointer<UShort>(element) & UShort(unmask)) |
-				                            ((UShort(RoundInt(Float(c.w)) & Int(0xF)) |
-				                              UShort((RoundInt(Float(c.x)) & Int(0xF)) << 4) |
-				                              UShort((RoundInt(Float(c.y)) & Int(0xF)) << 8) |
-				                              UShort((RoundInt(Float(c.z)) & Int(0xF)) << 12)) &
-				                             UShort(mask));
+				*Pointer<Byte>(element) = (Byte(RoundInt(Float(c.y))) & Byte(0xF)) |
+				                          (Byte(RoundInt(Float(c.x))) << Byte(4));
 			}
-			break;
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-			if(writeRGBA)
-			{
-				Short4 c0 = RoundShort4(c.zyxw);
-				*Pointer<Byte4>(element) = Byte4(PackUnsigned(c0, c0));
-			}
-			else
-			{
-				if(writeB) { *Pointer<Byte>(element + 0) = Byte(RoundInt(Float(c.z))); }
-				if(writeG) { *Pointer<Byte>(element + 1) = Byte(RoundInt(Float(c.y))); }
-				if(writeR) { *Pointer<Byte>(element + 2) = Byte(RoundInt(Float(c.x))); }
-				if(writeA) { *Pointer<Byte>(element + 3) = Byte(RoundInt(Float(c.w))); }
-			}
-			break;
-		case VK_FORMAT_B8G8R8_SNORM:
-			if(writeB) { *Pointer<SByte>(element + 0) = SByte(RoundInt(Float(c.z))); }
-			if(writeG) { *Pointer<SByte>(element + 1) = SByte(RoundInt(Float(c.y))); }
-			if(writeR) { *Pointer<SByte>(element + 2) = SByte(RoundInt(Float(c.x))); }
-			break;
-		case VK_FORMAT_B8G8R8_UNORM:
-		case VK_FORMAT_B8G8R8_SRGB:
+		}
+		break;
+	case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+		if(writeR || writeG || writeB || writeA)
+		{
+			*Pointer<UShort>(element) = (writeR ? ((UShort(RoundInt(Float(c.x))) & UShort(0xF)) << UShort(12)) : (*Pointer<UShort>(element) & UShort(0x000F))) |
+			                            (writeG ? ((UShort(RoundInt(Float(c.y))) & UShort(0xF)) << UShort(8)) : (*Pointer<UShort>(element) & UShort(0x00F0))) |
+			                            (writeB ? ((UShort(RoundInt(Float(c.z))) & UShort(0xF)) << UShort(4)) : (*Pointer<UShort>(element) & UShort(0x0F00))) |
+			                            (writeA ? (UShort(RoundInt(Float(c.w))) & UShort(0xF)) : (*Pointer<UShort>(element) & UShort(0xF000)));
+		}
+		break;
+	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+		if(writeRGBA)
+		{
+			*Pointer<UShort>(element) = UShort(RoundInt(Float(c.w)) & Int(0xF)) |
+			                            UShort((RoundInt(Float(c.x)) & Int(0xF)) << 4) |
+			                            UShort((RoundInt(Float(c.y)) & Int(0xF)) << 8) |
+			                            UShort((RoundInt(Float(c.z)) & Int(0xF)) << 12);
+		}
+		else
+		{
+			unsigned short mask = (writeA ? 0x000F : 0x0000) |
+			                      (writeR ? 0x00F0 : 0x0000) |
+			                      (writeG ? 0x0F00 : 0x0000) |
+			                      (writeB ? 0xF000 : 0x0000);
+			unsigned short unmask = ~mask;
+			*Pointer<UShort>(element) = (*Pointer<UShort>(element) & UShort(unmask)) |
+			                            ((UShort(RoundInt(Float(c.w)) & Int(0xF)) |
+			                              UShort((RoundInt(Float(c.x)) & Int(0xF)) << 4) |
+			                              UShort((RoundInt(Float(c.y)) & Int(0xF)) << 8) |
+			                              UShort((RoundInt(Float(c.z)) & Int(0xF)) << 12)) &
+			                             UShort(mask));
+		}
+		break;
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+		if(writeRGBA)
+		{
+			Short4 c0 = RoundShort4(c.zyxw);
+			*Pointer<Byte4>(element) = Byte4(PackUnsigned(c0, c0));
+		}
+		else
+		{
 			if(writeB) { *Pointer<Byte>(element + 0) = Byte(RoundInt(Float(c.z))); }
 			if(writeG) { *Pointer<Byte>(element + 1) = Byte(RoundInt(Float(c.y))); }
 			if(writeR) { *Pointer<Byte>(element + 2) = Byte(RoundInt(Float(c.x))); }
-			break;
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_USCALED:
-		case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
-			if(writeRGBA)
-			{
-				Short4 c0 = RoundShort4(c);
-				*Pointer<Byte4>(element) = Byte4(PackUnsigned(c0, c0));
-			}
-			else
-			{
-				if(writeR) { *Pointer<Byte>(element + 0) = Byte(RoundInt(Float(c.x))); }
-				if(writeG) { *Pointer<Byte>(element + 1) = Byte(RoundInt(Float(c.y))); }
-				if(writeB) { *Pointer<Byte>(element + 2) = Byte(RoundInt(Float(c.z))); }
-				if(writeA) { *Pointer<Byte>(element + 3) = Byte(RoundInt(Float(c.w))); }
-			}
-			break;
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-			if(writeRGBA)
-			{
-				*Pointer<Float4>(element) = c;
-			}
-			else
-			{
-				if(writeR) { *Pointer<Float>(element) = c.x; }
-				if(writeG) { *Pointer<Float>(element + 4) = c.y; }
-				if(writeB) { *Pointer<Float>(element + 8) = c.z; }
-				if(writeA) { *Pointer<Float>(element + 12) = c.w; }
-			}
-			break;
-		case VK_FORMAT_R32G32B32_SFLOAT:
+			if(writeA) { *Pointer<Byte>(element + 3) = Byte(RoundInt(Float(c.w))); }
+		}
+		break;
+	case VK_FORMAT_B8G8R8_SNORM:
+		if(writeB) { *Pointer<SByte>(element + 0) = SByte(RoundInt(Float(c.z))); }
+		if(writeG) { *Pointer<SByte>(element + 1) = SByte(RoundInt(Float(c.y))); }
+		if(writeR) { *Pointer<SByte>(element + 2) = SByte(RoundInt(Float(c.x))); }
+		break;
+	case VK_FORMAT_B8G8R8_UNORM:
+	case VK_FORMAT_B8G8R8_SRGB:
+		if(writeB) { *Pointer<Byte>(element + 0) = Byte(RoundInt(Float(c.z))); }
+		if(writeG) { *Pointer<Byte>(element + 1) = Byte(RoundInt(Float(c.y))); }
+		if(writeR) { *Pointer<Byte>(element + 2) = Byte(RoundInt(Float(c.x))); }
+		break;
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_USCALED:
+	case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+		if(writeRGBA)
+		{
+			Short4 c0 = RoundShort4(c);
+			*Pointer<Byte4>(element) = Byte4(PackUnsigned(c0, c0));
+		}
+		else
+		{
+			if(writeR) { *Pointer<Byte>(element + 0) = Byte(RoundInt(Float(c.x))); }
+			if(writeG) { *Pointer<Byte>(element + 1) = Byte(RoundInt(Float(c.y))); }
+			if(writeB) { *Pointer<Byte>(element + 2) = Byte(RoundInt(Float(c.z))); }
+			if(writeA) { *Pointer<Byte>(element + 3) = Byte(RoundInt(Float(c.w))); }
+		}
+		break;
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+		if(writeRGBA)
+		{
+			*Pointer<Float4>(element) = c;
+		}
+		else
+		{
 			if(writeR) { *Pointer<Float>(element) = c.x; }
 			if(writeG) { *Pointer<Float>(element + 4) = c.y; }
 			if(writeB) { *Pointer<Float>(element + 8) = c.z; }
-			break;
-		case VK_FORMAT_R32G32_SFLOAT:
-			if(writeR && writeG)
-			{
-				*Pointer<Float2>(element) = Float2(c);
-			}
-			else
-			{
-				if(writeR) { *Pointer<Float>(element) = c.x; }
-				if(writeG) { *Pointer<Float>(element + 4) = c.y; }
-			}
-			break;
-		case VK_FORMAT_R32_SFLOAT:
+			if(writeA) { *Pointer<Float>(element + 12) = c.w; }
+		}
+		break;
+	case VK_FORMAT_R32G32B32_SFLOAT:
+		if(writeR) { *Pointer<Float>(element) = c.x; }
+		if(writeG) { *Pointer<Float>(element + 4) = c.y; }
+		if(writeB) { *Pointer<Float>(element + 8) = c.z; }
+		break;
+	case VK_FORMAT_R32G32_SFLOAT:
+		if(writeR && writeG)
+		{
+			*Pointer<Float2>(element) = Float2(c);
+		}
+		else
+		{
 			if(writeR) { *Pointer<Float>(element) = c.x; }
-			break;
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-			if(writeA) { *Pointer<Half>(element + 6) = Half(c.w); }
-			// [[fallthrough]]
-		case VK_FORMAT_R16G16B16_SFLOAT:
-			if(writeB) { *Pointer<Half>(element + 4) = Half(c.z); }
-			// [[fallthrough]]
-		case VK_FORMAT_R16G16_SFLOAT:
-			if(writeG) { *Pointer<Half>(element + 2) = Half(c.y); }
-			// [[fallthrough]]
-		case VK_FORMAT_R16_SFLOAT:
-			if(writeR) { *Pointer<Half>(element) = Half(c.x); }
-			break;
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+			if(writeG) { *Pointer<Float>(element + 4) = c.y; }
+		}
+		break;
+	case VK_FORMAT_R32_SFLOAT:
+		if(writeR) { *Pointer<Float>(element) = c.x; }
+		break;
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+		if(writeA) { *Pointer<Half>(element + 6) = Half(c.w); }
+		// [[fallthrough]]
+	case VK_FORMAT_R16G16B16_SFLOAT:
+		if(writeB) { *Pointer<Half>(element + 4) = Half(c.z); }
+		// [[fallthrough]]
+	case VK_FORMAT_R16G16_SFLOAT:
+		if(writeG) { *Pointer<Half>(element + 2) = Half(c.y); }
+		// [[fallthrough]]
+	case VK_FORMAT_R16_SFLOAT:
+		if(writeR) { *Pointer<Half>(element) = Half(c.x); }
+		break;
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
 		{
 			UInt rgb = r11g11b10Pack(c);
 
@@ -676,7 +676,7 @@
 			*Pointer<UInt>(element) = (rgb & mask) | (old & ~mask);
 		}
 		break;
-		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
 		{
 			ASSERT(writeRGBA);  // Can't sensibly write just part of this format.
 
@@ -722,295 +722,295 @@
 			*Pointer<UInt>(element) = E5B9G9R9;
 		}
 		break;
-		case VK_FORMAT_B8G8R8A8_SNORM:
-			if(writeB) { *Pointer<SByte>(element) = SByte(RoundInt(Float(c.z))); }
-			if(writeG) { *Pointer<SByte>(element + 1) = SByte(RoundInt(Float(c.y))); }
-			if(writeR) { *Pointer<SByte>(element + 2) = SByte(RoundInt(Float(c.x))); }
-			if(writeA) { *Pointer<SByte>(element + 3) = SByte(RoundInt(Float(c.w))); }
-			break;
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_R8G8B8A8_SSCALED:
-		case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
-			if(writeA) { *Pointer<SByte>(element + 3) = SByte(RoundInt(Float(c.w))); }
-			// [[fallthrough]]
-		case VK_FORMAT_R8G8B8_SINT:
-		case VK_FORMAT_R8G8B8_SNORM:
-		case VK_FORMAT_R8G8B8_SSCALED:
-			if(writeB) { *Pointer<SByte>(element + 2) = SByte(RoundInt(Float(c.z))); }
-			// [[fallthrough]]
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8_SSCALED:
-			if(writeG) { *Pointer<SByte>(element + 1) = SByte(RoundInt(Float(c.y))); }
-			// [[fallthrough]]
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8_SSCALED:
-			if(writeR) { *Pointer<SByte>(element) = SByte(RoundInt(Float(c.x))); }
-			break;
-		case VK_FORMAT_R8G8B8_UINT:
-		case VK_FORMAT_R8G8B8_UNORM:
-		case VK_FORMAT_R8G8B8_USCALED:
-		case VK_FORMAT_R8G8B8_SRGB:
-			if(writeB) { *Pointer<Byte>(element + 2) = Byte(RoundInt(Float(c.z))); }
-			// [[fallthrough]]
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8_USCALED:
-		case VK_FORMAT_R8G8_SRGB:
-			if(writeG) { *Pointer<Byte>(element + 1) = Byte(RoundInt(Float(c.y))); }
-			// [[fallthrough]]
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8_USCALED:
-		case VK_FORMAT_R8_SRGB:
-			if(writeR) { *Pointer<Byte>(element) = Byte(RoundInt(Float(c.x))); }
-			break;
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-		case VK_FORMAT_R16G16B16A16_SSCALED:
-			if(writeRGBA)
-			{
-				*Pointer<Short4>(element) = Short4(RoundInt(c));
-			}
-			else
-			{
-				if(writeR) { *Pointer<Short>(element) = Short(RoundInt(Float(c.x))); }
-				if(writeG) { *Pointer<Short>(element + 2) = Short(RoundInt(Float(c.y))); }
-				if(writeB) { *Pointer<Short>(element + 4) = Short(RoundInt(Float(c.z))); }
-				if(writeA) { *Pointer<Short>(element + 6) = Short(RoundInt(Float(c.w))); }
-			}
-			break;
-		case VK_FORMAT_R16G16B16_SINT:
-		case VK_FORMAT_R16G16B16_SNORM:
-		case VK_FORMAT_R16G16B16_SSCALED:
+	case VK_FORMAT_B8G8R8A8_SNORM:
+		if(writeB) { *Pointer<SByte>(element) = SByte(RoundInt(Float(c.z))); }
+		if(writeG) { *Pointer<SByte>(element + 1) = SByte(RoundInt(Float(c.y))); }
+		if(writeR) { *Pointer<SByte>(element + 2) = SByte(RoundInt(Float(c.x))); }
+		if(writeA) { *Pointer<SByte>(element + 3) = SByte(RoundInt(Float(c.w))); }
+		break;
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_R8G8B8A8_SSCALED:
+	case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+		if(writeA) { *Pointer<SByte>(element + 3) = SByte(RoundInt(Float(c.w))); }
+		// [[fallthrough]]
+	case VK_FORMAT_R8G8B8_SINT:
+	case VK_FORMAT_R8G8B8_SNORM:
+	case VK_FORMAT_R8G8B8_SSCALED:
+		if(writeB) { *Pointer<SByte>(element + 2) = SByte(RoundInt(Float(c.z))); }
+		// [[fallthrough]]
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8_SSCALED:
+		if(writeG) { *Pointer<SByte>(element + 1) = SByte(RoundInt(Float(c.y))); }
+		// [[fallthrough]]
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8_SSCALED:
+		if(writeR) { *Pointer<SByte>(element) = SByte(RoundInt(Float(c.x))); }
+		break;
+	case VK_FORMAT_R8G8B8_UINT:
+	case VK_FORMAT_R8G8B8_UNORM:
+	case VK_FORMAT_R8G8B8_USCALED:
+	case VK_FORMAT_R8G8B8_SRGB:
+		if(writeB) { *Pointer<Byte>(element + 2) = Byte(RoundInt(Float(c.z))); }
+		// [[fallthrough]]
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8_USCALED:
+	case VK_FORMAT_R8G8_SRGB:
+		if(writeG) { *Pointer<Byte>(element + 1) = Byte(RoundInt(Float(c.y))); }
+		// [[fallthrough]]
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8_USCALED:
+	case VK_FORMAT_R8_SRGB:
+		if(writeR) { *Pointer<Byte>(element) = Byte(RoundInt(Float(c.x))); }
+		break;
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+	case VK_FORMAT_R16G16B16A16_SSCALED:
+		if(writeRGBA)
+		{
+			*Pointer<Short4>(element) = Short4(RoundInt(c));
+		}
+		else
+		{
 			if(writeR) { *Pointer<Short>(element) = Short(RoundInt(Float(c.x))); }
 			if(writeG) { *Pointer<Short>(element + 2) = Short(RoundInt(Float(c.y))); }
 			if(writeB) { *Pointer<Short>(element + 4) = Short(RoundInt(Float(c.z))); }
-			break;
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R16G16_SSCALED:
-			if(writeR && writeG)
-			{
-				*Pointer<Short2>(element) = Short2(Short4(RoundInt(c)));
-			}
-			else
-			{
-				if(writeR) { *Pointer<Short>(element) = Short(RoundInt(Float(c.x))); }
-				if(writeG) { *Pointer<Short>(element + 2) = Short(RoundInt(Float(c.y))); }
-			}
-			break;
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16_SSCALED:
+			if(writeA) { *Pointer<Short>(element + 6) = Short(RoundInt(Float(c.w))); }
+		}
+		break;
+	case VK_FORMAT_R16G16B16_SINT:
+	case VK_FORMAT_R16G16B16_SNORM:
+	case VK_FORMAT_R16G16B16_SSCALED:
+		if(writeR) { *Pointer<Short>(element) = Short(RoundInt(Float(c.x))); }
+		if(writeG) { *Pointer<Short>(element + 2) = Short(RoundInt(Float(c.y))); }
+		if(writeB) { *Pointer<Short>(element + 4) = Short(RoundInt(Float(c.z))); }
+		break;
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R16G16_SSCALED:
+		if(writeR && writeG)
+		{
+			*Pointer<Short2>(element) = Short2(Short4(RoundInt(c)));
+		}
+		else
+		{
 			if(writeR) { *Pointer<Short>(element) = Short(RoundInt(Float(c.x))); }
-			break;
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16A16_UNORM:
-		case VK_FORMAT_R16G16B16A16_USCALED:
-			if(writeRGBA)
-			{
-				*Pointer<UShort4>(element) = UShort4(RoundInt(c));
-			}
-			else
-			{
-				if(writeR) { *Pointer<UShort>(element) = UShort(RoundInt(Float(c.x))); }
-				if(writeG) { *Pointer<UShort>(element + 2) = UShort(RoundInt(Float(c.y))); }
-				if(writeB) { *Pointer<UShort>(element + 4) = UShort(RoundInt(Float(c.z))); }
-				if(writeA) { *Pointer<UShort>(element + 6) = UShort(RoundInt(Float(c.w))); }
-			}
-			break;
-		case VK_FORMAT_R16G16B16_UINT:
-		case VK_FORMAT_R16G16B16_UNORM:
-		case VK_FORMAT_R16G16B16_USCALED:
+			if(writeG) { *Pointer<Short>(element + 2) = Short(RoundInt(Float(c.y))); }
+		}
+		break;
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16_SSCALED:
+		if(writeR) { *Pointer<Short>(element) = Short(RoundInt(Float(c.x))); }
+		break;
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16A16_UNORM:
+	case VK_FORMAT_R16G16B16A16_USCALED:
+		if(writeRGBA)
+		{
+			*Pointer<UShort4>(element) = UShort4(RoundInt(c));
+		}
+		else
+		{
 			if(writeR) { *Pointer<UShort>(element) = UShort(RoundInt(Float(c.x))); }
 			if(writeG) { *Pointer<UShort>(element + 2) = UShort(RoundInt(Float(c.y))); }
 			if(writeB) { *Pointer<UShort>(element + 4) = UShort(RoundInt(Float(c.z))); }
-			break;
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16_USCALED:
-			if(writeR && writeG)
-			{
-				*Pointer<UShort2>(element) = UShort2(UShort4(RoundInt(c)));
-			}
-			else
-			{
-				if(writeR) { *Pointer<UShort>(element) = UShort(RoundInt(Float(c.x))); }
-				if(writeG) { *Pointer<UShort>(element + 2) = UShort(RoundInt(Float(c.y))); }
-			}
-			break;
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16_USCALED:
+			if(writeA) { *Pointer<UShort>(element + 6) = UShort(RoundInt(Float(c.w))); }
+		}
+		break;
+	case VK_FORMAT_R16G16B16_UINT:
+	case VK_FORMAT_R16G16B16_UNORM:
+	case VK_FORMAT_R16G16B16_USCALED:
+		if(writeR) { *Pointer<UShort>(element) = UShort(RoundInt(Float(c.x))); }
+		if(writeG) { *Pointer<UShort>(element + 2) = UShort(RoundInt(Float(c.y))); }
+		if(writeB) { *Pointer<UShort>(element + 4) = UShort(RoundInt(Float(c.z))); }
+		break;
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16_USCALED:
+		if(writeR && writeG)
+		{
+			*Pointer<UShort2>(element) = UShort2(UShort4(RoundInt(c)));
+		}
+		else
+		{
 			if(writeR) { *Pointer<UShort>(element) = UShort(RoundInt(Float(c.x))); }
-			break;
-		case VK_FORMAT_R32G32B32A32_SINT:
-			if(writeRGBA)
-			{
-				*Pointer<Int4>(element) = RoundInt(c);
-			}
-			else
-			{
-				if(writeR) { *Pointer<Int>(element) = RoundInt(Float(c.x)); }
-				if(writeG) { *Pointer<Int>(element + 4) = RoundInt(Float(c.y)); }
-				if(writeB) { *Pointer<Int>(element + 8) = RoundInt(Float(c.z)); }
-				if(writeA) { *Pointer<Int>(element + 12) = RoundInt(Float(c.w)); }
-			}
-			break;
-		case VK_FORMAT_R32G32B32_SINT:
-			if(writeB) { *Pointer<Int>(element + 8) = RoundInt(Float(c.z)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R32G32_SINT:
-			if(writeG) { *Pointer<Int>(element + 4) = RoundInt(Float(c.y)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R32_SINT:
+			if(writeG) { *Pointer<UShort>(element + 2) = UShort(RoundInt(Float(c.y))); }
+		}
+		break;
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16_USCALED:
+		if(writeR) { *Pointer<UShort>(element) = UShort(RoundInt(Float(c.x))); }
+		break;
+	case VK_FORMAT_R32G32B32A32_SINT:
+		if(writeRGBA)
+		{
+			*Pointer<Int4>(element) = RoundInt(c);
+		}
+		else
+		{
 			if(writeR) { *Pointer<Int>(element) = RoundInt(Float(c.x)); }
-			break;
-		case VK_FORMAT_R32G32B32A32_UINT:
-			if(writeRGBA)
-			{
-				*Pointer<UInt4>(element) = UInt4(RoundInt(c));
-			}
-			else
-			{
-				if(writeR) { *Pointer<UInt>(element) = As<UInt>(RoundInt(Float(c.x))); }
-				if(writeG) { *Pointer<UInt>(element + 4) = As<UInt>(RoundInt(Float(c.y))); }
-				if(writeB) { *Pointer<UInt>(element + 8) = As<UInt>(RoundInt(Float(c.z))); }
-				if(writeA) { *Pointer<UInt>(element + 12) = As<UInt>(RoundInt(Float(c.w))); }
-			}
-			break;
-		case VK_FORMAT_R32G32B32_UINT:
-			if(writeB) { *Pointer<UInt>(element + 8) = As<UInt>(RoundInt(Float(c.z))); }
-			// [[fallthrough]]
-		case VK_FORMAT_R32G32_UINT:
-			if(writeG) { *Pointer<UInt>(element + 4) = As<UInt>(RoundInt(Float(c.y))); }
-			// [[fallthrough]]
-		case VK_FORMAT_R32_UINT:
+			if(writeG) { *Pointer<Int>(element + 4) = RoundInt(Float(c.y)); }
+			if(writeB) { *Pointer<Int>(element + 8) = RoundInt(Float(c.z)); }
+			if(writeA) { *Pointer<Int>(element + 12) = RoundInt(Float(c.w)); }
+		}
+		break;
+	case VK_FORMAT_R32G32B32_SINT:
+		if(writeB) { *Pointer<Int>(element + 8) = RoundInt(Float(c.z)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R32G32_SINT:
+		if(writeG) { *Pointer<Int>(element + 4) = RoundInt(Float(c.y)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R32_SINT:
+		if(writeR) { *Pointer<Int>(element) = RoundInt(Float(c.x)); }
+		break;
+	case VK_FORMAT_R32G32B32A32_UINT:
+		if(writeRGBA)
+		{
+			*Pointer<UInt4>(element) = UInt4(RoundInt(c));
+		}
+		else
+		{
 			if(writeR) { *Pointer<UInt>(element) = As<UInt>(RoundInt(Float(c.x))); }
-			break;
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-			if(writeR && writeG && writeB)
-			{
-				*Pointer<UShort>(element) = UShort(PackFields(RoundInt(c.xyzz), { 11, 5, 0, 0 }));
-			}
-			else
-			{
-				unsigned short mask = (writeB ? 0x001F : 0x0000) | (writeG ? 0x07E0 : 0x0000) | (writeR ? 0xF800 : 0x0000);
-				unsigned short unmask = ~mask;
-				*Pointer<UShort>(element) = (*Pointer<UShort>(element) & UShort(unmask)) |
-				                            (UShort(PackFields(RoundInt(c.xyzz), { 11, 5, 0, 0 })) &
-				                             UShort(mask));
-			}
-			break;
-		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
-			if(writeRGBA)
-			{
-				*Pointer<UShort>(element) = UShort(PackFields(RoundInt(c), { 11, 6, 1, 0 }));
-			}
-			else
-			{
-				unsigned short mask = (writeA ? 0x8000 : 0x0000) |
-				                      (writeR ? 0x7C00 : 0x0000) |
-				                      (writeG ? 0x03E0 : 0x0000) |
-				                      (writeB ? 0x001F : 0x0000);
-				unsigned short unmask = ~mask;
-				*Pointer<UShort>(element) = (*Pointer<UShort>(element) & UShort(unmask)) |
-				                            (UShort(PackFields(RoundInt(c), { 11, 6, 1, 0 })) &
-				                             UShort(mask));
-			}
-			break;
-		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
-			if(writeRGBA)
-			{
-				*Pointer<UShort>(element) = UShort(PackFields(RoundInt(c), { 1, 6, 11, 0 }));
-			}
-			else
-			{
-				unsigned short mask = (writeA ? 0x8000 : 0x0000) |
-				                      (writeR ? 0x7C00 : 0x0000) |
-				                      (writeG ? 0x03E0 : 0x0000) |
-				                      (writeB ? 0x001F : 0x0000);
-				unsigned short unmask = ~mask;
-				*Pointer<UShort>(element) = (*Pointer<UShort>(element) & UShort(unmask)) |
-				                            (UShort(PackFields(RoundInt(c), { 1, 6, 11, 0 })) &
-				                             UShort(mask));
-			}
-			break;
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-			if(writeRGBA)
-			{
-				*Pointer<UShort>(element) = UShort(PackFields(RoundInt(c), { 10, 5, 0, 15 }));
-			}
-			else
-			{
-				unsigned short mask = (writeA ? 0x8000 : 0x0000) |
-				                      (writeR ? 0x7C00 : 0x0000) |
-				                      (writeG ? 0x03E0 : 0x0000) |
-				                      (writeB ? 0x001F : 0x0000);
-				unsigned short unmask = ~mask;
-				*Pointer<UShort>(element) = (*Pointer<UShort>(element) & UShort(unmask)) |
-				                            (UShort(PackFields(RoundInt(c), { 10, 5, 0, 15 })) &
-				                             UShort(mask));
-			}
-			break;
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-			if(writeRGBA)
-			{
-				*Pointer<UInt>(element) = As<UInt>(PackFields(RoundInt(c), { 0, 10, 20, 30 }));
-			}
-			else
-			{
-				unsigned int mask = (writeA ? 0xC0000000 : 0x0000) |
-				                    (writeB ? 0x3FF00000 : 0x0000) |
-				                    (writeG ? 0x000FFC00 : 0x0000) |
-				                    (writeR ? 0x000003FF : 0x0000);
-				unsigned int unmask = ~mask;
-				*Pointer<UInt>(element) = (*Pointer<UInt>(element) & UInt(unmask)) |
-				                          (As<UInt>(PackFields(RoundInt(c), { 0, 10, 20, 30 })) &
-				                           UInt(mask));
-			}
-			break;
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-			if(writeRGBA)
-			{
-				*Pointer<UInt>(element) = As<UInt>(PackFields(RoundInt(c), { 20, 10, 0, 30 }));
-			}
-			else
-			{
-				unsigned int mask = (writeA ? 0xC0000000 : 0x0000) |
-				                    (writeR ? 0x3FF00000 : 0x0000) |
-				                    (writeG ? 0x000FFC00 : 0x0000) |
-				                    (writeB ? 0x000003FF : 0x0000);
-				unsigned int unmask = ~mask;
-				*Pointer<UInt>(element) = (*Pointer<UInt>(element) & UInt(unmask)) |
-				                          (As<UInt>(PackFields(RoundInt(c), { 20, 10, 0, 30 })) &
-				                           UInt(mask));
-			}
-			break;
-		case VK_FORMAT_D16_UNORM:
-			*Pointer<UShort>(element) = UShort(RoundInt(Float(c.x)));
-			break;
-		case VK_FORMAT_X8_D24_UNORM_PACK32:
-			*Pointer<UInt>(element) = UInt(RoundInt(Float(c.x)) << 8);
-			break;
-		case VK_FORMAT_D32_SFLOAT:
-			*Pointer<Float>(element) = c.x;
-			break;
-		case VK_FORMAT_S8_UINT:
-			*Pointer<Byte>(element) = Byte(RoundInt(Float(c.x)));
-			break;
-		default:
-			UNSUPPORTED("Blitter destination format %d", (int)state.destFormat);
-			break;
+			if(writeG) { *Pointer<UInt>(element + 4) = As<UInt>(RoundInt(Float(c.y))); }
+			if(writeB) { *Pointer<UInt>(element + 8) = As<UInt>(RoundInt(Float(c.z))); }
+			if(writeA) { *Pointer<UInt>(element + 12) = As<UInt>(RoundInt(Float(c.w))); }
+		}
+		break;
+	case VK_FORMAT_R32G32B32_UINT:
+		if(writeB) { *Pointer<UInt>(element + 8) = As<UInt>(RoundInt(Float(c.z))); }
+		// [[fallthrough]]
+	case VK_FORMAT_R32G32_UINT:
+		if(writeG) { *Pointer<UInt>(element + 4) = As<UInt>(RoundInt(Float(c.y))); }
+		// [[fallthrough]]
+	case VK_FORMAT_R32_UINT:
+		if(writeR) { *Pointer<UInt>(element) = As<UInt>(RoundInt(Float(c.x))); }
+		break;
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		if(writeR && writeG && writeB)
+		{
+			*Pointer<UShort>(element) = UShort(PackFields(RoundInt(c.xyzz), { 11, 5, 0, 0 }));
+		}
+		else
+		{
+			unsigned short mask = (writeB ? 0x001F : 0x0000) | (writeG ? 0x07E0 : 0x0000) | (writeR ? 0xF800 : 0x0000);
+			unsigned short unmask = ~mask;
+			*Pointer<UShort>(element) = (*Pointer<UShort>(element) & UShort(unmask)) |
+			                            (UShort(PackFields(RoundInt(c.xyzz), { 11, 5, 0, 0 })) &
+			                             UShort(mask));
+		}
+		break;
+	case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+		if(writeRGBA)
+		{
+			*Pointer<UShort>(element) = UShort(PackFields(RoundInt(c), { 11, 6, 1, 0 }));
+		}
+		else
+		{
+			unsigned short mask = (writeA ? 0x8000 : 0x0000) |
+			                      (writeR ? 0x7C00 : 0x0000) |
+			                      (writeG ? 0x03E0 : 0x0000) |
+			                      (writeB ? 0x001F : 0x0000);
+			unsigned short unmask = ~mask;
+			*Pointer<UShort>(element) = (*Pointer<UShort>(element) & UShort(unmask)) |
+			                            (UShort(PackFields(RoundInt(c), { 11, 6, 1, 0 })) &
+			                             UShort(mask));
+		}
+		break;
+	case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+		if(writeRGBA)
+		{
+			*Pointer<UShort>(element) = UShort(PackFields(RoundInt(c), { 1, 6, 11, 0 }));
+		}
+		else
+		{
+			unsigned short mask = (writeA ? 0x8000 : 0x0000) |
+			                      (writeR ? 0x7C00 : 0x0000) |
+			                      (writeG ? 0x03E0 : 0x0000) |
+			                      (writeB ? 0x001F : 0x0000);
+			unsigned short unmask = ~mask;
+			*Pointer<UShort>(element) = (*Pointer<UShort>(element) & UShort(unmask)) |
+			                            (UShort(PackFields(RoundInt(c), { 1, 6, 11, 0 })) &
+			                             UShort(mask));
+		}
+		break;
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		if(writeRGBA)
+		{
+			*Pointer<UShort>(element) = UShort(PackFields(RoundInt(c), { 10, 5, 0, 15 }));
+		}
+		else
+		{
+			unsigned short mask = (writeA ? 0x8000 : 0x0000) |
+			                      (writeR ? 0x7C00 : 0x0000) |
+			                      (writeG ? 0x03E0 : 0x0000) |
+			                      (writeB ? 0x001F : 0x0000);
+			unsigned short unmask = ~mask;
+			*Pointer<UShort>(element) = (*Pointer<UShort>(element) & UShort(unmask)) |
+			                            (UShort(PackFields(RoundInt(c), { 10, 5, 0, 15 })) &
+			                             UShort(mask));
+		}
+		break;
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+		if(writeRGBA)
+		{
+			*Pointer<UInt>(element) = As<UInt>(PackFields(RoundInt(c), { 0, 10, 20, 30 }));
+		}
+		else
+		{
+			unsigned int mask = (writeA ? 0xC0000000 : 0x0000) |
+			                    (writeB ? 0x3FF00000 : 0x0000) |
+			                    (writeG ? 0x000FFC00 : 0x0000) |
+			                    (writeR ? 0x000003FF : 0x0000);
+			unsigned int unmask = ~mask;
+			*Pointer<UInt>(element) = (*Pointer<UInt>(element) & UInt(unmask)) |
+			                          (As<UInt>(PackFields(RoundInt(c), { 0, 10, 20, 30 })) &
+			                           UInt(mask));
+		}
+		break;
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+		if(writeRGBA)
+		{
+			*Pointer<UInt>(element) = As<UInt>(PackFields(RoundInt(c), { 20, 10, 0, 30 }));
+		}
+		else
+		{
+			unsigned int mask = (writeA ? 0xC0000000 : 0x0000) |
+			                    (writeR ? 0x3FF00000 : 0x0000) |
+			                    (writeG ? 0x000FFC00 : 0x0000) |
+			                    (writeB ? 0x000003FF : 0x0000);
+			unsigned int unmask = ~mask;
+			*Pointer<UInt>(element) = (*Pointer<UInt>(element) & UInt(unmask)) |
+			                          (As<UInt>(PackFields(RoundInt(c), { 20, 10, 0, 30 })) &
+			                           UInt(mask));
+		}
+		break;
+	case VK_FORMAT_D16_UNORM:
+		*Pointer<UShort>(element) = UShort(RoundInt(Float(c.x)));
+		break;
+	case VK_FORMAT_X8_D24_UNORM_PACK32:
+		*Pointer<UInt>(element) = UInt(RoundInt(Float(c.x)) << 8);
+		break;
+	case VK_FORMAT_D32_SFLOAT:
+		*Pointer<Float>(element) = c.x;
+		break;
+	case VK_FORMAT_S8_UINT:
+		*Pointer<Byte>(element) = Byte(RoundInt(Float(c.x)));
+		break;
+	default:
+		UNSUPPORTED("Blitter destination format %d", (int)state.destFormat);
+		break;
 	}
 }
 
@@ -1020,75 +1020,75 @@
 
 	switch(state.sourceFormat)
 	{
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_R8G8B8A8_SINT:
-			c = Insert(c, Int(*Pointer<SByte>(element + 3)), 3);
-			c = Insert(c, Int(*Pointer<SByte>(element + 2)), 2);
-			// [[fallthrough]]
-		case VK_FORMAT_R8G8_SINT:
-			c = Insert(c, Int(*Pointer<SByte>(element + 1)), 1);
-			// [[fallthrough]]
-		case VK_FORMAT_R8_SINT:
-			c = Insert(c, Int(*Pointer<SByte>(element)), 0);
-			break;
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-			c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x000003FF))), 0);
-			c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x000FFC00)) >> 10), 1);
-			c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x3FF00000)) >> 20), 2);
-			c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0xC0000000)) >> 30), 3);
-			break;
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-			c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x000003FF))), 2);
-			c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x000FFC00)) >> 10), 1);
-			c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x3FF00000)) >> 20), 0);
-			c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0xC0000000)) >> 30), 3);
-			break;
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_R8G8B8A8_UINT:
-			c = Insert(c, Int(*Pointer<Byte>(element + 3)), 3);
-			c = Insert(c, Int(*Pointer<Byte>(element + 2)), 2);
-			// [[fallthrough]]
-		case VK_FORMAT_R8G8_UINT:
-			c = Insert(c, Int(*Pointer<Byte>(element + 1)), 1);
-			// [[fallthrough]]
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_S8_UINT:
-			c = Insert(c, Int(*Pointer<Byte>(element)), 0);
-			break;
-		case VK_FORMAT_R16G16B16A16_SINT:
-			c = Insert(c, Int(*Pointer<Short>(element + 6)), 3);
-			c = Insert(c, Int(*Pointer<Short>(element + 4)), 2);
-			// [[fallthrough]]
-		case VK_FORMAT_R16G16_SINT:
-			c = Insert(c, Int(*Pointer<Short>(element + 2)), 1);
-			// [[fallthrough]]
-		case VK_FORMAT_R16_SINT:
-			c = Insert(c, Int(*Pointer<Short>(element)), 0);
-			break;
-		case VK_FORMAT_R16G16B16A16_UINT:
-			c = Insert(c, Int(*Pointer<UShort>(element + 6)), 3);
-			c = Insert(c, Int(*Pointer<UShort>(element + 4)), 2);
-			// [[fallthrough]]
-		case VK_FORMAT_R16G16_UINT:
-			c = Insert(c, Int(*Pointer<UShort>(element + 2)), 1);
-			// [[fallthrough]]
-		case VK_FORMAT_R16_UINT:
-			c = Insert(c, Int(*Pointer<UShort>(element)), 0);
-			break;
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-			c = *Pointer<Int4>(element);
-			break;
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_UINT:
-			c = Insert(c, *Pointer<Int>(element + 4), 1);
-			// [[fallthrough]]
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_UINT:
-			c = Insert(c, *Pointer<Int>(element), 0);
-			break;
-		default:
-			UNSUPPORTED("Blitter source format %d", (int)state.sourceFormat);
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_R8G8B8A8_SINT:
+		c = Insert(c, Int(*Pointer<SByte>(element + 3)), 3);
+		c = Insert(c, Int(*Pointer<SByte>(element + 2)), 2);
+		// [[fallthrough]]
+	case VK_FORMAT_R8G8_SINT:
+		c = Insert(c, Int(*Pointer<SByte>(element + 1)), 1);
+		// [[fallthrough]]
+	case VK_FORMAT_R8_SINT:
+		c = Insert(c, Int(*Pointer<SByte>(element)), 0);
+		break;
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x000003FF))), 0);
+		c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x000FFC00)) >> 10), 1);
+		c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x3FF00000)) >> 20), 2);
+		c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0xC0000000)) >> 30), 3);
+		break;
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x000003FF))), 2);
+		c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x000FFC00)) >> 10), 1);
+		c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x3FF00000)) >> 20), 0);
+		c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0xC0000000)) >> 30), 3);
+		break;
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_R8G8B8A8_UINT:
+		c = Insert(c, Int(*Pointer<Byte>(element + 3)), 3);
+		c = Insert(c, Int(*Pointer<Byte>(element + 2)), 2);
+		// [[fallthrough]]
+	case VK_FORMAT_R8G8_UINT:
+		c = Insert(c, Int(*Pointer<Byte>(element + 1)), 1);
+		// [[fallthrough]]
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_S8_UINT:
+		c = Insert(c, Int(*Pointer<Byte>(element)), 0);
+		break;
+	case VK_FORMAT_R16G16B16A16_SINT:
+		c = Insert(c, Int(*Pointer<Short>(element + 6)), 3);
+		c = Insert(c, Int(*Pointer<Short>(element + 4)), 2);
+		// [[fallthrough]]
+	case VK_FORMAT_R16G16_SINT:
+		c = Insert(c, Int(*Pointer<Short>(element + 2)), 1);
+		// [[fallthrough]]
+	case VK_FORMAT_R16_SINT:
+		c = Insert(c, Int(*Pointer<Short>(element)), 0);
+		break;
+	case VK_FORMAT_R16G16B16A16_UINT:
+		c = Insert(c, Int(*Pointer<UShort>(element + 6)), 3);
+		c = Insert(c, Int(*Pointer<UShort>(element + 4)), 2);
+		// [[fallthrough]]
+	case VK_FORMAT_R16G16_UINT:
+		c = Insert(c, Int(*Pointer<UShort>(element + 2)), 1);
+		// [[fallthrough]]
+	case VK_FORMAT_R16_UINT:
+		c = Insert(c, Int(*Pointer<UShort>(element)), 0);
+		break;
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+		c = *Pointer<Int4>(element);
+		break;
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_UINT:
+		c = Insert(c, *Pointer<Int>(element + 4), 1);
+		// [[fallthrough]]
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_UINT:
+		c = Insert(c, *Pointer<Int>(element), 0);
+		break;
+	default:
+		UNSUPPORTED("Blitter source format %d", (int)state.sourceFormat);
 	}
 
 	return c;
@@ -1104,235 +1104,235 @@
 
 	switch(state.destFormat)
 	{
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-			c = Min(As<UInt4>(c), UInt4(0x03FF, 0x03FF, 0x03FF, 0x0003));
-			break;
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8_UINT:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8G8B8A8_USCALED:
-		case VK_FORMAT_R8G8B8_USCALED:
-		case VK_FORMAT_R8G8_USCALED:
-		case VK_FORMAT_R8_USCALED:
-		case VK_FORMAT_S8_UINT:
-			c = Min(As<UInt4>(c), UInt4(0xFF));
-			break;
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16_UINT:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16G16B16A16_USCALED:
-		case VK_FORMAT_R16G16B16_USCALED:
-		case VK_FORMAT_R16G16_USCALED:
-		case VK_FORMAT_R16_USCALED:
-			c = Min(As<UInt4>(c), UInt4(0xFFFF));
-			break;
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8G8B8A8_SSCALED:
-		case VK_FORMAT_R8G8B8_SSCALED:
-		case VK_FORMAT_R8G8_SSCALED:
-		case VK_FORMAT_R8_SSCALED:
-			c = Min(Max(c, Int4(-0x80)), Int4(0x7F));
-			break;
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16_SINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16G16B16A16_SSCALED:
-		case VK_FORMAT_R16G16B16_SSCALED:
-		case VK_FORMAT_R16G16_SSCALED:
-		case VK_FORMAT_R16_SSCALED:
-			c = Min(Max(c, Int4(-0x8000)), Int4(0x7FFF));
-			break;
-		default:
-			break;
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		c = Min(As<UInt4>(c), UInt4(0x03FF, 0x03FF, 0x03FF, 0x0003));
+		break;
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8_UINT:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8G8B8A8_USCALED:
+	case VK_FORMAT_R8G8B8_USCALED:
+	case VK_FORMAT_R8G8_USCALED:
+	case VK_FORMAT_R8_USCALED:
+	case VK_FORMAT_S8_UINT:
+		c = Min(As<UInt4>(c), UInt4(0xFF));
+		break;
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16_UINT:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16G16B16A16_USCALED:
+	case VK_FORMAT_R16G16B16_USCALED:
+	case VK_FORMAT_R16G16_USCALED:
+	case VK_FORMAT_R16_USCALED:
+		c = Min(As<UInt4>(c), UInt4(0xFFFF));
+		break;
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8G8B8A8_SSCALED:
+	case VK_FORMAT_R8G8B8_SSCALED:
+	case VK_FORMAT_R8G8_SSCALED:
+	case VK_FORMAT_R8_SSCALED:
+		c = Min(Max(c, Int4(-0x80)), Int4(0x7F));
+		break;
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16_SINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16G16B16A16_SSCALED:
+	case VK_FORMAT_R16G16B16_SSCALED:
+	case VK_FORMAT_R16G16_SSCALED:
+	case VK_FORMAT_R16_SSCALED:
+		c = Min(Max(c, Int4(-0x8000)), Int4(0x7FFF));
+		break;
+	default:
+		break;
 	}
 
 	switch(state.destFormat)
 	{
-		case VK_FORMAT_B8G8R8A8_SINT:
-		case VK_FORMAT_B8G8R8A8_SSCALED:
-			if(writeA) { *Pointer<SByte>(element + 3) = SByte(Extract(c, 3)); }
-			// [[fallthrough]]
-		case VK_FORMAT_B8G8R8_SINT:
-		case VK_FORMAT_B8G8R8_SSCALED:
-			if(writeB) { *Pointer<SByte>(element) = SByte(Extract(c, 2)); }
-			if(writeG) { *Pointer<SByte>(element + 1) = SByte(Extract(c, 1)); }
-			if(writeR) { *Pointer<SByte>(element + 2) = SByte(Extract(c, 0)); }
-			break;
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_R8G8B8A8_SSCALED:
-		case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
-			if(writeA) { *Pointer<SByte>(element + 3) = SByte(Extract(c, 3)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R8G8B8_SINT:
-		case VK_FORMAT_R8G8B8_SSCALED:
-			if(writeB) { *Pointer<SByte>(element + 2) = SByte(Extract(c, 2)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8_SSCALED:
-			if(writeG) { *Pointer<SByte>(element + 1) = SByte(Extract(c, 1)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8_SSCALED:
-			if(writeR) { *Pointer<SByte>(element) = SByte(Extract(c, 0)); }
-			break;
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
-		case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
-			if(writeRGBA)
-			{
-				*Pointer<UInt>(element) = As<UInt>(PackFields(c, { 0, 10, 20, 30 }));
-			}
-			else
-			{
-				unsigned int mask = (writeA ? 0xC0000000 : 0x0000) |
-				                    (writeB ? 0x3FF00000 : 0x0000) |
-				                    (writeG ? 0x000FFC00 : 0x0000) |
-				                    (writeR ? 0x000003FF : 0x0000);
-				unsigned int unmask = ~mask;
-				*Pointer<UInt>(element) = (*Pointer<UInt>(element) & UInt(unmask)) |
-				                          (As<UInt>(PackFields(c, { 0, 10, 20, 30 })) & UInt(mask));
-			}
-			break;
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
-		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
-			if(writeRGBA)
-			{
-				*Pointer<UInt>(element) = As<UInt>(PackFields(c, { 20, 10, 0, 30 }));
-			}
-			else
-			{
-				unsigned int mask = (writeA ? 0xC0000000 : 0x0000) |
-				                    (writeR ? 0x3FF00000 : 0x0000) |
-				                    (writeG ? 0x000FFC00 : 0x0000) |
-				                    (writeB ? 0x000003FF : 0x0000);
-				unsigned int unmask = ~mask;
-				*Pointer<UInt>(element) = (*Pointer<UInt>(element) & UInt(unmask)) |
-				                          (As<UInt>(PackFields(c, { 20, 10, 0, 30 })) & UInt(mask));
-			}
-			break;
-		case VK_FORMAT_B8G8R8A8_UINT:
-		case VK_FORMAT_B8G8R8A8_USCALED:
-			if(writeA) { *Pointer<Byte>(element + 3) = Byte(Extract(c, 3)); }
-			// [[fallthrough]]
-		case VK_FORMAT_B8G8R8_UINT:
-		case VK_FORMAT_B8G8R8_USCALED:
-		case VK_FORMAT_B8G8R8_SRGB:
-			if(writeB) { *Pointer<Byte>(element) = Byte(Extract(c, 2)); }
-			if(writeG) { *Pointer<Byte>(element + 1) = Byte(Extract(c, 1)); }
-			if(writeR) { *Pointer<Byte>(element + 2) = Byte(Extract(c, 0)); }
-			break;
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_USCALED:
-		case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
-			if(writeA) { *Pointer<Byte>(element + 3) = Byte(Extract(c, 3)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R8G8B8_UINT:
-		case VK_FORMAT_R8G8B8_USCALED:
-			if(writeB) { *Pointer<Byte>(element + 2) = Byte(Extract(c, 2)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_USCALED:
-			if(writeG) { *Pointer<Byte>(element + 1) = Byte(Extract(c, 1)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_USCALED:
-		case VK_FORMAT_S8_UINT:
-			if(writeR) { *Pointer<Byte>(element) = Byte(Extract(c, 0)); }
-			break;
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_SSCALED:
-			if(writeA) { *Pointer<Short>(element + 6) = Short(Extract(c, 3)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R16G16B16_SINT:
-		case VK_FORMAT_R16G16B16_SSCALED:
-			if(writeB) { *Pointer<Short>(element + 4) = Short(Extract(c, 2)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_SSCALED:
-			if(writeG) { *Pointer<Short>(element + 2) = Short(Extract(c, 1)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_SSCALED:
-			if(writeR) { *Pointer<Short>(element) = Short(Extract(c, 0)); }
-			break;
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16A16_USCALED:
-			if(writeA) { *Pointer<UShort>(element + 6) = UShort(Extract(c, 3)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R16G16B16_UINT:
-		case VK_FORMAT_R16G16B16_USCALED:
-			if(writeB) { *Pointer<UShort>(element + 4) = UShort(Extract(c, 2)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16_USCALED:
-			if(writeG) { *Pointer<UShort>(element + 2) = UShort(Extract(c, 1)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16_USCALED:
-			if(writeR) { *Pointer<UShort>(element) = UShort(Extract(c, 0)); }
-			break;
-		case VK_FORMAT_R32G32B32A32_SINT:
-			if(writeRGBA)
-			{
-				*Pointer<Int4>(element) = c;
-			}
-			else
-			{
-				if(writeR) { *Pointer<Int>(element) = Extract(c, 0); }
-				if(writeG) { *Pointer<Int>(element + 4) = Extract(c, 1); }
-				if(writeB) { *Pointer<Int>(element + 8) = Extract(c, 2); }
-				if(writeA) { *Pointer<Int>(element + 12) = Extract(c, 3); }
-			}
-			break;
-		case VK_FORMAT_R32G32B32_SINT:
+	case VK_FORMAT_B8G8R8A8_SINT:
+	case VK_FORMAT_B8G8R8A8_SSCALED:
+		if(writeA) { *Pointer<SByte>(element + 3) = SByte(Extract(c, 3)); }
+		// [[fallthrough]]
+	case VK_FORMAT_B8G8R8_SINT:
+	case VK_FORMAT_B8G8R8_SSCALED:
+		if(writeB) { *Pointer<SByte>(element) = SByte(Extract(c, 2)); }
+		if(writeG) { *Pointer<SByte>(element + 1) = SByte(Extract(c, 1)); }
+		if(writeR) { *Pointer<SByte>(element + 2) = SByte(Extract(c, 0)); }
+		break;
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_R8G8B8A8_SSCALED:
+	case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+		if(writeA) { *Pointer<SByte>(element + 3) = SByte(Extract(c, 3)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R8G8B8_SINT:
+	case VK_FORMAT_R8G8B8_SSCALED:
+		if(writeB) { *Pointer<SByte>(element + 2) = SByte(Extract(c, 2)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8_SSCALED:
+		if(writeG) { *Pointer<SByte>(element + 1) = SByte(Extract(c, 1)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8_SSCALED:
+		if(writeR) { *Pointer<SByte>(element) = SByte(Extract(c, 0)); }
+		break;
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
+	case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
+		if(writeRGBA)
+		{
+			*Pointer<UInt>(element) = As<UInt>(PackFields(c, { 0, 10, 20, 30 }));
+		}
+		else
+		{
+			unsigned int mask = (writeA ? 0xC0000000 : 0x0000) |
+			                    (writeB ? 0x3FF00000 : 0x0000) |
+			                    (writeG ? 0x000FFC00 : 0x0000) |
+			                    (writeR ? 0x000003FF : 0x0000);
+			unsigned int unmask = ~mask;
+			*Pointer<UInt>(element) = (*Pointer<UInt>(element) & UInt(unmask)) |
+			                          (As<UInt>(PackFields(c, { 0, 10, 20, 30 })) & UInt(mask));
+		}
+		break;
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
+	case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
+		if(writeRGBA)
+		{
+			*Pointer<UInt>(element) = As<UInt>(PackFields(c, { 20, 10, 0, 30 }));
+		}
+		else
+		{
+			unsigned int mask = (writeA ? 0xC0000000 : 0x0000) |
+			                    (writeR ? 0x3FF00000 : 0x0000) |
+			                    (writeG ? 0x000FFC00 : 0x0000) |
+			                    (writeB ? 0x000003FF : 0x0000);
+			unsigned int unmask = ~mask;
+			*Pointer<UInt>(element) = (*Pointer<UInt>(element) & UInt(unmask)) |
+			                          (As<UInt>(PackFields(c, { 20, 10, 0, 30 })) & UInt(mask));
+		}
+		break;
+	case VK_FORMAT_B8G8R8A8_UINT:
+	case VK_FORMAT_B8G8R8A8_USCALED:
+		if(writeA) { *Pointer<Byte>(element + 3) = Byte(Extract(c, 3)); }
+		// [[fallthrough]]
+	case VK_FORMAT_B8G8R8_UINT:
+	case VK_FORMAT_B8G8R8_USCALED:
+	case VK_FORMAT_B8G8R8_SRGB:
+		if(writeB) { *Pointer<Byte>(element) = Byte(Extract(c, 2)); }
+		if(writeG) { *Pointer<Byte>(element + 1) = Byte(Extract(c, 1)); }
+		if(writeR) { *Pointer<Byte>(element + 2) = Byte(Extract(c, 0)); }
+		break;
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_USCALED:
+	case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+		if(writeA) { *Pointer<Byte>(element + 3) = Byte(Extract(c, 3)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R8G8B8_UINT:
+	case VK_FORMAT_R8G8B8_USCALED:
+		if(writeB) { *Pointer<Byte>(element + 2) = Byte(Extract(c, 2)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_USCALED:
+		if(writeG) { *Pointer<Byte>(element + 1) = Byte(Extract(c, 1)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_USCALED:
+	case VK_FORMAT_S8_UINT:
+		if(writeR) { *Pointer<Byte>(element) = Byte(Extract(c, 0)); }
+		break;
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_SSCALED:
+		if(writeA) { *Pointer<Short>(element + 6) = Short(Extract(c, 3)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R16G16B16_SINT:
+	case VK_FORMAT_R16G16B16_SSCALED:
+		if(writeB) { *Pointer<Short>(element + 4) = Short(Extract(c, 2)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_SSCALED:
+		if(writeG) { *Pointer<Short>(element + 2) = Short(Extract(c, 1)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_SSCALED:
+		if(writeR) { *Pointer<Short>(element) = Short(Extract(c, 0)); }
+		break;
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16A16_USCALED:
+		if(writeA) { *Pointer<UShort>(element + 6) = UShort(Extract(c, 3)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R16G16B16_UINT:
+	case VK_FORMAT_R16G16B16_USCALED:
+		if(writeB) { *Pointer<UShort>(element + 4) = UShort(Extract(c, 2)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16_USCALED:
+		if(writeG) { *Pointer<UShort>(element + 2) = UShort(Extract(c, 1)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16_USCALED:
+		if(writeR) { *Pointer<UShort>(element) = UShort(Extract(c, 0)); }
+		break;
+	case VK_FORMAT_R32G32B32A32_SINT:
+		if(writeRGBA)
+		{
+			*Pointer<Int4>(element) = c;
+		}
+		else
+		{
 			if(writeR) { *Pointer<Int>(element) = Extract(c, 0); }
 			if(writeG) { *Pointer<Int>(element + 4) = Extract(c, 1); }
 			if(writeB) { *Pointer<Int>(element + 8) = Extract(c, 2); }
-			break;
-		case VK_FORMAT_R32G32_SINT:
-			if(writeR) { *Pointer<Int>(element) = Extract(c, 0); }
-			if(writeG) { *Pointer<Int>(element + 4) = Extract(c, 1); }
-			break;
-		case VK_FORMAT_R32_SINT:
-			if(writeR) { *Pointer<Int>(element) = Extract(c, 0); }
-			break;
-		case VK_FORMAT_R32G32B32A32_UINT:
-			if(writeRGBA)
-			{
-				*Pointer<UInt4>(element) = As<UInt4>(c);
-			}
-			else
-			{
-				if(writeR) { *Pointer<UInt>(element) = As<UInt>(Extract(c, 0)); }
-				if(writeG) { *Pointer<UInt>(element + 4) = As<UInt>(Extract(c, 1)); }
-				if(writeB) { *Pointer<UInt>(element + 8) = As<UInt>(Extract(c, 2)); }
-				if(writeA) { *Pointer<UInt>(element + 12) = As<UInt>(Extract(c, 3)); }
-			}
-			break;
-		case VK_FORMAT_R32G32B32_UINT:
-			if(writeB) { *Pointer<UInt>(element + 8) = As<UInt>(Extract(c, 2)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R32G32_UINT:
-			if(writeG) { *Pointer<UInt>(element + 4) = As<UInt>(Extract(c, 1)); }
-			// [[fallthrough]]
-		case VK_FORMAT_R32_UINT:
+			if(writeA) { *Pointer<Int>(element + 12) = Extract(c, 3); }
+		}
+		break;
+	case VK_FORMAT_R32G32B32_SINT:
+		if(writeR) { *Pointer<Int>(element) = Extract(c, 0); }
+		if(writeG) { *Pointer<Int>(element + 4) = Extract(c, 1); }
+		if(writeB) { *Pointer<Int>(element + 8) = Extract(c, 2); }
+		break;
+	case VK_FORMAT_R32G32_SINT:
+		if(writeR) { *Pointer<Int>(element) = Extract(c, 0); }
+		if(writeG) { *Pointer<Int>(element + 4) = Extract(c, 1); }
+		break;
+	case VK_FORMAT_R32_SINT:
+		if(writeR) { *Pointer<Int>(element) = Extract(c, 0); }
+		break;
+	case VK_FORMAT_R32G32B32A32_UINT:
+		if(writeRGBA)
+		{
+			*Pointer<UInt4>(element) = As<UInt4>(c);
+		}
+		else
+		{
 			if(writeR) { *Pointer<UInt>(element) = As<UInt>(Extract(c, 0)); }
-			break;
-		default:
-			UNSUPPORTED("Blitter destination format %d", (int)state.destFormat);
+			if(writeG) { *Pointer<UInt>(element + 4) = As<UInt>(Extract(c, 1)); }
+			if(writeB) { *Pointer<UInt>(element + 8) = As<UInt>(Extract(c, 2)); }
+			if(writeA) { *Pointer<UInt>(element + 12) = As<UInt>(Extract(c, 3)); }
+		}
+		break;
+	case VK_FORMAT_R32G32B32_UINT:
+		if(writeB) { *Pointer<UInt>(element + 8) = As<UInt>(Extract(c, 2)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R32G32_UINT:
+		if(writeG) { *Pointer<UInt>(element + 4) = As<UInt>(Extract(c, 1)); }
+		// [[fallthrough]]
+	case VK_FORMAT_R32_UINT:
+		if(writeR) { *Pointer<UInt>(element) = As<UInt>(Extract(c, 0)); }
+		break;
+	default:
+		UNSUPPORTED("Blitter destination format %d", (int)state.destFormat);
 	}
 }
 
@@ -1348,14 +1348,14 @@
 		// then the whole range of the int or uint color must be scaled between 0 and 1.
 		switch(state.sourceFormat)
 		{
-			case VK_FORMAT_R32G32B32A32_SINT:
-				unscale = float4(static_cast<float>(0x7FFFFFFF));
-				break;
-			case VK_FORMAT_R32G32B32A32_UINT:
-				unscale = float4(static_cast<float>(0xFFFFFFFF));
-				break;
-			default:
-				UNSUPPORTED("Blitter source format %d", (int)state.sourceFormat);
+		case VK_FORMAT_R32G32B32A32_SINT:
+			unscale = float4(static_cast<float>(0x7FFFFFFF));
+			break;
+		case VK_FORMAT_R32G32B32A32_UINT:
+			unscale = float4(static_cast<float>(0xFFFFFFFF));
+			break;
+		default:
+			UNSUPPORTED("Blitter source format %d", (int)state.sourceFormat);
 		}
 	}
 	else
diff --git a/src/Device/Context.cpp b/src/Device/Context.cpp
index cfb8567..2cf2d19 100644
--- a/src/Device/Context.cpp
+++ b/src/Device/Context.cpp
@@ -25,20 +25,20 @@
 {
 	switch(topology)
 	{
-		case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
-			return vertexCount;
-		case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
-			return vertexCount / 2;
-		case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
-			return std::max<uint32_t>(vertexCount, 1) - 1;
-		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
-			return vertexCount / 3;
-		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
-			return std::max<uint32_t>(vertexCount, 2) - 2;
-		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
-			return std::max<uint32_t>(vertexCount, 2) - 2;
-		default:
-			UNSUPPORTED("VkPrimitiveTopology %d", int(topology));
+	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
+		return vertexCount;
+	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
+		return vertexCount / 2;
+	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
+		return std::max<uint32_t>(vertexCount, 1) - 1;
+	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
+		return vertexCount / 3;
+	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
+		return std::max<uint32_t>(vertexCount, 2) - 2;
+	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
+		return std::max<uint32_t>(vertexCount, 2) - 2;
+	default:
+		UNSUPPORTED("VkPrimitiveTopology %d", int(topology));
 	}
 
 	return 0;
@@ -114,14 +114,14 @@
 		{
 			switch(indexType)
 			{
-				case VK_INDEX_TYPE_UINT16:
-					ProcessPrimitiveRestart(static_cast<uint16_t *>(indexBuffer), topology, count, indexBuffers);
-					break;
-				case VK_INDEX_TYPE_UINT32:
-					ProcessPrimitiveRestart(static_cast<uint32_t *>(indexBuffer), topology, count, indexBuffers);
-					break;
-				default:
-					UNSUPPORTED("VkIndexType %d", int(indexType));
+			case VK_INDEX_TYPE_UINT16:
+				ProcessPrimitiveRestart(static_cast<uint16_t *>(indexBuffer), topology, count, indexBuffers);
+				break;
+			case VK_INDEX_TYPE_UINT32:
+				ProcessPrimitiveRestart(static_cast<uint32_t *>(indexBuffer), topology, count, indexBuffers);
+				break;
+			default:
+				UNSUPPORTED("VkIndexType %d", int(indexType));
 			}
 		}
 		else
@@ -266,20 +266,20 @@
 			VkDynamicState dynamicState = pCreateInfo->pDynamicState->pDynamicStates[i];
 			switch(dynamicState)
 			{
-				case VK_DYNAMIC_STATE_VIEWPORT:
-				case VK_DYNAMIC_STATE_SCISSOR:
-				case VK_DYNAMIC_STATE_LINE_WIDTH:
-				case VK_DYNAMIC_STATE_DEPTH_BIAS:
-				case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
-				case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
-				case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
-				case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
-				case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
-					ASSERT(dynamicState < (sizeof(dynamicStateFlags) * 8));
-					dynamicStateFlags |= (1 << dynamicState);
-					break;
-				default:
-					UNSUPPORTED("VkDynamicState %d", int(dynamicState));
+			case VK_DYNAMIC_STATE_VIEWPORT:
+			case VK_DYNAMIC_STATE_SCISSOR:
+			case VK_DYNAMIC_STATE_LINE_WIDTH:
+			case VK_DYNAMIC_STATE_DEPTH_BIAS:
+			case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
+			case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
+			case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
+			case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
+			case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
+				ASSERT(dynamicState < (sizeof(dynamicStateFlags) * 8));
+				dynamicStateFlags |= (1 << dynamicState);
+				break;
+			default:
+				UNSUPPORTED("VkDynamicState %d", int(dynamicState));
 			}
 		}
 	}
@@ -348,22 +348,22 @@
 		// are not enumerated in the official Vulkan header
 		switch((long)(extensionCreateInfo->sType))
 		{
-			case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
+		case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
 			{
 				const VkPipelineRasterizationLineStateCreateInfoEXT *lineStateCreateInfo = reinterpret_cast<const VkPipelineRasterizationLineStateCreateInfoEXT *>(extensionCreateInfo);
 				lineRasterizationMode = lineStateCreateInfo->lineRasterizationMode;
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT:
+		case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT:
 			{
 				const VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *provokingVertexModeCreateInfo =
 				    reinterpret_cast<const VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *>(extensionCreateInfo);
 				provokingVertexMode = provokingVertexModeCreateInfo->provokingVertexMode;
 			}
 			break;
-			default:
-				WARN("pCreateInfo->pRasterizationState->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
-				break;
+		default:
+			WARN("pCreateInfo->pRasterizationState->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+			break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
@@ -422,14 +422,14 @@
 
 		switch(multisampleState->rasterizationSamples)
 		{
-			case VK_SAMPLE_COUNT_1_BIT:
-				sampleCount = 1;
-				break;
-			case VK_SAMPLE_COUNT_4_BIT:
-				sampleCount = 4;
-				break;
-			default:
-				UNSUPPORTED("Unsupported sample count");
+		case VK_SAMPLE_COUNT_1_BIT:
+			sampleCount = 1;
+			break;
+		case VK_SAMPLE_COUNT_4_BIT:
+			sampleCount = 4;
+			break;
+		default:
+			UNSUPPORTED("Unsupported sample count");
 		}
 
 		VkSampleMask sampleMask;
@@ -522,17 +522,17 @@
 {
 	switch(topology)
 	{
-		case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
-			return true;
-		case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
-		case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
-			return false;
-		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
-		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
-		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
-			return polygonModeAware ? (polygonMode == VK_POLYGON_MODE_POINT) : false;
-		default:
-			UNSUPPORTED("topology %d", int(topology));
+	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
+		return true;
+	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
+	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
+		return false;
+	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
+	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
+	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
+		return polygonModeAware ? (polygonMode == VK_POLYGON_MODE_POINT) : false;
+	default:
+		UNSUPPORTED("topology %d", int(topology));
 	}
 	return false;
 }
@@ -541,17 +541,17 @@
 {
 	switch(topology)
 	{
-		case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
-			return false;
-		case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
-		case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
-			return true;
-		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
-		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
-		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
-			return polygonModeAware ? (polygonMode == VK_POLYGON_MODE_LINE) : false;
-		default:
-			UNSUPPORTED("topology %d", int(topology));
+	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
+		return false;
+	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
+	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
+		return true;
+	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
+	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
+	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
+		return polygonModeAware ? (polygonMode == VK_POLYGON_MODE_LINE) : false;
+	default:
+		UNSUPPORTED("topology %d", int(topology));
 	}
 	return false;
 }
@@ -560,16 +560,16 @@
 {
 	switch(topology)
 	{
-		case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
-		case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
-		case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
-			return false;
-		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
-		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
-		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
-			return polygonModeAware ? (polygonMode == VK_POLYGON_MODE_FILL) : true;
-		default:
-			UNSUPPORTED("topology %d", int(topology));
+	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
+	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
+	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
+		return false;
+	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
+	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
+	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
+		return polygonModeAware ? (polygonMode == VK_POLYGON_MODE_FILL) : true;
+	default:
+		UNSUPPORTED("topology %d", int(topology));
 	}
 	return false;
 }
@@ -700,16 +700,16 @@
 
 	switch(blendState[index].blendOperation)
 	{
-		case VK_BLEND_OP_ADD:
-		case VK_BLEND_OP_SUBTRACT:
-		case VK_BLEND_OP_REVERSE_SUBTRACT:
-			return blendState[index].sourceBlendFactor;
-		case VK_BLEND_OP_MIN:
-			return VK_BLEND_FACTOR_ONE;
-		case VK_BLEND_OP_MAX:
-			return VK_BLEND_FACTOR_ONE;
-		default:
-			ASSERT(false);
+	case VK_BLEND_OP_ADD:
+	case VK_BLEND_OP_SUBTRACT:
+	case VK_BLEND_OP_REVERSE_SUBTRACT:
+		return blendState[index].sourceBlendFactor;
+	case VK_BLEND_OP_MIN:
+		return VK_BLEND_FACTOR_ONE;
+	case VK_BLEND_OP_MAX:
+		return VK_BLEND_FACTOR_ONE;
+	default:
+		ASSERT(false);
 	}
 
 	return blendState[index].sourceBlendFactor;
@@ -723,16 +723,16 @@
 
 	switch(blendState[index].blendOperation)
 	{
-		case VK_BLEND_OP_ADD:
-		case VK_BLEND_OP_SUBTRACT:
-		case VK_BLEND_OP_REVERSE_SUBTRACT:
-			return blendState[index].destBlendFactor;
-		case VK_BLEND_OP_MIN:
-			return VK_BLEND_FACTOR_ONE;
-		case VK_BLEND_OP_MAX:
-			return VK_BLEND_FACTOR_ONE;
-		default:
-			ASSERT(false);
+	case VK_BLEND_OP_ADD:
+	case VK_BLEND_OP_SUBTRACT:
+	case VK_BLEND_OP_REVERSE_SUBTRACT:
+		return blendState[index].destBlendFactor;
+	case VK_BLEND_OP_MIN:
+		return VK_BLEND_FACTOR_ONE;
+	case VK_BLEND_OP_MAX:
+		return VK_BLEND_FACTOR_ONE;
+	default:
+		ASSERT(false);
 	}
 
 	return blendState[index].destBlendFactor;
@@ -746,107 +746,107 @@
 
 	switch(blendState[index].blendOperation)
 	{
-		case VK_BLEND_OP_ADD:
-			if(sourceBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
+	case VK_BLEND_OP_ADD:
+		if(sourceBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
+		{
+			if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
 			{
-				if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
-				{
-					return VK_BLEND_OP_ZERO_EXT;
-				}
-				else
-				{
-					return VK_BLEND_OP_DST_EXT;
-				}
-			}
-			else if(sourceBlendFactor(index) == VK_BLEND_FACTOR_ONE)
-			{
-				if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
-				{
-					return VK_BLEND_OP_SRC_EXT;
-				}
-				else
-				{
-					return VK_BLEND_OP_ADD;
-				}
+				return VK_BLEND_OP_ZERO_EXT;
 			}
 			else
 			{
-				if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
-				{
-					return VK_BLEND_OP_SRC_EXT;
-				}
-				else
-				{
-					return VK_BLEND_OP_ADD;
-				}
+				return VK_BLEND_OP_DST_EXT;
 			}
-		case VK_BLEND_OP_SUBTRACT:
-			if(sourceBlendFactor(index) == VK_BLEND_FACTOR_ZERO && attachments.isColorClamped(index))
+		}
+		else if(sourceBlendFactor(index) == VK_BLEND_FACTOR_ONE)
+		{
+			if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
+			{
+				return VK_BLEND_OP_SRC_EXT;
+			}
+			else
+			{
+				return VK_BLEND_OP_ADD;
+			}
+		}
+		else
+		{
+			if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
+			{
+				return VK_BLEND_OP_SRC_EXT;
+			}
+			else
+			{
+				return VK_BLEND_OP_ADD;
+			}
+		}
+	case VK_BLEND_OP_SUBTRACT:
+		if(sourceBlendFactor(index) == VK_BLEND_FACTOR_ZERO && attachments.isColorClamped(index))
+		{
+			return VK_BLEND_OP_ZERO_EXT;  // Negative, clamped to zero
+		}
+		else if(sourceBlendFactor(index) == VK_BLEND_FACTOR_ONE)
+		{
+			if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
+			{
+				return VK_BLEND_OP_SRC_EXT;
+			}
+			else
+			{
+				return VK_BLEND_OP_SUBTRACT;
+			}
+		}
+		else
+		{
+			if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
+			{
+				return VK_BLEND_OP_SRC_EXT;
+			}
+			else
+			{
+				return VK_BLEND_OP_SUBTRACT;
+			}
+		}
+	case VK_BLEND_OP_REVERSE_SUBTRACT:
+		if(sourceBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
+		{
+			if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
+			{
+				return VK_BLEND_OP_ZERO_EXT;
+			}
+			else
+			{
+				return VK_BLEND_OP_DST_EXT;
+			}
+		}
+		else if(sourceBlendFactor(index) == VK_BLEND_FACTOR_ONE)
+		{
+			if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO && attachments.isColorClamped(index))
 			{
 				return VK_BLEND_OP_ZERO_EXT;  // Negative, clamped to zero
 			}
-			else if(sourceBlendFactor(index) == VK_BLEND_FACTOR_ONE)
+			else
 			{
-				if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
-				{
-					return VK_BLEND_OP_SRC_EXT;
-				}
-				else
-				{
-					return VK_BLEND_OP_SUBTRACT;
-				}
+				return VK_BLEND_OP_REVERSE_SUBTRACT;
+			}
+		}
+		else
+		{
+			if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO && attachments.isColorClamped(index))
+			{
+				return VK_BLEND_OP_ZERO_EXT;  // Negative, clamped to zero
 			}
 			else
 			{
-				if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
-				{
-					return VK_BLEND_OP_SRC_EXT;
-				}
-				else
-				{
-					return VK_BLEND_OP_SUBTRACT;
-				}
+				return VK_BLEND_OP_REVERSE_SUBTRACT;
 			}
-		case VK_BLEND_OP_REVERSE_SUBTRACT:
-			if(sourceBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
-			{
-				if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO)
-				{
-					return VK_BLEND_OP_ZERO_EXT;
-				}
-				else
-				{
-					return VK_BLEND_OP_DST_EXT;
-				}
-			}
-			else if(sourceBlendFactor(index) == VK_BLEND_FACTOR_ONE)
-			{
-				if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO && attachments.isColorClamped(index))
-				{
-					return VK_BLEND_OP_ZERO_EXT;  // Negative, clamped to zero
-				}
-				else
-				{
-					return VK_BLEND_OP_REVERSE_SUBTRACT;
-				}
-			}
-			else
-			{
-				if(destBlendFactor(index) == VK_BLEND_FACTOR_ZERO && attachments.isColorClamped(index))
-				{
-					return VK_BLEND_OP_ZERO_EXT;  // Negative, clamped to zero
-				}
-				else
-				{
-					return VK_BLEND_OP_REVERSE_SUBTRACT;
-				}
-			}
-		case VK_BLEND_OP_MIN:
-			return VK_BLEND_OP_MIN;
-		case VK_BLEND_OP_MAX:
-			return VK_BLEND_OP_MAX;
-		default:
-			ASSERT(false);
+		}
+	case VK_BLEND_OP_MIN:
+		return VK_BLEND_OP_MIN;
+	case VK_BLEND_OP_MAX:
+		return VK_BLEND_OP_MAX;
+	default:
+		ASSERT(false);
 	}
 
 	return blendState[index].blendOperation;
@@ -858,16 +858,16 @@
 
 	switch(blendState[index].blendOperationAlpha)
 	{
-		case VK_BLEND_OP_ADD:
-		case VK_BLEND_OP_SUBTRACT:
-		case VK_BLEND_OP_REVERSE_SUBTRACT:
-			return blendState[index].sourceBlendFactorAlpha;
-		case VK_BLEND_OP_MIN:
-			return VK_BLEND_FACTOR_ONE;
-		case VK_BLEND_OP_MAX:
-			return VK_BLEND_FACTOR_ONE;
-		default:
-			ASSERT(false);
+	case VK_BLEND_OP_ADD:
+	case VK_BLEND_OP_SUBTRACT:
+	case VK_BLEND_OP_REVERSE_SUBTRACT:
+		return blendState[index].sourceBlendFactorAlpha;
+	case VK_BLEND_OP_MIN:
+		return VK_BLEND_FACTOR_ONE;
+	case VK_BLEND_OP_MAX:
+		return VK_BLEND_FACTOR_ONE;
+	default:
+		ASSERT(false);
 	}
 
 	return blendState[index].sourceBlendFactorAlpha;
@@ -879,16 +879,16 @@
 
 	switch(blendState[index].blendOperationAlpha)
 	{
-		case VK_BLEND_OP_ADD:
-		case VK_BLEND_OP_SUBTRACT:
-		case VK_BLEND_OP_REVERSE_SUBTRACT:
-			return blendState[index].destBlendFactorAlpha;
-		case VK_BLEND_OP_MIN:
-			return VK_BLEND_FACTOR_ONE;
-		case VK_BLEND_OP_MAX:
-			return VK_BLEND_FACTOR_ONE;
-		default:
-			ASSERT(false);
+	case VK_BLEND_OP_ADD:
+	case VK_BLEND_OP_SUBTRACT:
+	case VK_BLEND_OP_REVERSE_SUBTRACT:
+		return blendState[index].destBlendFactorAlpha;
+	case VK_BLEND_OP_MIN:
+		return VK_BLEND_FACTOR_ONE;
+	case VK_BLEND_OP_MAX:
+		return VK_BLEND_FACTOR_ONE;
+	default:
+		ASSERT(false);
 	}
 
 	return blendState[index].destBlendFactorAlpha;
@@ -900,107 +900,107 @@
 
 	switch(blendState[index].blendOperationAlpha)
 	{
-		case VK_BLEND_OP_ADD:
-			if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+	case VK_BLEND_OP_ADD:
+		if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+		{
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
 			{
-				if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
-				{
-					return VK_BLEND_OP_ZERO_EXT;
-				}
-				else
-				{
-					return VK_BLEND_OP_DST_EXT;
-				}
-			}
-			else if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ONE)
-			{
-				if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
-				{
-					return VK_BLEND_OP_SRC_EXT;
-				}
-				else
-				{
-					return VK_BLEND_OP_ADD;
-				}
+				return VK_BLEND_OP_ZERO_EXT;
 			}
 			else
 			{
-				if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
-				{
-					return VK_BLEND_OP_SRC_EXT;
-				}
-				else
-				{
-					return VK_BLEND_OP_ADD;
-				}
+				return VK_BLEND_OP_DST_EXT;
 			}
-		case VK_BLEND_OP_SUBTRACT:
-			if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO && attachments.isColorClamped(index))
+		}
+		else if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ONE)
+		{
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+			{
+				return VK_BLEND_OP_SRC_EXT;
+			}
+			else
+			{
+				return VK_BLEND_OP_ADD;
+			}
+		}
+		else
+		{
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+			{
+				return VK_BLEND_OP_SRC_EXT;
+			}
+			else
+			{
+				return VK_BLEND_OP_ADD;
+			}
+		}
+	case VK_BLEND_OP_SUBTRACT:
+		if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO && attachments.isColorClamped(index))
+		{
+			return VK_BLEND_OP_ZERO_EXT;  // Negative, clamped to zero
+		}
+		else if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ONE)
+		{
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+			{
+				return VK_BLEND_OP_SRC_EXT;
+			}
+			else
+			{
+				return VK_BLEND_OP_SUBTRACT;
+			}
+		}
+		else
+		{
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+			{
+				return VK_BLEND_OP_SRC_EXT;
+			}
+			else
+			{
+				return VK_BLEND_OP_SUBTRACT;
+			}
+		}
+	case VK_BLEND_OP_REVERSE_SUBTRACT:
+		if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+		{
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
+			{
+				return VK_BLEND_OP_ZERO_EXT;
+			}
+			else
+			{
+				return VK_BLEND_OP_DST_EXT;
+			}
+		}
+		else if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ONE)
+		{
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO && attachments.isColorClamped(index))
 			{
 				return VK_BLEND_OP_ZERO_EXT;  // Negative, clamped to zero
 			}
-			else if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ONE)
+			else
 			{
-				if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
-				{
-					return VK_BLEND_OP_SRC_EXT;
-				}
-				else
-				{
-					return VK_BLEND_OP_SUBTRACT;
-				}
+				return VK_BLEND_OP_REVERSE_SUBTRACT;
+			}
+		}
+		else
+		{
+			if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO && attachments.isColorClamped(index))
+			{
+				return VK_BLEND_OP_ZERO_EXT;  // Negative, clamped to zero
 			}
 			else
 			{
-				if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
-				{
-					return VK_BLEND_OP_SRC_EXT;
-				}
-				else
-				{
-					return VK_BLEND_OP_SUBTRACT;
-				}
+				return VK_BLEND_OP_REVERSE_SUBTRACT;
 			}
-		case VK_BLEND_OP_REVERSE_SUBTRACT:
-			if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
-			{
-				if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO)
-				{
-					return VK_BLEND_OP_ZERO_EXT;
-				}
-				else
-				{
-					return VK_BLEND_OP_DST_EXT;
-				}
-			}
-			else if(sourceBlendFactorAlpha(index) == VK_BLEND_FACTOR_ONE)
-			{
-				if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO && attachments.isColorClamped(index))
-				{
-					return VK_BLEND_OP_ZERO_EXT;  // Negative, clamped to zero
-				}
-				else
-				{
-					return VK_BLEND_OP_REVERSE_SUBTRACT;
-				}
-			}
-			else
-			{
-				if(destBlendFactorAlpha(index) == VK_BLEND_FACTOR_ZERO && attachments.isColorClamped(index))
-				{
-					return VK_BLEND_OP_ZERO_EXT;  // Negative, clamped to zero
-				}
-				else
-				{
-					return VK_BLEND_OP_REVERSE_SUBTRACT;
-				}
-			}
-		case VK_BLEND_OP_MIN:
-			return VK_BLEND_OP_MIN;
-		case VK_BLEND_OP_MAX:
-			return VK_BLEND_OP_MAX;
-		default:
-			ASSERT(false);
+		}
+	case VK_BLEND_OP_MIN:
+		return VK_BLEND_OP_MIN;
+	case VK_BLEND_OP_MAX:
+		return VK_BLEND_OP_MAX;
+	default:
+		ASSERT(false);
 	}
 
 	return blendState[index].blendOperationAlpha;
diff --git a/src/Device/ETC_Decoder.cpp b/src/Device/ETC_Decoder.cpp
index a5c8549..4fea526 100644
--- a/src/Device/ETC_Decoder.cpp
+++ b/src/Device/ETC_Decoder.cpp
@@ -623,22 +623,22 @@
 	{
 		switch(x * 4 + y)
 		{
-			case 0: return ma;
-			case 1: return mb;
-			case 2: return mc1 << 1 | mc2;
-			case 3: return md;
-			case 4: return me;
-			case 5: return mf1 << 2 | mf2;
-			case 6: return mg;
-			case 7: return mh;
-			case 8: return mi;
-			case 9: return mj;
-			case 10: return mk1 << 1 | mk2;
-			case 11: return ml;
-			case 12: return mm;
-			case 13: return mn1 << 2 | mn2;
-			case 14: return mo;
-			default: return mp;  // 15
+		case 0: return ma;
+		case 1: return mb;
+		case 2: return mc1 << 1 | mc2;
+		case 3: return md;
+		case 4: return me;
+		case 5: return mf1 << 2 | mf2;
+		case 6: return mg;
+		case 7: return mh;
+		case 8: return mi;
+		case 9: return mj;
+		case 10: return mk1 << 1 | mk2;
+		case 11: return ml;
+		case 12: return mm;
+		case 13: return mn1 << 2 | mn2;
+		case 14: return mo;
+		default: return mp;  // 15
 		}
 	}
 
@@ -676,58 +676,58 @@
 
 	switch(inputType)
 	{
-		case ETC_R_SIGNED:
-		case ETC_R_UNSIGNED:
-			for(int y = 0; y < h; y += 4)
+	case ETC_R_SIGNED:
+	case ETC_R_UNSIGNED:
+		for(int y = 0; y < h; y += 4)
+		{
+			unsigned char *dstRow = dst + (y * dstPitch);
+			for(int x = 0; x < w; x += 4, sources[0]++)
 			{
-				unsigned char *dstRow = dst + (y * dstPitch);
-				for(int x = 0; x < w; x += 4, sources[0]++)
-				{
-					ETC2::DecodeBlock(sources, dstRow + (x * dstBpp), 1, x, y, w, h, dstPitch, inputType == ETC_R_SIGNED, true);
-				}
+				ETC2::DecodeBlock(sources, dstRow + (x * dstBpp), 1, x, y, w, h, dstPitch, inputType == ETC_R_SIGNED, true);
 			}
-			break;
-		case ETC_RG_SIGNED:
-		case ETC_RG_UNSIGNED:
-			sources[1] = sources[0] + 1;
-			for(int y = 0; y < h; y += 4)
+		}
+		break;
+	case ETC_RG_SIGNED:
+	case ETC_RG_UNSIGNED:
+		sources[1] = sources[0] + 1;
+		for(int y = 0; y < h; y += 4)
+		{
+			unsigned char *dstRow = dst + (y * dstPitch);
+			for(int x = 0; x < w; x += 4, sources[0] += 2, sources[1] += 2)
 			{
-				unsigned char *dstRow = dst + (y * dstPitch);
-				for(int x = 0; x < w; x += 4, sources[0] += 2, sources[1] += 2)
-				{
-					ETC2::DecodeBlock(sources, dstRow + (x * dstBpp), 2, x, y, w, h, dstPitch, inputType == ETC_RG_SIGNED, true);
-				}
+				ETC2::DecodeBlock(sources, dstRow + (x * dstBpp), 2, x, y, w, h, dstPitch, inputType == ETC_RG_SIGNED, true);
 			}
-			break;
-		case ETC_RGB:
-		case ETC_RGB_PUNCHTHROUGH_ALPHA:
-			for(int y = 0; y < h; y += 4)
+		}
+		break;
+	case ETC_RGB:
+	case ETC_RGB_PUNCHTHROUGH_ALPHA:
+		for(int y = 0; y < h; y += 4)
+		{
+			unsigned char *dstRow = dst + (y * dstPitch);
+			for(int x = 0; x < w; x += 4, sources[0]++)
 			{
-				unsigned char *dstRow = dst + (y * dstPitch);
-				for(int x = 0; x < w; x += 4, sources[0]++)
-				{
-					sources[0]->decodeBlock(dstRow + (x * dstBpp), x, y, w, h, dstPitch, alphaValues, inputType == ETC_RGB_PUNCHTHROUGH_ALPHA);
-				}
+				sources[0]->decodeBlock(dstRow + (x * dstBpp), x, y, w, h, dstPitch, alphaValues, inputType == ETC_RGB_PUNCHTHROUGH_ALPHA);
 			}
-			break;
-		case ETC_RGBA:
-			for(int y = 0; y < h; y += 4)
+		}
+		break;
+	case ETC_RGBA:
+		for(int y = 0; y < h; y += 4)
+		{
+			unsigned char *dstRow = dst + (y * dstPitch);
+			for(int x = 0; x < w; x += 4)
 			{
-				unsigned char *dstRow = dst + (y * dstPitch);
-				for(int x = 0; x < w; x += 4)
-				{
-					// Decode Alpha
-					ETC2::DecodeBlock(&sources[0], &(alphaValues[0][0]), 1, x, y, w, h, 4, false, false);
-					sources[0]++;  // RGBA packets are 128 bits, so move on to the next 64 bit packet to decode the RGB color
+				// Decode Alpha
+				ETC2::DecodeBlock(&sources[0], &(alphaValues[0][0]), 1, x, y, w, h, 4, false, false);
+				sources[0]++;  // RGBA packets are 128 bits, so move on to the next 64 bit packet to decode the RGB color
 
-					// Decode RGB
-					sources[0]->decodeBlock(dstRow + (x * dstBpp), x, y, w, h, dstPitch, alphaValues, false);
-					sources[0]++;
-				}
+				// Decode RGB
+				sources[0]->decodeBlock(dstRow + (x * dstBpp), x, y, w, h, dstPitch, alphaValues, false);
+				sources[0]++;
 			}
-			break;
-		default:
-			return false;
+		}
+		break;
+	default:
+		return false;
 	}
 
 	return true;
diff --git a/src/Device/Renderer.cpp b/src/Device/Renderer.cpp
index 064df1f..554fb22 100644
--- a/src/Device/Renderer.cpp
+++ b/src/Device/Renderer.cpp
@@ -54,7 +54,7 @@
 
 	switch(topology)
 	{
-		case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
+	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
 		{
 			auto index = start;
 			auto pointBatch = &(batch[0][0]);
@@ -69,9 +69,9 @@
 			{
 				*pointBatch++ = indices[index];
 			}
-			break;
 		}
-		case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
+		break;
+	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
 		{
 			auto index = 2 * start;
 			for(unsigned int i = 0; i < triangleCount; i++)
@@ -82,9 +82,9 @@
 
 				index += 2;
 			}
-			break;
 		}
-		case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
+		break;
+	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
 		{
 			auto index = start;
 			for(unsigned int i = 0; i < triangleCount; i++)
@@ -95,9 +95,9 @@
 
 				index += 1;
 			}
-			break;
 		}
-		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
+		break;
+	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
 		{
 			auto index = 3 * start;
 			for(unsigned int i = 0; i < triangleCount; i++)
@@ -108,9 +108,9 @@
 
 				index += 3;
 			}
-			break;
 		}
-		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
+		break;
+	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
 		{
 			auto index = start;
 			for(unsigned int i = 0; i < triangleCount; i++)
@@ -121,9 +121,9 @@
 
 				index += 1;
 			}
-			break;
 		}
-		case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
+		break;
+	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
 		{
 			auto index = start + 1;
 			for(unsigned int i = 0; i < triangleCount; i++)
@@ -134,11 +134,11 @@
 
 				index += 1;
 			}
-			break;
 		}
-		default:
-			ASSERT(false);
-			return false;
+		break;
+	default:
+		ASSERT(false);
+		return false;
 	}
 
 	return true;
@@ -229,20 +229,20 @@
 	{
 		switch(pipelineState.getPolygonMode())
 		{
-			case VK_POLYGON_MODE_FILL:
-				setupPrimitives = &DrawCall::setupSolidTriangles;
-				break;
-			case VK_POLYGON_MODE_LINE:
-				setupPrimitives = &DrawCall::setupWireframeTriangles;
-				numPrimitivesPerBatch /= 3;
-				break;
-			case VK_POLYGON_MODE_POINT:
-				setupPrimitives = &DrawCall::setupPointTriangles;
-				numPrimitivesPerBatch /= 3;
-				break;
-			default:
-				UNSUPPORTED("polygon mode: %d", int(pipelineState.getPolygonMode()));
-				return;
+		case VK_POLYGON_MODE_FILL:
+			setupPrimitives = &DrawCall::setupSolidTriangles;
+			break;
+		case VK_POLYGON_MODE_LINE:
+			setupPrimitives = &DrawCall::setupWireframeTriangles;
+			numPrimitivesPerBatch /= 3;
+			break;
+		case VK_POLYGON_MODE_POINT:
+			setupPrimitives = &DrawCall::setupPointTriangles;
+			numPrimitivesPerBatch /= 3;
+			break;
+		default:
+			UNSUPPORTED("polygon mode: %d", int(pipelineState.getPolygonMode()));
+			return;
 		}
 	}
 	else if(pipelineState.isDrawLine(false))
@@ -361,15 +361,15 @@
 		{
 			switch(attachments.depthBuffer->getFormat(VK_IMAGE_ASPECT_DEPTH_BIT))
 			{
-				case VK_FORMAT_D16_UNORM:
-					data->minimumResolvableDepthDifference = 1.0f / 0xFFFF;
-					break;
-				case VK_FORMAT_D32_SFLOAT:
-					// The minimum resolvable depth difference is determined per-polygon for floating-point depth
-					// buffers. DrawData::minimumResolvableDepthDifference is unused.
-					break;
-				default:
-					UNSUPPORTED("Depth format: %d", int(attachments.depthBuffer->getFormat(VK_IMAGE_ASPECT_DEPTH_BIT)));
+			case VK_FORMAT_D16_UNORM:
+				data->minimumResolvableDepthDifference = 1.0f / 0xFFFF;
+				break;
+			case VK_FORMAT_D32_SFLOAT:
+				// The minimum resolvable depth difference is determined per-polygon for floating-point depth
+				// buffers. DrawData::minimumResolvableDepthDifference is unused.
+				break;
+			default:
+				UNSUPPORTED("Depth format: %d", int(attachments.depthBuffer->getFormat(VK_IMAGE_ASPECT_DEPTH_BIT)));
 			}
 		}
 	}
@@ -625,22 +625,21 @@
 	{
 		switch(indexType)
 		{
-			case VK_INDEX_TYPE_UINT16:
-				if(!setBatchIndices(triangleIndicesOut, topology, provokingVertexMode, static_cast<const uint16_t *>(primitiveIndices), start, triangleCount))
-				{
-					return;
-				}
-				break;
-			case VK_INDEX_TYPE_UINT32:
-				if(!setBatchIndices(triangleIndicesOut, topology, provokingVertexMode, static_cast<const uint32_t *>(primitiveIndices), start, triangleCount))
-				{
-					return;
-				}
-				break;
-				break;
-			default:
-				ASSERT(false);
+		case VK_INDEX_TYPE_UINT16:
+			if(!setBatchIndices(triangleIndicesOut, topology, provokingVertexMode, static_cast<const uint16_t *>(primitiveIndices), start, triangleCount))
+			{
 				return;
+			}
+			break;
+		case VK_INDEX_TYPE_UINT32:
+			if(!setBatchIndices(triangleIndicesOut, topology, provokingVertexMode, static_cast<const uint32_t *>(primitiveIndices), start, triangleCount))
+			{
+				return;
+			}
+			break;
+		default:
+			ASSERT(false);
+			return;
 		}
 	}
 
diff --git a/src/Device/Sampler.hpp b/src/Device/Sampler.hpp
index 87111d7..712977d 100644
--- a/src/Device/Sampler.hpp
+++ b/src/Device/Sampler.hpp
@@ -118,17 +118,17 @@
 	{
 		switch(textureType)
 		{
-			case VK_IMAGE_VIEW_TYPE_1D:
-			case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
-				return true;
-			case VK_IMAGE_VIEW_TYPE_2D:
-			case VK_IMAGE_VIEW_TYPE_3D:
-			case VK_IMAGE_VIEW_TYPE_CUBE:
-			case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
-			case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
-				return false;
-			default:
-				UNSUPPORTED("VkImageViewType %d", (int)textureType);
+		case VK_IMAGE_VIEW_TYPE_1D:
+		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
+			return true;
+		case VK_IMAGE_VIEW_TYPE_2D:
+		case VK_IMAGE_VIEW_TYPE_3D:
+		case VK_IMAGE_VIEW_TYPE_CUBE:
+		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
+		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
+			return false;
+		default:
+			UNSUPPORTED("VkImageViewType %d", (int)textureType);
 		}
 
 		return false;
@@ -138,17 +138,17 @@
 	{
 		switch(textureType)
 		{
-			case VK_IMAGE_VIEW_TYPE_2D:
-			case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
-				return true;
-			case VK_IMAGE_VIEW_TYPE_1D:
-			case VK_IMAGE_VIEW_TYPE_3D:
-			case VK_IMAGE_VIEW_TYPE_CUBE:
-			case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
-			case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
-				return false;
-			default:
-				UNSUPPORTED("VkImageViewType %d", (int)textureType);
+		case VK_IMAGE_VIEW_TYPE_2D:
+		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
+			return true;
+		case VK_IMAGE_VIEW_TYPE_1D:
+		case VK_IMAGE_VIEW_TYPE_3D:
+		case VK_IMAGE_VIEW_TYPE_CUBE:
+		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
+		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
+			return false;
+		default:
+			UNSUPPORTED("VkImageViewType %d", (int)textureType);
 		}
 
 		return false;
@@ -158,17 +158,17 @@
 	{
 		switch(textureType)
 		{
-			case VK_IMAGE_VIEW_TYPE_3D:
-				return true;
-			case VK_IMAGE_VIEW_TYPE_1D:
-			case VK_IMAGE_VIEW_TYPE_2D:
-			case VK_IMAGE_VIEW_TYPE_CUBE:
-			case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
-			case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
-			case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
-				return false;
-			default:
-				UNSUPPORTED("VkImageViewType %d", (int)textureType);
+		case VK_IMAGE_VIEW_TYPE_3D:
+			return true;
+		case VK_IMAGE_VIEW_TYPE_1D:
+		case VK_IMAGE_VIEW_TYPE_2D:
+		case VK_IMAGE_VIEW_TYPE_CUBE:
+		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
+		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
+		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
+			return false;
+		default:
+			UNSUPPORTED("VkImageViewType %d", (int)textureType);
 		}
 
 		return false;
@@ -178,17 +178,17 @@
 	{
 		switch(textureType)
 		{
-			case VK_IMAGE_VIEW_TYPE_CUBE:
-			case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
-				return true;
-			case VK_IMAGE_VIEW_TYPE_1D:
-			case VK_IMAGE_VIEW_TYPE_2D:
-			case VK_IMAGE_VIEW_TYPE_3D:
-			case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
-			case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
-				return false;
-			default:
-				UNSUPPORTED("VkImageViewType %d", (int)textureType);
+		case VK_IMAGE_VIEW_TYPE_CUBE:
+		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
+			return true;
+		case VK_IMAGE_VIEW_TYPE_1D:
+		case VK_IMAGE_VIEW_TYPE_2D:
+		case VK_IMAGE_VIEW_TYPE_3D:
+		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
+		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
+			return false;
+		default:
+			UNSUPPORTED("VkImageViewType %d", (int)textureType);
 		}
 
 		return false;
@@ -198,17 +198,17 @@
 	{
 		switch(textureType)
 		{
-			case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
-			case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
-			case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
-				return true;
-			case VK_IMAGE_VIEW_TYPE_1D:
-			case VK_IMAGE_VIEW_TYPE_2D:
-			case VK_IMAGE_VIEW_TYPE_3D:
-			case VK_IMAGE_VIEW_TYPE_CUBE:
-				return false;
-			default:
-				UNSUPPORTED("VkImageViewType %d", (int)textureType);
+		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
+		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
+		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
+			return true;
+		case VK_IMAGE_VIEW_TYPE_1D:
+		case VK_IMAGE_VIEW_TYPE_2D:
+		case VK_IMAGE_VIEW_TYPE_3D:
+		case VK_IMAGE_VIEW_TYPE_CUBE:
+			return false;
+		default:
+			UNSUPPORTED("VkImageViewType %d", (int)textureType);
 		}
 
 		return false;
diff --git a/src/Pipeline/PixelProgram.cpp b/src/Pipeline/PixelProgram.cpp
index c42af5f..58f6e5a 100644
--- a/src/Pipeline/PixelProgram.cpp
+++ b/src/Pipeline/PixelProgram.cpp
@@ -278,80 +278,80 @@
 		auto format = state.targetFormat[index];
 		switch(format)
 		{
-			case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-			case VK_FORMAT_R5G6B5_UNORM_PACK16:
-			case VK_FORMAT_B8G8R8A8_UNORM:
-			case VK_FORMAT_B8G8R8A8_SRGB:
-			case VK_FORMAT_R8G8B8A8_UNORM:
-			case VK_FORMAT_R8G8B8A8_SRGB:
-			case VK_FORMAT_R8G8_UNORM:
-			case VK_FORMAT_R8_UNORM:
-			case VK_FORMAT_R16G16_UNORM:
-			case VK_FORMAT_R16G16B16A16_UNORM:
-			case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-			case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-			case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-			case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-				for(unsigned int q = sampleLoopInit; q < sampleLoopEnd; q++)
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16B16A16_UNORM:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+			for(unsigned int q = sampleLoopInit; q < sampleLoopEnd; q++)
+			{
+				if(state.multiSampleMask & (1 << q))
 				{
-					if(state.multiSampleMask & (1 << q))
-					{
-						Pointer<Byte> buffer = cBuffer[index] + q * *Pointer<Int>(data + OFFSET(DrawData, colorSliceB[index]));
-						Vector4s color;
+					Pointer<Byte> buffer = cBuffer[index] + q * *Pointer<Int>(data + OFFSET(DrawData, colorSliceB[index]));
+					Vector4s color;
 
-						color.x = convertFixed16(c[index].x, false);
-						color.y = convertFixed16(c[index].y, false);
-						color.z = convertFixed16(c[index].z, false);
-						color.w = convertFixed16(c[index].w, false);
+					color.x = convertFixed16(c[index].x, false);
+					color.y = convertFixed16(c[index].y, false);
+					color.z = convertFixed16(c[index].z, false);
+					color.w = convertFixed16(c[index].w, false);
 
-						alphaBlend(index, buffer, color, x);
-						writeColor(index, buffer, x, color, sMask[q], zMask[q], cMask[q]);
-					}
+					alphaBlend(index, buffer, color, x);
+					writeColor(index, buffer, x, color, sMask[q], zMask[q], cMask[q]);
 				}
-				break;
-			case VK_FORMAT_R16_SFLOAT:
-			case VK_FORMAT_R16G16_SFLOAT:
-			case VK_FORMAT_R16G16B16A16_SFLOAT:
-			case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-			case VK_FORMAT_R32_SFLOAT:
-			case VK_FORMAT_R32G32_SFLOAT:
-			case VK_FORMAT_R32G32B32A32_SFLOAT:
-			case VK_FORMAT_R32_SINT:
-			case VK_FORMAT_R32G32_SINT:
-			case VK_FORMAT_R32G32B32A32_SINT:
-			case VK_FORMAT_R32_UINT:
-			case VK_FORMAT_R32G32_UINT:
-			case VK_FORMAT_R32G32B32A32_UINT:
-			case VK_FORMAT_R16_SINT:
-			case VK_FORMAT_R16G16_SINT:
-			case VK_FORMAT_R16G16B16A16_SINT:
-			case VK_FORMAT_R16_UINT:
-			case VK_FORMAT_R16G16_UINT:
-			case VK_FORMAT_R16G16B16A16_UINT:
-			case VK_FORMAT_R8_SINT:
-			case VK_FORMAT_R8G8_SINT:
-			case VK_FORMAT_R8G8B8A8_SINT:
-			case VK_FORMAT_R8_UINT:
-			case VK_FORMAT_R8G8_UINT:
-			case VK_FORMAT_R8G8B8A8_UINT:
-			case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-			case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-			case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-			case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-				for(unsigned int q = sampleLoopInit; q < sampleLoopEnd; q++)
+			}
+			break;
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+			for(unsigned int q = sampleLoopInit; q < sampleLoopEnd; q++)
+			{
+				if(state.multiSampleMask & (1 << q))
 				{
-					if(state.multiSampleMask & (1 << q))
-					{
-						Pointer<Byte> buffer = cBuffer[index] + q * *Pointer<Int>(data + OFFSET(DrawData, colorSliceB[index]));
-						Vector4f color = c[index];
+					Pointer<Byte> buffer = cBuffer[index] + q * *Pointer<Int>(data + OFFSET(DrawData, colorSliceB[index]));
+					Vector4f color = c[index];
 
-						alphaBlend(index, buffer, color, x);
-						writeColor(index, buffer, x, color, sMask[q], zMask[q], cMask[q]);
-					}
+					alphaBlend(index, buffer, color, x);
+					writeColor(index, buffer, x, color, sMask[q], zMask[q], cMask[q]);
 				}
-				break;
-			default:
-				UNSUPPORTED("VkFormat: %d", int(format));
+			}
+			break;
+		default:
+			UNSUPPORTED("VkFormat: %d", int(format));
 		}
 	}
 }
@@ -367,63 +367,63 @@
 
 		switch(state.targetFormat[index])
 		{
-			case VK_FORMAT_UNDEFINED:
-				break;
-			case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-			case VK_FORMAT_R5G6B5_UNORM_PACK16:
-			case VK_FORMAT_B8G8R8A8_UNORM:
-			case VK_FORMAT_B8G8R8A8_SRGB:
-			case VK_FORMAT_R8G8B8A8_UNORM:
-			case VK_FORMAT_R8G8B8A8_SRGB:
-			case VK_FORMAT_R8G8_UNORM:
-			case VK_FORMAT_R8_UNORM:
-			case VK_FORMAT_R16G16_UNORM:
-			case VK_FORMAT_R16G16B16A16_UNORM:
-			case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-			case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-			case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-			case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-				oC[index].x = Max(oC[index].x, Float4(0.0f));
-				oC[index].x = Min(oC[index].x, Float4(1.0f));
-				oC[index].y = Max(oC[index].y, Float4(0.0f));
-				oC[index].y = Min(oC[index].y, Float4(1.0f));
-				oC[index].z = Max(oC[index].z, Float4(0.0f));
-				oC[index].z = Min(oC[index].z, Float4(1.0f));
-				oC[index].w = Max(oC[index].w, Float4(0.0f));
-				oC[index].w = Min(oC[index].w, Float4(1.0f));
-				break;
-			case VK_FORMAT_R32_SFLOAT:
-			case VK_FORMAT_R32G32_SFLOAT:
-			case VK_FORMAT_R32G32B32A32_SFLOAT:
-			case VK_FORMAT_R32_SINT:
-			case VK_FORMAT_R32G32_SINT:
-			case VK_FORMAT_R32G32B32A32_SINT:
-			case VK_FORMAT_R32_UINT:
-			case VK_FORMAT_R32G32_UINT:
-			case VK_FORMAT_R32G32B32A32_UINT:
-			case VK_FORMAT_R16_SFLOAT:
-			case VK_FORMAT_R16G16_SFLOAT:
-			case VK_FORMAT_R16G16B16A16_SFLOAT:
-			case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-			case VK_FORMAT_R16_SINT:
-			case VK_FORMAT_R16G16_SINT:
-			case VK_FORMAT_R16G16B16A16_SINT:
-			case VK_FORMAT_R16_UINT:
-			case VK_FORMAT_R16G16_UINT:
-			case VK_FORMAT_R16G16B16A16_UINT:
-			case VK_FORMAT_R8_SINT:
-			case VK_FORMAT_R8G8_SINT:
-			case VK_FORMAT_R8G8B8A8_SINT:
-			case VK_FORMAT_R8_UINT:
-			case VK_FORMAT_R8G8_UINT:
-			case VK_FORMAT_R8G8B8A8_UINT:
-			case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-			case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-			case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-			case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-				break;
-			default:
-				UNSUPPORTED("VkFormat: %d", int(state.targetFormat[index]));
+		case VK_FORMAT_UNDEFINED:
+			break;
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R16G16_UNORM:
+		case VK_FORMAT_R16G16B16A16_UNORM:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+			oC[index].x = Max(oC[index].x, Float4(0.0f));
+			oC[index].x = Min(oC[index].x, Float4(1.0f));
+			oC[index].y = Max(oC[index].y, Float4(0.0f));
+			oC[index].y = Min(oC[index].y, Float4(1.0f));
+			oC[index].z = Max(oC[index].z, Float4(0.0f));
+			oC[index].z = Min(oC[index].z, Float4(1.0f));
+			oC[index].w = Max(oC[index].w, Float4(0.0f));
+			oC[index].w = Min(oC[index].w, Float4(1.0f));
+			break;
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_R32G32_UINT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+		case VK_FORMAT_R16_SFLOAT:
+		case VK_FORMAT_R16G16_SFLOAT:
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		case VK_FORMAT_R16_SINT:
+		case VK_FORMAT_R16G16_SINT:
+		case VK_FORMAT_R16G16B16A16_SINT:
+		case VK_FORMAT_R16_UINT:
+		case VK_FORMAT_R16G16_UINT:
+		case VK_FORMAT_R16G16B16A16_UINT:
+		case VK_FORMAT_R8_SINT:
+		case VK_FORMAT_R8G8_SINT:
+		case VK_FORMAT_R8G8B8A8_SINT:
+		case VK_FORMAT_R8_UINT:
+		case VK_FORMAT_R8G8_UINT:
+		case VK_FORMAT_R8G8B8A8_UINT:
+		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+			break;
+		default:
+			UNSUPPORTED("VkFormat: %d", int(state.targetFormat[index]));
 		}
 	}
 }
diff --git a/src/Pipeline/PixelRoutine.cpp b/src/Pipeline/PixelRoutine.cpp
index f94155e..f1c55be 100644
--- a/src/Pipeline/PixelRoutine.cpp
+++ b/src/Pipeline/PixelRoutine.cpp
@@ -390,43 +390,43 @@
 
 	switch(stencilCompareMode)
 	{
-		case VK_COMPARE_OP_ALWAYS:
-			value = Byte8(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
-			break;
-		case VK_COMPARE_OP_NEVER:
-			value = Byte8(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
-			break;
-		case VK_COMPARE_OP_LESS:  // a < b ~ b > a
-			value += Byte8(0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80);
-			value = CmpGT(As<SByte8>(value), *Pointer<SByte8>(data + OFFSET(DrawData, stencil[isBack].referenceMaskedSignedQ)));
-			break;
-		case VK_COMPARE_OP_EQUAL:
-			value = CmpEQ(value, *Pointer<Byte8>(data + OFFSET(DrawData, stencil[isBack].referenceMaskedQ)));
-			break;
-		case VK_COMPARE_OP_NOT_EQUAL:  // a != b ~ !(a == b)
-			value = CmpEQ(value, *Pointer<Byte8>(data + OFFSET(DrawData, stencil[isBack].referenceMaskedQ)));
-			value ^= Byte8(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
-			break;
-		case VK_COMPARE_OP_LESS_OR_EQUAL:  // a <= b ~ (b > a) || (a == b)
-			equal = value;
-			equal = CmpEQ(equal, *Pointer<Byte8>(data + OFFSET(DrawData, stencil[isBack].referenceMaskedQ)));
-			value += Byte8(0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80);
-			value = CmpGT(As<SByte8>(value), *Pointer<SByte8>(data + OFFSET(DrawData, stencil[isBack].referenceMaskedSignedQ)));
-			value |= equal;
-			break;
-		case VK_COMPARE_OP_GREATER:  // a > b
-			equal = *Pointer<Byte8>(data + OFFSET(DrawData, stencil[isBack].referenceMaskedSignedQ));
-			value += Byte8(0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80);
-			equal = CmpGT(As<SByte8>(equal), As<SByte8>(value));
-			value = equal;
-			break;
-		case VK_COMPARE_OP_GREATER_OR_EQUAL:  // a >= b ~ !(a < b) ~ !(b > a)
-			value += Byte8(0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80);
-			value = CmpGT(As<SByte8>(value), *Pointer<SByte8>(data + OFFSET(DrawData, stencil[isBack].referenceMaskedSignedQ)));
-			value ^= Byte8(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
-			break;
-		default:
-			UNSUPPORTED("VkCompareOp: %d", int(stencilCompareMode));
+	case VK_COMPARE_OP_ALWAYS:
+		value = Byte8(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
+		break;
+	case VK_COMPARE_OP_NEVER:
+		value = Byte8(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+		break;
+	case VK_COMPARE_OP_LESS:  // a < b ~ b > a
+		value += Byte8(0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80);
+		value = CmpGT(As<SByte8>(value), *Pointer<SByte8>(data + OFFSET(DrawData, stencil[isBack].referenceMaskedSignedQ)));
+		break;
+	case VK_COMPARE_OP_EQUAL:
+		value = CmpEQ(value, *Pointer<Byte8>(data + OFFSET(DrawData, stencil[isBack].referenceMaskedQ)));
+		break;
+	case VK_COMPARE_OP_NOT_EQUAL:  // a != b ~ !(a == b)
+		value = CmpEQ(value, *Pointer<Byte8>(data + OFFSET(DrawData, stencil[isBack].referenceMaskedQ)));
+		value ^= Byte8(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
+		break;
+	case VK_COMPARE_OP_LESS_OR_EQUAL:  // a <= b ~ (b > a) || (a == b)
+		equal = value;
+		equal = CmpEQ(equal, *Pointer<Byte8>(data + OFFSET(DrawData, stencil[isBack].referenceMaskedQ)));
+		value += Byte8(0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80);
+		value = CmpGT(As<SByte8>(value), *Pointer<SByte8>(data + OFFSET(DrawData, stencil[isBack].referenceMaskedSignedQ)));
+		value |= equal;
+		break;
+	case VK_COMPARE_OP_GREATER:  // a > b
+		equal = *Pointer<Byte8>(data + OFFSET(DrawData, stencil[isBack].referenceMaskedSignedQ));
+		value += Byte8(0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80);
+		equal = CmpGT(As<SByte8>(equal), As<SByte8>(value));
+		value = equal;
+		break;
+	case VK_COMPARE_OP_GREATER_OR_EQUAL:  // a >= b ~ !(a < b) ~ !(b > a)
+		value += Byte8(0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80);
+		value = CmpGT(As<SByte8>(value), *Pointer<SByte8>(data + OFFSET(DrawData, stencil[isBack].referenceMaskedSignedQ)));
+		value ^= Byte8(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
+		break;
+	default:
+		UNSUPPORTED("VkCompareOp: %d", int(stencilCompareMode));
 	}
 }
 
@@ -458,45 +458,45 @@
 
 	switch(state.depthCompareMode)
 	{
-		case VK_COMPARE_OP_ALWAYS:
-			// Optimized
-			break;
-		case VK_COMPARE_OP_NEVER:
-			// Optimized
-			break;
-		case VK_COMPARE_OP_EQUAL:
-			zTest = CmpEQ(zValue, Z);
-			break;
-		case VK_COMPARE_OP_NOT_EQUAL:
-			zTest = CmpNEQ(zValue, Z);
-			break;
-		case VK_COMPARE_OP_LESS:
-			zTest = CmpNLE(zValue, Z);
-			break;
-		case VK_COMPARE_OP_GREATER_OR_EQUAL:
-			zTest = CmpLE(zValue, Z);
-			break;
-		case VK_COMPARE_OP_LESS_OR_EQUAL:
-			zTest = CmpNLT(zValue, Z);
-			break;
-		case VK_COMPARE_OP_GREATER:
-			zTest = CmpLT(zValue, Z);
-			break;
-		default:
-			UNSUPPORTED("VkCompareOp: %d", int(state.depthCompareMode));
+	case VK_COMPARE_OP_ALWAYS:
+		// Optimized
+		break;
+	case VK_COMPARE_OP_NEVER:
+		// Optimized
+		break;
+	case VK_COMPARE_OP_EQUAL:
+		zTest = CmpEQ(zValue, Z);
+		break;
+	case VK_COMPARE_OP_NOT_EQUAL:
+		zTest = CmpNEQ(zValue, Z);
+		break;
+	case VK_COMPARE_OP_LESS:
+		zTest = CmpNLE(zValue, Z);
+		break;
+	case VK_COMPARE_OP_GREATER_OR_EQUAL:
+		zTest = CmpLE(zValue, Z);
+		break;
+	case VK_COMPARE_OP_LESS_OR_EQUAL:
+		zTest = CmpNLT(zValue, Z);
+		break;
+	case VK_COMPARE_OP_GREATER:
+		zTest = CmpLT(zValue, Z);
+		break;
+	default:
+		UNSUPPORTED("VkCompareOp: %d", int(state.depthCompareMode));
 	}
 
 	switch(state.depthCompareMode)
 	{
-		case VK_COMPARE_OP_ALWAYS:
-			zMask = cMask;
-			break;
-		case VK_COMPARE_OP_NEVER:
-			zMask = 0x0;
-			break;
-		default:
-			zMask = SignMask(zTest) & cMask;
-			break;
+	case VK_COMPARE_OP_ALWAYS:
+		zMask = cMask;
+		break;
+	case VK_COMPARE_OP_NEVER:
+		zMask = 0x0;
+		break;
+	default:
+		zMask = SignMask(zTest) & cMask;
+		break;
 	}
 
 	if(state.stencilActive)
@@ -540,45 +540,45 @@
 
 	switch(state.depthCompareMode)
 	{
-		case VK_COMPARE_OP_ALWAYS:
-			// Optimized
-			break;
-		case VK_COMPARE_OP_NEVER:
-			// Optimized
-			break;
-		case VK_COMPARE_OP_EQUAL:
-			zTest = Int4(CmpEQ(zValue, Z));
-			break;
-		case VK_COMPARE_OP_NOT_EQUAL:
-			zTest = ~Int4(CmpEQ(zValue, Z));
-			break;
-		case VK_COMPARE_OP_LESS:
-			zTest = Int4(CmpGT(zValue, Z));
-			break;
-		case VK_COMPARE_OP_GREATER_OR_EQUAL:
-			zTest = ~Int4(CmpGT(zValue, Z));
-			break;
-		case VK_COMPARE_OP_LESS_OR_EQUAL:
-			zTest = ~Int4(CmpGT(Z, zValue));
-			break;
-		case VK_COMPARE_OP_GREATER:
-			zTest = Int4(CmpGT(Z, zValue));
-			break;
-		default:
-			UNSUPPORTED("VkCompareOp: %d", int(state.depthCompareMode));
+	case VK_COMPARE_OP_ALWAYS:
+		// Optimized
+		break;
+	case VK_COMPARE_OP_NEVER:
+		// Optimized
+		break;
+	case VK_COMPARE_OP_EQUAL:
+		zTest = Int4(CmpEQ(zValue, Z));
+		break;
+	case VK_COMPARE_OP_NOT_EQUAL:
+		zTest = ~Int4(CmpEQ(zValue, Z));
+		break;
+	case VK_COMPARE_OP_LESS:
+		zTest = Int4(CmpGT(zValue, Z));
+		break;
+	case VK_COMPARE_OP_GREATER_OR_EQUAL:
+		zTest = ~Int4(CmpGT(zValue, Z));
+		break;
+	case VK_COMPARE_OP_LESS_OR_EQUAL:
+		zTest = ~Int4(CmpGT(Z, zValue));
+		break;
+	case VK_COMPARE_OP_GREATER:
+		zTest = Int4(CmpGT(Z, zValue));
+		break;
+	default:
+		UNSUPPORTED("VkCompareOp: %d", int(state.depthCompareMode));
 	}
 
 	switch(state.depthCompareMode)
 	{
-		case VK_COMPARE_OP_ALWAYS:
-			zMask = cMask;
-			break;
-		case VK_COMPARE_OP_NEVER:
-			zMask = 0x0;
-			break;
-		default:
-			zMask = SignMask(zTest) & cMask;
-			break;
+	case VK_COMPARE_OP_ALWAYS:
+		zMask = cMask;
+		break;
+	case VK_COMPARE_OP_NEVER:
+		zMask = 0x0;
+		break;
+	default:
+		zMask = SignMask(zTest) & cMask;
+		break;
 	}
 
 	if(state.stencilActive)
@@ -885,32 +885,32 @@
 {
 	switch(operation)
 	{
-		case VK_STENCIL_OP_KEEP:
-			output = bufferValue;
-			break;
-		case VK_STENCIL_OP_ZERO:
-			output = Byte8(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
-			break;
-		case VK_STENCIL_OP_REPLACE:
-			output = stencilReplaceRef(isBack);
-			break;
-		case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
-			output = AddSat(bufferValue, Byte8(1, 1, 1, 1, 1, 1, 1, 1));
-			break;
-		case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
-			output = SubSat(bufferValue, Byte8(1, 1, 1, 1, 1, 1, 1, 1));
-			break;
-		case VK_STENCIL_OP_INVERT:
-			output = bufferValue ^ Byte8(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
-			break;
-		case VK_STENCIL_OP_INCREMENT_AND_WRAP:
-			output = bufferValue + Byte8(1, 1, 1, 1, 1, 1, 1, 1);
-			break;
-		case VK_STENCIL_OP_DECREMENT_AND_WRAP:
-			output = bufferValue - Byte8(1, 1, 1, 1, 1, 1, 1, 1);
-			break;
-		default:
-			UNSUPPORTED("VkStencilOp: %d", int(operation));
+	case VK_STENCIL_OP_KEEP:
+		output = bufferValue;
+		break;
+	case VK_STENCIL_OP_ZERO:
+		output = Byte8(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+		break;
+	case VK_STENCIL_OP_REPLACE:
+		output = stencilReplaceRef(isBack);
+		break;
+	case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
+		output = AddSat(bufferValue, Byte8(1, 1, 1, 1, 1, 1, 1, 1));
+		break;
+	case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
+		output = SubSat(bufferValue, Byte8(1, 1, 1, 1, 1, 1, 1, 1));
+		break;
+	case VK_STENCIL_OP_INVERT:
+		output = bufferValue ^ Byte8(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
+		break;
+	case VK_STENCIL_OP_INCREMENT_AND_WRAP:
+		output = bufferValue + Byte8(1, 1, 1, 1, 1, 1, 1, 1);
+		break;
+	case VK_STENCIL_OP_DECREMENT_AND_WRAP:
+		output = bufferValue - Byte8(1, 1, 1, 1, 1, 1, 1, 1);
+		break;
+	default:
+		UNSUPPORTED("VkStencilOp: %d", int(operation));
 	}
 }
 
@@ -918,80 +918,80 @@
 {
 	switch(blendFactorActive)
 	{
-		case VK_BLEND_FACTOR_ZERO:
-			// Optimized
-			break;
-		case VK_BLEND_FACTOR_ONE:
-			// Optimized
-			break;
-		case VK_BLEND_FACTOR_SRC_COLOR:
-			blendFactor.x = current.x;
-			blendFactor.y = current.y;
-			blendFactor.z = current.z;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
-			blendFactor.x = Short4(0xFFFFu) - current.x;
-			blendFactor.y = Short4(0xFFFFu) - current.y;
-			blendFactor.z = Short4(0xFFFFu) - current.z;
-			break;
-		case VK_BLEND_FACTOR_DST_COLOR:
-			blendFactor.x = pixel.x;
-			blendFactor.y = pixel.y;
-			blendFactor.z = pixel.z;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
-			blendFactor.x = Short4(0xFFFFu) - pixel.x;
-			blendFactor.y = Short4(0xFFFFu) - pixel.y;
-			blendFactor.z = Short4(0xFFFFu) - pixel.z;
-			break;
-		case VK_BLEND_FACTOR_SRC_ALPHA:
-			blendFactor.x = current.w;
-			blendFactor.y = current.w;
-			blendFactor.z = current.w;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
-			blendFactor.x = Short4(0xFFFFu) - current.w;
-			blendFactor.y = Short4(0xFFFFu) - current.w;
-			blendFactor.z = Short4(0xFFFFu) - current.w;
-			break;
-		case VK_BLEND_FACTOR_DST_ALPHA:
-			blendFactor.x = pixel.w;
-			blendFactor.y = pixel.w;
-			blendFactor.z = pixel.w;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
-			blendFactor.x = Short4(0xFFFFu) - pixel.w;
-			blendFactor.y = Short4(0xFFFFu) - pixel.w;
-			blendFactor.z = Short4(0xFFFFu) - pixel.w;
-			break;
-		case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
-			blendFactor.x = Short4(0xFFFFu) - pixel.w;
-			blendFactor.x = Min(As<UShort4>(blendFactor.x), As<UShort4>(current.w));
-			blendFactor.y = blendFactor.x;
-			blendFactor.z = blendFactor.x;
-			break;
-		case VK_BLEND_FACTOR_CONSTANT_COLOR:
-			blendFactor.x = *Pointer<Short4>(data + OFFSET(DrawData, factor.blendConstant4W[0]));
-			blendFactor.y = *Pointer<Short4>(data + OFFSET(DrawData, factor.blendConstant4W[1]));
-			blendFactor.z = *Pointer<Short4>(data + OFFSET(DrawData, factor.blendConstant4W[2]));
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
-			blendFactor.x = *Pointer<Short4>(data + OFFSET(DrawData, factor.invBlendConstant4W[0]));
-			blendFactor.y = *Pointer<Short4>(data + OFFSET(DrawData, factor.invBlendConstant4W[1]));
-			blendFactor.z = *Pointer<Short4>(data + OFFSET(DrawData, factor.invBlendConstant4W[2]));
-			break;
-		case VK_BLEND_FACTOR_CONSTANT_ALPHA:
-			blendFactor.x = *Pointer<Short4>(data + OFFSET(DrawData, factor.blendConstant4W[3]));
-			blendFactor.y = *Pointer<Short4>(data + OFFSET(DrawData, factor.blendConstant4W[3]));
-			blendFactor.z = *Pointer<Short4>(data + OFFSET(DrawData, factor.blendConstant4W[3]));
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
-			blendFactor.x = *Pointer<Short4>(data + OFFSET(DrawData, factor.invBlendConstant4W[3]));
-			blendFactor.y = *Pointer<Short4>(data + OFFSET(DrawData, factor.invBlendConstant4W[3]));
-			blendFactor.z = *Pointer<Short4>(data + OFFSET(DrawData, factor.invBlendConstant4W[3]));
-			break;
-		default:
-			UNSUPPORTED("VkBlendFactor: %d", int(blendFactorActive));
+	case VK_BLEND_FACTOR_ZERO:
+		// Optimized
+		break;
+	case VK_BLEND_FACTOR_ONE:
+		// Optimized
+		break;
+	case VK_BLEND_FACTOR_SRC_COLOR:
+		blendFactor.x = current.x;
+		blendFactor.y = current.y;
+		blendFactor.z = current.z;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
+		blendFactor.x = Short4(0xFFFFu) - current.x;
+		blendFactor.y = Short4(0xFFFFu) - current.y;
+		blendFactor.z = Short4(0xFFFFu) - current.z;
+		break;
+	case VK_BLEND_FACTOR_DST_COLOR:
+		blendFactor.x = pixel.x;
+		blendFactor.y = pixel.y;
+		blendFactor.z = pixel.z;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
+		blendFactor.x = Short4(0xFFFFu) - pixel.x;
+		blendFactor.y = Short4(0xFFFFu) - pixel.y;
+		blendFactor.z = Short4(0xFFFFu) - pixel.z;
+		break;
+	case VK_BLEND_FACTOR_SRC_ALPHA:
+		blendFactor.x = current.w;
+		blendFactor.y = current.w;
+		blendFactor.z = current.w;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
+		blendFactor.x = Short4(0xFFFFu) - current.w;
+		blendFactor.y = Short4(0xFFFFu) - current.w;
+		blendFactor.z = Short4(0xFFFFu) - current.w;
+		break;
+	case VK_BLEND_FACTOR_DST_ALPHA:
+		blendFactor.x = pixel.w;
+		blendFactor.y = pixel.w;
+		blendFactor.z = pixel.w;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
+		blendFactor.x = Short4(0xFFFFu) - pixel.w;
+		blendFactor.y = Short4(0xFFFFu) - pixel.w;
+		blendFactor.z = Short4(0xFFFFu) - pixel.w;
+		break;
+	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
+		blendFactor.x = Short4(0xFFFFu) - pixel.w;
+		blendFactor.x = Min(As<UShort4>(blendFactor.x), As<UShort4>(current.w));
+		blendFactor.y = blendFactor.x;
+		blendFactor.z = blendFactor.x;
+		break;
+	case VK_BLEND_FACTOR_CONSTANT_COLOR:
+		blendFactor.x = *Pointer<Short4>(data + OFFSET(DrawData, factor.blendConstant4W[0]));
+		blendFactor.y = *Pointer<Short4>(data + OFFSET(DrawData, factor.blendConstant4W[1]));
+		blendFactor.z = *Pointer<Short4>(data + OFFSET(DrawData, factor.blendConstant4W[2]));
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
+		blendFactor.x = *Pointer<Short4>(data + OFFSET(DrawData, factor.invBlendConstant4W[0]));
+		blendFactor.y = *Pointer<Short4>(data + OFFSET(DrawData, factor.invBlendConstant4W[1]));
+		blendFactor.z = *Pointer<Short4>(data + OFFSET(DrawData, factor.invBlendConstant4W[2]));
+		break;
+	case VK_BLEND_FACTOR_CONSTANT_ALPHA:
+		blendFactor.x = *Pointer<Short4>(data + OFFSET(DrawData, factor.blendConstant4W[3]));
+		blendFactor.y = *Pointer<Short4>(data + OFFSET(DrawData, factor.blendConstant4W[3]));
+		blendFactor.z = *Pointer<Short4>(data + OFFSET(DrawData, factor.blendConstant4W[3]));
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
+		blendFactor.x = *Pointer<Short4>(data + OFFSET(DrawData, factor.invBlendConstant4W[3]));
+		blendFactor.y = *Pointer<Short4>(data + OFFSET(DrawData, factor.invBlendConstant4W[3]));
+		blendFactor.z = *Pointer<Short4>(data + OFFSET(DrawData, factor.invBlendConstant4W[3]));
+		break;
+	default:
+		UNSUPPORTED("VkBlendFactor: %d", int(blendFactorActive));
 	}
 }
 
@@ -999,49 +999,49 @@
 {
 	switch(blendFactorAlphaActive)
 	{
-		case VK_BLEND_FACTOR_ZERO:
-			// Optimized
-			break;
-		case VK_BLEND_FACTOR_ONE:
-			// Optimized
-			break;
-		case VK_BLEND_FACTOR_SRC_COLOR:
-			blendFactor.w = current.w;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
-			blendFactor.w = Short4(0xFFFFu) - current.w;
-			break;
-		case VK_BLEND_FACTOR_DST_COLOR:
-			blendFactor.w = pixel.w;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
-			blendFactor.w = Short4(0xFFFFu) - pixel.w;
-			break;
-		case VK_BLEND_FACTOR_SRC_ALPHA:
-			blendFactor.w = current.w;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
-			blendFactor.w = Short4(0xFFFFu) - current.w;
-			break;
-		case VK_BLEND_FACTOR_DST_ALPHA:
-			blendFactor.w = pixel.w;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
-			blendFactor.w = Short4(0xFFFFu) - pixel.w;
-			break;
-		case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
-			blendFactor.w = Short4(0xFFFFu);
-			break;
-		case VK_BLEND_FACTOR_CONSTANT_COLOR:
-		case VK_BLEND_FACTOR_CONSTANT_ALPHA:
-			blendFactor.w = *Pointer<Short4>(data + OFFSET(DrawData, factor.blendConstant4W[3]));
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
-		case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
-			blendFactor.w = *Pointer<Short4>(data + OFFSET(DrawData, factor.invBlendConstant4W[3]));
-			break;
-		default:
-			UNSUPPORTED("VkBlendFactor: %d", int(blendFactorAlphaActive));
+	case VK_BLEND_FACTOR_ZERO:
+		// Optimized
+		break;
+	case VK_BLEND_FACTOR_ONE:
+		// Optimized
+		break;
+	case VK_BLEND_FACTOR_SRC_COLOR:
+		blendFactor.w = current.w;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
+		blendFactor.w = Short4(0xFFFFu) - current.w;
+		break;
+	case VK_BLEND_FACTOR_DST_COLOR:
+		blendFactor.w = pixel.w;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
+		blendFactor.w = Short4(0xFFFFu) - pixel.w;
+		break;
+	case VK_BLEND_FACTOR_SRC_ALPHA:
+		blendFactor.w = current.w;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
+		blendFactor.w = Short4(0xFFFFu) - current.w;
+		break;
+	case VK_BLEND_FACTOR_DST_ALPHA:
+		blendFactor.w = pixel.w;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
+		blendFactor.w = Short4(0xFFFFu) - pixel.w;
+		break;
+	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
+		blendFactor.w = Short4(0xFFFFu);
+		break;
+	case VK_BLEND_FACTOR_CONSTANT_COLOR:
+	case VK_BLEND_FACTOR_CONSTANT_ALPHA:
+		blendFactor.w = *Pointer<Short4>(data + OFFSET(DrawData, factor.blendConstant4W[3]));
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
+	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
+		blendFactor.w = *Pointer<Short4>(data + OFFSET(DrawData, factor.invBlendConstant4W[3]));
+		break;
+	default:
+		UNSUPPORTED("VkBlendFactor: %d", int(blendFactorAlphaActive));
 	}
 }
 
@@ -1061,126 +1061,126 @@
 
 	switch(state.targetFormat[index])
 	{
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-			buffer += 2 * x;
-			buffer2 = buffer + pitchB;
-			c01 = As<Short4>(Int2(*Pointer<Int>(buffer), *Pointer<Int>(buffer2)));
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		buffer += 2 * x;
+		buffer2 = buffer + pitchB;
+		c01 = As<Short4>(Int2(*Pointer<Int>(buffer), *Pointer<Int>(buffer2)));
 
-			pixel.x = (c01 & Short4(0x7C00u)) << 1;
-			pixel.y = (c01 & Short4(0x03E0u)) << 6;
-			pixel.z = (c01 & Short4(0x001Fu)) << 11;
-			pixel.w = (c01 & Short4(0x8000u)) >> 15;
+		pixel.x = (c01 & Short4(0x7C00u)) << 1;
+		pixel.y = (c01 & Short4(0x03E0u)) << 6;
+		pixel.z = (c01 & Short4(0x001Fu)) << 11;
+		pixel.w = (c01 & Short4(0x8000u)) >> 15;
 
-			// Expand to 16 bit range
-			pixel.x |= As<Short4>(As<UShort4>(pixel.x) >> 5);
-			pixel.x |= As<Short4>(As<UShort4>(pixel.x) >> 10);
-			pixel.y |= As<Short4>(As<UShort4>(pixel.y) >> 5);
-			pixel.y |= As<Short4>(As<UShort4>(pixel.y) >> 10);
-			pixel.z |= As<Short4>(As<UShort4>(pixel.z) >> 5);
-			pixel.z |= As<Short4>(As<UShort4>(pixel.z) >> 10);
-			break;
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-			buffer += 2 * x;
-			buffer2 = buffer + pitchB;
-			c01 = As<Short4>(Int2(*Pointer<Int>(buffer), *Pointer<Int>(buffer2)));
+		// Expand to 16 bit range
+		pixel.x |= As<Short4>(As<UShort4>(pixel.x) >> 5);
+		pixel.x |= As<Short4>(As<UShort4>(pixel.x) >> 10);
+		pixel.y |= As<Short4>(As<UShort4>(pixel.y) >> 5);
+		pixel.y |= As<Short4>(As<UShort4>(pixel.y) >> 10);
+		pixel.z |= As<Short4>(As<UShort4>(pixel.z) >> 5);
+		pixel.z |= As<Short4>(As<UShort4>(pixel.z) >> 10);
+		break;
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		buffer += 2 * x;
+		buffer2 = buffer + pitchB;
+		c01 = As<Short4>(Int2(*Pointer<Int>(buffer), *Pointer<Int>(buffer2)));
 
-			pixel.x = c01 & Short4(0xF800u);
-			pixel.y = (c01 & Short4(0x07E0u)) << 5;
-			pixel.z = (c01 & Short4(0x001Fu)) << 11;
-			pixel.w = Short4(0xFFFFu);
+		pixel.x = c01 & Short4(0xF800u);
+		pixel.y = (c01 & Short4(0x07E0u)) << 5;
+		pixel.z = (c01 & Short4(0x001Fu)) << 11;
+		pixel.w = Short4(0xFFFFu);
 
-			// Expand to 16 bit range
-			pixel.x |= As<Short4>(As<UShort4>(pixel.x) >> 5);
-			pixel.x |= As<Short4>(As<UShort4>(pixel.x) >> 10);
-			pixel.y |= As<Short4>(As<UShort4>(pixel.y) >> 6);
-			pixel.y |= As<Short4>(As<UShort4>(pixel.y) >> 12);
-			pixel.z |= As<Short4>(As<UShort4>(pixel.z) >> 5);
-			pixel.z |= As<Short4>(As<UShort4>(pixel.z) >> 10);
-			break;
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-			buffer += 4 * x;
-			c01 = *Pointer<Short4>(buffer);
-			buffer += pitchB;
-			c23 = *Pointer<Short4>(buffer);
-			pixel.z = c01;
-			pixel.y = c01;
-			pixel.z = UnpackLow(As<Byte8>(pixel.z), As<Byte8>(c23));
-			pixel.y = UnpackHigh(As<Byte8>(pixel.y), As<Byte8>(c23));
-			pixel.x = pixel.z;
-			pixel.z = UnpackLow(As<Byte8>(pixel.z), As<Byte8>(pixel.y));
-			pixel.x = UnpackHigh(As<Byte8>(pixel.x), As<Byte8>(pixel.y));
-			pixel.y = pixel.z;
-			pixel.w = pixel.x;
-			pixel.x = UnpackLow(As<Byte8>(pixel.x), As<Byte8>(pixel.x));
-			pixel.y = UnpackHigh(As<Byte8>(pixel.y), As<Byte8>(pixel.y));
-			pixel.z = UnpackLow(As<Byte8>(pixel.z), As<Byte8>(pixel.z));
-			pixel.w = UnpackHigh(As<Byte8>(pixel.w), As<Byte8>(pixel.w));
-			break;
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-			buffer += 4 * x;
-			c01 = *Pointer<Short4>(buffer);
-			buffer += pitchB;
-			c23 = *Pointer<Short4>(buffer);
-			pixel.z = c01;
-			pixel.y = c01;
-			pixel.z = UnpackLow(As<Byte8>(pixel.z), As<Byte8>(c23));
-			pixel.y = UnpackHigh(As<Byte8>(pixel.y), As<Byte8>(c23));
-			pixel.x = pixel.z;
-			pixel.z = UnpackLow(As<Byte8>(pixel.z), As<Byte8>(pixel.y));
-			pixel.x = UnpackHigh(As<Byte8>(pixel.x), As<Byte8>(pixel.y));
-			pixel.y = pixel.z;
-			pixel.w = pixel.x;
-			pixel.x = UnpackLow(As<Byte8>(pixel.z), As<Byte8>(pixel.z));
-			pixel.y = UnpackHigh(As<Byte8>(pixel.y), As<Byte8>(pixel.y));
-			pixel.z = UnpackLow(As<Byte8>(pixel.w), As<Byte8>(pixel.w));
-			pixel.w = UnpackHigh(As<Byte8>(pixel.w), As<Byte8>(pixel.w));
-			break;
-		case VK_FORMAT_R8_UNORM:
-			buffer += 1 * x;
-			pixel.x = Insert(pixel.x, *Pointer<Short>(buffer), 0);
-			buffer += pitchB;
-			pixel.x = Insert(pixel.x, *Pointer<Short>(buffer), 1);
-			pixel.x = UnpackLow(As<Byte8>(pixel.x), As<Byte8>(pixel.x));
-			pixel.y = Short4(0x0000);
-			pixel.z = Short4(0x0000);
-			pixel.w = Short4(0xFFFFu);
-			break;
-		case VK_FORMAT_R8G8_UNORM:
-			buffer += 2 * x;
-			c01 = As<Short4>(Insert(As<Int2>(c01), *Pointer<Int>(buffer), 0));
-			buffer += pitchB;
-			c01 = As<Short4>(Insert(As<Int2>(c01), *Pointer<Int>(buffer), 1));
-			pixel.x = (c01 & Short4(0x00FFu)) | (c01 << 8);
-			pixel.y = (c01 & Short4(0xFF00u)) | As<Short4>(As<UShort4>(c01) >> 8);
-			pixel.z = Short4(0x0000u);
-			pixel.w = Short4(0xFFFFu);
-			break;
-		case VK_FORMAT_R16G16B16A16_UNORM:
-			buffer += 8 * x;
-			pixel.x = *Pointer<Short4>(buffer + 0);
-			pixel.y = *Pointer<Short4>(buffer + 8);
-			buffer += pitchB;
-			pixel.z = *Pointer<Short4>(buffer + 0);
-			pixel.w = *Pointer<Short4>(buffer + 8);
-			transpose4x4(pixel.x, pixel.y, pixel.z, pixel.w);
-			break;
-		case VK_FORMAT_R16G16_UNORM:
-			buffer += 4 * x;
-			pixel.x = *Pointer<Short4>(buffer);
-			buffer += pitchB;
-			pixel.y = *Pointer<Short4>(buffer);
-			pixel.z = pixel.x;
-			pixel.x = As<Short4>(UnpackLow(pixel.x, pixel.y));
-			pixel.z = As<Short4>(UnpackHigh(pixel.z, pixel.y));
-			pixel.y = pixel.z;
-			pixel.x = As<Short4>(UnpackLow(pixel.x, pixel.z));
-			pixel.y = As<Short4>(UnpackHigh(pixel.y, pixel.z));
-			pixel.z = Short4(0xFFFFu);
-			pixel.w = Short4(0xFFFFu);
-			break;
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		// Expand to 16 bit range
+		pixel.x |= As<Short4>(As<UShort4>(pixel.x) >> 5);
+		pixel.x |= As<Short4>(As<UShort4>(pixel.x) >> 10);
+		pixel.y |= As<Short4>(As<UShort4>(pixel.y) >> 6);
+		pixel.y |= As<Short4>(As<UShort4>(pixel.y) >> 12);
+		pixel.z |= As<Short4>(As<UShort4>(pixel.z) >> 5);
+		pixel.z |= As<Short4>(As<UShort4>(pixel.z) >> 10);
+		break;
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+		buffer += 4 * x;
+		c01 = *Pointer<Short4>(buffer);
+		buffer += pitchB;
+		c23 = *Pointer<Short4>(buffer);
+		pixel.z = c01;
+		pixel.y = c01;
+		pixel.z = UnpackLow(As<Byte8>(pixel.z), As<Byte8>(c23));
+		pixel.y = UnpackHigh(As<Byte8>(pixel.y), As<Byte8>(c23));
+		pixel.x = pixel.z;
+		pixel.z = UnpackLow(As<Byte8>(pixel.z), As<Byte8>(pixel.y));
+		pixel.x = UnpackHigh(As<Byte8>(pixel.x), As<Byte8>(pixel.y));
+		pixel.y = pixel.z;
+		pixel.w = pixel.x;
+		pixel.x = UnpackLow(As<Byte8>(pixel.x), As<Byte8>(pixel.x));
+		pixel.y = UnpackHigh(As<Byte8>(pixel.y), As<Byte8>(pixel.y));
+		pixel.z = UnpackLow(As<Byte8>(pixel.z), As<Byte8>(pixel.z));
+		pixel.w = UnpackHigh(As<Byte8>(pixel.w), As<Byte8>(pixel.w));
+		break;
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+		buffer += 4 * x;
+		c01 = *Pointer<Short4>(buffer);
+		buffer += pitchB;
+		c23 = *Pointer<Short4>(buffer);
+		pixel.z = c01;
+		pixel.y = c01;
+		pixel.z = UnpackLow(As<Byte8>(pixel.z), As<Byte8>(c23));
+		pixel.y = UnpackHigh(As<Byte8>(pixel.y), As<Byte8>(c23));
+		pixel.x = pixel.z;
+		pixel.z = UnpackLow(As<Byte8>(pixel.z), As<Byte8>(pixel.y));
+		pixel.x = UnpackHigh(As<Byte8>(pixel.x), As<Byte8>(pixel.y));
+		pixel.y = pixel.z;
+		pixel.w = pixel.x;
+		pixel.x = UnpackLow(As<Byte8>(pixel.z), As<Byte8>(pixel.z));
+		pixel.y = UnpackHigh(As<Byte8>(pixel.y), As<Byte8>(pixel.y));
+		pixel.z = UnpackLow(As<Byte8>(pixel.w), As<Byte8>(pixel.w));
+		pixel.w = UnpackHigh(As<Byte8>(pixel.w), As<Byte8>(pixel.w));
+		break;
+	case VK_FORMAT_R8_UNORM:
+		buffer += 1 * x;
+		pixel.x = Insert(pixel.x, *Pointer<Short>(buffer), 0);
+		buffer += pitchB;
+		pixel.x = Insert(pixel.x, *Pointer<Short>(buffer), 1);
+		pixel.x = UnpackLow(As<Byte8>(pixel.x), As<Byte8>(pixel.x));
+		pixel.y = Short4(0x0000);
+		pixel.z = Short4(0x0000);
+		pixel.w = Short4(0xFFFFu);
+		break;
+	case VK_FORMAT_R8G8_UNORM:
+		buffer += 2 * x;
+		c01 = As<Short4>(Insert(As<Int2>(c01), *Pointer<Int>(buffer), 0));
+		buffer += pitchB;
+		c01 = As<Short4>(Insert(As<Int2>(c01), *Pointer<Int>(buffer), 1));
+		pixel.x = (c01 & Short4(0x00FFu)) | (c01 << 8);
+		pixel.y = (c01 & Short4(0xFF00u)) | As<Short4>(As<UShort4>(c01) >> 8);
+		pixel.z = Short4(0x0000u);
+		pixel.w = Short4(0xFFFFu);
+		break;
+	case VK_FORMAT_R16G16B16A16_UNORM:
+		buffer += 8 * x;
+		pixel.x = *Pointer<Short4>(buffer + 0);
+		pixel.y = *Pointer<Short4>(buffer + 8);
+		buffer += pitchB;
+		pixel.z = *Pointer<Short4>(buffer + 0);
+		pixel.w = *Pointer<Short4>(buffer + 8);
+		transpose4x4(pixel.x, pixel.y, pixel.z, pixel.w);
+		break;
+	case VK_FORMAT_R16G16_UNORM:
+		buffer += 4 * x;
+		pixel.x = *Pointer<Short4>(buffer);
+		buffer += pitchB;
+		pixel.y = *Pointer<Short4>(buffer);
+		pixel.z = pixel.x;
+		pixel.x = As<Short4>(UnpackLow(pixel.x, pixel.y));
+		pixel.z = As<Short4>(UnpackHigh(pixel.z, pixel.y));
+		pixel.y = pixel.z;
+		pixel.x = As<Short4>(UnpackLow(pixel.x, pixel.z));
+		pixel.y = As<Short4>(UnpackHigh(pixel.y, pixel.z));
+		pixel.z = Short4(0xFFFFu);
+		pixel.w = Short4(0xFFFFu);
+		break;
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
 		{
 			Int4 v = Int4(0);
 			buffer += 4 * x;
@@ -1193,7 +1193,7 @@
 			pixel = a2b10g10r10Unpack(v);
 		}
 		break;
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
 		{
 			Int4 v = Int4(0);
 			v = Insert(v, *Pointer<Int>(buffer + 4 * x), 0);
@@ -1205,8 +1205,8 @@
 			pixel = a2r10g10b10Unpack(v);
 		}
 		break;
-		default:
-			UNSUPPORTED("VkFormat %d", int(state.targetFormat[index]));
+	default:
+		UNSUPPORTED("VkFormat %d", int(state.targetFormat[index]));
 	}
 
 	if(isSRGB(index))
@@ -1250,46 +1250,46 @@
 
 	switch(state.blendState[index].blendOperation)
 	{
-		case VK_BLEND_OP_ADD:
-			current.x = AddSat(As<UShort4>(current.x), As<UShort4>(pixel.x));
-			current.y = AddSat(As<UShort4>(current.y), As<UShort4>(pixel.y));
-			current.z = AddSat(As<UShort4>(current.z), As<UShort4>(pixel.z));
-			break;
-		case VK_BLEND_OP_SUBTRACT:
-			current.x = SubSat(As<UShort4>(current.x), As<UShort4>(pixel.x));
-			current.y = SubSat(As<UShort4>(current.y), As<UShort4>(pixel.y));
-			current.z = SubSat(As<UShort4>(current.z), As<UShort4>(pixel.z));
-			break;
-		case VK_BLEND_OP_REVERSE_SUBTRACT:
-			current.x = SubSat(As<UShort4>(pixel.x), As<UShort4>(current.x));
-			current.y = SubSat(As<UShort4>(pixel.y), As<UShort4>(current.y));
-			current.z = SubSat(As<UShort4>(pixel.z), As<UShort4>(current.z));
-			break;
-		case VK_BLEND_OP_MIN:
-			current.x = Min(As<UShort4>(current.x), As<UShort4>(pixel.x));
-			current.y = Min(As<UShort4>(current.y), As<UShort4>(pixel.y));
-			current.z = Min(As<UShort4>(current.z), As<UShort4>(pixel.z));
-			break;
-		case VK_BLEND_OP_MAX:
-			current.x = Max(As<UShort4>(current.x), As<UShort4>(pixel.x));
-			current.y = Max(As<UShort4>(current.y), As<UShort4>(pixel.y));
-			current.z = Max(As<UShort4>(current.z), As<UShort4>(pixel.z));
-			break;
-		case VK_BLEND_OP_SRC_EXT:
-			// No operation
-			break;
-		case VK_BLEND_OP_DST_EXT:
-			current.x = pixel.x;
-			current.y = pixel.y;
-			current.z = pixel.z;
-			break;
-		case VK_BLEND_OP_ZERO_EXT:
-			current.x = Short4(0x0000);
-			current.y = Short4(0x0000);
-			current.z = Short4(0x0000);
-			break;
-		default:
-			UNSUPPORTED("VkBlendOp: %d", int(state.blendState[index].blendOperation));
+	case VK_BLEND_OP_ADD:
+		current.x = AddSat(As<UShort4>(current.x), As<UShort4>(pixel.x));
+		current.y = AddSat(As<UShort4>(current.y), As<UShort4>(pixel.y));
+		current.z = AddSat(As<UShort4>(current.z), As<UShort4>(pixel.z));
+		break;
+	case VK_BLEND_OP_SUBTRACT:
+		current.x = SubSat(As<UShort4>(current.x), As<UShort4>(pixel.x));
+		current.y = SubSat(As<UShort4>(current.y), As<UShort4>(pixel.y));
+		current.z = SubSat(As<UShort4>(current.z), As<UShort4>(pixel.z));
+		break;
+	case VK_BLEND_OP_REVERSE_SUBTRACT:
+		current.x = SubSat(As<UShort4>(pixel.x), As<UShort4>(current.x));
+		current.y = SubSat(As<UShort4>(pixel.y), As<UShort4>(current.y));
+		current.z = SubSat(As<UShort4>(pixel.z), As<UShort4>(current.z));
+		break;
+	case VK_BLEND_OP_MIN:
+		current.x = Min(As<UShort4>(current.x), As<UShort4>(pixel.x));
+		current.y = Min(As<UShort4>(current.y), As<UShort4>(pixel.y));
+		current.z = Min(As<UShort4>(current.z), As<UShort4>(pixel.z));
+		break;
+	case VK_BLEND_OP_MAX:
+		current.x = Max(As<UShort4>(current.x), As<UShort4>(pixel.x));
+		current.y = Max(As<UShort4>(current.y), As<UShort4>(pixel.y));
+		current.z = Max(As<UShort4>(current.z), As<UShort4>(pixel.z));
+		break;
+	case VK_BLEND_OP_SRC_EXT:
+		// No operation
+		break;
+	case VK_BLEND_OP_DST_EXT:
+		current.x = pixel.x;
+		current.y = pixel.y;
+		current.z = pixel.z;
+		break;
+	case VK_BLEND_OP_ZERO_EXT:
+		current.x = Short4(0x0000);
+		current.y = Short4(0x0000);
+		current.z = Short4(0x0000);
+		break;
+	default:
+		UNSUPPORTED("VkBlendOp: %d", int(state.blendState[index].blendOperation));
 	}
 
 	blendFactorAlpha(sourceFactor, current, pixel, state.blendState[index].sourceBlendFactorAlpha);
@@ -1307,32 +1307,32 @@
 
 	switch(state.blendState[index].blendOperationAlpha)
 	{
-		case VK_BLEND_OP_ADD:
-			current.w = AddSat(As<UShort4>(current.w), As<UShort4>(pixel.w));
-			break;
-		case VK_BLEND_OP_SUBTRACT:
-			current.w = SubSat(As<UShort4>(current.w), As<UShort4>(pixel.w));
-			break;
-		case VK_BLEND_OP_REVERSE_SUBTRACT:
-			current.w = SubSat(As<UShort4>(pixel.w), As<UShort4>(current.w));
-			break;
-		case VK_BLEND_OP_MIN:
-			current.w = Min(As<UShort4>(current.w), As<UShort4>(pixel.w));
-			break;
-		case VK_BLEND_OP_MAX:
-			current.w = Max(As<UShort4>(current.w), As<UShort4>(pixel.w));
-			break;
-		case VK_BLEND_OP_SRC_EXT:
-			// No operation
-			break;
-		case VK_BLEND_OP_DST_EXT:
-			current.w = pixel.w;
-			break;
-		case VK_BLEND_OP_ZERO_EXT:
-			current.w = Short4(0x0000);
-			break;
-		default:
-			UNSUPPORTED("VkBlendOp: %d", int(state.blendState[index].blendOperationAlpha));
+	case VK_BLEND_OP_ADD:
+		current.w = AddSat(As<UShort4>(current.w), As<UShort4>(pixel.w));
+		break;
+	case VK_BLEND_OP_SUBTRACT:
+		current.w = SubSat(As<UShort4>(current.w), As<UShort4>(pixel.w));
+		break;
+	case VK_BLEND_OP_REVERSE_SUBTRACT:
+		current.w = SubSat(As<UShort4>(pixel.w), As<UShort4>(current.w));
+		break;
+	case VK_BLEND_OP_MIN:
+		current.w = Min(As<UShort4>(current.w), As<UShort4>(pixel.w));
+		break;
+	case VK_BLEND_OP_MAX:
+		current.w = Max(As<UShort4>(current.w), As<UShort4>(pixel.w));
+		break;
+	case VK_BLEND_OP_SRC_EXT:
+		// No operation
+		break;
+	case VK_BLEND_OP_DST_EXT:
+		current.w = pixel.w;
+		break;
+	case VK_BLEND_OP_ZERO_EXT:
+		current.w = Short4(0x0000);
+		break;
+	default:
+		UNSUPPORTED("VkBlendOp: %d", int(state.blendState[index].blendOperationAlpha));
 	}
 }
 
@@ -1345,39 +1345,39 @@
 
 	switch(state.targetFormat[index])
 	{
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-			current.x = current.x - As<Short4>(As<UShort4>(current.x) >> 8) + Short4(0x0080);
-			current.y = current.y - As<Short4>(As<UShort4>(current.y) >> 8) + Short4(0x0080);
-			current.z = current.z - As<Short4>(As<UShort4>(current.z) >> 8) + Short4(0x0080);
-			current.w = current.w - As<Short4>(As<UShort4>(current.w) >> 8) + Short4(0x0080);
-			break;
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-			current.x = current.x - As<Short4>(As<UShort4>(current.x) >> 10) + Short4(0x0020);
-			current.y = current.y - As<Short4>(As<UShort4>(current.y) >> 10) + Short4(0x0020);
-			current.z = current.z - As<Short4>(As<UShort4>(current.z) >> 10) + Short4(0x0020);
-			current.w = current.w - As<Short4>(As<UShort4>(current.w) >> 2) + Short4(0x2000);
-			break;
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-			current.x = current.x - As<Short4>(As<UShort4>(current.x) >> 5) + Short4(0x0400);
-			current.y = current.y - As<Short4>(As<UShort4>(current.y) >> 5) + Short4(0x0400);
-			current.z = current.z - As<Short4>(As<UShort4>(current.z) >> 5) + Short4(0x0400);
-			current.w = current.w - As<Short4>(As<UShort4>(current.w) >> 1) + Short4(0x4000);
-			break;
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-			current.x = current.x - As<Short4>(As<UShort4>(current.x) >> 5) + Short4(0x0400);
-			current.y = current.y - As<Short4>(As<UShort4>(current.y) >> 6) + Short4(0x0200);
-			current.z = current.z - As<Short4>(As<UShort4>(current.z) >> 5) + Short4(0x0400);
-			break;
-		default:
-			break;
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+		current.x = current.x - As<Short4>(As<UShort4>(current.x) >> 8) + Short4(0x0080);
+		current.y = current.y - As<Short4>(As<UShort4>(current.y) >> 8) + Short4(0x0080);
+		current.z = current.z - As<Short4>(As<UShort4>(current.z) >> 8) + Short4(0x0080);
+		current.w = current.w - As<Short4>(As<UShort4>(current.w) >> 8) + Short4(0x0080);
+		break;
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		current.x = current.x - As<Short4>(As<UShort4>(current.x) >> 10) + Short4(0x0020);
+		current.y = current.y - As<Short4>(As<UShort4>(current.y) >> 10) + Short4(0x0020);
+		current.z = current.z - As<Short4>(As<UShort4>(current.z) >> 10) + Short4(0x0020);
+		current.w = current.w - As<Short4>(As<UShort4>(current.w) >> 2) + Short4(0x2000);
+		break;
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		current.x = current.x - As<Short4>(As<UShort4>(current.x) >> 5) + Short4(0x0400);
+		current.y = current.y - As<Short4>(As<UShort4>(current.y) >> 5) + Short4(0x0400);
+		current.z = current.z - As<Short4>(As<UShort4>(current.z) >> 5) + Short4(0x0400);
+		current.w = current.w - As<Short4>(As<UShort4>(current.w) >> 1) + Short4(0x4000);
+		break;
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		current.x = current.x - As<Short4>(As<UShort4>(current.x) >> 5) + Short4(0x0400);
+		current.y = current.y - As<Short4>(As<UShort4>(current.y) >> 6) + Short4(0x0200);
+		current.z = current.z - As<Short4>(As<UShort4>(current.z) >> 5) + Short4(0x0400);
+		break;
+	default:
+		break;
 	}
 
 	int rgbaWriteMask = state.colorWriteActive(index) & outputMasks[index];
@@ -1385,7 +1385,7 @@
 
 	switch(state.targetFormat[index])
 	{
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
 		{
 			current.w = current.w & Short4(0x8000u);
 			current.x = As<UShort4>(current.x & Short4(0xF800)) >> 1;
@@ -1395,7 +1395,7 @@
 			current.x = current.x | current.y | current.z | current.w;
 		}
 		break;
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
 		{
 			current.x = current.x & Short4(0xF800u);
 			current.y = As<UShort4>(current.y & Short4(0xFC00u)) >> 5;
@@ -1404,101 +1404,101 @@
 			current.x = current.x | current.y | current.z;
 		}
 		break;
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-			if(rgbaWriteMask == 0x7)
-			{
-				current.x = As<Short4>(As<UShort4>(current.x) >> 8);
-				current.y = As<Short4>(As<UShort4>(current.y) >> 8);
-				current.z = As<Short4>(As<UShort4>(current.z) >> 8);
-
-				current.z = As<Short4>(PackUnsigned(current.z, current.x));
-				current.y = As<Short4>(PackUnsigned(current.y, current.y));
-
-				current.x = current.z;
-				current.z = UnpackLow(As<Byte8>(current.z), As<Byte8>(current.y));
-				current.x = UnpackHigh(As<Byte8>(current.x), As<Byte8>(current.y));
-				current.y = current.z;
-				current.z = As<Short4>(UnpackLow(current.z, current.x));
-				current.y = As<Short4>(UnpackHigh(current.y, current.x));
-			}
-			else
-			{
-				current.x = As<Short4>(As<UShort4>(current.x) >> 8);
-				current.y = As<Short4>(As<UShort4>(current.y) >> 8);
-				current.z = As<Short4>(As<UShort4>(current.z) >> 8);
-				current.w = As<Short4>(As<UShort4>(current.w) >> 8);
-
-				current.z = As<Short4>(PackUnsigned(current.z, current.x));
-				current.y = As<Short4>(PackUnsigned(current.y, current.w));
-
-				current.x = current.z;
-				current.z = UnpackLow(As<Byte8>(current.z), As<Byte8>(current.y));
-				current.x = UnpackHigh(As<Byte8>(current.x), As<Byte8>(current.y));
-				current.y = current.z;
-				current.z = As<Short4>(UnpackLow(current.z, current.x));
-				current.y = As<Short4>(UnpackHigh(current.y, current.x));
-			}
-			break;
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-			if(rgbaWriteMask == 0x7)
-			{
-				current.x = As<Short4>(As<UShort4>(current.x) >> 8);
-				current.y = As<Short4>(As<UShort4>(current.y) >> 8);
-				current.z = As<Short4>(As<UShort4>(current.z) >> 8);
-
-				current.z = As<Short4>(PackUnsigned(current.x, current.z));
-				current.y = As<Short4>(PackUnsigned(current.y, current.y));
-
-				current.x = current.z;
-				current.z = UnpackLow(As<Byte8>(current.z), As<Byte8>(current.y));
-				current.x = UnpackHigh(As<Byte8>(current.x), As<Byte8>(current.y));
-				current.y = current.z;
-				current.z = As<Short4>(UnpackLow(current.z, current.x));
-				current.y = As<Short4>(UnpackHigh(current.y, current.x));
-			}
-			else
-			{
-				current.x = As<Short4>(As<UShort4>(current.x) >> 8);
-				current.y = As<Short4>(As<UShort4>(current.y) >> 8);
-				current.z = As<Short4>(As<UShort4>(current.z) >> 8);
-				current.w = As<Short4>(As<UShort4>(current.w) >> 8);
-
-				current.z = As<Short4>(PackUnsigned(current.x, current.z));
-				current.y = As<Short4>(PackUnsigned(current.y, current.w));
-
-				current.x = current.z;
-				current.z = UnpackLow(As<Byte8>(current.z), As<Byte8>(current.y));
-				current.x = UnpackHigh(As<Byte8>(current.x), As<Byte8>(current.y));
-				current.y = current.z;
-				current.z = As<Short4>(UnpackLow(current.z, current.x));
-				current.y = As<Short4>(UnpackHigh(current.y, current.x));
-			}
-			break;
-		case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+		if(rgbaWriteMask == 0x7)
+		{
 			current.x = As<Short4>(As<UShort4>(current.x) >> 8);
 			current.y = As<Short4>(As<UShort4>(current.y) >> 8);
-			current.x = As<Short4>(PackUnsigned(current.x, current.x));
+			current.z = As<Short4>(As<UShort4>(current.z) >> 8);
+
+			current.z = As<Short4>(PackUnsigned(current.z, current.x));
 			current.y = As<Short4>(PackUnsigned(current.y, current.y));
-			current.x = UnpackLow(As<Byte8>(current.x), As<Byte8>(current.y));
-			break;
-		case VK_FORMAT_R8_UNORM:
-			current.x = As<Short4>(As<UShort4>(current.x) >> 8);
-			current.x = As<Short4>(PackUnsigned(current.x, current.x));
-			break;
-		case VK_FORMAT_R16G16_UNORM:
-			current.z = current.x;
-			current.x = As<Short4>(UnpackLow(current.x, current.y));
-			current.z = As<Short4>(UnpackHigh(current.z, current.y));
+
+			current.x = current.z;
+			current.z = UnpackLow(As<Byte8>(current.z), As<Byte8>(current.y));
+			current.x = UnpackHigh(As<Byte8>(current.x), As<Byte8>(current.y));
 			current.y = current.z;
-			break;
-		case VK_FORMAT_R16G16B16A16_UNORM:
-			transpose4x4(current.x, current.y, current.z, current.w);
-			break;
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+			current.z = As<Short4>(UnpackLow(current.z, current.x));
+			current.y = As<Short4>(UnpackHigh(current.y, current.x));
+		}
+		else
+		{
+			current.x = As<Short4>(As<UShort4>(current.x) >> 8);
+			current.y = As<Short4>(As<UShort4>(current.y) >> 8);
+			current.z = As<Short4>(As<UShort4>(current.z) >> 8);
+			current.w = As<Short4>(As<UShort4>(current.w) >> 8);
+
+			current.z = As<Short4>(PackUnsigned(current.z, current.x));
+			current.y = As<Short4>(PackUnsigned(current.y, current.w));
+
+			current.x = current.z;
+			current.z = UnpackLow(As<Byte8>(current.z), As<Byte8>(current.y));
+			current.x = UnpackHigh(As<Byte8>(current.x), As<Byte8>(current.y));
+			current.y = current.z;
+			current.z = As<Short4>(UnpackLow(current.z, current.x));
+			current.y = As<Short4>(UnpackHigh(current.y, current.x));
+		}
+		break;
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+		if(rgbaWriteMask == 0x7)
+		{
+			current.x = As<Short4>(As<UShort4>(current.x) >> 8);
+			current.y = As<Short4>(As<UShort4>(current.y) >> 8);
+			current.z = As<Short4>(As<UShort4>(current.z) >> 8);
+
+			current.z = As<Short4>(PackUnsigned(current.x, current.z));
+			current.y = As<Short4>(PackUnsigned(current.y, current.y));
+
+			current.x = current.z;
+			current.z = UnpackLow(As<Byte8>(current.z), As<Byte8>(current.y));
+			current.x = UnpackHigh(As<Byte8>(current.x), As<Byte8>(current.y));
+			current.y = current.z;
+			current.z = As<Short4>(UnpackLow(current.z, current.x));
+			current.y = As<Short4>(UnpackHigh(current.y, current.x));
+		}
+		else
+		{
+			current.x = As<Short4>(As<UShort4>(current.x) >> 8);
+			current.y = As<Short4>(As<UShort4>(current.y) >> 8);
+			current.z = As<Short4>(As<UShort4>(current.z) >> 8);
+			current.w = As<Short4>(As<UShort4>(current.w) >> 8);
+
+			current.z = As<Short4>(PackUnsigned(current.x, current.z));
+			current.y = As<Short4>(PackUnsigned(current.y, current.w));
+
+			current.x = current.z;
+			current.z = UnpackLow(As<Byte8>(current.z), As<Byte8>(current.y));
+			current.x = UnpackHigh(As<Byte8>(current.x), As<Byte8>(current.y));
+			current.y = current.z;
+			current.z = As<Short4>(UnpackLow(current.z, current.x));
+			current.y = As<Short4>(UnpackHigh(current.y, current.x));
+		}
+		break;
+	case VK_FORMAT_R8G8_UNORM:
+		current.x = As<Short4>(As<UShort4>(current.x) >> 8);
+		current.y = As<Short4>(As<UShort4>(current.y) >> 8);
+		current.x = As<Short4>(PackUnsigned(current.x, current.x));
+		current.y = As<Short4>(PackUnsigned(current.y, current.y));
+		current.x = UnpackLow(As<Byte8>(current.x), As<Byte8>(current.y));
+		break;
+	case VK_FORMAT_R8_UNORM:
+		current.x = As<Short4>(As<UShort4>(current.x) >> 8);
+		current.x = As<Short4>(PackUnsigned(current.x, current.x));
+		break;
+	case VK_FORMAT_R16G16_UNORM:
+		current.z = current.x;
+		current.x = As<Short4>(UnpackLow(current.x, current.y));
+		current.z = As<Short4>(UnpackHigh(current.z, current.y));
+		current.y = current.z;
+		break;
+	case VK_FORMAT_R16G16B16A16_UNORM:
+		transpose4x4(current.x, current.y, current.z, current.w);
+		break;
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
 		{
 			auto r = (Int4(current.x) >> 6) & Int4(0x3ff);
 			auto g = (Int4(current.y) >> 6) & Int4(0x3ff);
@@ -1509,9 +1509,9 @@
 			auto c13 = As<Int2>(Int4(packed.ywww));  // TODO: auto c13 = packed.yw;
 			current.x = UnpackLow(c02, c13);
 			current.y = UnpackHigh(c02, c13);
-			break;
 		}
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		break;
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
 		{
 			auto r = (Int4(current.x) >> 6) & Int4(0x3ff);
 			auto g = (Int4(current.y) >> 6) & Int4(0x3ff);
@@ -1522,10 +1522,10 @@
 			auto c13 = As<Int2>(Int4(packed.ywww));  // TODO: auto c13 = packed.yw;
 			current.x = UnpackLow(c02, c13);
 			current.y = UnpackHigh(c02, c13);
-			break;
 		}
-		default:
-			UNSUPPORTED("VkFormat: %d", int(state.targetFormat[index]));
+		break;
+	default:
+		UNSUPPORTED("VkFormat: %d", int(state.targetFormat[index]));
 	}
 
 	Short4 c01 = current.z;
@@ -1552,7 +1552,7 @@
 
 	switch(state.targetFormat[index])
 	{
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
 		{
 			buffer += 2 * x;
 			Int value = *Pointer<Int>(buffer);
@@ -1579,7 +1579,7 @@
 			*Pointer<Int>(buffer) = (c23 & mask23) | (value & ~mask23);
 		}
 		break;
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
 		{
 			buffer += 2 * x;
 			Int value = *Pointer<Int>(buffer);
@@ -1606,8 +1606,8 @@
 			*Pointer<Int>(buffer) = (c23 & mask23) | (value & ~mask23);
 		}
 		break;
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_SRGB:
 		{
 			buffer += x * 4;
 			Short4 value = *Pointer<Short4>(buffer);
@@ -1631,10 +1631,10 @@
 			*Pointer<Short4>(buffer) = (c23 & mask23) | (value & ~mask23);
 		}
 		break;
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
 		{
 			buffer += x * 4;
 			Short4 value = *Pointer<Short4>(buffer);
@@ -1658,47 +1658,47 @@
 			*Pointer<Short4>(buffer) = (c23 & mask23) | (value & ~mask23);
 		}
 		break;
-		case VK_FORMAT_R8G8_UNORM:
-			if((rgbaWriteMask & 0x00000003) != 0x0)
+	case VK_FORMAT_R8G8_UNORM:
+		if((rgbaWriteMask & 0x00000003) != 0x0)
+		{
+			buffer += 2 * x;
+			Int2 value;
+			value = Insert(value, *Pointer<Int>(buffer), 0);
+			value = Insert(value, *Pointer<Int>(buffer + pitchB), 1);
+
+			Int2 packedCol = As<Int2>(current.x);
+
+			UInt2 mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskW4Q) + xMask * 8);
+			if((rgbaWriteMask & 0x3) != 0x3)
 			{
-				buffer += 2 * x;
-				Int2 value;
-				value = Insert(value, *Pointer<Int>(buffer), 0);
-				value = Insert(value, *Pointer<Int>(buffer + pitchB), 1);
-
-				Int2 packedCol = As<Int2>(current.x);
-
-				UInt2 mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskW4Q) + xMask * 8);
-				if((rgbaWriteMask & 0x3) != 0x3)
-				{
-					Int tmpMask = *Pointer<Int>(constants + OFFSET(Constants, maskB4Q[5 * (rgbaWriteMask & 0x3)][0]));
-					UInt2 rgbaMask = As<UInt2>(Int2(tmpMask, tmpMask));
-					mergedMask &= rgbaMask;
-				}
-
-				packedCol = As<Int2>((As<UInt2>(packedCol) & mergedMask) | (As<UInt2>(value) & ~mergedMask));
-
-				*Pointer<UInt>(buffer) = As<UInt>(Extract(packedCol, 0));
-				*Pointer<UInt>(buffer + pitchB) = As<UInt>(Extract(packedCol, 1));
+				Int tmpMask = *Pointer<Int>(constants + OFFSET(Constants, maskB4Q[5 * (rgbaWriteMask & 0x3)][0]));
+				UInt2 rgbaMask = As<UInt2>(Int2(tmpMask, tmpMask));
+				mergedMask &= rgbaMask;
 			}
-			break;
-		case VK_FORMAT_R8_UNORM:
-			if(rgbaWriteMask & 0x00000001)
-			{
-				buffer += 1 * x;
-				Short4 value;
-				value = Insert(value, *Pointer<Short>(buffer), 0);
-				value = Insert(value, *Pointer<Short>(buffer + pitchB), 1);
 
-				current.x &= *Pointer<Short4>(constants + OFFSET(Constants, maskB4Q) + 8 * xMask);
-				value &= *Pointer<Short4>(constants + OFFSET(Constants, invMaskB4Q) + 8 * xMask);
-				current.x |= value;
+			packedCol = As<Int2>((As<UInt2>(packedCol) & mergedMask) | (As<UInt2>(value) & ~mergedMask));
 
-				*Pointer<Short>(buffer) = Extract(current.x, 0);
-				*Pointer<Short>(buffer + pitchB) = Extract(current.x, 1);
-			}
-			break;
-		case VK_FORMAT_R16G16_UNORM:
+			*Pointer<UInt>(buffer) = As<UInt>(Extract(packedCol, 0));
+			*Pointer<UInt>(buffer + pitchB) = As<UInt>(Extract(packedCol, 1));
+		}
+		break;
+	case VK_FORMAT_R8_UNORM:
+		if(rgbaWriteMask & 0x00000001)
+		{
+			buffer += 1 * x;
+			Short4 value;
+			value = Insert(value, *Pointer<Short>(buffer), 0);
+			value = Insert(value, *Pointer<Short>(buffer + pitchB), 1);
+
+			current.x &= *Pointer<Short4>(constants + OFFSET(Constants, maskB4Q) + 8 * xMask);
+			value &= *Pointer<Short4>(constants + OFFSET(Constants, invMaskB4Q) + 8 * xMask);
+			current.x |= value;
+
+			*Pointer<Short>(buffer) = Extract(current.x, 0);
+			*Pointer<Short>(buffer + pitchB) = Extract(current.x, 1);
+		}
+		break;
+	case VK_FORMAT_R16G16_UNORM:
 		{
 			buffer += 4 * x;
 
@@ -1735,7 +1735,7 @@
 			*Pointer<Short4>(buffer) = current.y;
 		}
 		break;
-		case VK_FORMAT_R16G16B16A16_UNORM:
+	case VK_FORMAT_R16G16B16A16_UNORM:
 		{
 			buffer += 8 * x;
 
@@ -1810,10 +1810,10 @@
 			}
 		}
 		break;
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-			rgbaWriteMask = bgraWriteMask;
-			// [[fallthrough]]
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		rgbaWriteMask = bgraWriteMask;
+		// [[fallthrough]]
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
 		{
 			buffer += 4 * x;
 
@@ -1836,8 +1836,8 @@
 			*Pointer<Int2>(buffer) = (As<Int2>(current.y) & mergedMask) | (value & ~mergedMask);
 		}
 		break;
-		default:
-			UNSUPPORTED("VkFormat: %d", int(state.targetFormat[index]));
+	default:
+		UNSUPPORTED("VkFormat: %d", int(state.targetFormat[index]));
 	}
 }
 
@@ -1845,85 +1845,85 @@
 {
 	switch(blendFactorActive)
 	{
-		case VK_BLEND_FACTOR_ZERO:
-			blendFactor.x = Float4(0);
-			blendFactor.y = Float4(0);
-			blendFactor.z = Float4(0);
-			break;
-		case VK_BLEND_FACTOR_ONE:
-			blendFactor.x = Float4(1);
-			blendFactor.y = Float4(1);
-			blendFactor.z = Float4(1);
-			break;
-		case VK_BLEND_FACTOR_SRC_COLOR:
-			blendFactor.x = oC.x;
-			blendFactor.y = oC.y;
-			blendFactor.z = oC.z;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
-			blendFactor.x = Float4(1.0f) - oC.x;
-			blendFactor.y = Float4(1.0f) - oC.y;
-			blendFactor.z = Float4(1.0f) - oC.z;
-			break;
-		case VK_BLEND_FACTOR_DST_COLOR:
-			blendFactor.x = pixel.x;
-			blendFactor.y = pixel.y;
-			blendFactor.z = pixel.z;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
-			blendFactor.x = Float4(1.0f) - pixel.x;
-			blendFactor.y = Float4(1.0f) - pixel.y;
-			blendFactor.z = Float4(1.0f) - pixel.z;
-			break;
-		case VK_BLEND_FACTOR_SRC_ALPHA:
-			blendFactor.x = oC.w;
-			blendFactor.y = oC.w;
-			blendFactor.z = oC.w;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
-			blendFactor.x = Float4(1.0f) - oC.w;
-			blendFactor.y = Float4(1.0f) - oC.w;
-			blendFactor.z = Float4(1.0f) - oC.w;
-			break;
-		case VK_BLEND_FACTOR_DST_ALPHA:
-			blendFactor.x = pixel.w;
-			blendFactor.y = pixel.w;
-			blendFactor.z = pixel.w;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
-			blendFactor.x = Float4(1.0f) - pixel.w;
-			blendFactor.y = Float4(1.0f) - pixel.w;
-			blendFactor.z = Float4(1.0f) - pixel.w;
-			break;
-		case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
-			blendFactor.x = Float4(1.0f) - pixel.w;
-			blendFactor.x = Min(blendFactor.x, oC.w);
-			blendFactor.y = blendFactor.x;
-			blendFactor.z = blendFactor.x;
-			break;
-		case VK_BLEND_FACTOR_CONSTANT_COLOR:
-			blendFactor.x = *Pointer<Float4>(data + OFFSET(DrawData, factor.blendConstant4F[0]));
-			blendFactor.y = *Pointer<Float4>(data + OFFSET(DrawData, factor.blendConstant4F[1]));
-			blendFactor.z = *Pointer<Float4>(data + OFFSET(DrawData, factor.blendConstant4F[2]));
-			break;
-		case VK_BLEND_FACTOR_CONSTANT_ALPHA:
-			blendFactor.x = *Pointer<Float4>(data + OFFSET(DrawData, factor.blendConstant4F[3]));
-			blendFactor.y = *Pointer<Float4>(data + OFFSET(DrawData, factor.blendConstant4F[3]));
-			blendFactor.z = *Pointer<Float4>(data + OFFSET(DrawData, factor.blendConstant4F[3]));
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
-			blendFactor.x = *Pointer<Float4>(data + OFFSET(DrawData, factor.invBlendConstant4F[0]));
-			blendFactor.y = *Pointer<Float4>(data + OFFSET(DrawData, factor.invBlendConstant4F[1]));
-			blendFactor.z = *Pointer<Float4>(data + OFFSET(DrawData, factor.invBlendConstant4F[2]));
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
-			blendFactor.x = *Pointer<Float4>(data + OFFSET(DrawData, factor.invBlendConstant4F[3]));
-			blendFactor.y = *Pointer<Float4>(data + OFFSET(DrawData, factor.invBlendConstant4F[3]));
-			blendFactor.z = *Pointer<Float4>(data + OFFSET(DrawData, factor.invBlendConstant4F[3]));
-			break;
+	case VK_BLEND_FACTOR_ZERO:
+		blendFactor.x = Float4(0);
+		blendFactor.y = Float4(0);
+		blendFactor.z = Float4(0);
+		break;
+	case VK_BLEND_FACTOR_ONE:
+		blendFactor.x = Float4(1);
+		blendFactor.y = Float4(1);
+		blendFactor.z = Float4(1);
+		break;
+	case VK_BLEND_FACTOR_SRC_COLOR:
+		blendFactor.x = oC.x;
+		blendFactor.y = oC.y;
+		blendFactor.z = oC.z;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
+		blendFactor.x = Float4(1.0f) - oC.x;
+		blendFactor.y = Float4(1.0f) - oC.y;
+		blendFactor.z = Float4(1.0f) - oC.z;
+		break;
+	case VK_BLEND_FACTOR_DST_COLOR:
+		blendFactor.x = pixel.x;
+		blendFactor.y = pixel.y;
+		blendFactor.z = pixel.z;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
+		blendFactor.x = Float4(1.0f) - pixel.x;
+		blendFactor.y = Float4(1.0f) - pixel.y;
+		blendFactor.z = Float4(1.0f) - pixel.z;
+		break;
+	case VK_BLEND_FACTOR_SRC_ALPHA:
+		blendFactor.x = oC.w;
+		blendFactor.y = oC.w;
+		blendFactor.z = oC.w;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
+		blendFactor.x = Float4(1.0f) - oC.w;
+		blendFactor.y = Float4(1.0f) - oC.w;
+		blendFactor.z = Float4(1.0f) - oC.w;
+		break;
+	case VK_BLEND_FACTOR_DST_ALPHA:
+		blendFactor.x = pixel.w;
+		blendFactor.y = pixel.w;
+		blendFactor.z = pixel.w;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
+		blendFactor.x = Float4(1.0f) - pixel.w;
+		blendFactor.y = Float4(1.0f) - pixel.w;
+		blendFactor.z = Float4(1.0f) - pixel.w;
+		break;
+	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
+		blendFactor.x = Float4(1.0f) - pixel.w;
+		blendFactor.x = Min(blendFactor.x, oC.w);
+		blendFactor.y = blendFactor.x;
+		blendFactor.z = blendFactor.x;
+		break;
+	case VK_BLEND_FACTOR_CONSTANT_COLOR:
+		blendFactor.x = *Pointer<Float4>(data + OFFSET(DrawData, factor.blendConstant4F[0]));
+		blendFactor.y = *Pointer<Float4>(data + OFFSET(DrawData, factor.blendConstant4F[1]));
+		blendFactor.z = *Pointer<Float4>(data + OFFSET(DrawData, factor.blendConstant4F[2]));
+		break;
+	case VK_BLEND_FACTOR_CONSTANT_ALPHA:
+		blendFactor.x = *Pointer<Float4>(data + OFFSET(DrawData, factor.blendConstant4F[3]));
+		blendFactor.y = *Pointer<Float4>(data + OFFSET(DrawData, factor.blendConstant4F[3]));
+		blendFactor.z = *Pointer<Float4>(data + OFFSET(DrawData, factor.blendConstant4F[3]));
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
+		blendFactor.x = *Pointer<Float4>(data + OFFSET(DrawData, factor.invBlendConstant4F[0]));
+		blendFactor.y = *Pointer<Float4>(data + OFFSET(DrawData, factor.invBlendConstant4F[1]));
+		blendFactor.z = *Pointer<Float4>(data + OFFSET(DrawData, factor.invBlendConstant4F[2]));
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
+		blendFactor.x = *Pointer<Float4>(data + OFFSET(DrawData, factor.invBlendConstant4F[3]));
+		blendFactor.y = *Pointer<Float4>(data + OFFSET(DrawData, factor.invBlendConstant4F[3]));
+		blendFactor.z = *Pointer<Float4>(data + OFFSET(DrawData, factor.invBlendConstant4F[3]));
+		break;
 
-		default:
-			UNSUPPORTED("VkBlendFactor: %d", int(blendFactorActive));
+	default:
+		UNSUPPORTED("VkBlendFactor: %d", int(blendFactorActive));
 	}
 }
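
For readers skimming the reformatted switch above: it is the standard VkBlendFactor table from the Vulkan spec, evaluated per channel for the whole quad at once. A minimal scalar sketch of a few representative cases, assuming a hypothetical Color struct (plain C++, not the Reactor code; needs the Vulkan headers for the enum):

#include <algorithm>
#include <vulkan/vulkan_core.h>

struct Color { float r, g, b, a; };

// Returns the RGB blend factor for one pixel; the remaining factors follow the
// same table and are omitted here.
Color blendFactorRGB(VkBlendFactor factor, const Color &src, const Color &dst)
{
    switch(factor)
    {
    case VK_BLEND_FACTOR_ZERO:
        return { 0.0f, 0.0f, 0.0f, 0.0f };
    case VK_BLEND_FACTOR_ONE:
        return { 1.0f, 1.0f, 1.0f, 1.0f };
    case VK_BLEND_FACTOR_SRC_ALPHA:
        return { src.a, src.a, src.a, src.a };
    case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
        return { 1.0f - dst.a, 1.0f - dst.a, 1.0f - dst.a, 1.0f - dst.a };
    case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
        {
            float s = std::min(src.a, 1.0f - dst.a);
            return { s, s, s, 1.0f };
        }
    default:
        return { 1.0f, 1.0f, 1.0f, 1.0f };  // unhandled factors: placeholder only
    }
}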
 
@@ -1931,49 +1931,49 @@
 {
 	switch(blendFactorAlphaActive)
 	{
-		case VK_BLEND_FACTOR_ZERO:
-			blendFactor.w = Float4(0);
-			break;
-		case VK_BLEND_FACTOR_ONE:
-			blendFactor.w = Float4(1);
-			break;
-		case VK_BLEND_FACTOR_SRC_COLOR:
-			blendFactor.w = oC.w;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
-			blendFactor.w = Float4(1.0f) - oC.w;
-			break;
-		case VK_BLEND_FACTOR_DST_COLOR:
-			blendFactor.w = pixel.w;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
-			blendFactor.w = Float4(1.0f) - pixel.w;
-			break;
-		case VK_BLEND_FACTOR_SRC_ALPHA:
-			blendFactor.w = oC.w;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
-			blendFactor.w = Float4(1.0f) - oC.w;
-			break;
-		case VK_BLEND_FACTOR_DST_ALPHA:
-			blendFactor.w = pixel.w;
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
-			blendFactor.w = Float4(1.0f) - pixel.w;
-			break;
-		case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
-			blendFactor.w = Float4(1.0f);
-			break;
-		case VK_BLEND_FACTOR_CONSTANT_COLOR:
-		case VK_BLEND_FACTOR_CONSTANT_ALPHA:
-			blendFactor.w = *Pointer<Float4>(data + OFFSET(DrawData, factor.blendConstant4F[3]));
-			break;
-		case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
-		case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
-			blendFactor.w = *Pointer<Float4>(data + OFFSET(DrawData, factor.invBlendConstant4F[3]));
-			break;
-		default:
-			UNSUPPORTED("VkBlendFactor: %d", int(blendFactorAlphaActive));
+	case VK_BLEND_FACTOR_ZERO:
+		blendFactor.w = Float4(0);
+		break;
+	case VK_BLEND_FACTOR_ONE:
+		blendFactor.w = Float4(1);
+		break;
+	case VK_BLEND_FACTOR_SRC_COLOR:
+		blendFactor.w = oC.w;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
+		blendFactor.w = Float4(1.0f) - oC.w;
+		break;
+	case VK_BLEND_FACTOR_DST_COLOR:
+		blendFactor.w = pixel.w;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
+		blendFactor.w = Float4(1.0f) - pixel.w;
+		break;
+	case VK_BLEND_FACTOR_SRC_ALPHA:
+		blendFactor.w = oC.w;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
+		blendFactor.w = Float4(1.0f) - oC.w;
+		break;
+	case VK_BLEND_FACTOR_DST_ALPHA:
+		blendFactor.w = pixel.w;
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
+		blendFactor.w = Float4(1.0f) - pixel.w;
+		break;
+	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
+		blendFactor.w = Float4(1.0f);
+		break;
+	case VK_BLEND_FACTOR_CONSTANT_COLOR:
+	case VK_BLEND_FACTOR_CONSTANT_ALPHA:
+		blendFactor.w = *Pointer<Float4>(data + OFFSET(DrawData, factor.blendConstant4F[3]));
+		break;
+	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
+	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
+		blendFactor.w = *Pointer<Float4>(data + OFFSET(DrawData, factor.invBlendConstant4F[3]));
+		break;
+	default:
+		UNSUPPORTED("VkBlendFactor: %d", int(blendFactorAlphaActive));
 	}
 }
 
@@ -2012,97 +2012,97 @@
 
 	switch(state.targetFormat[index])
 	{
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32_SFLOAT:
-			// FIXME: movlps
-			buffer += 4 * x;
-			pixel.x.x = *Pointer<Float>(buffer + 0);
-			pixel.x.y = *Pointer<Float>(buffer + 4);
-			buffer += pitchB;
-			// FIXME: movhps
-			pixel.x.z = *Pointer<Float>(buffer + 0);
-			pixel.x.w = *Pointer<Float>(buffer + 4);
-			pixel.y = pixel.z = pixel.w = one;
-			break;
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32_SFLOAT:
-			buffer += 8 * x;
-			pixel.x = *Pointer<Float4>(buffer, 16);
-			buffer += pitchB;
-			pixel.y = *Pointer<Float4>(buffer, 16);
-			pixel.z = pixel.x;
-			pixel.x = ShuffleLowHigh(pixel.x, pixel.y, 0x0202);
-			pixel.z = ShuffleLowHigh(pixel.z, pixel.y, 0x1313);
-			pixel.y = pixel.z;
-			pixel.z = pixel.w = one;
-			break;
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-			buffer += 16 * x;
-			pixel.x = *Pointer<Float4>(buffer + 0, 16);
-			pixel.y = *Pointer<Float4>(buffer + 16, 16);
-			buffer += pitchB;
-			pixel.z = *Pointer<Float4>(buffer + 0, 16);
-			pixel.w = *Pointer<Float4>(buffer + 16, 16);
-			transpose4x4(pixel.x, pixel.y, pixel.z, pixel.w);
-			break;
-		case VK_FORMAT_R16_SFLOAT:
-			buffer += 2 * x;
-			pixel.x.x = Float(*Pointer<Half>(buffer + 0));
-			pixel.x.y = Float(*Pointer<Half>(buffer + 2));
-			buffer += pitchB;
-			pixel.x.z = Float(*Pointer<Half>(buffer + 0));
-			pixel.x.w = Float(*Pointer<Half>(buffer + 2));
-			pixel.y = pixel.z = pixel.w = one;
-			break;
-		case VK_FORMAT_R16G16_SFLOAT:
-			buffer += 4 * x;
-			pixel.x.x = Float(*Pointer<Half>(buffer + 0));
-			pixel.y.x = Float(*Pointer<Half>(buffer + 2));
-			pixel.x.y = Float(*Pointer<Half>(buffer + 4));
-			pixel.y.y = Float(*Pointer<Half>(buffer + 6));
-			buffer += pitchB;
-			pixel.x.z = Float(*Pointer<Half>(buffer + 0));
-			pixel.y.z = Float(*Pointer<Half>(buffer + 2));
-			pixel.x.w = Float(*Pointer<Half>(buffer + 4));
-			pixel.y.w = Float(*Pointer<Half>(buffer + 6));
-			pixel.z = pixel.w = one;
-			break;
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-			buffer += 8 * x;
-			pixel.x.x = Float(*Pointer<Half>(buffer + 0x0));
-			pixel.y.x = Float(*Pointer<Half>(buffer + 0x2));
-			pixel.z.x = Float(*Pointer<Half>(buffer + 0x4));
-			pixel.w.x = Float(*Pointer<Half>(buffer + 0x6));
-			pixel.x.y = Float(*Pointer<Half>(buffer + 0x8));
-			pixel.y.y = Float(*Pointer<Half>(buffer + 0xa));
-			pixel.z.y = Float(*Pointer<Half>(buffer + 0xc));
-			pixel.w.y = Float(*Pointer<Half>(buffer + 0xe));
-			buffer += pitchB;
-			pixel.x.z = Float(*Pointer<Half>(buffer + 0x0));
-			pixel.y.z = Float(*Pointer<Half>(buffer + 0x2));
-			pixel.z.z = Float(*Pointer<Half>(buffer + 0x4));
-			pixel.w.z = Float(*Pointer<Half>(buffer + 0x6));
-			pixel.x.w = Float(*Pointer<Half>(buffer + 0x8));
-			pixel.y.w = Float(*Pointer<Half>(buffer + 0xa));
-			pixel.z.w = Float(*Pointer<Half>(buffer + 0xc));
-			pixel.w.w = Float(*Pointer<Half>(buffer + 0xe));
-			break;
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-			buffer += 4 * x;
-			pixel.x = r11g11b10Unpack(*Pointer<UInt>(buffer + 0));
-			pixel.y = r11g11b10Unpack(*Pointer<UInt>(buffer + 4));
-			buffer += pitchB;
-			pixel.z = r11g11b10Unpack(*Pointer<UInt>(buffer + 0));
-			pixel.w = r11g11b10Unpack(*Pointer<UInt>(buffer + 4));
-			transpose4x3(pixel.x, pixel.y, pixel.z, pixel.w);
-			pixel.w = one;
-			break;
-		default:
-			UNSUPPORTED("VkFormat: %d", int(state.targetFormat[index]));
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32_SFLOAT:
+		// FIXME: movlps
+		buffer += 4 * x;
+		pixel.x.x = *Pointer<Float>(buffer + 0);
+		pixel.x.y = *Pointer<Float>(buffer + 4);
+		buffer += pitchB;
+		// FIXME: movhps
+		pixel.x.z = *Pointer<Float>(buffer + 0);
+		pixel.x.w = *Pointer<Float>(buffer + 4);
+		pixel.y = pixel.z = pixel.w = one;
+		break;
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32_SFLOAT:
+		buffer += 8 * x;
+		pixel.x = *Pointer<Float4>(buffer, 16);
+		buffer += pitchB;
+		pixel.y = *Pointer<Float4>(buffer, 16);
+		pixel.z = pixel.x;
+		pixel.x = ShuffleLowHigh(pixel.x, pixel.y, 0x0202);
+		pixel.z = ShuffleLowHigh(pixel.z, pixel.y, 0x1313);
+		pixel.y = pixel.z;
+		pixel.z = pixel.w = one;
+		break;
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+		buffer += 16 * x;
+		pixel.x = *Pointer<Float4>(buffer + 0, 16);
+		pixel.y = *Pointer<Float4>(buffer + 16, 16);
+		buffer += pitchB;
+		pixel.z = *Pointer<Float4>(buffer + 0, 16);
+		pixel.w = *Pointer<Float4>(buffer + 16, 16);
+		transpose4x4(pixel.x, pixel.y, pixel.z, pixel.w);
+		break;
+	case VK_FORMAT_R16_SFLOAT:
+		buffer += 2 * x;
+		pixel.x.x = Float(*Pointer<Half>(buffer + 0));
+		pixel.x.y = Float(*Pointer<Half>(buffer + 2));
+		buffer += pitchB;
+		pixel.x.z = Float(*Pointer<Half>(buffer + 0));
+		pixel.x.w = Float(*Pointer<Half>(buffer + 2));
+		pixel.y = pixel.z = pixel.w = one;
+		break;
+	case VK_FORMAT_R16G16_SFLOAT:
+		buffer += 4 * x;
+		pixel.x.x = Float(*Pointer<Half>(buffer + 0));
+		pixel.y.x = Float(*Pointer<Half>(buffer + 2));
+		pixel.x.y = Float(*Pointer<Half>(buffer + 4));
+		pixel.y.y = Float(*Pointer<Half>(buffer + 6));
+		buffer += pitchB;
+		pixel.x.z = Float(*Pointer<Half>(buffer + 0));
+		pixel.y.z = Float(*Pointer<Half>(buffer + 2));
+		pixel.x.w = Float(*Pointer<Half>(buffer + 4));
+		pixel.y.w = Float(*Pointer<Half>(buffer + 6));
+		pixel.z = pixel.w = one;
+		break;
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+		buffer += 8 * x;
+		pixel.x.x = Float(*Pointer<Half>(buffer + 0x0));
+		pixel.y.x = Float(*Pointer<Half>(buffer + 0x2));
+		pixel.z.x = Float(*Pointer<Half>(buffer + 0x4));
+		pixel.w.x = Float(*Pointer<Half>(buffer + 0x6));
+		pixel.x.y = Float(*Pointer<Half>(buffer + 0x8));
+		pixel.y.y = Float(*Pointer<Half>(buffer + 0xa));
+		pixel.z.y = Float(*Pointer<Half>(buffer + 0xc));
+		pixel.w.y = Float(*Pointer<Half>(buffer + 0xe));
+		buffer += pitchB;
+		pixel.x.z = Float(*Pointer<Half>(buffer + 0x0));
+		pixel.y.z = Float(*Pointer<Half>(buffer + 0x2));
+		pixel.z.z = Float(*Pointer<Half>(buffer + 0x4));
+		pixel.w.z = Float(*Pointer<Half>(buffer + 0x6));
+		pixel.x.w = Float(*Pointer<Half>(buffer + 0x8));
+		pixel.y.w = Float(*Pointer<Half>(buffer + 0xa));
+		pixel.z.w = Float(*Pointer<Half>(buffer + 0xc));
+		pixel.w.w = Float(*Pointer<Half>(buffer + 0xe));
+		break;
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		buffer += 4 * x;
+		pixel.x = r11g11b10Unpack(*Pointer<UInt>(buffer + 0));
+		pixel.y = r11g11b10Unpack(*Pointer<UInt>(buffer + 4));
+		buffer += pitchB;
+		pixel.z = r11g11b10Unpack(*Pointer<UInt>(buffer + 0));
+		pixel.w = r11g11b10Unpack(*Pointer<UInt>(buffer + 4));
+		transpose4x3(pixel.x, pixel.y, pixel.z, pixel.w);
+		pixel.w = one;
+		break;
+	default:
+		UNSUPPORTED("VkFormat: %d", int(state.targetFormat[index]));
 	}
 
 	// Final Color = ObjectColor * SourceBlendFactor + PixelColor * DestinationBlendFactor
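
For reference, the comment above written out per channel for the common VK_BLEND_OP_ADD case is simply the following (scalar sketch; the switch in the next hunk substitutes subtraction, reverse subtraction, min, max, or the EXT ops for the '+'):

// Final = src * srcFactor + dst * dstFactor, evaluated per channel.
float blendChannel(float src, float dst, float srcFactor, float dstFactor)
{
    return src * srcFactor + dst * dstFactor;
}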
@@ -2122,46 +2122,46 @@
 
 	switch(state.blendState[index].blendOperation)
 	{
-		case VK_BLEND_OP_ADD:
-			oC.x += pixel.x;
-			oC.y += pixel.y;
-			oC.z += pixel.z;
-			break;
-		case VK_BLEND_OP_SUBTRACT:
-			oC.x -= pixel.x;
-			oC.y -= pixel.y;
-			oC.z -= pixel.z;
-			break;
-		case VK_BLEND_OP_REVERSE_SUBTRACT:
-			oC.x = pixel.x - oC.x;
-			oC.y = pixel.y - oC.y;
-			oC.z = pixel.z - oC.z;
-			break;
-		case VK_BLEND_OP_MIN:
-			oC.x = Min(oC.x, pixel.x);
-			oC.y = Min(oC.y, pixel.y);
-			oC.z = Min(oC.z, pixel.z);
-			break;
-		case VK_BLEND_OP_MAX:
-			oC.x = Max(oC.x, pixel.x);
-			oC.y = Max(oC.y, pixel.y);
-			oC.z = Max(oC.z, pixel.z);
-			break;
-		case VK_BLEND_OP_SRC_EXT:
-			// No operation
-			break;
-		case VK_BLEND_OP_DST_EXT:
-			oC.x = pixel.x;
-			oC.y = pixel.y;
-			oC.z = pixel.z;
-			break;
-		case VK_BLEND_OP_ZERO_EXT:
-			oC.x = Float4(0.0f);
-			oC.y = Float4(0.0f);
-			oC.z = Float4(0.0f);
-			break;
-		default:
-			UNSUPPORTED("VkBlendOp: %d", int(state.blendState[index].blendOperation));
+	case VK_BLEND_OP_ADD:
+		oC.x += pixel.x;
+		oC.y += pixel.y;
+		oC.z += pixel.z;
+		break;
+	case VK_BLEND_OP_SUBTRACT:
+		oC.x -= pixel.x;
+		oC.y -= pixel.y;
+		oC.z -= pixel.z;
+		break;
+	case VK_BLEND_OP_REVERSE_SUBTRACT:
+		oC.x = pixel.x - oC.x;
+		oC.y = pixel.y - oC.y;
+		oC.z = pixel.z - oC.z;
+		break;
+	case VK_BLEND_OP_MIN:
+		oC.x = Min(oC.x, pixel.x);
+		oC.y = Min(oC.y, pixel.y);
+		oC.z = Min(oC.z, pixel.z);
+		break;
+	case VK_BLEND_OP_MAX:
+		oC.x = Max(oC.x, pixel.x);
+		oC.y = Max(oC.y, pixel.y);
+		oC.z = Max(oC.z, pixel.z);
+		break;
+	case VK_BLEND_OP_SRC_EXT:
+		// No operation
+		break;
+	case VK_BLEND_OP_DST_EXT:
+		oC.x = pixel.x;
+		oC.y = pixel.y;
+		oC.z = pixel.z;
+		break;
+	case VK_BLEND_OP_ZERO_EXT:
+		oC.x = Float4(0.0f);
+		oC.y = Float4(0.0f);
+		oC.z = Float4(0.0f);
+		break;
+	default:
+		UNSUPPORTED("VkBlendOp: %d", int(state.blendState[index].blendOperation));
 	}
 
 	blendFactorAlpha(sourceFactor, oC, pixel, state.blendState[index].sourceBlendFactorAlpha);
@@ -2172,33 +2172,33 @@
 
 	switch(state.blendState[index].blendOperationAlpha)
 	{
-		case VK_BLEND_OP_ADD:
-			oC.w += pixel.w;
-			break;
-		case VK_BLEND_OP_SUBTRACT:
-			oC.w -= pixel.w;
-			break;
-		case VK_BLEND_OP_REVERSE_SUBTRACT:
-			pixel.w -= oC.w;
-			oC.w = pixel.w;
-			break;
-		case VK_BLEND_OP_MIN:
-			oC.w = Min(oC.w, pixel.w);
-			break;
-		case VK_BLEND_OP_MAX:
-			oC.w = Max(oC.w, pixel.w);
-			break;
-		case VK_BLEND_OP_SRC_EXT:
-			// No operation
-			break;
-		case VK_BLEND_OP_DST_EXT:
-			oC.w = pixel.w;
-			break;
-		case VK_BLEND_OP_ZERO_EXT:
-			oC.w = Float4(0.0f);
-			break;
-		default:
-			UNSUPPORTED("VkBlendOp: %d", int(state.blendState[index].blendOperationAlpha));
+	case VK_BLEND_OP_ADD:
+		oC.w += pixel.w;
+		break;
+	case VK_BLEND_OP_SUBTRACT:
+		oC.w -= pixel.w;
+		break;
+	case VK_BLEND_OP_REVERSE_SUBTRACT:
+		pixel.w -= oC.w;
+		oC.w = pixel.w;
+		break;
+	case VK_BLEND_OP_MIN:
+		oC.w = Min(oC.w, pixel.w);
+		break;
+	case VK_BLEND_OP_MAX:
+		oC.w = Max(oC.w, pixel.w);
+		break;
+	case VK_BLEND_OP_SRC_EXT:
+		// No operation
+		break;
+	case VK_BLEND_OP_DST_EXT:
+		oC.w = pixel.w;
+		break;
+	case VK_BLEND_OP_ZERO_EXT:
+		oC.w = Float4(0.0f);
+		break;
+	default:
+		UNSUPPORTED("VkBlendOp: %d", int(state.blendState[index].blendOperationAlpha));
 	}
 
 	if(format.isUnsignedComponent(0)) { oC.x = Max(oC.x, Float4(0.0f)); }
@@ -2211,45 +2211,45 @@
 {
 	switch(state.targetFormat[index])
 	{
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-			break;
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8_UINT:
-			oC.z = oC.x;
-			oC.x = UnpackLow(oC.x, oC.y);
-			oC.z = UnpackHigh(oC.z, oC.y);
-			oC.y = oC.z;
-			break;
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-			transpose4x4(oC.x, oC.y, oC.z, oC.w);
-			break;
-		default:
-			UNSUPPORTED("VkFormat: %d", int(state.targetFormat[index]));
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		break;
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8_UINT:
+		oC.z = oC.x;
+		oC.x = UnpackLow(oC.x, oC.y);
+		oC.z = UnpackHigh(oC.z, oC.y);
+		oC.y = oC.z;
+		break;
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		transpose4x4(oC.x, oC.y, oC.z, oC.w);
+		break;
+	default:
+		UNSUPPORTED("VkFormat: %d", int(state.targetFormat[index]));
 	}
 
 	int rgbaWriteMask = state.colorWriteActive(index) & outputMasks[index];
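
One idiom repeats throughout the large hunk below: the new color is merged into the existing framebuffer contents with a select-by-mask, where the mask combines the per-sample coverage (xMask) with the per-channel write mask loaded from the constants tables. A scalar sketch of that idiom (mergeMasked is an illustrative name, not a SwiftShader function):

#include <cstdint>

// Keep the new bits where the mask is set; preserve the destination elsewhere.
uint32_t mergeMasked(uint32_t newBits, uint32_t oldBits, uint32_t mask)
{
    return (newBits & mask) | (oldBits & ~mask);
}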
@@ -2279,561 +2279,561 @@
 
 	switch(targetFormat)
 	{
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_UINT:
-			if(rgbaWriteMask & 0x00000001)
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_UINT:
+		if(rgbaWriteMask & 0x00000001)
+		{
+			buffer += 4 * x;
+
+			// FIXME: movlps
+			value.x = *Pointer<Float>(buffer + 0);
+			value.y = *Pointer<Float>(buffer + 4);
+
+			buffer += pitchB;
+
+			// FIXME: movhps
+			value.z = *Pointer<Float>(buffer + 0);
+			value.w = *Pointer<Float>(buffer + 4);
+
+			oC.x = As<Float4>(As<Int4>(oC.x) & *Pointer<Int4>(constants + OFFSET(Constants, maskD4X) + xMask * 16, 16));
+			value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskD4X) + xMask * 16, 16));
+			oC.x = As<Float4>(As<Int4>(oC.x) | As<Int4>(value));
+
+			// FIXME: movhps
+			*Pointer<Float>(buffer + 0) = oC.x.z;
+			*Pointer<Float>(buffer + 4) = oC.x.w;
+
+			buffer -= pitchB;
+
+			// FIXME: movlps
+			*Pointer<Float>(buffer + 0) = oC.x.x;
+			*Pointer<Float>(buffer + 4) = oC.x.y;
+		}
+		break;
+	case VK_FORMAT_R16_SFLOAT:
+		if(rgbaWriteMask & 0x00000001)
+		{
+			buffer += 2 * x;
+
+			value = Insert(value, Float(*Pointer<Half>(buffer + 0)), 0);
+			value = Insert(value, Float(*Pointer<Half>(buffer + 2)), 1);
+
+			buffer += pitchB;
+
+			value = Insert(value, Float(*Pointer<Half>(buffer + 0)), 2);
+			value = Insert(value, Float(*Pointer<Half>(buffer + 2)), 3);
+
+			oC.x = As<Float4>(As<Int4>(oC.x) & *Pointer<Int4>(constants + OFFSET(Constants, maskD4X) + xMask * 16, 16));
+			value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskD4X) + xMask * 16, 16));
+			oC.x = As<Float4>(As<Int4>(oC.x) | As<Int4>(value));
+
+			*Pointer<Half>(buffer + 0) = Half(oC.x.z);
+			*Pointer<Half>(buffer + 2) = Half(oC.x.w);
+
+			buffer -= pitchB;
+
+			*Pointer<Half>(buffer + 0) = Half(oC.x.x);
+			*Pointer<Half>(buffer + 2) = Half(oC.x.y);
+		}
+		break;
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_UINT:
+		if(rgbaWriteMask & 0x00000001)
+		{
+			buffer += 2 * x;
+
+			UShort4 xyzw;
+			xyzw = As<UShort4>(Insert(As<Int2>(xyzw), *Pointer<Int>(buffer), 0));
+
+			buffer += pitchB;
+
+			xyzw = As<UShort4>(Insert(As<Int2>(xyzw), *Pointer<Int>(buffer), 1));
+			value = As<Float4>(Int4(xyzw));
+
+			oC.x = As<Float4>(As<Int4>(oC.x) & *Pointer<Int4>(constants + OFFSET(Constants, maskD4X) + xMask * 16, 16));
+			value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskD4X) + xMask * 16, 16));
+			oC.x = As<Float4>(As<Int4>(oC.x) | As<Int4>(value));
+
+			if(targetFormat == VK_FORMAT_R16_SINT)
 			{
-				buffer += 4 * x;
-
-				// FIXME: movlps
-				value.x = *Pointer<Float>(buffer + 0);
-				value.y = *Pointer<Float>(buffer + 4);
-
-				buffer += pitchB;
-
-				// FIXME: movhps
-				value.z = *Pointer<Float>(buffer + 0);
-				value.w = *Pointer<Float>(buffer + 4);
-
-				oC.x = As<Float4>(As<Int4>(oC.x) & *Pointer<Int4>(constants + OFFSET(Constants, maskD4X) + xMask * 16, 16));
-				value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskD4X) + xMask * 16, 16));
-				oC.x = As<Float4>(As<Int4>(oC.x) | As<Int4>(value));
-
-				// FIXME: movhps
-				*Pointer<Float>(buffer + 0) = oC.x.z;
-				*Pointer<Float>(buffer + 4) = oC.x.w;
+				Float component = oC.x.z;
+				*Pointer<Short>(buffer + 0) = Short(As<Int>(component));
+				component = oC.x.w;
+				*Pointer<Short>(buffer + 2) = Short(As<Int>(component));
 
 				buffer -= pitchB;
 
-				// FIXME: movlps
-				*Pointer<Float>(buffer + 0) = oC.x.x;
-				*Pointer<Float>(buffer + 4) = oC.x.y;
+				component = oC.x.x;
+				*Pointer<Short>(buffer + 0) = Short(As<Int>(component));
+				component = oC.x.y;
+				*Pointer<Short>(buffer + 2) = Short(As<Int>(component));
 			}
-			break;
-		case VK_FORMAT_R16_SFLOAT:
-			if(rgbaWriteMask & 0x00000001)
+			else  // VK_FORMAT_R16_UINT
 			{
-				buffer += 2 * x;
-
-				value = Insert(value, Float(*Pointer<Half>(buffer + 0)), 0);
-				value = Insert(value, Float(*Pointer<Half>(buffer + 2)), 1);
-
-				buffer += pitchB;
-
-				value = Insert(value, Float(*Pointer<Half>(buffer + 0)), 2);
-				value = Insert(value, Float(*Pointer<Half>(buffer + 2)), 3);
-
-				oC.x = As<Float4>(As<Int4>(oC.x) & *Pointer<Int4>(constants + OFFSET(Constants, maskD4X) + xMask * 16, 16));
-				value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskD4X) + xMask * 16, 16));
-				oC.x = As<Float4>(As<Int4>(oC.x) | As<Int4>(value));
-
-				*Pointer<Half>(buffer + 0) = Half(oC.x.z);
-				*Pointer<Half>(buffer + 2) = Half(oC.x.w);
+				Float component = oC.x.z;
+				*Pointer<UShort>(buffer + 0) = UShort(As<Int>(component));
+				component = oC.x.w;
+				*Pointer<UShort>(buffer + 2) = UShort(As<Int>(component));
 
 				buffer -= pitchB;
 
-				*Pointer<Half>(buffer + 0) = Half(oC.x.x);
-				*Pointer<Half>(buffer + 2) = Half(oC.x.y);
+				component = oC.x.x;
+				*Pointer<UShort>(buffer + 0) = UShort(As<Int>(component));
+				component = oC.x.y;
+				*Pointer<UShort>(buffer + 2) = UShort(As<Int>(component));
 			}
-			break;
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_UINT:
-			if(rgbaWriteMask & 0x00000001)
+		}
+		break;
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8_UINT:
+		if(rgbaWriteMask & 0x00000001)
+		{
+			buffer += x;
+
+			UInt xyzw, packedCol;
+
+			xyzw = UInt(*Pointer<UShort>(buffer)) & 0xFFFF;
+			buffer += pitchB;
+			xyzw |= UInt(*Pointer<UShort>(buffer)) << 16;
+
+			Short4 tmpCol = Short4(As<Int4>(oC.x));
+			if(targetFormat == VK_FORMAT_R8_SINT)
 			{
-				buffer += 2 * x;
-
-				UShort4 xyzw;
-				xyzw = As<UShort4>(Insert(As<Int2>(xyzw), *Pointer<Int>(buffer), 0));
-
-				buffer += pitchB;
-
-				xyzw = As<UShort4>(Insert(As<Int2>(xyzw), *Pointer<Int>(buffer), 1));
-				value = As<Float4>(Int4(xyzw));
-
-				oC.x = As<Float4>(As<Int4>(oC.x) & *Pointer<Int4>(constants + OFFSET(Constants, maskD4X) + xMask * 16, 16));
-				value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskD4X) + xMask * 16, 16));
-				oC.x = As<Float4>(As<Int4>(oC.x) | As<Int4>(value));
-
-				if(targetFormat == VK_FORMAT_R16_SINT)
-				{
-					Float component = oC.x.z;
-					*Pointer<Short>(buffer + 0) = Short(As<Int>(component));
-					component = oC.x.w;
-					*Pointer<Short>(buffer + 2) = Short(As<Int>(component));
-
-					buffer -= pitchB;
-
-					component = oC.x.x;
-					*Pointer<Short>(buffer + 0) = Short(As<Int>(component));
-					component = oC.x.y;
-					*Pointer<Short>(buffer + 2) = Short(As<Int>(component));
-				}
-				else  // VK_FORMAT_R16_UINT
-				{
-					Float component = oC.x.z;
-					*Pointer<UShort>(buffer + 0) = UShort(As<Int>(component));
-					component = oC.x.w;
-					*Pointer<UShort>(buffer + 2) = UShort(As<Int>(component));
-
-					buffer -= pitchB;
-
-					component = oC.x.x;
-					*Pointer<UShort>(buffer + 0) = UShort(As<Int>(component));
-					component = oC.x.y;
-					*Pointer<UShort>(buffer + 2) = UShort(As<Int>(component));
-				}
+				tmpCol = As<Short4>(PackSigned(tmpCol, tmpCol));
 			}
-			break;
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8_UINT:
-			if(rgbaWriteMask & 0x00000001)
+			else
 			{
-				buffer += x;
-
-				UInt xyzw, packedCol;
-
-				xyzw = UInt(*Pointer<UShort>(buffer)) & 0xFFFF;
-				buffer += pitchB;
-				xyzw |= UInt(*Pointer<UShort>(buffer)) << 16;
-
-				Short4 tmpCol = Short4(As<Int4>(oC.x));
-				if(targetFormat == VK_FORMAT_R8_SINT)
-				{
-					tmpCol = As<Short4>(PackSigned(tmpCol, tmpCol));
-				}
-				else
-				{
-					tmpCol = As<Short4>(PackUnsigned(tmpCol, tmpCol));
-				}
-				packedCol = Extract(As<Int2>(tmpCol), 0);
-
-				packedCol = (packedCol & *Pointer<UInt>(constants + OFFSET(Constants, maskB4Q) + 8 * xMask)) |
-				            (xyzw & *Pointer<UInt>(constants + OFFSET(Constants, invMaskB4Q) + 8 * xMask));
-
-				*Pointer<UShort>(buffer) = UShort(packedCol >> 16);
-				buffer -= pitchB;
-				*Pointer<UShort>(buffer) = UShort(packedCol);
+				tmpCol = As<Short4>(PackUnsigned(tmpCol, tmpCol));
 			}
-			break;
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_UINT:
-			buffer += 8 * x;
+			packedCol = Extract(As<Int2>(tmpCol), 0);
 
-			value = *Pointer<Float4>(buffer);
+			packedCol = (packedCol & *Pointer<UInt>(constants + OFFSET(Constants, maskB4Q) + 8 * xMask)) |
+			            (xyzw & *Pointer<UInt>(constants + OFFSET(Constants, invMaskB4Q) + 8 * xMask));
 
-			if((rgbaWriteMask & 0x00000003) != 0x00000003)
+			*Pointer<UShort>(buffer) = UShort(packedCol >> 16);
+			buffer -= pitchB;
+			*Pointer<UShort>(buffer) = UShort(packedCol);
+		}
+		break;
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_UINT:
+		buffer += 8 * x;
+
+		value = *Pointer<Float4>(buffer);
+
+		if((rgbaWriteMask & 0x00000003) != 0x00000003)
+		{
+			Float4 masked = value;
+			oC.x = As<Float4>(As<Int4>(oC.x) & *Pointer<Int4>(constants + OFFSET(Constants, maskD01X[rgbaWriteMask & 0x3][0])));
+			masked = As<Float4>(As<Int4>(masked) & *Pointer<Int4>(constants + OFFSET(Constants, maskD01X[~rgbaWriteMask & 0x3][0])));
+			oC.x = As<Float4>(As<Int4>(oC.x) | As<Int4>(masked));
+		}
+
+		oC.x = As<Float4>(As<Int4>(oC.x) & *Pointer<Int4>(constants + OFFSET(Constants, maskQ01X) + xMask * 16, 16));
+		value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskQ01X) + xMask * 16, 16));
+		oC.x = As<Float4>(As<Int4>(oC.x) | As<Int4>(value));
+		*Pointer<Float4>(buffer) = oC.x;
+
+		buffer += pitchB;
+
+		value = *Pointer<Float4>(buffer);
+
+		if((rgbaWriteMask & 0x00000003) != 0x00000003)
+		{
+			Float4 masked;
+
+			masked = value;
+			oC.y = As<Float4>(As<Int4>(oC.y) & *Pointer<Int4>(constants + OFFSET(Constants, maskD01X[rgbaWriteMask & 0x3][0])));
+			masked = As<Float4>(As<Int4>(masked) & *Pointer<Int4>(constants + OFFSET(Constants, maskD01X[~rgbaWriteMask & 0x3][0])));
+			oC.y = As<Float4>(As<Int4>(oC.y) | As<Int4>(masked));
+		}
+
+		oC.y = As<Float4>(As<Int4>(oC.y) & *Pointer<Int4>(constants + OFFSET(Constants, maskQ23X) + xMask * 16, 16));
+		value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskQ23X) + xMask * 16, 16));
+		oC.y = As<Float4>(As<Int4>(oC.y) | As<Int4>(value));
+		*Pointer<Float4>(buffer) = oC.y;
+		break;
+	case VK_FORMAT_R16G16_SFLOAT:
+		if((rgbaWriteMask & 0x00000003) != 0x0)
+		{
+			buffer += 4 * x;
+
+			UInt2 rgbaMask;
+			UInt2 packedCol;
+			packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.x.y))) << 16) | UInt(As<UShort>(Half(oC.x.x))), 0);
+			packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.x.w))) << 16) | UInt(As<UShort>(Half(oC.x.z))), 1);
+
+			UShort4 value = *Pointer<UShort4>(buffer);
+			UInt2 mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskD01Q) + xMask * 8);
+			if((rgbaWriteMask & 0x3) != 0x3)
+			{
+				Int tmpMask = *Pointer<Int>(constants + OFFSET(Constants, maskW4Q[rgbaWriteMask & 0x3][0]));
+				rgbaMask = As<UInt2>(Int2(tmpMask, tmpMask));
+				mergedMask &= rgbaMask;
+			}
+			*Pointer<UInt2>(buffer) = (packedCol & mergedMask) | (As<UInt2>(value) & ~mergedMask);
+
+			buffer += pitchB;
+
+			packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.y.y))) << 16) | UInt(As<UShort>(Half(oC.y.x))), 0);
+			packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.y.w))) << 16) | UInt(As<UShort>(Half(oC.y.z))), 1);
+			value = *Pointer<UShort4>(buffer);
+			mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskD23Q) + xMask * 8);
+			if((rgbaWriteMask & 0x3) != 0x3)
+			{
+				mergedMask &= rgbaMask;
+			}
+			*Pointer<UInt2>(buffer) = (packedCol & mergedMask) | (As<UInt2>(value) & ~mergedMask);
+		}
+		break;
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_UINT:
+		if((rgbaWriteMask & 0x00000003) != 0x0)
+		{
+			buffer += 4 * x;
+
+			UInt2 rgbaMask;
+			UShort4 packedCol = UShort4(As<Int4>(oC.x));
+			UShort4 value = *Pointer<UShort4>(buffer);
+			UInt2 mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskD01Q) + xMask * 8);
+			if((rgbaWriteMask & 0x3) != 0x3)
+			{
+				Int tmpMask = *Pointer<Int>(constants + OFFSET(Constants, maskW4Q[rgbaWriteMask & 0x3][0]));
+				rgbaMask = As<UInt2>(Int2(tmpMask, tmpMask));
+				mergedMask &= rgbaMask;
+			}
+			*Pointer<UInt2>(buffer) = (As<UInt2>(packedCol) & mergedMask) | (As<UInt2>(value) & ~mergedMask);
+
+			buffer += pitchB;
+
+			packedCol = UShort4(As<Int4>(oC.y));
+			value = *Pointer<UShort4>(buffer);
+			mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskD23Q) + xMask * 8);
+			if((rgbaWriteMask & 0x3) != 0x3)
+			{
+				mergedMask &= rgbaMask;
+			}
+			*Pointer<UInt2>(buffer) = (As<UInt2>(packedCol) & mergedMask) | (As<UInt2>(value) & ~mergedMask);
+		}
+		break;
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8_UINT:
+		if((rgbaWriteMask & 0x00000003) != 0x0)
+		{
+			buffer += 2 * x;
+
+			Int2 xyzw, packedCol;
+
+			xyzw = Insert(xyzw, *Pointer<Int>(buffer), 0);
+			buffer += pitchB;
+			xyzw = Insert(xyzw, *Pointer<Int>(buffer), 1);
+
+			if(targetFormat == VK_FORMAT_R8G8_SINT)
+			{
+				packedCol = As<Int2>(PackSigned(Short4(As<Int4>(oC.x)), Short4(As<Int4>(oC.y))));
+			}
+			else
+			{
+				packedCol = As<Int2>(PackUnsigned(Short4(As<Int4>(oC.x)), Short4(As<Int4>(oC.y))));
+			}
+
+			UInt2 mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskW4Q) + xMask * 8);
+			if((rgbaWriteMask & 0x3) != 0x3)
+			{
+				Int tmpMask = *Pointer<Int>(constants + OFFSET(Constants, maskB4Q[5 * (rgbaWriteMask & 0x3)][0]));
+				UInt2 rgbaMask = As<UInt2>(Int2(tmpMask, tmpMask));
+				mergedMask &= rgbaMask;
+			}
+
+			packedCol = As<Int2>((As<UInt2>(packedCol) & mergedMask) | (As<UInt2>(xyzw) & ~mergedMask));
+
+			*Pointer<UInt>(buffer) = As<UInt>(Extract(packedCol, 1));
+			buffer -= pitchB;
+			*Pointer<UInt>(buffer) = As<UInt>(Extract(packedCol, 0));
+		}
+		break;
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+		buffer += 16 * x;
+
+		{
+			value = *Pointer<Float4>(buffer, 16);
+
+			if(rgbaWriteMask != 0x0000000F)
 			{
 				Float4 masked = value;
-				oC.x = As<Float4>(As<Int4>(oC.x) & *Pointer<Int4>(constants + OFFSET(Constants, maskD01X[rgbaWriteMask & 0x3][0])));
-				masked = As<Float4>(As<Int4>(masked) & *Pointer<Int4>(constants + OFFSET(Constants, maskD01X[~rgbaWriteMask & 0x3][0])));
+				oC.x = As<Float4>(As<Int4>(oC.x) & *Pointer<Int4>(constants + OFFSET(Constants, maskD4X[rgbaWriteMask][0])));
+				masked = As<Float4>(As<Int4>(masked) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskD4X[rgbaWriteMask][0])));
 				oC.x = As<Float4>(As<Int4>(oC.x) | As<Int4>(masked));
 			}
 
-			oC.x = As<Float4>(As<Int4>(oC.x) & *Pointer<Int4>(constants + OFFSET(Constants, maskQ01X) + xMask * 16, 16));
-			value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskQ01X) + xMask * 16, 16));
+			oC.x = As<Float4>(As<Int4>(oC.x) & *Pointer<Int4>(constants + OFFSET(Constants, maskX0X) + xMask * 16, 16));
+			value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskX0X) + xMask * 16, 16));
 			oC.x = As<Float4>(As<Int4>(oC.x) | As<Int4>(value));
-			*Pointer<Float4>(buffer) = oC.x;
+			*Pointer<Float4>(buffer, 16) = oC.x;
+		}
 
-			buffer += pitchB;
+		{
+			value = *Pointer<Float4>(buffer + 16, 16);
 
-			value = *Pointer<Float4>(buffer);
-
-			if((rgbaWriteMask & 0x00000003) != 0x00000003)
+			if(rgbaWriteMask != 0x0000000F)
 			{
-				Float4 masked;
-
-				masked = value;
-				oC.y = As<Float4>(As<Int4>(oC.y) & *Pointer<Int4>(constants + OFFSET(Constants, maskD01X[rgbaWriteMask & 0x3][0])));
-				masked = As<Float4>(As<Int4>(masked) & *Pointer<Int4>(constants + OFFSET(Constants, maskD01X[~rgbaWriteMask & 0x3][0])));
+				Float4 masked = value;
+				oC.y = As<Float4>(As<Int4>(oC.y) & *Pointer<Int4>(constants + OFFSET(Constants, maskD4X[rgbaWriteMask][0])));
+				masked = As<Float4>(As<Int4>(masked) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskD4X[rgbaWriteMask][0])));
 				oC.y = As<Float4>(As<Int4>(oC.y) | As<Int4>(masked));
 			}
 
-			oC.y = As<Float4>(As<Int4>(oC.y) & *Pointer<Int4>(constants + OFFSET(Constants, maskQ23X) + xMask * 16, 16));
-			value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskQ23X) + xMask * 16, 16));
+			oC.y = As<Float4>(As<Int4>(oC.y) & *Pointer<Int4>(constants + OFFSET(Constants, maskX1X) + xMask * 16, 16));
+			value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskX1X) + xMask * 16, 16));
 			oC.y = As<Float4>(As<Int4>(oC.y) | As<Int4>(value));
-			*Pointer<Float4>(buffer) = oC.y;
-			break;
-		case VK_FORMAT_R16G16_SFLOAT:
-			if((rgbaWriteMask & 0x00000003) != 0x0)
+			*Pointer<Float4>(buffer + 16, 16) = oC.y;
+		}
+
+		buffer += pitchB;
+
+		{
+			value = *Pointer<Float4>(buffer, 16);
+
+			if(rgbaWriteMask != 0x0000000F)
 			{
-				buffer += 4 * x;
-
-				UInt2 rgbaMask;
-				UInt2 packedCol;
-				packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.x.y))) << 16) | UInt(As<UShort>(Half(oC.x.x))), 0);
-				packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.x.w))) << 16) | UInt(As<UShort>(Half(oC.x.z))), 1);
-
-				UShort4 value = *Pointer<UShort4>(buffer);
-				UInt2 mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskD01Q) + xMask * 8);
-				if((rgbaWriteMask & 0x3) != 0x3)
-				{
-					Int tmpMask = *Pointer<Int>(constants + OFFSET(Constants, maskW4Q[rgbaWriteMask & 0x3][0]));
-					rgbaMask = As<UInt2>(Int2(tmpMask, tmpMask));
-					mergedMask &= rgbaMask;
-				}
-				*Pointer<UInt2>(buffer) = (packedCol & mergedMask) | (As<UInt2>(value) & ~mergedMask);
-
-				buffer += pitchB;
-
-				packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.y.y))) << 16) | UInt(As<UShort>(Half(oC.y.x))), 0);
-				packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.y.w))) << 16) | UInt(As<UShort>(Half(oC.y.z))), 1);
-				value = *Pointer<UShort4>(buffer);
-				mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskD23Q) + xMask * 8);
-				if((rgbaWriteMask & 0x3) != 0x3)
-				{
-					mergedMask &= rgbaMask;
-				}
-				*Pointer<UInt2>(buffer) = (packedCol & mergedMask) | (As<UInt2>(value) & ~mergedMask);
-			}
-			break;
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_UINT:
-			if((rgbaWriteMask & 0x00000003) != 0x0)
-			{
-				buffer += 4 * x;
-
-				UInt2 rgbaMask;
-				UShort4 packedCol = UShort4(As<Int4>(oC.x));
-				UShort4 value = *Pointer<UShort4>(buffer);
-				UInt2 mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskD01Q) + xMask * 8);
-				if((rgbaWriteMask & 0x3) != 0x3)
-				{
-					Int tmpMask = *Pointer<Int>(constants + OFFSET(Constants, maskW4Q[rgbaWriteMask & 0x3][0]));
-					rgbaMask = As<UInt2>(Int2(tmpMask, tmpMask));
-					mergedMask &= rgbaMask;
-				}
-				*Pointer<UInt2>(buffer) = (As<UInt2>(packedCol) & mergedMask) | (As<UInt2>(value) & ~mergedMask);
-
-				buffer += pitchB;
-
-				packedCol = UShort4(As<Int4>(oC.y));
-				value = *Pointer<UShort4>(buffer);
-				mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskD23Q) + xMask * 8);
-				if((rgbaWriteMask & 0x3) != 0x3)
-				{
-					mergedMask &= rgbaMask;
-				}
-				*Pointer<UInt2>(buffer) = (As<UInt2>(packedCol) & mergedMask) | (As<UInt2>(value) & ~mergedMask);
-			}
-			break;
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8_UINT:
-			if((rgbaWriteMask & 0x00000003) != 0x0)
-			{
-				buffer += 2 * x;
-
-				Int2 xyzw, packedCol;
-
-				xyzw = Insert(xyzw, *Pointer<Int>(buffer), 0);
-				buffer += pitchB;
-				xyzw = Insert(xyzw, *Pointer<Int>(buffer), 1);
-
-				if(targetFormat == VK_FORMAT_R8G8_SINT)
-				{
-					packedCol = As<Int2>(PackSigned(Short4(As<Int4>(oC.x)), Short4(As<Int4>(oC.y))));
-				}
-				else
-				{
-					packedCol = As<Int2>(PackUnsigned(Short4(As<Int4>(oC.x)), Short4(As<Int4>(oC.y))));
-				}
-
-				UInt2 mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskW4Q) + xMask * 8);
-				if((rgbaWriteMask & 0x3) != 0x3)
-				{
-					Int tmpMask = *Pointer<Int>(constants + OFFSET(Constants, maskB4Q[5 * (rgbaWriteMask & 0x3)][0]));
-					UInt2 rgbaMask = As<UInt2>(Int2(tmpMask, tmpMask));
-					mergedMask &= rgbaMask;
-				}
-
-				packedCol = As<Int2>((As<UInt2>(packedCol) & mergedMask) | (As<UInt2>(xyzw) & ~mergedMask));
-
-				*Pointer<UInt>(buffer) = As<UInt>(Extract(packedCol, 1));
-				buffer -= pitchB;
-				*Pointer<UInt>(buffer) = As<UInt>(Extract(packedCol, 0));
-			}
-			break;
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-			buffer += 16 * x;
-
-			{
-				value = *Pointer<Float4>(buffer, 16);
-
-				if(rgbaWriteMask != 0x0000000F)
-				{
-					Float4 masked = value;
-					oC.x = As<Float4>(As<Int4>(oC.x) & *Pointer<Int4>(constants + OFFSET(Constants, maskD4X[rgbaWriteMask][0])));
-					masked = As<Float4>(As<Int4>(masked) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskD4X[rgbaWriteMask][0])));
-					oC.x = As<Float4>(As<Int4>(oC.x) | As<Int4>(masked));
-				}
-
-				oC.x = As<Float4>(As<Int4>(oC.x) & *Pointer<Int4>(constants + OFFSET(Constants, maskX0X) + xMask * 16, 16));
-				value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskX0X) + xMask * 16, 16));
-				oC.x = As<Float4>(As<Int4>(oC.x) | As<Int4>(value));
-				*Pointer<Float4>(buffer, 16) = oC.x;
+				Float4 masked = value;
+				oC.z = As<Float4>(As<Int4>(oC.z) & *Pointer<Int4>(constants + OFFSET(Constants, maskD4X[rgbaWriteMask][0])));
+				masked = As<Float4>(As<Int4>(masked) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskD4X[rgbaWriteMask][0])));
+				oC.z = As<Float4>(As<Int4>(oC.z) | As<Int4>(masked));
 			}
 
+			oC.z = As<Float4>(As<Int4>(oC.z) & *Pointer<Int4>(constants + OFFSET(Constants, maskX2X) + xMask * 16, 16));
+			value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskX2X) + xMask * 16, 16));
+			oC.z = As<Float4>(As<Int4>(oC.z) | As<Int4>(value));
+			*Pointer<Float4>(buffer, 16) = oC.z;
+		}
+
+		{
+			value = *Pointer<Float4>(buffer + 16, 16);
+
+			if(rgbaWriteMask != 0x0000000F)
 			{
-				value = *Pointer<Float4>(buffer + 16, 16);
-
-				if(rgbaWriteMask != 0x0000000F)
-				{
-					Float4 masked = value;
-					oC.y = As<Float4>(As<Int4>(oC.y) & *Pointer<Int4>(constants + OFFSET(Constants, maskD4X[rgbaWriteMask][0])));
-					masked = As<Float4>(As<Int4>(masked) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskD4X[rgbaWriteMask][0])));
-					oC.y = As<Float4>(As<Int4>(oC.y) | As<Int4>(masked));
-				}
-
-				oC.y = As<Float4>(As<Int4>(oC.y) & *Pointer<Int4>(constants + OFFSET(Constants, maskX1X) + xMask * 16, 16));
-				value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskX1X) + xMask * 16, 16));
-				oC.y = As<Float4>(As<Int4>(oC.y) | As<Int4>(value));
-				*Pointer<Float4>(buffer + 16, 16) = oC.y;
+				Float4 masked = value;
+				oC.w = As<Float4>(As<Int4>(oC.w) & *Pointer<Int4>(constants + OFFSET(Constants, maskD4X[rgbaWriteMask][0])));
+				masked = As<Float4>(As<Int4>(masked) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskD4X[rgbaWriteMask][0])));
+				oC.w = As<Float4>(As<Int4>(oC.w) | As<Int4>(masked));
 			}
 
+			oC.w = As<Float4>(As<Int4>(oC.w) & *Pointer<Int4>(constants + OFFSET(Constants, maskX3X) + xMask * 16, 16));
+			value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskX3X) + xMask * 16, 16));
+			oC.w = As<Float4>(As<Int4>(oC.w) | As<Int4>(value));
+			*Pointer<Float4>(buffer + 16, 16) = oC.w;
+		}
+		break;
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+		if((rgbaWriteMask & 0x0000000F) != 0x0)
+		{
+			buffer += 8 * x;
+
+			UInt4 rgbaMask;
+			UInt4 value = *Pointer<UInt4>(buffer);
+			UInt4 packedCol;
+			packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.x.y))) << 16) | UInt(As<UShort>(Half(oC.x.x))), 0);
+			packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.x.w))) << 16) | UInt(As<UShort>(Half(oC.x.z))), 1);
+			packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.y.y))) << 16) | UInt(As<UShort>(Half(oC.y.x))), 2);
+			packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.y.w))) << 16) | UInt(As<UShort>(Half(oC.y.z))), 3);
+			UInt4 mergedMask = *Pointer<UInt4>(constants + OFFSET(Constants, maskQ01X) + xMask * 16);
+			if((rgbaWriteMask & 0xF) != 0xF)
+			{
+				UInt2 tmpMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskW4Q[rgbaWriteMask][0]));
+				rgbaMask = UInt4(tmpMask, tmpMask);
+				mergedMask &= rgbaMask;
+			}
+			*Pointer<UInt4>(buffer) = (packedCol & mergedMask) | (As<UInt4>(value) & ~mergedMask);
+
 			buffer += pitchB;
 
+			value = *Pointer<UInt4>(buffer);
+			packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.z.y))) << 16) | UInt(As<UShort>(Half(oC.z.x))), 0);
+			packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.z.w))) << 16) | UInt(As<UShort>(Half(oC.z.z))), 1);
+			packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.w.y))) << 16) | UInt(As<UShort>(Half(oC.w.x))), 2);
+			packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.w.w))) << 16) | UInt(As<UShort>(Half(oC.w.z))), 3);
+			mergedMask = *Pointer<UInt4>(constants + OFFSET(Constants, maskQ23X) + xMask * 16);
+			if((rgbaWriteMask & 0xF) != 0xF)
 			{
-				value = *Pointer<Float4>(buffer, 16);
-
-				if(rgbaWriteMask != 0x0000000F)
-				{
-					Float4 masked = value;
-					oC.z = As<Float4>(As<Int4>(oC.z) & *Pointer<Int4>(constants + OFFSET(Constants, maskD4X[rgbaWriteMask][0])));
-					masked = As<Float4>(As<Int4>(masked) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskD4X[rgbaWriteMask][0])));
-					oC.z = As<Float4>(As<Int4>(oC.z) | As<Int4>(masked));
-				}
-
-				oC.z = As<Float4>(As<Int4>(oC.z) & *Pointer<Int4>(constants + OFFSET(Constants, maskX2X) + xMask * 16, 16));
-				value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskX2X) + xMask * 16, 16));
-				oC.z = As<Float4>(As<Int4>(oC.z) | As<Int4>(value));
-				*Pointer<Float4>(buffer, 16) = oC.z;
+				mergedMask &= rgbaMask;
 			}
+			*Pointer<UInt4>(buffer) = (packedCol & mergedMask) | (As<UInt4>(value) & ~mergedMask);
+		}
+		break;
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		if((rgbaWriteMask & 0x7) != 0x0)
+		{
+			buffer += 4 * x;
 
+			UInt4 packedCol;
+			packedCol = Insert(packedCol, r11g11b10Pack(oC.x), 0);
+			packedCol = Insert(packedCol, r11g11b10Pack(oC.y), 1);
+			packedCol = Insert(packedCol, r11g11b10Pack(oC.z), 2);
+			packedCol = Insert(packedCol, r11g11b10Pack(oC.w), 3);
+
+			UInt4 value;
+			value = Insert(value, *Pointer<UInt>(buffer + 0), 0);
+			value = Insert(value, *Pointer<UInt>(buffer + 4), 1);
+			buffer += pitchB;
+			value = Insert(value, *Pointer<UInt>(buffer + 0), 2);
+			value = Insert(value, *Pointer<UInt>(buffer + 4), 3);
+
+			UInt4 mask = *Pointer<UInt4>(constants + OFFSET(Constants, maskD4X[0][0]) + xMask * 16, 16);
+			if((rgbaWriteMask & 0x7) != 0x7)
 			{
-				value = *Pointer<Float4>(buffer + 16, 16);
-
-				if(rgbaWriteMask != 0x0000000F)
-				{
-					Float4 masked = value;
-					oC.w = As<Float4>(As<Int4>(oC.w) & *Pointer<Int4>(constants + OFFSET(Constants, maskD4X[rgbaWriteMask][0])));
-					masked = As<Float4>(As<Int4>(masked) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskD4X[rgbaWriteMask][0])));
-					oC.w = As<Float4>(As<Int4>(oC.w) | As<Int4>(masked));
-				}
-
-				oC.w = As<Float4>(As<Int4>(oC.w) & *Pointer<Int4>(constants + OFFSET(Constants, maskX3X) + xMask * 16, 16));
-				value = As<Float4>(As<Int4>(value) & *Pointer<Int4>(constants + OFFSET(Constants, invMaskX3X) + xMask * 16, 16));
-				oC.w = As<Float4>(As<Int4>(oC.w) | As<Int4>(value));
-				*Pointer<Float4>(buffer + 16, 16) = oC.w;
+				mask &= *Pointer<UInt4>(constants + OFFSET(Constants, mask11X[rgbaWriteMask & 0x7][0]), 16);
 			}
-			break;
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-			if((rgbaWriteMask & 0x0000000F) != 0x0)
+			value = (packedCol & mask) | (value & ~mask);
+
+			*Pointer<UInt>(buffer + 0) = value.z;
+			*Pointer<UInt>(buffer + 4) = value.w;
+			buffer -= pitchB;
+			*Pointer<UInt>(buffer + 0) = value.x;
+			*Pointer<UInt>(buffer + 4) = value.y;
+		}
+		break;
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_UINT:
+		if((rgbaWriteMask & 0x0000000F) != 0x0)
+		{
+			buffer += 8 * x;
+
+			UInt4 rgbaMask;
+			UShort8 value = *Pointer<UShort8>(buffer);
+			UShort8 packedCol = UShort8(UShort4(As<Int4>(oC.x)), UShort4(As<Int4>(oC.y)));
+			UInt4 mergedMask = *Pointer<UInt4>(constants + OFFSET(Constants, maskQ01X) + xMask * 16);
+			if((rgbaWriteMask & 0xF) != 0xF)
 			{
-				buffer += 8 * x;
-
-				UInt4 rgbaMask;
-				UInt4 value = *Pointer<UInt4>(buffer);
-				UInt4 packedCol;
-				packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.x.y))) << 16) | UInt(As<UShort>(Half(oC.x.x))), 0);
-				packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.x.w))) << 16) | UInt(As<UShort>(Half(oC.x.z))), 1);
-				packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.y.y))) << 16) | UInt(As<UShort>(Half(oC.y.x))), 2);
-				packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.y.w))) << 16) | UInt(As<UShort>(Half(oC.y.z))), 3);
-				UInt4 mergedMask = *Pointer<UInt4>(constants + OFFSET(Constants, maskQ01X) + xMask * 16);
-				if((rgbaWriteMask & 0xF) != 0xF)
-				{
-					UInt2 tmpMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskW4Q[rgbaWriteMask][0]));
-					rgbaMask = UInt4(tmpMask, tmpMask);
-					mergedMask &= rgbaMask;
-				}
-				*Pointer<UInt4>(buffer) = (packedCol & mergedMask) | (As<UInt4>(value) & ~mergedMask);
-
-				buffer += pitchB;
-
-				value = *Pointer<UInt4>(buffer);
-				packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.z.y))) << 16) | UInt(As<UShort>(Half(oC.z.x))), 0);
-				packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.z.w))) << 16) | UInt(As<UShort>(Half(oC.z.z))), 1);
-				packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.w.y))) << 16) | UInt(As<UShort>(Half(oC.w.x))), 2);
-				packedCol = Insert(packedCol, (UInt(As<UShort>(Half(oC.w.w))) << 16) | UInt(As<UShort>(Half(oC.w.z))), 3);
-				mergedMask = *Pointer<UInt4>(constants + OFFSET(Constants, maskQ23X) + xMask * 16);
-				if((rgbaWriteMask & 0xF) != 0xF)
-				{
-					mergedMask &= rgbaMask;
-				}
-				*Pointer<UInt4>(buffer) = (packedCol & mergedMask) | (As<UInt4>(value) & ~mergedMask);
+				UInt2 tmpMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskW4Q[rgbaWriteMask][0]));
+				rgbaMask = UInt4(tmpMask, tmpMask);
+				mergedMask &= rgbaMask;
 			}
-			break;
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-			if((rgbaWriteMask & 0x7) != 0x0)
+			*Pointer<UInt4>(buffer) = (As<UInt4>(packedCol) & mergedMask) | (As<UInt4>(value) & ~mergedMask);
+
+			buffer += pitchB;
+
+			value = *Pointer<UShort8>(buffer);
+			packedCol = UShort8(UShort4(As<Int4>(oC.z)), UShort4(As<Int4>(oC.w)));
+			mergedMask = *Pointer<UInt4>(constants + OFFSET(Constants, maskQ23X) + xMask * 16);
+			if((rgbaWriteMask & 0xF) != 0xF)
 			{
-				buffer += 4 * x;
-
-				UInt4 packedCol;
-				packedCol = Insert(packedCol, r11g11b10Pack(oC.x), 0);
-				packedCol = Insert(packedCol, r11g11b10Pack(oC.y), 1);
-				packedCol = Insert(packedCol, r11g11b10Pack(oC.z), 2);
-				packedCol = Insert(packedCol, r11g11b10Pack(oC.w), 3);
-
-				UInt4 value;
-				value = Insert(value, *Pointer<UInt>(buffer + 0), 0);
-				value = Insert(value, *Pointer<UInt>(buffer + 4), 1);
-				buffer += pitchB;
-				value = Insert(value, *Pointer<UInt>(buffer + 0), 2);
-				value = Insert(value, *Pointer<UInt>(buffer + 4), 3);
-
-				UInt4 mask = *Pointer<UInt4>(constants + OFFSET(Constants, maskD4X[0][0]) + xMask * 16, 16);
-				if((rgbaWriteMask & 0x7) != 0x7)
-				{
-					mask &= *Pointer<UInt4>(constants + OFFSET(Constants, mask11X[rgbaWriteMask & 0x7][0]), 16);
-				}
-				value = (packedCol & mask) | (value & ~mask);
-
-				*Pointer<UInt>(buffer + 0) = value.z;
-				*Pointer<UInt>(buffer + 4) = value.w;
-				buffer -= pitchB;
-				*Pointer<UInt>(buffer + 0) = value.x;
-				*Pointer<UInt>(buffer + 4) = value.y;
+				mergedMask &= rgbaMask;
 			}
-			break;
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_UINT:
-			if((rgbaWriteMask & 0x0000000F) != 0x0)
+			*Pointer<UInt4>(buffer) = (As<UInt4>(packedCol) & mergedMask) | (As<UInt4>(value) & ~mergedMask);
+		}
+		break;
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		if((rgbaWriteMask & 0x0000000F) != 0x0)
+		{
+			UInt2 value, packedCol, mergedMask;
+
+			buffer += 4 * x;
+
+			bool isSigned = targetFormat == VK_FORMAT_R8G8B8A8_SINT || targetFormat == VK_FORMAT_A8B8G8R8_SINT_PACK32;
+
+			if(isSigned)
 			{
-				buffer += 8 * x;
-
-				UInt4 rgbaMask;
-				UShort8 value = *Pointer<UShort8>(buffer);
-				UShort8 packedCol = UShort8(UShort4(As<Int4>(oC.x)), UShort4(As<Int4>(oC.y)));
-				UInt4 mergedMask = *Pointer<UInt4>(constants + OFFSET(Constants, maskQ01X) + xMask * 16);
-				if((rgbaWriteMask & 0xF) != 0xF)
-				{
-					UInt2 tmpMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskW4Q[rgbaWriteMask][0]));
-					rgbaMask = UInt4(tmpMask, tmpMask);
-					mergedMask &= rgbaMask;
-				}
-				*Pointer<UInt4>(buffer) = (As<UInt4>(packedCol) & mergedMask) | (As<UInt4>(value) & ~mergedMask);
-
-				buffer += pitchB;
-
-				value = *Pointer<UShort8>(buffer);
-				packedCol = UShort8(UShort4(As<Int4>(oC.z)), UShort4(As<Int4>(oC.w)));
-				mergedMask = *Pointer<UInt4>(constants + OFFSET(Constants, maskQ23X) + xMask * 16);
-				if((rgbaWriteMask & 0xF) != 0xF)
-				{
-					mergedMask &= rgbaMask;
-				}
-				*Pointer<UInt4>(buffer) = (As<UInt4>(packedCol) & mergedMask) | (As<UInt4>(value) & ~mergedMask);
+				packedCol = As<UInt2>(PackSigned(Short4(As<Int4>(oC.x)), Short4(As<Int4>(oC.y))));
 			}
-			break;
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-			if((rgbaWriteMask & 0x0000000F) != 0x0)
+			else
 			{
-				UInt2 value, packedCol, mergedMask;
-
-				buffer += 4 * x;
-
-				bool isSigned = targetFormat == VK_FORMAT_R8G8B8A8_SINT || targetFormat == VK_FORMAT_A8B8G8R8_SINT_PACK32;
-
-				if(isSigned)
-				{
-					packedCol = As<UInt2>(PackSigned(Short4(As<Int4>(oC.x)), Short4(As<Int4>(oC.y))));
-				}
-				else
-				{
-					packedCol = As<UInt2>(PackUnsigned(Short4(As<Int4>(oC.x)), Short4(As<Int4>(oC.y))));
-				}
-				value = *Pointer<UInt2>(buffer, 16);
-				mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskD01Q) + xMask * 8);
-				if(rgbaWriteMask != 0xF)
-				{
-					mergedMask &= *Pointer<UInt2>(constants + OFFSET(Constants, maskB4Q[rgbaWriteMask][0]));
-				}
-				*Pointer<UInt2>(buffer) = (packedCol & mergedMask) | (value & ~mergedMask);
-
-				buffer += pitchB;
-
-				if(isSigned)
-				{
-					packedCol = As<UInt2>(PackSigned(Short4(As<Int4>(oC.z)), Short4(As<Int4>(oC.w))));
-				}
-				else
-				{
-					packedCol = As<UInt2>(PackUnsigned(Short4(As<Int4>(oC.z)), Short4(As<Int4>(oC.w))));
-				}
-				value = *Pointer<UInt2>(buffer, 16);
-				mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskD23Q) + xMask * 8);
-				if(rgbaWriteMask != 0xF)
-				{
-					mergedMask &= *Pointer<UInt2>(constants + OFFSET(Constants, maskB4Q[rgbaWriteMask][0]));
-				}
-				*Pointer<UInt2>(buffer) = (packedCol & mergedMask) | (value & ~mergedMask);
+				packedCol = As<UInt2>(PackUnsigned(Short4(As<Int4>(oC.x)), Short4(As<Int4>(oC.y))));
 			}
-			break;
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-			if((rgbaWriteMask & 0x0000000F) != 0x0)
+			value = *Pointer<UInt2>(buffer, 16);
+			mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskD01Q) + xMask * 8);
+			if(rgbaWriteMask != 0xF)
 			{
-				Int2 mergedMask, packedCol, value;
-				Int4 packed = ((As<Int4>(oC.w) & Int4(0x3)) << 30) |
-				              ((As<Int4>(oC.z) & Int4(0x3ff)) << 20) |
-				              ((As<Int4>(oC.y) & Int4(0x3ff)) << 10) |
-				              ((As<Int4>(oC.x) & Int4(0x3ff)));
-
-				buffer += 4 * x;
-				value = *Pointer<Int2>(buffer, 16);
-				mergedMask = *Pointer<Int2>(constants + OFFSET(Constants, maskD01Q) + xMask * 8);
-				if(rgbaWriteMask != 0xF)
-				{
-					mergedMask &= *Pointer<Int2>(constants + OFFSET(Constants, mask10Q[rgbaWriteMask][0]));
-				}
-				*Pointer<Int2>(buffer) = (As<Int2>(packed) & mergedMask) | (value & ~mergedMask);
-
-				buffer += pitchB;
-
-				value = *Pointer<Int2>(buffer, 16);
-				mergedMask = *Pointer<Int2>(constants + OFFSET(Constants, maskD23Q) + xMask * 8);
-				if(rgbaWriteMask != 0xF)
-				{
-					mergedMask &= *Pointer<Int2>(constants + OFFSET(Constants, mask10Q[rgbaWriteMask][0]));
-				}
-				*Pointer<Int2>(buffer) = (As<Int2>(Int4(packed.zwww)) & mergedMask) | (value & ~mergedMask);
+				mergedMask &= *Pointer<UInt2>(constants + OFFSET(Constants, maskB4Q[rgbaWriteMask][0]));
 			}
-			break;
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-			if((bgraWriteMask & 0x0000000F) != 0x0)
+			*Pointer<UInt2>(buffer) = (packedCol & mergedMask) | (value & ~mergedMask);
+
+			buffer += pitchB;
+
+			if(isSigned)
 			{
-				Int2 mergedMask, packedCol, value;
-				Int4 packed = ((As<Int4>(oC.w) & Int4(0x3)) << 30) |
-				              ((As<Int4>(oC.x) & Int4(0x3ff)) << 20) |
-				              ((As<Int4>(oC.y) & Int4(0x3ff)) << 10) |
-				              ((As<Int4>(oC.z) & Int4(0x3ff)));
-
-				buffer += 4 * x;
-				value = *Pointer<Int2>(buffer, 16);
-				mergedMask = *Pointer<Int2>(constants + OFFSET(Constants, maskD01Q) + xMask * 8);
-				if(bgraWriteMask != 0xF)
-				{
-					mergedMask &= *Pointer<Int2>(constants + OFFSET(Constants, mask10Q[bgraWriteMask][0]));
-				}
-				*Pointer<Int2>(buffer) = (As<Int2>(packed) & mergedMask) | (value & ~mergedMask);
-
-				buffer += *Pointer<Int>(data + OFFSET(DrawData, colorPitchB[index]));
-
-				value = *Pointer<Int2>(buffer, 16);
-				mergedMask = *Pointer<Int2>(constants + OFFSET(Constants, maskD23Q) + xMask * 8);
-				if(bgraWriteMask != 0xF)
-				{
-					mergedMask &= *Pointer<Int2>(constants + OFFSET(Constants, mask10Q[bgraWriteMask][0]));
-				}
-				*Pointer<Int2>(buffer) = (As<Int2>(Int4(packed.zwww)) & mergedMask) | (value & ~mergedMask);
+				packedCol = As<UInt2>(PackSigned(Short4(As<Int4>(oC.z)), Short4(As<Int4>(oC.w))));
 			}
-			break;
-		default:
-			UNSUPPORTED("VkFormat: %d", int(targetFormat));
+			else
+			{
+				packedCol = As<UInt2>(PackUnsigned(Short4(As<Int4>(oC.z)), Short4(As<Int4>(oC.w))));
+			}
+			value = *Pointer<UInt2>(buffer, 16);
+			mergedMask = *Pointer<UInt2>(constants + OFFSET(Constants, maskD23Q) + xMask * 8);
+			if(rgbaWriteMask != 0xF)
+			{
+				mergedMask &= *Pointer<UInt2>(constants + OFFSET(Constants, maskB4Q[rgbaWriteMask][0]));
+			}
+			*Pointer<UInt2>(buffer) = (packedCol & mergedMask) | (value & ~mergedMask);
+		}
+		break;
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		if((rgbaWriteMask & 0x0000000F) != 0x0)
+		{
+			Int2 mergedMask, packedCol, value;
+			Int4 packed = ((As<Int4>(oC.w) & Int4(0x3)) << 30) |
+			              ((As<Int4>(oC.z) & Int4(0x3ff)) << 20) |
+			              ((As<Int4>(oC.y) & Int4(0x3ff)) << 10) |
+			              ((As<Int4>(oC.x) & Int4(0x3ff)));
+
+			buffer += 4 * x;
+			value = *Pointer<Int2>(buffer, 16);
+			mergedMask = *Pointer<Int2>(constants + OFFSET(Constants, maskD01Q) + xMask * 8);
+			if(rgbaWriteMask != 0xF)
+			{
+				mergedMask &= *Pointer<Int2>(constants + OFFSET(Constants, mask10Q[rgbaWriteMask][0]));
+			}
+			*Pointer<Int2>(buffer) = (As<Int2>(packed) & mergedMask) | (value & ~mergedMask);
+
+			buffer += pitchB;
+
+			value = *Pointer<Int2>(buffer, 16);
+			mergedMask = *Pointer<Int2>(constants + OFFSET(Constants, maskD23Q) + xMask * 8);
+			if(rgbaWriteMask != 0xF)
+			{
+				mergedMask &= *Pointer<Int2>(constants + OFFSET(Constants, mask10Q[rgbaWriteMask][0]));
+			}
+			*Pointer<Int2>(buffer) = (As<Int2>(Int4(packed.zwww)) & mergedMask) | (value & ~mergedMask);
+		}
+		break;
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		if((bgraWriteMask & 0x0000000F) != 0x0)
+		{
+			Int2 mergedMask, packedCol, value;
+			Int4 packed = ((As<Int4>(oC.w) & Int4(0x3)) << 30) |
+			              ((As<Int4>(oC.x) & Int4(0x3ff)) << 20) |
+			              ((As<Int4>(oC.y) & Int4(0x3ff)) << 10) |
+			              ((As<Int4>(oC.z) & Int4(0x3ff)));
+
+			buffer += 4 * x;
+			value = *Pointer<Int2>(buffer, 16);
+			mergedMask = *Pointer<Int2>(constants + OFFSET(Constants, maskD01Q) + xMask * 8);
+			if(bgraWriteMask != 0xF)
+			{
+				mergedMask &= *Pointer<Int2>(constants + OFFSET(Constants, mask10Q[bgraWriteMask][0]));
+			}
+			*Pointer<Int2>(buffer) = (As<Int2>(packed) & mergedMask) | (value & ~mergedMask);
+
+			buffer += *Pointer<Int>(data + OFFSET(DrawData, colorPitchB[index]));
+
+			value = *Pointer<Int2>(buffer, 16);
+			mergedMask = *Pointer<Int2>(constants + OFFSET(Constants, maskD23Q) + xMask * 8);
+			if(bgraWriteMask != 0xF)
+			{
+				mergedMask &= *Pointer<Int2>(constants + OFFSET(Constants, mask10Q[bgraWriteMask][0]));
+			}
+			*Pointer<Int2>(buffer) = (As<Int2>(Int4(packed.zwww)) & mergedMask) | (value & ~mergedMask);
+		}
+		break;
+	default:
+		UNSUPPORTED("VkFormat: %d", int(targetFormat));
 	}
 }
 
diff --git a/src/Pipeline/SamplerCore.cpp b/src/Pipeline/SamplerCore.cpp
index 2a80211..646d875 100644
--- a/src/Pipeline/SamplerCore.cpp
+++ b/src/Pipeline/SamplerCore.cpp
@@ -36,10 +36,10 @@
 	Float4 a;  // Array layer coordinate
 	switch(state.textureType)
 	{
-		case VK_IMAGE_VIEW_TYPE_1D_ARRAY: a = uvwa[1]; break;
-		case VK_IMAGE_VIEW_TYPE_2D_ARRAY: a = uvwa[2]; break;
-		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: a = uvwa[3]; break;
-		default: break;
+	case VK_IMAGE_VIEW_TYPE_1D_ARRAY: a = uvwa[1]; break;
+	case VK_IMAGE_VIEW_TYPE_2D_ARRAY: a = uvwa[2]; break;
+	case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: a = uvwa[3]; break;
+	default: break;
 	}
 
 	Float lod;
@@ -138,93 +138,31 @@
 		{
 			switch(state.textureFormat)
 			{
-				case VK_FORMAT_R5G6B5_UNORM_PACK16:
-					c.x *= Float4(1.0f / 0xF800);
-					c.y *= Float4(1.0f / 0xFC00);
-					c.z *= Float4(1.0f / 0xF800);
-					break;
-				case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-					c.x *= Float4(1.0f / 0xF000);
-					c.y *= Float4(1.0f / 0xF000);
-					c.z *= Float4(1.0f / 0xF000);
-					c.w *= Float4(1.0f / 0xF000);
-					break;
-				case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-					c.x *= Float4(1.0f / 0xF800);
-					c.y *= Float4(1.0f / 0xF800);
-					c.z *= Float4(1.0f / 0xF800);
-					c.w *= Float4(1.0f / 0x8000);
-					break;
-				case VK_FORMAT_R8_SNORM:
-				case VK_FORMAT_R8G8_SNORM:
-				case VK_FORMAT_R8G8B8A8_SNORM:
-				case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-					c.x = Max(c.x * Float4(1.0f / 0x7F00), Float4(-1.0f));
-					c.y = Max(c.y * Float4(1.0f / 0x7F00), Float4(-1.0f));
-					c.z = Max(c.z * Float4(1.0f / 0x7F00), Float4(-1.0f));
-					c.w = Max(c.w * Float4(1.0f / 0x7F00), Float4(-1.0f));
-					break;
-				case VK_FORMAT_R8_UNORM:
-				case VK_FORMAT_R8G8_UNORM:
-				case VK_FORMAT_R8G8B8A8_UNORM:
-				case VK_FORMAT_B8G8R8A8_UNORM:
-				case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-				case VK_FORMAT_B8G8R8A8_SRGB:
-				case VK_FORMAT_R8G8B8A8_SRGB:
-				case VK_FORMAT_R8_SRGB:
-				case VK_FORMAT_R8G8_SRGB:
-					c.x *= Float4(1.0f / 0xFF00u);
-					c.y *= Float4(1.0f / 0xFF00u);
-					c.z *= Float4(1.0f / 0xFF00u);
-					c.w *= Float4(1.0f / 0xFF00u);
-					break;
-				case VK_FORMAT_R16_SNORM:
-				case VK_FORMAT_R16G16_SNORM:
-				case VK_FORMAT_R16G16B16A16_SNORM:
-					c.x = Max(c.x * Float4(1.0f / 0x7FFF), Float4(-1.0f));
-					c.y = Max(c.y * Float4(1.0f / 0x7FFF), Float4(-1.0f));
-					c.z = Max(c.z * Float4(1.0f / 0x7FFF), Float4(-1.0f));
-					c.w = Max(c.w * Float4(1.0f / 0x7FFF), Float4(-1.0f));
-					break;
-				default:
-					for(int component = 0; component < textureComponentCount(); component++)
-					{
-						c[component] *= Float4(hasUnsignedTextureComponent(component) ? 1.0f / 0xFFFF : 1.0f / 0x7FFF);
-					}
-			}
-		}
-	}
-	else  // 16-bit filtering.
-	{
-		Vector4s cs = sampleFilter(texture, u, v, w, a, offset, sample, lod, anisotropy, uDelta, vDelta, function);
-
-		switch(state.textureFormat)
-		{
 			case VK_FORMAT_R5G6B5_UNORM_PACK16:
-				c.x = Float4(As<UShort4>(cs.x)) * Float4(1.0f / 0xF800);
-				c.y = Float4(As<UShort4>(cs.y)) * Float4(1.0f / 0xFC00);
-				c.z = Float4(As<UShort4>(cs.z)) * Float4(1.0f / 0xF800);
+				c.x *= Float4(1.0f / 0xF800);
+				c.y *= Float4(1.0f / 0xFC00);
+				c.z *= Float4(1.0f / 0xF800);
 				break;
 			case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-				c.x = Float4(As<UShort4>(cs.x)) * Float4(1.0f / 0xF000);
-				c.y = Float4(As<UShort4>(cs.y)) * Float4(1.0f / 0xF000);
-				c.z = Float4(As<UShort4>(cs.z)) * Float4(1.0f / 0xF000);
-				c.w = Float4(As<UShort4>(cs.w)) * Float4(1.0f / 0xF000);
+				c.x *= Float4(1.0f / 0xF000);
+				c.y *= Float4(1.0f / 0xF000);
+				c.z *= Float4(1.0f / 0xF000);
+				c.w *= Float4(1.0f / 0xF000);
 				break;
 			case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-				c.x = Float4(As<UShort4>(cs.x)) * Float4(1.0f / 0xF800);
-				c.y = Float4(As<UShort4>(cs.y)) * Float4(1.0f / 0xF800);
-				c.z = Float4(As<UShort4>(cs.z)) * Float4(1.0f / 0xF800);
-				c.w = Float4(As<UShort4>(cs.w)) * Float4(1.0f / 0x8000);
+				c.x *= Float4(1.0f / 0xF800);
+				c.y *= Float4(1.0f / 0xF800);
+				c.z *= Float4(1.0f / 0xF800);
+				c.w *= Float4(1.0f / 0x8000);
 				break;
 			case VK_FORMAT_R8_SNORM:
 			case VK_FORMAT_R8G8_SNORM:
 			case VK_FORMAT_R8G8B8A8_SNORM:
 			case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-				c.x = Max(Float4(cs.x) * Float4(1.0f / 0x7F00), Float4(-1.0f));
-				c.y = Max(Float4(cs.y) * Float4(1.0f / 0x7F00), Float4(-1.0f));
-				c.z = Max(Float4(cs.z) * Float4(1.0f / 0x7F00), Float4(-1.0f));
-				c.w = Max(Float4(cs.w) * Float4(1.0f / 0x7F00), Float4(-1.0f));
+				c.x = Max(c.x * Float4(1.0f / 0x7F00), Float4(-1.0f));
+				c.y = Max(c.y * Float4(1.0f / 0x7F00), Float4(-1.0f));
+				c.z = Max(c.z * Float4(1.0f / 0x7F00), Float4(-1.0f));
+				c.w = Max(c.w * Float4(1.0f / 0x7F00), Float4(-1.0f));
 				break;
 			case VK_FORMAT_R8_UNORM:
 			case VK_FORMAT_R8G8_UNORM:
@@ -235,31 +173,93 @@
 			case VK_FORMAT_R8G8B8A8_SRGB:
 			case VK_FORMAT_R8_SRGB:
 			case VK_FORMAT_R8G8_SRGB:
-				c.x = Float4(As<UShort4>(cs.x)) * Float4(1.0f / 0xFF00u);
-				c.y = Float4(As<UShort4>(cs.y)) * Float4(1.0f / 0xFF00u);
-				c.z = Float4(As<UShort4>(cs.z)) * Float4(1.0f / 0xFF00u);
-				c.w = Float4(As<UShort4>(cs.w)) * Float4(1.0f / 0xFF00u);
+				c.x *= Float4(1.0f / 0xFF00u);
+				c.y *= Float4(1.0f / 0xFF00u);
+				c.z *= Float4(1.0f / 0xFF00u);
+				c.w *= Float4(1.0f / 0xFF00u);
 				break;
 			case VK_FORMAT_R16_SNORM:
 			case VK_FORMAT_R16G16_SNORM:
 			case VK_FORMAT_R16G16B16A16_SNORM:
-				c.x = Max(Float4(cs.x) * Float4(1.0f / 0x7FFF), Float4(-1.0f));
-				c.y = Max(Float4(cs.y) * Float4(1.0f / 0x7FFF), Float4(-1.0f));
-				c.z = Max(Float4(cs.z) * Float4(1.0f / 0x7FFF), Float4(-1.0f));
-				c.w = Max(Float4(cs.w) * Float4(1.0f / 0x7FFF), Float4(-1.0f));
+				c.x = Max(c.x * Float4(1.0f / 0x7FFF), Float4(-1.0f));
+				c.y = Max(c.y * Float4(1.0f / 0x7FFF), Float4(-1.0f));
+				c.z = Max(c.z * Float4(1.0f / 0x7FFF), Float4(-1.0f));
+				c.w = Max(c.w * Float4(1.0f / 0x7FFF), Float4(-1.0f));
 				break;
 			default:
 				for(int component = 0; component < textureComponentCount(); component++)
 				{
-					if(hasUnsignedTextureComponent(component))
-					{
-						convertUnsigned16(c[component], cs[component]);
-					}
-					else
-					{
-						convertSigned15(c[component], cs[component]);
-					}
+					c[component] *= Float4(hasUnsignedTextureComponent(component) ? 1.0f / 0xFFFF : 1.0f / 0x7FFF);
 				}
+			}
+		}
+	}
+	else  // 16-bit filtering.
+	{
+		Vector4s cs = sampleFilter(texture, u, v, w, a, offset, sample, lod, anisotropy, uDelta, vDelta, function);
+
+		switch(state.textureFormat)
+		{
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+			c.x = Float4(As<UShort4>(cs.x)) * Float4(1.0f / 0xF800);
+			c.y = Float4(As<UShort4>(cs.y)) * Float4(1.0f / 0xFC00);
+			c.z = Float4(As<UShort4>(cs.z)) * Float4(1.0f / 0xF800);
+			break;
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+			c.x = Float4(As<UShort4>(cs.x)) * Float4(1.0f / 0xF000);
+			c.y = Float4(As<UShort4>(cs.y)) * Float4(1.0f / 0xF000);
+			c.z = Float4(As<UShort4>(cs.z)) * Float4(1.0f / 0xF000);
+			c.w = Float4(As<UShort4>(cs.w)) * Float4(1.0f / 0xF000);
+			break;
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+			c.x = Float4(As<UShort4>(cs.x)) * Float4(1.0f / 0xF800);
+			c.y = Float4(As<UShort4>(cs.y)) * Float4(1.0f / 0xF800);
+			c.z = Float4(As<UShort4>(cs.z)) * Float4(1.0f / 0xF800);
+			c.w = Float4(As<UShort4>(cs.w)) * Float4(1.0f / 0x8000);
+			break;
+		case VK_FORMAT_R8_SNORM:
+		case VK_FORMAT_R8G8_SNORM:
+		case VK_FORMAT_R8G8B8A8_SNORM:
+		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+			c.x = Max(Float4(cs.x) * Float4(1.0f / 0x7F00), Float4(-1.0f));
+			c.y = Max(Float4(cs.y) * Float4(1.0f / 0x7F00), Float4(-1.0f));
+			c.z = Max(Float4(cs.z) * Float4(1.0f / 0x7F00), Float4(-1.0f));
+			c.w = Max(Float4(cs.w) * Float4(1.0f / 0x7F00), Float4(-1.0f));
+			break;
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_R8G8B8A8_UNORM:
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_R8G8B8A8_SRGB:
+		case VK_FORMAT_R8_SRGB:
+		case VK_FORMAT_R8G8_SRGB:
+			c.x = Float4(As<UShort4>(cs.x)) * Float4(1.0f / 0xFF00u);
+			c.y = Float4(As<UShort4>(cs.y)) * Float4(1.0f / 0xFF00u);
+			c.z = Float4(As<UShort4>(cs.z)) * Float4(1.0f / 0xFF00u);
+			c.w = Float4(As<UShort4>(cs.w)) * Float4(1.0f / 0xFF00u);
+			break;
+		case VK_FORMAT_R16_SNORM:
+		case VK_FORMAT_R16G16_SNORM:
+		case VK_FORMAT_R16G16B16A16_SNORM:
+			c.x = Max(Float4(cs.x) * Float4(1.0f / 0x7FFF), Float4(-1.0f));
+			c.y = Max(Float4(cs.y) * Float4(1.0f / 0x7FFF), Float4(-1.0f));
+			c.z = Max(Float4(cs.z) * Float4(1.0f / 0x7FFF), Float4(-1.0f));
+			c.w = Max(Float4(cs.w) * Float4(1.0f / 0x7FFF), Float4(-1.0f));
+			break;
+		default:
+			for(int component = 0; component < textureComponentCount(); component++)
+			{
+				if(hasUnsignedTextureComponent(component))
+				{
+					convertUnsigned16(c[component], cs[component]);
+				}
+				else
+				{
+					convertSigned15(c[component], cs[component]);
+				}
+			}
 		}
 	}
 
@@ -303,22 +303,22 @@
 {
 	switch(swizzle)
 	{
-		default: UNSUPPORTED("VkComponentSwizzle %d", (int)swizzle);
-		case VK_COMPONENT_SWIZZLE_R: return c.x;
-		case VK_COMPONENT_SWIZZLE_G: return c.y;
-		case VK_COMPONENT_SWIZZLE_B: return c.z;
-		case VK_COMPONENT_SWIZZLE_A: return c.w;
-		case VK_COMPONENT_SWIZZLE_ZERO: return Float4(0.0f, 0.0f, 0.0f, 0.0f);
-		case VK_COMPONENT_SWIZZLE_ONE:
-			if(integer)
-			{
-				return Float4(As<Float4>(sw::Int4(1, 1, 1, 1)));
-			}
-			else
-			{
-				return Float4(1.0f, 1.0f, 1.0f, 1.0f);
-			}
-			break;
+	default: UNSUPPORTED("VkComponentSwizzle %d", (int)swizzle);
+	case VK_COMPONENT_SWIZZLE_R: return c.x;
+	case VK_COMPONENT_SWIZZLE_G: return c.y;
+	case VK_COMPONENT_SWIZZLE_B: return c.z;
+	case VK_COMPONENT_SWIZZLE_A: return c.w;
+	case VK_COMPONENT_SWIZZLE_ZERO: return Float4(0.0f, 0.0f, 0.0f, 0.0f);
+	case VK_COMPONENT_SWIZZLE_ONE:
+		if(integer)
+		{
+			return Float4(As<Float4>(sw::Int4(1, 1, 1, 1)));
+		}
+		else
+		{
+			return Float4(1.0f, 1.0f, 1.0f, 1.0f);
+		}
+		break;
 	}
 };
 
@@ -339,20 +339,20 @@
 	{
 		switch(count)
 		{
-			case -1: return uvw - offset;
-			case 0: return uvw;
-			case +1: return uvw + offset;
-			case 2: return uvw + offset + offset;
+		case -1: return uvw - offset;
+		case 0: return uvw;
+		case +1: return uvw + offset;
+		case 2: return uvw + offset + offset;
 		}
 	}
 	else  // Clamp or mirror
 	{
 		switch(count)
 		{
-			case -1: return SubSat(As<UShort4>(uvw), As<UShort4>(offset));
-			case 0: return uvw;
-			case +1: return AddSat(As<UShort4>(uvw), As<UShort4>(offset));
-			case 2: return AddSat(AddSat(As<UShort4>(uvw), As<UShort4>(offset)), As<UShort4>(offset));
+		case -1: return SubSat(As<UShort4>(uvw), As<UShort4>(offset));
+		case 0: return uvw;
+		case +1: return AddSat(As<UShort4>(uvw), As<UShort4>(offset));
+		case 2: return AddSat(AddSat(As<UShort4>(uvw), As<UShort4>(offset)), As<UShort4>(offset));
 		}
 	}
 
@@ -710,16 +710,16 @@
 			VkComponentSwizzle swizzle = gatherSwizzle();
 			switch(swizzle)
 			{
-				case VK_COMPONENT_SWIZZLE_ZERO:
-				case VK_COMPONENT_SWIZZLE_ONE:
-					// Handled at the final component swizzle.
-					break;
-				default:
-					c.x = c01[swizzle - VK_COMPONENT_SWIZZLE_R];
-					c.y = c11[swizzle - VK_COMPONENT_SWIZZLE_R];
-					c.z = c10[swizzle - VK_COMPONENT_SWIZZLE_R];
-					c.w = c00[swizzle - VK_COMPONENT_SWIZZLE_R];
-					break;
+			case VK_COMPONENT_SWIZZLE_ZERO:
+			case VK_COMPONENT_SWIZZLE_ONE:
+				// Handled at the final component swizzle.
+				break;
+			default:
+				c.x = c01[swizzle - VK_COMPONENT_SWIZZLE_R];
+				c.y = c11[swizzle - VK_COMPONENT_SWIZZLE_R];
+				c.z = c10[swizzle - VK_COMPONENT_SWIZZLE_R];
+				c.w = c00[swizzle - VK_COMPONENT_SWIZZLE_R];
+				break;
 			}
 		}
 	}
@@ -1047,16 +1047,16 @@
 			VkComponentSwizzle swizzle = gatherSwizzle();
 			switch(swizzle)
 			{
-				case VK_COMPONENT_SWIZZLE_ZERO:
-				case VK_COMPONENT_SWIZZLE_ONE:
-					// Handled at the final component swizzle.
-					break;
-				default:
-					c.x = c01[swizzle - VK_COMPONENT_SWIZZLE_R];
-					c.y = c11[swizzle - VK_COMPONENT_SWIZZLE_R];
-					c.z = c10[swizzle - VK_COMPONENT_SWIZZLE_R];
-					c.w = c00[swizzle - VK_COMPONENT_SWIZZLE_R];
-					break;
+			case VK_COMPONENT_SWIZZLE_ZERO:
+			case VK_COMPONENT_SWIZZLE_ONE:
+				// Handled at the final component swizzle.
+				break;
+			default:
+				c.x = c01[swizzle - VK_COMPONENT_SWIZZLE_R];
+				c.y = c11[swizzle - VK_COMPONENT_SWIZZLE_R];
+				c.z = c10[swizzle - VK_COMPONENT_SWIZZLE_R];
+				c.w = c00[swizzle - VK_COMPONENT_SWIZZLE_R];
+				break;
 			}
 		}
 	}
@@ -1375,19 +1375,19 @@
 
 	switch(mode)
 	{
-		case AddressingMode::ADDRESSING_WRAP:
-			tmp = (tmp + whd * Int4(-MIN_TEXEL_OFFSET)) % whd;
-			break;
-		case AddressingMode::ADDRESSING_CLAMP:
-		case AddressingMode::ADDRESSING_MIRROR:
-		case AddressingMode::ADDRESSING_MIRRORONCE:
-		case AddressingMode::ADDRESSING_BORDER:  // FIXME: Implement and test ADDRESSING_MIRROR, ADDRESSING_MIRRORONCE, ADDRESSING_BORDER
-			tmp = Min(Max(tmp, Int4(0)), whd - Int4(1));
-			break;
-		case AddressingMode::ADDRESSING_SEAMLESS:
-			ASSERT(false);  // Cube sampling doesn't support offset.
-		default:
-			ASSERT(false);
+	case AddressingMode::ADDRESSING_WRAP:
+		tmp = (tmp + whd * Int4(-MIN_TEXEL_OFFSET)) % whd;
+		break;
+	case AddressingMode::ADDRESSING_CLAMP:
+	case AddressingMode::ADDRESSING_MIRROR:
+	case AddressingMode::ADDRESSING_MIRRORONCE:
+	case AddressingMode::ADDRESSING_BORDER:  // FIXME: Implement and test ADDRESSING_MIRROR, ADDRESSING_MIRRORONCE, ADDRESSING_BORDER
+		tmp = Min(Max(tmp, Int4(0)), whd - Int4(1));
+		break;
+	case AddressingMode::ADDRESSING_SEAMLESS:
+		ASSERT(false);  // Cube sampling doesn't support offset.
+	default:
+		ASSERT(false);
 	}
 
 	return As<Short4>(UShort4(tmp));
@@ -1506,32 +1506,32 @@
 
 		switch(state.textureFormat)
 		{
-			case VK_FORMAT_R5G6B5_UNORM_PACK16:
-				c.z = (c.x & Short4(0x001Fu)) << 11;
-				c.y = (c.x & Short4(0x07E0u)) << 5;
-				c.x = (c.x & Short4(0xF800u));
-				break;
-			case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-				c.w = (c.x << 12) & Short4(0xF000u);
-				c.z = (c.x) & Short4(0xF000u);
-				c.y = (c.x << 4) & Short4(0xF000u);
-				c.x = (c.x << 8) & Short4(0xF000u);
-				break;
-			case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-				c.w = (c.x) & Short4(0x8000u);
-				c.z = (c.x << 11) & Short4(0xF800u);
-				c.y = (c.x << 6) & Short4(0xF800u);
-				c.x = (c.x << 1) & Short4(0xF800u);
-				break;
-			default:
-				ASSERT(false);
+		case VK_FORMAT_R5G6B5_UNORM_PACK16:
+			c.z = (c.x & Short4(0x001Fu)) << 11;
+			c.y = (c.x & Short4(0x07E0u)) << 5;
+			c.x = (c.x & Short4(0xF800u));
+			break;
+		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+			c.w = (c.x << 12) & Short4(0xF000u);
+			c.z = (c.x) & Short4(0xF000u);
+			c.y = (c.x << 4) & Short4(0xF000u);
+			c.x = (c.x << 8) & Short4(0xF000u);
+			break;
+		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+			c.w = (c.x) & Short4(0x8000u);
+			c.z = (c.x << 11) & Short4(0xF800u);
+			c.y = (c.x << 6) & Short4(0xF800u);
+			c.x = (c.x << 1) & Short4(0xF800u);
+			break;
+		default:
+			ASSERT(false);
 		}
 	}
 	else if(has8bitTextureComponents())
 	{
 		switch(textureComponentCount())
 		{
-			case 4:
+		case 4:
 			{
 				Byte4 c0 = Pointer<Byte4>(buffer)[index[0]];
 				Byte4 c1 = Pointer<Byte4>(buffer)[index[1]];
@@ -1542,86 +1542,86 @@
 
 				switch(state.textureFormat)
 				{
-					case VK_FORMAT_B8G8R8A8_UNORM:
-					case VK_FORMAT_B8G8R8A8_SRGB:
-						c.z = As<Short4>(UnpackLow(c.x, c.y));
-						c.x = As<Short4>(UnpackHigh(c.x, c.y));
-						c.y = c.z;
-						c.w = c.x;
-						c.z = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.z));
-						c.y = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.y));
-						c.x = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.x));
-						c.w = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.w));
-						break;
-					case VK_FORMAT_R8G8B8A8_UNORM:
-					case VK_FORMAT_R8G8B8A8_SNORM:
-					case VK_FORMAT_R8G8B8A8_SINT:
-					case VK_FORMAT_R8G8B8A8_SRGB:
-					case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-					case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-					case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-					case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-						c.z = As<Short4>(UnpackHigh(c.x, c.y));
-						c.x = As<Short4>(UnpackLow(c.x, c.y));
-						c.y = c.x;
-						c.w = c.z;
-						c.x = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.x));
-						c.y = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.y));
-						c.z = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.z));
-						c.w = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.w));
-						// Propagate sign bit
-						if(state.textureFormat == VK_FORMAT_R8G8B8A8_SINT ||
-						   state.textureFormat == VK_FORMAT_A8B8G8R8_SINT_PACK32)
-						{
-							c.x >>= 8;
-							c.y >>= 8;
-							c.z >>= 8;
-							c.w >>= 8;
-						}
-						break;
-					case VK_FORMAT_R8G8B8A8_UINT:
-					case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-						c.z = As<Short4>(UnpackHigh(c.x, c.y));
-						c.x = As<Short4>(UnpackLow(c.x, c.y));
-						c.y = c.x;
-						c.w = c.z;
-						c.x = UnpackLow(As<Byte8>(c.x), As<Byte8>(Short4(0)));
-						c.y = UnpackHigh(As<Byte8>(c.y), As<Byte8>(Short4(0)));
-						c.z = UnpackLow(As<Byte8>(c.z), As<Byte8>(Short4(0)));
-						c.w = UnpackHigh(As<Byte8>(c.w), As<Byte8>(Short4(0)));
-						break;
-					default:
-						ASSERT(false);
+				case VK_FORMAT_B8G8R8A8_UNORM:
+				case VK_FORMAT_B8G8R8A8_SRGB:
+					c.z = As<Short4>(UnpackLow(c.x, c.y));
+					c.x = As<Short4>(UnpackHigh(c.x, c.y));
+					c.y = c.z;
+					c.w = c.x;
+					c.z = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.z));
+					c.y = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.y));
+					c.x = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.x));
+					c.w = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.w));
+					break;
+				case VK_FORMAT_R8G8B8A8_UNORM:
+				case VK_FORMAT_R8G8B8A8_SNORM:
+				case VK_FORMAT_R8G8B8A8_SINT:
+				case VK_FORMAT_R8G8B8A8_SRGB:
+				case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+				case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+				case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+				case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+					c.z = As<Short4>(UnpackHigh(c.x, c.y));
+					c.x = As<Short4>(UnpackLow(c.x, c.y));
+					c.y = c.x;
+					c.w = c.z;
+					c.x = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.x));
+					c.y = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.y));
+					c.z = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.z));
+					c.w = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.w));
+					// Propagate sign bit
+					if(state.textureFormat == VK_FORMAT_R8G8B8A8_SINT ||
+					   state.textureFormat == VK_FORMAT_A8B8G8R8_SINT_PACK32)
+					{
+						c.x >>= 8;
+						c.y >>= 8;
+						c.z >>= 8;
+						c.w >>= 8;
+					}
+					break;
+				case VK_FORMAT_R8G8B8A8_UINT:
+				case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+					c.z = As<Short4>(UnpackHigh(c.x, c.y));
+					c.x = As<Short4>(UnpackLow(c.x, c.y));
+					c.y = c.x;
+					c.w = c.z;
+					c.x = UnpackLow(As<Byte8>(c.x), As<Byte8>(Short4(0)));
+					c.y = UnpackHigh(As<Byte8>(c.y), As<Byte8>(Short4(0)));
+					c.z = UnpackLow(As<Byte8>(c.z), As<Byte8>(Short4(0)));
+					c.w = UnpackHigh(As<Byte8>(c.w), As<Byte8>(Short4(0)));
+					break;
+				default:
+					ASSERT(false);
 				}
 			}
 			break;
-			case 2:
-				c.x = Insert(c.x, Pointer<Short>(buffer)[index[0]], 0);
-				c.x = Insert(c.x, Pointer<Short>(buffer)[index[1]], 1);
-				c.x = Insert(c.x, Pointer<Short>(buffer)[index[2]], 2);
-				c.x = Insert(c.x, Pointer<Short>(buffer)[index[3]], 3);
+		case 2:
+			c.x = Insert(c.x, Pointer<Short>(buffer)[index[0]], 0);
+			c.x = Insert(c.x, Pointer<Short>(buffer)[index[1]], 1);
+			c.x = Insert(c.x, Pointer<Short>(buffer)[index[2]], 2);
+			c.x = Insert(c.x, Pointer<Short>(buffer)[index[3]], 3);
 
-				switch(state.textureFormat)
-				{
-					case VK_FORMAT_R8G8_UNORM:
-					case VK_FORMAT_R8G8_SNORM:
-					case VK_FORMAT_R8G8_SRGB:
-						c.y = (c.x & Short4(0xFF00u));
-						c.x = (c.x << 8);
-						break;
-					case VK_FORMAT_R8G8_SINT:
-						c.y = c.x >> 8;
-						c.x = (c.x << 8) >> 8;  // Propagate sign bit
-						break;
-					case VK_FORMAT_R8G8_UINT:
-						c.y = As<Short4>(As<UShort4>(c.x) >> 8);
-						c.x &= Short4(0x00FFu);
-						break;
-					default:
-						ASSERT(false);
-				}
+			switch(state.textureFormat)
+			{
+			case VK_FORMAT_R8G8_UNORM:
+			case VK_FORMAT_R8G8_SNORM:
+			case VK_FORMAT_R8G8_SRGB:
+				c.y = (c.x & Short4(0xFF00u));
+				c.x = (c.x << 8);
 				break;
-			case 1:
+			case VK_FORMAT_R8G8_SINT:
+				c.y = c.x >> 8;
+				c.x = (c.x << 8) >> 8;  // Propagate sign bit
+				break;
+			case VK_FORMAT_R8G8_UINT:
+				c.y = As<Short4>(As<UShort4>(c.x) >> 8);
+				c.x &= Short4(0x00FFu);
+				break;
+			default:
+				ASSERT(false);
+			}
+			break;
+		case 1:
 			{
 				Int c0 = Int(*Pointer<Byte>(buffer + index[0]));
 				Int c1 = Int(*Pointer<Byte>(buffer + index[1]));
@@ -1631,9 +1631,9 @@
 
 				switch(state.textureFormat)
 				{
-					case VK_FORMAT_R8_SINT:
-					case VK_FORMAT_R8_UINT:
-					case VK_FORMAT_S8_UINT:
+				case VK_FORMAT_R8_SINT:
+				case VK_FORMAT_R8_UINT:
+				case VK_FORMAT_S8_UINT:
 					{
 						Int zero(0);
 						c.x = Unpack(As<Byte4>(c0), As<Byte4>(zero));
@@ -1644,51 +1644,51 @@
 						}
 					}
 					break;
-					case VK_FORMAT_R8_SNORM:
-					case VK_FORMAT_R8_UNORM:
-					case VK_FORMAT_R8_SRGB:
-						// TODO: avoid populating the low bits at all.
-						c.x = Unpack(As<Byte4>(c0));
-						c.x &= Short4(0xFF00u);
-						break;
-					default:
-						c.x = Unpack(As<Byte4>(c0));
-						break;
+				case VK_FORMAT_R8_SNORM:
+				case VK_FORMAT_R8_UNORM:
+				case VK_FORMAT_R8_SRGB:
+					// TODO: avoid populating the low bits at all.
+					c.x = Unpack(As<Byte4>(c0));
+					c.x &= Short4(0xFF00u);
+					break;
+				default:
+					c.x = Unpack(As<Byte4>(c0));
+					break;
 				}
 			}
 			break;
-			default:
-				ASSERT(false);
+		default:
+			ASSERT(false);
 		}
 	}
 	else if(has16bitTextureComponents())
 	{
 		switch(textureComponentCount())
 		{
-			case 4:
-				c.x = Pointer<Short4>(buffer)[index[0]];
-				c.y = Pointer<Short4>(buffer)[index[1]];
-				c.z = Pointer<Short4>(buffer)[index[2]];
-				c.w = Pointer<Short4>(buffer)[index[3]];
-				transpose4x4(c.x, c.y, c.z, c.w);
-				break;
-			case 2:
-				c.x = *Pointer<Short4>(buffer + 4 * index[0]);
-				c.x = As<Short4>(UnpackLow(c.x, *Pointer<Short4>(buffer + 4 * index[1])));
-				c.z = *Pointer<Short4>(buffer + 4 * index[2]);
-				c.z = As<Short4>(UnpackLow(c.z, *Pointer<Short4>(buffer + 4 * index[3])));
-				c.y = c.x;
-				c.x = UnpackLow(As<Int2>(c.x), As<Int2>(c.z));
-				c.y = UnpackHigh(As<Int2>(c.y), As<Int2>(c.z));
-				break;
-			case 1:
-				c.x = Insert(c.x, Pointer<Short>(buffer)[index[0]], 0);
-				c.x = Insert(c.x, Pointer<Short>(buffer)[index[1]], 1);
-				c.x = Insert(c.x, Pointer<Short>(buffer)[index[2]], 2);
-				c.x = Insert(c.x, Pointer<Short>(buffer)[index[3]], 3);
-				break;
-			default:
-				ASSERT(false);
+		case 4:
+			c.x = Pointer<Short4>(buffer)[index[0]];
+			c.y = Pointer<Short4>(buffer)[index[1]];
+			c.z = Pointer<Short4>(buffer)[index[2]];
+			c.w = Pointer<Short4>(buffer)[index[3]];
+			transpose4x4(c.x, c.y, c.z, c.w);
+			break;
+		case 2:
+			c.x = *Pointer<Short4>(buffer + 4 * index[0]);
+			c.x = As<Short4>(UnpackLow(c.x, *Pointer<Short4>(buffer + 4 * index[1])));
+			c.z = *Pointer<Short4>(buffer + 4 * index[2]);
+			c.z = As<Short4>(UnpackLow(c.z, *Pointer<Short4>(buffer + 4 * index[3])));
+			c.y = c.x;
+			c.x = UnpackLow(As<Int2>(c.x), As<Int2>(c.z));
+			c.y = UnpackHigh(As<Int2>(c.y), As<Int2>(c.z));
+			break;
+		case 1:
+			c.x = Insert(c.x, Pointer<Short>(buffer)[index[0]], 0);
+			c.x = Insert(c.x, Pointer<Short>(buffer)[index[1]], 1);
+			c.x = Insert(c.x, Pointer<Short>(buffer)[index[2]], 2);
+			c.x = Insert(c.x, Pointer<Short>(buffer)[index[3]], 3);
+			break;
+		default:
+			ASSERT(false);
 		}
 	}
 	else if(state.textureFormat == VK_FORMAT_A2B10G10R10_UNORM_PACK32)
@@ -1865,20 +1865,20 @@
 
 				switch(state.ycbcrModel)
 				{
-					case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709:
-						Kb = 0.0722f;
-						Kr = 0.2126f;
-						break;
-					case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601:
-						Kb = 0.114f;
-						Kr = 0.299f;
-						break;
-					case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020:
-						Kb = 0.0593f;
-						Kr = 0.2627f;
-						break;
-					default:
-						UNSUPPORTED("ycbcrModel %d", int(state.ycbcrModel));
+				case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709:
+					Kb = 0.0722f;
+					Kr = 0.2126f;
+					break;
+				case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601:
+					Kb = 0.114f;
+					Kr = 0.299f;
+					break;
+				case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020:
+					Kb = 0.0593f;
+					Kr = 0.2627f;
+					break;
+				default:
+					UNSUPPORTED("ycbcrModel %d", int(state.ycbcrModel));
 				}
 
 				const float Kg = 1.0f - Kr - Kb;
@@ -1930,74 +1930,74 @@
 
 		switch(state.textureFormat)
 		{
-			case VK_FORMAT_R16_SFLOAT:
-				t0 = Int4(*Pointer<UShort4>(buffer + index[0] * 2));
-				t1 = Int4(*Pointer<UShort4>(buffer + index[1] * 2));
-				t2 = Int4(*Pointer<UShort4>(buffer + index[2] * 2));
-				t3 = Int4(*Pointer<UShort4>(buffer + index[3] * 2));
+		case VK_FORMAT_R16_SFLOAT:
+			t0 = Int4(*Pointer<UShort4>(buffer + index[0] * 2));
+			t1 = Int4(*Pointer<UShort4>(buffer + index[1] * 2));
+			t2 = Int4(*Pointer<UShort4>(buffer + index[2] * 2));
+			t3 = Int4(*Pointer<UShort4>(buffer + index[3] * 2));
 
-				c.x.x = Extract(As<Float4>(halfToFloatBits(t0)), 0);
-				c.x.y = Extract(As<Float4>(halfToFloatBits(t1)), 0);
-				c.x.z = Extract(As<Float4>(halfToFloatBits(t2)), 0);
-				c.x.w = Extract(As<Float4>(halfToFloatBits(t3)), 0);
-				break;
-			case VK_FORMAT_R16G16_SFLOAT:
-				t0 = Int4(*Pointer<UShort4>(buffer + index[0] * 4));
-				t1 = Int4(*Pointer<UShort4>(buffer + index[1] * 4));
-				t2 = Int4(*Pointer<UShort4>(buffer + index[2] * 4));
-				t3 = Int4(*Pointer<UShort4>(buffer + index[3] * 4));
+			c.x.x = Extract(As<Float4>(halfToFloatBits(t0)), 0);
+			c.x.y = Extract(As<Float4>(halfToFloatBits(t1)), 0);
+			c.x.z = Extract(As<Float4>(halfToFloatBits(t2)), 0);
+			c.x.w = Extract(As<Float4>(halfToFloatBits(t3)), 0);
+			break;
+		case VK_FORMAT_R16G16_SFLOAT:
+			t0 = Int4(*Pointer<UShort4>(buffer + index[0] * 4));
+			t1 = Int4(*Pointer<UShort4>(buffer + index[1] * 4));
+			t2 = Int4(*Pointer<UShort4>(buffer + index[2] * 4));
+			t3 = Int4(*Pointer<UShort4>(buffer + index[3] * 4));
 
-				// FIXME: shuffles
-				c.x = As<Float4>(halfToFloatBits(t0));
-				c.y = As<Float4>(halfToFloatBits(t1));
-				c.z = As<Float4>(halfToFloatBits(t2));
-				c.w = As<Float4>(halfToFloatBits(t3));
-				transpose4x4(c.x, c.y, c.z, c.w);
-				break;
-			case VK_FORMAT_R16G16B16A16_SFLOAT:
-				t0 = Int4(*Pointer<UShort4>(buffer + index[0] * 8));
-				t1 = Int4(*Pointer<UShort4>(buffer + index[1] * 8));
-				t2 = Int4(*Pointer<UShort4>(buffer + index[2] * 8));
-				t3 = Int4(*Pointer<UShort4>(buffer + index[3] * 8));
+			// FIXME: shuffles
+			c.x = As<Float4>(halfToFloatBits(t0));
+			c.y = As<Float4>(halfToFloatBits(t1));
+			c.z = As<Float4>(halfToFloatBits(t2));
+			c.w = As<Float4>(halfToFloatBits(t3));
+			transpose4x4(c.x, c.y, c.z, c.w);
+			break;
+		case VK_FORMAT_R16G16B16A16_SFLOAT:
+			t0 = Int4(*Pointer<UShort4>(buffer + index[0] * 8));
+			t1 = Int4(*Pointer<UShort4>(buffer + index[1] * 8));
+			t2 = Int4(*Pointer<UShort4>(buffer + index[2] * 8));
+			t3 = Int4(*Pointer<UShort4>(buffer + index[3] * 8));
 
-				c.x = As<Float4>(halfToFloatBits(t0));
-				c.y = As<Float4>(halfToFloatBits(t1));
-				c.z = As<Float4>(halfToFloatBits(t2));
-				c.w = As<Float4>(halfToFloatBits(t3));
-				transpose4x4(c.x, c.y, c.z, c.w);
-				break;
-			case VK_FORMAT_R32_SFLOAT:
-			case VK_FORMAT_R32_SINT:
-			case VK_FORMAT_R32_UINT:
-			case VK_FORMAT_D32_SFLOAT:
-				// FIXME: Optimal shuffling?
-				c.x.x = *Pointer<Float>(buffer + index[0] * 4);
-				c.x.y = *Pointer<Float>(buffer + index[1] * 4);
-				c.x.z = *Pointer<Float>(buffer + index[2] * 4);
-				c.x.w = *Pointer<Float>(buffer + index[3] * 4);
-				break;
-			case VK_FORMAT_R32G32_SFLOAT:
-			case VK_FORMAT_R32G32_SINT:
-			case VK_FORMAT_R32G32_UINT:
-				// FIXME: Optimal shuffling?
-				c.x.xy = *Pointer<Float4>(buffer + index[0] * 8);
-				c.x.zw = *Pointer<Float4>(buffer + index[1] * 8 - 8);
-				c.z.xy = *Pointer<Float4>(buffer + index[2] * 8);
-				c.z.zw = *Pointer<Float4>(buffer + index[3] * 8 - 8);
-				c.y = c.x;
-				c.x = Float4(c.x.xz, c.z.xz);
-				c.y = Float4(c.y.yw, c.z.yw);
-				break;
-			case VK_FORMAT_R32G32B32A32_SFLOAT:
-			case VK_FORMAT_R32G32B32A32_SINT:
-			case VK_FORMAT_R32G32B32A32_UINT:
-				c.x = *Pointer<Float4>(buffer + index[0] * 16, 16);
-				c.y = *Pointer<Float4>(buffer + index[1] * 16, 16);
-				c.z = *Pointer<Float4>(buffer + index[2] * 16, 16);
-				c.w = *Pointer<Float4>(buffer + index[3] * 16, 16);
-				transpose4x4(c.x, c.y, c.z, c.w);
-				break;
-			case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+			c.x = As<Float4>(halfToFloatBits(t0));
+			c.y = As<Float4>(halfToFloatBits(t1));
+			c.z = As<Float4>(halfToFloatBits(t2));
+			c.w = As<Float4>(halfToFloatBits(t3));
+			transpose4x4(c.x, c.y, c.z, c.w);
+			break;
+		case VK_FORMAT_R32_SFLOAT:
+		case VK_FORMAT_R32_SINT:
+		case VK_FORMAT_R32_UINT:
+		case VK_FORMAT_D32_SFLOAT:
+			// FIXME: Optimal shuffling?
+			c.x.x = *Pointer<Float>(buffer + index[0] * 4);
+			c.x.y = *Pointer<Float>(buffer + index[1] * 4);
+			c.x.z = *Pointer<Float>(buffer + index[2] * 4);
+			c.x.w = *Pointer<Float>(buffer + index[3] * 4);
+			break;
+		case VK_FORMAT_R32G32_SFLOAT:
+		case VK_FORMAT_R32G32_SINT:
+		case VK_FORMAT_R32G32_UINT:
+			// FIXME: Optimal shuffling?
+			c.x.xy = *Pointer<Float4>(buffer + index[0] * 8);
+			c.x.zw = *Pointer<Float4>(buffer + index[1] * 8 - 8);
+			c.z.xy = *Pointer<Float4>(buffer + index[2] * 8);
+			c.z.zw = *Pointer<Float4>(buffer + index[3] * 8 - 8);
+			c.y = c.x;
+			c.x = Float4(c.x.xz, c.z.xz);
+			c.y = Float4(c.y.yw, c.z.yw);
+			break;
+		case VK_FORMAT_R32G32B32A32_SFLOAT:
+		case VK_FORMAT_R32G32B32A32_SINT:
+		case VK_FORMAT_R32G32B32A32_UINT:
+			c.x = *Pointer<Float4>(buffer + index[0] * 16, 16);
+			c.y = *Pointer<Float4>(buffer + index[1] * 16, 16);
+			c.z = *Pointer<Float4>(buffer + index[2] * 16, 16);
+			c.w = *Pointer<Float4>(buffer + index[3] * 16, 16);
+			transpose4x4(c.x, c.y, c.z, c.w);
+			break;
+		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
 			{
 				Float4 t;  // TODO: add Insert(UInt4, RValue<UInt>)
 				t.x = *Pointer<Float>(buffer + index[0] * 4);
@@ -2009,9 +2009,9 @@
 				c.x = Float4(t0 & UInt4(0x1FF)) * c.w;
 				c.y = Float4((t0 >> 9) & UInt4(0x1FF)) * c.w;
 				c.z = Float4((t0 >> 18) & UInt4(0x1FF)) * c.w;
-				break;
 			}
-			case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+			break;
+		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
 			{
 				Float4 t;  // TODO: add Insert(UInt4, RValue<UInt>)
 				t.x = *Pointer<Float>(buffer + index[0] * 4);
@@ -2022,10 +2022,10 @@
 				c.x = As<Float4>(halfToFloatBits((t0 << 4) & UInt4(0x7FF0)));
 				c.y = As<Float4>(halfToFloatBits((t0 >> 7) & UInt4(0x7FF0)));
 				c.z = As<Float4>(halfToFloatBits((t0 >> 17) & UInt4(0x7FE0)));
-				break;
 			}
-			default:
-				UNSUPPORTED("Format %d", VkFormat(state.textureFormat));
+			break;
+		default:
+			UNSUPPORTED("Format %d", VkFormat(state.textureFormat));
 		}
 	}
 	else
@@ -2078,15 +2078,15 @@
 
 		switch(state.compareOp)
 		{
-			case VK_COMPARE_OP_LESS_OR_EQUAL: boolean = CmpLE(ref, c.x); break;
-			case VK_COMPARE_OP_GREATER_OR_EQUAL: boolean = CmpNLT(ref, c.x); break;
-			case VK_COMPARE_OP_LESS: boolean = CmpLT(ref, c.x); break;
-			case VK_COMPARE_OP_GREATER: boolean = CmpNLE(ref, c.x); break;
-			case VK_COMPARE_OP_EQUAL: boolean = CmpEQ(ref, c.x); break;
-			case VK_COMPARE_OP_NOT_EQUAL: boolean = CmpNEQ(ref, c.x); break;
-			case VK_COMPARE_OP_ALWAYS: boolean = Int4(-1); break;
-			case VK_COMPARE_OP_NEVER: boolean = Int4(0); break;
-			default: ASSERT(false);
+		case VK_COMPARE_OP_LESS_OR_EQUAL: boolean = CmpLE(ref, c.x); break;
+		case VK_COMPARE_OP_GREATER_OR_EQUAL: boolean = CmpNLT(ref, c.x); break;
+		case VK_COMPARE_OP_LESS: boolean = CmpLT(ref, c.x); break;
+		case VK_COMPARE_OP_GREATER: boolean = CmpNLE(ref, c.x); break;
+		case VK_COMPARE_OP_EQUAL: boolean = CmpEQ(ref, c.x); break;
+		case VK_COMPARE_OP_NOT_EQUAL: boolean = CmpNEQ(ref, c.x); break;
+		case VK_COMPARE_OP_ALWAYS: boolean = Int4(-1); break;
+		case VK_COMPARE_OP_NEVER: boolean = Int4(0); break;
+		default: ASSERT(false);
 		}
 
 		c.x = As<Float4>(boolean & As<Int4>(Float4(1.0f)));
@@ -2114,29 +2114,29 @@
 
 	switch(state.border)
 	{
-		case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
-		case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
-			borderRGB = Int4(0);
-			borderA = Int4(0);
-			break;
-		case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
-			borderRGB = Int4(0);
-			borderA = float_one;
-			break;
-		case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
-			borderRGB = Int4(0);
-			borderA = Int4(1);
-			break;
-		case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
-			borderRGB = float_one;
-			borderA = float_one;
-			break;
-		case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
-			borderRGB = Int4(1);
-			borderA = Int4(1);
-			break;
-		default:
-			UNSUPPORTED("sint/uint/sfloat border: %u", state.border);
+	case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
+	case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
+		borderRGB = Int4(0);
+		borderA = Int4(0);
+		break;
+	case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
+		borderRGB = Int4(0);
+		borderA = float_one;
+		break;
+	case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
+		borderRGB = Int4(0);
+		borderA = Int4(1);
+		break;
+	case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
+		borderRGB = float_one;
+		borderA = float_one;
+		break;
+	case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
+		borderRGB = Int4(1);
+		borderA = Int4(1);
+		break;
+	default:
+		UNSUPPORTED("sint/uint/sfloat border: %u", state.border);
 	}
 
 	Vector4f out;
@@ -2297,17 +2297,17 @@
 		{
 			switch(addressingMode)
 			{
-				case ADDRESSING_CLAMP:
-					coord = Min(Max(coord, Float4(0.0f)), Float4(dim) * As<Float4>(Int4(oneBits)));
-					break;
-				case ADDRESSING_BORDER:
-					// Don't map to a valid range here.
-					break;
-				default:
-					// "If unnormalizedCoordinates is VK_TRUE, addressModeU and addressModeV must each be
-					//  either VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE or VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER"
-					UNREACHABLE("addressingMode %d", int(addressingMode));
-					break;
+			case ADDRESSING_CLAMP:
+				coord = Min(Max(coord, Float4(0.0f)), Float4(dim) * As<Float4>(Int4(oneBits)));
+				break;
+			case ADDRESSING_BORDER:
+				// Don't map to a valid range here.
+				break;
+			default:
+				// "If unnormalizedCoordinates is VK_TRUE, addressModeU and addressModeV must each be
+				//  either VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE or VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER"
+				UNREACHABLE("addressingMode %d", int(addressingMode));
+				break;
 			}
 		}
 		else if(state.textureFilter == FILTER_GATHER && addressingMode == ADDRESSING_MIRROR)
@@ -2340,18 +2340,18 @@
 			{
 				switch(addressingMode)
 				{
-					case ADDRESSING_CLAMP:
-					case ADDRESSING_SEAMLESS:
-						// While cube face coordinates are nominally already in the [0.0, 1.0] range
-						// due to the projection, and numerical imprecision is tolerated due to the
-						// border of pixels for seamless filtering, the projection doesn't cause
-						// range normalization for Inf and NaN values. So we always clamp.
-						{
-							Float4 one = As<Float4>(Int4(oneBits));
-							coord = Min(Max(coord, Float4(0.0f)), one);
-						}
-						break;
-					case ADDRESSING_MIRROR:
+				case ADDRESSING_CLAMP:
+				case ADDRESSING_SEAMLESS:
+					// While cube face coordinates are nominally already in the [0.0, 1.0] range
+					// due to the projection, and numerical imprecision is tolerated due to the
+					// border of pixels for seamless filtering, the projection doesn't cause
+					// range normalization for Inf and NaN values. So we always clamp.
+					{
+						Float4 one = As<Float4>(Int4(oneBits));
+						coord = Min(Max(coord, Float4(0.0f)), one);
+					}
+					break;
+				case ADDRESSING_MIRROR:
 					{
 						Float4 half = As<Float4>(Int4(halfBits));
 						Float4 one = As<Float4>(Int4(oneBits));
@@ -2359,7 +2359,7 @@
 						coord = one - Abs(two * Frac(coord * half) - one);
 					}
 					break;
-					case ADDRESSING_MIRRORONCE:
+				case ADDRESSING_MIRRORONCE:
 					{
 						Float4 half = As<Float4>(Int4(halfBits));
 						Float4 one = As<Float4>(Int4(oneBits));
@@ -2367,12 +2367,12 @@
 						coord = one - Abs(two * Frac(Min(Max(coord, -one), two) * half) - one);
 					}
 					break;
-					case ADDRESSING_BORDER:
-						// Don't map to a valid range here.
-						break;
-					default:  // Wrap
-						coord = Frac(coord);
-						break;
+				case ADDRESSING_BORDER:
+					// Don't map to a valid range here.
+					break;
+				default:  // Wrap
+					coord = Frac(coord);
+					break;
 				}
 			}
 
@@ -2431,35 +2431,35 @@
 		{
 			switch(addressingMode)
 			{
-				case ADDRESSING_SEAMLESS:
-					UNREACHABLE("addressingMode %d", int(addressingMode));  // Cube sampling doesn't support offset.
-				case ADDRESSING_MIRROR:
-				case ADDRESSING_MIRRORONCE:
-					// TODO: Implement ADDRESSING_MIRROR and ADDRESSING_MIRRORONCE.
-					// Fall through to Clamp.
-				case ADDRESSING_CLAMP:
-					xyz0 = Min(Max(xyz0, Int4(0)), maxXYZ);
-					xyz1 = Min(Max(xyz1, Int4(0)), maxXYZ);
-					break;
-				default:  // Wrap
-					xyz0 = mod(xyz0, dim);
-					xyz1 = mod(xyz1, dim);
-					break;
+			case ADDRESSING_SEAMLESS:
+				UNREACHABLE("addressingMode %d", int(addressingMode));  // Cube sampling doesn't support offset.
+			case ADDRESSING_MIRROR:
+			case ADDRESSING_MIRRORONCE:
+				// TODO: Implement ADDRESSING_MIRROR and ADDRESSING_MIRRORONCE.
+				// Fall through to Clamp.
+			case ADDRESSING_CLAMP:
+				xyz0 = Min(Max(xyz0, Int4(0)), maxXYZ);
+				xyz1 = Min(Max(xyz1, Int4(0)), maxXYZ);
+				break;
+			default:  // Wrap
+				xyz0 = mod(xyz0, dim);
+				xyz1 = mod(xyz1, dim);
+				break;
 			}
 		}
 		else if(state.textureFilter != FILTER_POINT)
 		{
 			switch(addressingMode)
 			{
-				case ADDRESSING_SEAMLESS:
-					break;
-				case ADDRESSING_MIRROR:
-				case ADDRESSING_MIRRORONCE:
-				case ADDRESSING_CLAMP:
-					xyz0 = Max(xyz0, Int4(0));
-					xyz1 = Min(xyz1, maxXYZ);
-					break;
-				default:  // Wrap
+			case ADDRESSING_SEAMLESS:
+				break;
+			case ADDRESSING_MIRROR:
+			case ADDRESSING_MIRRORONCE:
+			case ADDRESSING_CLAMP:
+				xyz0 = Max(xyz0, Int4(0));
+				xyz1 = Min(xyz1, maxXYZ);
+				break;
+			default:  // Wrap
 				{
 					Int4 under = CmpLT(xyz0, Int4(0));
 					xyz0 = (under & maxXYZ) | (~under & xyz0);  // xyz < 0 ? dim - 1 : xyz   // TODO: IfThenElse()
@@ -2586,13 +2586,13 @@
 {
 	switch(state.gatherComponent)
 	{
-		case 0: return state.swizzle.r;
-		case 1: return state.swizzle.g;
-		case 2: return state.swizzle.b;
-		case 3: return state.swizzle.a;
-		default:
-			UNREACHABLE("Invalid component");
-			return VK_COMPONENT_SWIZZLE_R;
+	case 0: return state.swizzle.r;
+	case 1: return state.swizzle.g;
+	case 2: return state.swizzle.b;
+	case 3: return state.swizzle.a;
+	default:
+		UNREACHABLE("Invalid component");
+		return VK_COMPONENT_SWIZZLE_R;
 	}
 }
 
diff --git a/src/Pipeline/ShaderCore.cpp b/src/Pipeline/ShaderCore.cpp
index 7a01ffc..b384af0 100644
--- a/src/Pipeline/ShaderCore.cpp
+++ b/src/Pipeline/ShaderCore.cpp
@@ -55,10 +55,10 @@
 {
 	switch(i)
 	{
-		case 0: return x;
-		case 1: return y;
-		case 2: return z;
-		case 3: return w;
+	case 0: return x;
+	case 1: return y;
+	case 2: return z;
+	case 3: return w;
 	}
 
 	return x;
@@ -98,10 +98,10 @@
 {
 	switch(i)
 	{
-		case 0: return x;
-		case 1: return y;
-		case 2: return z;
-		case 3: return w;
+	case 0: return x;
+	case 1: return y;
+	case 2: return z;
+	case 3: return w;
 	}
 
 	return x;
@@ -141,10 +141,10 @@
 {
 	switch(i)
 	{
-		case 0: return x;
-		case 1: return y;
-		case 2: return z;
-		case 3: return w;
+	case 0: return x;
+	case 1: return y;
+	case 2: return z;
+	case 3: return w;
 	}
 
 	return x;
@@ -551,10 +551,10 @@
 {
 	switch(N)
 	{
-		case 1: transpose4x1(row0, row1, row2, row3); break;
-		case 2: transpose4x2(row0, row1, row2, row3); break;
-		case 3: transpose4x3(row0, row1, row2, row3); break;
-		case 4: transpose4x4(row0, row1, row2, row3); break;
+	case 1: transpose4x1(row0, row1, row2, row3); break;
+	case 2: transpose4x2(row0, row1, row2, row3); break;
+	case 3: transpose4x3(row0, row1, row2, row3); break;
+	case 4: transpose4x4(row0, row1, row2, row3); break;
 	}
 }
 
@@ -1029,14 +1029,14 @@
 		{
 			switch(robustness)
 			{
-				case OutOfBoundsBehavior::UndefinedBehavior:
-					// With this robustness setting the application/compiler guarantees in-bounds accesses on active lanes,
-					// but since it can't know in advance which branches are taken this must be true even for inactives lanes.
-					return true;
-				case OutOfBoundsBehavior::Nullify:
-				case OutOfBoundsBehavior::RobustBufferAccess:
-				case OutOfBoundsBehavior::UndefinedValue:
-					return false;
+			case OutOfBoundsBehavior::UndefinedBehavior:
+				// With this robustness setting the application/compiler guarantees in-bounds accesses on active lanes,
+				// but since it can't know in advance which branches are taken this must be true even for inactive lanes.
+				return true;
+			case OutOfBoundsBehavior::Nullify:
+			case OutOfBoundsBehavior::RobustBufferAccess:
+			case OutOfBoundsBehavior::UndefinedValue:
+				return false;
 			}
 		}
 	}
diff --git a/src/Pipeline/ShaderCore.hpp b/src/Pipeline/ShaderCore.hpp
index b749ada..bb9205e 100644
--- a/src/Pipeline/ShaderCore.hpp
+++ b/src/Pipeline/ShaderCore.hpp
@@ -339,14 +339,14 @@
 	{
 		switch(robustness)
 		{
-			case OutOfBoundsBehavior::Nullify:
-			case OutOfBoundsBehavior::RobustBufferAccess:
-			case OutOfBoundsBehavior::UndefinedValue:
-				mask &= isInBounds(sizeof(float), robustness);  // Disable out-of-bounds reads.
-				break;
-			case OutOfBoundsBehavior::UndefinedBehavior:
-				// Nothing to do. Application/compiler must guarantee no out-of-bounds accesses.
-				break;
+		case OutOfBoundsBehavior::Nullify:
+		case OutOfBoundsBehavior::RobustBufferAccess:
+		case OutOfBoundsBehavior::UndefinedValue:
+			mask &= isInBounds(sizeof(float), robustness);  // Disable out-of-bounds reads.
+			break;
+		case OutOfBoundsBehavior::UndefinedBehavior:
+			// Nothing to do. Application/compiler must guarantee no out-of-bounds accesses.
+			break;
 		}
 	}
 
@@ -371,14 +371,14 @@
 		bool zeroMaskedLanes = true;
 		switch(robustness)
 		{
-			case OutOfBoundsBehavior::Nullify:
-			case OutOfBoundsBehavior::RobustBufferAccess:  // Must either return an in-bounds value, or zero.
-				zeroMaskedLanes = true;
-				break;
-			case OutOfBoundsBehavior::UndefinedValue:
-			case OutOfBoundsBehavior::UndefinedBehavior:
-				zeroMaskedLanes = false;
-				break;
+		case OutOfBoundsBehavior::Nullify:
+		case OutOfBoundsBehavior::RobustBufferAccess:  // Must either return an in-bounds value, or zero.
+			zeroMaskedLanes = true;
+			break;
+		case OutOfBoundsBehavior::UndefinedValue:
+		case OutOfBoundsBehavior::UndefinedBehavior:
+			zeroMaskedLanes = false;
+			break;
 		}
 
 		if(hasStaticSequentialOffsets(sizeof(float)))
@@ -431,14 +431,14 @@
 
 	switch(robustness)
 	{
-		case OutOfBoundsBehavior::Nullify:
-		case OutOfBoundsBehavior::RobustBufferAccess:       // TODO: Allows writing anywhere within bounds. Could be faster than masking.
-		case OutOfBoundsBehavior::UndefinedValue:           // Should not be used for store operations. Treat as robust buffer access.
-			mask &= isInBounds(sizeof(float), robustness);  // Disable out-of-bounds writes.
-			break;
-		case OutOfBoundsBehavior::UndefinedBehavior:
-			// Nothing to do. Application/compiler must guarantee no out-of-bounds accesses.
-			break;
+	case OutOfBoundsBehavior::Nullify:
+	case OutOfBoundsBehavior::RobustBufferAccess:       // TODO: Allows writing anywhere within bounds. Could be faster than masking.
+	case OutOfBoundsBehavior::UndefinedValue:           // Should not be used for store operations. Treat as robust buffer access.
+		mask &= isInBounds(sizeof(float), robustness);  // Disable out-of-bounds writes.
+		break;
+	case OutOfBoundsBehavior::UndefinedBehavior:
+		// Nothing to do. Application/compiler must guarantee no out-of-bounds accesses.
+		break;
 	}
 
 	if(!atomic && order == std::memory_order_relaxed)
diff --git a/src/Pipeline/SpirvShader.cpp b/src/Pipeline/SpirvShader.cpp
index 9f33153..f82f1e8 100644
--- a/src/Pipeline/SpirvShader.cpp
+++ b/src/Pipeline/SpirvShader.cpp
@@ -74,7 +74,7 @@
 
 		switch(opcode)
 		{
-			case spv::OpEntryPoint:
+		case spv::OpEntryPoint:
 			{
 				executionModel = spv::ExecutionModel(insn.word(1));
 				auto id = Function::ID(insn.word(2));
@@ -85,14 +85,14 @@
 					ASSERT_MSG(entryPoint == 0, "Duplicate entry point with name '%s' and stage %d", name, int(stage));
 					entryPoint = id;
 				}
-				break;
 			}
+			break;
 
-			case spv::OpExecutionMode:
-				ProcessExecutionMode(insn);
-				break;
+		case spv::OpExecutionMode:
+			ProcessExecutionMode(insn);
+			break;
 
-			case spv::OpDecorate:
+		case spv::OpDecorate:
 			{
 				TypeOrObjectID targetId = insn.word(1);
 				auto decoration = static_cast<spv::Decoration>(insn.word(2));
@@ -102,29 +102,31 @@
 
 				switch(decoration)
 				{
-					case spv::DecorationDescriptorSet:
-						descriptorDecorations[targetId].DescriptorSet = value;
-						break;
-					case spv::DecorationBinding:
-						descriptorDecorations[targetId].Binding = value;
-						break;
-					case spv::DecorationInputAttachmentIndex:
-						descriptorDecorations[targetId].InputAttachmentIndex = value;
-						break;
-					case spv::DecorationSample:
-						modes.ContainsSampleQualifier = true;
-						break;
-					default:
-						// Only handling descriptor decorations here.
-						break;
+				case spv::DecorationDescriptorSet:
+					descriptorDecorations[targetId].DescriptorSet = value;
+					break;
+				case spv::DecorationBinding:
+					descriptorDecorations[targetId].Binding = value;
+					break;
+				case spv::DecorationInputAttachmentIndex:
+					descriptorDecorations[targetId].InputAttachmentIndex = value;
+					break;
+				case spv::DecorationSample:
+					modes.ContainsSampleQualifier = true;
+					break;
+				default:
+					// Only handling descriptor decorations here.
+					break;
 				}
 
 				if(decoration == spv::DecorationCentroid)
+				{
 					modes.NeedsCentroid = true;
-				break;
+				}
 			}
+			break;
 
-			case spv::OpMemberDecorate:
+		case spv::OpMemberDecorate:
 			{
 				Type::ID targetId = insn.word(1);
 				auto memberIndex = insn.word(2);
@@ -138,11 +140,13 @@
 				d[memberIndex].Apply(decoration, value);
 
 				if(decoration == spv::DecorationCentroid)
+				{
 					modes.NeedsCentroid = true;
-				break;
+				}
 			}
+			break;
 
-			case spv::OpDecorateId:
+		case spv::OpDecorateId:
 			{
 				auto decoration = static_cast<spv::Decoration>(insn.word(2));
 
@@ -151,23 +155,21 @@
 				// by HLSL to build the graphics pipeline with shader reflection. At the driver level,
 				// the CounterBuffer decoration does nothing, so we can safely ignore both decorations.
 				ASSERT(decoration == spv::DecorationUniformId || decoration == spv::DecorationCounterBuffer);
-				break;
 			}
+			break;
 
-			case spv::OpDecorateString:
-			case spv::OpMemberDecorateString:
-			{
-				// We assume these are for HLSL semantics, ignore them.
-				break;
-			}
+		case spv::OpDecorateString:
+		case spv::OpMemberDecorateString:
+			// We assume these are for HLSL semantics, ignore them.
+			break;
 
-			case spv::OpDecorationGroup:
-				// Nothing to do here. We don't need to record the definition of the group; we'll just have
-				// the bundle of decorations float around. If we were to ever walk the decorations directly,
-				// we might think about introducing this as a real Object.
-				break;
+		case spv::OpDecorationGroup:
+			// Nothing to do here. We don't need to record the definition of the group; we'll just have
+			// the bundle of decorations float around. If we were to ever walk the decorations directly,
+			// we might think about introducing this as a real Object.
+			break;
 
-			case spv::OpGroupDecorate:
+		case spv::OpGroupDecorate:
 			{
 				uint32_t group = insn.word(1);
 				auto const &groupDecorations = decorations[group];
@@ -179,11 +181,10 @@
 					decorations[target].Apply(groupDecorations);
 					descriptorDecorations[target].Apply(descriptorGroupDecorations);
 				}
-
-				break;
 			}
+			break;
 
-			case spv::OpGroupMemberDecorate:
+		case spv::OpGroupMemberDecorate:
 			{
 				auto const &srcDecorations = decorations[insn.word(1)];
 				for(auto i = 2u; i < insn.wordCount(); i += 2)
@@ -195,27 +196,27 @@
 						d.resize(memberIndex + 1);  // on demand resize, see above...
 					d[memberIndex].Apply(srcDecorations);
 				}
-				break;
 			}
+			break;
 
-			case spv::OpLabel:
+		case spv::OpLabel:
 			{
 				ASSERT(currentBlock.value() == 0);
 				currentBlock = Block::ID(insn.word(1));
 				blockStart = insn;
-				break;
 			}
+			break;
 
-			// Branch Instructions (subset of Termination Instructions):
-			case spv::OpBranch:
-			case spv::OpBranchConditional:
-			case spv::OpSwitch:
-			case spv::OpReturn:
-				// [[fallthrough]]
+		// Branch Instructions (subset of Termination Instructions):
+		case spv::OpBranch:
+		case spv::OpBranchConditional:
+		case spv::OpSwitch:
+		case spv::OpReturn:
+			// [[fallthrough]]
 
-			// Termination instruction:
-			case spv::OpKill:
-			case spv::OpUnreachable:
+		// Termination instruction:
+		case spv::OpKill:
+		case spv::OpUnreachable:
 			{
 				ASSERT(currentBlock.value() != 0);
 				ASSERT(currentFunction.value() != 0);
@@ -229,31 +230,31 @@
 				{
 					modes.ContainsKill = true;
 				}
-				break;
 			}
+			break;
 
-			case spv::OpLoopMerge:
-			case spv::OpSelectionMerge:
-				break;  // Nothing to do in analysis pass.
+		case spv::OpLoopMerge:
+		case spv::OpSelectionMerge:
+			break;  // Nothing to do in analysis pass.
 
-			case spv::OpTypeVoid:
-			case spv::OpTypeBool:
-			case spv::OpTypeInt:
-			case spv::OpTypeFloat:
-			case spv::OpTypeVector:
-			case spv::OpTypeMatrix:
-			case spv::OpTypeImage:
-			case spv::OpTypeSampler:
-			case spv::OpTypeSampledImage:
-			case spv::OpTypeArray:
-			case spv::OpTypeRuntimeArray:
-			case spv::OpTypeStruct:
-			case spv::OpTypePointer:
-			case spv::OpTypeFunction:
-				DeclareType(insn);
-				break;
+		case spv::OpTypeVoid:
+		case spv::OpTypeBool:
+		case spv::OpTypeInt:
+		case spv::OpTypeFloat:
+		case spv::OpTypeVector:
+		case spv::OpTypeMatrix:
+		case spv::OpTypeImage:
+		case spv::OpTypeSampler:
+		case spv::OpTypeSampledImage:
+		case spv::OpTypeArray:
+		case spv::OpTypeRuntimeArray:
+		case spv::OpTypeStruct:
+		case spv::OpTypePointer:
+		case spv::OpTypeFunction:
+			DeclareType(insn);
+			break;
 
-			case spv::OpVariable:
+		case spv::OpVariable:
 			{
 				Type::ID typeId = insn.word(1);
 				Object::ID resultId = insn.word(2);
@@ -268,64 +269,64 @@
 
 				switch(storageClass)
 				{
-					case spv::StorageClassInput:
-					case spv::StorageClassOutput:
-						ProcessInterfaceVariable(object);
-						break;
+				case spv::StorageClassInput:
+				case spv::StorageClassOutput:
+					ProcessInterfaceVariable(object);
+					break;
 
-					case spv::StorageClassUniform:
-					case spv::StorageClassStorageBuffer:
-						object.kind = Object::Kind::DescriptorSet;
-						break;
+				case spv::StorageClassUniform:
+				case spv::StorageClassStorageBuffer:
+					object.kind = Object::Kind::DescriptorSet;
+					break;
 
-					case spv::StorageClassPushConstant:
-					case spv::StorageClassPrivate:
-					case spv::StorageClassFunction:
-					case spv::StorageClassUniformConstant:
-						break;  // Correctly handled.
+				case spv::StorageClassPushConstant:
+				case spv::StorageClassPrivate:
+				case spv::StorageClassFunction:
+				case spv::StorageClassUniformConstant:
+					break;  // Correctly handled.
 
-					case spv::StorageClassWorkgroup:
+				case spv::StorageClassWorkgroup:
 					{
 						auto &elTy = getType(getType(typeId).element);
 						auto sizeInBytes = elTy.componentCount * static_cast<uint32_t>(sizeof(float));
 						workgroupMemory.allocate(resultId, sizeInBytes);
 						object.kind = Object::Kind::Pointer;
-						break;
 					}
-					case spv::StorageClassAtomicCounter:
-					case spv::StorageClassImage:
-						UNSUPPORTED("StorageClass %d not yet supported", (int)storageClass);
-						break;
+					break;
+				case spv::StorageClassAtomicCounter:
+				case spv::StorageClassImage:
+					UNSUPPORTED("StorageClass %d not yet supported", (int)storageClass);
+					break;
 
-					case spv::StorageClassCrossWorkgroup:
-						UNSUPPORTED("SPIR-V OpenCL Execution Model (StorageClassCrossWorkgroup)");
-						break;
+				case spv::StorageClassCrossWorkgroup:
+					UNSUPPORTED("SPIR-V OpenCL Execution Model (StorageClassCrossWorkgroup)");
+					break;
 
-					case spv::StorageClassGeneric:
-						UNSUPPORTED("SPIR-V GenericPointer Capability (StorageClassGeneric)");
-						break;
+				case spv::StorageClassGeneric:
+					UNSUPPORTED("SPIR-V GenericPointer Capability (StorageClassGeneric)");
+					break;
 
-					default:
-						UNREACHABLE("Unexpected StorageClass %d", storageClass);  // See Appendix A of the Vulkan spec.
-						break;
+				default:
+					UNREACHABLE("Unexpected StorageClass %d", storageClass);  // See Appendix A of the Vulkan spec.
+					break;
 				}
-				break;
 			}
+			break;
 
-			case spv::OpConstant:
-			case spv::OpSpecConstant:
-				CreateConstant(insn).constantValue[0] = insn.word(3);
-				break;
-			case spv::OpConstantFalse:
-			case spv::OpSpecConstantFalse:
-				CreateConstant(insn).constantValue[0] = 0;  // Represent Boolean false as zero.
-				break;
-			case spv::OpConstantTrue:
-			case spv::OpSpecConstantTrue:
-				CreateConstant(insn).constantValue[0] = ~0u;  // Represent Boolean true as all bits set.
-				break;
-			case spv::OpConstantNull:
-			case spv::OpUndef:
+		case spv::OpConstant:
+		case spv::OpSpecConstant:
+			CreateConstant(insn).constantValue[0] = insn.word(3);
+			break;
+		case spv::OpConstantFalse:
+		case spv::OpSpecConstantFalse:
+			CreateConstant(insn).constantValue[0] = 0;  // Represent Boolean false as zero.
+			break;
+		case spv::OpConstantTrue:
+		case spv::OpSpecConstantTrue:
+			CreateConstant(insn).constantValue[0] = ~0u;  // Represent Boolean true as all bits set.
+			break;
+		case spv::OpConstantNull:
+		case spv::OpUndef:
 			{
 				// TODO: consider a real LLVM-level undef. For now, zero is a perfectly good value.
 				// OpConstantNull forms a constant of arbitrary type, all zeros.
@@ -335,10 +336,10 @@
 				{
 					object.constantValue[i] = 0;
 				}
-				break;
 			}
-			case spv::OpConstantComposite:
-			case spv::OpSpecConstantComposite:
+			break;
+		case spv::OpConstantComposite:
+		case spv::OpSpecConstantComposite:
 			{
 				auto &object = CreateConstant(insn);
 				auto offset = 0u;
@@ -370,54 +371,56 @@
 					modes.WorkgroupSizeY = object.constantValue[1];
 					modes.WorkgroupSizeZ = object.constantValue[2];
 				}
-				break;
 			}
-			case spv::OpSpecConstantOp:
-				EvalSpecConstantOp(insn);
-				break;
+			break;
+		case spv::OpSpecConstantOp:
+			EvalSpecConstantOp(insn);
+			break;
 
-			case spv::OpCapability:
+		case spv::OpCapability:
 			{
 				auto capability = static_cast<spv::Capability>(insn.word(1));
 				switch(capability)
 				{
-					case spv::CapabilityMatrix: capabilities.Matrix = true; break;
-					case spv::CapabilityShader: capabilities.Shader = true; break;
-					case spv::CapabilityStorageImageMultisample: capabilities.StorageImageMultisample = true; break;
-					case spv::CapabilityClipDistance: capabilities.ClipDistance = true; break;
-					case spv::CapabilityCullDistance: capabilities.CullDistance = true; break;
-					case spv::CapabilityImageCubeArray: capabilities.ImageCubeArray = true; break;
-					case spv::CapabilitySampleRateShading: capabilities.SampleRateShading = true; break;
-					case spv::CapabilityInputAttachment: capabilities.InputAttachment = true; break;
-					case spv::CapabilitySampled1D: capabilities.Sampled1D = true; break;
-					case spv::CapabilityImage1D: capabilities.Image1D = true; break;
-					case spv::CapabilitySampledBuffer: capabilities.SampledBuffer = true; break;
-					case spv::CapabilitySampledCubeArray: capabilities.SampledCubeArray = true; break;
-					case spv::CapabilityImageBuffer: capabilities.ImageBuffer = true; break;
-					case spv::CapabilityImageMSArray: capabilities.ImageMSArray = true; break;
-					case spv::CapabilityStorageImageExtendedFormats: capabilities.StorageImageExtendedFormats = true; break;
-					case spv::CapabilityImageQuery: capabilities.ImageQuery = true; break;
-					case spv::CapabilityDerivativeControl: capabilities.DerivativeControl = true; break;
-					case spv::CapabilityInterpolationFunction: capabilities.InterpolationFunction = true; break;
-					case spv::CapabilityGroupNonUniform: capabilities.GroupNonUniform = true; break;
-					case spv::CapabilityGroupNonUniformVote: capabilities.GroupNonUniformVote = true; break;
-					case spv::CapabilityGroupNonUniformArithmetic: capabilities.GroupNonUniformArithmetic = true; break;
-					case spv::CapabilityGroupNonUniformBallot: capabilities.GroupNonUniformBallot = true; break;
-					case spv::CapabilityGroupNonUniformShuffle: capabilities.GroupNonUniformShuffle = true; break;
-					case spv::CapabilityGroupNonUniformShuffleRelative: capabilities.GroupNonUniformShuffleRelative = true; break;
-					case spv::CapabilityDeviceGroup: capabilities.DeviceGroup = true; break;
-					case spv::CapabilityMultiView: capabilities.MultiView = true; break;
-					case spv::CapabilityStencilExportEXT: capabilities.StencilExportEXT = true; break;
-					default:
-						UNSUPPORTED("Unsupported capability %u", insn.word(1));
+				case spv::CapabilityMatrix: capabilities.Matrix = true; break;
+				case spv::CapabilityShader: capabilities.Shader = true; break;
+				case spv::CapabilityStorageImageMultisample: capabilities.StorageImageMultisample = true; break;
+				case spv::CapabilityClipDistance: capabilities.ClipDistance = true; break;
+				case spv::CapabilityCullDistance: capabilities.CullDistance = true; break;
+				case spv::CapabilityImageCubeArray: capabilities.ImageCubeArray = true; break;
+				case spv::CapabilitySampleRateShading: capabilities.SampleRateShading = true; break;
+				case spv::CapabilityInputAttachment: capabilities.InputAttachment = true; break;
+				case spv::CapabilitySampled1D: capabilities.Sampled1D = true; break;
+				case spv::CapabilityImage1D: capabilities.Image1D = true; break;
+				case spv::CapabilitySampledBuffer: capabilities.SampledBuffer = true; break;
+				case spv::CapabilitySampledCubeArray: capabilities.SampledCubeArray = true; break;
+				case spv::CapabilityImageBuffer: capabilities.ImageBuffer = true; break;
+				case spv::CapabilityImageMSArray: capabilities.ImageMSArray = true; break;
+				case spv::CapabilityStorageImageExtendedFormats: capabilities.StorageImageExtendedFormats = true; break;
+				case spv::CapabilityImageQuery: capabilities.ImageQuery = true; break;
+				case spv::CapabilityDerivativeControl: capabilities.DerivativeControl = true; break;
+				case spv::CapabilityInterpolationFunction: capabilities.InterpolationFunction = true; break;
+				case spv::CapabilityGroupNonUniform: capabilities.GroupNonUniform = true; break;
+				case spv::CapabilityGroupNonUniformVote: capabilities.GroupNonUniformVote = true; break;
+				case spv::CapabilityGroupNonUniformArithmetic: capabilities.GroupNonUniformArithmetic = true; break;
+				case spv::CapabilityGroupNonUniformBallot: capabilities.GroupNonUniformBallot = true; break;
+				case spv::CapabilityGroupNonUniformShuffle: capabilities.GroupNonUniformShuffle = true; break;
+				case spv::CapabilityGroupNonUniformShuffleRelative: capabilities.GroupNonUniformShuffleRelative = true; break;
+				case spv::CapabilityDeviceGroup: capabilities.DeviceGroup = true; break;
+				case spv::CapabilityMultiView: capabilities.MultiView = true; break;
+				case spv::CapabilityStencilExportEXT: capabilities.StencilExportEXT = true; break;
+				default:
+					UNSUPPORTED("Unsupported capability %u", insn.word(1));
 				}
-				break;  // Various capabilities will be declared, but none affect our code generation at this point.
+
+				// Various capabilities will be declared, but none affect our code generation at this point.
 			}
+			break;
 
-			case spv::OpMemoryModel:
-				break;  // Memory model does not affect our code generation until we decide to do Vulkan Memory Model support.
+		case spv::OpMemoryModel:
+			break;  // Memory model does not affect our code generation until we decide to do Vulkan Memory Model support.
 
-			case spv::OpFunction:
+		case spv::OpFunction:
 			{
 				auto functionId = Function::ID(insn.word(2));
 				ASSERT_MSG(currentFunction == 0, "Functions %d and %d overlap", currentFunction.value(), functionId.value());
@@ -435,14 +438,14 @@
 					}
 				}
 				ASSERT_MSG(function.entry != 0, "Function<%d> has no label", currentFunction.value());
-				break;
 			}
+			break;
 
-			case spv::OpFunctionEnd:
-				currentFunction = 0;
-				break;
+		case spv::OpFunctionEnd:
+			currentFunction = 0;
+			break;
 
-			case spv::OpExtInstImport:
+		case spv::OpExtInstImport:
 			{
 				static constexpr std::pair<const char *, Extension::Name> extensionsByName[] = {
 					{ "GLSL.std.450", Extension::GLSLstd450 },
@@ -468,50 +471,50 @@
 				}
 				extensionsByID.emplace(id, ext);
 				extensionsImported.emplace(ext.name);
-				break;
 			}
-			case spv::OpName:
-			case spv::OpMemberName:
-			case spv::OpSource:
-			case spv::OpSourceContinued:
-			case spv::OpSourceExtension:
-			case spv::OpLine:
-			case spv::OpNoLine:
-			case spv::OpModuleProcessed:
-				// No semantic impact
-				break;
+			break;
+		case spv::OpName:
+		case spv::OpMemberName:
+		case spv::OpSource:
+		case spv::OpSourceContinued:
+		case spv::OpSourceExtension:
+		case spv::OpLine:
+		case spv::OpNoLine:
+		case spv::OpModuleProcessed:
+			// No semantic impact
+			break;
 
-			case spv::OpString:
-				strings.emplace(insn.word(1), insn.string(2));
-				break;
+		case spv::OpString:
+			strings.emplace(insn.word(1), insn.string(2));
+			break;
 
-			case spv::OpFunctionParameter:
-				// These should have all been removed by preprocessing passes. If we see them here,
-				// our assumptions are wrong and we will probably generate wrong code.
-				UNREACHABLE("%s should have already been lowered.", OpcodeName(opcode));
-				break;
+		case spv::OpFunctionParameter:
+			// These should have all been removed by preprocessing passes. If we see them here,
+			// our assumptions are wrong and we will probably generate wrong code.
+			UNREACHABLE("%s should have already been lowered.", OpcodeName(opcode));
+			break;
 
-			case spv::OpFunctionCall:
-				// TODO(b/141246700): Add full support for spv::OpFunctionCall
-				break;
+		case spv::OpFunctionCall:
+			// TODO(b/141246700): Add full support for spv::OpFunctionCall
+			break;
 
-			case spv::OpFConvert:
-				UNSUPPORTED("SPIR-V Float16 or Float64 Capability (OpFConvert)");
-				break;
+		case spv::OpFConvert:
+			UNSUPPORTED("SPIR-V Float16 or Float64 Capability (OpFConvert)");
+			break;
 
-			case spv::OpSConvert:
-				UNSUPPORTED("SPIR-V Int16 or Int64 Capability (OpSConvert)");
-				break;
+		case spv::OpSConvert:
+			UNSUPPORTED("SPIR-V Int16 or Int64 Capability (OpSConvert)");
+			break;
 
-			case spv::OpUConvert:
-				UNSUPPORTED("SPIR-V Int16 or Int64 Capability (OpUConvert)");
-				break;
+		case spv::OpUConvert:
+			UNSUPPORTED("SPIR-V Int16 or Int64 Capability (OpUConvert)");
+			break;
 
-			case spv::OpLoad:
-			case spv::OpAccessChain:
-			case spv::OpInBoundsAccessChain:
-			case spv::OpSampledImage:
-			case spv::OpImage:
+		case spv::OpLoad:
+		case spv::OpAccessChain:
+		case spv::OpInBoundsAccessChain:
+		case spv::OpSampledImage:
+		case spv::OpImage:
 			{
 				// Propagate the descriptor decorations to the result.
 				Object::ID resultId = insn.word(2);
@@ -536,201 +539,201 @@
 			}
 			break;
 
-			case spv::OpCompositeConstruct:
-			case spv::OpCompositeInsert:
-			case spv::OpCompositeExtract:
-			case spv::OpVectorShuffle:
-			case spv::OpVectorTimesScalar:
-			case spv::OpMatrixTimesScalar:
-			case spv::OpMatrixTimesVector:
-			case spv::OpVectorTimesMatrix:
-			case spv::OpMatrixTimesMatrix:
-			case spv::OpOuterProduct:
-			case spv::OpTranspose:
-			case spv::OpVectorExtractDynamic:
-			case spv::OpVectorInsertDynamic:
-			// Unary ops
-			case spv::OpNot:
-			case spv::OpBitFieldInsert:
-			case spv::OpBitFieldSExtract:
-			case spv::OpBitFieldUExtract:
-			case spv::OpBitReverse:
-			case spv::OpBitCount:
-			case spv::OpSNegate:
-			case spv::OpFNegate:
-			case spv::OpLogicalNot:
-			case spv::OpQuantizeToF16:
-			// Binary ops
-			case spv::OpIAdd:
-			case spv::OpISub:
-			case spv::OpIMul:
-			case spv::OpSDiv:
-			case spv::OpUDiv:
-			case spv::OpFAdd:
-			case spv::OpFSub:
-			case spv::OpFMul:
-			case spv::OpFDiv:
-			case spv::OpFMod:
-			case spv::OpFRem:
-			case spv::OpFOrdEqual:
-			case spv::OpFUnordEqual:
-			case spv::OpFOrdNotEqual:
-			case spv::OpFUnordNotEqual:
-			case spv::OpFOrdLessThan:
-			case spv::OpFUnordLessThan:
-			case spv::OpFOrdGreaterThan:
-			case spv::OpFUnordGreaterThan:
-			case spv::OpFOrdLessThanEqual:
-			case spv::OpFUnordLessThanEqual:
-			case spv::OpFOrdGreaterThanEqual:
-			case spv::OpFUnordGreaterThanEqual:
-			case spv::OpSMod:
-			case spv::OpSRem:
-			case spv::OpUMod:
-			case spv::OpIEqual:
-			case spv::OpINotEqual:
-			case spv::OpUGreaterThan:
-			case spv::OpSGreaterThan:
-			case spv::OpUGreaterThanEqual:
-			case spv::OpSGreaterThanEqual:
-			case spv::OpULessThan:
-			case spv::OpSLessThan:
-			case spv::OpULessThanEqual:
-			case spv::OpSLessThanEqual:
-			case spv::OpShiftRightLogical:
-			case spv::OpShiftRightArithmetic:
-			case spv::OpShiftLeftLogical:
-			case spv::OpBitwiseOr:
-			case spv::OpBitwiseXor:
-			case spv::OpBitwiseAnd:
-			case spv::OpLogicalOr:
-			case spv::OpLogicalAnd:
-			case spv::OpLogicalEqual:
-			case spv::OpLogicalNotEqual:
-			case spv::OpUMulExtended:
-			case spv::OpSMulExtended:
-			case spv::OpIAddCarry:
-			case spv::OpISubBorrow:
-			case spv::OpDot:
-			case spv::OpConvertFToU:
-			case spv::OpConvertFToS:
-			case spv::OpConvertSToF:
-			case spv::OpConvertUToF:
-			case spv::OpBitcast:
-			case spv::OpSelect:
-			case spv::OpIsInf:
-			case spv::OpIsNan:
-			case spv::OpAny:
-			case spv::OpAll:
-			case spv::OpDPdx:
-			case spv::OpDPdxCoarse:
-			case spv::OpDPdy:
-			case spv::OpDPdyCoarse:
-			case spv::OpFwidth:
-			case spv::OpFwidthCoarse:
-			case spv::OpDPdxFine:
-			case spv::OpDPdyFine:
-			case spv::OpFwidthFine:
-			case spv::OpAtomicLoad:
-			case spv::OpAtomicIAdd:
-			case spv::OpAtomicISub:
-			case spv::OpAtomicSMin:
-			case spv::OpAtomicSMax:
-			case spv::OpAtomicUMin:
-			case spv::OpAtomicUMax:
-			case spv::OpAtomicAnd:
-			case spv::OpAtomicOr:
-			case spv::OpAtomicXor:
-			case spv::OpAtomicIIncrement:
-			case spv::OpAtomicIDecrement:
-			case spv::OpAtomicExchange:
-			case spv::OpAtomicCompareExchange:
-			case spv::OpPhi:
-			case spv::OpImageSampleImplicitLod:
-			case spv::OpImageSampleExplicitLod:
-			case spv::OpImageSampleDrefImplicitLod:
-			case spv::OpImageSampleDrefExplicitLod:
-			case spv::OpImageSampleProjImplicitLod:
-			case spv::OpImageSampleProjExplicitLod:
-			case spv::OpImageSampleProjDrefImplicitLod:
-			case spv::OpImageSampleProjDrefExplicitLod:
-			case spv::OpImageGather:
-			case spv::OpImageDrefGather:
-			case spv::OpImageFetch:
-			case spv::OpImageQuerySizeLod:
-			case spv::OpImageQuerySize:
-			case spv::OpImageQueryLod:
-			case spv::OpImageQueryLevels:
-			case spv::OpImageQuerySamples:
-			case spv::OpImageRead:
-			case spv::OpImageTexelPointer:
-			case spv::OpGroupNonUniformElect:
-			case spv::OpGroupNonUniformAll:
-			case spv::OpGroupNonUniformAny:
-			case spv::OpGroupNonUniformAllEqual:
-			case spv::OpGroupNonUniformBroadcast:
-			case spv::OpGroupNonUniformBroadcastFirst:
-			case spv::OpGroupNonUniformBallot:
-			case spv::OpGroupNonUniformInverseBallot:
-			case spv::OpGroupNonUniformBallotBitExtract:
-			case spv::OpGroupNonUniformBallotBitCount:
-			case spv::OpGroupNonUniformBallotFindLSB:
-			case spv::OpGroupNonUniformBallotFindMSB:
-			case spv::OpGroupNonUniformShuffle:
-			case spv::OpGroupNonUniformShuffleXor:
-			case spv::OpGroupNonUniformShuffleUp:
-			case spv::OpGroupNonUniformShuffleDown:
-			case spv::OpGroupNonUniformIAdd:
-			case spv::OpGroupNonUniformFAdd:
-			case spv::OpGroupNonUniformIMul:
-			case spv::OpGroupNonUniformFMul:
-			case spv::OpGroupNonUniformSMin:
-			case spv::OpGroupNonUniformUMin:
-			case spv::OpGroupNonUniformFMin:
-			case spv::OpGroupNonUniformSMax:
-			case spv::OpGroupNonUniformUMax:
-			case spv::OpGroupNonUniformFMax:
-			case spv::OpGroupNonUniformBitwiseAnd:
-			case spv::OpGroupNonUniformBitwiseOr:
-			case spv::OpGroupNonUniformBitwiseXor:
-			case spv::OpGroupNonUniformLogicalAnd:
-			case spv::OpGroupNonUniformLogicalOr:
-			case spv::OpGroupNonUniformLogicalXor:
-			case spv::OpCopyObject:
-			case spv::OpCopyLogical:
-			case spv::OpArrayLength:
-				// Instructions that yield an intermediate value or divergent pointer
+		case spv::OpCompositeConstruct:
+		case spv::OpCompositeInsert:
+		case spv::OpCompositeExtract:
+		case spv::OpVectorShuffle:
+		case spv::OpVectorTimesScalar:
+		case spv::OpMatrixTimesScalar:
+		case spv::OpMatrixTimesVector:
+		case spv::OpVectorTimesMatrix:
+		case spv::OpMatrixTimesMatrix:
+		case spv::OpOuterProduct:
+		case spv::OpTranspose:
+		case spv::OpVectorExtractDynamic:
+		case spv::OpVectorInsertDynamic:
+		// Unary ops
+		case spv::OpNot:
+		case spv::OpBitFieldInsert:
+		case spv::OpBitFieldSExtract:
+		case spv::OpBitFieldUExtract:
+		case spv::OpBitReverse:
+		case spv::OpBitCount:
+		case spv::OpSNegate:
+		case spv::OpFNegate:
+		case spv::OpLogicalNot:
+		case spv::OpQuantizeToF16:
+		// Binary ops
+		case spv::OpIAdd:
+		case spv::OpISub:
+		case spv::OpIMul:
+		case spv::OpSDiv:
+		case spv::OpUDiv:
+		case spv::OpFAdd:
+		case spv::OpFSub:
+		case spv::OpFMul:
+		case spv::OpFDiv:
+		case spv::OpFMod:
+		case spv::OpFRem:
+		case spv::OpFOrdEqual:
+		case spv::OpFUnordEqual:
+		case spv::OpFOrdNotEqual:
+		case spv::OpFUnordNotEqual:
+		case spv::OpFOrdLessThan:
+		case spv::OpFUnordLessThan:
+		case spv::OpFOrdGreaterThan:
+		case spv::OpFUnordGreaterThan:
+		case spv::OpFOrdLessThanEqual:
+		case spv::OpFUnordLessThanEqual:
+		case spv::OpFOrdGreaterThanEqual:
+		case spv::OpFUnordGreaterThanEqual:
+		case spv::OpSMod:
+		case spv::OpSRem:
+		case spv::OpUMod:
+		case spv::OpIEqual:
+		case spv::OpINotEqual:
+		case spv::OpUGreaterThan:
+		case spv::OpSGreaterThan:
+		case spv::OpUGreaterThanEqual:
+		case spv::OpSGreaterThanEqual:
+		case spv::OpULessThan:
+		case spv::OpSLessThan:
+		case spv::OpULessThanEqual:
+		case spv::OpSLessThanEqual:
+		case spv::OpShiftRightLogical:
+		case spv::OpShiftRightArithmetic:
+		case spv::OpShiftLeftLogical:
+		case spv::OpBitwiseOr:
+		case spv::OpBitwiseXor:
+		case spv::OpBitwiseAnd:
+		case spv::OpLogicalOr:
+		case spv::OpLogicalAnd:
+		case spv::OpLogicalEqual:
+		case spv::OpLogicalNotEqual:
+		case spv::OpUMulExtended:
+		case spv::OpSMulExtended:
+		case spv::OpIAddCarry:
+		case spv::OpISubBorrow:
+		case spv::OpDot:
+		case spv::OpConvertFToU:
+		case spv::OpConvertFToS:
+		case spv::OpConvertSToF:
+		case spv::OpConvertUToF:
+		case spv::OpBitcast:
+		case spv::OpSelect:
+		case spv::OpIsInf:
+		case spv::OpIsNan:
+		case spv::OpAny:
+		case spv::OpAll:
+		case spv::OpDPdx:
+		case spv::OpDPdxCoarse:
+		case spv::OpDPdy:
+		case spv::OpDPdyCoarse:
+		case spv::OpFwidth:
+		case spv::OpFwidthCoarse:
+		case spv::OpDPdxFine:
+		case spv::OpDPdyFine:
+		case spv::OpFwidthFine:
+		case spv::OpAtomicLoad:
+		case spv::OpAtomicIAdd:
+		case spv::OpAtomicISub:
+		case spv::OpAtomicSMin:
+		case spv::OpAtomicSMax:
+		case spv::OpAtomicUMin:
+		case spv::OpAtomicUMax:
+		case spv::OpAtomicAnd:
+		case spv::OpAtomicOr:
+		case spv::OpAtomicXor:
+		case spv::OpAtomicIIncrement:
+		case spv::OpAtomicIDecrement:
+		case spv::OpAtomicExchange:
+		case spv::OpAtomicCompareExchange:
+		case spv::OpPhi:
+		case spv::OpImageSampleImplicitLod:
+		case spv::OpImageSampleExplicitLod:
+		case spv::OpImageSampleDrefImplicitLod:
+		case spv::OpImageSampleDrefExplicitLod:
+		case spv::OpImageSampleProjImplicitLod:
+		case spv::OpImageSampleProjExplicitLod:
+		case spv::OpImageSampleProjDrefImplicitLod:
+		case spv::OpImageSampleProjDrefExplicitLod:
+		case spv::OpImageGather:
+		case spv::OpImageDrefGather:
+		case spv::OpImageFetch:
+		case spv::OpImageQuerySizeLod:
+		case spv::OpImageQuerySize:
+		case spv::OpImageQueryLod:
+		case spv::OpImageQueryLevels:
+		case spv::OpImageQuerySamples:
+		case spv::OpImageRead:
+		case spv::OpImageTexelPointer:
+		case spv::OpGroupNonUniformElect:
+		case spv::OpGroupNonUniformAll:
+		case spv::OpGroupNonUniformAny:
+		case spv::OpGroupNonUniformAllEqual:
+		case spv::OpGroupNonUniformBroadcast:
+		case spv::OpGroupNonUniformBroadcastFirst:
+		case spv::OpGroupNonUniformBallot:
+		case spv::OpGroupNonUniformInverseBallot:
+		case spv::OpGroupNonUniformBallotBitExtract:
+		case spv::OpGroupNonUniformBallotBitCount:
+		case spv::OpGroupNonUniformBallotFindLSB:
+		case spv::OpGroupNonUniformBallotFindMSB:
+		case spv::OpGroupNonUniformShuffle:
+		case spv::OpGroupNonUniformShuffleXor:
+		case spv::OpGroupNonUniformShuffleUp:
+		case spv::OpGroupNonUniformShuffleDown:
+		case spv::OpGroupNonUniformIAdd:
+		case spv::OpGroupNonUniformFAdd:
+		case spv::OpGroupNonUniformIMul:
+		case spv::OpGroupNonUniformFMul:
+		case spv::OpGroupNonUniformSMin:
+		case spv::OpGroupNonUniformUMin:
+		case spv::OpGroupNonUniformFMin:
+		case spv::OpGroupNonUniformSMax:
+		case spv::OpGroupNonUniformUMax:
+		case spv::OpGroupNonUniformFMax:
+		case spv::OpGroupNonUniformBitwiseAnd:
+		case spv::OpGroupNonUniformBitwiseOr:
+		case spv::OpGroupNonUniformBitwiseXor:
+		case spv::OpGroupNonUniformLogicalAnd:
+		case spv::OpGroupNonUniformLogicalOr:
+		case spv::OpGroupNonUniformLogicalXor:
+		case spv::OpCopyObject:
+		case spv::OpCopyLogical:
+		case spv::OpArrayLength:
+			// Instructions that yield an intermediate value or divergent pointer
+			DefineResult(insn);
+			break;
+
+		case spv::OpExtInst:
+			switch(getExtension(insn.word(3)).name)
+			{
+			case Extension::GLSLstd450:
 				DefineResult(insn);
 				break;
-
-			case spv::OpExtInst:
-				switch(getExtension(insn.word(3)).name)
-				{
-					case Extension::GLSLstd450:
-						DefineResult(insn);
-						break;
-					case Extension::OpenCLDebugInfo100:
-						DefineOpenCLDebugInfo100(insn);
-						break;
-					default:
-						UNREACHABLE("Unexpected Extension name %d", int(getExtension(insn.word(3)).name));
-						break;
-				}
+			case Extension::OpenCLDebugInfo100:
+				DefineOpenCLDebugInfo100(insn);
 				break;
-
-			case spv::OpStore:
-			case spv::OpAtomicStore:
-			case spv::OpImageWrite:
-			case spv::OpCopyMemory:
-			case spv::OpMemoryBarrier:
-				// Don't need to do anything during analysis pass
+			default:
+				UNREACHABLE("Unexpected Extension name %d", int(getExtension(insn.word(3)).name));
 				break;
+			}
+			break;
 
-			case spv::OpControlBarrier:
-				modes.ContainsControlBarriers = true;
-				break;
+		case spv::OpStore:
+		case spv::OpAtomicStore:
+		case spv::OpImageWrite:
+		case spv::OpCopyMemory:
+		case spv::OpMemoryBarrier:
+			// Don't need to do anything during analysis pass
+			break;
 
-			case spv::OpExtension:
+		case spv::OpControlBarrier:
+			modes.ContainsControlBarriers = true;
+			break;
+
+		case spv::OpExtension:
 			{
 				auto ext = insn.string(1);
 				// Part of core SPIR-V 1.3. Vulkan 1.1 implementations must also accept the pre-1.3
@@ -744,11 +747,11 @@
 				if(!strcmp(ext, "SPV_EXT_shader_stencil_export")) break;
 				if(!strcmp(ext, "SPV_KHR_float_controls")) break;
 				UNSUPPORTED("SPIR-V Extension: %s", ext);
-				break;
 			}
+			break;
 
-			default:
-				UNSUPPORTED("%s", OpcodeName(opcode));
+		default:
+			UNSUPPORTED("%s", OpcodeName(opcode));
 		}
 	}
 
@@ -786,7 +789,7 @@
 	// member. All members of such a structure are builtins.
 	switch(insn.opcode())
 	{
-		case spv::OpTypeStruct:
+	case spv::OpTypeStruct:
 		{
 			auto d = memberDecorations.find(resultId);
 			if(d != memberDecorations.end())
@@ -800,27 +803,27 @@
 					}
 				}
 			}
-			break;
 		}
-		case spv::OpTypePointer:
+		break;
+	case spv::OpTypePointer:
 		{
 			Type::ID elementTypeId = insn.word(3);
 			type.element = elementTypeId;
 			type.isBuiltInBlock = getType(elementTypeId).isBuiltInBlock;
 			type.storageClass = static_cast<spv::StorageClass>(insn.word(2));
-			break;
 		}
-		case spv::OpTypeVector:
-		case spv::OpTypeMatrix:
-		case spv::OpTypeArray:
-		case spv::OpTypeRuntimeArray:
+		break;
+	case spv::OpTypeVector:
+	case spv::OpTypeMatrix:
+	case spv::OpTypeArray:
+	case spv::OpTypeRuntimeArray:
 		{
 			Type::ID elementTypeId = insn.word(2);
 			type.element = elementTypeId;
-			break;
 		}
-		default:
-			break;
+		break;
+	default:
+		break;
 	}
 }
 
@@ -930,34 +933,34 @@
 	auto mode = static_cast<spv::ExecutionMode>(insn.word(2));
 	switch(mode)
 	{
-		case spv::ExecutionModeEarlyFragmentTests:
-			modes.EarlyFragmentTests = true;
-			break;
-		case spv::ExecutionModeDepthReplacing:
-			modes.DepthReplacing = true;
-			break;
-		case spv::ExecutionModeDepthGreater:
-			// TODO(b/177915067): Can be used to optimize depth test, currently unused.
-			modes.DepthGreater = true;
-			break;
-		case spv::ExecutionModeDepthLess:
-			// TODO(b/177915067): Can be used to optimize depth test, currently unused.
-			modes.DepthLess = true;
-			break;
-		case spv::ExecutionModeDepthUnchanged:
-			// TODO(b/177915067): Can be used to optimize depth test, currently unused.
-			modes.DepthUnchanged = true;
-			break;
-		case spv::ExecutionModeLocalSize:
-			modes.WorkgroupSizeX = insn.word(3);
-			modes.WorkgroupSizeY = insn.word(4);
-			modes.WorkgroupSizeZ = insn.word(5);
-			break;
-		case spv::ExecutionModeOriginUpperLeft:
-			// This is always the case for a Vulkan shader. Do nothing.
-			break;
-		default:
-			UNREACHABLE("Execution mode: %d", int(mode));
+	case spv::ExecutionModeEarlyFragmentTests:
+		modes.EarlyFragmentTests = true;
+		break;
+	case spv::ExecutionModeDepthReplacing:
+		modes.DepthReplacing = true;
+		break;
+	case spv::ExecutionModeDepthGreater:
+		// TODO(b/177915067): Can be used to optimize depth test, currently unused.
+		modes.DepthGreater = true;
+		break;
+	case spv::ExecutionModeDepthLess:
+		// TODO(b/177915067): Can be used to optimize depth test, currently unused.
+		modes.DepthLess = true;
+		break;
+	case spv::ExecutionModeDepthUnchanged:
+		// TODO(b/177915067): Can be used to optimize depth test, currently unused.
+		modes.DepthUnchanged = true;
+		break;
+	case spv::ExecutionModeLocalSize:
+		modes.WorkgroupSizeX = insn.word(3);
+		modes.WorkgroupSizeY = insn.word(4);
+		modes.WorkgroupSizeZ = insn.word(5);
+		break;
+	case spv::ExecutionModeOriginUpperLeft:
+		// This is always the case for a Vulkan shader. Do nothing.
+		break;
+	default:
+		UNREACHABLE("Execution mode: %d", int(mode));
 	}
 }
 
@@ -968,37 +971,37 @@
 	// already been described (and so their sizes determined)
 	switch(insn.opcode())
 	{
-		case spv::OpTypeVoid:
-		case spv::OpTypeSampler:
-		case spv::OpTypeImage:
-		case spv::OpTypeSampledImage:
-		case spv::OpTypeFunction:
-		case spv::OpTypeRuntimeArray:
-			// Objects that don't consume any space.
-			// Descriptor-backed objects currently only need exist at compile-time.
-			// Runtime arrays don't appear in places where their size would be interesting
-			return 0;
+	case spv::OpTypeVoid:
+	case spv::OpTypeSampler:
+	case spv::OpTypeImage:
+	case spv::OpTypeSampledImage:
+	case spv::OpTypeFunction:
+	case spv::OpTypeRuntimeArray:
+		// Objects that don't consume any space.
+		// Descriptor-backed objects currently only need exist at compile-time.
+		// Runtime arrays don't appear in places where their size would be interesting
+		return 0;
 
-		case spv::OpTypeBool:
-		case spv::OpTypeFloat:
-		case spv::OpTypeInt:
-			// All the fundamental types are 1 component. If we ever add support for 8/16/64-bit components,
-			// we might need to change this, but only 32 bit components are required for Vulkan 1.1.
-			return 1;
+	case spv::OpTypeBool:
+	case spv::OpTypeFloat:
+	case spv::OpTypeInt:
+		// All the fundamental types are 1 component. If we ever add support for 8/16/64-bit components,
+		// we might need to change this, but only 32 bit components are required for Vulkan 1.1.
+		return 1;
 
-		case spv::OpTypeVector:
-		case spv::OpTypeMatrix:
-			// Vectors and matrices both consume element count * element size.
-			return getType(insn.word(2)).componentCount * insn.word(3);
+	case spv::OpTypeVector:
+	case spv::OpTypeMatrix:
+		// Vectors and matrices both consume element count * element size.
+		return getType(insn.word(2)).componentCount * insn.word(3);
 
-		case spv::OpTypeArray:
+	case spv::OpTypeArray:
 		{
 			// Element count * element size. Array sizes come from constant ids.
 			auto arraySize = GetConstScalarInt(insn.word(3));
 			return getType(insn.word(2)).componentCount * arraySize;
 		}
 
-		case spv::OpTypeStruct:
+	case spv::OpTypeStruct:
 		{
 			uint32_t size = 0;
 			for(uint32_t i = 2u; i < insn.wordCount(); i++)
@@ -1008,14 +1011,14 @@
 			return size;
 		}
 
-		case spv::OpTypePointer:
-			// Runtime representation of a pointer is a per-lane index.
-			// Note: clients are expected to look through the pointer if they want the pointee size instead.
-			return 1;
+	case spv::OpTypePointer:
+		// Runtime representation of a pointer is a per-lane index.
+		// Note: clients are expected to look through the pointer if they want the pointee size instead.
+		return 1;
 
-		default:
-			UNREACHABLE("%s", OpcodeName(insn.opcode()));
-			return 0;
+	default:
+		UNREACHABLE("%s", OpcodeName(insn.opcode()));
+		return 0;
 	}
 }
 
@@ -1038,32 +1041,32 @@
 	auto const &obj = getType(id);
 	switch(obj.opcode())
 	{
-		case spv::OpTypePointer:
-			return VisitInterfaceInner(obj.definition.word(3), d, f);
-		case spv::OpTypeMatrix:
-			for(auto i = 0u; i < obj.definition.word(3); i++, d.Location++)
-			{
-				// consumes same components of N consecutive locations
-				VisitInterfaceInner(obj.definition.word(2), d, f);
-			}
-			return d.Location;
-		case spv::OpTypeVector:
-			for(auto i = 0u; i < obj.definition.word(3); i++, d.Component++)
-			{
-				// consumes N consecutive components in the same location
-				VisitInterfaceInner(obj.definition.word(2), d, f);
-			}
-			return d.Location + 1;
-		case spv::OpTypeFloat:
-			f(d, ATTRIBTYPE_FLOAT);
-			return d.Location + 1;
-		case spv::OpTypeInt:
-			f(d, obj.definition.word(3) ? ATTRIBTYPE_INT : ATTRIBTYPE_UINT);
-			return d.Location + 1;
-		case spv::OpTypeBool:
-			f(d, ATTRIBTYPE_UINT);
-			return d.Location + 1;
-		case spv::OpTypeStruct:
+	case spv::OpTypePointer:
+		return VisitInterfaceInner(obj.definition.word(3), d, f);
+	case spv::OpTypeMatrix:
+		for(auto i = 0u; i < obj.definition.word(3); i++, d.Location++)
+		{
+			// consumes same components of N consecutive locations
+			VisitInterfaceInner(obj.definition.word(2), d, f);
+		}
+		return d.Location;
+	case spv::OpTypeVector:
+		for(auto i = 0u; i < obj.definition.word(3); i++, d.Component++)
+		{
+			// consumes N consecutive components in the same location
+			VisitInterfaceInner(obj.definition.word(2), d, f);
+		}
+		return d.Location + 1;
+	case spv::OpTypeFloat:
+		f(d, ATTRIBTYPE_FLOAT);
+		return d.Location + 1;
+	case spv::OpTypeInt:
+		f(d, obj.definition.word(3) ? ATTRIBTYPE_INT : ATTRIBTYPE_UINT);
+		return d.Location + 1;
+	case spv::OpTypeBool:
+		f(d, ATTRIBTYPE_UINT);
+		return d.Location + 1;
+	case spv::OpTypeStruct:
 		{
 			// iterate over members, which may themselves have Location/Component decorations
 			for(auto i = 0u; i < obj.definition.wordCount() - 2; i++)
@@ -1075,7 +1078,7 @@
 			}
 			return d.Location;
 		}
-		case spv::OpTypeArray:
+	case spv::OpTypeArray:
 		{
 			auto arraySize = GetConstScalarInt(obj.definition.word(3));
 			for(auto i = 0u; i < arraySize; i++)
@@ -1084,9 +1087,9 @@
 			}
 			return d.Location;
 		}
-		default:
-			// Intentionally partial; most opcodes do not participate in type hierarchies
-			return 0;
+	default:
+		// Intentionally partial; most opcodes do not participate in type hierarchies
+		return 0;
 	}
 }
 
@@ -1114,30 +1117,30 @@
 		auto &type = getType(typeId);
 		switch(type.opcode())
 		{
-			case spv::OpTypeStruct:
+		case spv::OpTypeStruct:
 			{
 				int memberIndex = GetConstScalarInt(indexIds[i]);
 				ApplyDecorationsForIdMember(d, typeId, memberIndex);
 				typeId = type.definition.word(2u + memberIndex);
-				break;
 			}
-			case spv::OpTypeArray:
-			case spv::OpTypeRuntimeArray:
-				if(dd->InputAttachmentIndex >= 0)
-				{
-					dd->InputAttachmentIndex += GetConstScalarInt(indexIds[i]);
-				}
-				typeId = type.element;
-				break;
-			case spv::OpTypeVector:
-				typeId = type.element;
-				break;
-			case spv::OpTypeMatrix:
-				typeId = type.element;
-				d->InsideMatrix = true;
-				break;
-			default:
-				UNREACHABLE("%s", OpcodeName(type.definition.opcode()));
+			break;
+		case spv::OpTypeArray:
+		case spv::OpTypeRuntimeArray:
+			if(dd->InputAttachmentIndex >= 0)
+			{
+				dd->InputAttachmentIndex += GetConstScalarInt(indexIds[i]);
+			}
+			typeId = type.element;
+			break;
+		case spv::OpTypeVector:
+			typeId = type.element;
+			break;
+		case spv::OpTypeMatrix:
+			typeId = type.element;
+			d->InsideMatrix = true;
+			break;
+		default:
+			UNREACHABLE("%s", OpcodeName(type.definition.opcode()));
 		}
 	}
 }
@@ -1186,17 +1189,17 @@
 
 		switch(type.definition.opcode())
 		{
-			case spv::OpTypeStruct:
+		case spv::OpTypeStruct:
 			{
 				int memberIndex = GetConstScalarInt(indexIds[i]);
 				ApplyDecorationsForIdMember(&d, typeId, memberIndex);
 				ASSERT(d.HasOffset);
 				constantOffset += d.Offset;
 				typeId = type.definition.word(2u + memberIndex);
-				break;
 			}
-			case spv::OpTypeArray:
-			case spv::OpTypeRuntimeArray:
+			break;
+		case spv::OpTypeArray:
+		case spv::OpTypeRuntimeArray:
 			{
 				// TODO: b/127950082: Check bounds.
 				ASSERT(d.HasArrayStride);
@@ -1210,9 +1213,9 @@
 					ptr += SIMD::Int(d.ArrayStride) * state->getIntermediate(indexIds[i]).Int(0);
 				}
 				typeId = type.element;
-				break;
 			}
-			case spv::OpTypeMatrix:
+			break;
+		case spv::OpTypeMatrix:
 			{
 				// TODO: b/127950082: Check bounds.
 				ASSERT(d.HasMatrixStride);
@@ -1228,9 +1231,9 @@
 					ptr += SIMD::Int(columnStride) * state->getIntermediate(indexIds[i]).Int(0);
 				}
 				typeId = type.element;
-				break;
 			}
-			case spv::OpTypeVector:
+			break;
+		case spv::OpTypeVector:
 			{
 				auto elemStride = (d.InsideMatrix && d.HasRowMajor && d.RowMajor) ? d.MatrixStride : static_cast<int32_t>(sizeof(float));
 				auto &obj = getObject(indexIds[i]);
@@ -1243,10 +1246,10 @@
 					ptr += SIMD::Int(elemStride) * state->getIntermediate(indexIds[i]).Int(0);
 				}
 				typeId = type.element;
-				break;
 			}
-			default:
-				UNREACHABLE("%s", OpcodeName(type.definition.opcode()));
+			break;
+		default:
+			UNREACHABLE("%s", OpcodeName(type.definition.opcode()));
 		}
 	}
 
@@ -1270,7 +1273,7 @@
 		auto &type = getType(typeId);
 		switch(type.opcode())
 		{
-			case spv::OpTypeStruct:
+		case spv::OpTypeStruct:
 			{
 				int memberIndex = GetConstScalarInt(indexIds[i]);
 				int offsetIntoStruct = 0;
@@ -1281,13 +1284,13 @@
 				}
 				constantOffset += offsetIntoStruct;
 				typeId = type.definition.word(2u + memberIndex);
-				break;
 			}
+			break;
 
-			case spv::OpTypeVector:
-			case spv::OpTypeMatrix:
-			case spv::OpTypeArray:
-			case spv::OpTypeRuntimeArray:
+		case spv::OpTypeVector:
+		case spv::OpTypeMatrix:
+		case spv::OpTypeArray:
+		case spv::OpTypeRuntimeArray:
 			{
 				// TODO(b/127950082): Check bounds.
 				if(getType(baseObject).storageClass == spv::StorageClassUniformConstant)
@@ -1323,11 +1326,11 @@
 					}
 				}
 				typeId = type.element;
-				break;
 			}
+			break;
 
-			default:
-				UNREACHABLE("%s", OpcodeName(type.opcode()));
+		default:
+			UNREACHABLE("%s", OpcodeName(type.opcode()));
 		}
 	}
 
@@ -1347,7 +1350,7 @@
 		auto &type = getType(typeId);
 		switch(type.opcode())
 		{
-			case spv::OpTypeStruct:
+		case spv::OpTypeStruct:
 			{
 				int memberIndex = indexes[i];
 				int offsetIntoStruct = 0;
@@ -1358,22 +1361,22 @@
 				}
 				componentOffset += offsetIntoStruct;
 				typeId = type.definition.word(2u + memberIndex);
-				break;
 			}
+			break;
 
-			case spv::OpTypeVector:
-			case spv::OpTypeMatrix:
-			case spv::OpTypeArray:
+		case spv::OpTypeVector:
+		case spv::OpTypeMatrix:
+		case spv::OpTypeArray:
 			{
 				auto elementType = type.definition.word(2);
 				auto stride = getType(elementType).componentCount;
 				componentOffset += stride * indexes[i];
 				typeId = elementType;
-				break;
 			}
+			break;
 
-			default:
-				UNREACHABLE("%s", OpcodeName(type.opcode()));
+		default:
+			UNREACHABLE("%s", OpcodeName(type.opcode()));
 		}
 	}
 
@@ -1384,58 +1387,58 @@
 {
 	switch(decoration)
 	{
-		case spv::DecorationLocation:
-			HasLocation = true;
-			Location = static_cast<int32_t>(arg);
-			break;
-		case spv::DecorationComponent:
-			HasComponent = true;
-			Component = arg;
-			break;
-		case spv::DecorationBuiltIn:
-			HasBuiltIn = true;
-			BuiltIn = static_cast<spv::BuiltIn>(arg);
-			break;
-		case spv::DecorationFlat:
-			Flat = true;
-			break;
-		case spv::DecorationNoPerspective:
-			NoPerspective = true;
-			break;
-		case spv::DecorationCentroid:
-			Centroid = true;
-			break;
-		case spv::DecorationBlock:
-			Block = true;
-			break;
-		case spv::DecorationBufferBlock:
-			BufferBlock = true;
-			break;
-		case spv::DecorationOffset:
-			HasOffset = true;
-			Offset = static_cast<int32_t>(arg);
-			break;
-		case spv::DecorationArrayStride:
-			HasArrayStride = true;
-			ArrayStride = static_cast<int32_t>(arg);
-			break;
-		case spv::DecorationMatrixStride:
-			HasMatrixStride = true;
-			MatrixStride = static_cast<int32_t>(arg);
-			break;
-		case spv::DecorationRelaxedPrecision:
-			RelaxedPrecision = true;
-			break;
-		case spv::DecorationRowMajor:
-			HasRowMajor = true;
-			RowMajor = true;
-			break;
-		case spv::DecorationColMajor:
-			HasRowMajor = true;
-			RowMajor = false;
-		default:
-			// Intentionally partial, there are many decorations we just don't care about.
-			break;
+	case spv::DecorationLocation:
+		HasLocation = true;
+		Location = static_cast<int32_t>(arg);
+		break;
+	case spv::DecorationComponent:
+		HasComponent = true;
+		Component = arg;
+		break;
+	case spv::DecorationBuiltIn:
+		HasBuiltIn = true;
+		BuiltIn = static_cast<spv::BuiltIn>(arg);
+		break;
+	case spv::DecorationFlat:
+		Flat = true;
+		break;
+	case spv::DecorationNoPerspective:
+		NoPerspective = true;
+		break;
+	case spv::DecorationCentroid:
+		Centroid = true;
+		break;
+	case spv::DecorationBlock:
+		Block = true;
+		break;
+	case spv::DecorationBufferBlock:
+		BufferBlock = true;
+		break;
+	case spv::DecorationOffset:
+		HasOffset = true;
+		Offset = static_cast<int32_t>(arg);
+		break;
+	case spv::DecorationArrayStride:
+		HasArrayStride = true;
+		ArrayStride = static_cast<int32_t>(arg);
+		break;
+	case spv::DecorationMatrixStride:
+		HasMatrixStride = true;
+		MatrixStride = static_cast<int32_t>(arg);
+		break;
+	case spv::DecorationRelaxedPrecision:
+		RelaxedPrecision = true;
+		break;
+	case spv::DecorationRowMajor:
+		HasRowMajor = true;
+		RowMajor = true;
+		break;
+	case spv::DecorationColMajor:
+		HasRowMajor = true;
+		RowMajor = false;
+	default:
+		// Intentionally partial, there are many decorations we just don't care about.
+		break;
 	}
 }
 
@@ -1535,15 +1538,15 @@
 
 	switch(getType(typeId).opcode())
 	{
-		case spv::OpTypePointer:
-		case spv::OpTypeImage:
-		case spv::OpTypeSampledImage:
-		case spv::OpTypeSampler:
-			object.kind = Object::Kind::Pointer;
-			break;
+	case spv::OpTypePointer:
+	case spv::OpTypeImage:
+	case spv::OpTypeSampledImage:
+	case spv::OpTypeSampler:
+		object.kind = Object::Kind::Pointer;
+		break;
 
-		default:
-			object.kind = Object::Kind::Intermediate;
+	default:
+		object.kind = Object::Kind::Intermediate;
 	}
 
 	object.definition = insn;
@@ -1554,29 +1557,29 @@
 {
 	switch(storageClass)
 	{
-		case spv::StorageClassUniform:
-		case spv::StorageClassStorageBuffer:
-			// Buffer resource access. robustBufferAccess feature applies.
+	case spv::StorageClassUniform:
+	case spv::StorageClassStorageBuffer:
+		// Buffer resource access. robustBufferAccess feature applies.
+		return robustBufferAccess ? OutOfBoundsBehavior::RobustBufferAccess
+		                          : OutOfBoundsBehavior::UndefinedBehavior;
+
+	case spv::StorageClassImage:
+		// VK_EXT_image_robustness requires nullifying out-of-bounds accesses.
+		// TODO(b/162327166): Only perform bounds checks when VK_EXT_image_robustness is enabled.
+		return OutOfBoundsBehavior::Nullify;
+
+	case spv::StorageClassInput:
+		if(executionModel == spv::ExecutionModelVertex)
+		{
+			// Vertex attributes follow robustBufferAccess rules.
 			return robustBufferAccess ? OutOfBoundsBehavior::RobustBufferAccess
 			                          : OutOfBoundsBehavior::UndefinedBehavior;
-
-		case spv::StorageClassImage:
-			// VK_EXT_image_robustness requires nullifying out-of-bounds accesses.
-			// TODO(b/162327166): Only perform bounds checks when VK_EXT_image_robustness is enabled.
-			return OutOfBoundsBehavior::Nullify;
-
-		case spv::StorageClassInput:
-			if(executionModel == spv::ExecutionModelVertex)
-			{
-				// Vertex attributes follow robustBufferAccess rules.
-				return robustBufferAccess ? OutOfBoundsBehavior::RobustBufferAccess
-				                          : OutOfBoundsBehavior::UndefinedBehavior;
-			}
-			// Fall through to default case.
-		default:
-			// TODO(b/137183137): Optimize if the pointer resulted from OpInBoundsAccessChain.
-			// TODO(b/131224163): Optimize cases statically known to be within bounds.
-			return OutOfBoundsBehavior::UndefinedValue;
+		}
+		// Fall through to default case.
+	default:
+		// TODO(b/137183137): Optimize if the pointer resulted from OpInBoundsAccessChain.
+		// TODO(b/131224163): Optimize cases statically known to be within bounds.
+		return OutOfBoundsBehavior::UndefinedValue;
 	}
 
 	return OutOfBoundsBehavior::Nullify;
@@ -1590,7 +1593,7 @@
 	{
 		switch(insn.opcode())
 		{
-			case spv::OpVariable:
+		case spv::OpVariable:
 			{
 				auto resultPointerType = getType(insn.resultTypeId());
 				auto pointeeType = getType(resultPointerType.element);
@@ -1599,33 +1602,34 @@
 				{
 					routine->createVariable(insn.resultId(), pointeeType.componentCount);
 				}
-				break;
 			}
-			case spv::OpPhi:
+			break;
+
+		case spv::OpPhi:
 			{
 				auto type = getType(insn.resultTypeId());
 				routine->phis.emplace(insn.resultId(), SpirvRoutine::Variable(type.componentCount));
-				break;
 			}
+			break;
 
-			case spv::OpImageDrefGather:
-			case spv::OpImageFetch:
-			case spv::OpImageGather:
-			case spv::OpImageQueryLod:
-			case spv::OpImageSampleDrefExplicitLod:
-			case spv::OpImageSampleDrefImplicitLod:
-			case spv::OpImageSampleExplicitLod:
-			case spv::OpImageSampleImplicitLod:
-			case spv::OpImageSampleProjDrefExplicitLod:
-			case spv::OpImageSampleProjDrefImplicitLod:
-			case spv::OpImageSampleProjExplicitLod:
-			case spv::OpImageSampleProjImplicitLod:
-				routine->samplerCache.emplace(insn.resultId(), SpirvRoutine::SamplerCache{});
-				break;
+		case spv::OpImageDrefGather:
+		case spv::OpImageFetch:
+		case spv::OpImageGather:
+		case spv::OpImageQueryLod:
+		case spv::OpImageSampleDrefExplicitLod:
+		case spv::OpImageSampleDrefImplicitLod:
+		case spv::OpImageSampleExplicitLod:
+		case spv::OpImageSampleImplicitLod:
+		case spv::OpImageSampleProjDrefExplicitLod:
+		case spv::OpImageSampleProjDrefImplicitLod:
+		case spv::OpImageSampleProjExplicitLod:
+		case spv::OpImageSampleProjImplicitLod:
+			routine->samplerCache.emplace(insn.resultId(), SpirvRoutine::SamplerCache{});
+			break;
 
-			default:
-				// Nothing else produces interface variables, so can all be safely ignored.
-				break;
+		default:
+			// Nothing else produces interface variables, so can all be safely ignored.
+			break;
 		}
 	}
 }
@@ -1659,13 +1663,13 @@
 		auto res = EmitInstruction(insn, state);
 		switch(res)
 		{
-			case EmitResult::Continue:
-				continue;
-			case EmitResult::Terminator:
-				break;
-			default:
-				UNREACHABLE("Unexpected EmitResult %d", int(res));
-				break;
+		case EmitResult::Continue:
+			continue;
+		case EmitResult::Terminator:
+			break;
+		default:
+			UNREACHABLE("Unexpected EmitResult %d", int(res));
+			break;
 		}
 	}
 }
@@ -1692,371 +1696,371 @@
 
 	switch(opcode)
 	{
-		case spv::OpTypeVoid:
-		case spv::OpTypeInt:
-		case spv::OpTypeFloat:
-		case spv::OpTypeBool:
-		case spv::OpTypeVector:
-		case spv::OpTypeArray:
-		case spv::OpTypeRuntimeArray:
-		case spv::OpTypeMatrix:
-		case spv::OpTypeStruct:
-		case spv::OpTypePointer:
-		case spv::OpTypeFunction:
-		case spv::OpTypeImage:
-		case spv::OpTypeSampledImage:
-		case spv::OpTypeSampler:
-		case spv::OpExecutionMode:
-		case spv::OpMemoryModel:
-		case spv::OpFunction:
-		case spv::OpFunctionEnd:
-		case spv::OpConstant:
-		case spv::OpConstantNull:
-		case spv::OpConstantTrue:
-		case spv::OpConstantFalse:
-		case spv::OpConstantComposite:
-		case spv::OpSpecConstant:
-		case spv::OpSpecConstantTrue:
-		case spv::OpSpecConstantFalse:
-		case spv::OpSpecConstantComposite:
-		case spv::OpSpecConstantOp:
-		case spv::OpUndef:
-		case spv::OpExtension:
-		case spv::OpCapability:
-		case spv::OpEntryPoint:
-		case spv::OpExtInstImport:
-		case spv::OpDecorate:
-		case spv::OpMemberDecorate:
-		case spv::OpGroupDecorate:
-		case spv::OpGroupMemberDecorate:
-		case spv::OpDecorationGroup:
-		case spv::OpDecorateId:
-		case spv::OpDecorateString:
-		case spv::OpMemberDecorateString:
-		case spv::OpName:
-		case spv::OpMemberName:
-		case spv::OpSource:
-		case spv::OpSourceContinued:
-		case spv::OpSourceExtension:
-		case spv::OpNoLine:
-		case spv::OpModuleProcessed:
-		case spv::OpString:
-			// Nothing to do at emit time. These are either fully handled at analysis time,
-			// or don't require any work at all.
-			return EmitResult::Continue;
+	case spv::OpTypeVoid:
+	case spv::OpTypeInt:
+	case spv::OpTypeFloat:
+	case spv::OpTypeBool:
+	case spv::OpTypeVector:
+	case spv::OpTypeArray:
+	case spv::OpTypeRuntimeArray:
+	case spv::OpTypeMatrix:
+	case spv::OpTypeStruct:
+	case spv::OpTypePointer:
+	case spv::OpTypeFunction:
+	case spv::OpTypeImage:
+	case spv::OpTypeSampledImage:
+	case spv::OpTypeSampler:
+	case spv::OpExecutionMode:
+	case spv::OpMemoryModel:
+	case spv::OpFunction:
+	case spv::OpFunctionEnd:
+	case spv::OpConstant:
+	case spv::OpConstantNull:
+	case spv::OpConstantTrue:
+	case spv::OpConstantFalse:
+	case spv::OpConstantComposite:
+	case spv::OpSpecConstant:
+	case spv::OpSpecConstantTrue:
+	case spv::OpSpecConstantFalse:
+	case spv::OpSpecConstantComposite:
+	case spv::OpSpecConstantOp:
+	case spv::OpUndef:
+	case spv::OpExtension:
+	case spv::OpCapability:
+	case spv::OpEntryPoint:
+	case spv::OpExtInstImport:
+	case spv::OpDecorate:
+	case spv::OpMemberDecorate:
+	case spv::OpGroupDecorate:
+	case spv::OpGroupMemberDecorate:
+	case spv::OpDecorationGroup:
+	case spv::OpDecorateId:
+	case spv::OpDecorateString:
+	case spv::OpMemberDecorateString:
+	case spv::OpName:
+	case spv::OpMemberName:
+	case spv::OpSource:
+	case spv::OpSourceContinued:
+	case spv::OpSourceExtension:
+	case spv::OpNoLine:
+	case spv::OpModuleProcessed:
+	case spv::OpString:
+		// Nothing to do at emit time. These are either fully handled at analysis time,
+		// or don't require any work at all.
+		return EmitResult::Continue;
 
-		case spv::OpLine:
-			return EmitLine(insn, state);
+	case spv::OpLine:
+		return EmitLine(insn, state);
 
-		case spv::OpLabel:
-			return EmitResult::Continue;
+	case spv::OpLabel:
+		return EmitResult::Continue;
 
-		case spv::OpVariable:
-			return EmitVariable(insn, state);
+	case spv::OpVariable:
+		return EmitVariable(insn, state);
 
-		case spv::OpLoad:
-		case spv::OpAtomicLoad:
-			return EmitLoad(insn, state);
+	case spv::OpLoad:
+	case spv::OpAtomicLoad:
+		return EmitLoad(insn, state);
 
-		case spv::OpStore:
-		case spv::OpAtomicStore:
-			return EmitStore(insn, state);
+	case spv::OpStore:
+	case spv::OpAtomicStore:
+		return EmitStore(insn, state);
 
-		case spv::OpAtomicIAdd:
-		case spv::OpAtomicISub:
-		case spv::OpAtomicSMin:
-		case spv::OpAtomicSMax:
-		case spv::OpAtomicUMin:
-		case spv::OpAtomicUMax:
-		case spv::OpAtomicAnd:
-		case spv::OpAtomicOr:
-		case spv::OpAtomicXor:
-		case spv::OpAtomicIIncrement:
-		case spv::OpAtomicIDecrement:
-		case spv::OpAtomicExchange:
-			return EmitAtomicOp(insn, state);
+	case spv::OpAtomicIAdd:
+	case spv::OpAtomicISub:
+	case spv::OpAtomicSMin:
+	case spv::OpAtomicSMax:
+	case spv::OpAtomicUMin:
+	case spv::OpAtomicUMax:
+	case spv::OpAtomicAnd:
+	case spv::OpAtomicOr:
+	case spv::OpAtomicXor:
+	case spv::OpAtomicIIncrement:
+	case spv::OpAtomicIDecrement:
+	case spv::OpAtomicExchange:
+		return EmitAtomicOp(insn, state);
 
-		case spv::OpAtomicCompareExchange:
-			return EmitAtomicCompareExchange(insn, state);
+	case spv::OpAtomicCompareExchange:
+		return EmitAtomicCompareExchange(insn, state);
 
-		case spv::OpAccessChain:
-		case spv::OpInBoundsAccessChain:
-			return EmitAccessChain(insn, state);
+	case spv::OpAccessChain:
+	case spv::OpInBoundsAccessChain:
+		return EmitAccessChain(insn, state);
 
-		case spv::OpCompositeConstruct:
-			return EmitCompositeConstruct(insn, state);
+	case spv::OpCompositeConstruct:
+		return EmitCompositeConstruct(insn, state);
 
-		case spv::OpCompositeInsert:
-			return EmitCompositeInsert(insn, state);
+	case spv::OpCompositeInsert:
+		return EmitCompositeInsert(insn, state);
 
-		case spv::OpCompositeExtract:
-			return EmitCompositeExtract(insn, state);
+	case spv::OpCompositeExtract:
+		return EmitCompositeExtract(insn, state);
 
-		case spv::OpVectorShuffle:
-			return EmitVectorShuffle(insn, state);
+	case spv::OpVectorShuffle:
+		return EmitVectorShuffle(insn, state);
 
-		case spv::OpVectorExtractDynamic:
-			return EmitVectorExtractDynamic(insn, state);
+	case spv::OpVectorExtractDynamic:
+		return EmitVectorExtractDynamic(insn, state);
 
-		case spv::OpVectorInsertDynamic:
-			return EmitVectorInsertDynamic(insn, state);
+	case spv::OpVectorInsertDynamic:
+		return EmitVectorInsertDynamic(insn, state);
 
-		case spv::OpVectorTimesScalar:
-		case spv::OpMatrixTimesScalar:
-			return EmitVectorTimesScalar(insn, state);
+	case spv::OpVectorTimesScalar:
+	case spv::OpMatrixTimesScalar:
+		return EmitVectorTimesScalar(insn, state);
 
-		case spv::OpMatrixTimesVector:
-			return EmitMatrixTimesVector(insn, state);
+	case spv::OpMatrixTimesVector:
+		return EmitMatrixTimesVector(insn, state);
 
-		case spv::OpVectorTimesMatrix:
-			return EmitVectorTimesMatrix(insn, state);
+	case spv::OpVectorTimesMatrix:
+		return EmitVectorTimesMatrix(insn, state);
 
-		case spv::OpMatrixTimesMatrix:
-			return EmitMatrixTimesMatrix(insn, state);
+	case spv::OpMatrixTimesMatrix:
+		return EmitMatrixTimesMatrix(insn, state);
 
-		case spv::OpOuterProduct:
-			return EmitOuterProduct(insn, state);
+	case spv::OpOuterProduct:
+		return EmitOuterProduct(insn, state);
 
-		case spv::OpTranspose:
-			return EmitTranspose(insn, state);
+	case spv::OpTranspose:
+		return EmitTranspose(insn, state);
 
-		case spv::OpNot:
-		case spv::OpBitFieldInsert:
-		case spv::OpBitFieldSExtract:
-		case spv::OpBitFieldUExtract:
-		case spv::OpBitReverse:
-		case spv::OpBitCount:
-		case spv::OpSNegate:
-		case spv::OpFNegate:
-		case spv::OpLogicalNot:
-		case spv::OpConvertFToU:
-		case spv::OpConvertFToS:
-		case spv::OpConvertSToF:
-		case spv::OpConvertUToF:
-		case spv::OpBitcast:
-		case spv::OpIsInf:
-		case spv::OpIsNan:
-		case spv::OpDPdx:
-		case spv::OpDPdxCoarse:
-		case spv::OpDPdy:
-		case spv::OpDPdyCoarse:
-		case spv::OpFwidth:
-		case spv::OpFwidthCoarse:
-		case spv::OpDPdxFine:
-		case spv::OpDPdyFine:
-		case spv::OpFwidthFine:
-		case spv::OpQuantizeToF16:
-			return EmitUnaryOp(insn, state);
+	case spv::OpNot:
+	case spv::OpBitFieldInsert:
+	case spv::OpBitFieldSExtract:
+	case spv::OpBitFieldUExtract:
+	case spv::OpBitReverse:
+	case spv::OpBitCount:
+	case spv::OpSNegate:
+	case spv::OpFNegate:
+	case spv::OpLogicalNot:
+	case spv::OpConvertFToU:
+	case spv::OpConvertFToS:
+	case spv::OpConvertSToF:
+	case spv::OpConvertUToF:
+	case spv::OpBitcast:
+	case spv::OpIsInf:
+	case spv::OpIsNan:
+	case spv::OpDPdx:
+	case spv::OpDPdxCoarse:
+	case spv::OpDPdy:
+	case spv::OpDPdyCoarse:
+	case spv::OpFwidth:
+	case spv::OpFwidthCoarse:
+	case spv::OpDPdxFine:
+	case spv::OpDPdyFine:
+	case spv::OpFwidthFine:
+	case spv::OpQuantizeToF16:
+		return EmitUnaryOp(insn, state);
 
-		case spv::OpIAdd:
-		case spv::OpISub:
-		case spv::OpIMul:
-		case spv::OpSDiv:
-		case spv::OpUDiv:
-		case spv::OpFAdd:
-		case spv::OpFSub:
-		case spv::OpFMul:
-		case spv::OpFDiv:
-		case spv::OpFMod:
-		case spv::OpFRem:
-		case spv::OpFOrdEqual:
-		case spv::OpFUnordEqual:
-		case spv::OpFOrdNotEqual:
-		case spv::OpFUnordNotEqual:
-		case spv::OpFOrdLessThan:
-		case spv::OpFUnordLessThan:
-		case spv::OpFOrdGreaterThan:
-		case spv::OpFUnordGreaterThan:
-		case spv::OpFOrdLessThanEqual:
-		case spv::OpFUnordLessThanEqual:
-		case spv::OpFOrdGreaterThanEqual:
-		case spv::OpFUnordGreaterThanEqual:
-		case spv::OpSMod:
-		case spv::OpSRem:
-		case spv::OpUMod:
-		case spv::OpIEqual:
-		case spv::OpINotEqual:
-		case spv::OpUGreaterThan:
-		case spv::OpSGreaterThan:
-		case spv::OpUGreaterThanEqual:
-		case spv::OpSGreaterThanEqual:
-		case spv::OpULessThan:
-		case spv::OpSLessThan:
-		case spv::OpULessThanEqual:
-		case spv::OpSLessThanEqual:
-		case spv::OpShiftRightLogical:
-		case spv::OpShiftRightArithmetic:
-		case spv::OpShiftLeftLogical:
-		case spv::OpBitwiseOr:
-		case spv::OpBitwiseXor:
-		case spv::OpBitwiseAnd:
-		case spv::OpLogicalOr:
-		case spv::OpLogicalAnd:
-		case spv::OpLogicalEqual:
-		case spv::OpLogicalNotEqual:
-		case spv::OpUMulExtended:
-		case spv::OpSMulExtended:
-		case spv::OpIAddCarry:
-		case spv::OpISubBorrow:
-			return EmitBinaryOp(insn, state);
+	case spv::OpIAdd:
+	case spv::OpISub:
+	case spv::OpIMul:
+	case spv::OpSDiv:
+	case spv::OpUDiv:
+	case spv::OpFAdd:
+	case spv::OpFSub:
+	case spv::OpFMul:
+	case spv::OpFDiv:
+	case spv::OpFMod:
+	case spv::OpFRem:
+	case spv::OpFOrdEqual:
+	case spv::OpFUnordEqual:
+	case spv::OpFOrdNotEqual:
+	case spv::OpFUnordNotEqual:
+	case spv::OpFOrdLessThan:
+	case spv::OpFUnordLessThan:
+	case spv::OpFOrdGreaterThan:
+	case spv::OpFUnordGreaterThan:
+	case spv::OpFOrdLessThanEqual:
+	case spv::OpFUnordLessThanEqual:
+	case spv::OpFOrdGreaterThanEqual:
+	case spv::OpFUnordGreaterThanEqual:
+	case spv::OpSMod:
+	case spv::OpSRem:
+	case spv::OpUMod:
+	case spv::OpIEqual:
+	case spv::OpINotEqual:
+	case spv::OpUGreaterThan:
+	case spv::OpSGreaterThan:
+	case spv::OpUGreaterThanEqual:
+	case spv::OpSGreaterThanEqual:
+	case spv::OpULessThan:
+	case spv::OpSLessThan:
+	case spv::OpULessThanEqual:
+	case spv::OpSLessThanEqual:
+	case spv::OpShiftRightLogical:
+	case spv::OpShiftRightArithmetic:
+	case spv::OpShiftLeftLogical:
+	case spv::OpBitwiseOr:
+	case spv::OpBitwiseXor:
+	case spv::OpBitwiseAnd:
+	case spv::OpLogicalOr:
+	case spv::OpLogicalAnd:
+	case spv::OpLogicalEqual:
+	case spv::OpLogicalNotEqual:
+	case spv::OpUMulExtended:
+	case spv::OpSMulExtended:
+	case spv::OpIAddCarry:
+	case spv::OpISubBorrow:
+		return EmitBinaryOp(insn, state);
 
-		case spv::OpDot:
-			return EmitDot(insn, state);
+	case spv::OpDot:
+		return EmitDot(insn, state);
 
-		case spv::OpSelect:
-			return EmitSelect(insn, state);
+	case spv::OpSelect:
+		return EmitSelect(insn, state);
 
-		case spv::OpExtInst:
-			return EmitExtendedInstruction(insn, state);
+	case spv::OpExtInst:
+		return EmitExtendedInstruction(insn, state);
 
-		case spv::OpAny:
-			return EmitAny(insn, state);
+	case spv::OpAny:
+		return EmitAny(insn, state);
 
-		case spv::OpAll:
-			return EmitAll(insn, state);
+	case spv::OpAll:
+		return EmitAll(insn, state);
 
-		case spv::OpBranch:
-			return EmitBranch(insn, state);
+	case spv::OpBranch:
+		return EmitBranch(insn, state);
 
-		case spv::OpPhi:
-			return EmitPhi(insn, state);
+	case spv::OpPhi:
+		return EmitPhi(insn, state);
 
-		case spv::OpSelectionMerge:
-		case spv::OpLoopMerge:
-			return EmitResult::Continue;
+	case spv::OpSelectionMerge:
+	case spv::OpLoopMerge:
+		return EmitResult::Continue;
 
-		case spv::OpBranchConditional:
-			return EmitBranchConditional(insn, state);
+	case spv::OpBranchConditional:
+		return EmitBranchConditional(insn, state);
 
-		case spv::OpSwitch:
-			return EmitSwitch(insn, state);
+	case spv::OpSwitch:
+		return EmitSwitch(insn, state);
 
-		case spv::OpUnreachable:
-			return EmitUnreachable(insn, state);
+	case spv::OpUnreachable:
+		return EmitUnreachable(insn, state);
 
-		case spv::OpReturn:
-			return EmitReturn(insn, state);
+	case spv::OpReturn:
+		return EmitReturn(insn, state);
 
-		case spv::OpFunctionCall:
-			return EmitFunctionCall(insn, state);
+	case spv::OpFunctionCall:
+		return EmitFunctionCall(insn, state);
 
-		case spv::OpKill:
-			return EmitKill(insn, state);
+	case spv::OpKill:
+		return EmitKill(insn, state);
 
-		case spv::OpImageSampleImplicitLod:
-			return EmitImageSampleImplicitLod(None, insn, state);
+	case spv::OpImageSampleImplicitLod:
+		return EmitImageSampleImplicitLod(None, insn, state);
 
-		case spv::OpImageSampleExplicitLod:
-			return EmitImageSampleExplicitLod(None, insn, state);
+	case spv::OpImageSampleExplicitLod:
+		return EmitImageSampleExplicitLod(None, insn, state);
 
-		case spv::OpImageSampleDrefImplicitLod:
-			return EmitImageSampleImplicitLod(Dref, insn, state);
+	case spv::OpImageSampleDrefImplicitLod:
+		return EmitImageSampleImplicitLod(Dref, insn, state);
 
-		case spv::OpImageSampleDrefExplicitLod:
-			return EmitImageSampleExplicitLod(Dref, insn, state);
+	case spv::OpImageSampleDrefExplicitLod:
+		return EmitImageSampleExplicitLod(Dref, insn, state);
 
-		case spv::OpImageSampleProjImplicitLod:
-			return EmitImageSampleImplicitLod(Proj, insn, state);
+	case spv::OpImageSampleProjImplicitLod:
+		return EmitImageSampleImplicitLod(Proj, insn, state);
 
-		case spv::OpImageSampleProjExplicitLod:
-			return EmitImageSampleExplicitLod(Proj, insn, state);
+	case spv::OpImageSampleProjExplicitLod:
+		return EmitImageSampleExplicitLod(Proj, insn, state);
 
-		case spv::OpImageSampleProjDrefImplicitLod:
-			return EmitImageSampleImplicitLod(ProjDref, insn, state);
+	case spv::OpImageSampleProjDrefImplicitLod:
+		return EmitImageSampleImplicitLod(ProjDref, insn, state);
 
-		case spv::OpImageSampleProjDrefExplicitLod:
-			return EmitImageSampleExplicitLod(ProjDref, insn, state);
+	case spv::OpImageSampleProjDrefExplicitLod:
+		return EmitImageSampleExplicitLod(ProjDref, insn, state);
 
-		case spv::OpImageGather:
-			return EmitImageGather(None, insn, state);
+	case spv::OpImageGather:
+		return EmitImageGather(None, insn, state);
 
-		case spv::OpImageDrefGather:
-			return EmitImageGather(Dref, insn, state);
+	case spv::OpImageDrefGather:
+		return EmitImageGather(Dref, insn, state);
 
-		case spv::OpImageFetch:
-			return EmitImageFetch(insn, state);
+	case spv::OpImageFetch:
+		return EmitImageFetch(insn, state);
 
-		case spv::OpImageQuerySizeLod:
-			return EmitImageQuerySizeLod(insn, state);
+	case spv::OpImageQuerySizeLod:
+		return EmitImageQuerySizeLod(insn, state);
 
-		case spv::OpImageQuerySize:
-			return EmitImageQuerySize(insn, state);
+	case spv::OpImageQuerySize:
+		return EmitImageQuerySize(insn, state);
 
-		case spv::OpImageQueryLod:
-			return EmitImageQueryLod(insn, state);
+	case spv::OpImageQueryLod:
+		return EmitImageQueryLod(insn, state);
 
-		case spv::OpImageQueryLevels:
-			return EmitImageQueryLevels(insn, state);
+	case spv::OpImageQueryLevels:
+		return EmitImageQueryLevels(insn, state);
 
-		case spv::OpImageQuerySamples:
-			return EmitImageQuerySamples(insn, state);
+	case spv::OpImageQuerySamples:
+		return EmitImageQuerySamples(insn, state);
 
-		case spv::OpImageRead:
-			return EmitImageRead(insn, state);
+	case spv::OpImageRead:
+		return EmitImageRead(insn, state);
 
-		case spv::OpImageWrite:
-			return EmitImageWrite(insn, state);
+	case spv::OpImageWrite:
+		return EmitImageWrite(insn, state);
 
-		case spv::OpImageTexelPointer:
-			return EmitImageTexelPointer(insn, state);
+	case spv::OpImageTexelPointer:
+		return EmitImageTexelPointer(insn, state);
 
-		case spv::OpSampledImage:
-		case spv::OpImage:
-			return EmitSampledImageCombineOrSplit(insn, state);
+	case spv::OpSampledImage:
+	case spv::OpImage:
+		return EmitSampledImageCombineOrSplit(insn, state);
 
-		case spv::OpCopyObject:
-		case spv::OpCopyLogical:
-			return EmitCopyObject(insn, state);
+	case spv::OpCopyObject:
+	case spv::OpCopyLogical:
+		return EmitCopyObject(insn, state);
 
-		case spv::OpCopyMemory:
-			return EmitCopyMemory(insn, state);
+	case spv::OpCopyMemory:
+		return EmitCopyMemory(insn, state);
 
-		case spv::OpControlBarrier:
-			return EmitControlBarrier(insn, state);
+	case spv::OpControlBarrier:
+		return EmitControlBarrier(insn, state);
 
-		case spv::OpMemoryBarrier:
-			return EmitMemoryBarrier(insn, state);
+	case spv::OpMemoryBarrier:
+		return EmitMemoryBarrier(insn, state);
 
-		case spv::OpGroupNonUniformElect:
-		case spv::OpGroupNonUniformAll:
-		case spv::OpGroupNonUniformAny:
-		case spv::OpGroupNonUniformAllEqual:
-		case spv::OpGroupNonUniformBroadcast:
-		case spv::OpGroupNonUniformBroadcastFirst:
-		case spv::OpGroupNonUniformBallot:
-		case spv::OpGroupNonUniformInverseBallot:
-		case spv::OpGroupNonUniformBallotBitExtract:
-		case spv::OpGroupNonUniformBallotBitCount:
-		case spv::OpGroupNonUniformBallotFindLSB:
-		case spv::OpGroupNonUniformBallotFindMSB:
-		case spv::OpGroupNonUniformShuffle:
-		case spv::OpGroupNonUniformShuffleXor:
-		case spv::OpGroupNonUniformShuffleUp:
-		case spv::OpGroupNonUniformShuffleDown:
-		case spv::OpGroupNonUniformIAdd:
-		case spv::OpGroupNonUniformFAdd:
-		case spv::OpGroupNonUniformIMul:
-		case spv::OpGroupNonUniformFMul:
-		case spv::OpGroupNonUniformSMin:
-		case spv::OpGroupNonUniformUMin:
-		case spv::OpGroupNonUniformFMin:
-		case spv::OpGroupNonUniformSMax:
-		case spv::OpGroupNonUniformUMax:
-		case spv::OpGroupNonUniformFMax:
-		case spv::OpGroupNonUniformBitwiseAnd:
-		case spv::OpGroupNonUniformBitwiseOr:
-		case spv::OpGroupNonUniformBitwiseXor:
-		case spv::OpGroupNonUniformLogicalAnd:
-		case spv::OpGroupNonUniformLogicalOr:
-		case spv::OpGroupNonUniformLogicalXor:
-			return EmitGroupNonUniform(insn, state);
+	case spv::OpGroupNonUniformElect:
+	case spv::OpGroupNonUniformAll:
+	case spv::OpGroupNonUniformAny:
+	case spv::OpGroupNonUniformAllEqual:
+	case spv::OpGroupNonUniformBroadcast:
+	case spv::OpGroupNonUniformBroadcastFirst:
+	case spv::OpGroupNonUniformBallot:
+	case spv::OpGroupNonUniformInverseBallot:
+	case spv::OpGroupNonUniformBallotBitExtract:
+	case spv::OpGroupNonUniformBallotBitCount:
+	case spv::OpGroupNonUniformBallotFindLSB:
+	case spv::OpGroupNonUniformBallotFindMSB:
+	case spv::OpGroupNonUniformShuffle:
+	case spv::OpGroupNonUniformShuffleXor:
+	case spv::OpGroupNonUniformShuffleUp:
+	case spv::OpGroupNonUniformShuffleDown:
+	case spv::OpGroupNonUniformIAdd:
+	case spv::OpGroupNonUniformFAdd:
+	case spv::OpGroupNonUniformIMul:
+	case spv::OpGroupNonUniformFMul:
+	case spv::OpGroupNonUniformSMin:
+	case spv::OpGroupNonUniformUMin:
+	case spv::OpGroupNonUniformFMin:
+	case spv::OpGroupNonUniformSMax:
+	case spv::OpGroupNonUniformUMax:
+	case spv::OpGroupNonUniformFMax:
+	case spv::OpGroupNonUniformBitwiseAnd:
+	case spv::OpGroupNonUniformBitwiseOr:
+	case spv::OpGroupNonUniformBitwiseXor:
+	case spv::OpGroupNonUniformLogicalAnd:
+	case spv::OpGroupNonUniformLogicalOr:
+	case spv::OpGroupNonUniformLogicalXor:
+		return EmitGroupNonUniform(insn, state);
 
-		case spv::OpArrayLength:
-			return EmitArrayLength(insn, state);
+	case spv::OpArrayLength:
+		return EmitArrayLength(insn, state);
 
-		default:
-			UNREACHABLE("%s", OpcodeName(opcode));
-			break;
+	default:
+		UNREACHABLE("%s", OpcodeName(opcode));
+		break;
 	}
 
 	return EmitResult::Continue;
@@ -2322,41 +2326,41 @@
 			UInt v;
 			switch(insn.opcode())
 			{
-				case spv::OpAtomicIAdd:
-				case spv::OpAtomicIIncrement:
-					v = AddAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
-					break;
-				case spv::OpAtomicISub:
-				case spv::OpAtomicIDecrement:
-					v = SubAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
-					break;
-				case spv::OpAtomicAnd:
-					v = AndAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
-					break;
-				case spv::OpAtomicOr:
-					v = OrAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
-					break;
-				case spv::OpAtomicXor:
-					v = XorAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
-					break;
-				case spv::OpAtomicSMin:
-					v = As<UInt>(MinAtomic(Pointer<Int>(&ptr.base[offset]), As<Int>(laneValue), memoryOrder));
-					break;
-				case spv::OpAtomicSMax:
-					v = As<UInt>(MaxAtomic(Pointer<Int>(&ptr.base[offset]), As<Int>(laneValue), memoryOrder));
-					break;
-				case spv::OpAtomicUMin:
-					v = MinAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
-					break;
-				case spv::OpAtomicUMax:
-					v = MaxAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
-					break;
-				case spv::OpAtomicExchange:
-					v = ExchangeAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
-					break;
-				default:
-					UNREACHABLE("%s", OpcodeName(insn.opcode()));
-					break;
+			case spv::OpAtomicIAdd:
+			case spv::OpAtomicIIncrement:
+				v = AddAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
+				break;
+			case spv::OpAtomicISub:
+			case spv::OpAtomicIDecrement:
+				v = SubAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
+				break;
+			case spv::OpAtomicAnd:
+				v = AndAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
+				break;
+			case spv::OpAtomicOr:
+				v = OrAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
+				break;
+			case spv::OpAtomicXor:
+				v = XorAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
+				break;
+			case spv::OpAtomicSMin:
+				v = As<UInt>(MinAtomic(Pointer<Int>(&ptr.base[offset]), As<Int>(laneValue), memoryOrder));
+				break;
+			case spv::OpAtomicSMax:
+				v = As<UInt>(MaxAtomic(Pointer<Int>(&ptr.base[offset]), As<Int>(laneValue), memoryOrder));
+				break;
+			case spv::OpAtomicUMin:
+				v = MinAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
+				break;
+			case spv::OpAtomicUMax:
+				v = MaxAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
+				break;
+			case spv::OpAtomicExchange:
+				v = ExchangeAtomic(Pointer<UInt>(&ptr.base[offset]), laneValue, memoryOrder);
+				break;
+			default:
+				UNREACHABLE("%s", OpcodeName(insn.opcode()));
+				break;
 			}
 			result = Insert(result, v, j);
 		}
@@ -2451,12 +2455,12 @@
 	auto ext = getExtension(insn.word(3));
 	switch(ext.name)
 	{
-		case Extension::GLSLstd450:
-			return EmitExtGLSLstd450(insn, state);
-		case Extension::OpenCLDebugInfo100:
-			return EmitOpenCLDebugInfo100(insn, state);
-		default:
-			UNREACHABLE("Unknown Extension::Name<%d>", int(ext.name));
+	case Extension::GLSLstd450:
+		return EmitExtGLSLstd450(insn, state);
+	case Extension::OpenCLDebugInfo100:
+		return EmitOpenCLDebugInfo100(insn, state);
+	default:
+		UNREACHABLE("Unknown Extension::Name<%d>", int(ext.name));
 	}
 	return EmitResult::Continue;
 }
@@ -2475,7 +2479,7 @@
 	{
 		switch(insn.opcode())
 		{
-			case spv::OpVariable:
+		case spv::OpVariable:
 			{
 				auto &object = getObject(insn.resultId());
 				auto &objectTy = getType(object);
@@ -2489,10 +2493,10 @@
 						               routine->outputs[scalarSlot] = dst[offset++];
 					               });
 				}
-				break;
 			}
-			default:
-				break;
+			break;
+		default:
+			break;
 		}
 	}
 }
@@ -2510,24 +2514,24 @@
 {
 	switch(model)
 	{
-		case spv::ExecutionModelVertex: return VK_SHADER_STAGE_VERTEX_BIT;
-		// case spv::ExecutionModelTessellationControl:    return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
-		// case spv::ExecutionModelTessellationEvaluation: return VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
-		// case spv::ExecutionModelGeometry:               return VK_SHADER_STAGE_GEOMETRY_BIT;
-		case spv::ExecutionModelFragment: return VK_SHADER_STAGE_FRAGMENT_BIT;
-		case spv::ExecutionModelGLCompute: return VK_SHADER_STAGE_COMPUTE_BIT;
-		// case spv::ExecutionModelKernel:                 return VkShaderStageFlagBits(0); // Not supported by vulkan.
-		// case spv::ExecutionModelTaskNV:                 return VK_SHADER_STAGE_TASK_BIT_NV;
-		// case spv::ExecutionModelMeshNV:                 return VK_SHADER_STAGE_MESH_BIT_NV;
-		// case spv::ExecutionModelRayGenerationNV:        return VK_SHADER_STAGE_RAYGEN_BIT_NV;
-		// case spv::ExecutionModelIntersectionNV:         return VK_SHADER_STAGE_INTERSECTION_BIT_NV;
-		// case spv::ExecutionModelAnyHitNV:               return VK_SHADER_STAGE_ANY_HIT_BIT_NV;
-		// case spv::ExecutionModelClosestHitNV:           return VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV;
-		// case spv::ExecutionModelMissNV:                 return VK_SHADER_STAGE_MISS_BIT_NV;
-		// case spv::ExecutionModelCallableNV:             return VK_SHADER_STAGE_CALLABLE_BIT_NV;
-		default:
-			UNSUPPORTED("ExecutionModel: %d", int(model));
-			return VkShaderStageFlagBits(0);
+	case spv::ExecutionModelVertex: return VK_SHADER_STAGE_VERTEX_BIT;
+	// case spv::ExecutionModelTessellationControl:    return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
+	// case spv::ExecutionModelTessellationEvaluation: return VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
+	// case spv::ExecutionModelGeometry:               return VK_SHADER_STAGE_GEOMETRY_BIT;
+	case spv::ExecutionModelFragment: return VK_SHADER_STAGE_FRAGMENT_BIT;
+	case spv::ExecutionModelGLCompute: return VK_SHADER_STAGE_COMPUTE_BIT;
+	// case spv::ExecutionModelKernel:                 return VkShaderStageFlagBits(0); // Not supported by vulkan.
+	// case spv::ExecutionModelTaskNV:                 return VK_SHADER_STAGE_TASK_BIT_NV;
+	// case spv::ExecutionModelMeshNV:                 return VK_SHADER_STAGE_MESH_BIT_NV;
+	// case spv::ExecutionModelRayGenerationNV:        return VK_SHADER_STAGE_RAYGEN_BIT_NV;
+	// case spv::ExecutionModelIntersectionNV:         return VK_SHADER_STAGE_INTERSECTION_BIT_NV;
+	// case spv::ExecutionModelAnyHitNV:               return VK_SHADER_STAGE_ANY_HIT_BIT_NV;
+	// case spv::ExecutionModelClosestHitNV:           return VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV;
+	// case spv::ExecutionModelMissNV:                 return VK_SHADER_STAGE_MISS_BIT_NV;
+	// case spv::ExecutionModelCallableNV:             return VK_SHADER_STAGE_CALLABLE_BIT_NV;
+	default:
+		UNSUPPORTED("ExecutionModel: %d", int(model));
+		return VkShaderStageFlagBits(0);
 	}
 }
 
diff --git a/src/Pipeline/SpirvShaderArithmetic.cpp b/src/Pipeline/SpirvShaderArithmetic.cpp
index 61d7bcc..c2924d3 100644
--- a/src/Pipeline/SpirvShaderArithmetic.cpp
+++ b/src/Pipeline/SpirvShaderArithmetic.cpp
@@ -154,11 +154,11 @@
 	{
 		switch(insn.opcode())
 		{
-			case spv::OpNot:
-			case spv::OpLogicalNot:  // logical not == bitwise not due to all-bits boolean representation
-				dst.move(i, ~src.UInt(i));
-				break;
-			case spv::OpBitFieldInsert:
+		case spv::OpNot:
+		case spv::OpLogicalNot:  // logical not == bitwise not due to all-bits boolean representation
+			dst.move(i, ~src.UInt(i));
+			break;
+		case spv::OpBitFieldInsert:
 			{
 				auto insert = Operand(this, state, insn.word(4)).UInt(i);
 				auto offset = Operand(this, state, insn.word(5)).UInt(0);
@@ -167,10 +167,10 @@
 				auto v = src.UInt(i);
 				auto mask = Bitmask32(offset + count) ^ Bitmask32(offset);
 				dst.move(i, (v & ~mask) | ((insert << offset) & mask));
-				break;
 			}
-			case spv::OpBitFieldSExtract:
-			case spv::OpBitFieldUExtract:
+			break;
+		case spv::OpBitFieldSExtract:
+		case spv::OpBitFieldUExtract:
 			{
 				auto offset = Operand(this, state, insn.word(4)).UInt(0);
 				auto count = Operand(this, state, insn.word(5)).UInt(0);
@@ -184,9 +184,9 @@
 					out |= sext;
 				}
 				dst.move(i, out);
-				break;
 			}
-			case spv::OpBitReverse:
+			break;
+		case spv::OpBitReverse:
 			{
 				// TODO: Add an intrinsic to reactor. Even if there isn't a
 				// single vector instruction, there may be target-dependent
@@ -199,55 +199,55 @@
 				v = ((v >> 8) & SIMD::UInt(0x00FF00FF)) | ((v & SIMD::UInt(0x00FF00FF)) << 8);
 				v = (v >> 16) | (v << 16);
 				dst.move(i, v);
-				break;
 			}
-			case spv::OpBitCount:
-				dst.move(i, CountBits(src.UInt(i)));
-				break;
-			case spv::OpSNegate:
-				dst.move(i, -src.Int(i));
-				break;
-			case spv::OpFNegate:
-				dst.move(i, -src.Float(i));
-				break;
-			case spv::OpConvertFToU:
-				dst.move(i, SIMD::UInt(src.Float(i)));
-				break;
-			case spv::OpConvertFToS:
-				dst.move(i, SIMD::Int(src.Float(i)));
-				break;
-			case spv::OpConvertSToF:
-				dst.move(i, SIMD::Float(src.Int(i)));
-				break;
-			case spv::OpConvertUToF:
-				dst.move(i, SIMD::Float(src.UInt(i)));
-				break;
-			case spv::OpBitcast:
-				dst.move(i, src.Float(i));
-				break;
-			case spv::OpIsInf:
-				dst.move(i, IsInf(src.Float(i)));
-				break;
-			case spv::OpIsNan:
-				dst.move(i, IsNan(src.Float(i)));
-				break;
-			case spv::OpDPdx:
-			case spv::OpDPdxCoarse:
-				// Derivative instructions: FS invocations are laid out like so:
-				//    0 1
-				//    2 3
-				static_assert(SIMD::Width == 4, "All cross-lane instructions will need care when using a different width");
-				dst.move(i, SIMD::Float(Extract(src.Float(i), 1) - Extract(src.Float(i), 0)));
-				break;
-			case spv::OpDPdy:
-			case spv::OpDPdyCoarse:
-				dst.move(i, SIMD::Float(Extract(src.Float(i), 2) - Extract(src.Float(i), 0)));
-				break;
-			case spv::OpFwidth:
-			case spv::OpFwidthCoarse:
-				dst.move(i, SIMD::Float(Abs(Extract(src.Float(i), 1) - Extract(src.Float(i), 0)) + Abs(Extract(src.Float(i), 2) - Extract(src.Float(i), 0))));
-				break;
-			case spv::OpDPdxFine:
+			break;
+		case spv::OpBitCount:
+			dst.move(i, CountBits(src.UInt(i)));
+			break;
+		case spv::OpSNegate:
+			dst.move(i, -src.Int(i));
+			break;
+		case spv::OpFNegate:
+			dst.move(i, -src.Float(i));
+			break;
+		case spv::OpConvertFToU:
+			dst.move(i, SIMD::UInt(src.Float(i)));
+			break;
+		case spv::OpConvertFToS:
+			dst.move(i, SIMD::Int(src.Float(i)));
+			break;
+		case spv::OpConvertSToF:
+			dst.move(i, SIMD::Float(src.Int(i)));
+			break;
+		case spv::OpConvertUToF:
+			dst.move(i, SIMD::Float(src.UInt(i)));
+			break;
+		case spv::OpBitcast:
+			dst.move(i, src.Float(i));
+			break;
+		case spv::OpIsInf:
+			dst.move(i, IsInf(src.Float(i)));
+			break;
+		case spv::OpIsNan:
+			dst.move(i, IsNan(src.Float(i)));
+			break;
+		case spv::OpDPdx:
+		case spv::OpDPdxCoarse:
+			// Derivative instructions: FS invocations are laid out like so:
+			//    0 1
+			//    2 3
+			static_assert(SIMD::Width == 4, "All cross-lane instructions will need care when using a different width");
+			dst.move(i, SIMD::Float(Extract(src.Float(i), 1) - Extract(src.Float(i), 0)));
+			break;
+		case spv::OpDPdy:
+		case spv::OpDPdyCoarse:
+			dst.move(i, SIMD::Float(Extract(src.Float(i), 2) - Extract(src.Float(i), 0)));
+			break;
+		case spv::OpFwidth:
+		case spv::OpFwidthCoarse:
+			dst.move(i, SIMD::Float(Abs(Extract(src.Float(i), 1) - Extract(src.Float(i), 0)) + Abs(Extract(src.Float(i), 2) - Extract(src.Float(i), 0))));
+			break;
+		case spv::OpDPdxFine:
 			{
 				auto firstRow = Extract(src.Float(i), 1) - Extract(src.Float(i), 0);
 				auto secondRow = Extract(src.Float(i), 3) - Extract(src.Float(i), 2);
@@ -255,9 +255,9 @@
 				v = Insert(v, secondRow, 2);
 				v = Insert(v, secondRow, 3);
 				dst.move(i, v);
-				break;
 			}
-			case spv::OpDPdyFine:
+			break;
+		case spv::OpDPdyFine:
 			{
 				auto firstColumn = Extract(src.Float(i), 2) - Extract(src.Float(i), 0);
 				auto secondColumn = Extract(src.Float(i), 3) - Extract(src.Float(i), 1);
@@ -265,9 +265,9 @@
 				v = Insert(v, secondColumn, 1);
 				v = Insert(v, secondColumn, 3);
 				dst.move(i, v);
-				break;
 			}
-			case spv::OpFwidthFine:
+			break;
+		case spv::OpFwidthFine:
 			{
 				auto firstRow = Extract(src.Float(i), 1) - Extract(src.Float(i), 0);
 				auto secondRow = Extract(src.Float(i), 3) - Extract(src.Float(i), 2);
@@ -280,9 +280,9 @@
 				dpdy = Insert(dpdy, secondColumn, 1);
 				dpdy = Insert(dpdy, secondColumn, 3);
 				dst.move(i, Abs(dpdx) + Abs(dpdy));
-				break;
 			}
-			case spv::OpQuantizeToF16:
+			break;
+		case spv::OpQuantizeToF16:
 			{
 				// Note: keep in sync with the specialization constant version in EvalSpecConstantUnaryOp
 				auto abs = Abs(src.Float(i));
@@ -296,10 +296,10 @@
 				v = sign | (isInfOrNan & SIMD::Int(0x7F800000)) | (~isInfOrNan & v);
 				v |= isNaN & SIMD::Int(0x400000);
 				dst.move(i, v);
-				break;
 			}
-			default:
-				UNREACHABLE("%s", OpcodeName(insn.opcode()));
+			break;
+		default:
+			UNREACHABLE("%s", OpcodeName(insn.opcode()));
 		}
 	}
 
@@ -318,40 +318,40 @@
 	{
 		switch(insn.opcode())
 		{
-			case spv::OpIAdd:
-				dst.move(i, lhs.Int(i) + rhs.Int(i));
-				break;
-			case spv::OpISub:
-				dst.move(i, lhs.Int(i) - rhs.Int(i));
-				break;
-			case spv::OpIMul:
-				dst.move(i, lhs.Int(i) * rhs.Int(i));
-				break;
-			case spv::OpSDiv:
+		case spv::OpIAdd:
+			dst.move(i, lhs.Int(i) + rhs.Int(i));
+			break;
+		case spv::OpISub:
+			dst.move(i, lhs.Int(i) - rhs.Int(i));
+			break;
+		case spv::OpIMul:
+			dst.move(i, lhs.Int(i) * rhs.Int(i));
+			break;
+		case spv::OpSDiv:
 			{
 				SIMD::Int a = lhs.Int(i);
 				SIMD::Int b = rhs.Int(i);
 				b = b | CmpEQ(b, SIMD::Int(0));                                       // prevent divide-by-zero
 				a = a | (CmpEQ(a, SIMD::Int(0x80000000)) & CmpEQ(b, SIMD::Int(-1)));  // prevent integer overflow
 				dst.move(i, a / b);
-				break;
 			}
-			case spv::OpUDiv:
+			break;
+		case spv::OpUDiv:
 			{
 				auto zeroMask = As<SIMD::UInt>(CmpEQ(rhs.Int(i), SIMD::Int(0)));
 				dst.move(i, lhs.UInt(i) / (rhs.UInt(i) | zeroMask));
-				break;
 			}
-			case spv::OpSRem:
+			break;
+		case spv::OpSRem:
 			{
 				SIMD::Int a = lhs.Int(i);
 				SIMD::Int b = rhs.Int(i);
 				b = b | CmpEQ(b, SIMD::Int(0));                                       // prevent divide-by-zero
 				a = a | (CmpEQ(a, SIMD::Int(0x80000000)) & CmpEQ(b, SIMD::Int(-1)));  // prevent integer overflow
 				dst.move(i, a % b);
-				break;
 			}
-			case spv::OpSMod:
+			break;
+		case spv::OpSMod:
 			{
 				SIMD::Int a = lhs.Int(i);
 				SIMD::Int b = rhs.Int(i);
@@ -367,142 +367,142 @@
 				auto signDiff = CmpNEQ(CmpGE(a, SIMD::Int(0)), CmpGE(b, SIMD::Int(0)));
 				auto fixedMod = mod + (b & CmpNEQ(mod, SIMD::Int(0)) & signDiff);
 				dst.move(i, As<SIMD::Float>(fixedMod));
-				break;
 			}
-			case spv::OpUMod:
+			break;
+		case spv::OpUMod:
 			{
 				auto zeroMask = As<SIMD::UInt>(CmpEQ(rhs.Int(i), SIMD::Int(0)));
 				dst.move(i, lhs.UInt(i) % (rhs.UInt(i) | zeroMask));
-				break;
 			}
-			case spv::OpIEqual:
-			case spv::OpLogicalEqual:
-				dst.move(i, CmpEQ(lhs.Int(i), rhs.Int(i)));
-				break;
-			case spv::OpINotEqual:
-			case spv::OpLogicalNotEqual:
-				dst.move(i, CmpNEQ(lhs.Int(i), rhs.Int(i)));
-				break;
-			case spv::OpUGreaterThan:
-				dst.move(i, CmpGT(lhs.UInt(i), rhs.UInt(i)));
-				break;
-			case spv::OpSGreaterThan:
-				dst.move(i, CmpGT(lhs.Int(i), rhs.Int(i)));
-				break;
-			case spv::OpUGreaterThanEqual:
-				dst.move(i, CmpGE(lhs.UInt(i), rhs.UInt(i)));
-				break;
-			case spv::OpSGreaterThanEqual:
-				dst.move(i, CmpGE(lhs.Int(i), rhs.Int(i)));
-				break;
-			case spv::OpULessThan:
-				dst.move(i, CmpLT(lhs.UInt(i), rhs.UInt(i)));
-				break;
-			case spv::OpSLessThan:
-				dst.move(i, CmpLT(lhs.Int(i), rhs.Int(i)));
-				break;
-			case spv::OpULessThanEqual:
-				dst.move(i, CmpLE(lhs.UInt(i), rhs.UInt(i)));
-				break;
-			case spv::OpSLessThanEqual:
-				dst.move(i, CmpLE(lhs.Int(i), rhs.Int(i)));
-				break;
-			case spv::OpFAdd:
-				dst.move(i, lhs.Float(i) + rhs.Float(i));
-				break;
-			case spv::OpFSub:
-				dst.move(i, lhs.Float(i) - rhs.Float(i));
-				break;
-			case spv::OpFMul:
-				dst.move(i, lhs.Float(i) * rhs.Float(i));
-				break;
-			case spv::OpFDiv:
-				dst.move(i, lhs.Float(i) / rhs.Float(i));
-				break;
-			case spv::OpFMod:
-				// TODO(b/126873455): inaccurate for values greater than 2^24
-				dst.move(i, lhs.Float(i) - rhs.Float(i) * Floor(lhs.Float(i) / rhs.Float(i)));
-				break;
-			case spv::OpFRem:
-				dst.move(i, lhs.Float(i) % rhs.Float(i));
-				break;
-			case spv::OpFOrdEqual:
-				dst.move(i, CmpEQ(lhs.Float(i), rhs.Float(i)));
-				break;
-			case spv::OpFUnordEqual:
-				dst.move(i, CmpUEQ(lhs.Float(i), rhs.Float(i)));
-				break;
-			case spv::OpFOrdNotEqual:
-				dst.move(i, CmpNEQ(lhs.Float(i), rhs.Float(i)));
-				break;
-			case spv::OpFUnordNotEqual:
-				dst.move(i, CmpUNEQ(lhs.Float(i), rhs.Float(i)));
-				break;
-			case spv::OpFOrdLessThan:
-				dst.move(i, CmpLT(lhs.Float(i), rhs.Float(i)));
-				break;
-			case spv::OpFUnordLessThan:
-				dst.move(i, CmpULT(lhs.Float(i), rhs.Float(i)));
-				break;
-			case spv::OpFOrdGreaterThan:
-				dst.move(i, CmpGT(lhs.Float(i), rhs.Float(i)));
-				break;
-			case spv::OpFUnordGreaterThan:
-				dst.move(i, CmpUGT(lhs.Float(i), rhs.Float(i)));
-				break;
-			case spv::OpFOrdLessThanEqual:
-				dst.move(i, CmpLE(lhs.Float(i), rhs.Float(i)));
-				break;
-			case spv::OpFUnordLessThanEqual:
-				dst.move(i, CmpULE(lhs.Float(i), rhs.Float(i)));
-				break;
-			case spv::OpFOrdGreaterThanEqual:
-				dst.move(i, CmpGE(lhs.Float(i), rhs.Float(i)));
-				break;
-			case spv::OpFUnordGreaterThanEqual:
-				dst.move(i, CmpUGE(lhs.Float(i), rhs.Float(i)));
-				break;
-			case spv::OpShiftRightLogical:
-				dst.move(i, lhs.UInt(i) >> rhs.UInt(i));
-				break;
-			case spv::OpShiftRightArithmetic:
-				dst.move(i, lhs.Int(i) >> rhs.Int(i));
-				break;
-			case spv::OpShiftLeftLogical:
-				dst.move(i, lhs.UInt(i) << rhs.UInt(i));
-				break;
-			case spv::OpBitwiseOr:
-			case spv::OpLogicalOr:
-				dst.move(i, lhs.UInt(i) | rhs.UInt(i));
-				break;
-			case spv::OpBitwiseXor:
-				dst.move(i, lhs.UInt(i) ^ rhs.UInt(i));
-				break;
-			case spv::OpBitwiseAnd:
-			case spv::OpLogicalAnd:
-				dst.move(i, lhs.UInt(i) & rhs.UInt(i));
-				break;
-			case spv::OpSMulExtended:
-				// Extended ops: result is a structure containing two members of the same type as lhs & rhs.
-				// In our flat view then, component i is the i'th component of the first member;
-				// component i + N is the i'th component of the second member.
-				dst.move(i, lhs.Int(i) * rhs.Int(i));
-				dst.move(i + lhsType.componentCount, MulHigh(lhs.Int(i), rhs.Int(i)));
-				break;
-			case spv::OpUMulExtended:
-				dst.move(i, lhs.UInt(i) * rhs.UInt(i));
-				dst.move(i + lhsType.componentCount, MulHigh(lhs.UInt(i), rhs.UInt(i)));
-				break;
-			case spv::OpIAddCarry:
-				dst.move(i, lhs.UInt(i) + rhs.UInt(i));
-				dst.move(i + lhsType.componentCount, CmpLT(dst.UInt(i), lhs.UInt(i)) >> 31);
-				break;
-			case spv::OpISubBorrow:
-				dst.move(i, lhs.UInt(i) - rhs.UInt(i));
-				dst.move(i + lhsType.componentCount, CmpLT(lhs.UInt(i), rhs.UInt(i)) >> 31);
-				break;
-			default:
-				UNREACHABLE("%s", OpcodeName(insn.opcode()));
+			break;
+		case spv::OpIEqual:
+		case spv::OpLogicalEqual:
+			dst.move(i, CmpEQ(lhs.Int(i), rhs.Int(i)));
+			break;
+		case spv::OpINotEqual:
+		case spv::OpLogicalNotEqual:
+			dst.move(i, CmpNEQ(lhs.Int(i), rhs.Int(i)));
+			break;
+		case spv::OpUGreaterThan:
+			dst.move(i, CmpGT(lhs.UInt(i), rhs.UInt(i)));
+			break;
+		case spv::OpSGreaterThan:
+			dst.move(i, CmpGT(lhs.Int(i), rhs.Int(i)));
+			break;
+		case spv::OpUGreaterThanEqual:
+			dst.move(i, CmpGE(lhs.UInt(i), rhs.UInt(i)));
+			break;
+		case spv::OpSGreaterThanEqual:
+			dst.move(i, CmpGE(lhs.Int(i), rhs.Int(i)));
+			break;
+		case spv::OpULessThan:
+			dst.move(i, CmpLT(lhs.UInt(i), rhs.UInt(i)));
+			break;
+		case spv::OpSLessThan:
+			dst.move(i, CmpLT(lhs.Int(i), rhs.Int(i)));
+			break;
+		case spv::OpULessThanEqual:
+			dst.move(i, CmpLE(lhs.UInt(i), rhs.UInt(i)));
+			break;
+		case spv::OpSLessThanEqual:
+			dst.move(i, CmpLE(lhs.Int(i), rhs.Int(i)));
+			break;
+		case spv::OpFAdd:
+			dst.move(i, lhs.Float(i) + rhs.Float(i));
+			break;
+		case spv::OpFSub:
+			dst.move(i, lhs.Float(i) - rhs.Float(i));
+			break;
+		case spv::OpFMul:
+			dst.move(i, lhs.Float(i) * rhs.Float(i));
+			break;
+		case spv::OpFDiv:
+			dst.move(i, lhs.Float(i) / rhs.Float(i));
+			break;
+		case spv::OpFMod:
+			// TODO(b/126873455): inaccurate for values greater than 2^24
+			dst.move(i, lhs.Float(i) - rhs.Float(i) * Floor(lhs.Float(i) / rhs.Float(i)));
+			break;
+		case spv::OpFRem:
+			dst.move(i, lhs.Float(i) % rhs.Float(i));
+			break;
+		case spv::OpFOrdEqual:
+			dst.move(i, CmpEQ(lhs.Float(i), rhs.Float(i)));
+			break;
+		case spv::OpFUnordEqual:
+			dst.move(i, CmpUEQ(lhs.Float(i), rhs.Float(i)));
+			break;
+		case spv::OpFOrdNotEqual:
+			dst.move(i, CmpNEQ(lhs.Float(i), rhs.Float(i)));
+			break;
+		case spv::OpFUnordNotEqual:
+			dst.move(i, CmpUNEQ(lhs.Float(i), rhs.Float(i)));
+			break;
+		case spv::OpFOrdLessThan:
+			dst.move(i, CmpLT(lhs.Float(i), rhs.Float(i)));
+			break;
+		case spv::OpFUnordLessThan:
+			dst.move(i, CmpULT(lhs.Float(i), rhs.Float(i)));
+			break;
+		case spv::OpFOrdGreaterThan:
+			dst.move(i, CmpGT(lhs.Float(i), rhs.Float(i)));
+			break;
+		case spv::OpFUnordGreaterThan:
+			dst.move(i, CmpUGT(lhs.Float(i), rhs.Float(i)));
+			break;
+		case spv::OpFOrdLessThanEqual:
+			dst.move(i, CmpLE(lhs.Float(i), rhs.Float(i)));
+			break;
+		case spv::OpFUnordLessThanEqual:
+			dst.move(i, CmpULE(lhs.Float(i), rhs.Float(i)));
+			break;
+		case spv::OpFOrdGreaterThanEqual:
+			dst.move(i, CmpGE(lhs.Float(i), rhs.Float(i)));
+			break;
+		case spv::OpFUnordGreaterThanEqual:
+			dst.move(i, CmpUGE(lhs.Float(i), rhs.Float(i)));
+			break;
+		case spv::OpShiftRightLogical:
+			dst.move(i, lhs.UInt(i) >> rhs.UInt(i));
+			break;
+		case spv::OpShiftRightArithmetic:
+			dst.move(i, lhs.Int(i) >> rhs.Int(i));
+			break;
+		case spv::OpShiftLeftLogical:
+			dst.move(i, lhs.UInt(i) << rhs.UInt(i));
+			break;
+		case spv::OpBitwiseOr:
+		case spv::OpLogicalOr:
+			dst.move(i, lhs.UInt(i) | rhs.UInt(i));
+			break;
+		case spv::OpBitwiseXor:
+			dst.move(i, lhs.UInt(i) ^ rhs.UInt(i));
+			break;
+		case spv::OpBitwiseAnd:
+		case spv::OpLogicalAnd:
+			dst.move(i, lhs.UInt(i) & rhs.UInt(i));
+			break;
+		case spv::OpSMulExtended:
+			// Extended ops: result is a structure containing two members of the same type as lhs & rhs.
+			// In our flat view then, component i is the i'th component of the first member;
+			// component i + N is the i'th component of the second member.
+			dst.move(i, lhs.Int(i) * rhs.Int(i));
+			dst.move(i + lhsType.componentCount, MulHigh(lhs.Int(i), rhs.Int(i)));
+			break;
+		case spv::OpUMulExtended:
+			dst.move(i, lhs.UInt(i) * rhs.UInt(i));
+			dst.move(i + lhsType.componentCount, MulHigh(lhs.UInt(i), rhs.UInt(i)));
+			break;
+		case spv::OpIAddCarry:
+			dst.move(i, lhs.UInt(i) + rhs.UInt(i));
+			dst.move(i + lhsType.componentCount, CmpLT(dst.UInt(i), lhs.UInt(i)) >> 31);
+			break;
+		case spv::OpISubBorrow:
+			dst.move(i, lhs.UInt(i) - rhs.UInt(i));
+			dst.move(i + lhsType.componentCount, CmpLT(lhs.UInt(i), rhs.UInt(i)) >> 31);
+			break;
+		default:
+			UNREACHABLE("%s", OpcodeName(insn.opcode()));
 		}
 	}
 
diff --git a/src/Pipeline/SpirvShaderControlFlow.cpp b/src/Pipeline/SpirvShaderControlFlow.cpp
index 826d9ff..68375be 100644
--- a/src/Pipeline/SpirvShaderControlFlow.cpp
+++ b/src/Pipeline/SpirvShaderControlFlow.cpp
@@ -45,75 +45,75 @@
 
 	switch(insns[1].opcode())
 	{
-		case spv::OpBranch:
-			branchInstruction = insns[1];
-			outs.emplace(Block::ID(branchInstruction.word(1)));
+	case spv::OpBranch:
+		branchInstruction = insns[1];
+		outs.emplace(Block::ID(branchInstruction.word(1)));
 
-			switch(insns[0].opcode())
-			{
-				case spv::OpLoopMerge:
-					kind = Loop;
-					mergeInstruction = insns[0];
-					mergeBlock = Block::ID(mergeInstruction.word(1));
-					continueTarget = Block::ID(mergeInstruction.word(2));
-					break;
-
-				default:
-					kind = Block::Simple;
-					break;
-			}
-			break;
-
-		case spv::OpBranchConditional:
-			branchInstruction = insns[1];
-			outs.emplace(Block::ID(branchInstruction.word(2)));
-			outs.emplace(Block::ID(branchInstruction.word(3)));
-
-			switch(insns[0].opcode())
-			{
-				case spv::OpSelectionMerge:
-					kind = StructuredBranchConditional;
-					mergeInstruction = insns[0];
-					mergeBlock = Block::ID(mergeInstruction.word(1));
-					break;
-
-				case spv::OpLoopMerge:
-					kind = Loop;
-					mergeInstruction = insns[0];
-					mergeBlock = Block::ID(mergeInstruction.word(1));
-					continueTarget = Block::ID(mergeInstruction.word(2));
-					break;
-
-				default:
-					kind = UnstructuredBranchConditional;
-					break;
-			}
-			break;
-
-		case spv::OpSwitch:
-			branchInstruction = insns[1];
-			outs.emplace(Block::ID(branchInstruction.word(2)));
-			for(uint32_t w = 4; w < branchInstruction.wordCount(); w += 2)
-			{
-				outs.emplace(Block::ID(branchInstruction.word(w)));
-			}
-
-			switch(insns[0].opcode())
-			{
-				case spv::OpSelectionMerge:
-					kind = StructuredSwitch;
-					mergeInstruction = insns[0];
-					mergeBlock = Block::ID(mergeInstruction.word(1));
-					break;
-
-				default:
-					kind = UnstructuredSwitch;
-					break;
-			}
+		switch(insns[0].opcode())
+		{
+		case spv::OpLoopMerge:
+			kind = Loop;
+			mergeInstruction = insns[0];
+			mergeBlock = Block::ID(mergeInstruction.word(1));
+			continueTarget = Block::ID(mergeInstruction.word(2));
 			break;
 
 		default:
+			kind = Block::Simple;
 			break;
+		}
+		break;
+
+	case spv::OpBranchConditional:
+		branchInstruction = insns[1];
+		outs.emplace(Block::ID(branchInstruction.word(2)));
+		outs.emplace(Block::ID(branchInstruction.word(3)));
+
+		switch(insns[0].opcode())
+		{
+		case spv::OpSelectionMerge:
+			kind = StructuredBranchConditional;
+			mergeInstruction = insns[0];
+			mergeBlock = Block::ID(mergeInstruction.word(1));
+			break;
+
+		case spv::OpLoopMerge:
+			kind = Loop;
+			mergeInstruction = insns[0];
+			mergeBlock = Block::ID(mergeInstruction.word(1));
+			continueTarget = Block::ID(mergeInstruction.word(2));
+			break;
+
+		default:
+			kind = UnstructuredBranchConditional;
+			break;
+		}
+		break;
+
+	case spv::OpSwitch:
+		branchInstruction = insns[1];
+		outs.emplace(Block::ID(branchInstruction.word(2)));
+		for(uint32_t w = 4; w < branchInstruction.wordCount(); w += 2)
+		{
+			outs.emplace(Block::ID(branchInstruction.word(w)));
+		}
+
+		switch(insns[0].opcode())
+		{
+		case spv::OpSelectionMerge:
+			kind = StructuredSwitch;
+			mergeInstruction = insns[0];
+			mergeBlock = Block::ID(mergeInstruction.word(1));
+			break;
+
+		default:
+			kind = UnstructuredSwitch;
+			break;
+		}
+		break;
+
+	default:
+		break;
 	}
 }
 
@@ -264,20 +264,20 @@
 
 		switch(block.kind)
 		{
-			case Block::Simple:
-			case Block::StructuredBranchConditional:
-			case Block::UnstructuredBranchConditional:
-			case Block::StructuredSwitch:
-			case Block::UnstructuredSwitch:
-				EmitNonLoop(state);
-				break;
+		case Block::Simple:
+		case Block::StructuredBranchConditional:
+		case Block::UnstructuredBranchConditional:
+		case Block::StructuredSwitch:
+		case Block::UnstructuredSwitch:
+			EmitNonLoop(state);
+			break;
 
-			case Block::Loop:
-				EmitLoop(state);
-				break;
+		case Block::Loop:
+			EmitLoop(state);
+			break;
 
-			default:
-				UNREACHABLE("Unexpected Block Kind: %d", int(block.kind));
+		default:
+			UNREACHABLE("Unexpected Block Kind: %d", int(block.kind));
 		}
 	}
 
@@ -629,15 +629,15 @@
 
 	switch(executionScope)
 	{
-		case spv::ScopeWorkgroup:
-			Yield(YieldResult::ControlBarrier);
-			break;
-		case spv::ScopeSubgroup:
-			break;
-		default:
-			// See Vulkan 1.1 spec, Appendix A, Validation Rules within a Module.
-			UNREACHABLE("Scope for execution must be limited to Workgroup or Subgroup");
-			break;
+	case spv::ScopeWorkgroup:
+		Yield(YieldResult::ControlBarrier);
+		break;
+	case spv::ScopeSubgroup:
+		break;
+	default:
+		// See Vulkan 1.1 spec, Appendix A, Validation Rules within a Module.
+		UNREACHABLE("Scope for execution must be limited to Workgroup or Subgroup");
+		break;
 	}
 
 	return EmitResult::Continue;
diff --git a/src/Pipeline/SpirvShaderDebug.hpp b/src/Pipeline/SpirvShaderDebug.hpp
index 6aa0c7c..5418a8b 100644
--- a/src/Pipeline/SpirvShaderDebug.hpp
+++ b/src/Pipeline/SpirvShaderDebug.hpp
@@ -75,12 +75,12 @@
 	{
 		switch(v.typeHint)
 		{
-			case sw::Intermediate::TypeHint::Float:
-				return PrintValue::Ty<sw::SIMD::Float>::fmt(v.Float(i));
-			case sw::Intermediate::TypeHint::Int:
-				return PrintValue::Ty<sw::SIMD::Int>::fmt(v.Int(i));
-			case sw::Intermediate::TypeHint::UInt:
-				return PrintValue::Ty<sw::SIMD::UInt>::fmt(v.UInt(i));
+		case sw::Intermediate::TypeHint::Float:
+			return PrintValue::Ty<sw::SIMD::Float>::fmt(v.Float(i));
+		case sw::Intermediate::TypeHint::Int:
+			return PrintValue::Ty<sw::SIMD::Int>::fmt(v.Int(i));
+		case sw::Intermediate::TypeHint::UInt:
+			return PrintValue::Ty<sw::SIMD::UInt>::fmt(v.UInt(i));
 		}
 		return "";
 	}
@@ -89,12 +89,12 @@
 	{
 		switch(v.typeHint)
 		{
-			case sw::Intermediate::TypeHint::Float:
-				return PrintValue::Ty<sw::SIMD::Float>::val(v.Float(i));
-			case sw::Intermediate::TypeHint::Int:
-				return PrintValue::Ty<sw::SIMD::Int>::val(v.Int(i));
-			case sw::Intermediate::TypeHint::UInt:
-				return PrintValue::Ty<sw::SIMD::UInt>::val(v.UInt(i));
+		case sw::Intermediate::TypeHint::Float:
+			return PrintValue::Ty<sw::SIMD::Float>::val(v.Float(i));
+		case sw::Intermediate::TypeHint::Int:
+			return PrintValue::Ty<sw::SIMD::Int>::val(v.Int(i));
+		case sw::Intermediate::TypeHint::UInt:
+			return PrintValue::Ty<sw::SIMD::UInt>::val(v.UInt(i));
 		}
 		return {};
 	}
diff --git a/src/Pipeline/SpirvShaderDebugger.cpp b/src/Pipeline/SpirvShaderDebugger.cpp
index 4120b1f..0d3704b 100644
--- a/src/Pipeline/SpirvShaderDebugger.cpp
+++ b/src/Pipeline/SpirvShaderDebugger.cpp
@@ -120,10 +120,10 @@
 {
 	switch(type)
 	{
-		case spv::ExecutionModelGLCompute: return name == "ComputeShader";
-		case spv::ExecutionModelFragment: return name == "FragmentShader";
-		case spv::ExecutionModelVertex: return name == "VertexShader";
-		default: return false;
+	case spv::ExecutionModelGLCompute: return name == "ComputeShader";
+	case spv::ExecutionModelFragment: return name == "FragmentShader";
+	case spv::ExecutionModelVertex: return name == "VertexShader";
+	default: return false;
 	}
 }
 
@@ -382,27 +382,27 @@
 {
 	switch(k)
 	{
-		case Object::Kind::Object: return "Object";
-		case Object::Kind::Declare: return "Declare";
-		case Object::Kind::Expression: return "Expression";
-		case Object::Kind::Function: return "Function";
-		case Object::Kind::InlinedAt: return "InlinedAt";
-		case Object::Kind::GlobalVariable: return "GlobalVariable";
-		case Object::Kind::LocalVariable: return "LocalVariable";
-		case Object::Kind::Member: return "Member";
-		case Object::Kind::Operation: return "Operation";
-		case Object::Kind::Source: return "Source";
-		case Object::Kind::SourceScope: return "SourceScope";
-		case Object::Kind::Value: return "Value";
-		case Object::Kind::TemplateParameter: return "TemplateParameter";
-		case Object::Kind::CompilationUnit: return "CompilationUnit";
-		case Object::Kind::LexicalBlock: return "LexicalBlock";
-		case Object::Kind::BasicType: return "BasicType";
-		case Object::Kind::ArrayType: return "ArrayType";
-		case Object::Kind::VectorType: return "VectorType";
-		case Object::Kind::FunctionType: return "FunctionType";
-		case Object::Kind::CompositeType: return "CompositeType";
-		case Object::Kind::TemplateType: return "TemplateType";
+	case Object::Kind::Object: return "Object";
+	case Object::Kind::Declare: return "Declare";
+	case Object::Kind::Expression: return "Expression";
+	case Object::Kind::Function: return "Function";
+	case Object::Kind::InlinedAt: return "InlinedAt";
+	case Object::Kind::GlobalVariable: return "GlobalVariable";
+	case Object::Kind::LocalVariable: return "LocalVariable";
+	case Object::Kind::Member: return "Member";
+	case Object::Kind::Operation: return "Operation";
+	case Object::Kind::Source: return "Source";
+	case Object::Kind::SourceScope: return "SourceScope";
+	case Object::Kind::Value: return "Value";
+	case Object::Kind::TemplateParameter: return "TemplateParameter";
+	case Object::Kind::CompilationUnit: return "CompilationUnit";
+	case Object::Kind::LexicalBlock: return "LexicalBlock";
+	case Object::Kind::BasicType: return "BasicType";
+	case Object::Kind::ArrayType: return "ArrayType";
+	case Object::Kind::VectorType: return "VectorType";
+	case Object::Kind::FunctionType: return "FunctionType";
+	case Object::Kind::CompositeType: return "CompositeType";
+	case Object::Kind::TemplateType: return "TemplateType";
 	}
 	return "<unknown>";
 }
@@ -563,25 +563,25 @@
 
 		switch(encoding)
 		{
-			case OpenCLDebugInfo100Address:
-				// return vk::dbg::make_reference(*static_cast<void **>(ptr));
-				UNIMPLEMENTED("b/148401179 OpenCLDebugInfo100 OpenCLDebugInfo100Address BasicType");
-				return nullptr;
-			case OpenCLDebugInfo100Boolean:
-				return vk::dbg::make_reference(*static_cast<bool *>(ptr));
-			case OpenCLDebugInfo100Float:
-				return vk::dbg::make_reference(*static_cast<float *>(ptr));
-			case OpenCLDebugInfo100Signed:
-				return vk::dbg::make_reference(*static_cast<int32_t *>(ptr));
-			case OpenCLDebugInfo100SignedChar:
-				return vk::dbg::make_reference(*static_cast<int8_t *>(ptr));
-			case OpenCLDebugInfo100Unsigned:
-				return vk::dbg::make_reference(*static_cast<uint32_t *>(ptr));
-			case OpenCLDebugInfo100UnsignedChar:
-				return vk::dbg::make_reference(*static_cast<uint8_t *>(ptr));
-			default:
-				UNIMPLEMENTED("b/148401179 OpenCLDebugInfo100 encoding %d", int(encoding));
-				return nullptr;
+		case OpenCLDebugInfo100Address:
+			// return vk::dbg::make_reference(*static_cast<void **>(ptr));
+			UNIMPLEMENTED("b/148401179 OpenCLDebugInfo100 OpenCLDebugInfo100Address BasicType");
+			return nullptr;
+		case OpenCLDebugInfo100Boolean:
+			return vk::dbg::make_reference(*static_cast<bool *>(ptr));
+		case OpenCLDebugInfo100Float:
+			return vk::dbg::make_reference(*static_cast<float *>(ptr));
+		case OpenCLDebugInfo100Signed:
+			return vk::dbg::make_reference(*static_cast<int32_t *>(ptr));
+		case OpenCLDebugInfo100SignedChar:
+			return vk::dbg::make_reference(*static_cast<int8_t *>(ptr));
+		case OpenCLDebugInfo100Unsigned:
+			return vk::dbg::make_reference(*static_cast<uint32_t *>(ptr));
+		case OpenCLDebugInfo100UnsignedChar:
+			return vk::dbg::make_reference(*static_cast<uint8_t *>(ptr));
+		default:
+			UNIMPLEMENTED("b/148401179 OpenCLDebugInfo100 encoding %d", int(encoding));
+			return nullptr;
 		}
 	}
 };
@@ -917,10 +917,10 @@
 {
 	switch(def)
 	{
-		case LocalVariable::Definition::Undefined: return "Undefined";
-		case LocalVariable::Definition::Declaration: return "Declaration";
-		case LocalVariable::Definition::Values: return "Values";
-		default: return "<unknown>";
+	case LocalVariable::Definition::Undefined: return "Undefined";
+	case LocalVariable::Definition::Declaration: return "Declaration";
+	case LocalVariable::Definition::Values: return "Values";
+	default: return "<unknown>";
 	}
 }
 
@@ -1562,12 +1562,12 @@
 	auto id = SpirvID<T>(insn.word(2));
 	switch(pass)
 	{
-		case Pass::Define:
-			add(id, std::unique_ptr<debug::Object>(new T()));
-			break;
-		case Pass::Emit:
-			emit(get<T>(id));
-			break;
+	case Pass::Define:
+		add(id, std::unique_ptr<debug::Object>(new T()));
+		break;
+	case Pass::Emit:
+		emit(get<T>(id));
+		break;
 	}
 }
 
@@ -1576,320 +1576,320 @@
 	auto extInstIndex = insn.word(4);
 	switch(extInstIndex)
 	{
-		case OpenCLDebugInfo100DebugInfoNone:
-			if(pass == Pass::Define)
+	case OpenCLDebugInfo100DebugInfoNone:
+		if(pass == Pass::Define)
+		{
+			addNone(debug::Object::ID(insn.word(2)));
+		}
+		break;
+	case OpenCLDebugInfo100DebugCompilationUnit:
+		defineOrEmit(insn, pass, [&](debug::CompilationUnit *cu) {
+			cu->source = get(debug::Source::ID(insn.word(7)));
+		});
+		break;
+	case OpenCLDebugInfo100DebugTypeBasic:
+		defineOrEmit(insn, pass, [&](debug::BasicType *type) {
+			type->name_ = shader->getString(insn.word(5));
+			type->size = shader->GetConstScalarInt(insn.word(6));
+			type->encoding = static_cast<OpenCLDebugInfo100DebugBaseTypeAttributeEncoding>(insn.word(7));
+		});
+		break;
+	case OpenCLDebugInfo100DebugTypeArray:
+		defineOrEmit(insn, pass, [&](debug::ArrayType *type) {
+			type->base = get(debug::Type::ID(insn.word(5)));
+			type->size = shader->GetConstScalarInt(insn.word(6));
+			for(uint32_t i = 7; i < insn.wordCount(); i++)
 			{
-				addNone(debug::Object::ID(insn.word(2)));
+				// Decompose multi-dimensional into nested single
+				// dimensional arrays. Greatly simplifies logic.
+				auto inner = new debug::ArrayType();
+				inner->base = type->base;
+				type->size = shader->GetConstScalarInt(insn.word(i));
+				type->base = inner;
+				type->ownsBase = true;
+				type = inner;
 			}
-			break;
-		case OpenCLDebugInfo100DebugCompilationUnit:
-			defineOrEmit(insn, pass, [&](debug::CompilationUnit *cu) {
-				cu->source = get(debug::Source::ID(insn.word(7)));
-			});
-			break;
-		case OpenCLDebugInfo100DebugTypeBasic:
-			defineOrEmit(insn, pass, [&](debug::BasicType *type) {
-				type->name_ = shader->getString(insn.word(5));
-				type->size = shader->GetConstScalarInt(insn.word(6));
-				type->encoding = static_cast<OpenCLDebugInfo100DebugBaseTypeAttributeEncoding>(insn.word(7));
-			});
-			break;
-		case OpenCLDebugInfo100DebugTypeArray:
-			defineOrEmit(insn, pass, [&](debug::ArrayType *type) {
-				type->base = get(debug::Type::ID(insn.word(5)));
-				type->size = shader->GetConstScalarInt(insn.word(6));
-				for(uint32_t i = 7; i < insn.wordCount(); i++)
-				{
-					// Decompose multi-dimensional into nested single
-					// dimensional arrays. Greatly simplifies logic.
-					auto inner = new debug::ArrayType();
-					inner->base = type->base;
-					type->size = shader->GetConstScalarInt(insn.word(i));
-					type->base = inner;
-					type->ownsBase = true;
-					type = inner;
-				}
-			});
-			break;
-		case OpenCLDebugInfo100DebugTypeVector:
-			defineOrEmit(insn, pass, [&](debug::VectorType *type) {
-				type->base = get(debug::Type::ID(insn.word(5)));
-				type->components = insn.word(6);
-			});
-			break;
-		case OpenCLDebugInfo100DebugTypeFunction:
-			defineOrEmit(insn, pass, [&](debug::FunctionType *type) {
-				type->flags = insn.word(5);
-				type->returnTy = getOrNull(debug::Type::ID(insn.word(6)));
+		});
+		break;
+	case OpenCLDebugInfo100DebugTypeVector:
+		defineOrEmit(insn, pass, [&](debug::VectorType *type) {
+			type->base = get(debug::Type::ID(insn.word(5)));
+			type->components = insn.word(6);
+		});
+		break;
+	case OpenCLDebugInfo100DebugTypeFunction:
+		defineOrEmit(insn, pass, [&](debug::FunctionType *type) {
+			type->flags = insn.word(5);
+			type->returnTy = getOrNull(debug::Type::ID(insn.word(6)));
 
-				// 'Return Type' operand must be a debug type or OpTypeVoid. See
-				// https://www.khronos.org/registry/spir-v/specs/unified1/OpenCL.DebugInfo.100.html#DebugTypeFunction
-				ASSERT_MSG(type->returnTy != nullptr || shader->getType(insn.word(6)).opcode() == spv::Op::OpTypeVoid, "Invalid return type of DebugTypeFunction: %d", insn.word(6));
+			// 'Return Type' operand must be a debug type or OpTypeVoid. See
+			// https://www.khronos.org/registry/spir-v/specs/unified1/OpenCL.DebugInfo.100.html#DebugTypeFunction
+			ASSERT_MSG(type->returnTy != nullptr || shader->getType(insn.word(6)).opcode() == spv::Op::OpTypeVoid, "Invalid return type of DebugTypeFunction: %d", insn.word(6));
 
-				for(uint32_t i = 7; i < insn.wordCount(); i++)
+			for(uint32_t i = 7; i < insn.wordCount(); i++)
+			{
+				type->paramTys.push_back(get(debug::Type::ID(insn.word(i))));
+			}
+		});
+		break;
+	case OpenCLDebugInfo100DebugTypeComposite:
+		defineOrEmit(insn, pass, [&](debug::CompositeType *type) {
+			type->name_ = shader->getString(insn.word(5));
+			type->tag = static_cast<OpenCLDebugInfo100DebugCompositeType>(insn.word(6));
+			type->source = get(debug::Source::ID(insn.word(7)));
+			type->line = insn.word(8);
+			type->column = insn.word(9);
+			type->parent = get(debug::Object::ID(insn.word(10)));
+			type->linkage = shader->getString(insn.word(11));
+			type->size = isNone(insn.word(12)) ? 0 : shader->GetConstScalarInt(insn.word(12));
+			type->flags = insn.word(13);
+			for(uint32_t i = 14; i < insn.wordCount(); i++)
+			{
+				auto obj = get(debug::Object::ID(insn.word(i)));
+				if(auto member = debug::cast<debug::Member>(obj))  // Can also be Function or TypeInheritance, which we don't care about.
 				{
-					type->paramTys.push_back(get(debug::Type::ID(insn.word(i))));
+					type->members_.push_back(member);
 				}
-			});
-			break;
-		case OpenCLDebugInfo100DebugTypeComposite:
-			defineOrEmit(insn, pass, [&](debug::CompositeType *type) {
-				type->name_ = shader->getString(insn.word(5));
-				type->tag = static_cast<OpenCLDebugInfo100DebugCompositeType>(insn.word(6));
-				type->source = get(debug::Source::ID(insn.word(7)));
-				type->line = insn.word(8);
-				type->column = insn.word(9);
-				type->parent = get(debug::Object::ID(insn.word(10)));
-				type->linkage = shader->getString(insn.word(11));
-				type->size = isNone(insn.word(12)) ? 0 : shader->GetConstScalarInt(insn.word(12));
-				type->flags = insn.word(13);
-				for(uint32_t i = 14; i < insn.wordCount(); i++)
-				{
-					auto obj = get(debug::Object::ID(insn.word(i)));
-					if(auto member = debug::cast<debug::Member>(obj))  // Can also be Function or TypeInheritance, which we don't care about.
-					{
-						type->members_.push_back(member);
-					}
-				}
-			});
-			break;
-		case OpenCLDebugInfo100DebugTypeMember:
-			defineOrEmit(insn, pass, [&](debug::Member *member) {
-				member->name = shader->getString(insn.word(5));
-				member->type = get(debug::Type::ID(insn.word(6)));
-				member->source = get(debug::Source::ID(insn.word(7)));
-				member->line = insn.word(8);
-				member->column = insn.word(9);
-				member->parent = get(debug::CompositeType::ID(insn.word(10)));
-				member->offset = shader->GetConstScalarInt(insn.word(11));
-				member->size = shader->GetConstScalarInt(insn.word(12));
-				member->flags = insn.word(13);
-			});
-			break;
-		case OpenCLDebugInfo100DebugTypeTemplate:
-			defineOrEmit(insn, pass, [&](debug::TemplateType *tpl) {
-				tpl->target = get(debug::Type::ID(insn.word(5)));
-				for(size_t i = 6, c = insn.wordCount(); i < c; i++)
-				{
-					tpl->parameters.emplace_back(get(debug::TemplateParameter::ID(insn.word(i))));
-				}
-			});
-			break;
-		case OpenCLDebugInfo100DebugTypeTemplateParameter:
-			defineOrEmit(insn, pass, [&](debug::TemplateParameter *param) {
-				param->name = shader->getString(insn.word(5));
-				param->type = get(debug::Type::ID(insn.word(6)));
-				param->value = 0;  // TODO: Get value from OpConstant if "a template value parameter".
-				param->source = get(debug::Source::ID(insn.word(8)));
-				param->line = insn.word(9);
-				param->column = insn.word(10);
-			});
-			break;
-		case OpenCLDebugInfo100DebugGlobalVariable:
-			defineOrEmit(insn, pass, [&](debug::GlobalVariable *var) {
-				var->name = shader->getString(insn.word(5));
-				var->type = get(debug::Type::ID(insn.word(6)));
-				var->source = get(debug::Source::ID(insn.word(7)));
-				var->line = insn.word(8);
-				var->column = insn.word(9);
-				var->parent = get(debug::Scope::ID(insn.word(10)));
-				var->linkage = shader->getString(insn.word(11));
-				var->variable = isNone(insn.word(12)) ? 0 : insn.word(12);
-				var->flags = insn.word(13);
-				// static member declaration: word(14)
-			});
-			break;
-		case OpenCLDebugInfo100DebugFunction:
-			defineOrEmit(insn, pass, [&](debug::Function *func) {
-				func->name = shader->getString(insn.word(5));
-				func->type = get(debug::FunctionType::ID(insn.word(6)));
-				func->source = get(debug::Source::ID(insn.word(7)));
-				func->declLine = insn.word(8);
-				func->declColumn = insn.word(9);
-				func->parent = get(debug::Scope::ID(insn.word(10)));
-				func->linkage = shader->getString(insn.word(11));
-				func->flags = insn.word(12);
-				func->line = insn.word(13);
-				func->function = Function::ID(insn.word(14));
-				// declaration: word(13)
-			});
-			break;
-		case OpenCLDebugInfo100DebugLexicalBlock:
-			defineOrEmit(insn, pass, [&](debug::LexicalBlock *scope) {
-				scope->source = get(debug::Source::ID(insn.word(5)));
-				scope->line = insn.word(6);
-				scope->column = insn.word(7);
-				scope->parent = get(debug::Scope::ID(insn.word(8)));
-				if(insn.wordCount() > 9)
-				{
-					scope->name = shader->getString(insn.word(9));
-				}
-			});
-			break;
-		case OpenCLDebugInfo100DebugScope:
-			defineOrEmit(insn, pass, [&](debug::SourceScope *ss) {
-				ss->scope = get(debug::Scope::ID(insn.word(5)));
-				if(insn.wordCount() > 6)
-				{
-					ss->inlinedAt = get(debug::InlinedAt::ID(insn.word(6)));
-				}
-				setScope(ss);
-			});
-			break;
-		case OpenCLDebugInfo100DebugNoScope:
-			break;
-		case OpenCLDebugInfo100DebugInlinedAt:
-			defineOrEmit(insn, pass, [&](debug::InlinedAt *ia) {
-				ia->line = insn.word(5);
-				ia->scope = get(debug::Scope::ID(insn.word(6)));
-				if(insn.wordCount() > 7)
-				{
-					ia->inlined = get(debug::InlinedAt::ID(insn.word(7)));
-				}
-			});
-			break;
-		case OpenCLDebugInfo100DebugLocalVariable:
-			defineOrEmit(insn, pass, [&](debug::LocalVariable *var) {
-				var->name = shader->getString(insn.word(5));
-				var->type = get(debug::Type::ID(insn.word(6)));
-				var->source = get(debug::Source::ID(insn.word(7)));
-				var->line = insn.word(8);
-				var->column = insn.word(9);
-				var->parent = get(debug::Scope::ID(insn.word(10)));
-				if(insn.wordCount() > 11)
-				{
-					var->arg = insn.word(11);
-				}
-				if(auto block = debug::find<debug::LexicalBlock>(var->parent))
-				{
-					block->variables.emplace_back(var);
-				}
-			});
-			break;
-		case OpenCLDebugInfo100DebugDeclare:
-			defineOrEmit(insn, pass, [&](debug::Declare *decl) {
-				decl->local = get(debug::LocalVariable::ID(insn.word(5)));
-				decl->variable = Object::ID(insn.word(6));
-				decl->expression = get(debug::Expression::ID(insn.word(7)));
+			}
+		});
+		break;
+	case OpenCLDebugInfo100DebugTypeMember:
+		defineOrEmit(insn, pass, [&](debug::Member *member) {
+			member->name = shader->getString(insn.word(5));
+			member->type = get(debug::Type::ID(insn.word(6)));
+			member->source = get(debug::Source::ID(insn.word(7)));
+			member->line = insn.word(8);
+			member->column = insn.word(9);
+			member->parent = get(debug::CompositeType::ID(insn.word(10)));
+			member->offset = shader->GetConstScalarInt(insn.word(11));
+			member->size = shader->GetConstScalarInt(insn.word(12));
+			member->flags = insn.word(13);
+		});
+		break;
+	case OpenCLDebugInfo100DebugTypeTemplate:
+		defineOrEmit(insn, pass, [&](debug::TemplateType *tpl) {
+			tpl->target = get(debug::Type::ID(insn.word(5)));
+			for(size_t i = 6, c = insn.wordCount(); i < c; i++)
+			{
+				tpl->parameters.emplace_back(get(debug::TemplateParameter::ID(insn.word(i))));
+			}
+		});
+		break;
+	case OpenCLDebugInfo100DebugTypeTemplateParameter:
+		defineOrEmit(insn, pass, [&](debug::TemplateParameter *param) {
+			param->name = shader->getString(insn.word(5));
+			param->type = get(debug::Type::ID(insn.word(6)));
+			param->value = 0;  // TODO: Get value from OpConstant if "a template value parameter".
+			param->source = get(debug::Source::ID(insn.word(8)));
+			param->line = insn.word(9);
+			param->column = insn.word(10);
+		});
+		break;
+	case OpenCLDebugInfo100DebugGlobalVariable:
+		defineOrEmit(insn, pass, [&](debug::GlobalVariable *var) {
+			var->name = shader->getString(insn.word(5));
+			var->type = get(debug::Type::ID(insn.word(6)));
+			var->source = get(debug::Source::ID(insn.word(7)));
+			var->line = insn.word(8);
+			var->column = insn.word(9);
+			var->parent = get(debug::Scope::ID(insn.word(10)));
+			var->linkage = shader->getString(insn.word(11));
+			var->variable = isNone(insn.word(12)) ? 0 : insn.word(12);
+			var->flags = insn.word(13);
+			// static member declaration: word(14)
+		});
+		break;
+	case OpenCLDebugInfo100DebugFunction:
+		defineOrEmit(insn, pass, [&](debug::Function *func) {
+			func->name = shader->getString(insn.word(5));
+			func->type = get(debug::FunctionType::ID(insn.word(6)));
+			func->source = get(debug::Source::ID(insn.word(7)));
+			func->declLine = insn.word(8);
+			func->declColumn = insn.word(9);
+			func->parent = get(debug::Scope::ID(insn.word(10)));
+			func->linkage = shader->getString(insn.word(11));
+			func->flags = insn.word(12);
+			func->line = insn.word(13);
+			func->function = Function::ID(insn.word(14));
+			// declaration: word(13)
+		});
+		break;
+	case OpenCLDebugInfo100DebugLexicalBlock:
+		defineOrEmit(insn, pass, [&](debug::LexicalBlock *scope) {
+			scope->source = get(debug::Source::ID(insn.word(5)));
+			scope->line = insn.word(6);
+			scope->column = insn.word(7);
+			scope->parent = get(debug::Scope::ID(insn.word(8)));
+			if(insn.wordCount() > 9)
+			{
+				scope->name = shader->getString(insn.word(9));
+			}
+		});
+		break;
+	case OpenCLDebugInfo100DebugScope:
+		defineOrEmit(insn, pass, [&](debug::SourceScope *ss) {
+			ss->scope = get(debug::Scope::ID(insn.word(5)));
+			if(insn.wordCount() > 6)
+			{
+				ss->inlinedAt = get(debug::InlinedAt::ID(insn.word(6)));
+			}
+			setScope(ss);
+		});
+		break;
+	case OpenCLDebugInfo100DebugNoScope:
+		break;
+	case OpenCLDebugInfo100DebugInlinedAt:
+		defineOrEmit(insn, pass, [&](debug::InlinedAt *ia) {
+			ia->line = insn.word(5);
+			ia->scope = get(debug::Scope::ID(insn.word(6)));
+			if(insn.wordCount() > 7)
+			{
+				ia->inlined = get(debug::InlinedAt::ID(insn.word(7)));
+			}
+		});
+		break;
+	case OpenCLDebugInfo100DebugLocalVariable:
+		defineOrEmit(insn, pass, [&](debug::LocalVariable *var) {
+			var->name = shader->getString(insn.word(5));
+			var->type = get(debug::Type::ID(insn.word(6)));
+			var->source = get(debug::Source::ID(insn.word(7)));
+			var->line = insn.word(8);
+			var->column = insn.word(9);
+			var->parent = get(debug::Scope::ID(insn.word(10)));
+			if(insn.wordCount() > 11)
+			{
+				var->arg = insn.word(11);
+			}
+			if(auto block = debug::find<debug::LexicalBlock>(var->parent))
+			{
+				block->variables.emplace_back(var);
+			}
+		});
+		break;
+	case OpenCLDebugInfo100DebugDeclare:
+		defineOrEmit(insn, pass, [&](debug::Declare *decl) {
+			decl->local = get(debug::LocalVariable::ID(insn.word(5)));
+			decl->variable = Object::ID(insn.word(6));
+			decl->expression = get(debug::Expression::ID(insn.word(7)));
 
-				decl->local->declaration = decl;
+			decl->local->declaration = decl;
 
-				ASSERT_MSG(decl->local->definition == debug::LocalVariable::Definition::Undefined,
+			ASSERT_MSG(decl->local->definition == debug::LocalVariable::Definition::Undefined,
+			           "DebugLocalVariable '%s' declared at %s:%d was previously defined as %s, now again as %s",
+			           decl->local->name.c_str(),
+			           decl->local->source ? decl->local->source->file.c_str() : "<unknown>",
+			           (int)decl->local->line,
+			           tostring(decl->local->definition),
+			           tostring(debug::LocalVariable::Definition::Declaration));
+			decl->local->definition = debug::LocalVariable::Definition::Declaration;
+		});
+		break;
+	case OpenCLDebugInfo100DebugValue:
+		defineOrEmit(insn, pass, [&](debug::Value *value) {
+			value->local = get(debug::LocalVariable::ID(insn.word(5)));
+			value->value = insn.word(6);
+			value->expression = get(debug::Expression::ID(insn.word(7)));
+
+			if(value->local->definition == debug::LocalVariable::Definition::Undefined)
+			{
+				value->local->definition = debug::LocalVariable::Definition::Values;
+			}
+			else
+			{
+				ASSERT_MSG(value->local->definition == debug::LocalVariable::Definition::Values,
 				           "DebugLocalVariable '%s' declared at %s:%d was previously defined as %s, now again as %s",
-				           decl->local->name.c_str(),
-				           decl->local->source ? decl->local->source->file.c_str() : "<unknown>",
-				           (int)decl->local->line,
-				           tostring(decl->local->definition),
-				           tostring(debug::LocalVariable::Definition::Declaration));
-				decl->local->definition = debug::LocalVariable::Definition::Declaration;
-			});
-			break;
-		case OpenCLDebugInfo100DebugValue:
-			defineOrEmit(insn, pass, [&](debug::Value *value) {
-				value->local = get(debug::LocalVariable::ID(insn.word(5)));
-				value->value = insn.word(6);
-				value->expression = get(debug::Expression::ID(insn.word(7)));
+				           value->local->name.c_str(),
+				           value->local->source ? value->local->source->file.c_str() : "<unknown>",
+				           (int)value->local->line,
+				           tostring(value->local->definition),
+				           tostring(debug::LocalVariable::Definition::Values));
+			}
 
-				if(value->local->definition == debug::LocalVariable::Definition::Undefined)
+			auto node = &value->local->values;
+			for(uint32_t i = 8; i < insn.wordCount(); i++)
+			{
+				auto idx = shader->GetConstScalarInt(insn.word(i));
+				value->indexes.push_back(idx);
+
+				auto it = node->children.find(idx);
+				if(it != node->children.end())
 				{
-					value->local->definition = debug::LocalVariable::Definition::Values;
+					node = it->second.get();
 				}
 				else
 				{
-					ASSERT_MSG(value->local->definition == debug::LocalVariable::Definition::Values,
-					           "DebugLocalVariable '%s' declared at %s:%d was previously defined as %s, now again as %s",
-					           value->local->name.c_str(),
-					           value->local->source ? value->local->source->file.c_str() : "<unknown>",
-					           (int)value->local->line,
-					           tostring(value->local->definition),
-					           tostring(debug::LocalVariable::Definition::Values));
+					auto parent = node;
+					auto child = std::make_unique<debug::LocalVariable::ValueNode>();
+					node = child.get();
+					parent->children.emplace(idx, std::move(child));
 				}
+			}
 
-				auto node = &value->local->values;
-				for(uint32_t i = 8; i < insn.wordCount(); i++)
-				{
-					auto idx = shader->GetConstScalarInt(insn.word(i));
-					value->indexes.push_back(idx);
+			if(node->debugValueIndex == debug::LocalVariable::ValueNode::NoDebugValueIndex)
+			{
+				node->debugValueIndex = numDebugValueSlots++;
+			}
 
-					auto it = node->children.find(idx);
-					if(it != node->children.end())
-					{
-						node = it->second.get();
-					}
-					else
-					{
-						auto parent = node;
-						auto child = std::make_unique<debug::LocalVariable::ValueNode>();
-						node = child.get();
-						parent->children.emplace(idx, std::move(child));
-					}
-				}
+			rr::Pointer<rr::Pointer<Byte>> lastReachedArray = *rr::Pointer<rr::Pointer<rr::Pointer<Byte>>>(
+			    state->routine->dbgState + OFFSET(Impl::Debugger::State, lastReachedDebugValues));
+			rr::Pointer<rr::Pointer<Byte>> lastReached = &lastReachedArray[node->debugValueIndex];
+			*lastReached = rr::ConstantPointer(value);
+		});
+		break;
+	case OpenCLDebugInfo100DebugExpression:
+		defineOrEmit(insn, pass, [&](debug::Expression *expr) {
+			for(uint32_t i = 5; i < insn.wordCount(); i++)
+			{
+				expr->operations.push_back(get(debug::Operation::ID(insn.word(i))));
+			}
+		});
+		break;
+	case OpenCLDebugInfo100DebugSource:
+		defineOrEmit(insn, pass, [&](debug::Source *source) {
+			source->file = shader->getString(insn.word(5));
+			if(insn.wordCount() > 6)
+			{
+				source->source = shader->getString(insn.word(6));
+				auto file = ctx->lock().createVirtualFile(source->file.c_str(), source->source.c_str());
+				source->dbgFile = file;
+				files.emplace(source->file.c_str(), file);
+			}
+			else
+			{
+				auto file = ctx->lock().createPhysicalFile(source->file.c_str());
+				source->dbgFile = file;
+				files.emplace(source->file.c_str(), file);
+			}
+		});
+		break;
+	case OpenCLDebugInfo100DebugOperation:
+		defineOrEmit(insn, pass, [&](debug::Operation *operation) {
+			operation->opcode = insn.word(5);
+			for(uint32_t i = 6; i < insn.wordCount(); i++)
+			{
+				operation->operands.push_back(insn.word(i));
+			}
+		});
+		break;
 
-				if(node->debugValueIndex == debug::LocalVariable::ValueNode::NoDebugValueIndex)
-				{
-					node->debugValueIndex = numDebugValueSlots++;
-				}
-
-				rr::Pointer<rr::Pointer<Byte>> lastReachedArray = *rr::Pointer<rr::Pointer<rr::Pointer<Byte>>>(
-				    state->routine->dbgState + OFFSET(Impl::Debugger::State, lastReachedDebugValues));
-				rr::Pointer<rr::Pointer<Byte>> lastReached = &lastReachedArray[node->debugValueIndex];
-				*lastReached = rr::ConstantPointer(value);
-			});
-			break;
-		case OpenCLDebugInfo100DebugExpression:
-			defineOrEmit(insn, pass, [&](debug::Expression *expr) {
-				for(uint32_t i = 5; i < insn.wordCount(); i++)
-				{
-					expr->operations.push_back(get(debug::Operation::ID(insn.word(i))));
-				}
-			});
-			break;
-		case OpenCLDebugInfo100DebugSource:
-			defineOrEmit(insn, pass, [&](debug::Source *source) {
-				source->file = shader->getString(insn.word(5));
-				if(insn.wordCount() > 6)
-				{
-					source->source = shader->getString(insn.word(6));
-					auto file = ctx->lock().createVirtualFile(source->file.c_str(), source->source.c_str());
-					source->dbgFile = file;
-					files.emplace(source->file.c_str(), file);
-				}
-				else
-				{
-					auto file = ctx->lock().createPhysicalFile(source->file.c_str());
-					source->dbgFile = file;
-					files.emplace(source->file.c_str(), file);
-				}
-			});
-			break;
-		case OpenCLDebugInfo100DebugOperation:
-			defineOrEmit(insn, pass, [&](debug::Operation *operation) {
-				operation->opcode = insn.word(5);
-				for(uint32_t i = 6; i < insn.wordCount(); i++)
-				{
-					operation->operands.push_back(insn.word(i));
-				}
-			});
-			break;
-
-		case OpenCLDebugInfo100DebugTypePointer:
-		case OpenCLDebugInfo100DebugTypeQualifier:
-		case OpenCLDebugInfo100DebugTypedef:
-		case OpenCLDebugInfo100DebugTypeEnum:
-		case OpenCLDebugInfo100DebugTypeInheritance:
-		case OpenCLDebugInfo100DebugTypePtrToMember:
-		case OpenCLDebugInfo100DebugTypeTemplateTemplateParameter:
-		case OpenCLDebugInfo100DebugTypeTemplateParameterPack:
-		case OpenCLDebugInfo100DebugFunctionDeclaration:
-		case OpenCLDebugInfo100DebugLexicalBlockDiscriminator:
-		case OpenCLDebugInfo100DebugInlinedVariable:
-		case OpenCLDebugInfo100DebugMacroDef:
-		case OpenCLDebugInfo100DebugMacroUndef:
-		case OpenCLDebugInfo100DebugImportedEntity:
-			UNIMPLEMENTED("b/148401179 OpenCLDebugInfo100 instruction %d", int(extInstIndex));
-			break;
-		default:
-			UNSUPPORTED("OpenCLDebugInfo100 instruction %d", int(extInstIndex));
+	case OpenCLDebugInfo100DebugTypePointer:
+	case OpenCLDebugInfo100DebugTypeQualifier:
+	case OpenCLDebugInfo100DebugTypedef:
+	case OpenCLDebugInfo100DebugTypeEnum:
+	case OpenCLDebugInfo100DebugTypeInheritance:
+	case OpenCLDebugInfo100DebugTypePtrToMember:
+	case OpenCLDebugInfo100DebugTypeTemplateTemplateParameter:
+	case OpenCLDebugInfo100DebugTypeTemplateParameterPack:
+	case OpenCLDebugInfo100DebugFunctionDeclaration:
+	case OpenCLDebugInfo100DebugLexicalBlockDiscriminator:
+	case OpenCLDebugInfo100DebugInlinedVariable:
+	case OpenCLDebugInfo100DebugMacroDef:
+	case OpenCLDebugInfo100DebugMacroUndef:
+	case OpenCLDebugInfo100DebugImportedEntity:
+		UNIMPLEMENTED("b/148401179 OpenCLDebugInfo100 instruction %d", int(extInstIndex));
+		break;
+	default:
+		UNSUPPORTED("OpenCLDebugInfo100 instruction %d", int(extInstIndex));
 	}
 }
 
@@ -1955,8 +1955,8 @@
 	auto mask = state->activeLaneMask();
 	switch(obj.kind)
 	{
-		case Object::Kind::Constant:
-		case Object::Kind::Intermediate:
+	case Object::Kind::Constant:
+	case Object::Kind::Intermediate:
 		{
 			size += objTy.componentCount * sizeof(uint32_t) * sw::SIMD::Width;
 			auto dst = InterleaveByLane(SIMD::Pointer(base, 0));
@@ -1967,20 +1967,20 @@
 				dst += sizeof(uint32_t) * SIMD::Width;
 			}
 			entry.kind = Entry::Kind::Value;
-			break;
 		}
-		case Object::Kind::Pointer:
-		case Object::Kind::InterfaceVariable:
+		break;
+	case Object::Kind::Pointer:
+	case Object::Kind::InterfaceVariable:
 		{
 			size += sizeof(void *) + sizeof(uint32_t) * SIMD::Width;
 			auto ptr = state->getPointer(objId);
 			store(base, ptr.base);
 			store(base + sizeof(void *), ptr.offsets());
 			entry.kind = Entry::Kind::Pointer;
-			break;
 		}
-		default:
-			break;
+		break;
+	default:
+		break;
 	}
 	entries.emplace(objId, entry);
 }
@@ -2067,12 +2067,12 @@
 		{
 			switch(op->opcode)
 			{
-				case OpenCLDebugInfo100Deref:
-					ptr = ptr.dref(shared->lane);
-					break;
-				default:
-					UNIMPLEMENTED("b/148401179 OpenCLDebugInfo100DebugOperation %d", (int)op->opcode);
-					break;
+			case OpenCLDebugInfo100Deref:
+				ptr = ptr.dref(shared->lane);
+				break;
+			default:
+				UNIMPLEMENTED("b/148401179 OpenCLDebugInfo100DebugOperation %d", (int)op->opcode);
+				break;
 			}
 		}
 		value = ty->value(ptr, true);
@@ -2352,18 +2352,18 @@
 
 				switch(var->definition)
 				{
-					case debug::LocalVariable::Definition::Undefined:
+				case debug::LocalVariable::Definition::Undefined:
 					{
 						vc->put(name, var->type->undefined());
-						break;
 					}
-					case debug::LocalVariable::Definition::Declaration:
+					break;
+				case debug::LocalVariable::Definition::Declaration:
 					{
 						auto data = state->debugger->shadow.get(state, var->declaration->variable);
 						vc->put(name, var->type->value(data.dref(lane), true));
-						break;
 					}
-					case debug::LocalVariable::Definition::Values:
+					break;
+				case debug::LocalVariable::Definition::Values:
 					{
 						vc->put(name, std::make_shared<LocalVariableValue>(var, state, lane));
 						break;
@@ -2446,7 +2446,7 @@
 
 	switch(state->debugger->shader->executionModel)
 	{
-		case spv::ExecutionModelGLCompute:
+	case spv::ExecutionModelGLCompute:
 		{
 			buildGlobal("numWorkgroups", state->globals.compute.numWorkgroups);
 			buildGlobal("workgroupID", state->globals.compute.workgroupID);
@@ -2455,26 +2455,26 @@
 			buildGlobal("subgroupIndex", state->globals.compute.subgroupIndex);
 			buildGlobal("globalInvocationId", state->globals.compute.globalInvocationId);
 			buildGlobal("localInvocationIndex", state->globals.compute.localInvocationIndex);
-			break;
 		}
-		case spv::ExecutionModelFragment:
+		break;
+	case spv::ExecutionModelFragment:
 		{
 			buildGlobal("viewIndex", state->globals.fragment.viewIndex);
 			buildGlobal("fragCoord", state->globals.fragment.fragCoord);
 			buildGlobal("pointCoord", state->globals.fragment.pointCoord);
 			buildGlobal("windowSpacePosition", state->globals.fragment.windowSpacePosition);
 			buildGlobal("helperInvocation", state->globals.fragment.helperInvocation);
-			break;
 		}
-		case spv::ExecutionModelVertex:
+		break;
+	case spv::ExecutionModelVertex:
 		{
 			buildGlobal("viewIndex", state->globals.vertex.viewIndex);
 			buildGlobal("instanceIndex", state->globals.vertex.instanceIndex);
 			buildGlobal("vertexIndex", state->globals.vertex.vertexIndex);
-			break;
 		}
-		default:
-			break;
+		break;
+	default:
+		break;
 	}
 }
 
@@ -2499,15 +2499,15 @@
 			auto memory = debugger->shadow.get(state, id);
 			switch(obj.kind)
 			{
-				case Object::Kind::Intermediate:
-				case Object::Kind::Constant:
-					if(auto val = buildSpirvValue(state, memory, objTy, lane))
-					{
-						vc->put(name, val);
-					}
-					break;
-				default:
-					break;  // Not handled yet.
+			case Object::Kind::Intermediate:
+			case Object::Kind::Constant:
+				if(auto val = buildSpirvValue(state, memory, objTy, lane))
+				{
+					vc->put(name, val);
+				}
+				break;
+			default:
+				break;  // Not handled yet.
 			}
 		}
 	});
@@ -2521,11 +2521,11 @@
 
 	switch(type.definition.opcode())
 	{
-		case spv::OpTypeInt:
-			return vk::dbg::make_reference(reinterpret_cast<uint32_t *>(memory.addr)[lane]);
-		case spv::OpTypeFloat:
-			return vk::dbg::make_reference(reinterpret_cast<float *>(memory.addr)[lane]);
-		case spv::OpTypeVector:
+	case spv::OpTypeInt:
+		return vk::dbg::make_reference(reinterpret_cast<uint32_t *>(memory.addr)[lane]);
+	case spv::OpTypeFloat:
+		return vk::dbg::make_reference(reinterpret_cast<float *>(memory.addr)[lane]);
+	case spv::OpTypeVector:
 		{
 			auto elTy = shader->getType(type.element);
 			return vk::dbg::Struct::create("vector", [&](auto &fields) {
@@ -2539,8 +2539,8 @@
 				}
 			});
 		}
-		default:
-			return nullptr;  // Not handled yet
+	default:
+		return nullptr;  // Not handled yet
 	}
 }
 
@@ -2584,10 +2584,10 @@
 	std::string name;
 	switch(executionModel)
 	{
-		case spv::ExecutionModelVertex: name = "VertexShader"; break;
-		case spv::ExecutionModelFragment: name = "FragmentShader"; break;
-		case spv::ExecutionModelGLCompute: name = "ComputeShader"; break;
-		default: name = "SPIR-V Shader"; break;
+	case spv::ExecutionModelVertex: name = "VertexShader"; break;
+	case spv::ExecutionModelFragment: name = "FragmentShader"; break;
+	case spv::ExecutionModelGLCompute: name = "ComputeShader"; break;
+	default: name = "SPIR-V Shader"; break;
 	}
 	static std::atomic<int> id = { 0 };
 	name += std::to_string(id++) + ".spvasm";
@@ -2618,7 +2618,7 @@
 
 		switch(executionModel)
 		{
-			case spv::ExecutionModelGLCompute:
+		case spv::ExecutionModelGLCompute:
 			{
 				auto compute = globals + OFFSET(Globals, compute);
 				store(compute + OFFSET(Globals::Compute, numWorkgroups), routine->numWorkgroups);
@@ -2628,9 +2628,9 @@
 				store(compute + OFFSET(Globals::Compute, subgroupIndex), routine->subgroupIndex);
 				store(compute + OFFSET(Globals::Compute, globalInvocationId), routine->globalInvocationID);
 				store(compute + OFFSET(Globals::Compute, localInvocationIndex), routine->localInvocationIndex);
-				break;
 			}
-			case spv::ExecutionModelFragment:
+			break;
+		case spv::ExecutionModelFragment:
 			{
 				auto fragment = globals + OFFSET(Globals, fragment);
 				store(fragment + OFFSET(Globals::Fragment, viewIndex), routine->viewID);
@@ -2638,18 +2638,18 @@
 				store(fragment + OFFSET(Globals::Fragment, pointCoord), routine->pointCoord);
 				store(fragment + OFFSET(Globals::Fragment, windowSpacePosition), routine->windowSpacePosition);
 				store(fragment + OFFSET(Globals::Fragment, helperInvocation), routine->helperInvocation);
-				break;
 			}
-			case spv::ExecutionModelVertex:
+			break;
+		case spv::ExecutionModelVertex:
 			{
 				auto vertex = globals + OFFSET(Globals, vertex);
 				store(vertex + OFFSET(Globals::Vertex, viewIndex), routine->viewID);
 				store(vertex + OFFSET(Globals::Vertex, instanceIndex), routine->instanceID);
 				store(vertex + OFFSET(Globals::Vertex, vertexIndex), routine->vertexIndex);
-				break;
 			}
-			default:
-				break;
+			break;
+		default:
+			break;
 		}
 	}
 }
@@ -2725,15 +2725,15 @@
 
 	switch(insn.opcode())
 	{
-		case spv::OpVariable:
-		case spv::OpConstant:  // TODO: Move constants out of shadow memory.
-		case spv::OpConstantNull:
-		case spv::OpConstantTrue:
-		case spv::OpConstantFalse:
-		case spv::OpConstantComposite:
-			dbg->shadow.create(this, state, insn.resultId());
-			break;
-		default:
+	case spv::OpVariable:
+	case spv::OpConstant:  // TODO: Move constants out of shadow memory.
+	case spv::OpConstantNull:
+	case spv::OpConstantTrue:
+	case spv::OpConstantFalse:
+	case spv::OpConstantComposite:
+		dbg->shadow.create(this, state, insn.resultId());
+		break;
+	default:
 		{
 			auto resIt = dbg->results.find(insn.wordPointer(0));
 			if(resIt != dbg->results.end())
diff --git a/src/Pipeline/SpirvShaderGLSLstd450.cpp b/src/Pipeline/SpirvShaderGLSLstd450.cpp
index f1644a5..c57fdde 100644
--- a/src/Pipeline/SpirvShaderGLSLstd450.cpp
+++ b/src/Pipeline/SpirvShaderGLSLstd450.cpp
@@ -76,79 +76,79 @@
 
 	switch(extInstIndex)
 	{
-		case GLSLstd450FAbs:
+	case GLSLstd450FAbs:
 		{
 			auto src = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Abs(src.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450SAbs:
+		break;
+	case GLSLstd450SAbs:
 		{
 			auto src = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Abs(src.Int(i)));
 			}
-			break;
 		}
-		case GLSLstd450Cross:
+		break;
+	case GLSLstd450Cross:
 		{
 			auto lhs = Operand(this, state, insn.word(5));
 			auto rhs = Operand(this, state, insn.word(6));
 			dst.move(0, lhs.Float(1) * rhs.Float(2) - rhs.Float(1) * lhs.Float(2));
 			dst.move(1, lhs.Float(2) * rhs.Float(0) - rhs.Float(2) * lhs.Float(0));
 			dst.move(2, lhs.Float(0) * rhs.Float(1) - rhs.Float(0) * lhs.Float(1));
-			break;
 		}
-		case GLSLstd450Floor:
+		break;
+	case GLSLstd450Floor:
 		{
 			auto src = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Floor(src.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Trunc:
+		break;
+	case GLSLstd450Trunc:
 		{
 			auto src = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Trunc(src.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Ceil:
+		break;
+	case GLSLstd450Ceil:
 		{
 			auto src = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Ceil(src.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Fract:
+		break;
+	case GLSLstd450Fract:
 		{
 			auto src = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Frac(src.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Round:
+		break;
+	case GLSLstd450Round:
 		{
 			auto src = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Round(src.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450RoundEven:
+		break;
+	case GLSLstd450RoundEven:
 		{
 			auto src = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
@@ -158,9 +158,9 @@
 				dst.move(i, x + ((SIMD::Float(CmpLT(x, src.Float(i)) & SIMD::Int(1)) * SIMD::Float(2.0f)) - SIMD::Float(1.0f)) *
 				                    SIMD::Float(CmpEQ(Frac(src.Float(i)), SIMD::Float(0.5f)) & SIMD::Int(1)) * SIMD::Float(Int4(x) & SIMD::Int(1)));
 			}
-			break;
 		}
-		case GLSLstd450FMin:
+		break;
+	case GLSLstd450FMin:
 		{
 			auto lhs = Operand(this, state, insn.word(5));
 			auto rhs = Operand(this, state, insn.word(6));
@@ -168,9 +168,9 @@
 			{
 				dst.move(i, Min(lhs.Float(i), rhs.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450FMax:
+		break;
+	case GLSLstd450FMax:
 		{
 			auto lhs = Operand(this, state, insn.word(5));
 			auto rhs = Operand(this, state, insn.word(6));
@@ -178,9 +178,9 @@
 			{
 				dst.move(i, Max(lhs.Float(i), rhs.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450SMin:
+		break;
+	case GLSLstd450SMin:
 		{
 			auto lhs = Operand(this, state, insn.word(5));
 			auto rhs = Operand(this, state, insn.word(6));
@@ -188,9 +188,9 @@
 			{
 				dst.move(i, Min(lhs.Int(i), rhs.Int(i)));
 			}
-			break;
 		}
-		case GLSLstd450SMax:
+		break;
+	case GLSLstd450SMax:
 		{
 			auto lhs = Operand(this, state, insn.word(5));
 			auto rhs = Operand(this, state, insn.word(6));
@@ -198,9 +198,9 @@
 			{
 				dst.move(i, Max(lhs.Int(i), rhs.Int(i)));
 			}
-			break;
 		}
-		case GLSLstd450UMin:
+		break;
+	case GLSLstd450UMin:
 		{
 			auto lhs = Operand(this, state, insn.word(5));
 			auto rhs = Operand(this, state, insn.word(6));
@@ -208,9 +208,9 @@
 			{
 				dst.move(i, Min(lhs.UInt(i), rhs.UInt(i)));
 			}
-			break;
 		}
-		case GLSLstd450UMax:
+		break;
+	case GLSLstd450UMax:
 		{
 			auto lhs = Operand(this, state, insn.word(5));
 			auto rhs = Operand(this, state, insn.word(6));
@@ -218,9 +218,9 @@
 			{
 				dst.move(i, Max(lhs.UInt(i), rhs.UInt(i)));
 			}
-			break;
 		}
-		case GLSLstd450Step:
+		break;
+	case GLSLstd450Step:
 		{
 			auto edge = Operand(this, state, insn.word(5));
 			auto x = Operand(this, state, insn.word(6));
@@ -228,9 +228,9 @@
 			{
 				dst.move(i, CmpNLT(x.Float(i), edge.Float(i)) & As<SIMD::Int>(SIMD::Float(1.0f)));
 			}
-			break;
 		}
-		case GLSLstd450SmoothStep:
+		break;
+	case GLSLstd450SmoothStep:
 		{
 			auto edge0 = Operand(this, state, insn.word(5));
 			auto edge1 = Operand(this, state, insn.word(6));
@@ -243,9 +243,9 @@
 				              SIMD::Float(1.0f));
 				dst.move(i, tx * tx * (Float4(3.0f) - Float4(2.0f) * tx));
 			}
-			break;
 		}
-		case GLSLstd450FMix:
+		break;
+	case GLSLstd450FMix:
 		{
 			auto x = Operand(this, state, insn.word(5));
 			auto y = Operand(this, state, insn.word(6));
@@ -254,9 +254,9 @@
 			{
 				dst.move(i, a.Float(i) * (y.Float(i) - x.Float(i)) + x.Float(i));
 			}
-			break;
 		}
-		case GLSLstd450FClamp:
+		break;
+	case GLSLstd450FClamp:
 		{
 			auto x = Operand(this, state, insn.word(5));
 			auto minVal = Operand(this, state, insn.word(6));
@@ -265,9 +265,9 @@
 			{
 				dst.move(i, Min(Max(x.Float(i), minVal.Float(i)), maxVal.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450SClamp:
+		break;
+	case GLSLstd450SClamp:
 		{
 			auto x = Operand(this, state, insn.word(5));
 			auto minVal = Operand(this, state, insn.word(6));
@@ -276,9 +276,9 @@
 			{
 				dst.move(i, Min(Max(x.Int(i), minVal.Int(i)), maxVal.Int(i)));
 			}
-			break;
 		}
-		case GLSLstd450UClamp:
+		break;
+	case GLSLstd450UClamp:
 		{
 			auto x = Operand(this, state, insn.word(5));
 			auto minVal = Operand(this, state, insn.word(6));
@@ -287,9 +287,9 @@
 			{
 				dst.move(i, Min(Max(x.UInt(i), minVal.UInt(i)), maxVal.UInt(i)));
 			}
-			break;
 		}
-		case GLSLstd450FSign:
+		break;
+	case GLSLstd450FSign:
 		{
 			auto src = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
@@ -298,9 +298,9 @@
 				auto pos = As<SIMD::Int>(CmpNLE(src.Float(i), SIMD::Float(+0.0f))) & As<SIMD::Int>(SIMD::Float(1.0f));
 				dst.move(i, neg | pos);
 			}
-			break;
 		}
-		case GLSLstd450SSign:
+		break;
+	case GLSLstd450SSign:
 		{
 			auto src = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
@@ -309,9 +309,9 @@
 				auto pos = CmpNLE(src.Int(i), SIMD::Int(0)) & SIMD::Int(1);
 				dst.move(i, neg | pos);
 			}
-			break;
 		}
-		case GLSLstd450Reflect:
+		break;
+	case GLSLstd450Reflect:
 		{
 			auto I = Operand(this, state, insn.word(5));
 			auto N = Operand(this, state, insn.word(6));
@@ -322,9 +322,9 @@
 			{
 				dst.move(i, I.Float(i) - SIMD::Float(2.0f) * d * N.Float(i));
 			}
-			break;
 		}
-		case GLSLstd450Refract:
+		break;
+	case GLSLstd450Refract:
 		{
 			auto I = Operand(this, state, insn.word(5));
 			auto N = Operand(this, state, insn.word(6));
@@ -339,9 +339,9 @@
 			{
 				dst.move(i, pos & As<SIMD::Int>(eta.Float(0) * I.Float(i) - t * N.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450FaceForward:
+		break;
+	case GLSLstd450FaceForward:
 		{
 			auto N = Operand(this, state, insn.word(5));
 			auto I = Operand(this, state, insn.word(6));
@@ -355,17 +355,17 @@
 				auto n = N.Float(i);
 				dst.move(i, (neg & As<SIMD::Int>(n)) | (~neg & As<SIMD::Int>(-n)));
 			}
-			break;
 		}
-		case GLSLstd450Length:
+		break;
+	case GLSLstd450Length:
 		{
 			auto x = Operand(this, state, insn.word(5));
 			SIMD::Float d = Dot(getType(getObject(insn.word(5))).componentCount, x, x);
 
 			dst.move(0, Sqrt(d));
-			break;
 		}
-		case GLSLstd450Normalize:
+		break;
+	case GLSLstd450Normalize:
 		{
 			auto x = Operand(this, state, insn.word(5));
 			SIMD::Float d = Dot(getType(getObject(insn.word(5))).componentCount, x, x);
@@ -375,9 +375,9 @@
 			{
 				dst.move(i, invLength * x.Float(i));
 			}
-			break;
 		}
-		case GLSLstd450Distance:
+		break;
+	case GLSLstd450Distance:
 		{
 			auto p0 = Operand(this, state, insn.word(5));
 			auto p1 = Operand(this, state, insn.word(6));
@@ -391,9 +391,9 @@
 			}
 
 			dst.move(0, Sqrt(d));
-			break;
 		}
-		case GLSLstd450Modf:
+		break;
+	case GLSLstd450Modf:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			auto ptrId = Object::ID(insn.word(6));
@@ -408,9 +408,9 @@
 			}
 
 			Store(ptrId, whole, false, std::memory_order_relaxed, state);
-			break;
 		}
-		case GLSLstd450ModfStruct:
+		break;
+	case GLSLstd450ModfStruct:
 		{
 			auto val = Operand(this, state, insn.word(5));
 
@@ -420,9 +420,9 @@
 				dst.move(i, wholeAndFrac.second);
 				dst.move(val.componentCount + i, wholeAndFrac.first);
 			}
-			break;
 		}
-		case GLSLstd450PackSnorm4x8:
+		break;
+	case GLSLstd450PackSnorm4x8:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			dst.move(0, (SIMD::Int(Round(Min(Max(val.Float(0), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(127.0f))) &
@@ -436,18 +436,18 @@
 			                ((SIMD::Int(Round(Min(Max(val.Float(3), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(127.0f))) &
 			                  SIMD::Int(0xFF))
 			                 << 24));
-			break;
 		}
-		case GLSLstd450PackUnorm4x8:
+		break;
+	case GLSLstd450PackUnorm4x8:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			dst.move(0, (SIMD::UInt(Round(Min(Max(val.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(255.0f)))) |
 			                ((SIMD::UInt(Round(Min(Max(val.Float(1), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(255.0f)))) << 8) |
 			                ((SIMD::UInt(Round(Min(Max(val.Float(2), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(255.0f)))) << 16) |
 			                ((SIMD::UInt(Round(Min(Max(val.Float(3), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(255.0f)))) << 24));
-			break;
 		}
-		case GLSLstd450PackSnorm2x16:
+		break;
+	case GLSLstd450PackSnorm2x16:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			dst.move(0, (SIMD::Int(Round(Min(Max(val.Float(0), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(32767.0f))) &
@@ -455,9 +455,9 @@
 			                ((SIMD::Int(Round(Min(Max(val.Float(1), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(32767.0f))) &
 			                  SIMD::Int(0xFFFF))
 			                 << 16));
-			break;
 		}
-		case GLSLstd450PackUnorm2x16:
+		break;
+	case GLSLstd450PackUnorm2x16:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			dst.move(0, (SIMD::UInt(Round(Min(Max(val.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(65535.0f))) &
@@ -465,33 +465,33 @@
 			                ((SIMD::UInt(Round(Min(Max(val.Float(1), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(65535.0f))) &
 			                  SIMD::UInt(0xFFFF))
 			                 << 16));
-			break;
 		}
-		case GLSLstd450PackHalf2x16:
+		break;
+	case GLSLstd450PackHalf2x16:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			dst.move(0, floatToHalfBits(val.UInt(0), false) | floatToHalfBits(val.UInt(1), true));
-			break;
 		}
-		case GLSLstd450UnpackSnorm4x8:
+		break;
+	case GLSLstd450UnpackSnorm4x8:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			dst.move(0, Min(Max(SIMD::Float(((val.Int(0) << 24) & SIMD::Int(0xFF000000))) * SIMD::Float(1.0f / float(0x7f000000)), SIMD::Float(-1.0f)), SIMD::Float(1.0f)));
 			dst.move(1, Min(Max(SIMD::Float(((val.Int(0) << 16) & SIMD::Int(0xFF000000))) * SIMD::Float(1.0f / float(0x7f000000)), SIMD::Float(-1.0f)), SIMD::Float(1.0f)));
 			dst.move(2, Min(Max(SIMD::Float(((val.Int(0) << 8) & SIMD::Int(0xFF000000))) * SIMD::Float(1.0f / float(0x7f000000)), SIMD::Float(-1.0f)), SIMD::Float(1.0f)));
 			dst.move(3, Min(Max(SIMD::Float(((val.Int(0)) & SIMD::Int(0xFF000000))) * SIMD::Float(1.0f / float(0x7f000000)), SIMD::Float(-1.0f)), SIMD::Float(1.0f)));
-			break;
 		}
-		case GLSLstd450UnpackUnorm4x8:
+		break;
+	case GLSLstd450UnpackUnorm4x8:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			dst.move(0, SIMD::Float((val.UInt(0) & SIMD::UInt(0xFF))) * SIMD::Float(1.0f / 255.f));
 			dst.move(1, SIMD::Float(((val.UInt(0) >> 8) & SIMD::UInt(0xFF))) * SIMD::Float(1.0f / 255.f));
 			dst.move(2, SIMD::Float(((val.UInt(0) >> 16) & SIMD::UInt(0xFF))) * SIMD::Float(1.0f / 255.f));
 			dst.move(3, SIMD::Float(((val.UInt(0) >> 24) & SIMD::UInt(0xFF))) * SIMD::Float(1.0f / 255.f));
-			break;
 		}
-		case GLSLstd450UnpackSnorm2x16:
+		break;
+	case GLSLstd450UnpackSnorm2x16:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			// clamp(f / 32767.0, -1.0, 1.0)
@@ -502,24 +502,24 @@
 			dst.move(1, Min(Max(SIMD::Float(As<SIMD::Int>(val.UInt(0) & SIMD::UInt(0xFFFF0000))) * SIMD::Float(1.0f / float(0x7FFF0000)),
 			                    SIMD::Float(-1.0f)),
 			                SIMD::Float(1.0f)));
-			break;
 		}
-		case GLSLstd450UnpackUnorm2x16:
+		break;
+	case GLSLstd450UnpackUnorm2x16:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			// f / 65535.0
 			dst.move(0, SIMD::Float((val.UInt(0) & SIMD::UInt(0x0000FFFF)) << 16) * SIMD::Float(1.0f / float(0xFFFF0000)));
 			dst.move(1, SIMD::Float(val.UInt(0) & SIMD::UInt(0xFFFF0000)) * SIMD::Float(1.0f / float(0xFFFF0000)));
-			break;
 		}
-		case GLSLstd450UnpackHalf2x16:
+		break;
+	case GLSLstd450UnpackHalf2x16:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			dst.move(0, halfToFloatBits(val.UInt(0) & SIMD::UInt(0x0000FFFF)));
 			dst.move(1, halfToFloatBits((val.UInt(0) & SIMD::UInt(0xFFFF0000)) >> 16));
-			break;
 		}
-		case GLSLstd450Fma:
+		break;
+	case GLSLstd450Fma:
 		{
 			auto a = Operand(this, state, insn.word(5));
 			auto b = Operand(this, state, insn.word(6));
@@ -528,9 +528,9 @@
 			{
 				dst.move(i, FMA(a.Float(i), b.Float(i), c.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Frexp:
+		break;
+	case GLSLstd450Frexp:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			auto ptrId = Object::ID(insn.word(6));
@@ -545,9 +545,9 @@
 			}
 
 			Store(ptrId, exp, false, std::memory_order_relaxed, state);
-			break;
 		}
-		case GLSLstd450FrexpStruct:
+		break;
+	case GLSLstd450FrexpStruct:
 		{
 			auto val = Operand(this, state, insn.word(5));
 
@@ -557,9 +557,9 @@
 				dst.move(i, significandAndExponent.first);
 				dst.move(val.componentCount + i, significandAndExponent.second);
 			}
-			break;
 		}
-		case GLSLstd450Ldexp:
+		break;
+	case GLSLstd450Ldexp:
 		{
 			auto significand = Operand(this, state, insn.word(5));
 			auto exponent = Operand(this, state, insn.word(6));
@@ -591,54 +591,54 @@
 
 				dst.move(i, As<SIMD::Float>(v));
 			}
-			break;
 		}
-		case GLSLstd450Radians:
+		break;
+	case GLSLstd450Radians:
 		{
 			auto degrees = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, degrees.Float(i) * SIMD::Float(PI / 180.0f));
 			}
-			break;
 		}
-		case GLSLstd450Degrees:
+		break;
+	case GLSLstd450Degrees:
 		{
 			auto radians = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, radians.Float(i) * SIMD::Float(180.0f / PI));
 			}
-			break;
 		}
-		case GLSLstd450Sin:
+		break;
+	case GLSLstd450Sin:
 		{
 			auto radians = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Sin(radians.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Cos:
+		break;
+	case GLSLstd450Cos:
 		{
 			auto radians = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Cos(radians.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Tan:
+		break;
+	case GLSLstd450Tan:
 		{
 			auto radians = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Tan(radians.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Asin:
+		break;
+	case GLSLstd450Asin:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			Decorations d;
@@ -647,9 +647,9 @@
 			{
 				dst.move(i, Asin(val.Float(i), d.RelaxedPrecision ? Precision::Relaxed : Precision::Full));
 			}
-			break;
 		}
-		case GLSLstd450Acos:
+		break;
+	case GLSLstd450Acos:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			Decorations d;
@@ -658,72 +658,72 @@
 			{
 				dst.move(i, Acos(val.Float(i), d.RelaxedPrecision ? Precision::Relaxed : Precision::Full));
 			}
-			break;
 		}
-		case GLSLstd450Atan:
+		break;
+	case GLSLstd450Atan:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Atan(val.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Sinh:
+		break;
+	case GLSLstd450Sinh:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Sinh(val.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Cosh:
+		break;
+	case GLSLstd450Cosh:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Cosh(val.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Tanh:
+		break;
+	case GLSLstd450Tanh:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Tanh(val.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Asinh:
+		break;
+	case GLSLstd450Asinh:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Asinh(val.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Acosh:
+		break;
+	case GLSLstd450Acosh:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Acosh(val.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Atanh:
+		break;
+	case GLSLstd450Atanh:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Atanh(val.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Atan2:
+		break;
+	case GLSLstd450Atan2:
 		{
 			auto x = Operand(this, state, insn.word(5));
 			auto y = Operand(this, state, insn.word(6));
@@ -731,9 +731,9 @@
 			{
 				dst.move(i, Atan2(x.Float(i), y.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Pow:
+		break;
+	case GLSLstd450Pow:
 		{
 			auto x = Operand(this, state, insn.word(5));
 			auto y = Operand(this, state, insn.word(6));
@@ -741,54 +741,54 @@
 			{
 				dst.move(i, Pow(x.Float(i), y.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Exp:
+		break;
+	case GLSLstd450Exp:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Exp(val.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Log:
+		break;
+	case GLSLstd450Log:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Log(val.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Exp2:
+		break;
+	case GLSLstd450Exp2:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Exp2(val.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Log2:
+		break;
+	case GLSLstd450Log2:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Log2(val.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450Sqrt:
+		break;
+	case GLSLstd450Sqrt:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, Sqrt(val.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450InverseSqrt:
+		break;
+	case GLSLstd450InverseSqrt:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			Decorations d;
@@ -798,44 +798,44 @@
 			{
 				dst.move(i, RcpSqrt(val.Float(i), d.RelaxedPrecision ? Precision::Relaxed : Precision::Full));
 			}
-			break;
 		}
-		case GLSLstd450Determinant:
+		break;
+	case GLSLstd450Determinant:
 		{
 			auto mat = Operand(this, state, insn.word(5));
 
 			switch(mat.componentCount)
 			{
-				case 4:  // 2x2
-					dst.move(0, Determinant(
-					                mat.Float(0), mat.Float(1),
-					                mat.Float(2), mat.Float(3)));
-					break;
-				case 9:  // 3x3
-					dst.move(0, Determinant(
-					                mat.Float(0), mat.Float(1), mat.Float(2),
-					                mat.Float(3), mat.Float(4), mat.Float(5),
-					                mat.Float(6), mat.Float(7), mat.Float(8)));
-					break;
-				case 16:  // 4x4
-					dst.move(0, Determinant(
-					                mat.Float(0), mat.Float(1), mat.Float(2), mat.Float(3),
-					                mat.Float(4), mat.Float(5), mat.Float(6), mat.Float(7),
-					                mat.Float(8), mat.Float(9), mat.Float(10), mat.Float(11),
-					                mat.Float(12), mat.Float(13), mat.Float(14), mat.Float(15)));
-					break;
-				default:
-					UNREACHABLE("GLSLstd450Determinant can only operate with square matrices. Got %d elements", int(mat.componentCount));
+			case 4:  // 2x2
+				dst.move(0, Determinant(
+				                mat.Float(0), mat.Float(1),
+				                mat.Float(2), mat.Float(3)));
+				break;
+			case 9:  // 3x3
+				dst.move(0, Determinant(
+				                mat.Float(0), mat.Float(1), mat.Float(2),
+				                mat.Float(3), mat.Float(4), mat.Float(5),
+				                mat.Float(6), mat.Float(7), mat.Float(8)));
+				break;
+			case 16:  // 4x4
+				dst.move(0, Determinant(
+				                mat.Float(0), mat.Float(1), mat.Float(2), mat.Float(3),
+				                mat.Float(4), mat.Float(5), mat.Float(6), mat.Float(7),
+				                mat.Float(8), mat.Float(9), mat.Float(10), mat.Float(11),
+				                mat.Float(12), mat.Float(13), mat.Float(14), mat.Float(15)));
+				break;
+			default:
+				UNREACHABLE("GLSLstd450Determinant can only operate with square matrices. Got %d elements", int(mat.componentCount));
 			}
-			break;
 		}
-		case GLSLstd450MatrixInverse:
+		break;
+	case GLSLstd450MatrixInverse:
 		{
 			auto mat = Operand(this, state, insn.word(5));
 
 			switch(mat.componentCount)
 			{
-				case 4:  // 2x2
+			case 4:  // 2x2
 				{
 					auto inv = MatrixInverse(
 					    mat.Float(0), mat.Float(1),
@@ -844,9 +844,9 @@
 					{
 						dst.move(i, inv[i]);
 					}
-					break;
 				}
-				case 9:  // 3x3
+				break;
+			case 9:  // 3x3
 				{
 					auto inv = MatrixInverse(
 					    mat.Float(0), mat.Float(1), mat.Float(2),
@@ -856,9 +856,9 @@
 					{
 						dst.move(i, inv[i]);
 					}
-					break;
 				}
-				case 16:  // 4x4
+				break;
+			case 16:  // 4x4
 				{
 					auto inv = MatrixInverse(
 					    mat.Float(0), mat.Float(1), mat.Float(2), mat.Float(3),
@@ -869,29 +869,29 @@
 					{
 						dst.move(i, inv[i]);
 					}
-					break;
 				}
-				default:
-					UNREACHABLE("GLSLstd450MatrixInverse can only operate with square matrices. Got %d elements", int(mat.componentCount));
+				break;
+			default:
+				UNREACHABLE("GLSLstd450MatrixInverse can only operate with square matrices. Got %d elements", int(mat.componentCount));
 			}
-			break;
 		}
-		case GLSLstd450IMix:
+		break;
+	case GLSLstd450IMix:
 		{
 			UNREACHABLE("GLSLstd450IMix has been removed from the specification");
-			break;
 		}
-		case GLSLstd450PackDouble2x32:
+		break;
+	case GLSLstd450PackDouble2x32:
 		{
 			UNSUPPORTED("SPIR-V Float64 Capability (GLSLstd450PackDouble2x32)");
-			break;
 		}
-		case GLSLstd450UnpackDouble2x32:
+		break;
+	case GLSLstd450UnpackDouble2x32:
 		{
 			UNSUPPORTED("SPIR-V Float64 Capability (GLSLstd450UnpackDouble2x32)");
-			break;
 		}
-		case GLSLstd450FindILsb:
+		break;
+	case GLSLstd450FindILsb:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
@@ -899,9 +899,9 @@
 				auto v = val.UInt(i);
 				dst.move(i, Cttz(v, true) | CmpEQ(v, SIMD::UInt(0)));
 			}
-			break;
 		}
-		case GLSLstd450FindSMsb:
+		break;
+	case GLSLstd450FindSMsb:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
@@ -909,18 +909,18 @@
 				auto v = val.UInt(i) ^ As<SIMD::UInt>(CmpLT(val.Int(i), SIMD::Int(0)));
 				dst.move(i, SIMD::UInt(31) - Ctlz(v, false));
 			}
-			break;
 		}
-		case GLSLstd450FindUMsb:
+		break;
+	case GLSLstd450FindUMsb:
 		{
 			auto val = Operand(this, state, insn.word(5));
 			for(auto i = 0u; i < type.componentCount; i++)
 			{
 				dst.move(i, SIMD::UInt(31) - Ctlz(val.UInt(i), false));
 			}
-			break;
 		}
-		case GLSLstd450InterpolateAtCentroid:
+		break;
+	case GLSLstd450InterpolateAtCentroid:
 		{
 			Decorations d;
 			ApplyDecorationsForId(&d, insn.word(5));
@@ -929,9 +929,9 @@
 			{
 				dst.move(i, Interpolate(ptr, d.Location, 0, i, type.componentCount, state, SpirvShader::Centroid));
 			}
-			break;
 		}
-		case GLSLstd450InterpolateAtSample:
+		break;
+	case GLSLstd450InterpolateAtSample:
 		{
 			Decorations d;
 			ApplyDecorationsForId(&d, insn.word(5));
@@ -940,9 +940,9 @@
 			{
 				dst.move(i, Interpolate(ptr, d.Location, insn.word(6), i, type.componentCount, state, SpirvShader::AtSample));
 			}
-			break;
 		}
-		case GLSLstd450InterpolateAtOffset:
+		break;
+	case GLSLstd450InterpolateAtOffset:
 		{
 			Decorations d;
 			ApplyDecorationsForId(&d, insn.word(5));
@@ -951,9 +951,9 @@
 			{
 				dst.move(i, Interpolate(ptr, d.Location, insn.word(6), i, type.componentCount, state, SpirvShader::AtOffset));
 			}
-			break;
 		}
-		case GLSLstd450NMin:
+		break;
+	case GLSLstd450NMin:
 		{
 			auto x = Operand(this, state, insn.word(5));
 			auto y = Operand(this, state, insn.word(6));
@@ -961,9 +961,9 @@
 			{
 				dst.move(i, NMin(x.Float(i), y.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450NMax:
+		break;
+	case GLSLstd450NMax:
 		{
 			auto x = Operand(this, state, insn.word(5));
 			auto y = Operand(this, state, insn.word(6));
@@ -971,9 +971,9 @@
 			{
 				dst.move(i, NMax(x.Float(i), y.Float(i)));
 			}
-			break;
 		}
-		case GLSLstd450NClamp:
+		break;
+	case GLSLstd450NClamp:
 		{
 			auto x = Operand(this, state, insn.word(5));
 			auto minVal = Operand(this, state, insn.word(6));
@@ -983,11 +983,11 @@
 				auto clamp = NMin(NMax(x.Float(i), minVal.Float(i)), maxVal.Float(i));
 				dst.move(i, clamp);
 			}
-			break;
 		}
-		default:
-			UNREACHABLE("ExtInst %d", int(extInstIndex));
-			break;
+		break;
+	default:
+		UNREACHABLE("ExtInst %d", int(extInstIndex));
+		break;
 	}
 
 	return EmitResult::Continue;
@@ -1017,48 +1017,48 @@
 
 	switch(type)
 	{
-		case Centroid:
-			x = interpolationData.xCentroid;
-			y = interpolationData.yCentroid;
-			rhw = interpolationData.rhwCentroid;
-			break;
-		case AtSample:
-			x = SIMD::Float(0.0f);
-			y = SIMD::Float(0.0f);
+	case Centroid:
+		x = interpolationData.xCentroid;
+		y = interpolationData.yCentroid;
+		rhw = interpolationData.rhwCentroid;
+		break;
+	case AtSample:
+		x = SIMD::Float(0.0f);
+		y = SIMD::Float(0.0f);
 
-			if(state->getMultiSampleCount() > 1)
+		if(state->getMultiSampleCount() > 1)
+		{
+			static constexpr int NUM_SAMPLES = 4;
+			ASSERT(state->getMultiSampleCount() == NUM_SAMPLES);
+
+			Array<Float> sampleX(NUM_SAMPLES);
+			Array<Float> sampleY(NUM_SAMPLES);
+			for(int i = 0; i < NUM_SAMPLES; ++i)
 			{
-				static constexpr int NUM_SAMPLES = 4;
-				ASSERT(state->getMultiSampleCount() == NUM_SAMPLES);
-
-				Array<Float> sampleX(NUM_SAMPLES);
-				Array<Float> sampleY(NUM_SAMPLES);
-				for(int i = 0; i < NUM_SAMPLES; ++i)
-				{
-					sampleX[i] = Constants::SampleLocationsX[i];
-					sampleY[i] = Constants::SampleLocationsY[i];
-				}
-
-				auto sampleOperand = Operand(this, state, paramId);
-				ASSERT(sampleOperand.componentCount == 1);
-
-				// If sample does not exist, the position used to interpolate the
-				// input variable is undefined, so we just clamp to avoid OOB accesses.
-				SIMD::Int samples = sampleOperand.Int(0) & SIMD::Int(NUM_SAMPLES - 1);
-
-				for(int i = 0; i < SIMD::Width; ++i)
-				{
-					Int sample = Extract(samples, i);
-					x = Insert(x, sampleX[sample], i);
-					y = Insert(y, sampleY[sample], i);
-				}
+				sampleX[i] = Constants::SampleLocationsX[i];
+				sampleY[i] = Constants::SampleLocationsY[i];
 			}
 
-			x += interpolationData.x;
-			y += interpolationData.y;
-			rhw = interpolationData.rhw;
-			break;
-		case AtOffset:
+			auto sampleOperand = Operand(this, state, paramId);
+			ASSERT(sampleOperand.componentCount == 1);
+
+			// If sample does not exist, the position used to interpolate the
+			// input variable is undefined, so we just clamp to avoid OOB accesses.
+			SIMD::Int samples = sampleOperand.Int(0) & SIMD::Int(NUM_SAMPLES - 1);
+
+			for(int i = 0; i < SIMD::Width; ++i)
+			{
+				Int sample = Extract(samples, i);
+				x = Insert(x, sampleX[sample], i);
+				y = Insert(y, sampleY[sample], i);
+			}
+		}
+
+		x += interpolationData.x;
+		y += interpolationData.y;
+		rhw = interpolationData.rhw;
+		break;
+	case AtOffset:
 		{
 			//  An offset of (0, 0) identifies the center of the pixel.
 			auto offset = Operand(this, state, paramId);
@@ -1069,9 +1069,9 @@
 			rhw = interpolationData.rhw;
 		}
 		break;
-		default:
-			UNREACHABLE("Unknown interpolation type: %d", (int)type);
-			return SIMD::Float(0.0f);
+	default:
+		UNREACHABLE("Unknown interpolation type: %d", (int)type);
+		return SIMD::Float(0.0f);
 	}
 
 	Pointer<Byte> planeEquation = interpolationData.primitive + OFFSET(Primitive, V[interpolant]);
diff --git a/src/Pipeline/SpirvShaderGroup.cpp b/src/Pipeline/SpirvShaderGroup.cpp
index 3750b38..baa2134 100644
--- a/src/Pipeline/SpirvShaderGroup.cpp
+++ b/src/Pipeline/SpirvShaderGroup.cpp
@@ -44,33 +44,33 @@
 			TYPE v = As<TYPE>(v_uint);
 			switch(spv::GroupOperation(insn.word(4)))
 			{
-				case spv::GroupOperationReduce:
+			case spv::GroupOperationReduce:
 				{
 					// NOTE: floating-point add and multiply are not really associative, so
 					//       ensure that all values in the final lanes are identical
 					TYPE v2 = apply(v.xxzz, v.yyww);    // [xy]   [xy]   [zw]   [zw]
 					TYPE v3 = apply(v2.xxxx, v2.zzzz);  // [xyzw] [xyzw] [xyzw] [xyzw]
 					dst.move(i, v3);
-					break;
 				}
-				case spv::GroupOperationInclusiveScan:
+				break;
+			case spv::GroupOperationInclusiveScan:
 				{
 					TYPE v2 = apply(v, Shuffle(v, identity, 0x4012) /* [id, v.y, v.z, v.w] */);      // [x] [xy] [yz]  [zw]
 					TYPE v3 = apply(v2, Shuffle(v2, identity, 0x4401) /* [id,  id, v2.x, v2.y] */);  // [x] [xy] [xyz] [xyzw]
 					dst.move(i, v3);
-					break;
 				}
-				case spv::GroupOperationExclusiveScan:
+				break;
+			case spv::GroupOperationExclusiveScan:
 				{
 					TYPE v2 = apply(v, Shuffle(v, identity, 0x4012) /* [id, v.y, v.z, v.w] */);      // [x] [xy] [yz]  [zw]
 					TYPE v3 = apply(v2, Shuffle(v2, identity, 0x4401) /* [id,  id, v2.x, v2.y] */);  // [x] [xy] [xyz] [xyzw]
 					auto v4 = Shuffle(v3, identity, 0x4012 /* [id, v3.x, v3.y, v3.z] */);            // [i] [x]  [xy]  [xyz]
 					dst.move(i, v4);
-					break;
 				}
-				default:
-					UNSUPPORTED("EmitGroupNonUniform op: %s Group operation: %d",
-					            SpirvShader::OpcodeName(type.opcode()), insn.word(4));
+				break;
+			default:
+				UNSUPPORTED("EmitGroupNonUniform op: %s Group operation: %d",
+				            SpirvShader::OpcodeName(type.opcode()), insn.word(4));
 			}
 		}
 	}
@@ -89,7 +89,7 @@
 
 	switch(insn.opcode())
 	{
-		case spv::OpGroupNonUniformElect:
+	case spv::OpGroupNonUniformElect:
 		{
 			// Result is true only in the active invocation with the lowest id
 			// in the group, otherwise result is false.
@@ -99,24 +99,24 @@
 			auto v0111 = SIMD::Int(0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
 			auto elect = active & ~(v0111 & (active.xxyz | active.xxxy | active.xxxx));
 			dst.move(0, elect);
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformAll:
+	case spv::OpGroupNonUniformAll:
 		{
 			Operand predicate(this, state, insn.word(4));
 			dst.move(0, AndAll(predicate.UInt(0) | ~As<SIMD::UInt>(state->activeLaneMask())));  // Considers helper invocations active. See b/151137030
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformAny:
+	case spv::OpGroupNonUniformAny:
 		{
 			Operand predicate(this, state, insn.word(4));
 			dst.move(0, OrAll(predicate.UInt(0) & As<SIMD::UInt>(state->activeLaneMask())));  // Considers helper invocations active. See b/151137030
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformAllEqual:
+	case spv::OpGroupNonUniformAllEqual:
 		{
 			Operand value(this, state, insn.word(4));
 			auto res = SIMD::UInt(0xffffffff);
@@ -133,10 +133,10 @@
 				res &= AndAll(CmpEQ(filled.xyzw, filled.yzwx));
 			}
 			dst.move(0, res);
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformBroadcast:
+	case spv::OpGroupNonUniformBroadcast:
 		{
 			auto valueId = Object::ID(insn.word(4));
 			auto idId = Object::ID(insn.word(5));
@@ -173,10 +173,10 @@
 					dst.move(i, OrAll(value.UInt(i) & mask));
 				}
 			}
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformBroadcastFirst:
+	case spv::OpGroupNonUniformBroadcastFirst:
 		{
 			auto valueId = Object::ID(insn.word(4));
 			Operand value(this, state, valueId);
@@ -191,10 +191,10 @@
 			{
 				dst.move(i, OrAll(value.Int(i) & elect));
 			}
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformBallot:
+	case spv::OpGroupNonUniformBallot:
 		{
 			ASSERT(type.componentCount == 4);
 			Operand predicate(this, state, insn.word(4));
@@ -202,10 +202,10 @@
 			dst.move(1, SIMD::Int(0));
 			dst.move(2, SIMD::Int(0));
 			dst.move(3, SIMD::Int(0));
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformInverseBallot:
+	case spv::OpGroupNonUniformInverseBallot:
 		{
 			auto valueId = Object::ID(insn.word(4));
 			ASSERT(type.componentCount == 1);
@@ -213,10 +213,10 @@
 			Operand value(this, state, valueId);
 			auto bit = (value.Int(0) >> SIMD::Int(0, 1, 2, 3)) & SIMD::Int(1);
 			dst.move(0, -bit);
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformBallotBitExtract:
+	case spv::OpGroupNonUniformBallotBitExtract:
 		{
 			auto valueId = Object::ID(insn.word(4));
 			auto indexId = Object::ID(insn.word(5));
@@ -232,10 +232,10 @@
 			            (value.Int(2) & CmpEQ(vecIdx, SIMD::Int(2))) |
 			            (value.Int(3) & CmpEQ(vecIdx, SIMD::Int(3)));
 			dst.move(0, -((bits >> bitIdx) & SIMD::Int(1)));
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformBallotBitCount:
+	case spv::OpGroupNonUniformBallotBitCount:
 		{
 			auto operation = spv::GroupOperation(insn.word(4));
 			auto valueId = Object::ID(insn.word(5));
@@ -244,42 +244,42 @@
 			Operand value(this, state, valueId);
 			switch(operation)
 			{
-				case spv::GroupOperationReduce:
-					dst.move(0, CountBits(value.UInt(0) & SIMD::UInt(15)));
-					break;
-				case spv::GroupOperationInclusiveScan:
-					dst.move(0, CountBits(value.UInt(0) & SIMD::UInt(1, 3, 7, 15)));
-					break;
-				case spv::GroupOperationExclusiveScan:
-					dst.move(0, CountBits(value.UInt(0) & SIMD::UInt(0, 1, 3, 7)));
-					break;
-				default:
-					UNSUPPORTED("GroupOperation %d", int(operation));
+			case spv::GroupOperationReduce:
+				dst.move(0, CountBits(value.UInt(0) & SIMD::UInt(15)));
+				break;
+			case spv::GroupOperationInclusiveScan:
+				dst.move(0, CountBits(value.UInt(0) & SIMD::UInt(1, 3, 7, 15)));
+				break;
+			case spv::GroupOperationExclusiveScan:
+				dst.move(0, CountBits(value.UInt(0) & SIMD::UInt(0, 1, 3, 7)));
+				break;
+			default:
+				UNSUPPORTED("GroupOperation %d", int(operation));
 			}
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformBallotFindLSB:
+	case spv::OpGroupNonUniformBallotFindLSB:
 		{
 			auto valueId = Object::ID(insn.word(4));
 			ASSERT(type.componentCount == 1);
 			ASSERT(getType(getObject(valueId)).componentCount == 4);
 			Operand value(this, state, valueId);
 			dst.move(0, Cttz(value.UInt(0) & SIMD::UInt(15), true));
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformBallotFindMSB:
+	case spv::OpGroupNonUniformBallotFindMSB:
 		{
 			auto valueId = Object::ID(insn.word(4));
 			ASSERT(type.componentCount == 1);
 			ASSERT(getType(getObject(valueId)).componentCount == 4);
 			Operand value(this, state, valueId);
 			dst.move(0, SIMD::UInt(31) - Ctlz(value.UInt(0) & SIMD::UInt(15), false));
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformShuffle:
+	case spv::OpGroupNonUniformShuffle:
 		{
 			Operand value(this, state, insn.word(4));
 			Operand id(this, state, insn.word(5));
@@ -292,10 +292,10 @@
 				SIMD::Int v = value.Int(i);
 				dst.move(i, (x & v.xxxx) | (y & v.yyyy) | (z & v.zzzz) | (w & v.wwww));
 			}
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformShuffleXor:
+	case spv::OpGroupNonUniformShuffleXor:
 		{
 			Operand value(this, state, insn.word(4));
 			Operand mask(this, state, insn.word(5));
@@ -308,10 +308,10 @@
 				SIMD::Int v = value.Int(i);
 				dst.move(i, (x & v.xxxx) | (y & v.yyyy) | (z & v.zzzz) | (w & v.wwww));
 			}
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformShuffleUp:
+	case spv::OpGroupNonUniformShuffleUp:
 		{
 			Operand value(this, state, insn.word(4));
 			Operand delta(this, state, insn.word(5));
@@ -324,10 +324,10 @@
 				SIMD::Int v = value.Int(i);
 				dst.move(i, (d0 & v.xyzw) | (d1 & v.xxyz) | (d2 & v.xxxy) | (d3 & v.xxxx));
 			}
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformShuffleDown:
+	case spv::OpGroupNonUniformShuffleDown:
 		{
 			Operand value(this, state, insn.word(4));
 			Operand delta(this, state, insn.word(5));
@@ -340,116 +340,116 @@
 				SIMD::Int v = value.Int(i);
 				dst.move(i, (d0 & v.xyzw) | (d1 & v.yzww) | (d2 & v.zwww) | (d3 & v.wwww));
 			}
-			break;
 		}
+		break;
 
-		case spv::OpGroupNonUniformIAdd:
-			Impl::Group::BinaryOperation<SIMD::Int>(
-			    this, insn, state, dst, 0,
-			    [](auto a, auto b) { return a + b; });
-			break;
+	case spv::OpGroupNonUniformIAdd:
+		Impl::Group::BinaryOperation<SIMD::Int>(
+		    this, insn, state, dst, 0,
+		    [](auto a, auto b) { return a + b; });
+		break;
 
-		case spv::OpGroupNonUniformFAdd:
-			Impl::Group::BinaryOperation<SIMD::Float>(
-			    this, insn, state, dst, 0.0f,
-			    [](auto a, auto b) { return a + b; });
-			break;
+	case spv::OpGroupNonUniformFAdd:
+		Impl::Group::BinaryOperation<SIMD::Float>(
+		    this, insn, state, dst, 0.0f,
+		    [](auto a, auto b) { return a + b; });
+		break;
 
-		case spv::OpGroupNonUniformIMul:
-			Impl::Group::BinaryOperation<SIMD::Int>(
-			    this, insn, state, dst, 1,
-			    [](auto a, auto b) { return a * b; });
-			break;
+	case spv::OpGroupNonUniformIMul:
+		Impl::Group::BinaryOperation<SIMD::Int>(
+		    this, insn, state, dst, 1,
+		    [](auto a, auto b) { return a * b; });
+		break;
 
-		case spv::OpGroupNonUniformFMul:
-			Impl::Group::BinaryOperation<SIMD::Float>(
-			    this, insn, state, dst, 1.0f,
-			    [](auto a, auto b) { return a * b; });
-			break;
+	case spv::OpGroupNonUniformFMul:
+		Impl::Group::BinaryOperation<SIMD::Float>(
+		    this, insn, state, dst, 1.0f,
+		    [](auto a, auto b) { return a * b; });
+		break;
 
-		case spv::OpGroupNonUniformBitwiseAnd:
-			Impl::Group::BinaryOperation<SIMD::UInt>(
-			    this, insn, state, dst, ~0u,
-			    [](auto a, auto b) { return a & b; });
-			break;
+	case spv::OpGroupNonUniformBitwiseAnd:
+		Impl::Group::BinaryOperation<SIMD::UInt>(
+		    this, insn, state, dst, ~0u,
+		    [](auto a, auto b) { return a & b; });
+		break;
 
-		case spv::OpGroupNonUniformBitwiseOr:
-			Impl::Group::BinaryOperation<SIMD::UInt>(
-			    this, insn, state, dst, 0,
-			    [](auto a, auto b) { return a | b; });
-			break;
+	case spv::OpGroupNonUniformBitwiseOr:
+		Impl::Group::BinaryOperation<SIMD::UInt>(
+		    this, insn, state, dst, 0,
+		    [](auto a, auto b) { return a | b; });
+		break;
 
-		case spv::OpGroupNonUniformBitwiseXor:
-			Impl::Group::BinaryOperation<SIMD::UInt>(
-			    this, insn, state, dst, 0,
-			    [](auto a, auto b) { return a ^ b; });
-			break;
+	case spv::OpGroupNonUniformBitwiseXor:
+		Impl::Group::BinaryOperation<SIMD::UInt>(
+		    this, insn, state, dst, 0,
+		    [](auto a, auto b) { return a ^ b; });
+		break;
 
-		case spv::OpGroupNonUniformSMin:
-			Impl::Group::BinaryOperation<SIMD::Int>(
-			    this, insn, state, dst, INT32_MAX,
-			    [](auto a, auto b) { return Min(a, b); });
-			break;
+	case spv::OpGroupNonUniformSMin:
+		Impl::Group::BinaryOperation<SIMD::Int>(
+		    this, insn, state, dst, INT32_MAX,
+		    [](auto a, auto b) { return Min(a, b); });
+		break;
 
-		case spv::OpGroupNonUniformUMin:
-			Impl::Group::BinaryOperation<SIMD::UInt>(
-			    this, insn, state, dst, ~0u,
-			    [](auto a, auto b) { return Min(a, b); });
-			break;
+	case spv::OpGroupNonUniformUMin:
+		Impl::Group::BinaryOperation<SIMD::UInt>(
+		    this, insn, state, dst, ~0u,
+		    [](auto a, auto b) { return Min(a, b); });
+		break;
 
-		case spv::OpGroupNonUniformFMin:
-			Impl::Group::BinaryOperation<SIMD::Float>(
-			    this, insn, state, dst, SIMD::Float::infinity(),
-			    [](auto a, auto b) { return NMin(a, b); });
-			break;
+	case spv::OpGroupNonUniformFMin:
+		Impl::Group::BinaryOperation<SIMD::Float>(
+		    this, insn, state, dst, SIMD::Float::infinity(),
+		    [](auto a, auto b) { return NMin(a, b); });
+		break;
 
-		case spv::OpGroupNonUniformSMax:
-			Impl::Group::BinaryOperation<SIMD::Int>(
-			    this, insn, state, dst, INT32_MIN,
-			    [](auto a, auto b) { return Max(a, b); });
-			break;
+	case spv::OpGroupNonUniformSMax:
+		Impl::Group::BinaryOperation<SIMD::Int>(
+		    this, insn, state, dst, INT32_MIN,
+		    [](auto a, auto b) { return Max(a, b); });
+		break;
 
-		case spv::OpGroupNonUniformUMax:
-			Impl::Group::BinaryOperation<SIMD::UInt>(
-			    this, insn, state, dst, 0,
-			    [](auto a, auto b) { return Max(a, b); });
-			break;
+	case spv::OpGroupNonUniformUMax:
+		Impl::Group::BinaryOperation<SIMD::UInt>(
+		    this, insn, state, dst, 0,
+		    [](auto a, auto b) { return Max(a, b); });
+		break;
 
-		case spv::OpGroupNonUniformFMax:
-			Impl::Group::BinaryOperation<SIMD::Float>(
-			    this, insn, state, dst, -SIMD::Float::infinity(),
-			    [](auto a, auto b) { return NMax(a, b); });
-			break;
+	case spv::OpGroupNonUniformFMax:
+		Impl::Group::BinaryOperation<SIMD::Float>(
+		    this, insn, state, dst, -SIMD::Float::infinity(),
+		    [](auto a, auto b) { return NMax(a, b); });
+		break;
 
-		case spv::OpGroupNonUniformLogicalAnd:
-			Impl::Group::BinaryOperation<SIMD::UInt>(
-			    this, insn, state, dst, ~0u,
-			    [](auto a, auto b) {
-				    SIMD::UInt zero = SIMD::UInt(0);
-				    return CmpNEQ(a, zero) & CmpNEQ(b, zero);
-			    });
-			break;
+	case spv::OpGroupNonUniformLogicalAnd:
+		Impl::Group::BinaryOperation<SIMD::UInt>(
+		    this, insn, state, dst, ~0u,
+		    [](auto a, auto b) {
+			    SIMD::UInt zero = SIMD::UInt(0);
+			    return CmpNEQ(a, zero) & CmpNEQ(b, zero);
+		    });
+		break;
 
-		case spv::OpGroupNonUniformLogicalOr:
-			Impl::Group::BinaryOperation<SIMD::UInt>(
-			    this, insn, state, dst, 0,
-			    [](auto a, auto b) {
-				    SIMD::UInt zero = SIMD::UInt(0);
-				    return CmpNEQ(a, zero) | CmpNEQ(b, zero);
-			    });
-			break;
+	case spv::OpGroupNonUniformLogicalOr:
+		Impl::Group::BinaryOperation<SIMD::UInt>(
+		    this, insn, state, dst, 0,
+		    [](auto a, auto b) {
+			    SIMD::UInt zero = SIMD::UInt(0);
+			    return CmpNEQ(a, zero) | CmpNEQ(b, zero);
+		    });
+		break;
 
-		case spv::OpGroupNonUniformLogicalXor:
-			Impl::Group::BinaryOperation<SIMD::UInt>(
-			    this, insn, state, dst, 0,
-			    [](auto a, auto b) {
-				    SIMD::UInt zero = SIMD::UInt(0);
-				    return CmpNEQ(a, zero) ^ CmpNEQ(b, zero);
-			    });
-			break;
+	case spv::OpGroupNonUniformLogicalXor:
+		Impl::Group::BinaryOperation<SIMD::UInt>(
+		    this, insn, state, dst, 0,
+		    [](auto a, auto b) {
+			    SIMD::UInt zero = SIMD::UInt(0);
+			    return CmpNEQ(a, zero) ^ CmpNEQ(b, zero);
+		    });
+		break;
 
-		default:
-			UNSUPPORTED("EmitGroupNonUniform op: %s", OpcodeName(type.opcode()));
+	default:
+		UNSUPPORTED("EmitGroupNonUniform op: %s", OpcodeName(type.opcode()));
 	}
 	return EmitResult::Continue;
 }
diff --git a/src/Pipeline/SpirvShaderImage.cpp b/src/Pipeline/SpirvShaderImage.cpp
index 9da6d01..d0ea81a 100644
--- a/src/Pipeline/SpirvShaderImage.cpp
+++ b/src/Pipeline/SpirvShaderImage.cpp
@@ -27,49 +27,49 @@
 {
 	switch(format)
 	{
-		case spv::ImageFormatRgba32f: return VK_FORMAT_R32G32B32A32_SFLOAT;
-		case spv::ImageFormatRgba16f: return VK_FORMAT_R16G16B16A16_SFLOAT;
-		case spv::ImageFormatR32f: return VK_FORMAT_R32_SFLOAT;
-		case spv::ImageFormatRgba8: return VK_FORMAT_R8G8B8A8_UNORM;
-		case spv::ImageFormatRgba8Snorm: return VK_FORMAT_R8G8B8A8_SNORM;
-		case spv::ImageFormatRg32f: return VK_FORMAT_R32G32_SFLOAT;
-		case spv::ImageFormatRg16f: return VK_FORMAT_R16G16_SFLOAT;
-		case spv::ImageFormatR11fG11fB10f: return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
-		case spv::ImageFormatR16f: return VK_FORMAT_R16_SFLOAT;
-		case spv::ImageFormatRgba16: return VK_FORMAT_R16G16B16A16_UNORM;
-		case spv::ImageFormatRgb10A2: return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
-		case spv::ImageFormatRg16: return VK_FORMAT_R16G16_UNORM;
-		case spv::ImageFormatRg8: return VK_FORMAT_R8G8_UNORM;
-		case spv::ImageFormatR16: return VK_FORMAT_R16_UNORM;
-		case spv::ImageFormatR8: return VK_FORMAT_R8_UNORM;
-		case spv::ImageFormatRgba16Snorm: return VK_FORMAT_R16G16B16A16_SNORM;
-		case spv::ImageFormatRg16Snorm: return VK_FORMAT_R16G16_SNORM;
-		case spv::ImageFormatRg8Snorm: return VK_FORMAT_R8G8_SNORM;
-		case spv::ImageFormatR16Snorm: return VK_FORMAT_R16_SNORM;
-		case spv::ImageFormatR8Snorm: return VK_FORMAT_R8_SNORM;
-		case spv::ImageFormatRgba32i: return VK_FORMAT_R32G32B32A32_SINT;
-		case spv::ImageFormatRgba16i: return VK_FORMAT_R16G16B16A16_SINT;
-		case spv::ImageFormatRgba8i: return VK_FORMAT_R8G8B8A8_SINT;
-		case spv::ImageFormatR32i: return VK_FORMAT_R32_SINT;
-		case spv::ImageFormatRg32i: return VK_FORMAT_R32G32_SINT;
-		case spv::ImageFormatRg16i: return VK_FORMAT_R16G16_SINT;
-		case spv::ImageFormatRg8i: return VK_FORMAT_R8G8_SINT;
-		case spv::ImageFormatR16i: return VK_FORMAT_R16_SINT;
-		case spv::ImageFormatR8i: return VK_FORMAT_R8_SINT;
-		case spv::ImageFormatRgba32ui: return VK_FORMAT_R32G32B32A32_UINT;
-		case spv::ImageFormatRgba16ui: return VK_FORMAT_R16G16B16A16_UINT;
-		case spv::ImageFormatRgba8ui: return VK_FORMAT_R8G8B8A8_UINT;
-		case spv::ImageFormatR32ui: return VK_FORMAT_R32_UINT;
-		case spv::ImageFormatRgb10a2ui: return VK_FORMAT_A2B10G10R10_UINT_PACK32;
-		case spv::ImageFormatRg32ui: return VK_FORMAT_R32G32_UINT;
-		case spv::ImageFormatRg16ui: return VK_FORMAT_R16G16_UINT;
-		case spv::ImageFormatRg8ui: return VK_FORMAT_R8G8_UINT;
-		case spv::ImageFormatR16ui: return VK_FORMAT_R16_UINT;
-		case spv::ImageFormatR8ui: return VK_FORMAT_R8_UINT;
+	case spv::ImageFormatRgba32f: return VK_FORMAT_R32G32B32A32_SFLOAT;
+	case spv::ImageFormatRgba16f: return VK_FORMAT_R16G16B16A16_SFLOAT;
+	case spv::ImageFormatR32f: return VK_FORMAT_R32_SFLOAT;
+	case spv::ImageFormatRgba8: return VK_FORMAT_R8G8B8A8_UNORM;
+	case spv::ImageFormatRgba8Snorm: return VK_FORMAT_R8G8B8A8_SNORM;
+	case spv::ImageFormatRg32f: return VK_FORMAT_R32G32_SFLOAT;
+	case spv::ImageFormatRg16f: return VK_FORMAT_R16G16_SFLOAT;
+	case spv::ImageFormatR11fG11fB10f: return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
+	case spv::ImageFormatR16f: return VK_FORMAT_R16_SFLOAT;
+	case spv::ImageFormatRgba16: return VK_FORMAT_R16G16B16A16_UNORM;
+	case spv::ImageFormatRgb10A2: return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
+	case spv::ImageFormatRg16: return VK_FORMAT_R16G16_UNORM;
+	case spv::ImageFormatRg8: return VK_FORMAT_R8G8_UNORM;
+	case spv::ImageFormatR16: return VK_FORMAT_R16_UNORM;
+	case spv::ImageFormatR8: return VK_FORMAT_R8_UNORM;
+	case spv::ImageFormatRgba16Snorm: return VK_FORMAT_R16G16B16A16_SNORM;
+	case spv::ImageFormatRg16Snorm: return VK_FORMAT_R16G16_SNORM;
+	case spv::ImageFormatRg8Snorm: return VK_FORMAT_R8G8_SNORM;
+	case spv::ImageFormatR16Snorm: return VK_FORMAT_R16_SNORM;
+	case spv::ImageFormatR8Snorm: return VK_FORMAT_R8_SNORM;
+	case spv::ImageFormatRgba32i: return VK_FORMAT_R32G32B32A32_SINT;
+	case spv::ImageFormatRgba16i: return VK_FORMAT_R16G16B16A16_SINT;
+	case spv::ImageFormatRgba8i: return VK_FORMAT_R8G8B8A8_SINT;
+	case spv::ImageFormatR32i: return VK_FORMAT_R32_SINT;
+	case spv::ImageFormatRg32i: return VK_FORMAT_R32G32_SINT;
+	case spv::ImageFormatRg16i: return VK_FORMAT_R16G16_SINT;
+	case spv::ImageFormatRg8i: return VK_FORMAT_R8G8_SINT;
+	case spv::ImageFormatR16i: return VK_FORMAT_R16_SINT;
+	case spv::ImageFormatR8i: return VK_FORMAT_R8_SINT;
+	case spv::ImageFormatRgba32ui: return VK_FORMAT_R32G32B32A32_UINT;
+	case spv::ImageFormatRgba16ui: return VK_FORMAT_R16G16B16A16_UINT;
+	case spv::ImageFormatRgba8ui: return VK_FORMAT_R8G8B8A8_UINT;
+	case spv::ImageFormatR32ui: return VK_FORMAT_R32_UINT;
+	case spv::ImageFormatRgb10a2ui: return VK_FORMAT_A2B10G10R10_UINT_PACK32;
+	case spv::ImageFormatRg32ui: return VK_FORMAT_R32G32_UINT;
+	case spv::ImageFormatRg16ui: return VK_FORMAT_R16G16_UINT;
+	case spv::ImageFormatRg8ui: return VK_FORMAT_R8G8_UINT;
+	case spv::ImageFormatR16ui: return VK_FORMAT_R16_UINT;
+	case spv::ImageFormatR8ui: return VK_FORMAT_R8_UINT;
 
-		default:
-			UNSUPPORTED("SPIR-V ImageFormat %u", format);
-			return VK_FORMAT_UNDEFINED;
+	default:
+		UNSUPPORTED("SPIR-V ImageFormat %u", format);
+		return VK_FORMAT_UNDEFINED;
 	}
 }
 
@@ -387,21 +387,21 @@
 
 	switch(descriptorType)
 	{
-		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
-		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
-			width = *Pointer<Int>(descriptor + OFFSET(vk::StorageImageDescriptor, width));
-			height = *Pointer<Int>(descriptor + OFFSET(vk::StorageImageDescriptor, height));
-			depth = *Pointer<Int>(descriptor + OFFSET(vk::StorageImageDescriptor, depth));
-			break;
-		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
-		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
-		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
-			width = *Pointer<Int>(descriptor + OFFSET(vk::SampledImageDescriptor, width));
-			height = *Pointer<Int>(descriptor + OFFSET(vk::SampledImageDescriptor, height));
-			depth = *Pointer<Int>(descriptor + OFFSET(vk::SampledImageDescriptor, depth));
-			break;
-		default:
-			UNREACHABLE("Image descriptorType: %d", int(descriptorType));
+	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+		width = *Pointer<Int>(descriptor + OFFSET(vk::StorageImageDescriptor, width));
+		height = *Pointer<Int>(descriptor + OFFSET(vk::StorageImageDescriptor, height));
+		depth = *Pointer<Int>(descriptor + OFFSET(vk::StorageImageDescriptor, depth));
+		break;
+	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+	case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+		width = *Pointer<Int>(descriptor + OFFSET(vk::SampledImageDescriptor, width));
+		height = *Pointer<Int>(descriptor + OFFSET(vk::SampledImageDescriptor, height));
+		depth = *Pointer<Int>(descriptor + OFFSET(vk::SampledImageDescriptor, depth));
+		break;
+	default:
+		UNREACHABLE("Image descriptorType: %d", int(descriptorType));
 	}
 
 	if(lodId != 0)
@@ -442,13 +442,13 @@
 	Int mipLevels = 0;
 	switch(descriptorType)
 	{
-		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
-		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
-		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
-			mipLevels = *Pointer<Int>(descriptor + OFFSET(vk::SampledImageDescriptor, mipLevels));  // uint32_t
-			break;
-		default:
-			UNREACHABLE("Image descriptorType: %d", int(descriptorType));
+	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+	case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+		mipLevels = *Pointer<Int>(descriptor + OFFSET(vk::SampledImageDescriptor, mipLevels));  // uint32_t
+		break;
+	default:
+		UNREACHABLE("Image descriptorType: %d", int(descriptorType));
 	}
 
 	auto &dst = state->createIntermediate(insn.resultId(), 1);
@@ -474,16 +474,16 @@
 	Int sampleCount = 0;
 	switch(descriptorType)
 	{
-		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
-			sampleCount = *Pointer<Int>(descriptor + OFFSET(vk::StorageImageDescriptor, sampleCount));  // uint32_t
-			break;
-		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
-		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
-		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
-			sampleCount = *Pointer<Int>(descriptor + OFFSET(vk::SampledImageDescriptor, sampleCount));  // uint32_t
-			break;
-		default:
-			UNREACHABLE("Image descriptorType: %d", int(descriptorType));
+	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+		sampleCount = *Pointer<Int>(descriptor + OFFSET(vk::StorageImageDescriptor, sampleCount));  // uint32_t
+		break;
+	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+	case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+		sampleCount = *Pointer<Int>(descriptor + OFFSET(vk::SampledImageDescriptor, sampleCount));  // uint32_t
+		break;
+	default:
+		UNREACHABLE("Image descriptorType: %d", int(descriptorType));
 	}
 
 	auto &dst = state->createIntermediate(insn.resultId(), 1);
@@ -723,281 +723,281 @@
 	// - Any format supported as a color or depth/stencil attachment, for input attachments
 	switch(vkFormat)
 	{
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-			dst.move(0, packed[0]);
-			dst.move(1, packed[1]);
-			dst.move(2, packed[2]);
-			dst.move(3, packed[3]);
-			break;
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_UINT:
-			dst.move(0, packed[0]);
-			// Fill remaining channels with 0,0,1 (of the correct type)
-			dst.move(1, SIMD::Int(0));
-			dst.move(2, SIMD::Int(0));
-			dst.move(3, SIMD::Int(1));
-			break;
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_D32_SFLOAT:
-		case VK_FORMAT_D32_SFLOAT_S8_UINT:
-			dst.move(0, packed[0]);
-			// Fill remaining channels with 0,0,1 (of the correct type)
-			dst.move(1, SIMD::Float(0.0f));
-			dst.move(2, SIMD::Float(0.0f));
-			dst.move(3, SIMD::Float(1.0f));
-			break;
-		case VK_FORMAT_D16_UNORM:
-			dst.move(0, SIMD::Float(packed[0] & SIMD::Int(0xFFFF)) * SIMD::Float(1.0f / 0xFFFF));
-			dst.move(1, SIMD::Float(0.0f));
-			dst.move(2, SIMD::Float(0.0f));
-			dst.move(3, SIMD::Float(1.0f));
-			break;
-		case VK_FORMAT_R16G16B16A16_UNORM:
-			dst.move(0, SIMD::Float(packed[0] & SIMD::Int(0xFFFF)) * SIMD::Float(1.0f / 0xFFFF));
-			dst.move(1, SIMD::Float((packed[0] >> 16) & SIMD::Int(0xFFFF)) * SIMD::Float(1.0f / 0xFFFF));
-			dst.move(2, SIMD::Float(packed[1] & SIMD::Int(0xFFFF)) * SIMD::Float(1.0f / 0xFFFF));
-			dst.move(3, SIMD::Float((packed[1] >> 16) & SIMD::Int(0xFFFF)) * SIMD::Float(1.0f / 0xFFFF));
-			break;
-		case VK_FORMAT_R16G16B16A16_SNORM:
-			dst.move(0, Max(SIMD::Float((packed[0] << 16) & SIMD::Int(0xFFFF0000)) * SIMD::Float(1.0f / 0x7FFF0000), SIMD::Float(-1.0f)));
-			dst.move(1, Max(SIMD::Float(packed[0] & SIMD::Int(0xFFFF0000)) * SIMD::Float(1.0f / 0x7FFF0000), SIMD::Float(-1.0f)));
-			dst.move(2, Max(SIMD::Float((packed[1] << 16) & SIMD::Int(0xFFFF0000)) * SIMD::Float(1.0f / 0x7FFF0000), SIMD::Float(-1.0f)));
-			dst.move(3, Max(SIMD::Float(packed[1] & SIMD::Int(0xFFFF0000)) * SIMD::Float(1.0f / 0x7FFF0000), SIMD::Float(-1.0f)));
-			break;
-		case VK_FORMAT_R16G16B16A16_SINT:
-			dst.move(0, (packed[0] << 16) >> 16);
-			dst.move(1, packed[0] >> 16);
-			dst.move(2, (packed[1] << 16) >> 16);
-			dst.move(3, packed[1] >> 16);
-			break;
-		case VK_FORMAT_R16G16B16A16_UINT:
-			dst.move(0, packed[0] & SIMD::Int(0xFFFF));
-			dst.move(1, (packed[0] >> 16) & SIMD::Int(0xFFFF));
-			dst.move(2, packed[1] & SIMD::Int(0xFFFF));
-			dst.move(3, (packed[1] >> 16) & SIMD::Int(0xFFFF));
-			break;
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-			dst.move(0, halfToFloatBits(As<SIMD::UInt>(packed[0]) & SIMD::UInt(0x0000FFFF)));
-			dst.move(1, halfToFloatBits((As<SIMD::UInt>(packed[0]) & SIMD::UInt(0xFFFF0000)) >> 16));
-			dst.move(2, halfToFloatBits(As<SIMD::UInt>(packed[1]) & SIMD::UInt(0x0000FFFF)));
-			dst.move(3, halfToFloatBits((As<SIMD::UInt>(packed[1]) & SIMD::UInt(0xFFFF0000)) >> 16));
-			break;
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-			dst.move(0, Max(SIMD::Float((packed[0] << 24) & SIMD::Int(0xFF000000)) * SIMD::Float(1.0f / 0x7F000000), SIMD::Float(-1.0f)));
-			dst.move(1, Max(SIMD::Float((packed[0] << 16) & SIMD::Int(0xFF000000)) * SIMD::Float(1.0f / 0x7F000000), SIMD::Float(-1.0f)));
-			dst.move(2, Max(SIMD::Float((packed[0] << 8) & SIMD::Int(0xFF000000)) * SIMD::Float(1.0f / 0x7F000000), SIMD::Float(-1.0f)));
-			dst.move(3, Max(SIMD::Float((packed[0]) & SIMD::Int(0xFF000000)) * SIMD::Float(1.0f / 0x7F000000), SIMD::Float(-1.0f)));
-			break;
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-			dst.move(0, SIMD::Float(packed[0] & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
-			dst.move(1, SIMD::Float((packed[0] >> 8) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
-			dst.move(2, SIMD::Float((packed[0] >> 16) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
-			dst.move(3, SIMD::Float((packed[0] >> 24) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
-			break;
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-			dst.move(0, ::sRGBtoLinear(SIMD::Float(packed[0] & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF)));
-			dst.move(1, ::sRGBtoLinear(SIMD::Float((packed[0] >> 8) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF)));
-			dst.move(2, ::sRGBtoLinear(SIMD::Float((packed[0] >> 16) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF)));
-			dst.move(3, SIMD::Float((packed[0] >> 24) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
-			break;
-		case VK_FORMAT_B8G8R8A8_UNORM:
-			dst.move(0, SIMD::Float((packed[0] >> 16) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
-			dst.move(1, SIMD::Float((packed[0] >> 8) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
-			dst.move(2, SIMD::Float(packed[0] & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
-			dst.move(3, SIMD::Float((packed[0] >> 24) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
-			break;
-		case VK_FORMAT_B8G8R8A8_SRGB:
-			dst.move(0, ::sRGBtoLinear(SIMD::Float((packed[0] >> 16) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF)));
-			dst.move(1, ::sRGBtoLinear(SIMD::Float((packed[0] >> 8) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF)));
-			dst.move(2, ::sRGBtoLinear(SIMD::Float(packed[0] & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF)));
-			dst.move(3, SIMD::Float((packed[0] >> 24) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
-			break;
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-			dst.move(0, As<SIMD::UInt>(packed[0]) & SIMD::UInt(0xFF));
-			dst.move(1, (As<SIMD::UInt>(packed[0]) >> 8) & SIMD::UInt(0xFF));
-			dst.move(2, (As<SIMD::UInt>(packed[0]) >> 16) & SIMD::UInt(0xFF));
-			dst.move(3, (As<SIMD::UInt>(packed[0]) >> 24) & SIMD::UInt(0xFF));
-			break;
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-			dst.move(0, (packed[0] << 24) >> 24);
-			dst.move(1, (packed[0] << 16) >> 24);
-			dst.move(2, (packed[0] << 8) >> 24);
-			dst.move(3, packed[0] >> 24);
-			break;
-		case VK_FORMAT_R8_UNORM:
-			dst.move(0, SIMD::Float((packed[0] & SIMD::Int(0xFF))) * SIMD::Float(1.0f / 0xFF));
-			dst.move(1, SIMD::Float(0.0f));
-			dst.move(2, SIMD::Float(0.0f));
-			dst.move(3, SIMD::Float(1.0f));
-			break;
-		case VK_FORMAT_R8_SNORM:
-			dst.move(0, Max(SIMD::Float((packed[0] << 24) & SIMD::Int(0xFF000000)) * SIMD::Float(1.0f / 0x7F000000), SIMD::Float(-1.0f)));
-			dst.move(1, SIMD::Float(0.0f));
-			dst.move(2, SIMD::Float(0.0f));
-			dst.move(3, SIMD::Float(1.0f));
-			break;
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_S8_UINT:
-			dst.move(0, As<SIMD::UInt>(packed[0]) & SIMD::UInt(0xFF));
-			dst.move(1, SIMD::UInt(0));
-			dst.move(2, SIMD::UInt(0));
-			dst.move(3, SIMD::UInt(1));
-			break;
-		case VK_FORMAT_R8_SINT:
-			dst.move(0, (packed[0] << 24) >> 24);
-			dst.move(1, SIMD::Int(0));
-			dst.move(2, SIMD::Int(0));
-			dst.move(3, SIMD::Int(1));
-			break;
-		case VK_FORMAT_R8G8_UNORM:
-			dst.move(0, SIMD::Float(packed[0] & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
-			dst.move(1, SIMD::Float((packed[0] >> 8) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
-			dst.move(2, SIMD::Float(0.0f));
-			dst.move(3, SIMD::Float(1.0f));
-			break;
-		case VK_FORMAT_R8G8_SNORM:
-			dst.move(0, Max(SIMD::Float((packed[0] << 24) & SIMD::Int(0xFF000000)) * SIMD::Float(1.0f / 0x7F000000), SIMD::Float(-1.0f)));
-			dst.move(1, Max(SIMD::Float((packed[0] << 16) & SIMD::Int(0xFF000000)) * SIMD::Float(1.0f / 0x7F000000), SIMD::Float(-1.0f)));
-			dst.move(2, SIMD::Float(0.0f));
-			dst.move(3, SIMD::Float(1.0f));
-			break;
-		case VK_FORMAT_R8G8_UINT:
-			dst.move(0, As<SIMD::UInt>(packed[0]) & SIMD::UInt(0xFF));
-			dst.move(1, (As<SIMD::UInt>(packed[0]) >> 8) & SIMD::UInt(0xFF));
-			dst.move(2, SIMD::UInt(0));
-			dst.move(3, SIMD::UInt(1));
-			break;
-		case VK_FORMAT_R8G8_SINT:
-			dst.move(0, (packed[0] << 24) >> 24);
-			dst.move(1, (packed[0] << 16) >> 24);
-			dst.move(2, SIMD::Int(0));
-			dst.move(3, SIMD::Int(1));
-			break;
-		case VK_FORMAT_R16_SFLOAT:
-			dst.move(0, halfToFloatBits(As<SIMD::UInt>(packed[0]) & SIMD::UInt(0x0000FFFF)));
-			dst.move(1, SIMD::Float(0.0f));
-			dst.move(2, SIMD::Float(0.0f));
-			dst.move(3, SIMD::Float(1.0f));
-			break;
-		case VK_FORMAT_R16_UNORM:
-			dst.move(0, SIMD::Float(packed[0] & SIMD::Int(0xFFFF)) * SIMD::Float(1.0f / 0xFFFF));
-			dst.move(1, SIMD::Float(0.0f));
-			dst.move(2, SIMD::Float(0.0f));
-			dst.move(3, SIMD::Float(1.0f));
-			break;
-		case VK_FORMAT_R16_SNORM:
-			dst.move(0, Max(SIMD::Float((packed[0] << 16) & SIMD::Int(0xFFFF0000)) * SIMD::Float(1.0f / 0x7FFF0000), SIMD::Float(-1.0f)));
-			dst.move(1, SIMD::Float(0.0f));
-			dst.move(2, SIMD::Float(0.0f));
-			dst.move(3, SIMD::Float(1.0f));
-			break;
-		case VK_FORMAT_R16_UINT:
-			dst.move(0, packed[0] & SIMD::Int(0xFFFF));
-			dst.move(1, SIMD::UInt(0));
-			dst.move(2, SIMD::UInt(0));
-			dst.move(3, SIMD::UInt(1));
-			break;
-		case VK_FORMAT_R16_SINT:
-			dst.move(0, (packed[0] << 16) >> 16);
-			dst.move(1, SIMD::Int(0));
-			dst.move(2, SIMD::Int(0));
-			dst.move(3, SIMD::Int(1));
-			break;
-		case VK_FORMAT_R16G16_SFLOAT:
-			dst.move(0, halfToFloatBits(As<SIMD::UInt>(packed[0]) & SIMD::UInt(0x0000FFFF)));
-			dst.move(1, halfToFloatBits((As<SIMD::UInt>(packed[0]) & SIMD::UInt(0xFFFF0000)) >> 16));
-			dst.move(2, SIMD::Float(0.0f));
-			dst.move(3, SIMD::Float(1.0f));
-			break;
-		case VK_FORMAT_R16G16_UNORM:
-			dst.move(0, SIMD::Float(packed[0] & SIMD::Int(0xFFFF)) * SIMD::Float(1.0f / 0xFFFF));
-			dst.move(1, SIMD::Float(As<SIMD::UInt>(packed[0]) >> 16) * SIMD::Float(1.0f / 0xFFFF));
-			dst.move(2, SIMD::Float(0.0f));
-			dst.move(3, SIMD::Float(1.0f));
-			break;
-		case VK_FORMAT_R16G16_SNORM:
-			dst.move(0, Max(SIMD::Float((packed[0] << 16) & SIMD::Int(0xFFFF0000)) * SIMD::Float(1.0f / 0x7FFF0000), SIMD::Float(-1.0f)));
-			dst.move(1, Max(SIMD::Float(packed[0] & SIMD::Int(0xFFFF0000)) * SIMD::Float(1.0f / 0x7FFF0000), SIMD::Float(-1.0f)));
-			dst.move(2, SIMD::Float(0.0f));
-			dst.move(3, SIMD::Float(1.0f));
-			break;
-		case VK_FORMAT_R16G16_UINT:
-			dst.move(0, packed[0] & SIMD::Int(0xFFFF));
-			dst.move(1, (packed[0] >> 16) & SIMD::Int(0xFFFF));
-			dst.move(2, SIMD::UInt(0));
-			dst.move(3, SIMD::UInt(1));
-			break;
-		case VK_FORMAT_R16G16_SINT:
-			dst.move(0, (packed[0] << 16) >> 16);
-			dst.move(1, packed[0] >> 16);
-			dst.move(2, SIMD::Int(0));
-			dst.move(3, SIMD::Int(1));
-			break;
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_UINT:
-			dst.move(0, packed[0]);
-			dst.move(1, packed[1]);
-			dst.move(2, SIMD::Int(0));
-			dst.move(3, SIMD::Int(1));
-			break;
-		case VK_FORMAT_R32G32_SFLOAT:
-			dst.move(0, packed[0]);
-			dst.move(1, packed[1]);
-			dst.move(2, SIMD::Float(0.0f));
-			dst.move(3, SIMD::Float(1.0f));
-			break;
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-			dst.move(0, packed[0] & SIMD::Int(0x3FF));
-			dst.move(1, (packed[0] >> 10) & SIMD::Int(0x3FF));
-			dst.move(2, (packed[0] >> 20) & SIMD::Int(0x3FF));
-			dst.move(3, (packed[0] >> 30) & SIMD::Int(0x3));
-			break;
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-			dst.move(2, packed[0] & SIMD::Int(0x3FF));
-			dst.move(1, (packed[0] >> 10) & SIMD::Int(0x3FF));
-			dst.move(0, (packed[0] >> 20) & SIMD::Int(0x3FF));
-			dst.move(3, (packed[0] >> 30) & SIMD::Int(0x3));
-			break;
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-			dst.move(0, SIMD::Float((packed[0]) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
-			dst.move(1, SIMD::Float((packed[0] >> 10) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
-			dst.move(2, SIMD::Float((packed[0] >> 20) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
-			dst.move(3, SIMD::Float((packed[0] >> 30) & SIMD::Int(0x3)) * SIMD::Float(1.0f / 0x3));
-			break;
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-			dst.move(2, SIMD::Float((packed[0]) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
-			dst.move(1, SIMD::Float((packed[0] >> 10) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
-			dst.move(0, SIMD::Float((packed[0] >> 20) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
-			dst.move(3, SIMD::Float((packed[0] >> 30) & SIMD::Int(0x3)) * SIMD::Float(1.0f / 0x3));
-			break;
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-			dst.move(0, SIMD::Float((packed[0] >> 11) & SIMD::Int(0x1F)) * SIMD::Float(1.0f / 0x1F));
-			dst.move(1, SIMD::Float((packed[0] >> 5) & SIMD::Int(0x3F)) * SIMD::Float(1.0f / 0x3F));
-			dst.move(2, SIMD::Float((packed[0]) & SIMD::Int(0x1F)) * SIMD::Float(1.0f / 0x1F));
-			dst.move(3, SIMD::Float(1.0f));
-			break;
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-			dst.move(0, SIMD::Float((packed[0] >> 10) & SIMD::Int(0x1F)) * SIMD::Float(1.0f / 0x1F));
-			dst.move(1, SIMD::Float((packed[0] >> 5) & SIMD::Int(0x1F)) * SIMD::Float(1.0f / 0x1F));
-			dst.move(2, SIMD::Float((packed[0]) & SIMD::Int(0x1F)) * SIMD::Float(1.0f / 0x1F));
-			dst.move(3, SIMD::Float((packed[0] >> 15) & SIMD::Int(0x1)));
-			break;
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-			dst.move(0, halfToFloatBits((packed[0] << 4) & SIMD::Int(0x7FF0)));
-			dst.move(1, halfToFloatBits((packed[0] >> 7) & SIMD::Int(0x7FF0)));
-			dst.move(2, halfToFloatBits((packed[0] >> 17) & SIMD::Int(0x7FE0)));
-			dst.move(3, SIMD::Float(1.0f));
-			break;
-		default:
-			UNSUPPORTED("VkFormat %d", int(vkFormat));
-			break;
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+		dst.move(0, packed[0]);
+		dst.move(1, packed[1]);
+		dst.move(2, packed[2]);
+		dst.move(3, packed[3]);
+		break;
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_UINT:
+		dst.move(0, packed[0]);
+		// Fill remaining channels with 0,0,1 (of the correct type)
+		dst.move(1, SIMD::Int(0));
+		dst.move(2, SIMD::Int(0));
+		dst.move(3, SIMD::Int(1));
+		break;
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_D32_SFLOAT:
+	case VK_FORMAT_D32_SFLOAT_S8_UINT:
+		dst.move(0, packed[0]);
+		// Fill remaining channels with 0,0,1 (of the correct type)
+		dst.move(1, SIMD::Float(0.0f));
+		dst.move(2, SIMD::Float(0.0f));
+		dst.move(3, SIMD::Float(1.0f));
+		break;
+	case VK_FORMAT_D16_UNORM:
+		dst.move(0, SIMD::Float(packed[0] & SIMD::Int(0xFFFF)) * SIMD::Float(1.0f / 0xFFFF));
+		dst.move(1, SIMD::Float(0.0f));
+		dst.move(2, SIMD::Float(0.0f));
+		dst.move(3, SIMD::Float(1.0f));
+		break;
+	case VK_FORMAT_R16G16B16A16_UNORM:
+		dst.move(0, SIMD::Float(packed[0] & SIMD::Int(0xFFFF)) * SIMD::Float(1.0f / 0xFFFF));
+		dst.move(1, SIMD::Float((packed[0] >> 16) & SIMD::Int(0xFFFF)) * SIMD::Float(1.0f / 0xFFFF));
+		dst.move(2, SIMD::Float(packed[1] & SIMD::Int(0xFFFF)) * SIMD::Float(1.0f / 0xFFFF));
+		dst.move(3, SIMD::Float((packed[1] >> 16) & SIMD::Int(0xFFFF)) * SIMD::Float(1.0f / 0xFFFF));
+		break;
+	case VK_FORMAT_R16G16B16A16_SNORM:
+		dst.move(0, Max(SIMD::Float((packed[0] << 16) & SIMD::Int(0xFFFF0000)) * SIMD::Float(1.0f / 0x7FFF0000), SIMD::Float(-1.0f)));
+		dst.move(1, Max(SIMD::Float(packed[0] & SIMD::Int(0xFFFF0000)) * SIMD::Float(1.0f / 0x7FFF0000), SIMD::Float(-1.0f)));
+		dst.move(2, Max(SIMD::Float((packed[1] << 16) & SIMD::Int(0xFFFF0000)) * SIMD::Float(1.0f / 0x7FFF0000), SIMD::Float(-1.0f)));
+		dst.move(3, Max(SIMD::Float(packed[1] & SIMD::Int(0xFFFF0000)) * SIMD::Float(1.0f / 0x7FFF0000), SIMD::Float(-1.0f)));
+		break;
+	case VK_FORMAT_R16G16B16A16_SINT:
+		dst.move(0, (packed[0] << 16) >> 16);
+		dst.move(1, packed[0] >> 16);
+		dst.move(2, (packed[1] << 16) >> 16);
+		dst.move(3, packed[1] >> 16);
+		break;
+	case VK_FORMAT_R16G16B16A16_UINT:
+		dst.move(0, packed[0] & SIMD::Int(0xFFFF));
+		dst.move(1, (packed[0] >> 16) & SIMD::Int(0xFFFF));
+		dst.move(2, packed[1] & SIMD::Int(0xFFFF));
+		dst.move(3, (packed[1] >> 16) & SIMD::Int(0xFFFF));
+		break;
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+		dst.move(0, halfToFloatBits(As<SIMD::UInt>(packed[0]) & SIMD::UInt(0x0000FFFF)));
+		dst.move(1, halfToFloatBits((As<SIMD::UInt>(packed[0]) & SIMD::UInt(0xFFFF0000)) >> 16));
+		dst.move(2, halfToFloatBits(As<SIMD::UInt>(packed[1]) & SIMD::UInt(0x0000FFFF)));
+		dst.move(3, halfToFloatBits((As<SIMD::UInt>(packed[1]) & SIMD::UInt(0xFFFF0000)) >> 16));
+		break;
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+		dst.move(0, Max(SIMD::Float((packed[0] << 24) & SIMD::Int(0xFF000000)) * SIMD::Float(1.0f / 0x7F000000), SIMD::Float(-1.0f)));
+		dst.move(1, Max(SIMD::Float((packed[0] << 16) & SIMD::Int(0xFF000000)) * SIMD::Float(1.0f / 0x7F000000), SIMD::Float(-1.0f)));
+		dst.move(2, Max(SIMD::Float((packed[0] << 8) & SIMD::Int(0xFF000000)) * SIMD::Float(1.0f / 0x7F000000), SIMD::Float(-1.0f)));
+		dst.move(3, Max(SIMD::Float((packed[0]) & SIMD::Int(0xFF000000)) * SIMD::Float(1.0f / 0x7F000000), SIMD::Float(-1.0f)));
+		break;
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		dst.move(0, SIMD::Float(packed[0] & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
+		dst.move(1, SIMD::Float((packed[0] >> 8) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
+		dst.move(2, SIMD::Float((packed[0] >> 16) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
+		dst.move(3, SIMD::Float((packed[0] >> 24) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
+		break;
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+		dst.move(0, ::sRGBtoLinear(SIMD::Float(packed[0] & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF)));
+		dst.move(1, ::sRGBtoLinear(SIMD::Float((packed[0] >> 8) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF)));
+		dst.move(2, ::sRGBtoLinear(SIMD::Float((packed[0] >> 16) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF)));
+		dst.move(3, SIMD::Float((packed[0] >> 24) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
+		break;
+	case VK_FORMAT_B8G8R8A8_UNORM:
+		dst.move(0, SIMD::Float((packed[0] >> 16) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
+		dst.move(1, SIMD::Float((packed[0] >> 8) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
+		dst.move(2, SIMD::Float(packed[0] & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
+		dst.move(3, SIMD::Float((packed[0] >> 24) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
+		break;
+	case VK_FORMAT_B8G8R8A8_SRGB:
+		dst.move(0, ::sRGBtoLinear(SIMD::Float((packed[0] >> 16) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF)));
+		dst.move(1, ::sRGBtoLinear(SIMD::Float((packed[0] >> 8) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF)));
+		dst.move(2, ::sRGBtoLinear(SIMD::Float(packed[0] & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF)));
+		dst.move(3, SIMD::Float((packed[0] >> 24) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
+		break;
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		dst.move(0, As<SIMD::UInt>(packed[0]) & SIMD::UInt(0xFF));
+		dst.move(1, (As<SIMD::UInt>(packed[0]) >> 8) & SIMD::UInt(0xFF));
+		dst.move(2, (As<SIMD::UInt>(packed[0]) >> 16) & SIMD::UInt(0xFF));
+		dst.move(3, (As<SIMD::UInt>(packed[0]) >> 24) & SIMD::UInt(0xFF));
+		break;
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		dst.move(0, (packed[0] << 24) >> 24);
+		dst.move(1, (packed[0] << 16) >> 24);
+		dst.move(2, (packed[0] << 8) >> 24);
+		dst.move(3, packed[0] >> 24);
+		break;
+	case VK_FORMAT_R8_UNORM:
+		dst.move(0, SIMD::Float((packed[0] & SIMD::Int(0xFF))) * SIMD::Float(1.0f / 0xFF));
+		dst.move(1, SIMD::Float(0.0f));
+		dst.move(2, SIMD::Float(0.0f));
+		dst.move(3, SIMD::Float(1.0f));
+		break;
+	case VK_FORMAT_R8_SNORM:
+		dst.move(0, Max(SIMD::Float((packed[0] << 24) & SIMD::Int(0xFF000000)) * SIMD::Float(1.0f / 0x7F000000), SIMD::Float(-1.0f)));
+		dst.move(1, SIMD::Float(0.0f));
+		dst.move(2, SIMD::Float(0.0f));
+		dst.move(3, SIMD::Float(1.0f));
+		break;
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_S8_UINT:
+		dst.move(0, As<SIMD::UInt>(packed[0]) & SIMD::UInt(0xFF));
+		dst.move(1, SIMD::UInt(0));
+		dst.move(2, SIMD::UInt(0));
+		dst.move(3, SIMD::UInt(1));
+		break;
+	case VK_FORMAT_R8_SINT:
+		dst.move(0, (packed[0] << 24) >> 24);
+		dst.move(1, SIMD::Int(0));
+		dst.move(2, SIMD::Int(0));
+		dst.move(3, SIMD::Int(1));
+		break;
+	case VK_FORMAT_R8G8_UNORM:
+		dst.move(0, SIMD::Float(packed[0] & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
+		dst.move(1, SIMD::Float((packed[0] >> 8) & SIMD::Int(0xFF)) * SIMD::Float(1.0f / 0xFF));
+		dst.move(2, SIMD::Float(0.0f));
+		dst.move(3, SIMD::Float(1.0f));
+		break;
+	case VK_FORMAT_R8G8_SNORM:
+		dst.move(0, Max(SIMD::Float((packed[0] << 24) & SIMD::Int(0xFF000000)) * SIMD::Float(1.0f / 0x7F000000), SIMD::Float(-1.0f)));
+		dst.move(1, Max(SIMD::Float((packed[0] << 16) & SIMD::Int(0xFF000000)) * SIMD::Float(1.0f / 0x7F000000), SIMD::Float(-1.0f)));
+		dst.move(2, SIMD::Float(0.0f));
+		dst.move(3, SIMD::Float(1.0f));
+		break;
+	case VK_FORMAT_R8G8_UINT:
+		dst.move(0, As<SIMD::UInt>(packed[0]) & SIMD::UInt(0xFF));
+		dst.move(1, (As<SIMD::UInt>(packed[0]) >> 8) & SIMD::UInt(0xFF));
+		dst.move(2, SIMD::UInt(0));
+		dst.move(3, SIMD::UInt(1));
+		break;
+	case VK_FORMAT_R8G8_SINT:
+		dst.move(0, (packed[0] << 24) >> 24);
+		dst.move(1, (packed[0] << 16) >> 24);
+		dst.move(2, SIMD::Int(0));
+		dst.move(3, SIMD::Int(1));
+		break;
+	case VK_FORMAT_R16_SFLOAT:
+		dst.move(0, halfToFloatBits(As<SIMD::UInt>(packed[0]) & SIMD::UInt(0x0000FFFF)));
+		dst.move(1, SIMD::Float(0.0f));
+		dst.move(2, SIMD::Float(0.0f));
+		dst.move(3, SIMD::Float(1.0f));
+		break;
+	case VK_FORMAT_R16_UNORM:
+		dst.move(0, SIMD::Float(packed[0] & SIMD::Int(0xFFFF)) * SIMD::Float(1.0f / 0xFFFF));
+		dst.move(1, SIMD::Float(0.0f));
+		dst.move(2, SIMD::Float(0.0f));
+		dst.move(3, SIMD::Float(1.0f));
+		break;
+	case VK_FORMAT_R16_SNORM:
+		dst.move(0, Max(SIMD::Float((packed[0] << 16) & SIMD::Int(0xFFFF0000)) * SIMD::Float(1.0f / 0x7FFF0000), SIMD::Float(-1.0f)));
+		dst.move(1, SIMD::Float(0.0f));
+		dst.move(2, SIMD::Float(0.0f));
+		dst.move(3, SIMD::Float(1.0f));
+		break;
+	case VK_FORMAT_R16_UINT:
+		dst.move(0, packed[0] & SIMD::Int(0xFFFF));
+		dst.move(1, SIMD::UInt(0));
+		dst.move(2, SIMD::UInt(0));
+		dst.move(3, SIMD::UInt(1));
+		break;
+	case VK_FORMAT_R16_SINT:
+		dst.move(0, (packed[0] << 16) >> 16);
+		dst.move(1, SIMD::Int(0));
+		dst.move(2, SIMD::Int(0));
+		dst.move(3, SIMD::Int(1));
+		break;
+	case VK_FORMAT_R16G16_SFLOAT:
+		dst.move(0, halfToFloatBits(As<SIMD::UInt>(packed[0]) & SIMD::UInt(0x0000FFFF)));
+		dst.move(1, halfToFloatBits((As<SIMD::UInt>(packed[0]) & SIMD::UInt(0xFFFF0000)) >> 16));
+		dst.move(2, SIMD::Float(0.0f));
+		dst.move(3, SIMD::Float(1.0f));
+		break;
+	case VK_FORMAT_R16G16_UNORM:
+		dst.move(0, SIMD::Float(packed[0] & SIMD::Int(0xFFFF)) * SIMD::Float(1.0f / 0xFFFF));
+		dst.move(1, SIMD::Float(As<SIMD::UInt>(packed[0]) >> 16) * SIMD::Float(1.0f / 0xFFFF));
+		dst.move(2, SIMD::Float(0.0f));
+		dst.move(3, SIMD::Float(1.0f));
+		break;
+	case VK_FORMAT_R16G16_SNORM:
+		dst.move(0, Max(SIMD::Float((packed[0] << 16) & SIMD::Int(0xFFFF0000)) * SIMD::Float(1.0f / 0x7FFF0000), SIMD::Float(-1.0f)));
+		dst.move(1, Max(SIMD::Float(packed[0] & SIMD::Int(0xFFFF0000)) * SIMD::Float(1.0f / 0x7FFF0000), SIMD::Float(-1.0f)));
+		dst.move(2, SIMD::Float(0.0f));
+		dst.move(3, SIMD::Float(1.0f));
+		break;
+	case VK_FORMAT_R16G16_UINT:
+		dst.move(0, packed[0] & SIMD::Int(0xFFFF));
+		dst.move(1, (packed[0] >> 16) & SIMD::Int(0xFFFF));
+		dst.move(2, SIMD::UInt(0));
+		dst.move(3, SIMD::UInt(1));
+		break;
+	case VK_FORMAT_R16G16_SINT:
+		dst.move(0, (packed[0] << 16) >> 16);
+		dst.move(1, packed[0] >> 16);
+		dst.move(2, SIMD::Int(0));
+		dst.move(3, SIMD::Int(1));
+		break;
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_UINT:
+		dst.move(0, packed[0]);
+		dst.move(1, packed[1]);
+		dst.move(2, SIMD::Int(0));
+		dst.move(3, SIMD::Int(1));
+		break;
+	case VK_FORMAT_R32G32_SFLOAT:
+		dst.move(0, packed[0]);
+		dst.move(1, packed[1]);
+		dst.move(2, SIMD::Float(0.0f));
+		dst.move(3, SIMD::Float(1.0f));
+		break;
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		dst.move(0, packed[0] & SIMD::Int(0x3FF));
+		dst.move(1, (packed[0] >> 10) & SIMD::Int(0x3FF));
+		dst.move(2, (packed[0] >> 20) & SIMD::Int(0x3FF));
+		dst.move(3, (packed[0] >> 30) & SIMD::Int(0x3));
+		break;
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		dst.move(2, packed[0] & SIMD::Int(0x3FF));
+		dst.move(1, (packed[0] >> 10) & SIMD::Int(0x3FF));
+		dst.move(0, (packed[0] >> 20) & SIMD::Int(0x3FF));
+		dst.move(3, (packed[0] >> 30) & SIMD::Int(0x3));
+		break;
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		dst.move(0, SIMD::Float((packed[0]) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
+		dst.move(1, SIMD::Float((packed[0] >> 10) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
+		dst.move(2, SIMD::Float((packed[0] >> 20) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
+		dst.move(3, SIMD::Float((packed[0] >> 30) & SIMD::Int(0x3)) * SIMD::Float(1.0f / 0x3));
+		break;
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		dst.move(2, SIMD::Float((packed[0]) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
+		dst.move(1, SIMD::Float((packed[0] >> 10) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
+		dst.move(0, SIMD::Float((packed[0] >> 20) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
+		dst.move(3, SIMD::Float((packed[0] >> 30) & SIMD::Int(0x3)) * SIMD::Float(1.0f / 0x3));
+		break;
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		dst.move(0, SIMD::Float((packed[0] >> 11) & SIMD::Int(0x1F)) * SIMD::Float(1.0f / 0x1F));
+		dst.move(1, SIMD::Float((packed[0] >> 5) & SIMD::Int(0x3F)) * SIMD::Float(1.0f / 0x3F));
+		dst.move(2, SIMD::Float((packed[0]) & SIMD::Int(0x1F)) * SIMD::Float(1.0f / 0x1F));
+		dst.move(3, SIMD::Float(1.0f));
+		break;
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		dst.move(0, SIMD::Float((packed[0] >> 10) & SIMD::Int(0x1F)) * SIMD::Float(1.0f / 0x1F));
+		dst.move(1, SIMD::Float((packed[0] >> 5) & SIMD::Int(0x1F)) * SIMD::Float(1.0f / 0x1F));
+		dst.move(2, SIMD::Float((packed[0]) & SIMD::Int(0x1F)) * SIMD::Float(1.0f / 0x1F));
+		dst.move(3, SIMD::Float((packed[0] >> 15) & SIMD::Int(0x1)));
+		break;
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		dst.move(0, halfToFloatBits((packed[0] << 4) & SIMD::Int(0x7FF0)));
+		dst.move(1, halfToFloatBits((packed[0] >> 7) & SIMD::Int(0x7FF0)));
+		dst.move(2, halfToFloatBits((packed[0] >> 17) & SIMD::Int(0x7FE0)));
+		dst.move(3, SIMD::Float(1.0f));
+		break;
+	default:
+		UNSUPPORTED("VkFormat %d", int(vkFormat));
+		break;
 	}
 
 	return EmitResult::Continue;
@@ -1053,170 +1053,170 @@
 	auto format = static_cast<spv::ImageFormat>(imageType.definition.word(8));
 	switch(format)
 	{
-		case spv::ImageFormatRgba32f:
-		case spv::ImageFormatRgba32i:
-		case spv::ImageFormatRgba32ui:
-			texelSize = 16;
-			packed[0] = texel.Int(0);
-			packed[1] = texel.Int(1);
-			packed[2] = texel.Int(2);
-			packed[3] = texel.Int(3);
-			break;
-		case spv::ImageFormatR32f:
-		case spv::ImageFormatR32i:
-		case spv::ImageFormatR32ui:
-			texelSize = 4;
-			packed[0] = texel.Int(0);
-			break;
-		case spv::ImageFormatRgba8:
-			texelSize = 4;
-			packed[0] = (SIMD::UInt(Round(Min(Max(texel.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(255.0f)))) |
-			            ((SIMD::UInt(Round(Min(Max(texel.Float(1), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(255.0f)))) << 8) |
-			            ((SIMD::UInt(Round(Min(Max(texel.Float(2), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(255.0f)))) << 16) |
-			            ((SIMD::UInt(Round(Min(Max(texel.Float(3), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(255.0f)))) << 24);
-			break;
-		case spv::ImageFormatRgba8Snorm:
-			texelSize = 4;
-			packed[0] = (SIMD::Int(Round(Min(Max(texel.Float(0), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(127.0f))) &
-			             SIMD::Int(0xFF)) |
-			            ((SIMD::Int(Round(Min(Max(texel.Float(1), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(127.0f))) &
-			              SIMD::Int(0xFF))
-			             << 8) |
-			            ((SIMD::Int(Round(Min(Max(texel.Float(2), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(127.0f))) &
-			              SIMD::Int(0xFF))
-			             << 16) |
-			            ((SIMD::Int(Round(Min(Max(texel.Float(3), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(127.0f))) &
-			              SIMD::Int(0xFF))
-			             << 24);
-			break;
-		case spv::ImageFormatRgba8i:
-		case spv::ImageFormatRgba8ui:
-			texelSize = 4;
-			packed[0] = (SIMD::UInt(texel.UInt(0) & SIMD::UInt(0xff))) |
-			            (SIMD::UInt(texel.UInt(1) & SIMD::UInt(0xff)) << 8) |
-			            (SIMD::UInt(texel.UInt(2) & SIMD::UInt(0xff)) << 16) |
-			            (SIMD::UInt(texel.UInt(3) & SIMD::UInt(0xff)) << 24);
-			break;
-		case spv::ImageFormatRgba16f:
-			texelSize = 8;
-			packed[0] = floatToHalfBits(texel.UInt(0), false) | floatToHalfBits(texel.UInt(1), true);
-			packed[1] = floatToHalfBits(texel.UInt(2), false) | floatToHalfBits(texel.UInt(3), true);
-			break;
-		case spv::ImageFormatRgba16i:
-		case spv::ImageFormatRgba16ui:
-			texelSize = 8;
-			packed[0] = SIMD::UInt(texel.UInt(0) & SIMD::UInt(0xFFFF)) | (SIMD::UInt(texel.UInt(1) & SIMD::UInt(0xFFFF)) << 16);
-			packed[1] = SIMD::UInt(texel.UInt(2) & SIMD::UInt(0xFFFF)) | (SIMD::UInt(texel.UInt(3) & SIMD::UInt(0xFFFF)) << 16);
-			break;
-		case spv::ImageFormatRg32f:
-		case spv::ImageFormatRg32i:
-		case spv::ImageFormatRg32ui:
-			texelSize = 8;
-			packed[0] = texel.Int(0);
-			packed[1] = texel.Int(1);
-			break;
-		case spv::ImageFormatRg16f:
-			texelSize = 4;
-			packed[0] = floatToHalfBits(texel.UInt(0), false) | floatToHalfBits(texel.UInt(1), true);
-			break;
-		case spv::ImageFormatRg16i:
-		case spv::ImageFormatRg16ui:
-			texelSize = 4;
-			packed[0] = SIMD::UInt(texel.UInt(0) & SIMD::UInt(0xFFFF)) | (SIMD::UInt(texel.UInt(1) & SIMD::UInt(0xFFFF)) << 16);
-			break;
-		case spv::ImageFormatR11fG11fB10f:
-			texelSize = 4;
-			// Truncates instead of rounding. See b/147900455
-			packed[0] = ((floatToHalfBits(As<SIMD::UInt>(Max(texel.Float(0), SIMD::Float(0.0f))), false) & SIMD::UInt(0x7FF0)) >> 4) |
-			            ((floatToHalfBits(As<SIMD::UInt>(Max(texel.Float(1), SIMD::Float(0.0f))), false) & SIMD::UInt(0x7FF0)) << 7) |
-			            ((floatToHalfBits(As<SIMD::UInt>(Max(texel.Float(2), SIMD::Float(0.0f))), false) & SIMD::UInt(0x7FE0)) << 17);
-			break;
-		case spv::ImageFormatR16f:
-			texelSize = 2;
-			packed[0] = floatToHalfBits(texel.UInt(0), false);
-			break;
-		case spv::ImageFormatRgba16:
-			texelSize = 8;
-			packed[0] = SIMD::UInt(Round(Min(Max(texel.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFFFF))) |
-			            (SIMD::UInt(Round(Min(Max(texel.Float(1), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFFFF))) << 16);
-			packed[1] = SIMD::UInt(Round(Min(Max(texel.Float(2), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFFFF))) |
-			            (SIMD::UInt(Round(Min(Max(texel.Float(3), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFFFF))) << 16);
-			break;
-		case spv::ImageFormatRgb10A2:
-			texelSize = 4;
-			packed[0] = (SIMD::UInt(Round(Min(Max(texel.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x3FF)))) |
-			            ((SIMD::UInt(Round(Min(Max(texel.Float(1), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x3FF)))) << 10) |
-			            ((SIMD::UInt(Round(Min(Max(texel.Float(2), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x3FF)))) << 20) |
-			            ((SIMD::UInt(Round(Min(Max(texel.Float(3), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x3)))) << 30);
-			break;
-		case spv::ImageFormatRg16:
-			texelSize = 4;
-			packed[0] = SIMD::UInt(Round(Min(Max(texel.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFFFF))) |
-			            (SIMD::UInt(Round(Min(Max(texel.Float(1), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFFFF))) << 16);
-			break;
-		case spv::ImageFormatRg8:
-			texelSize = 2;
-			packed[0] = SIMD::UInt(Round(Min(Max(texel.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFF))) |
-			            (SIMD::UInt(Round(Min(Max(texel.Float(1), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFF))) << 8);
-			break;
-		case spv::ImageFormatR16:
-			texelSize = 2;
-			packed[0] = SIMD::UInt(Round(Min(Max(texel.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFFFF)));
-			break;
-		case spv::ImageFormatR8:
-			texelSize = 1;
-			packed[0] = SIMD::UInt(Round(Min(Max(texel.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFF)));
-			break;
-		case spv::ImageFormatRgba16Snorm:
-			texelSize = 8;
-			packed[0] = (SIMD::Int(Round(Min(Max(texel.Float(0), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7FFF))) & SIMD::Int(0xFFFF)) |
-			            (SIMD::Int(Round(Min(Max(texel.Float(1), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7FFF))) << 16);
-			packed[1] = (SIMD::Int(Round(Min(Max(texel.Float(2), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7FFF))) & SIMD::Int(0xFFFF)) |
-			            (SIMD::Int(Round(Min(Max(texel.Float(3), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7FFF))) << 16);
-			break;
-		case spv::ImageFormatRg16Snorm:
-			texelSize = 4;
-			packed[0] = (SIMD::Int(Round(Min(Max(texel.Float(0), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7FFF))) & SIMD::Int(0xFFFF)) |
-			            (SIMD::Int(Round(Min(Max(texel.Float(1), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7FFF))) << 16);
-			break;
-		case spv::ImageFormatRg8Snorm:
-			texelSize = 2;
-			packed[0] = (SIMD::Int(Round(Min(Max(texel.Float(0), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7F))) & SIMD::Int(0xFF)) |
-			            (SIMD::Int(Round(Min(Max(texel.Float(1), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7F))) << 8);
-			break;
-		case spv::ImageFormatR16Snorm:
-			texelSize = 2;
-			packed[0] = SIMD::Int(Round(Min(Max(texel.Float(0), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7FFF)));
-			break;
-		case spv::ImageFormatR8Snorm:
-			texelSize = 1;
-			packed[0] = SIMD::Int(Round(Min(Max(texel.Float(0), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7F)));
-			break;
-		case spv::ImageFormatRg8i:
-		case spv::ImageFormatRg8ui:
-			texelSize = 2;
-			packed[0] = SIMD::UInt(texel.UInt(0) & SIMD::UInt(0xFF)) | (SIMD::UInt(texel.UInt(1) & SIMD::UInt(0xFF)) << 8);
-			break;
-		case spv::ImageFormatR16i:
-		case spv::ImageFormatR16ui:
-			texelSize = 2;
-			packed[0] = SIMD::UInt(texel.UInt(0) & SIMD::UInt(0xFFFF));
-			break;
-		case spv::ImageFormatR8i:
-		case spv::ImageFormatR8ui:
-			texelSize = 1;
-			packed[0] = SIMD::UInt(texel.UInt(0) & SIMD::UInt(0xFF));
-			break;
-		case spv::ImageFormatRgb10a2ui:
-			texelSize = 4;
-			packed[0] = (SIMD::UInt(texel.UInt(0) & SIMD::UInt(0x3FF))) |
-			            (SIMD::UInt(texel.UInt(1) & SIMD::UInt(0x3FF)) << 10) |
-			            (SIMD::UInt(texel.UInt(2) & SIMD::UInt(0x3FF)) << 20) |
-			            (SIMD::UInt(texel.UInt(3) & SIMD::UInt(0x3)) << 30);
-			break;
-		default:
-			UNSUPPORTED("spv::ImageFormat %d", int(format));
-			break;
+	case spv::ImageFormatRgba32f:
+	case spv::ImageFormatRgba32i:
+	case spv::ImageFormatRgba32ui:
+		texelSize = 16;
+		packed[0] = texel.Int(0);
+		packed[1] = texel.Int(1);
+		packed[2] = texel.Int(2);
+		packed[3] = texel.Int(3);
+		break;
+	case spv::ImageFormatR32f:
+	case spv::ImageFormatR32i:
+	case spv::ImageFormatR32ui:
+		texelSize = 4;
+		packed[0] = texel.Int(0);
+		break;
+	case spv::ImageFormatRgba8:
+		texelSize = 4;
+		packed[0] = (SIMD::UInt(Round(Min(Max(texel.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(255.0f)))) |
+		            ((SIMD::UInt(Round(Min(Max(texel.Float(1), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(255.0f)))) << 8) |
+		            ((SIMD::UInt(Round(Min(Max(texel.Float(2), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(255.0f)))) << 16) |
+		            ((SIMD::UInt(Round(Min(Max(texel.Float(3), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(255.0f)))) << 24);
+		break;
+	case spv::ImageFormatRgba8Snorm:
+		texelSize = 4;
+		packed[0] = (SIMD::Int(Round(Min(Max(texel.Float(0), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(127.0f))) &
+		             SIMD::Int(0xFF)) |
+		            ((SIMD::Int(Round(Min(Max(texel.Float(1), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(127.0f))) &
+		              SIMD::Int(0xFF))
+		             << 8) |
+		            ((SIMD::Int(Round(Min(Max(texel.Float(2), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(127.0f))) &
+		              SIMD::Int(0xFF))
+		             << 16) |
+		            ((SIMD::Int(Round(Min(Max(texel.Float(3), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(127.0f))) &
+		              SIMD::Int(0xFF))
+		             << 24);
+		break;
+	case spv::ImageFormatRgba8i:
+	case spv::ImageFormatRgba8ui:
+		texelSize = 4;
+		packed[0] = (SIMD::UInt(texel.UInt(0) & SIMD::UInt(0xff))) |
+		            (SIMD::UInt(texel.UInt(1) & SIMD::UInt(0xff)) << 8) |
+		            (SIMD::UInt(texel.UInt(2) & SIMD::UInt(0xff)) << 16) |
+		            (SIMD::UInt(texel.UInt(3) & SIMD::UInt(0xff)) << 24);
+		break;
+	case spv::ImageFormatRgba16f:
+		texelSize = 8;
+		packed[0] = floatToHalfBits(texel.UInt(0), false) | floatToHalfBits(texel.UInt(1), true);
+		packed[1] = floatToHalfBits(texel.UInt(2), false) | floatToHalfBits(texel.UInt(3), true);
+		break;
+	case spv::ImageFormatRgba16i:
+	case spv::ImageFormatRgba16ui:
+		texelSize = 8;
+		packed[0] = SIMD::UInt(texel.UInt(0) & SIMD::UInt(0xFFFF)) | (SIMD::UInt(texel.UInt(1) & SIMD::UInt(0xFFFF)) << 16);
+		packed[1] = SIMD::UInt(texel.UInt(2) & SIMD::UInt(0xFFFF)) | (SIMD::UInt(texel.UInt(3) & SIMD::UInt(0xFFFF)) << 16);
+		break;
+	case spv::ImageFormatRg32f:
+	case spv::ImageFormatRg32i:
+	case spv::ImageFormatRg32ui:
+		texelSize = 8;
+		packed[0] = texel.Int(0);
+		packed[1] = texel.Int(1);
+		break;
+	case spv::ImageFormatRg16f:
+		texelSize = 4;
+		packed[0] = floatToHalfBits(texel.UInt(0), false) | floatToHalfBits(texel.UInt(1), true);
+		break;
+	case spv::ImageFormatRg16i:
+	case spv::ImageFormatRg16ui:
+		texelSize = 4;
+		packed[0] = SIMD::UInt(texel.UInt(0) & SIMD::UInt(0xFFFF)) | (SIMD::UInt(texel.UInt(1) & SIMD::UInt(0xFFFF)) << 16);
+		break;
+	case spv::ImageFormatR11fG11fB10f:
+		texelSize = 4;
+		// Truncates instead of rounding. See b/147900455
+		packed[0] = ((floatToHalfBits(As<SIMD::UInt>(Max(texel.Float(0), SIMD::Float(0.0f))), false) & SIMD::UInt(0x7FF0)) >> 4) |
+		            ((floatToHalfBits(As<SIMD::UInt>(Max(texel.Float(1), SIMD::Float(0.0f))), false) & SIMD::UInt(0x7FF0)) << 7) |
+		            ((floatToHalfBits(As<SIMD::UInt>(Max(texel.Float(2), SIMD::Float(0.0f))), false) & SIMD::UInt(0x7FE0)) << 17);
+		break;
+	case spv::ImageFormatR16f:
+		texelSize = 2;
+		packed[0] = floatToHalfBits(texel.UInt(0), false);
+		break;
+	case spv::ImageFormatRgba16:
+		texelSize = 8;
+		packed[0] = SIMD::UInt(Round(Min(Max(texel.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFFFF))) |
+		            (SIMD::UInt(Round(Min(Max(texel.Float(1), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFFFF))) << 16);
+		packed[1] = SIMD::UInt(Round(Min(Max(texel.Float(2), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFFFF))) |
+		            (SIMD::UInt(Round(Min(Max(texel.Float(3), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFFFF))) << 16);
+		break;
+	case spv::ImageFormatRgb10A2:
+		texelSize = 4;
+		packed[0] = (SIMD::UInt(Round(Min(Max(texel.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x3FF)))) |
+		            ((SIMD::UInt(Round(Min(Max(texel.Float(1), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x3FF)))) << 10) |
+		            ((SIMD::UInt(Round(Min(Max(texel.Float(2), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x3FF)))) << 20) |
+		            ((SIMD::UInt(Round(Min(Max(texel.Float(3), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x3)))) << 30);
+		break;
+	case spv::ImageFormatRg16:
+		texelSize = 4;
+		packed[0] = SIMD::UInt(Round(Min(Max(texel.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFFFF))) |
+		            (SIMD::UInt(Round(Min(Max(texel.Float(1), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFFFF))) << 16);
+		break;
+	case spv::ImageFormatRg8:
+		texelSize = 2;
+		packed[0] = SIMD::UInt(Round(Min(Max(texel.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFF))) |
+		            (SIMD::UInt(Round(Min(Max(texel.Float(1), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFF))) << 8);
+		break;
+	case spv::ImageFormatR16:
+		texelSize = 2;
+		packed[0] = SIMD::UInt(Round(Min(Max(texel.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFFFF)));
+		break;
+	case spv::ImageFormatR8:
+		texelSize = 1;
+		packed[0] = SIMD::UInt(Round(Min(Max(texel.Float(0), SIMD::Float(0.0f)), SIMD::Float(1.0f)) * SIMD::Float(0xFF)));
+		break;
+	case spv::ImageFormatRgba16Snorm:
+		texelSize = 8;
+		packed[0] = (SIMD::Int(Round(Min(Max(texel.Float(0), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7FFF))) & SIMD::Int(0xFFFF)) |
+		            (SIMD::Int(Round(Min(Max(texel.Float(1), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7FFF))) << 16);
+		packed[1] = (SIMD::Int(Round(Min(Max(texel.Float(2), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7FFF))) & SIMD::Int(0xFFFF)) |
+		            (SIMD::Int(Round(Min(Max(texel.Float(3), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7FFF))) << 16);
+		break;
+	case spv::ImageFormatRg16Snorm:
+		texelSize = 4;
+		packed[0] = (SIMD::Int(Round(Min(Max(texel.Float(0), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7FFF))) & SIMD::Int(0xFFFF)) |
+		            (SIMD::Int(Round(Min(Max(texel.Float(1), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7FFF))) << 16);
+		break;
+	case spv::ImageFormatRg8Snorm:
+		texelSize = 2;
+		packed[0] = (SIMD::Int(Round(Min(Max(texel.Float(0), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7F))) & SIMD::Int(0xFF)) |
+		            (SIMD::Int(Round(Min(Max(texel.Float(1), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7F))) << 8);
+		break;
+	case spv::ImageFormatR16Snorm:
+		texelSize = 2;
+		packed[0] = SIMD::Int(Round(Min(Max(texel.Float(0), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7FFF)));
+		break;
+	case spv::ImageFormatR8Snorm:
+		texelSize = 1;
+		packed[0] = SIMD::Int(Round(Min(Max(texel.Float(0), SIMD::Float(-1.0f)), SIMD::Float(1.0f)) * SIMD::Float(0x7F)));
+		break;
+	case spv::ImageFormatRg8i:
+	case spv::ImageFormatRg8ui:
+		texelSize = 2;
+		packed[0] = SIMD::UInt(texel.UInt(0) & SIMD::UInt(0xFF)) | (SIMD::UInt(texel.UInt(1) & SIMD::UInt(0xFF)) << 8);
+		break;
+	case spv::ImageFormatR16i:
+	case spv::ImageFormatR16ui:
+		texelSize = 2;
+		packed[0] = SIMD::UInt(texel.UInt(0) & SIMD::UInt(0xFFFF));
+		break;
+	case spv::ImageFormatR8i:
+	case spv::ImageFormatR8ui:
+		texelSize = 1;
+		packed[0] = SIMD::UInt(texel.UInt(0) & SIMD::UInt(0xFF));
+		break;
+	case spv::ImageFormatRgb10a2ui:
+		texelSize = 4;
+		packed[0] = (SIMD::UInt(texel.UInt(0) & SIMD::UInt(0x3FF))) |
+		            (SIMD::UInt(texel.UInt(1) & SIMD::UInt(0x3FF)) << 10) |
+		            (SIMD::UInt(texel.UInt(2) & SIMD::UInt(0x3FF)) << 20) |
+		            (SIMD::UInt(texel.UInt(3) & SIMD::UInt(0x3)) << 30);
+		break;
+	default:
+		UNSUPPORTED("spv::ImageFormat %d", int(format));
+		break;
 	}
 
 	// "The integer texel coordinates are validated according to the same rules as for texel input coordinate
diff --git a/src/Pipeline/SpirvShaderInstructions.cpp b/src/Pipeline/SpirvShaderInstructions.cpp
index 02ce0d2..fd787a5 100644
--- a/src/Pipeline/SpirvShaderInstructions.cpp
+++ b/src/Pipeline/SpirvShaderInstructions.cpp
@@ -39,7 +39,7 @@
 		return;
 #include "SpirvShaderInstructions.inl"
 #undef DECORATE_OP
-		case spv::OpMax: return;
+	case spv::OpMax: return;
 	}
 }
 
@@ -67,8 +67,8 @@
 #undef DECORATE_OP
 		return true;
 
-		default:
-			return false;
+	default:
+		return false;
 	}
 }
 
diff --git a/src/Pipeline/SpirvShaderMemory.cpp b/src/Pipeline/SpirvShaderMemory.cpp
index 6722b0b..2fb1e67 100644
--- a/src/Pipeline/SpirvShaderMemory.cpp
+++ b/src/Pipeline/SpirvShaderMemory.cpp
@@ -126,26 +126,26 @@
 
 	switch(objectTy.storageClass)
 	{
-		case spv::StorageClassOutput:
-		case spv::StorageClassPrivate:
-		case spv::StorageClassFunction:
+	case spv::StorageClassOutput:
+	case spv::StorageClassPrivate:
+	case spv::StorageClassFunction:
 		{
 			ASSERT(objectTy.opcode() == spv::OpTypePointer);
 			auto base = &routine->getVariable(resultId)[0];
 			auto elementTy = getType(objectTy.element);
 			auto size = elementTy.componentCount * static_cast<uint32_t>(sizeof(float)) * SIMD::Width;
 			state->createPointer(resultId, SIMD::Pointer(base, size));
-			break;
 		}
-		case spv::StorageClassWorkgroup:
+		break;
+	case spv::StorageClassWorkgroup:
 		{
 			ASSERT(objectTy.opcode() == spv::OpTypePointer);
 			auto base = &routine->workgroupMemory[0];
 			auto size = workgroupMemory.size();
 			state->createPointer(resultId, SIMD::Pointer(base, size, workgroupMemory.offsetOf(resultId)));
-			break;
 		}
-		case spv::StorageClassInput:
+		break;
+	case spv::StorageClassInput:
 		{
 			if(object.kind == Object::Kind::InterfaceVariable)
 			{
@@ -162,9 +162,9 @@
 			auto elementTy = getType(objectTy.element);
 			auto size = elementTy.componentCount * static_cast<uint32_t>(sizeof(float)) * SIMD::Width;
 			state->createPointer(resultId, SIMD::Pointer(base, size));
-			break;
 		}
-		case spv::StorageClassUniformConstant:
+		break;
+	case spv::StorageClassUniformConstant:
 		{
 			const auto &d = descriptorDecorations.at(resultId);
 			ASSERT(d.DescriptorSet >= 0);
@@ -175,10 +175,10 @@
 			Pointer<Byte> binding = Pointer<Byte>(set + bindingOffset);    // vk::SampledImageDescriptor*
 			auto size = 0;                                                 // Not required as this pointer is not directly used by SIMD::Read or SIMD::Write.
 			state->createPointer(resultId, SIMD::Pointer(binding, size));
-			break;
 		}
-		case spv::StorageClassUniform:
-		case spv::StorageClassStorageBuffer:
+		break;
+	case spv::StorageClassUniform:
+	case spv::StorageClassStorageBuffer:
 		{
 			const auto &d = descriptorDecorations.at(resultId);
 			ASSERT(d.DescriptorSet >= 0);
@@ -194,16 +194,16 @@
 			{
 				state->createPointer(resultId, SIMD::Pointer(nullptr, 0));
 			}
-			break;
 		}
-		case spv::StorageClassPushConstant:
+		break;
+	case spv::StorageClassPushConstant:
 		{
 			state->createPointer(resultId, SIMD::Pointer(routine->pushConstants, vk::MAX_PUSH_CONSTANT_SIZE));
-			break;
 		}
-		default:
-			UNREACHABLE("Storage class %d", objectTy.storageClass);
-			break;
+		break;
+	default:
+		UNREACHABLE("Storage class %d", objectTy.storageClass);
+		break;
 	}
 
 	if(insn.wordCount() > 4)
@@ -216,9 +216,9 @@
 
 		switch(objectTy.storageClass)
 		{
-			case spv::StorageClassOutput:
-			case spv::StorageClassPrivate:
-			case spv::StorageClassFunction:
+		case spv::StorageClassOutput:
+		case spv::StorageClassPrivate:
+		case spv::StorageClassFunction:
 			{
 				bool interleavedByLane = IsStorageInterleavedByLane(objectTy.storageClass);
 				auto ptr = GetPointerToData(resultId, 0, state);
@@ -229,10 +229,10 @@
 					auto robustness = OutOfBoundsBehavior::UndefinedBehavior;  // Local variables are always within bounds.
 					p.Store(initialValue.Float(el.index), robustness, state->activeLaneMask());
 				});
-				break;
 			}
-			default:
-				ASSERT_MSG(initializerId == 0, "Vulkan does not permit variables of storage class %d to have initializers", int(objectTy.storageClass));
+			break;
+		default:
+			ASSERT_MSG(initializerId == 0, "Vulkan does not permit variables of storage class %d to have initializers", int(objectTy.storageClass));
 		}
 	}
 
@@ -298,24 +298,24 @@
 
 	switch(type.opcode())
 	{
-		case spv::OpTypePointer:
-			VisitMemoryObjectInner(type.definition.word(3), d, index, offset, f);
-			break;
-		case spv::OpTypeInt:
-		case spv::OpTypeFloat:
-		case spv::OpTypeRuntimeArray:
-			f(MemoryElement{ index++, offset, type });
-			break;
-		case spv::OpTypeVector:
+	case spv::OpTypePointer:
+		VisitMemoryObjectInner(type.definition.word(3), d, index, offset, f);
+		break;
+	case spv::OpTypeInt:
+	case spv::OpTypeFloat:
+	case spv::OpTypeRuntimeArray:
+		f(MemoryElement{ index++, offset, type });
+		break;
+	case spv::OpTypeVector:
 		{
 			auto elemStride = (d.InsideMatrix && d.HasRowMajor && d.RowMajor) ? d.MatrixStride : static_cast<int32_t>(sizeof(float));
 			for(auto i = 0u; i < type.definition.word(3); i++)
 			{
 				VisitMemoryObjectInner(type.definition.word(2), d, index, offset + elemStride * i, f);
 			}
-			break;
 		}
-		case spv::OpTypeMatrix:
+		break;
+	case spv::OpTypeMatrix:
 		{
 			auto columnStride = (d.HasRowMajor && d.RowMajor) ? static_cast<int32_t>(sizeof(float)) : d.MatrixStride;
 			d.InsideMatrix = true;
@@ -324,16 +324,16 @@
 				ASSERT(d.HasMatrixStride);
 				VisitMemoryObjectInner(type.definition.word(2), d, index, offset + columnStride * i, f);
 			}
-			break;
 		}
-		case spv::OpTypeStruct:
-			for(auto i = 0u; i < type.definition.wordCount() - 2; i++)
-			{
-				ApplyDecorationsForIdMember(&d, id, i);
-				VisitMemoryObjectInner(type.definition.word(i + 2), d, index, offset, f);
-			}
-			break;
-		case spv::OpTypeArray:
+		break;
+	case spv::OpTypeStruct:
+		for(auto i = 0u; i < type.definition.wordCount() - 2; i++)
+		{
+			ApplyDecorationsForIdMember(&d, id, i);
+			VisitMemoryObjectInner(type.definition.word(i + 2), d, index, offset, f);
+		}
+		break;
+	case spv::OpTypeArray:
 		{
 			auto arraySize = GetConstScalarInt(type.definition.word(3));
 			for(auto i = 0u; i < arraySize; i++)
@@ -341,10 +341,10 @@
 				ASSERT(d.HasArrayStride);
 				VisitMemoryObjectInner(type.definition.word(2), d, index, offset + i * d.ArrayStride, f);
 			}
-			break;
 		}
-		default:
-			UNREACHABLE("%s", OpcodeName(type.opcode()));
+		break;
+	default:
+		UNREACHABLE("%s", OpcodeName(type.opcode()));
 	}
 }
 
@@ -378,11 +378,11 @@
 	auto &object = getObject(id);
 	switch(object.kind)
 	{
-		case Object::Kind::Pointer:
-		case Object::Kind::InterfaceVariable:
-			return state->getPointer(id);
+	case Object::Kind::Pointer:
+	case Object::Kind::InterfaceVariable:
+		return state->getPointer(id);
 
-		case Object::Kind::DescriptorSet:
+	case Object::Kind::DescriptorSet:
 		{
 			const auto &d = descriptorDecorations.at(id);
 			ASSERT(d.DescriptorSet >= 0 && d.DescriptorSet < vk::MAX_BOUND_DESCRIPTOR_SETS);
@@ -414,9 +414,9 @@
 			}
 		}
 
-		default:
-			UNREACHABLE("Invalid pointer kind %d", int(object.kind));
-			return SIMD::Pointer(Pointer<Byte>(), 0);
+	default:
+		UNREACHABLE("Invalid pointer kind %d", int(object.kind));
+		return SIMD::Pointer(Pointer<Byte>(), 0);
 	}
 }
 
@@ -429,16 +429,16 @@
 	                                                            spv::MemorySemanticsSequentiallyConsistentMask);
 	switch(control)
 	{
-		case spv::MemorySemanticsMaskNone: return std::memory_order_relaxed;
-		case spv::MemorySemanticsAcquireMask: return std::memory_order_acquire;
-		case spv::MemorySemanticsReleaseMask: return std::memory_order_release;
-		case spv::MemorySemanticsAcquireReleaseMask: return std::memory_order_acq_rel;
-		case spv::MemorySemanticsSequentiallyConsistentMask: return std::memory_order_acq_rel;  // Vulkan 1.1: "SequentiallyConsistent is treated as AcquireRelease"
-		default:
-			// "it is invalid for more than one of these four bits to be set:
-			// Acquire, Release, AcquireRelease, or SequentiallyConsistent."
-			UNREACHABLE("MemorySemanticsMask: %x", int(control));
-			return std::memory_order_acq_rel;
+	case spv::MemorySemanticsMaskNone: return std::memory_order_relaxed;
+	case spv::MemorySemanticsAcquireMask: return std::memory_order_acquire;
+	case spv::MemorySemanticsReleaseMask: return std::memory_order_release;
+	case spv::MemorySemanticsAcquireReleaseMask: return std::memory_order_acq_rel;
+	case spv::MemorySemanticsSequentiallyConsistentMask: return std::memory_order_acq_rel;  // Vulkan 1.1: "SequentiallyConsistent is treated as AcquireRelease"
+	default:
+		// "it is invalid for more than one of these four bits to be set:
+		// Acquire, Release, AcquireRelease, or SequentiallyConsistent."
+		UNREACHABLE("MemorySemanticsMask: %x", int(control));
+		return std::memory_order_acq_rel;
 	}
 }
 
@@ -446,12 +446,12 @@
 {
 	switch(storageClass)
 	{
-		case spv::StorageClassUniform:
-		case spv::StorageClassStorageBuffer:
-		case spv::StorageClassImage:
-			return false;
-		default:
-			return true;
+	case spv::StorageClassUniform:
+	case spv::StorageClassStorageBuffer:
+	case spv::StorageClassImage:
+		return false;
+	default:
+		return true;
 	}
 }
 
@@ -459,12 +459,12 @@
 {
 	switch(storageClass)
 	{
-		case spv::StorageClassUniform:
-		case spv::StorageClassStorageBuffer:
-		case spv::StorageClassPushConstant:
-			return true;
-		default:
-			return false;
+	case spv::StorageClassUniform:
+	case spv::StorageClassStorageBuffer:
+	case spv::StorageClassPushConstant:
+		return true;
+	default:
+		return false;
 	}
 }
 
@@ -482,14 +482,14 @@
 {
 	switch(storageClass)
 	{
-		case spv::StorageClassUniform:
-		case spv::StorageClassStorageBuffer:
-		case spv::StorageClassPushConstant:
-		case spv::StorageClassWorkgroup:
-		case spv::StorageClassImage:
-			return false;
-		default:
-			return true;
+	case spv::StorageClassUniform:
+	case spv::StorageClassStorageBuffer:
+	case spv::StorageClassPushConstant:
+	case spv::StorageClassWorkgroup:
+	case spv::StorageClassImage:
+		return false;
+	default:
+		return true;
 	}
 }
 
diff --git a/src/Pipeline/SpirvShaderSampling.cpp b/src/Pipeline/SpirvShaderSampling.cpp
index 75dd51c..497397f 100644
--- a/src/Pipeline/SpirvShaderSampling.cpp
+++ b/src/Pipeline/SpirvShaderSampling.cpp
@@ -222,28 +222,28 @@
 
 	switch(sampler->magFilter)
 	{
-		case VK_FILTER_NEAREST:
-			switch(sampler->minFilter)
-			{
-				case VK_FILTER_NEAREST: return FILTER_POINT;
-				case VK_FILTER_LINEAR: return FILTER_MIN_LINEAR_MAG_POINT;
-				default:
-					UNSUPPORTED("minFilter %d", sampler->minFilter);
-					return FILTER_POINT;
-			}
-			break;
-		case VK_FILTER_LINEAR:
-			switch(sampler->minFilter)
-			{
-				case VK_FILTER_NEAREST: return FILTER_MIN_POINT_MAG_LINEAR;
-				case VK_FILTER_LINEAR: return FILTER_LINEAR;
-				default:
-					UNSUPPORTED("minFilter %d", sampler->minFilter);
-					return FILTER_POINT;
-			}
-			break;
+	case VK_FILTER_NEAREST:
+		switch(sampler->minFilter)
+		{
+		case VK_FILTER_NEAREST: return FILTER_POINT;
+		case VK_FILTER_LINEAR: return FILTER_MIN_LINEAR_MAG_POINT;
 		default:
-			break;
+			UNSUPPORTED("minFilter %d", sampler->minFilter);
+			return FILTER_POINT;
+		}
+		break;
+	case VK_FILTER_LINEAR:
+		switch(sampler->minFilter)
+		{
+		case VK_FILTER_NEAREST: return FILTER_MIN_POINT_MAG_LINEAR;
+		case VK_FILTER_LINEAR: return FILTER_LINEAR;
+		default:
+			UNSUPPORTED("minFilter %d", sampler->minFilter);
+			return FILTER_POINT;
+		}
+		break;
+	default:
+		break;
 	}
 
 	UNSUPPORTED("magFilter %d", sampler->magFilter);
@@ -265,11 +265,11 @@
 
 	switch(sampler->mipmapMode)
 	{
-		case VK_SAMPLER_MIPMAP_MODE_NEAREST: return MIPMAP_POINT;
-		case VK_SAMPLER_MIPMAP_MODE_LINEAR: return MIPMAP_LINEAR;
-		default:
-			UNSUPPORTED("mipmapMode %d", sampler->mipmapMode);
-			return MIPMAP_POINT;
+	case VK_SAMPLER_MIPMAP_MODE_NEAREST: return MIPMAP_POINT;
+	case VK_SAMPLER_MIPMAP_MODE_LINEAR: return MIPMAP_LINEAR;
+	default:
+		UNSUPPORTED("mipmapMode %d", sampler->mipmapMode);
+		return MIPMAP_POINT;
 	}
 }
 
@@ -277,45 +277,45 @@
 {
 	switch(imageViewType)
 	{
-		case VK_IMAGE_VIEW_TYPE_1D:
-		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
-			if(coordinateIndex >= 1)
-			{
-				return ADDRESSING_UNUSED;
-			}
-			break;
-		case VK_IMAGE_VIEW_TYPE_2D:
-		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
-			if(coordinateIndex == 2)
-			{
-				return ADDRESSING_UNUSED;
-			}
-			break;
+	case VK_IMAGE_VIEW_TYPE_1D:
+	case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
+		if(coordinateIndex >= 1)
+		{
+			return ADDRESSING_UNUSED;
+		}
+		break;
+	case VK_IMAGE_VIEW_TYPE_2D:
+	case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
+		if(coordinateIndex == 2)
+		{
+			return ADDRESSING_UNUSED;
+		}
+		break;
 
-		case VK_IMAGE_VIEW_TYPE_3D:
-			break;
+	case VK_IMAGE_VIEW_TYPE_3D:
+		break;
 
-		case VK_IMAGE_VIEW_TYPE_CUBE:
-		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
-			if(coordinateIndex <= 1)  // Cube faces themselves are addressed as 2D images.
-			{
-				// Vulkan 1.1 spec:
-				// "Cube images ignore the wrap modes specified in the sampler. Instead, if VK_FILTER_NEAREST is used within a mip level then
-				//  VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE is used, and if VK_FILTER_LINEAR is used within a mip level then sampling at the edges
-				//  is performed as described earlier in the Cube map edge handling section."
-				// This corresponds with our 'SEAMLESS' addressing mode.
-				return ADDRESSING_SEAMLESS;
-			}
-			else  // coordinateIndex == 2
-			{
-				// The cube face is an index into 2D array layers.
-				return ADDRESSING_CUBEFACE;
-			}
-			break;
+	case VK_IMAGE_VIEW_TYPE_CUBE:
+	case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
+		if(coordinateIndex <= 1)  // Cube faces themselves are addressed as 2D images.
+		{
+			// Vulkan 1.1 spec:
+			// "Cube images ignore the wrap modes specified in the sampler. Instead, if VK_FILTER_NEAREST is used within a mip level then
+			//  VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE is used, and if VK_FILTER_LINEAR is used within a mip level then sampling at the edges
+			//  is performed as described earlier in the Cube map edge handling section."
+			// This corresponds with our 'SEAMLESS' addressing mode.
+			return ADDRESSING_SEAMLESS;
+		}
+		else  // coordinateIndex == 2
+		{
+			// The cube face is an index into 2D array layers.
+			return ADDRESSING_CUBEFACE;
+		}
+		break;
 
-		default:
-			UNSUPPORTED("imageViewType %d", imageViewType);
-			return ADDRESSING_WRAP;
+	default:
+		UNSUPPORTED("imageViewType %d", imageViewType);
+		return ADDRESSING_WRAP;
 	}
 
 	if(!sampler)
@@ -336,22 +336,22 @@
 	VkSamplerAddressMode addressMode = VK_SAMPLER_ADDRESS_MODE_REPEAT;
 	switch(coordinateIndex)
 	{
-		case 0: addressMode = sampler->addressModeU; break;
-		case 1: addressMode = sampler->addressModeV; break;
-		case 2: addressMode = sampler->addressModeW; break;
-		default: UNSUPPORTED("coordinateIndex: %d", coordinateIndex);
+	case 0: addressMode = sampler->addressModeU; break;
+	case 1: addressMode = sampler->addressModeV; break;
+	case 2: addressMode = sampler->addressModeW; break;
+	default: UNSUPPORTED("coordinateIndex: %d", coordinateIndex);
 	}
 
 	switch(addressMode)
 	{
-		case VK_SAMPLER_ADDRESS_MODE_REPEAT: return ADDRESSING_WRAP;
-		case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT: return ADDRESSING_MIRROR;
-		case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE: return ADDRESSING_CLAMP;
-		case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER: return ADDRESSING_BORDER;
-		case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE: return ADDRESSING_MIRRORONCE;
-		default:
-			UNSUPPORTED("addressMode %d", addressMode);
-			return ADDRESSING_WRAP;
+	case VK_SAMPLER_ADDRESS_MODE_REPEAT: return ADDRESSING_WRAP;
+	case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT: return ADDRESSING_MIRROR;
+	case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE: return ADDRESSING_CLAMP;
+	case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER: return ADDRESSING_BORDER;
+	case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE: return ADDRESSING_MIRRORONCE;
+	default:
+		UNSUPPORTED("addressMode %d", addressMode);
+		return ADDRESSING_WRAP;
 	}
 }
 
diff --git a/src/Pipeline/SpirvShaderSpec.cpp b/src/Pipeline/SpirvShaderSpec.cpp
index 9225e9d..7da7396 100644
--- a/src/Pipeline/SpirvShaderSpec.cpp
+++ b/src/Pipeline/SpirvShaderSpec.cpp
@@ -24,48 +24,48 @@
 
 	switch(opcode)
 	{
-		case spv::OpIAdd:
-		case spv::OpISub:
-		case spv::OpIMul:
-		case spv::OpUDiv:
-		case spv::OpSDiv:
-		case spv::OpUMod:
-		case spv::OpSMod:
-		case spv::OpSRem:
-		case spv::OpShiftRightLogical:
-		case spv::OpShiftRightArithmetic:
-		case spv::OpShiftLeftLogical:
-		case spv::OpBitwiseOr:
-		case spv::OpLogicalOr:
-		case spv::OpBitwiseAnd:
-		case spv::OpLogicalAnd:
-		case spv::OpBitwiseXor:
-		case spv::OpLogicalEqual:
-		case spv::OpIEqual:
-		case spv::OpLogicalNotEqual:
-		case spv::OpINotEqual:
-		case spv::OpULessThan:
-		case spv::OpSLessThan:
-		case spv::OpUGreaterThan:
-		case spv::OpSGreaterThan:
-		case spv::OpULessThanEqual:
-		case spv::OpSLessThanEqual:
-		case spv::OpUGreaterThanEqual:
-		case spv::OpSGreaterThanEqual:
-			EvalSpecConstantBinaryOp(insn);
-			break;
+	case spv::OpIAdd:
+	case spv::OpISub:
+	case spv::OpIMul:
+	case spv::OpUDiv:
+	case spv::OpSDiv:
+	case spv::OpUMod:
+	case spv::OpSMod:
+	case spv::OpSRem:
+	case spv::OpShiftRightLogical:
+	case spv::OpShiftRightArithmetic:
+	case spv::OpShiftLeftLogical:
+	case spv::OpBitwiseOr:
+	case spv::OpLogicalOr:
+	case spv::OpBitwiseAnd:
+	case spv::OpLogicalAnd:
+	case spv::OpBitwiseXor:
+	case spv::OpLogicalEqual:
+	case spv::OpIEqual:
+	case spv::OpLogicalNotEqual:
+	case spv::OpINotEqual:
+	case spv::OpULessThan:
+	case spv::OpSLessThan:
+	case spv::OpUGreaterThan:
+	case spv::OpSGreaterThan:
+	case spv::OpULessThanEqual:
+	case spv::OpSLessThanEqual:
+	case spv::OpUGreaterThanEqual:
+	case spv::OpSGreaterThanEqual:
+		EvalSpecConstantBinaryOp(insn);
+		break;
 
-		case spv::OpSConvert:
-		case spv::OpFConvert:
-		case spv::OpUConvert:
-		case spv::OpSNegate:
-		case spv::OpNot:
-		case spv::OpLogicalNot:
-		case spv::OpQuantizeToF16:
-			EvalSpecConstantUnaryOp(insn);
-			break;
+	case spv::OpSConvert:
+	case spv::OpFConvert:
+	case spv::OpUConvert:
+	case spv::OpSNegate:
+	case spv::OpNot:
+	case spv::OpLogicalNot:
+	case spv::OpQuantizeToF16:
+		EvalSpecConstantUnaryOp(insn);
+		break;
 
-		case spv::OpSelect:
+	case spv::OpSelect:
 		{
 			auto &result = CreateConstant(insn);
 			auto const &cond = getObject(insn.word(4));
@@ -81,7 +81,7 @@
 			break;
 		}
 
-		case spv::OpCompositeExtract:
+	case spv::OpCompositeExtract:
 		{
 			auto &result = CreateConstant(insn);
 			auto const &compositeObject = getObject(insn.word(4));
@@ -94,7 +94,7 @@
 			break;
 		}
 
-		case spv::OpCompositeInsert:
+	case spv::OpCompositeInsert:
 		{
 			auto &result = CreateConstant(insn);
 			auto const &newPart = getObject(insn.word(4));
@@ -119,7 +119,7 @@
 			break;
 		}
 
-		case spv::OpVectorShuffle:
+	case spv::OpVectorShuffle:
 		{
 			auto &result = CreateConstant(insn);
 			auto const &firstHalf = getObject(insn.word(4));
@@ -145,11 +145,11 @@
 			break;
 		}
 
-		default:
-			// Other spec constant ops are possible, but require capabilities that are
-			// not exposed in our Vulkan implementation (eg Kernel), so we should never
-			// get here for correct shaders.
-			UNSUPPORTED("EvalSpecConstantOp op: %s", OpcodeName(opcode));
+	default:
+		// Other spec constant ops are possible, but require capabilities that are
+		// not exposed in our Vulkan implementation (eg Kernel), so we should never
+		// get here for correct shaders.
+		UNSUPPORTED("EvalSpecConstantOp op: %s", OpcodeName(opcode));
 	}
 }
 
@@ -168,21 +168,21 @@
 
 		switch(opcode)
 		{
-			case spv::OpSConvert:
-			case spv::OpFConvert:
-			case spv::OpUConvert:
-				UNREACHABLE("Not possible until we have multiple bit widths");
-				break;
+		case spv::OpSConvert:
+		case spv::OpFConvert:
+		case spv::OpUConvert:
+			UNREACHABLE("Not possible until we have multiple bit widths");
+			break;
 
-			case spv::OpSNegate:
-				v = -(int)l;
-				break;
-			case spv::OpNot:
-			case spv::OpLogicalNot:
-				v = ~l;
-				break;
+		case spv::OpSNegate:
+			v = -(int)l;
+			break;
+		case spv::OpNot:
+		case spv::OpLogicalNot:
+			v = ~l;
+			break;
 
-			case spv::OpQuantizeToF16:
+		case spv::OpQuantizeToF16:
 			{
 				// Can do this nicer with host code, but want to perfectly mirror the reactor code we emit.
 				auto abs = bit_cast<float>(l & 0x7FFFFFFF);
@@ -195,10 +195,10 @@
 				v &= ~isZero | 0x80000000;
 				v = sign | (isInfOrNan & 0x7F800000) | (~isInfOrNan & v);
 				v |= isNaN & 0x400000;
-				break;
 			}
-			default:
-				UNREACHABLE("EvalSpecConstantUnaryOp op: %s", OpcodeName(opcode));
+			break;
+		default:
+			UNREACHABLE("EvalSpecConstantUnaryOp op: %s", OpcodeName(opcode));
 		}
 	}
 }
@@ -220,93 +220,93 @@
 
 		switch(opcode)
 		{
-			case spv::OpIAdd:
-				v = l + r;
-				break;
-			case spv::OpISub:
-				v = l - r;
-				break;
-			case spv::OpIMul:
-				v = l * r;
-				break;
-			case spv::OpUDiv:
-				v = (r == 0) ? 0 : l / r;
-				break;
-			case spv::OpUMod:
-				v = (r == 0) ? 0 : l % r;
-				break;
-			case spv::OpSDiv:
-				if(r == 0) r = UINT32_MAX;
-				if(l == static_cast<uint32_t>(INT32_MIN)) l = UINT32_MAX;
-				v = static_cast<int32_t>(l) / static_cast<int32_t>(r);
-				break;
-			case spv::OpSRem:
-				if(r == 0) r = UINT32_MAX;
-				if(l == static_cast<uint32_t>(INT32_MIN)) l = UINT32_MAX;
-				v = static_cast<int32_t>(l) % static_cast<int32_t>(r);
-				break;
-			case spv::OpSMod:
-				if(r == 0) r = UINT32_MAX;
-				if(l == static_cast<uint32_t>(INT32_MIN)) l = UINT32_MAX;
-				// Test if a signed-multiply would be negative.
-				v = static_cast<int32_t>(l) % static_cast<int32_t>(r);
-				if((v & 0x80000000) != (r & 0x80000000))
-					v += r;
-				break;
-			case spv::OpShiftRightLogical:
-				v = l >> r;
-				break;
-			case spv::OpShiftRightArithmetic:
-				v = static_cast<int32_t>(l) >> r;
-				break;
-			case spv::OpShiftLeftLogical:
-				v = l << r;
-				break;
-			case spv::OpBitwiseOr:
-			case spv::OpLogicalOr:
-				v = l | r;
-				break;
-			case spv::OpBitwiseAnd:
-			case spv::OpLogicalAnd:
-				v = l & r;
-				break;
-			case spv::OpBitwiseXor:
-				v = l ^ r;
-				break;
-			case spv::OpLogicalEqual:
-			case spv::OpIEqual:
-				v = (l == r) ? ~0u : 0u;
-				break;
-			case spv::OpLogicalNotEqual:
-			case spv::OpINotEqual:
-				v = (l != r) ? ~0u : 0u;
-				break;
-			case spv::OpULessThan:
-				v = l < r ? ~0u : 0u;
-				break;
-			case spv::OpSLessThan:
-				v = static_cast<int32_t>(l) < static_cast<int32_t>(r) ? ~0u : 0u;
-				break;
-			case spv::OpUGreaterThan:
-				v = l > r ? ~0u : 0u;
-				break;
-			case spv::OpSGreaterThan:
-				v = static_cast<int32_t>(l) > static_cast<int32_t>(r) ? ~0u : 0u;
-				break;
-			case spv::OpULessThanEqual:
-				v = l <= r ? ~0u : 0u;
-				break;
-			case spv::OpSLessThanEqual:
-				v = static_cast<int32_t>(l) <= static_cast<int32_t>(r) ? ~0u : 0u;
-				break;
-			case spv::OpUGreaterThanEqual:
-				v = l >= r ? ~0u : 0u;
-				break;
-			case spv::OpSGreaterThanEqual:
-				v = static_cast<int32_t>(l) >= static_cast<int32_t>(r) ? ~0u : 0u;
-				break;
-			default:
-				UNREACHABLE("EvalSpecConstantBinaryOp op: %s", OpcodeName(opcode));
+		case spv::OpIAdd:
+			v = l + r;
+			break;
+		case spv::OpISub:
+			v = l - r;
+			break;
+		case spv::OpIMul:
+			v = l * r;
+			break;
+		case spv::OpUDiv:
+			v = (r == 0) ? 0 : l / r;
+			break;
+		case spv::OpUMod:
+			v = (r == 0) ? 0 : l % r;
+			break;
+		case spv::OpSDiv:
+			if(r == 0) r = UINT32_MAX;
+			if(l == static_cast<uint32_t>(INT32_MIN)) l = UINT32_MAX;
+			v = static_cast<int32_t>(l) / static_cast<int32_t>(r);
+			break;
+		case spv::OpSRem:
+			if(r == 0) r = UINT32_MAX;
+			if(l == static_cast<uint32_t>(INT32_MIN)) l = UINT32_MAX;
+			v = static_cast<int32_t>(l) % static_cast<int32_t>(r);
+			break;
+		case spv::OpSMod:
+			if(r == 0) r = UINT32_MAX;
+			if(l == static_cast<uint32_t>(INT32_MIN)) l = UINT32_MAX;
+			// Test if a signed-multiply would be negative.
+			v = static_cast<int32_t>(l) % static_cast<int32_t>(r);
+			if((v & 0x80000000) != (r & 0x80000000))
+				v += r;
+			break;
+		case spv::OpShiftRightLogical:
+			v = l >> r;
+			break;
+		case spv::OpShiftRightArithmetic:
+			v = static_cast<int32_t>(l) >> r;
+			break;
+		case spv::OpShiftLeftLogical:
+			v = l << r;
+			break;
+		case spv::OpBitwiseOr:
+		case spv::OpLogicalOr:
+			v = l | r;
+			break;
+		case spv::OpBitwiseAnd:
+		case spv::OpLogicalAnd:
+			v = l & r;
+			break;
+		case spv::OpBitwiseXor:
+			v = l ^ r;
+			break;
+		case spv::OpLogicalEqual:
+		case spv::OpIEqual:
+			v = (l == r) ? ~0u : 0u;
+			break;
+		case spv::OpLogicalNotEqual:
+		case spv::OpINotEqual:
+			v = (l != r) ? ~0u : 0u;
+			break;
+		case spv::OpULessThan:
+			v = l < r ? ~0u : 0u;
+			break;
+		case spv::OpSLessThan:
+			v = static_cast<int32_t>(l) < static_cast<int32_t>(r) ? ~0u : 0u;
+			break;
+		case spv::OpUGreaterThan:
+			v = l > r ? ~0u : 0u;
+			break;
+		case spv::OpSGreaterThan:
+			v = static_cast<int32_t>(l) > static_cast<int32_t>(r) ? ~0u : 0u;
+			break;
+		case spv::OpULessThanEqual:
+			v = l <= r ? ~0u : 0u;
+			break;
+		case spv::OpSLessThanEqual:
+			v = static_cast<int32_t>(l) <= static_cast<int32_t>(r) ? ~0u : 0u;
+			break;
+		case spv::OpUGreaterThanEqual:
+			v = l >= r ? ~0u : 0u;
+			break;
+		case spv::OpSGreaterThanEqual:
+			v = static_cast<int32_t>(l) >= static_cast<int32_t>(r) ? ~0u : 0u;
+			break;
+		default:
+			UNREACHABLE("EvalSpecConstantBinaryOp op: %s", OpcodeName(opcode));
 		}
 	}
 }
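
Aside: in the OpSRem/OpSMod cases above, OpSRem follows C++'s % (sign of the dividend), while OpSMod follows the divisor's sign, which is what the `v += r` correction implements. A standalone sketch of that rule (hypothetical helper; unlike the Reactor code it also guards against adjusting a zero remainder):

#include <cassert>
#include <cstdint>

// Illustration of the OpSMod correction: start from the truncating C++
// remainder (OpSRem semantics) and, if its sign differs from the divisor's,
// add the divisor once so the result takes the divisor's sign.
static int32_t signedMod(int32_t l, int32_t r)
{
	int32_t v = l % r;  // sign follows the dividend l
	if(v != 0 && ((v < 0) != (r < 0)))
	{
		v += r;  // sign now follows the divisor r
	}
	return v;
}

int main()
{
	assert(signedMod(-7, 3) == 2);    // C++: -7 % 3 == -1, corrected to 2
	assert(signedMod(7, -3) == -2);   // C++:  7 % -3 == 1, corrected to -2
	assert(signedMod(-7, -3) == -1);  // signs already agree, no correction
	return 0;
}
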
diff --git a/src/Pipeline/VertexRoutine.cpp b/src/Pipeline/VertexRoutine.cpp
index 2d5167d..737d755 100644
--- a/src/Pipeline/VertexRoutine.cpp
+++ b/src/Pipeline/VertexRoutine.cpp
@@ -201,10 +201,10 @@
 
 	switch(stream.format)
 	{
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R32G32B32_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R32G32B32_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
 		{
 			if(componentCount == 0)
 			{
@@ -231,138 +231,138 @@
 			}
 		}
 		break;
-		case VK_FORMAT_B8G8R8A8_UNORM:
-			bgra = true;
-			// [[fallthrough]]
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-			v.x = Float4(*Pointer<Byte4>(source0));
-			v.y = Float4(*Pointer<Byte4>(source1));
-			v.z = Float4(*Pointer<Byte4>(source2));
-			v.w = Float4(*Pointer<Byte4>(source3));
+	case VK_FORMAT_B8G8R8A8_UNORM:
+		bgra = true;
+		// [[fallthrough]]
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+		v.x = Float4(*Pointer<Byte4>(source0));
+		v.y = Float4(*Pointer<Byte4>(source1));
+		v.z = Float4(*Pointer<Byte4>(source2));
+		v.w = Float4(*Pointer<Byte4>(source3));
 
-			transpose4xN(v.x, v.y, v.z, v.w, componentCount);
+		transpose4xN(v.x, v.y, v.z, v.w, componentCount);
 
-			if(componentCount >= 1) v.x *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleByte));
-			if(componentCount >= 2) v.y *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleByte));
-			if(componentCount >= 3) v.z *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleByte));
-			if(componentCount >= 4) v.w *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleByte));
-			break;
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-			v.x = As<Float4>(Int4(*Pointer<Byte4>(source0)));
-			v.y = As<Float4>(Int4(*Pointer<Byte4>(source1)));
-			v.z = As<Float4>(Int4(*Pointer<Byte4>(source2)));
-			v.w = As<Float4>(Int4(*Pointer<Byte4>(source3)));
+		if(componentCount >= 1) v.x *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleByte));
+		if(componentCount >= 2) v.y *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleByte));
+		if(componentCount >= 3) v.z *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleByte));
+		if(componentCount >= 4) v.w *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleByte));
+		break;
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+		v.x = As<Float4>(Int4(*Pointer<Byte4>(source0)));
+		v.y = As<Float4>(Int4(*Pointer<Byte4>(source1)));
+		v.z = As<Float4>(Int4(*Pointer<Byte4>(source2)));
+		v.w = As<Float4>(Int4(*Pointer<Byte4>(source3)));
 
-			transpose4xN(v.x, v.y, v.z, v.w, componentCount);
-			break;
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-			v.x = Float4(*Pointer<SByte4>(source0));
-			v.y = Float4(*Pointer<SByte4>(source1));
-			v.z = Float4(*Pointer<SByte4>(source2));
-			v.w = Float4(*Pointer<SByte4>(source3));
+		transpose4xN(v.x, v.y, v.z, v.w, componentCount);
+		break;
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+		v.x = Float4(*Pointer<SByte4>(source0));
+		v.y = Float4(*Pointer<SByte4>(source1));
+		v.z = Float4(*Pointer<SByte4>(source2));
+		v.w = Float4(*Pointer<SByte4>(source3));
 
-			transpose4xN(v.x, v.y, v.z, v.w, componentCount);
+		transpose4xN(v.x, v.y, v.z, v.w, componentCount);
 
-			if(componentCount >= 1) v.x = Max(v.x * *Pointer<Float4>(constants + OFFSET(Constants, unscaleSByte)), Float4(-1.0f));
-			if(componentCount >= 2) v.y = Max(v.y * *Pointer<Float4>(constants + OFFSET(Constants, unscaleSByte)), Float4(-1.0f));
-			if(componentCount >= 3) v.z = Max(v.z * *Pointer<Float4>(constants + OFFSET(Constants, unscaleSByte)), Float4(-1.0f));
-			if(componentCount >= 4) v.w = Max(v.w * *Pointer<Float4>(constants + OFFSET(Constants, unscaleSByte)), Float4(-1.0f));
-			break;
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-			v.x = As<Float4>(Int4(*Pointer<SByte4>(source0)));
-			v.y = As<Float4>(Int4(*Pointer<SByte4>(source1)));
-			v.z = As<Float4>(Int4(*Pointer<SByte4>(source2)));
-			v.w = As<Float4>(Int4(*Pointer<SByte4>(source3)));
+		if(componentCount >= 1) v.x = Max(v.x * *Pointer<Float4>(constants + OFFSET(Constants, unscaleSByte)), Float4(-1.0f));
+		if(componentCount >= 2) v.y = Max(v.y * *Pointer<Float4>(constants + OFFSET(Constants, unscaleSByte)), Float4(-1.0f));
+		if(componentCount >= 3) v.z = Max(v.z * *Pointer<Float4>(constants + OFFSET(Constants, unscaleSByte)), Float4(-1.0f));
+		if(componentCount >= 4) v.w = Max(v.w * *Pointer<Float4>(constants + OFFSET(Constants, unscaleSByte)), Float4(-1.0f));
+		break;
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+		v.x = As<Float4>(Int4(*Pointer<SByte4>(source0)));
+		v.y = As<Float4>(Int4(*Pointer<SByte4>(source1)));
+		v.z = As<Float4>(Int4(*Pointer<SByte4>(source2)));
+		v.w = As<Float4>(Int4(*Pointer<SByte4>(source3)));
 
-			transpose4xN(v.x, v.y, v.z, v.w, componentCount);
-			break;
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-			v.x = Float4(*Pointer<Short4>(source0));
-			v.y = Float4(*Pointer<Short4>(source1));
-			v.z = Float4(*Pointer<Short4>(source2));
-			v.w = Float4(*Pointer<Short4>(source3));
+		transpose4xN(v.x, v.y, v.z, v.w, componentCount);
+		break;
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+		v.x = Float4(*Pointer<Short4>(source0));
+		v.y = Float4(*Pointer<Short4>(source1));
+		v.z = Float4(*Pointer<Short4>(source2));
+		v.w = Float4(*Pointer<Short4>(source3));
 
-			transpose4xN(v.x, v.y, v.z, v.w, componentCount);
+		transpose4xN(v.x, v.y, v.z, v.w, componentCount);
 
-			if(componentCount >= 1) v.x = Max(v.x * *Pointer<Float4>(constants + OFFSET(Constants, unscaleShort)), Float4(-1.0f));
-			if(componentCount >= 2) v.y = Max(v.y * *Pointer<Float4>(constants + OFFSET(Constants, unscaleShort)), Float4(-1.0f));
-			if(componentCount >= 3) v.z = Max(v.z * *Pointer<Float4>(constants + OFFSET(Constants, unscaleShort)), Float4(-1.0f));
-			if(componentCount >= 4) v.w = Max(v.w * *Pointer<Float4>(constants + OFFSET(Constants, unscaleShort)), Float4(-1.0f));
-			break;
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-			v.x = As<Float4>(Int4(*Pointer<Short4>(source0)));
-			v.y = As<Float4>(Int4(*Pointer<Short4>(source1)));
-			v.z = As<Float4>(Int4(*Pointer<Short4>(source2)));
-			v.w = As<Float4>(Int4(*Pointer<Short4>(source3)));
+		if(componentCount >= 1) v.x = Max(v.x * *Pointer<Float4>(constants + OFFSET(Constants, unscaleShort)), Float4(-1.0f));
+		if(componentCount >= 2) v.y = Max(v.y * *Pointer<Float4>(constants + OFFSET(Constants, unscaleShort)), Float4(-1.0f));
+		if(componentCount >= 3) v.z = Max(v.z * *Pointer<Float4>(constants + OFFSET(Constants, unscaleShort)), Float4(-1.0f));
+		if(componentCount >= 4) v.w = Max(v.w * *Pointer<Float4>(constants + OFFSET(Constants, unscaleShort)), Float4(-1.0f));
+		break;
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+		v.x = As<Float4>(Int4(*Pointer<Short4>(source0)));
+		v.y = As<Float4>(Int4(*Pointer<Short4>(source1)));
+		v.z = As<Float4>(Int4(*Pointer<Short4>(source2)));
+		v.w = As<Float4>(Int4(*Pointer<Short4>(source3)));
 
-			transpose4xN(v.x, v.y, v.z, v.w, componentCount);
-			break;
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16B16A16_UNORM:
-			v.x = Float4(*Pointer<UShort4>(source0));
-			v.y = Float4(*Pointer<UShort4>(source1));
-			v.z = Float4(*Pointer<UShort4>(source2));
-			v.w = Float4(*Pointer<UShort4>(source3));
+		transpose4xN(v.x, v.y, v.z, v.w, componentCount);
+		break;
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16B16A16_UNORM:
+		v.x = Float4(*Pointer<UShort4>(source0));
+		v.y = Float4(*Pointer<UShort4>(source1));
+		v.z = Float4(*Pointer<UShort4>(source2));
+		v.w = Float4(*Pointer<UShort4>(source3));
 
-			transpose4xN(v.x, v.y, v.z, v.w, componentCount);
+		transpose4xN(v.x, v.y, v.z, v.w, componentCount);
 
-			if(componentCount >= 1) v.x *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleUShort));
-			if(componentCount >= 2) v.y *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleUShort));
-			if(componentCount >= 3) v.z *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleUShort));
-			if(componentCount >= 4) v.w *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleUShort));
-			break;
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16B16A16_UINT:
-			v.x = As<Float4>(Int4(*Pointer<UShort4>(source0)));
-			v.y = As<Float4>(Int4(*Pointer<UShort4>(source1)));
-			v.z = As<Float4>(Int4(*Pointer<UShort4>(source2)));
-			v.w = As<Float4>(Int4(*Pointer<UShort4>(source3)));
+		if(componentCount >= 1) v.x *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleUShort));
+		if(componentCount >= 2) v.y *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleUShort));
+		if(componentCount >= 3) v.z *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleUShort));
+		if(componentCount >= 4) v.w *= *Pointer<Float4>(constants + OFFSET(Constants, unscaleUShort));
+		break;
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16B16A16_UINT:
+		v.x = As<Float4>(Int4(*Pointer<UShort4>(source0)));
+		v.y = As<Float4>(Int4(*Pointer<UShort4>(source1)));
+		v.z = As<Float4>(Int4(*Pointer<UShort4>(source2)));
+		v.w = As<Float4>(Int4(*Pointer<UShort4>(source3)));
 
-			transpose4xN(v.x, v.y, v.z, v.w, componentCount);
-			break;
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32B32_SINT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-			v.x = *Pointer<Float4>(source0);
-			v.y = *Pointer<Float4>(source1);
-			v.z = *Pointer<Float4>(source2);
-			v.w = *Pointer<Float4>(source3);
+		transpose4xN(v.x, v.y, v.z, v.w, componentCount);
+		break;
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32B32_SINT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+		v.x = *Pointer<Float4>(source0);
+		v.y = *Pointer<Float4>(source1);
+		v.z = *Pointer<Float4>(source2);
+		v.w = *Pointer<Float4>(source3);
 
-			transpose4xN(v.x, v.y, v.z, v.w, componentCount);
-			break;
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32B32_UINT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-			v.x = *Pointer<Float4>(source0);
-			v.y = *Pointer<Float4>(source1);
-			v.z = *Pointer<Float4>(source2);
-			v.w = *Pointer<Float4>(source3);
+		transpose4xN(v.x, v.y, v.z, v.w, componentCount);
+		break;
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32B32_UINT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+		v.x = *Pointer<Float4>(source0);
+		v.y = *Pointer<Float4>(source1);
+		v.z = *Pointer<Float4>(source2);
+		v.w = *Pointer<Float4>(source3);
 
-			transpose4xN(v.x, v.y, v.z, v.w, componentCount);
-			break;
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
+		transpose4xN(v.x, v.y, v.z, v.w, componentCount);
+		break;
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
 		{
 			if(componentCount >= 1)
 			{
@@ -417,10 +417,10 @@
 			}
 		}
 		break;
-		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-			bgra = true;
-			// [[fallthrough]]
-		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+		bgra = true;
+		// [[fallthrough]]
+	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
 		{
 			Int4 src;
 			src = Insert(src, *Pointer<Int>(source0), 0);
@@ -438,10 +438,10 @@
 			v.w = Max(v.w, Float4(-1.0f));
 		}
 		break;
-		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-			bgra = true;
-			// [[fallthrough]]
-		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+		bgra = true;
+		// [[fallthrough]]
+	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
 		{
 			Int4 src;
 			src = Insert(src, *Pointer<Int>(source0), 0);
@@ -454,10 +454,10 @@
 			v.w = As<Float4>(src >> 30);
 		}
 		break;
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-			bgra = true;
-			// [[fallthrough]]
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		bgra = true;
+		// [[fallthrough]]
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
 		{
 			Int4 src;
 			src = Insert(src, *Pointer<Int>(source0), 0);
@@ -476,10 +476,10 @@
 			v.w *= Float4(1.0f / 0x3);
 		}
 		break;
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-			bgra = true;
-			// [[fallthrough]]
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		bgra = true;
+		// [[fallthrough]]
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
 		{
 			Int4 src;
 			src = Insert(src, *Pointer<Int>(source0), 0);
@@ -493,8 +493,8 @@
 			v.w = As<Float4>((src >> 30) & Int4(0x3));
 		}
 		break;
-		default:
-			UNSUPPORTED("stream.format %d", int(stream.format));
+	default:
+		UNSUPPORTED("stream.format %d", int(stream.format));
 	}
 
 	if(bgra)
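
Aside: in the vertex-fetch hunks above, signed normalized attributes are unscaled and then clamped with Max(..., -1.0f) because the most negative code (e.g. -128 for an 8-bit SNORM) would otherwise map slightly below -1. A scalar sketch, assuming unscaleSByte is 1/127 and unscaleByte is 1/255 as in the Vulkan normalized-integer conversion rules:

#include <algorithm>
#include <cstdint>

// Scalar equivalent of the R8_SNORM path: scale by 1/127 and clamp,
// since -128 * (1/127) falls just below -1.
static float unscaleSnorm8(int8_t value)
{
	return std::max(static_cast<float>(value) * (1.0f / 127.0f), -1.0f);
}

// Scalar equivalent of the R8_UNORM path: no clamp needed, 255 * (1/255) == 1.
static float unscaleUnorm8(uint8_t value)
{
	return static_cast<float>(value) * (1.0f / 255.0f);
}
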
diff --git a/src/Reactor/Debug.cpp b/src/Reactor/Debug.cpp
index 2df3e28..031524d 100644
--- a/src/Reactor/Debug.cpp
+++ b/src/Reactor/Debug.cpp
@@ -110,21 +110,21 @@
 {
 	switch(level)
 	{
-		case Level::Debug:
-			__android_log_write(ANDROID_LOG_DEBUG, "SwiftShader", msg);
-			break;
-		case Level::Info:
-			__android_log_write(ANDROID_LOG_INFO, "SwiftShader", msg);
-			break;
-		case Level::Warn:
-			__android_log_write(ANDROID_LOG_WARN, "SwiftShader", msg);
-			break;
-		case Level::Error:
-			__android_log_write(ANDROID_LOG_ERROR, "SwiftShader", msg);
-			break;
-		case Level::Fatal:
-			__android_log_write(ANDROID_LOG_FATAL, "SwiftShader", msg);
-			break;
+	case Level::Debug:
+		__android_log_write(ANDROID_LOG_DEBUG, "SwiftShader", msg);
+		break;
+	case Level::Info:
+		__android_log_write(ANDROID_LOG_INFO, "SwiftShader", msg);
+		break;
+	case Level::Warn:
+		__android_log_write(ANDROID_LOG_WARN, "SwiftShader", msg);
+		break;
+	case Level::Error:
+		__android_log_write(ANDROID_LOG_ERROR, "SwiftShader", msg);
+		break;
+	case Level::Fatal:
+		__android_log_write(ANDROID_LOG_FATAL, "SwiftShader", msg);
+		break;
 	}
 }
 #else
@@ -132,15 +132,15 @@
 {
 	switch(level)
 	{
-		case Level::Debug:
-		case Level::Info:
-			fprintf(stdout, "%s", msg);
-			break;
-		case Level::Warn:
-		case Level::Error:
-		case Level::Fatal:
-			fprintf(stderr, "%s", msg);
-			break;
+	case Level::Debug:
+	case Level::Info:
+		fprintf(stdout, "%s", msg);
+		break;
+	case Level::Warn:
+	case Level::Error:
+	case Level::Fatal:
+		fprintf(stderr, "%s", msg);
+		break;
 	}
 }
 #endif
diff --git a/src/Reactor/ExecutableMemory.cpp b/src/Reactor/ExecutableMemory.cpp
index 2ffb904..bc01045 100644
--- a/src/Reactor/ExecutableMemory.cpp
+++ b/src/Reactor/ExecutableMemory.cpp
@@ -100,16 +100,16 @@
 {
 	switch(permissions)
 	{
-		case PERMISSION_READ:
-			return PAGE_READONLY;
-		case PERMISSION_EXECUTE:
-			return PAGE_EXECUTE;
-		case PERMISSION_READ | PERMISSION_WRITE:
-			return PAGE_READWRITE;
-		case PERMISSION_READ | PERMISSION_EXECUTE:
-			return PAGE_EXECUTE_READ;
-		case PERMISSION_READ | PERMISSION_WRITE | PERMISSION_EXECUTE:
-			return PAGE_EXECUTE_READWRITE;
+	case PERMISSION_READ:
+		return PAGE_READONLY;
+	case PERMISSION_EXECUTE:
+		return PAGE_EXECUTE;
+	case PERMISSION_READ | PERMISSION_WRITE:
+		return PAGE_READWRITE;
+	case PERMISSION_READ | PERMISSION_EXECUTE:
+		return PAGE_EXECUTE_READ;
+	case PERMISSION_READ | PERMISSION_WRITE | PERMISSION_EXECUTE:
+		return PAGE_EXECUTE_READWRITE;
 	}
 	return PAGE_NOACCESS;
 }
diff --git a/src/Reactor/LLVMJIT.cpp b/src/Reactor/LLVMJIT.cpp
index 910cea8..5c0f15d 100644
--- a/src/Reactor/LLVMJIT.cpp
+++ b/src/Reactor/LLVMJIT.cpp
@@ -97,13 +97,13 @@
 	switch(tlsIndex)
 	{
 
-		case MSanTLS::param: return reinterpret_cast<void *>(&__msan_param_tls);
-		case MSanTLS::retval: return reinterpret_cast<void *>(&__msan_retval_tls);
-		case MSanTLS::va_arg: return reinterpret_cast<void *>(&__msan_va_arg_tls);
-		case MSanTLS::va_arg_overflow_size: return reinterpret_cast<void *>(&__msan_va_arg_overflow_size_tls);
-		default:
-			UNSUPPORTED("MemorySanitizer used an unrecognized TLS variable: %d", tlsIndex);
-			return nullptr;
+	case MSanTLS::param: return reinterpret_cast<void *>(&__msan_param_tls);
+	case MSanTLS::retval: return reinterpret_cast<void *>(&__msan_retval_tls);
+	case MSanTLS::va_arg: return reinterpret_cast<void *>(&__msan_va_arg_tls);
+	case MSanTLS::va_arg_overflow_size: return reinterpret_cast<void *>(&__msan_va_arg_overflow_size_tls);
+	default:
+		UNSUPPORTED("MemorySanitizer used an unrecognized TLS variable: %d", tlsIndex);
+		return nullptr;
 	}
 }
 
@@ -236,11 +236,11 @@
 
 	switch(level)
 	{
-		case rr::Optimization::Level::None: return llvm::CodeGenOpt::None;
-		case rr::Optimization::Level::Less: return llvm::CodeGenOpt::Less;
-		case rr::Optimization::Level::Default: return llvm::CodeGenOpt::Default;
-		case rr::Optimization::Level::Aggressive: return llvm::CodeGenOpt::Aggressive;
-		default: UNREACHABLE("Unknown Optimization Level %d", int(level));
+	case rr::Optimization::Level::None: return llvm::CodeGenOpt::None;
+	case rr::Optimization::Level::Less: return llvm::CodeGenOpt::Less;
+	case rr::Optimization::Level::Default: return llvm::CodeGenOpt::Default;
+	case rr::Optimization::Level::Aggressive: return llvm::CodeGenOpt::Aggressive;
+	default: UNREACHABLE("Unknown Optimization Level %d", int(level));
 	}
 
 	return llvm::CodeGenOpt::Default;
@@ -385,24 +385,24 @@
 		{
 			switch(size)
 			{
-				case 1: atomicLoad<uint8_t>(ptr, ret, ordering); break;
-				case 2: atomicLoad<uint16_t>(ptr, ret, ordering); break;
-				case 4: atomicLoad<uint32_t>(ptr, ret, ordering); break;
-				case 8: atomicLoad<uint64_t>(ptr, ret, ordering); break;
-				default:
-					UNIMPLEMENTED_NO_BUG("Atomic::load(size: %d)", int(size));
+			case 1: atomicLoad<uint8_t>(ptr, ret, ordering); break;
+			case 2: atomicLoad<uint16_t>(ptr, ret, ordering); break;
+			case 4: atomicLoad<uint32_t>(ptr, ret, ordering); break;
+			case 8: atomicLoad<uint64_t>(ptr, ret, ordering); break;
+			default:
+				UNIMPLEMENTED_NO_BUG("Atomic::load(size: %d)", int(size));
 			}
 		}
 		static void store(size_t size, void *ptr, void *ret, llvm::AtomicOrdering ordering)
 		{
 			switch(size)
 			{
-				case 1: atomicStore<uint8_t>(ptr, ret, ordering); break;
-				case 2: atomicStore<uint16_t>(ptr, ret, ordering); break;
-				case 4: atomicStore<uint32_t>(ptr, ret, ordering); break;
-				case 8: atomicStore<uint64_t>(ptr, ret, ordering); break;
-				default:
-					UNIMPLEMENTED_NO_BUG("Atomic::store(size: %d)", int(size));
+			case 1: atomicStore<uint8_t>(ptr, ret, ordering); break;
+			case 2: atomicStore<uint16_t>(ptr, ret, ordering); break;
+			case 4: atomicStore<uint32_t>(ptr, ret, ordering); break;
+			case 8: atomicStore<uint64_t>(ptr, ret, ordering); break;
+			default:
+				UNIMPLEMENTED_NO_BUG("Atomic::store(size: %d)", int(size));
 			}
 		}
 	};
@@ -659,21 +659,21 @@
 	{
 		switch(info.getSeverity())
 		{
-			case llvm::DS_Error:
-				ASSERT_MSG(false, "LLVM JIT compilation failure");
+		case llvm::DS_Error:
+			ASSERT_MSG(false, "LLVM JIT compilation failure");
+			*fatal = true;
+			break;
+		case llvm::DS_Warning:
+			if(info.getKind() == llvm::DK_StackSize)
+			{
+				// Stack size limit exceeded
 				*fatal = true;
-				break;
-			case llvm::DS_Warning:
-				if(info.getKind() == llvm::DK_StackSize)
-				{
-					// Stack size limit exceeded
-					*fatal = true;
-				}
-				break;
-			case llvm::DS_Remark:
-				break;
-			case llvm::DS_Note:
-				break;
+			}
+			break;
+		case llvm::DS_Remark:
+			break;
+		case llvm::DS_Note:
+			break;
 		}
 
 		return true;  // Diagnostic handled, don't let LLVM print it.
@@ -842,19 +842,19 @@
 	{
 		switch(pass)
 		{
-			case rr::Optimization::Pass::Disabled: break;
-			case rr::Optimization::Pass::CFGSimplification: passManager.add(llvm::createCFGSimplificationPass()); break;
-			case rr::Optimization::Pass::LICM: passManager.add(llvm::createLICMPass()); break;
-			case rr::Optimization::Pass::AggressiveDCE: passManager.add(llvm::createAggressiveDCEPass()); break;
-			case rr::Optimization::Pass::GVN: passManager.add(llvm::createGVNPass()); break;
-			case rr::Optimization::Pass::InstructionCombining: passManager.add(llvm::createInstructionCombiningPass()); break;
-			case rr::Optimization::Pass::Reassociate: passManager.add(llvm::createReassociatePass()); break;
-			case rr::Optimization::Pass::DeadStoreElimination: passManager.add(llvm::createDeadStoreEliminationPass()); break;
-			case rr::Optimization::Pass::SCCP: passManager.add(llvm::createSCCPPass()); break;
-			case rr::Optimization::Pass::ScalarReplAggregates: passManager.add(llvm::createSROAPass()); break;
-			case rr::Optimization::Pass::EarlyCSEPass: passManager.add(llvm::createEarlyCSEPass()); break;
-			default:
-				UNREACHABLE("pass: %d", int(pass));
+		case rr::Optimization::Pass::Disabled: break;
+		case rr::Optimization::Pass::CFGSimplification: passManager.add(llvm::createCFGSimplificationPass()); break;
+		case rr::Optimization::Pass::LICM: passManager.add(llvm::createLICMPass()); break;
+		case rr::Optimization::Pass::AggressiveDCE: passManager.add(llvm::createAggressiveDCEPass()); break;
+		case rr::Optimization::Pass::GVN: passManager.add(llvm::createGVNPass()); break;
+		case rr::Optimization::Pass::InstructionCombining: passManager.add(llvm::createInstructionCombiningPass()); break;
+		case rr::Optimization::Pass::Reassociate: passManager.add(llvm::createReassociatePass()); break;
+		case rr::Optimization::Pass::DeadStoreElimination: passManager.add(llvm::createDeadStoreEliminationPass()); break;
+		case rr::Optimization::Pass::SCCP: passManager.add(llvm::createSCCPPass()); break;
+		case rr::Optimization::Pass::ScalarReplAggregates: passManager.add(llvm::createSROAPass()); break;
+		case rr::Optimization::Pass::EarlyCSEPass: passManager.add(llvm::createEarlyCSEPass()); break;
+		default:
+			UNREACHABLE("pass: %d", int(pass));
 		}
 	}
 
diff --git a/src/Reactor/LLVMReactor.cpp b/src/Reactor/LLVMReactor.cpp
index c601329..dea359f 100644
--- a/src/Reactor/LLVMReactor.cpp
+++ b/src/Reactor/LLVMReactor.cpp
@@ -409,16 +409,16 @@
 	// Use 128-bit vectors to implement logically shorter ones.
 	switch(asInternalType(t))
 	{
-		case Type_v2i32: return T(Int4::type());
-		case Type_v4i16: return T(Short8::type());
-		case Type_v2i16: return T(Short8::type());
-		case Type_v8i8: return T(Byte16::type());
-		case Type_v4i8: return T(Byte16::type());
-		case Type_v2f32: return T(Float4::type());
-		case Type_LLVM: return reinterpret_cast<llvm::Type *>(t);
-		default:
-			UNREACHABLE("asInternalType(t): %d", int(asInternalType(t)));
-			return nullptr;
+	case Type_v2i32: return T(Int4::type());
+	case Type_v4i16: return T(Short8::type());
+	case Type_v2i16: return T(Short8::type());
+	case Type_v8i8: return T(Byte16::type());
+	case Type_v4i8: return T(Byte16::type());
+	case Type_v2f32: return T(Float4::type());
+	case Type_LLVM: return reinterpret_cast<llvm::Type *>(t);
+	default:
+		UNREACHABLE("asInternalType(t): %d", int(asInternalType(t)));
+		return nullptr;
 	}
 }
 
@@ -446,13 +446,13 @@
 {
 	switch(asInternalType(type))
 	{
-		case Type_v2i32: return 8;
-		case Type_v4i16: return 8;
-		case Type_v2i16: return 4;
-		case Type_v8i8: return 8;
-		case Type_v4i8: return 4;
-		case Type_v2f32: return 8;
-		case Type_LLVM:
+	case Type_v2i32: return 8;
+	case Type_v4i16: return 8;
+	case Type_v2i16: return 4;
+	case Type_v8i8: return 8;
+	case Type_v4i8: return 4;
+	case Type_v2f32: return 8;
+	case Type_LLVM:
 		{
 			llvm::Type *t = T(type);
 
@@ -471,9 +471,9 @@
 			return (bits + 7) / 8;
 		}
 		break;
-		default:
-			UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
-			return 0;
+	default:
+		UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
+		return 0;
 	}
 }
 
@@ -481,16 +481,16 @@
 {
 	switch(asInternalType(type))
 	{
-		case Type_v2i32: return 2;
-		case Type_v4i16: return 4;
-		case Type_v2i16: return 2;
-		case Type_v8i8: return 8;
-		case Type_v4i8: return 4;
-		case Type_v2f32: return 2;
-		case Type_LLVM: return llvm::cast<llvm::FixedVectorType>(T(type))->getNumElements();
-		default:
-			UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
-			return 0;
+	case Type_v2i32: return 2;
+	case Type_v4i16: return 4;
+	case Type_v2i16: return 2;
+	case Type_v8i8: return 8;
+	case Type_v4i8: return 4;
+	case Type_v2f32: return 2;
+	case Type_LLVM: return llvm::cast<llvm::FixedVectorType>(T(type))->getNumElements();
+	default:
+		UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
+		return 0;
 	}
 }
 
@@ -873,28 +873,28 @@
 	RR_DEBUG_INFO_UPDATE_LOC();
 	switch(asInternalType(type))
 	{
-		case Type_v2i32:
-		case Type_v4i16:
-		case Type_v8i8:
-		case Type_v2f32:
-			return createBitCast(
-			    createInsertElement(
-			        V(llvm::UndefValue::get(llvm::VectorType::get(T(Long::type()), 2, false))),
-			        createLoad(createBitCast(ptr, Pointer<Long>::type()), Long::type(), isVolatile, alignment, atomic, memoryOrder),
-			        0),
-			    type);
-		case Type_v2i16:
-		case Type_v4i8:
-			if(alignment != 0)  // Not a local variable (all vectors are 128-bit).
-			{
-				Value *u = V(llvm::UndefValue::get(llvm::VectorType::get(T(Long::type()), 2, false)));
-				Value *i = createLoad(createBitCast(ptr, Pointer<Int>::type()), Int::type(), isVolatile, alignment, atomic, memoryOrder);
-				i = createZExt(i, Long::type());
-				Value *v = createInsertElement(u, i, 0);
-				return createBitCast(v, type);
-			}
-			// Fallthrough to non-emulated case.
-		case Type_LLVM:
+	case Type_v2i32:
+	case Type_v4i16:
+	case Type_v8i8:
+	case Type_v2f32:
+		return createBitCast(
+		    createInsertElement(
+		        V(llvm::UndefValue::get(llvm::VectorType::get(T(Long::type()), 2, false))),
+		        createLoad(createBitCast(ptr, Pointer<Long>::type()), Long::type(), isVolatile, alignment, atomic, memoryOrder),
+		        0),
+		    type);
+	case Type_v2i16:
+	case Type_v4i8:
+		if(alignment != 0)  // Not a local variable (all vectors are 128-bit).
+		{
+			Value *u = V(llvm::UndefValue::get(llvm::VectorType::get(T(Long::type()), 2, false)));
+			Value *i = createLoad(createBitCast(ptr, Pointer<Int>::type()), Int::type(), isVolatile, alignment, atomic, memoryOrder);
+			i = createZExt(i, Long::type());
+			Value *v = createInsertElement(u, i, 0);
+			return createBitCast(v, type);
+		}
+		// Fallthrough to non-emulated case.
+	case Type_LLVM:
 		{
 			auto elTy = T(type);
 			ASSERT(V(ptr)->getType()->getContainedType(0) == elTy);
@@ -946,9 +946,9 @@
 				return V(jit->builder->CreateLoad(V(out)));
 			}
 		}
-		default:
-			UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
-			return nullptr;
+	default:
+		UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
+		return nullptr;
 	}
 }
 
@@ -957,28 +957,28 @@
 	RR_DEBUG_INFO_UPDATE_LOC();
 	switch(asInternalType(type))
 	{
-		case Type_v2i32:
-		case Type_v4i16:
-		case Type_v8i8:
-		case Type_v2f32:
+	case Type_v2i32:
+	case Type_v4i16:
+	case Type_v8i8:
+	case Type_v2f32:
+		createStore(
+		    createExtractElement(
+		        createBitCast(value, T(llvm::VectorType::get(T(Long::type()), 2, false))), Long::type(), 0),
+		    createBitCast(ptr, Pointer<Long>::type()),
+		    Long::type(), isVolatile, alignment, atomic, memoryOrder);
+		return value;
+	case Type_v2i16:
+	case Type_v4i8:
+		if(alignment != 0)  // Not a local variable (all vectors are 128-bit).
+		{
 			createStore(
-			    createExtractElement(
-			        createBitCast(value, T(llvm::VectorType::get(T(Long::type()), 2, false))), Long::type(), 0),
-			    createBitCast(ptr, Pointer<Long>::type()),
-			    Long::type(), isVolatile, alignment, atomic, memoryOrder);
+			    createExtractElement(createBitCast(value, Int4::type()), Int::type(), 0),
+			    createBitCast(ptr, Pointer<Int>::type()),
+			    Int::type(), isVolatile, alignment, atomic, memoryOrder);
 			return value;
-		case Type_v2i16:
-		case Type_v4i8:
-			if(alignment != 0)  // Not a local variable (all vectors are 128-bit).
-			{
-				createStore(
-				    createExtractElement(createBitCast(value, Int4::type()), Int::type(), 0),
-				    createBitCast(ptr, Pointer<Int>::type()),
-				    Int::type(), isVolatile, alignment, atomic, memoryOrder);
-				return value;
-			}
-			// Fallthrough to non-emulated case.
-		case Type_LLVM:
+		}
+		// Fallthrough to non-emulated case.
+	case Type_LLVM:
 		{
 			auto elTy = T(type);
 			ASSERT(V(ptr)->getType()->getContainedType(0) == elTy);
@@ -1046,9 +1046,9 @@
 
 			return value;
 		}
-		default:
-			UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
-			return nullptr;
+	default:
+		UNREACHABLE("asInternalType(type): %d", int(asInternalType(type)));
+		return nullptr;
 	}
 }
 
diff --git a/src/Reactor/LLVMReactor.hpp b/src/Reactor/LLVMReactor.hpp
index 254be42..181fbe7 100644
--- a/src/Reactor/LLVMReactor.hpp
+++ b/src/Reactor/LLVMReactor.hpp
@@ -124,14 +124,14 @@
 {
 	switch(memoryOrder)
 	{
-		case llvm::AtomicOrdering::Monotonic: return std::memory_order_relaxed;  // https://llvm.org/docs/Atomics.html#monotonic
-		case llvm::AtomicOrdering::Acquire: return std::memory_order_acquire;
-		case llvm::AtomicOrdering::Release: return std::memory_order_release;
-		case llvm::AtomicOrdering::AcquireRelease: return std::memory_order_acq_rel;
-		case llvm::AtomicOrdering::SequentiallyConsistent: return std::memory_order_seq_cst;
-		default:
-			UNREACHABLE("memoryOrder: %d", int(memoryOrder));
-			return std::memory_order_acq_rel;
+	case llvm::AtomicOrdering::Monotonic: return std::memory_order_relaxed;  // https://llvm.org/docs/Atomics.html#monotonic
+	case llvm::AtomicOrdering::Acquire: return std::memory_order_acquire;
+	case llvm::AtomicOrdering::Release: return std::memory_order_release;
+	case llvm::AtomicOrdering::AcquireRelease: return std::memory_order_acq_rel;
+	case llvm::AtomicOrdering::SequentiallyConsistent: return std::memory_order_seq_cst;
+	default:
+		UNREACHABLE("memoryOrder: %d", int(memoryOrder));
+		return std::memory_order_acq_rel;
 	}
 }
 
@@ -144,15 +144,15 @@
 
 	switch(memoryOrder)
 	{
-		case std::memory_order_relaxed: return llvm::AtomicOrdering::Monotonic;  // https://llvm.org/docs/Atomics.html#monotonic
-		case std::memory_order_consume: return llvm::AtomicOrdering::Acquire;    // https://llvm.org/docs/Atomics.html#acquire: "It should also be used for C++11/C11 memory_order_consume."
-		case std::memory_order_acquire: return llvm::AtomicOrdering::Acquire;
-		case std::memory_order_release: return llvm::AtomicOrdering::Release;
-		case std::memory_order_acq_rel: return llvm::AtomicOrdering::AcquireRelease;
-		case std::memory_order_seq_cst: return llvm::AtomicOrdering::SequentiallyConsistent;
-		default:
-			UNREACHABLE("memoryOrder: %d", int(memoryOrder));
-			return llvm::AtomicOrdering::AcquireRelease;
+	case std::memory_order_relaxed: return llvm::AtomicOrdering::Monotonic;  // https://llvm.org/docs/Atomics.html#monotonic
+	case std::memory_order_consume: return llvm::AtomicOrdering::Acquire;    // https://llvm.org/docs/Atomics.html#acquire: "It should also be used for C++11/C11 memory_order_consume."
+	case std::memory_order_acquire: return llvm::AtomicOrdering::Acquire;
+	case std::memory_order_release: return llvm::AtomicOrdering::Release;
+	case std::memory_order_acq_rel: return llvm::AtomicOrdering::AcquireRelease;
+	case std::memory_order_seq_cst: return llvm::AtomicOrdering::SequentiallyConsistent;
+	default:
+		UNREACHABLE("memoryOrder: %d", int(memoryOrder));
+		return llvm::AtomicOrdering::AcquireRelease;
 	}
 }
 
diff --git a/src/Reactor/Reactor.cpp b/src/Reactor/Reactor.cpp
index 3c4a146..e4b3ebe 100644
--- a/src/Reactor/Reactor.cpp
+++ b/src/Reactor/Reactor.cpp
@@ -55,18 +55,18 @@
 	{
 		switch(edit.first)
 		{
-			case ListEdit::Add:
-				list.push_back(edit.second);
-				break;
-			case ListEdit::Remove:
-				list.erase(std::remove_if(list.begin(), list.end(), [&](T item) {
-					           return item == edit.second;
-				           }),
-				           list.end());
-				break;
-			case ListEdit::Clear:
-				list.clear();
-				break;
+		case ListEdit::Add:
+			list.push_back(edit.second);
+			break;
+		case ListEdit::Remove:
+			list.erase(std::remove_if(list.begin(), list.end(), [&](T item) {
+				           return item == edit.second;
+			           }),
+			           list.end());
+			break;
+		case ListEdit::Clear:
+			list.clear();
+			break;
 		}
 	}
 }
diff --git a/src/Reactor/SubzeroReactor.cpp b/src/Reactor/SubzeroReactor.cpp
index 61243c0..301c6bd 100644
--- a/src/Reactor/SubzeroReactor.cpp
+++ b/src/Reactor/SubzeroReactor.cpp
@@ -260,12 +260,12 @@
 {
 	switch(level)
 	{
-		// Note that Opt_0 and Opt_1 are not implemented by Subzero
-		case rr::Optimization::Level::None: return Ice::Opt_m1;
-		case rr::Optimization::Level::Less: return Ice::Opt_m1;
-		case rr::Optimization::Level::Default: return Ice::Opt_2;
-		case rr::Optimization::Level::Aggressive: return Ice::Opt_2;
-		default: UNREACHABLE("Unknown Optimization Level %d", int(level));
+	// Note that Opt_0 and Opt_1 are not implemented by Subzero
+	case rr::Optimization::Level::None: return Ice::Opt_m1;
+	case rr::Optimization::Level::Less: return Ice::Opt_m1;
+	case rr::Optimization::Level::Default: return Ice::Opt_2;
+	case rr::Optimization::Level::Aggressive: return Ice::Opt_2;
+	default: UNREACHABLE("Unknown Optimization Level %d", int(level));
 	}
 	return Ice::Opt_2;
 }
@@ -274,12 +274,12 @@
 {
 	switch(memoryOrder)
 	{
-		case std::memory_order_relaxed: return Ice::Intrinsics::MemoryOrderRelaxed;
-		case std::memory_order_consume: return Ice::Intrinsics::MemoryOrderConsume;
-		case std::memory_order_acquire: return Ice::Intrinsics::MemoryOrderAcquire;
-		case std::memory_order_release: return Ice::Intrinsics::MemoryOrderRelease;
-		case std::memory_order_acq_rel: return Ice::Intrinsics::MemoryOrderAcquireRelease;
-		case std::memory_order_seq_cst: return Ice::Intrinsics::MemoryOrderSequentiallyConsistent;
+	case std::memory_order_relaxed: return Ice::Intrinsics::MemoryOrderRelaxed;
+	case std::memory_order_consume: return Ice::Intrinsics::MemoryOrderConsume;
+	case std::memory_order_acquire: return Ice::Intrinsics::MemoryOrderAcquire;
+	case std::memory_order_release: return Ice::Intrinsics::MemoryOrderRelease;
+	case std::memory_order_acq_rel: return Ice::Intrinsics::MemoryOrderAcquireRelease;
+	case std::memory_order_seq_cst: return Ice::Intrinsics::MemoryOrderSequentiallyConsistent;
 	}
 	return Ice::Intrinsics::MemoryOrderInvalid;
 }
@@ -442,13 +442,13 @@
 	{
 		switch(reinterpret_cast<std::intptr_t>(type))
 		{
-			case Type_v2i32: return 8;
-			case Type_v4i16: return 8;
-			case Type_v2i16: return 4;
-			case Type_v8i8: return 8;
-			case Type_v4i8: return 4;
-			case Type_v2f32: return 8;
-			default: ASSERT(false);
+		case Type_v2i32: return 8;
+		case Type_v4i16: return 8;
+		case Type_v2i16: return 4;
+		case Type_v8i8: return 8;
+		case Type_v4i8: return 4;
+		case Type_v2f32: return 8;
+		default: ASSERT(false);
 		}
 	}
 
@@ -523,43 +523,43 @@
 	{
 		switch(relocation.getType())
 		{
-			case R_ARM_NONE:
-				// No relocation
-				break;
-			case R_ARM_MOVW_ABS_NC:
+		case R_ARM_NONE:
+			// No relocation
+			break;
+		case R_ARM_MOVW_ABS_NC:
 			{
 				uint32_t thumb = 0;  // Calls to Thumb code not supported.
 				uint32_t lo = (uint32_t)(intptr_t)symbolValue | thumb;
 				*patchSite = (*patchSite & 0xFFF0F000) | ((lo & 0xF000) << 4) | (lo & 0x0FFF);
 			}
 			break;
-			case R_ARM_MOVT_ABS:
+		case R_ARM_MOVT_ABS:
 			{
 				uint32_t hi = (uint32_t)(intptr_t)(symbolValue) >> 16;
 				*patchSite = (*patchSite & 0xFFF0F000) | ((hi & 0xF000) << 4) | (hi & 0x0FFF);
 			}
 			break;
-			default:
-				ASSERT(false && "Unsupported relocation type");
-				return nullptr;
+		default:
+			ASSERT(false && "Unsupported relocation type");
+			return nullptr;
 		}
 	}
 	else
 	{
 		switch(relocation.getType())
 		{
-			case R_386_NONE:
-				// No relocation
-				break;
-			case R_386_32:
-				*patchSite = (int32_t)((intptr_t)symbolValue + *patchSite);
-				break;
-			case R_386_PC32:
-				*patchSite = (int32_t)((intptr_t)symbolValue + *patchSite - (intptr_t)patchSite);
-				break;
-			default:
-				ASSERT(false && "Unsupported relocation type");
-				return nullptr;
+		case R_386_NONE:
+			// No relocation
+			break;
+		case R_386_32:
+			*patchSite = (int32_t)((intptr_t)symbolValue + *patchSite);
+			break;
+		case R_386_PC32:
+			*patchSite = (int32_t)((intptr_t)symbolValue + *patchSite - (intptr_t)patchSite);
+			break;
+		default:
+			ASSERT(false && "Unsupported relocation type");
+			return nullptr;
 		}
 	}
 
@@ -607,21 +607,21 @@
 
 	switch(relocation.getType())
 	{
-		case R_X86_64_NONE:
-			// No relocation
-			break;
-		case R_X86_64_64:
-			*patchSite64 = (int64_t)((intptr_t)symbolValue + *patchSite64 + relocation.r_addend);
-			break;
-		case R_X86_64_PC32:
-			*patchSite32 = (int32_t)((intptr_t)symbolValue + *patchSite32 - (intptr_t)patchSite32 + relocation.r_addend);
-			break;
-		case R_X86_64_32S:
-			*patchSite32 = (int32_t)((intptr_t)symbolValue + *patchSite32 + relocation.r_addend);
-			break;
-		default:
-			ASSERT(false && "Unsupported relocation type");
-			return nullptr;
+	case R_X86_64_NONE:
+		// No relocation
+		break;
+	case R_X86_64_64:
+		*patchSite64 = (int64_t)((intptr_t)symbolValue + *patchSite64 + relocation.r_addend);
+		break;
+	case R_X86_64_PC32:
+		*patchSite32 = (int32_t)((intptr_t)symbolValue + *patchSite32 - (intptr_t)patchSite32 + relocation.r_addend);
+		break;
+	case R_X86_64_32S:
+		*patchSite32 = (int32_t)((intptr_t)symbolValue + *patchSite32 + relocation.r_addend);
+		break;
+	default:
+		ASSERT(false && "Unsupported relocation type");
+		return nullptr;
 	}
 
 	return symbolValue;
@@ -1201,16 +1201,16 @@
 {
 	switch(op)
 	{
-		case Ice::InstArithmetic::Add:
-		case Ice::InstArithmetic::Fadd:
-		case Ice::InstArithmetic::Mul:
-		case Ice::InstArithmetic::Fmul:
-		case Ice::InstArithmetic::And:
-		case Ice::InstArithmetic::Or:
-		case Ice::InstArithmetic::Xor:
-			return true;
-		default:
-			return false;
+	case Ice::InstArithmetic::Add:
+	case Ice::InstArithmetic::Fadd:
+	case Ice::InstArithmetic::Mul:
+	case Ice::InstArithmetic::Fmul:
+	case Ice::InstArithmetic::And:
+	case Ice::InstArithmetic::Or:
+	case Ice::InstArithmetic::Xor:
+		return true;
+	default:
+		return false;
 	}
 }
 
@@ -1973,16 +1973,16 @@
 	Ice::Type vecTy = T(vectorType);
 	switch(vecTy)
 	{
-		case Ice::IceType_v4i1: return T(Ice::IceType_i1);
-		case Ice::IceType_v8i1: return T(Ice::IceType_i1);
-		case Ice::IceType_v16i1: return T(Ice::IceType_i1);
-		case Ice::IceType_v16i8: return T(Ice::IceType_i8);
-		case Ice::IceType_v8i16: return T(Ice::IceType_i16);
-		case Ice::IceType_v4i32: return T(Ice::IceType_i32);
-		case Ice::IceType_v4f32: return T(Ice::IceType_f32);
-		default:
-			ASSERT_MSG(false, "getContainedType: input type is not a vector type");
-			return {};
+	case Ice::IceType_v4i1: return T(Ice::IceType_i1);
+	case Ice::IceType_v8i1: return T(Ice::IceType_i1);
+	case Ice::IceType_v16i1: return T(Ice::IceType_i1);
+	case Ice::IceType_v16i8: return T(Ice::IceType_i8);
+	case Ice::IceType_v8i16: return T(Ice::IceType_i16);
+	case Ice::IceType_v4i32: return T(Ice::IceType_i32);
+	case Ice::IceType_v4f32: return T(Ice::IceType_f32);
+	default:
+		ASSERT_MSG(false, "getContainedType: input type is not a vector type");
+		return {};
 	}
 }
 
@@ -2003,15 +2003,15 @@
 	Ice::Type valueTy = T(valueType);
 	switch(valueTy)
 	{
-		case Ice::IceType_i32:
-			return T(getNaturalIntType());
+	case Ice::IceType_i32:
+		return T(getNaturalIntType());
 
-		case Ice::IceType_f32:
-			return T(Ice::IceType_f64);
+	case Ice::IceType_f32:
+		return T(Ice::IceType_f64);
 
-		default:
-			UNIMPLEMENTED_NO_BUG("getPrintfStorageType: add more cases as needed");
-			return {};
+	default:
+		UNIMPLEMENTED_NO_BUG("getPrintfStorageType: add more cases as needed");
+		return {};
 	}
 }
 
@@ -2110,74 +2110,74 @@
 
 	switch((int)reinterpret_cast<intptr_t>(type))
 	{
-		case Ice::IceType_v4i32:
-		case Ice::IceType_v4i1:
+	case Ice::IceType_v4i32:
+	case Ice::IceType_v4i1:
 		{
 			const int initializer[4] = { (int)i[0], (int)i[1], (int)i[2], (int)i[3] };
 			static_assert(sizeof(initializer) == vectorSize, "!");
 			ptr = IceConstantData(initializer, vectorSize, alignment);
 		}
 		break;
-		case Ice::IceType_v4f32:
+	case Ice::IceType_v4f32:
 		{
 			const float initializer[4] = { (float)f[0], (float)f[1], (float)f[2], (float)f[3] };
 			static_assert(sizeof(initializer) == vectorSize, "!");
 			ptr = IceConstantData(initializer, vectorSize, alignment);
 		}
 		break;
-		case Ice::IceType_v8i16:
-		case Ice::IceType_v8i1:
+	case Ice::IceType_v8i16:
+	case Ice::IceType_v8i1:
 		{
 			const short initializer[8] = { (short)i[0], (short)i[1], (short)i[2], (short)i[3], (short)i[4], (short)i[5], (short)i[6], (short)i[7] };
 			static_assert(sizeof(initializer) == vectorSize, "!");
 			ptr = IceConstantData(initializer, vectorSize, alignment);
 		}
 		break;
-		case Ice::IceType_v16i8:
-		case Ice::IceType_v16i1:
+	case Ice::IceType_v16i8:
+	case Ice::IceType_v16i1:
 		{
 			const char initializer[16] = { (char)i[0], (char)i[1], (char)i[2], (char)i[3], (char)i[4], (char)i[5], (char)i[6], (char)i[7], (char)i[8], (char)i[9], (char)i[10], (char)i[11], (char)i[12], (char)i[13], (char)i[14], (char)i[15] };
 			static_assert(sizeof(initializer) == vectorSize, "!");
 			ptr = IceConstantData(initializer, vectorSize, alignment);
 		}
 		break;
-		case Type_v2i32:
+	case Type_v2i32:
 		{
 			const int initializer[4] = { (int)i[0], (int)i[1], (int)i[0], (int)i[1] };
 			static_assert(sizeof(initializer) == vectorSize, "!");
 			ptr = IceConstantData(initializer, vectorSize, alignment);
 		}
 		break;
-		case Type_v2f32:
+	case Type_v2f32:
 		{
 			const float initializer[4] = { (float)f[0], (float)f[1], (float)f[0], (float)f[1] };
 			static_assert(sizeof(initializer) == vectorSize, "!");
 			ptr = IceConstantData(initializer, vectorSize, alignment);
 		}
 		break;
-		case Type_v4i16:
+	case Type_v4i16:
 		{
 			const short initializer[8] = { (short)i[0], (short)i[1], (short)i[2], (short)i[3], (short)i[0], (short)i[1], (short)i[2], (short)i[3] };
 			static_assert(sizeof(initializer) == vectorSize, "!");
 			ptr = IceConstantData(initializer, vectorSize, alignment);
 		}
 		break;
-		case Type_v8i8:
+	case Type_v8i8:
 		{
 			const char initializer[16] = { (char)i[0], (char)i[1], (char)i[2], (char)i[3], (char)i[4], (char)i[5], (char)i[6], (char)i[7], (char)i[0], (char)i[1], (char)i[2], (char)i[3], (char)i[4], (char)i[5], (char)i[6], (char)i[7] };
 			static_assert(sizeof(initializer) == vectorSize, "!");
 			ptr = IceConstantData(initializer, vectorSize, alignment);
 		}
 		break;
-		case Type_v4i8:
+	case Type_v4i8:
 		{
 			const char initializer[16] = { (char)i[0], (char)i[1], (char)i[2], (char)i[3], (char)i[0], (char)i[1], (char)i[2], (char)i[3], (char)i[0], (char)i[1], (char)i[2], (char)i[3], (char)i[0], (char)i[1], (char)i[2], (char)i[3] };
 			static_assert(sizeof(initializer) == vectorSize, "!");
 			ptr = IceConstantData(initializer, vectorSize, alignment);
 		}
 		break;
-		default:
-			UNREACHABLE("Unknown constant vector type: %d", (int)reinterpret_cast<intptr_t>(type));
+	default:
+		UNREACHABLE("Unknown constant vector type: %d", (int)reinterpret_cast<intptr_t>(type));
 	}
 
 	ASSERT(ptr);
diff --git a/src/System/Configurator.cpp b/src/System/Configurator.cpp
index 410fb29..fb17fde 100644
--- a/src/System/Configurator.cpp
+++ b/src/System/Configurator.cpp
@@ -77,7 +77,7 @@
 			{
 				switch(line[pLeft])
 				{
-					case '[':
+				case '[':
 					{
 						string::size_type pRight = line.find_last_of("]");
 
@@ -88,17 +88,17 @@
 						}
 					}
 					break;
-					case '=':
+				case '=':
 					{
 						string valueName = line.substr(0, pLeft);
 						string value = line.substr(pLeft + 1);
 						addValue(keyName, valueName, value);
 					}
 					break;
-					case ';':
-					case '#':
-						// Ignore comments
-						break;
+				case ';':
+				case '#':
+					// Ignore comments
+					break;
 				}
 			}
 		}
diff --git a/src/System/Debug.cpp b/src/System/Debug.cpp
index 838d58d..b0bb7f3 100644
--- a/src/System/Debug.cpp
+++ b/src/System/Debug.cpp
@@ -112,23 +112,23 @@
 {
 	switch(level)
 	{
-		case Level::Debug:
-			__android_log_write(ANDROID_LOG_DEBUG, "SwiftShader", msg);
-			break;
-		case Level::Info:
-			__android_log_write(ANDROID_LOG_INFO, "SwiftShader", msg);
-			break;
-		case Level::Warn:
-			__android_log_write(ANDROID_LOG_WARN, "SwiftShader", msg);
-			break;
-		case Level::Error:
-			__android_log_write(ANDROID_LOG_ERROR, "SwiftShader", msg);
-			break;
-		case Level::Fatal:
-			__android_log_write(ANDROID_LOG_FATAL, "SwiftShader", msg);
-			break;
-		default:
-			break;
+	case Level::Debug:
+		__android_log_write(ANDROID_LOG_DEBUG, "SwiftShader", msg);
+		break;
+	case Level::Info:
+		__android_log_write(ANDROID_LOG_INFO, "SwiftShader", msg);
+		break;
+	case Level::Warn:
+		__android_log_write(ANDROID_LOG_WARN, "SwiftShader", msg);
+		break;
+	case Level::Error:
+		__android_log_write(ANDROID_LOG_ERROR, "SwiftShader", msg);
+		break;
+	case Level::Fatal:
+		__android_log_write(ANDROID_LOG_FATAL, "SwiftShader", msg);
+		break;
+	default:
+		break;
 	}
 }
 #else
@@ -136,17 +136,17 @@
 {
 	switch(level)
 	{
-		case Level::Debug:
-		case Level::Info:
-			fprintf(stdout, "%s", msg);
-			break;
-		case Level::Warn:
-		case Level::Error:
-		case Level::Fatal:
-			fprintf(stderr, "%s", msg);
-			break;
-		default:
-			break;
+	case Level::Debug:
+	case Level::Info:
+		fprintf(stdout, "%s", msg);
+		break;
+	case Level::Warn:
+	case Level::Error:
+	case Level::Fatal:
+		fprintf(stderr, "%s", msg);
+		break;
+	default:
+		break;
 	}
 }
 #endif
diff --git a/src/System/GrallocAndroid.cpp b/src/System/GrallocAndroid.cpp
index ffdf52e..428a5b9 100644
--- a/src/System/GrallocAndroid.cpp
+++ b/src/System/GrallocAndroid.cpp
@@ -59,19 +59,19 @@
 	m_major_version = (module->module_api_version >> 8) & 0xff;
 	switch(m_major_version)
 	{
-		case 0:
-			m_module = reinterpret_cast<const gralloc_module_t *>(module);
-			break;
-		case 1:
+	case 0:
+		m_module = reinterpret_cast<const gralloc_module_t *>(module);
+		break;
+	case 1:
 #ifdef HAVE_GRALLOC1
-			gralloc1_open(module, &m_gralloc1_device);
-			m_gralloc1_lock = (GRALLOC1_PFN_LOCK)m_gralloc1_device->getFunction(m_gralloc1_device, GRALLOC1_FUNCTION_LOCK);
-			m_gralloc1_unlock = (GRALLOC1_PFN_UNLOCK)m_gralloc1_device->getFunction(m_gralloc1_device, GRALLOC1_FUNCTION_UNLOCK);
-			break;
+		gralloc1_open(module, &m_gralloc1_device);
+		m_gralloc1_lock = (GRALLOC1_PFN_LOCK)m_gralloc1_device->getFunction(m_gralloc1_device, GRALLOC1_FUNCTION_LOCK);
+		m_gralloc1_unlock = (GRALLOC1_PFN_UNLOCK)m_gralloc1_device->getFunction(m_gralloc1_device, GRALLOC1_FUNCTION_UNLOCK);
+		break;
 #endif
-		default:
-			TRACE("unknown gralloc major version (%d)", m_major_version);
-			break;
+	default:
+		TRACE("unknown gralloc major version (%d)", m_major_version);
+		break;
 	}
 }
 
@@ -193,11 +193,11 @@
 
 	switch(m_major_version)
 	{
-		case 0:
+	case 0:
 		{
 			return m_module->lock(m_module, handle, usage, left, top, width, height, vaddr);
 		}
-		case 1:
+	case 1:
 #ifdef HAVE_GRALLOC1
 		{
 			gralloc1_rect_t outRect{};
@@ -208,7 +208,7 @@
 			return m_gralloc1_lock(m_gralloc1_device, handle, usage, usage, &outRect, vaddr, -1);
 		}
 #endif
-		default:
+	default:
 		{
 			TRACE("no gralloc module to lock");
 			return -1;
@@ -248,11 +248,11 @@
 
 	switch(m_major_version)
 	{
-		case 0:
+	case 0:
 		{
 			return m_module->unlock(m_module, handle);
 		}
-		case 1:
+	case 1:
 #ifdef HAVE_GRALLOC1
 		{
 			int32_t fenceFd = -1;
@@ -265,7 +265,7 @@
 			return error;
 		}
 #endif
-		default:
+	default:
 		{
 			TRACE("no gralloc module to unlock");
 			return -1;
diff --git a/src/Vulkan/Debug/Thread.cpp b/src/Vulkan/Debug/Thread.cpp
index dd9e22d..b97e2c3 100644
--- a/src/Vulkan/Debug/Thread.cpp
+++ b/src/Vulkan/Debug/Thread.cpp
@@ -53,13 +53,11 @@
 
 	switch(state_)
 	{
-		case State::Paused:
-		{
-			lock.wait(stateCV, [this]() REQUIRES(mutex) { return state_ != State::Paused; });
-			break;
-		}
+	case State::Paused:
+		lock.wait(stateCV, [this]() REQUIRES(mutex) { return state_ != State::Paused; });
+		break;
 
-		case State::Stepping:
+	case State::Stepping:
 		{
 			bool pause = false;
 
@@ -79,11 +77,11 @@
 				state_ = State::Paused;
 				lock.wait(stateCV, [this]() REQUIRES(mutex) { return state_ != State::Paused; });
 			}
-			break;
 		}
+		break;
 
-		case State::Running:
-			break;
+	case State::Running:
+		break;
 	}
 }
 
diff --git a/src/Vulkan/VkCommandBuffer.cpp b/src/Vulkan/VkCommandBuffer.cpp
index 53c62b9..aa7f024 100644
--- a/src/Vulkan/VkCommandBuffer.cpp
+++ b/src/Vulkan/VkCommandBuffer.cpp
@@ -1364,12 +1364,12 @@
 {
 	switch(pipelineBindPoint)
 	{
-		case VK_PIPELINE_BIND_POINT_COMPUTE:
-		case VK_PIPELINE_BIND_POINT_GRAPHICS:
-			addCommand<::CmdPipelineBind>(pipelineBindPoint, pipeline);
-			break;
-		default:
-			UNSUPPORTED("VkPipelineBindPoint %d", int(pipelineBindPoint));
+	case VK_PIPELINE_BIND_POINT_COMPUTE:
+	case VK_PIPELINE_BIND_POINT_GRAPHICS:
+		addCommand<::CmdPipelineBind>(pipelineBindPoint, pipeline);
+		break;
+	default:
+		UNSUPPORTED("VkPipelineBindPoint %d", int(pipelineBindPoint));
 	}
 }
 
diff --git a/src/Vulkan/VkDescriptorSet.cpp b/src/Vulkan/VkDescriptorSet.cpp
index a53ba6e..b9e2814 100644
--- a/src/Vulkan/VkDescriptorSet.cpp
+++ b/src/Vulkan/VkDescriptorSet.cpp
@@ -48,16 +48,16 @@
 					ImageView *memoryOwner = nullptr;
 					switch(type)
 					{
-						case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
-						case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
-							memoryOwner = reinterpret_cast<SampledImageDescriptor *>(descriptorMemory)->memoryOwner;
-							break;
-						case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
-						case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
-							memoryOwner = reinterpret_cast<StorageImageDescriptor *>(descriptorMemory)->memoryOwner;
-							break;
-						default:
-							break;
+					case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+					case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+						memoryOwner = reinterpret_cast<SampledImageDescriptor *>(descriptorMemory)->memoryOwner;
+						break;
+					case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+					case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+						memoryOwner = reinterpret_cast<StorageImageDescriptor *>(descriptorMemory)->memoryOwner;
+						break;
+					default:
+						break;
 					}
 					if(memoryOwner)
 					{
diff --git a/src/Vulkan/VkDescriptorSetLayout.cpp b/src/Vulkan/VkDescriptorSetLayout.cpp
index f8a1c24..242d6d2 100644
--- a/src/Vulkan/VkDescriptorSetLayout.cpp
+++ b/src/Vulkan/VkDescriptorSetLayout.cpp
@@ -115,23 +115,23 @@
 {
 	switch(type)
 	{
-		case VK_DESCRIPTOR_TYPE_SAMPLER:
-		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
-		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
-		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
-			return static_cast<uint32_t>(sizeof(SampledImageDescriptor));
-		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
-		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
-		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
-			return static_cast<uint32_t>(sizeof(StorageImageDescriptor));
-		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
-		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
-		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
-		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
-			return static_cast<uint32_t>(sizeof(BufferDescriptor));
-		default:
-			UNSUPPORTED("Unsupported Descriptor Type: %d", int(type));
-			return 0;
+	case VK_DESCRIPTOR_TYPE_SAMPLER:
+	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+	case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+		return static_cast<uint32_t>(sizeof(SampledImageDescriptor));
+	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+	case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+		return static_cast<uint32_t>(sizeof(StorageImageDescriptor));
+	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+		return static_cast<uint32_t>(sizeof(BufferDescriptor));
+	default:
+		UNSUPPORTED("Unsupported Descriptor Type: %d", int(type));
+		return 0;
 	}
 }
 
@@ -549,31 +549,31 @@
 	void const *ptr = nullptr;
 	switch(writeDescriptorSet.descriptorType)
 	{
-		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
-		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
-			ptr = writeDescriptorSet.pTexelBufferView;
-			e.stride = sizeof(VkBufferView);
-			break;
+	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+		ptr = writeDescriptorSet.pTexelBufferView;
+		e.stride = sizeof(VkBufferView);
+		break;
 
-		case VK_DESCRIPTOR_TYPE_SAMPLER:
-		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
-		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
-		case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
-		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
-			ptr = writeDescriptorSet.pImageInfo;
-			e.stride = sizeof(VkDescriptorImageInfo);
-			break;
+	case VK_DESCRIPTOR_TYPE_SAMPLER:
+	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+	case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+	case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+		ptr = writeDescriptorSet.pImageInfo;
+		e.stride = sizeof(VkDescriptorImageInfo);
+		break;
 
-		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
-		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
-		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
-		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
-			ptr = writeDescriptorSet.pBufferInfo;
-			e.stride = sizeof(VkDescriptorBufferInfo);
-			break;
+	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+		ptr = writeDescriptorSet.pBufferInfo;
+		e.stride = sizeof(VkDescriptorBufferInfo);
+		break;
 
-		default:
-			UNSUPPORTED("descriptor type %u", writeDescriptorSet.descriptorType);
+	default:
+		UNSUPPORTED("descriptor type %u", writeDescriptorSet.descriptorType);
 	}
 
 	WriteDescriptorSet(device, dstSet, e, reinterpret_cast<char const *>(ptr));
diff --git a/src/Vulkan/VkDeviceMemory.cpp b/src/Vulkan/VkDeviceMemory.cpp
index 9921328..76cfe20 100644
--- a/src/Vulkan/VkDeviceMemory.cpp
+++ b/src/Vulkan/VkDeviceMemory.cpp
@@ -127,7 +127,7 @@
 			{
 				switch(createInfo->sType)
 				{
-					case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT:
+				case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT:
 					{
 						const auto *importInfo = reinterpret_cast<const VkImportMemoryHostPointerInfoEXT *>(createInfo);
 
@@ -137,10 +137,10 @@
 						}
 						hostPointer = importInfo->pHostPointer;
 						supported = true;
-						break;
 					}
-					default:
-						break;
+					break;
+				default:
+					break;
 				}
 				createInfo = createInfo->pNext;
 			}
@@ -203,7 +203,7 @@
 		{
 			switch(createInfo->sType)
 			{
-				case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
+			case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
 				{
 					const auto *importInfo = reinterpret_cast<const VkImportMemoryFdInfoKHR *>(createInfo);
 
@@ -215,7 +215,7 @@
 					fd = importInfo->fd;
 				}
 				break;
-				case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
+			case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
 				{
 					const auto *exportInfo = reinterpret_cast<const VkExportMemoryAllocateInfo *>(createInfo);
 
@@ -226,13 +226,13 @@
 					exportFd = true;
 				}
 				break;
-				case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
-					// This can safely be ignored, as the Vulkan spec mentions:
-					// "If the pNext chain includes a VkMemoryDedicatedAllocateInfo structure, then that structure
-					//  includes a handle of the sole buffer or image resource that the memory *can* be bound to."
-					break;
-				default:
-					WARN("VkMemoryAllocateInfo->pNext sType = %s", vk::Stringify(createInfo->sType).c_str());
+			case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
+				// This can safely be ignored, as the Vulkan spec mentions:
+				// "If the pNext chain includes a VkMemoryDedicatedAllocateInfo structure, then that structure
+				//  includes a handle of the sole buffer or image resource that the memory *can* be bound to."
+				break;
+			default:
+				WARN("VkMemoryAllocateInfo->pNext sType = %s", vk::Stringify(createInfo->sType).c_str());
 			}
 			createInfo = createInfo->pNext;
 		}
diff --git a/src/Vulkan/VkDeviceMemoryExternalAndroid.cpp b/src/Vulkan/VkDeviceMemoryExternalAndroid.cpp
index 83e2901..acdbf89 100644
--- a/src/Vulkan/VkDeviceMemoryExternalAndroid.cpp
+++ b/src/Vulkan/VkDeviceMemoryExternalAndroid.cpp
@@ -27,35 +27,35 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_D16_UNORM:
-			return AHARDWAREBUFFER_FORMAT_D16_UNORM;
-		case VK_FORMAT_X8_D24_UNORM_PACK32:
-			UNSUPPORTED("AHardwareBufferExternalMemory::VkFormat VK_FORMAT_X8_D24_UNORM_PACK32");
-			return AHARDWAREBUFFER_FORMAT_D24_UNORM;
-		case VK_FORMAT_D24_UNORM_S8_UINT:
-			UNSUPPORTED("AHardwareBufferExternalMemory::VkFormat VK_FORMAT_D24_UNORM_S8_UINT");
-			return AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT;
-		case VK_FORMAT_D32_SFLOAT:
-			return AHARDWAREBUFFER_FORMAT_D32_FLOAT;
-		case VK_FORMAT_D32_SFLOAT_S8_UINT:
-			return AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT;
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-			return AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM;
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-			return AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT;
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-			return AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM;
-		case VK_FORMAT_R8G8B8A8_UNORM:
-			return AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
-		case VK_FORMAT_R8G8B8_UNORM:
-			return AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM;
-		case VK_FORMAT_S8_UINT:
-			return AHARDWAREBUFFER_FORMAT_S8_UINT;
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-			return AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420;
-		default:
-			UNSUPPORTED("AHardwareBufferExternalMemory::VkFormat %d", int(format));
-			return 0;
+	case VK_FORMAT_D16_UNORM:
+		return AHARDWAREBUFFER_FORMAT_D16_UNORM;
+	case VK_FORMAT_X8_D24_UNORM_PACK32:
+		UNSUPPORTED("AHardwareBufferExternalMemory::VkFormat VK_FORMAT_X8_D24_UNORM_PACK32");
+		return AHARDWAREBUFFER_FORMAT_D24_UNORM;
+	case VK_FORMAT_D24_UNORM_S8_UINT:
+		UNSUPPORTED("AHardwareBufferExternalMemory::VkFormat VK_FORMAT_D24_UNORM_S8_UINT");
+		return AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT;
+	case VK_FORMAT_D32_SFLOAT:
+		return AHARDWAREBUFFER_FORMAT_D32_FLOAT;
+	case VK_FORMAT_D32_SFLOAT_S8_UINT:
+		return AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT;
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		return AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM;
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+		return AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT;
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+		return AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM;
+	case VK_FORMAT_R8G8B8A8_UNORM:
+		return AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
+	case VK_FORMAT_R8G8B8_UNORM:
+		return AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM;
+	case VK_FORMAT_S8_UINT:
+		return AHARDWAREBUFFER_FORMAT_S8_UINT;
+	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		return AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420;
+	default:
+		UNSUPPORTED("AHardwareBufferExternalMemory::VkFormat %d", int(format));
+		return 0;
 	}
 }
 
@@ -171,14 +171,14 @@
 	{
 		switch(createInfo->sType)
 		{
-			case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
+		case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
 			{
 				const auto *importInfo = reinterpret_cast<const VkImportAndroidHardwareBufferInfoANDROID *>(createInfo);
 				importAhb = true;
 				ahb = importInfo->buffer;
 			}
 			break;
-			case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
+		case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
 			{
 				const auto *exportInfo = reinterpret_cast<const VkExportMemoryAllocateInfo *>(createInfo);
 
@@ -192,14 +192,14 @@
 				}
 			}
 			break;
-			case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
+		case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
 			{
 				const auto *dedicatedAllocateInfo = reinterpret_cast<const VkMemoryDedicatedAllocateInfo *>(createInfo);
 				dedicatedImageHandle = vk::Cast(dedicatedAllocateInfo->image);
 				dedicatedBufferHandle = vk::Cast(dedicatedAllocateInfo->buffer);
 			}
 			break;
-			default:
+		default:
 			{
 				LOG_TRAP("VkMemoryAllocateInfo->pNext sType = %s", vk::Stringify(createInfo->sType).c_str());
 			}
@@ -362,40 +362,40 @@
 {
 	switch(ahbFormat)
 	{
-		case AHARDWAREBUFFER_FORMAT_BLOB:
-			return VK_FORMAT_UNDEFINED;
-		case AHARDWAREBUFFER_FORMAT_D16_UNORM:
-			return VK_FORMAT_D16_UNORM;
-		case AHARDWAREBUFFER_FORMAT_D24_UNORM:
-			UNSUPPORTED("AHardwareBufferExternalMemory::AndroidHardwareBuffer_Format AHARDWAREBUFFER_FORMAT_D24_UNORM");
-			return VK_FORMAT_X8_D24_UNORM_PACK32;
-		case AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT:
-			UNSUPPORTED("AHardwareBufferExternalMemory::AndroidHardwareBuffer_Format AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT");
-			return VK_FORMAT_X8_D24_UNORM_PACK32;
-		case AHARDWAREBUFFER_FORMAT_D32_FLOAT:
-			return VK_FORMAT_D32_SFLOAT;
-		case AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT:
-			return VK_FORMAT_D32_SFLOAT_S8_UINT;
-		case AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM:
-			return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
-		case AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT:
-			return VK_FORMAT_R16G16B16A16_SFLOAT;
-		case AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM:
-			return VK_FORMAT_R5G6B5_UNORM_PACK16;
-		case AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM:
-			return VK_FORMAT_R8G8B8A8_UNORM;
-		case AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM:
-			return VK_FORMAT_R8G8B8A8_UNORM;
-		case AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM:
-			return VK_FORMAT_R8G8B8_UNORM;
-		case AHARDWAREBUFFER_FORMAT_S8_UINT:
-			return VK_FORMAT_S8_UINT;
-		case AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420:
-		case AHARDWAREBUFFER_FORMAT_YV12:
-			return VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM;
-		default:
-			UNSUPPORTED("AHardwareBufferExternalMemory::AHardwareBuffer_Format %d", int(ahbFormat));
-			return VK_FORMAT_UNDEFINED;
+	case AHARDWAREBUFFER_FORMAT_BLOB:
+		return VK_FORMAT_UNDEFINED;
+	case AHARDWAREBUFFER_FORMAT_D16_UNORM:
+		return VK_FORMAT_D16_UNORM;
+	case AHARDWAREBUFFER_FORMAT_D24_UNORM:
+		UNSUPPORTED("AHardwareBufferExternalMemory::AndroidHardwareBuffer_Format AHARDWAREBUFFER_FORMAT_D24_UNORM");
+		return VK_FORMAT_X8_D24_UNORM_PACK32;
+	case AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT:
+		UNSUPPORTED("AHardwareBufferExternalMemory::AndroidHardwareBuffer_Format AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT");
+		return VK_FORMAT_X8_D24_UNORM_PACK32;
+	case AHARDWAREBUFFER_FORMAT_D32_FLOAT:
+		return VK_FORMAT_D32_SFLOAT;
+	case AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT:
+		return VK_FORMAT_D32_SFLOAT_S8_UINT;
+	case AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM:
+		return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
+	case AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT:
+		return VK_FORMAT_R16G16B16A16_SFLOAT;
+	case AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM:
+		return VK_FORMAT_R5G6B5_UNORM_PACK16;
+	case AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM:
+		return VK_FORMAT_R8G8B8A8_UNORM;
+	case AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM:
+		return VK_FORMAT_R8G8B8A8_UNORM;
+	case AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM:
+		return VK_FORMAT_R8G8B8_UNORM;
+	case AHARDWAREBUFFER_FORMAT_S8_UINT:
+		return VK_FORMAT_S8_UINT;
+	case AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420:
+	case AHARDWAREBUFFER_FORMAT_YV12:
+		return VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM;
+	default:
+		UNSUPPORTED("AHardwareBufferExternalMemory::AHardwareBuffer_Format %d", int(ahbFormat));
+		return VK_FORMAT_UNDEFINED;
 	}
 }
 
@@ -483,23 +483,23 @@
 
 	switch(ahbDesc.format)
 	{
-		case AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420:
-		case AHARDWAREBUFFER_FORMAT_YV12:
-			switch(aspect)
-			{
-				case VK_IMAGE_ASPECT_PLANE_0_BIT:
-					return static_cast<int>(ahbPlanes.planes[0].rowStride);
-				case VK_IMAGE_ASPECT_PLANE_1_BIT:
-					return static_cast<int>(ahbPlanes.planes[1].rowStride);
-				case VK_IMAGE_ASPECT_PLANE_2_BIT:
-					return static_cast<int>(ahbPlanes.planes[2].rowStride);
-				default:
-					UNSUPPORTED("Unsupported aspect %d for AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420", int(aspect));
-					return 0;
-			}
-			break;
+	case AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420:
+	case AHARDWAREBUFFER_FORMAT_YV12:
+		switch(aspect)
+		{
+		case VK_IMAGE_ASPECT_PLANE_0_BIT:
+			return static_cast<int>(ahbPlanes.planes[0].rowStride);
+		case VK_IMAGE_ASPECT_PLANE_1_BIT:
+			return static_cast<int>(ahbPlanes.planes[1].rowStride);
+		case VK_IMAGE_ASPECT_PLANE_2_BIT:
+			return static_cast<int>(ahbPlanes.planes[2].rowStride);
 		default:
-			break;
+			UNSUPPORTED("Unsupported aspect %d for AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420", int(aspect));
+			return 0;
+		}
+		break;
+	default:
+		break;
 	}
 	return static_cast<int>(ahbPlanes.planes[0].rowStride);
 }
@@ -510,25 +510,25 @@
 
 	switch(ahbDesc.format)
 	{
-		case AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420:
-		case AHARDWAREBUFFER_FORMAT_YV12:
-			switch(aspect)
-			{
-				case VK_IMAGE_ASPECT_PLANE_0_BIT:
-					return 0;
-				case VK_IMAGE_ASPECT_PLANE_1_BIT:
-					return reinterpret_cast<const char *>(ahbPlanes.planes[1].data) -
-					       reinterpret_cast<const char *>(ahbPlanes.planes[0].data);
-				case VK_IMAGE_ASPECT_PLANE_2_BIT:
-					return reinterpret_cast<const char *>(ahbPlanes.planes[2].data) -
-					       reinterpret_cast<const char *>(ahbPlanes.planes[0].data);
-				default:
-					UNSUPPORTED("Unsupported aspect %d for AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420", int(aspect));
-					return 0;
-			}
-			break;
+	case AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420:
+	case AHARDWAREBUFFER_FORMAT_YV12:
+		switch(aspect)
+		{
+		case VK_IMAGE_ASPECT_PLANE_0_BIT:
+			return 0;
+		case VK_IMAGE_ASPECT_PLANE_1_BIT:
+			return reinterpret_cast<const char *>(ahbPlanes.planes[1].data) -
+			       reinterpret_cast<const char *>(ahbPlanes.planes[0].data);
+		case VK_IMAGE_ASPECT_PLANE_2_BIT:
+			return reinterpret_cast<const char *>(ahbPlanes.planes[2].data) -
+			       reinterpret_cast<const char *>(ahbPlanes.planes[0].data);
 		default:
-			break;
+			UNSUPPORTED("Unsupported aspect %d for AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420", int(aspect));
+			return 0;
+		}
+		break;
+	default:
+		break;
 	}
 	return 0;
 }
diff --git a/src/Vulkan/VkDeviceMemoryExternalFuchsia.hpp b/src/Vulkan/VkDeviceMemoryExternalFuchsia.hpp
index d1de8a2..5823397 100644
--- a/src/Vulkan/VkDeviceMemoryExternalFuchsia.hpp
+++ b/src/Vulkan/VkDeviceMemoryExternalFuchsia.hpp
@@ -43,7 +43,7 @@
 			{
 				switch(extInfo->sType)
 				{
-					case VK_STRUCTURE_TYPE_TEMP_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA:
+				case VK_STRUCTURE_TYPE_TEMP_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA:
 					{
 						const auto *importInfo = reinterpret_cast<const VkImportMemoryZirconHandleInfoFUCHSIA *>(extInfo);
 
@@ -53,9 +53,9 @@
 						}
 						importHandle = true;
 						handle = importInfo->handle;
-						break;
 					}
-					case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
+					break;
+				case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
 					{
 						const auto *exportInfo = reinterpret_cast<const VkExportMemoryAllocateInfo *>(extInfo);
 
@@ -64,16 +64,16 @@
 							UNSUPPORTED("exportInfo->handleTypes");
 						}
 						exportHandle = true;
-						break;
 					}
-					case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
-						// This can safely be ignored, as the Vulkan spec mentions:
-						// "If the pNext chain includes a VkMemoryDedicatedAllocateInfo structure, then that structure
-						//  includes a handle of the sole buffer or image resource that the memory *can* be bound to."
-						break;
+					break;
+				case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
+					// This can safely be ignored, as the Vulkan spec mentions:
+					// "If the pNext chain includes a VkMemoryDedicatedAllocateInfo structure, then that structure
+					//  includes a handle of the sole buffer or image resource that the memory *can* be bound to."
+					break;
 
-					default:
-						WARN("VkMemoryAllocateInfo->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
+				default:
+					WARN("VkMemoryAllocateInfo->pNext sType = %s", vk::Stringify(extInfo->sType).c_str());
 				}
 				extInfo = extInfo->pNext;
 			}
diff --git a/src/Vulkan/VkFormat.cpp b/src/Vulkan/VkFormat.cpp
index a8c211b..bd412c0 100644
--- a/src/Vulkan/VkFormat.cpp
+++ b/src/Vulkan/VkFormat.cpp
@@ -23,28 +23,28 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_R4G4_UNORM_PACK8:
-		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
-		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
-		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-		case VK_FORMAT_B5G6R5_UNORM_PACK16:
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16B16_UNORM:
-		case VK_FORMAT_R16G16B16A16_UNORM:
-			return true;
-		default:
-			return false;
+	case VK_FORMAT_R4G4_UNORM_PACK8:
+	case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+	case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+	case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_B5G6R5_UNORM_PACK16:
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16B16_UNORM:
+	case VK_FORMAT_R16G16B16A16_UNORM:
+		return true;
+	default:
+		return false;
 	}
 }
 
@@ -52,20 +52,20 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_B8G8R8A8_SNORM:
-		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R16G16B16_SNORM:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-			return true;
-		default:
-			return false;
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_B8G8R8A8_SNORM:
+	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R16G16B16_SNORM:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+		return true;
+	default:
+		return false;
 	}
 }
 
@@ -73,28 +73,28 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_B8G8R8A8_SINT:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16B16_SINT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32B32_SINT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R64_SINT:
-		case VK_FORMAT_R64G64_SINT:
-		case VK_FORMAT_R64G64B64_SINT:
-		case VK_FORMAT_R64G64B64A64_SINT:
-			return true;
-		default:
-			return false;
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_B8G8R8A8_SINT:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16B16_SINT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32B32_SINT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R64_SINT:
+	case VK_FORMAT_R64G64_SINT:
+	case VK_FORMAT_R64G64B64_SINT:
+	case VK_FORMAT_R64G64B64A64_SINT:
+		return true;
+	default:
+		return false;
 	}
 }
 
@@ -102,29 +102,29 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_B8G8R8A8_UINT:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16B16_UINT:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32B32_UINT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R64_UINT:
-		case VK_FORMAT_R64G64_UINT:
-		case VK_FORMAT_R64G64B64_UINT:
-		case VK_FORMAT_R64G64B64A64_UINT:
-		case VK_FORMAT_S8_UINT:
-			return true;
-		default:
-			return false;
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_B8G8R8A8_UINT:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16B16_UINT:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32B32_UINT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R64_UINT:
+	case VK_FORMAT_R64G64_UINT:
+	case VK_FORMAT_R64G64B64_UINT:
+	case VK_FORMAT_R64G64B64A64_UINT:
+	case VK_FORMAT_S8_UINT:
+		return true;
+	default:
+		return false;
 	}
 }
 
@@ -145,15 +145,15 @@
 	// YCbCr formats
 	switch(format)
 	{
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-			aspects = VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT;
-			break;
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-			aspects = VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT;
-			break;
-		default:
-			ASSERT(!isYcbcrFormat());
-			break;
+	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		aspects = VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT;
+		break;
+	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+		aspects = VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT;
+		break;
+	default:
+		ASSERT(!isYcbcrFormat());
+		break;
 	}
 
 	// Anything else is "color".
@@ -165,88 +165,88 @@
 {
 	switch(aspect)
 	{
-		case VK_IMAGE_ASPECT_COLOR_BIT:
-		case(VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT):
-		case(VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT):
-		case(VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT):
-			return format;
+	case VK_IMAGE_ASPECT_COLOR_BIT:
+	case(VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT):
+	case(VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT):
+	case(VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT):
+		return format;
 
-		case VK_IMAGE_ASPECT_DEPTH_BIT:
-			switch(format)
-			{
-				case VK_FORMAT_D16_UNORM:
-				case VK_FORMAT_D16_UNORM_S8_UINT:
-					return VK_FORMAT_D16_UNORM;
-				case VK_FORMAT_D24_UNORM_S8_UINT:
-					return VK_FORMAT_X8_D24_UNORM_PACK32;
-				case VK_FORMAT_D32_SFLOAT:
-				case VK_FORMAT_D32_SFLOAT_S8_UINT:
-					return VK_FORMAT_D32_SFLOAT;
-				default:
-					UNSUPPORTED("format %d", int(format));
-					break;
-			}
-			break;
-
-		case VK_IMAGE_ASPECT_STENCIL_BIT:
-			switch(format)
-			{
-				case VK_FORMAT_S8_UINT:
-				case VK_FORMAT_D16_UNORM_S8_UINT:
-				case VK_FORMAT_D24_UNORM_S8_UINT:
-				case VK_FORMAT_D32_SFLOAT_S8_UINT:
-					return VK_FORMAT_S8_UINT;
-				default:
-					UNSUPPORTED("format %d", int(format));
-					break;
-			}
-			break;
-
-		// YCbCr formats
-		// Vulkan 1.1 section 32.1.1. Compatible formats of planes of multi-planar formats
-		case VK_IMAGE_ASPECT_PLANE_0_BIT:
-			switch(format)
-			{
-				case VK_FORMAT_R8_UNORM:
-				case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-				case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-					return VK_FORMAT_R8_UNORM;
-				default:
-					UNSUPPORTED("format %d", int(format));
-					break;
-			}
-			break;
-
-		case VK_IMAGE_ASPECT_PLANE_1_BIT:
-			switch(format)
-			{
-				case VK_FORMAT_R8_UNORM:
-				case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-					return VK_FORMAT_R8_UNORM;
-				case VK_FORMAT_R8G8_UNORM:
-				case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-					return VK_FORMAT_R8G8_UNORM;
-				default:
-					UNSUPPORTED("format %d", int(format));
-					break;
-			}
-			break;
-
-		case VK_IMAGE_ASPECT_PLANE_2_BIT:
-			switch(format)
-			{
-				case VK_FORMAT_R8_UNORM:
-				case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-					return VK_FORMAT_R8_UNORM;
-				default:
-					UNSUPPORTED("format %d", int(format));
-					break;
-			}
-			break;
-
+	case VK_IMAGE_ASPECT_DEPTH_BIT:
+		switch(format)
+		{
+		case VK_FORMAT_D16_UNORM:
+		case VK_FORMAT_D16_UNORM_S8_UINT:
+			return VK_FORMAT_D16_UNORM;
+		case VK_FORMAT_D24_UNORM_S8_UINT:
+			return VK_FORMAT_X8_D24_UNORM_PACK32;
+		case VK_FORMAT_D32_SFLOAT:
+		case VK_FORMAT_D32_SFLOAT_S8_UINT:
+			return VK_FORMAT_D32_SFLOAT;
 		default:
-			UNSUPPORTED("aspect %x", int(aspect));
+			UNSUPPORTED("format %d", int(format));
 			break;
+		}
+		break;
+
+	case VK_IMAGE_ASPECT_STENCIL_BIT:
+		switch(format)
+		{
+		case VK_FORMAT_S8_UINT:
+		case VK_FORMAT_D16_UNORM_S8_UINT:
+		case VK_FORMAT_D24_UNORM_S8_UINT:
+		case VK_FORMAT_D32_SFLOAT_S8_UINT:
+			return VK_FORMAT_S8_UINT;
+		default:
+			UNSUPPORTED("format %d", int(format));
+			break;
+		}
+		break;
+
+	// YCbCr formats
+	// Vulkan 1.1 section 32.1.1. Compatible formats of planes of multi-planar formats
+	case VK_IMAGE_ASPECT_PLANE_0_BIT:
+		switch(format)
+		{
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+			return VK_FORMAT_R8_UNORM;
+		default:
+			UNSUPPORTED("format %d", int(format));
+			break;
+		}
+		break;
+
+	case VK_IMAGE_ASPECT_PLANE_1_BIT:
+		switch(format)
+		{
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+			return VK_FORMAT_R8_UNORM;
+		case VK_FORMAT_R8G8_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+			return VK_FORMAT_R8G8_UNORM;
+		default:
+			UNSUPPORTED("format %d", int(format));
+			break;
+		}
+		break;
+
+	case VK_IMAGE_ASPECT_PLANE_2_BIT:
+		switch(format)
+		{
+		case VK_FORMAT_R8_UNORM:
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+			return VK_FORMAT_R8_UNORM;
+		default:
+			UNSUPPORTED("format %d", int(format));
+			break;
+		}
+		break;
+
+	default:
+		UNSUPPORTED("aspect %x", int(aspect));
+		break;
 	}
 
 	return format;
@@ -256,16 +256,16 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_D16_UNORM_S8_UINT:
-		case VK_FORMAT_D24_UNORM_S8_UINT:
-		case VK_FORMAT_S8_UINT:
-		case VK_FORMAT_D32_SFLOAT_S8_UINT:
-			return true;
-		case VK_FORMAT_D16_UNORM:
-		case VK_FORMAT_X8_D24_UNORM_PACK32:
-		case VK_FORMAT_D32_SFLOAT:
-		default:
-			return false;
+	case VK_FORMAT_D16_UNORM_S8_UINT:
+	case VK_FORMAT_D24_UNORM_S8_UINT:
+	case VK_FORMAT_S8_UINT:
+	case VK_FORMAT_D32_SFLOAT_S8_UINT:
+		return true;
+	case VK_FORMAT_D16_UNORM:
+	case VK_FORMAT_X8_D24_UNORM_PACK32:
+	case VK_FORMAT_D32_SFLOAT:
+	default:
+		return false;
 	}
 }
 
@@ -273,16 +273,16 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_D16_UNORM:
-		case VK_FORMAT_D16_UNORM_S8_UINT:
-		case VK_FORMAT_X8_D24_UNORM_PACK32:
-		case VK_FORMAT_D24_UNORM_S8_UINT:
-		case VK_FORMAT_D32_SFLOAT:
-		case VK_FORMAT_D32_SFLOAT_S8_UINT:
-			return true;
-		case VK_FORMAT_S8_UINT:
-		default:
-			return false;
+	case VK_FORMAT_D16_UNORM:
+	case VK_FORMAT_D16_UNORM_S8_UINT:
+	case VK_FORMAT_X8_D24_UNORM_PACK32:
+	case VK_FORMAT_D24_UNORM_S8_UINT:
+	case VK_FORMAT_D32_SFLOAT:
+	case VK_FORMAT_D32_SFLOAT_S8_UINT:
+		return true;
+	case VK_FORMAT_S8_UINT:
+	default:
+		return false;
 	}
 }
 
@@ -290,35 +290,35 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_R8_SRGB:
-		case VK_FORMAT_R8G8_SRGB:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-		case VK_FORMAT_BC2_SRGB_BLOCK:
-		case VK_FORMAT_BC3_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-			return true;
-		default:
-			return false;
+	case VK_FORMAT_R8_SRGB:
+	case VK_FORMAT_R8G8_SRGB:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+	case VK_FORMAT_BC2_SRGB_BLOCK:
+	case VK_FORMAT_BC3_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+		return true;
+	default:
+		return false;
 	}
 }
 
@@ -326,128 +326,128 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_R4G4_UNORM_PACK8:
-		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
-		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-		case VK_FORMAT_B5G6R5_UNORM_PACK16:
-		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
-		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8_USCALED:
-		case VK_FORMAT_R8_SSCALED:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8_SRGB:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8_USCALED:
-		case VK_FORMAT_R8G8_SSCALED:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8_SRGB:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_R8G8B8A8_USCALED:
-		case VK_FORMAT_R8G8B8A8_SSCALED:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_SNORM:
-		case VK_FORMAT_B8G8R8A8_USCALED:
-		case VK_FORMAT_B8G8R8A8_SSCALED:
-		case VK_FORMAT_B8G8R8A8_UINT:
-		case VK_FORMAT_B8G8R8A8_SINT:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
-		case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
-		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
-		case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16_USCALED:
-		case VK_FORMAT_R16_SSCALED:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R16G16_USCALED:
-		case VK_FORMAT_R16G16_SSCALED:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16B16_UNORM:
-		case VK_FORMAT_R16G16B16_SNORM:
-		case VK_FORMAT_R16G16B16_USCALED:
-		case VK_FORMAT_R16G16B16_SSCALED:
-		case VK_FORMAT_R16G16B16_UINT:
-		case VK_FORMAT_R16G16B16_SINT:
-		case VK_FORMAT_R16G16B16A16_UNORM:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-		case VK_FORMAT_R16G16B16A16_USCALED:
-		case VK_FORMAT_R16G16B16A16_SSCALED:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32B32_UINT:
-		case VK_FORMAT_R32G32B32_SINT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R64_UINT:
-		case VK_FORMAT_R64_SINT:
-		case VK_FORMAT_R64G64_UINT:
-		case VK_FORMAT_R64G64_SINT:
-		case VK_FORMAT_R64G64B64_UINT:
-		case VK_FORMAT_R64G64B64_SINT:
-		case VK_FORMAT_R64G64B64A64_UINT:
-		case VK_FORMAT_R64G64B64A64_SINT:
-		case VK_FORMAT_D16_UNORM:
-		case VK_FORMAT_X8_D24_UNORM_PACK32:
-		case VK_FORMAT_S8_UINT:
-		case VK_FORMAT_D16_UNORM_S8_UINT:
-		case VK_FORMAT_D24_UNORM_S8_UINT:
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-			return false;
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R16G16B16_SFLOAT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R32G32B32_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_R64_SFLOAT:
-		case VK_FORMAT_R64G64_SFLOAT:
-		case VK_FORMAT_R64G64B64_SFLOAT:
-		case VK_FORMAT_R64G64B64A64_SFLOAT:
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-		case VK_FORMAT_D32_SFLOAT:
-		case VK_FORMAT_D32_SFLOAT_S8_UINT:
-			return true;
-		default:
-			UNSUPPORTED("Format: %d", int(format));
+	case VK_FORMAT_R4G4_UNORM_PACK8:
+	case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_B5G6R5_UNORM_PACK16:
+	case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+	case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8_USCALED:
+	case VK_FORMAT_R8_SSCALED:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8_SRGB:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8_USCALED:
+	case VK_FORMAT_R8G8_SSCALED:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8_SRGB:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_R8G8B8A8_USCALED:
+	case VK_FORMAT_R8G8B8A8_SSCALED:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_SNORM:
+	case VK_FORMAT_B8G8R8A8_USCALED:
+	case VK_FORMAT_B8G8R8A8_SSCALED:
+	case VK_FORMAT_B8G8R8A8_UINT:
+	case VK_FORMAT_B8G8R8A8_SINT:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+	case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
+	case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
+	case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16_USCALED:
+	case VK_FORMAT_R16_SSCALED:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R16G16_USCALED:
+	case VK_FORMAT_R16G16_SSCALED:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16B16_UNORM:
+	case VK_FORMAT_R16G16B16_SNORM:
+	case VK_FORMAT_R16G16B16_USCALED:
+	case VK_FORMAT_R16G16B16_SSCALED:
+	case VK_FORMAT_R16G16B16_UINT:
+	case VK_FORMAT_R16G16B16_SINT:
+	case VK_FORMAT_R16G16B16A16_UNORM:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+	case VK_FORMAT_R16G16B16A16_USCALED:
+	case VK_FORMAT_R16G16B16A16_SSCALED:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32B32_UINT:
+	case VK_FORMAT_R32G32B32_SINT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R64_UINT:
+	case VK_FORMAT_R64_SINT:
+	case VK_FORMAT_R64G64_UINT:
+	case VK_FORMAT_R64G64_SINT:
+	case VK_FORMAT_R64G64B64_UINT:
+	case VK_FORMAT_R64G64B64_SINT:
+	case VK_FORMAT_R64G64B64A64_UINT:
+	case VK_FORMAT_R64G64B64A64_SINT:
+	case VK_FORMAT_D16_UNORM:
+	case VK_FORMAT_X8_D24_UNORM_PACK32:
+	case VK_FORMAT_S8_UINT:
+	case VK_FORMAT_D16_UNORM_S8_UINT:
+	case VK_FORMAT_D24_UNORM_S8_UINT:
+	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+		return false;
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R16G16B16_SFLOAT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R32G32B32_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_R64_SFLOAT:
+	case VK_FORMAT_R64G64_SFLOAT:
+	case VK_FORMAT_R64G64B64_SFLOAT:
+	case VK_FORMAT_R64G64B64A64_SFLOAT:
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+	case VK_FORMAT_D32_SFLOAT:
+	case VK_FORMAT_D32_SFLOAT_S8_UINT:
+		return true;
+	default:
+		UNSUPPORTED("Format: %d", int(format));
 	}
 
 	return false;
@@ -457,11 +457,11 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-			return true;
-		default:
-			return false;
+	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+		return true;
+	default:
+		return false;
 	}
 }
 
@@ -469,63 +469,63 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-		case VK_FORMAT_BC2_UNORM_BLOCK:
-		case VK_FORMAT_BC2_SRGB_BLOCK:
-		case VK_FORMAT_BC3_UNORM_BLOCK:
-		case VK_FORMAT_BC3_SRGB_BLOCK:
-		case VK_FORMAT_BC4_UNORM_BLOCK:
-		case VK_FORMAT_BC4_SNORM_BLOCK:
-		case VK_FORMAT_BC5_UNORM_BLOCK:
-		case VK_FORMAT_BC5_SNORM_BLOCK:
-		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-		case VK_FORMAT_BC7_UNORM_BLOCK:
-		case VK_FORMAT_BC7_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-			return true;
-		default:
-			return false;
+	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+	case VK_FORMAT_BC2_UNORM_BLOCK:
+	case VK_FORMAT_BC2_SRGB_BLOCK:
+	case VK_FORMAT_BC3_UNORM_BLOCK:
+	case VK_FORMAT_BC3_SRGB_BLOCK:
+	case VK_FORMAT_BC4_UNORM_BLOCK:
+	case VK_FORMAT_BC4_SNORM_BLOCK:
+	case VK_FORMAT_BC5_UNORM_BLOCK:
+	case VK_FORMAT_BC5_SNORM_BLOCK:
+	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+	case VK_FORMAT_BC7_UNORM_BLOCK:
+	case VK_FORMAT_BC7_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+		return true;
+	default:
+		return false;
 	}
 }
 
@@ -534,80 +534,80 @@
 	// Note: our ETC2 decoder decompresses the 64 bit RGB compressed texel data to B8G8R8
 	switch(format)
 	{
-		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-			return VK_FORMAT_B8G8R8A8_UNORM;
-		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-			return VK_FORMAT_B8G8R8A8_SRGB;
-		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-			return VK_FORMAT_B8G8R8A8_UNORM;
-		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-			return VK_FORMAT_B8G8R8A8_SRGB;
-		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-			return VK_FORMAT_R16_UNORM;
-		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-			return VK_FORMAT_R16_SNORM;
-		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-			return VK_FORMAT_R16G16_UNORM;
-		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-			return VK_FORMAT_R16G16_SNORM;
-		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-		case VK_FORMAT_BC2_UNORM_BLOCK:
-		case VK_FORMAT_BC3_UNORM_BLOCK:
-		case VK_FORMAT_BC7_UNORM_BLOCK:
-			return VK_FORMAT_B8G8R8A8_UNORM;
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-		case VK_FORMAT_BC2_SRGB_BLOCK:
-		case VK_FORMAT_BC3_SRGB_BLOCK:
-		case VK_FORMAT_BC7_SRGB_BLOCK:
-			return VK_FORMAT_B8G8R8A8_SRGB;
-		case VK_FORMAT_BC4_UNORM_BLOCK:
-			return VK_FORMAT_R8_UNORM;
-		case VK_FORMAT_BC4_SNORM_BLOCK:
-			return VK_FORMAT_R8_SNORM;
-		case VK_FORMAT_BC5_UNORM_BLOCK:
-			return VK_FORMAT_R8G8_UNORM;
-		case VK_FORMAT_BC5_SNORM_BLOCK:
-			return VK_FORMAT_R8G8_SNORM;
-		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-			return VK_FORMAT_R8G8B8A8_UNORM;
-		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-			return VK_FORMAT_R8G8B8A8_SRGB;
-		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-			return VK_FORMAT_R16G16B16A16_SFLOAT;
-		default:
-			UNSUPPORTED("format: %d", int(format));
-			return VK_FORMAT_UNDEFINED;
+	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+		return VK_FORMAT_B8G8R8A8_UNORM;
+	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+		return VK_FORMAT_B8G8R8A8_SRGB;
+	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+		return VK_FORMAT_B8G8R8A8_UNORM;
+	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+		return VK_FORMAT_B8G8R8A8_SRGB;
+	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+		return VK_FORMAT_R16_UNORM;
+	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+		return VK_FORMAT_R16_SNORM;
+	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+		return VK_FORMAT_R16G16_UNORM;
+	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+		return VK_FORMAT_R16G16_SNORM;
+	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+	case VK_FORMAT_BC2_UNORM_BLOCK:
+	case VK_FORMAT_BC3_UNORM_BLOCK:
+	case VK_FORMAT_BC7_UNORM_BLOCK:
+		return VK_FORMAT_B8G8R8A8_UNORM;
+	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+	case VK_FORMAT_BC2_SRGB_BLOCK:
+	case VK_FORMAT_BC3_SRGB_BLOCK:
+	case VK_FORMAT_BC7_SRGB_BLOCK:
+		return VK_FORMAT_B8G8R8A8_SRGB;
+	case VK_FORMAT_BC4_UNORM_BLOCK:
+		return VK_FORMAT_R8_UNORM;
+	case VK_FORMAT_BC4_SNORM_BLOCK:
+		return VK_FORMAT_R8_SNORM;
+	case VK_FORMAT_BC5_UNORM_BLOCK:
+		return VK_FORMAT_R8G8_UNORM;
+	case VK_FORMAT_BC5_SNORM_BLOCK:
+		return VK_FORMAT_R8G8_SNORM;
+	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+		return VK_FORMAT_R8G8B8A8_UNORM;
+	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+		return VK_FORMAT_R8G8B8A8_SRGB;
+	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+		return VK_FORMAT_R16G16B16A16_SFLOAT;
+	default:
+		UNSUPPORTED("format: %d", int(format));
+		return VK_FORMAT_UNDEFINED;
 	}
 }
 
@@ -625,284 +625,284 @@
 	// Return a single format per group of compatible formats, for quick comparison
 	switch(format)
 	{
-		// 8 - bit, Block size 1 byte, 1 texel / block
-		case VK_FORMAT_R4G4_UNORM_PACK8:
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8_USCALED:
-		case VK_FORMAT_R8_SSCALED:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8_SRGB:
-			return VK_FORMAT_R8_UNORM;
+	// 8 - bit, Block size 1 byte, 1 texel / block
+	case VK_FORMAT_R4G4_UNORM_PACK8:
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8_USCALED:
+	case VK_FORMAT_R8_SSCALED:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8_SRGB:
+		return VK_FORMAT_R8_UNORM;
 
-		// 16 - bit, Block size 2 bytes, 1 texel / block
-		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
-		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-		case VK_FORMAT_B5G6R5_UNORM_PACK16:
-		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
-		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8_USCALED:
-		case VK_FORMAT_R8G8_SSCALED:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8_SRGB:
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16_USCALED:
-		case VK_FORMAT_R16_SSCALED:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R10X6_UNORM_PACK16:
-		case VK_FORMAT_R12X4_UNORM_PACK16:
-			return VK_FORMAT_R16_UNORM;
+	// 16 - bit, Block size 2 bytes, 1 texel / block
+	case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_B5G6R5_UNORM_PACK16:
+	case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+	case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8_USCALED:
+	case VK_FORMAT_R8G8_SSCALED:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8_SRGB:
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16_USCALED:
+	case VK_FORMAT_R16_SSCALED:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R10X6_UNORM_PACK16:
+	case VK_FORMAT_R12X4_UNORM_PACK16:
+		return VK_FORMAT_R16_UNORM;
 
-		// 32 - bit, Block size 4 bytes, 1 texel / block
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_R8G8B8A8_USCALED:
-		case VK_FORMAT_R8G8B8A8_SSCALED:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_SNORM:
-		case VK_FORMAT_B8G8R8A8_USCALED:
-		case VK_FORMAT_B8G8R8A8_SSCALED:
-		case VK_FORMAT_B8G8R8A8_UINT:
-		case VK_FORMAT_B8G8R8A8_SINT:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
-		case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
-		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
-		case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R16G16_USCALED:
-		case VK_FORMAT_R16G16_SSCALED:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-		case VK_FORMAT_R10X6G10X6_UNORM_2PACK16:
-		case VK_FORMAT_R12X4G12X4_UNORM_2PACK16:
-			return VK_FORMAT_R8G8B8A8_UNORM;
+	// 32 - bit, Block size 4 bytes, 1 texel / block
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_R8G8B8A8_USCALED:
+	case VK_FORMAT_R8G8B8A8_SSCALED:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_SNORM:
+	case VK_FORMAT_B8G8R8A8_USCALED:
+	case VK_FORMAT_B8G8R8A8_SSCALED:
+	case VK_FORMAT_B8G8R8A8_UINT:
+	case VK_FORMAT_B8G8R8A8_SINT:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+	case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
+	case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
+	case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R16G16_USCALED:
+	case VK_FORMAT_R16G16_SSCALED:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+	case VK_FORMAT_R10X6G10X6_UNORM_2PACK16:
+	case VK_FORMAT_R12X4G12X4_UNORM_2PACK16:
+		return VK_FORMAT_R8G8B8A8_UNORM;
 
-		// 48 - bit, Block size 6 bytes, 1 texel / block
-		case VK_FORMAT_R16G16B16_UNORM:
-		case VK_FORMAT_R16G16B16_SNORM:
-		case VK_FORMAT_R16G16B16_USCALED:
-		case VK_FORMAT_R16G16B16_SSCALED:
-		case VK_FORMAT_R16G16B16_UINT:
-		case VK_FORMAT_R16G16B16_SINT:
-		case VK_FORMAT_R16G16B16_SFLOAT:
-			return VK_FORMAT_R16G16B16_UNORM;
+	// 48 - bit, Block size 6 bytes, 1 texel / block
+	case VK_FORMAT_R16G16B16_UNORM:
+	case VK_FORMAT_R16G16B16_SNORM:
+	case VK_FORMAT_R16G16B16_USCALED:
+	case VK_FORMAT_R16G16B16_SSCALED:
+	case VK_FORMAT_R16G16B16_UINT:
+	case VK_FORMAT_R16G16B16_SINT:
+	case VK_FORMAT_R16G16B16_SFLOAT:
+		return VK_FORMAT_R16G16B16_UNORM;
 
-		// 64 - bit, Block size 8 bytes, 1 texel / block
-		case VK_FORMAT_R16G16B16A16_UNORM:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-		case VK_FORMAT_R16G16B16A16_USCALED:
-		case VK_FORMAT_R16G16B16A16_SSCALED:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R64_UINT:
-		case VK_FORMAT_R64_SINT:
-		case VK_FORMAT_R64_SFLOAT:
-			return VK_FORMAT_R16G16B16A16_UNORM;
+	// 64 - bit, Block size 8 bytes, 1 texel / block
+	case VK_FORMAT_R16G16B16A16_UNORM:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+	case VK_FORMAT_R16G16B16A16_USCALED:
+	case VK_FORMAT_R16G16B16A16_SSCALED:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R64_UINT:
+	case VK_FORMAT_R64_SINT:
+	case VK_FORMAT_R64_SFLOAT:
+		return VK_FORMAT_R16G16B16A16_UNORM;
 
-		// 96 - bit, Block size 12 bytes, 1 texel / block
-		case VK_FORMAT_R32G32B32_UINT:
-		case VK_FORMAT_R32G32B32_SINT:
-		case VK_FORMAT_R32G32B32_SFLOAT:
-			return VK_FORMAT_R32G32B32_UINT;
+	// 96 - bit, Block size 12 bytes, 1 texel / block
+	case VK_FORMAT_R32G32B32_UINT:
+	case VK_FORMAT_R32G32B32_SINT:
+	case VK_FORMAT_R32G32B32_SFLOAT:
+		return VK_FORMAT_R32G32B32_UINT;
 
-		// 128 - bit, Block size 16 bytes, 1 texel / block
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_R64G64_UINT:
-		case VK_FORMAT_R64G64_SINT:
-		case VK_FORMAT_R64G64_SFLOAT:
-			return VK_FORMAT_R32G32B32A32_UINT;
+	// 128 - bit, Block size 16 bytes, 1 texel / block
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_R64G64_UINT:
+	case VK_FORMAT_R64G64_SINT:
+	case VK_FORMAT_R64G64_SFLOAT:
+		return VK_FORMAT_R32G32B32A32_UINT;
 
-		// 192 - bit, Block size 24 bytes, 1 texel / block
-		case VK_FORMAT_R64G64B64_UINT:
-		case VK_FORMAT_R64G64B64_SINT:
-		case VK_FORMAT_R64G64B64_SFLOAT:
-			return VK_FORMAT_R64G64B64_UINT;
+	// 192 - bit, Block size 24 bytes, 1 texel / block
+	case VK_FORMAT_R64G64B64_UINT:
+	case VK_FORMAT_R64G64B64_SINT:
+	case VK_FORMAT_R64G64B64_SFLOAT:
+		return VK_FORMAT_R64G64B64_UINT;
 
-		// 256 - bit, Block size 32 bytes, 1 texel / block
-		case VK_FORMAT_R64G64B64A64_UINT:
-		case VK_FORMAT_R64G64B64A64_SINT:
-		case VK_FORMAT_R64G64B64A64_SFLOAT:
-			return VK_FORMAT_R64G64B64A64_UINT;
+	// 256 - bit, Block size 32 bytes, 1 texel / block
+	case VK_FORMAT_R64G64B64A64_UINT:
+	case VK_FORMAT_R64G64B64A64_SINT:
+	case VK_FORMAT_R64G64B64A64_SFLOAT:
+		return VK_FORMAT_R64G64B64A64_UINT;
 
-		// BC1_RGB(64 bit), Block size 8 bytes, 16 texels / block
-		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-			return VK_FORMAT_BC1_RGB_UNORM_BLOCK;
+	// BC1_RGB(64 bit), Block size 8 bytes, 16 texels / block
+	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+		return VK_FORMAT_BC1_RGB_UNORM_BLOCK;
 
-		// BC1_RGBA(64 bit), Block size 8 bytes, 16 texels / block
-		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-			return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
+	// BC1_RGBA(64 bit), Block size 8 bytes, 16 texels / block
+	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+		return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
 
-		// BC2(128 bit), Block size 16 bytes, 16 texels / block
-		case VK_FORMAT_BC2_UNORM_BLOCK:
-		case VK_FORMAT_BC2_SRGB_BLOCK:
-			return VK_FORMAT_BC2_UNORM_BLOCK;
+	// BC2(128 bit), Block size 16 bytes, 16 texels / block
+	case VK_FORMAT_BC2_UNORM_BLOCK:
+	case VK_FORMAT_BC2_SRGB_BLOCK:
+		return VK_FORMAT_BC2_UNORM_BLOCK;
 
-		// BC3(128 bit), Block size 16 bytes, 16 texels / block
-		case VK_FORMAT_BC3_UNORM_BLOCK:
-		case VK_FORMAT_BC3_SRGB_BLOCK:
-			return VK_FORMAT_BC3_UNORM_BLOCK;
+	// BC3(128 bit), Block size 16 bytes, 16 texels / block
+	case VK_FORMAT_BC3_UNORM_BLOCK:
+	case VK_FORMAT_BC3_SRGB_BLOCK:
+		return VK_FORMAT_BC3_UNORM_BLOCK;
 
-		// BC4(64 bit), Block size 8 bytes, 16 texels / block
-		case VK_FORMAT_BC4_UNORM_BLOCK:
-		case VK_FORMAT_BC4_SNORM_BLOCK:
-			return VK_FORMAT_BC4_UNORM_BLOCK;
+	// BC4(64 bit), Block size 8 bytes, 16 texels / block
+	case VK_FORMAT_BC4_UNORM_BLOCK:
+	case VK_FORMAT_BC4_SNORM_BLOCK:
+		return VK_FORMAT_BC4_UNORM_BLOCK;
 
-		// BC5(128 bit), Block size 16 bytes, 16 texels / block
-		case VK_FORMAT_BC5_UNORM_BLOCK:
-		case VK_FORMAT_BC5_SNORM_BLOCK:
-			return VK_FORMAT_BC5_UNORM_BLOCK;
+	// BC5(128 bit), Block size 16 bytes, 16 texels / block
+	case VK_FORMAT_BC5_UNORM_BLOCK:
+	case VK_FORMAT_BC5_SNORM_BLOCK:
+		return VK_FORMAT_BC5_UNORM_BLOCK;
 
-		// BC6H(128 bit), Block size 16 bytes, 16 texels / block
-		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-			return VK_FORMAT_BC6H_UFLOAT_BLOCK;
+	// BC6H(128 bit), Block size 16 bytes, 16 texels / block
+	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+		return VK_FORMAT_BC6H_UFLOAT_BLOCK;
 
-		// BC7(128 bit), Block size 16 bytes, 16 texels / block
-		case VK_FORMAT_BC7_UNORM_BLOCK:
-		case VK_FORMAT_BC7_SRGB_BLOCK:
-			return VK_FORMAT_BC7_UNORM_BLOCK;
+	// BC7(128 bit), Block size 16 bytes, 16 texels / block
+	case VK_FORMAT_BC7_UNORM_BLOCK:
+	case VK_FORMAT_BC7_SRGB_BLOCK:
+		return VK_FORMAT_BC7_UNORM_BLOCK;
 
-		// ETC2_RGB(64 bit), Block size 8 bytes, 16 texels / block
-		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-			return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
+	// ETC2_RGB(64 bit), Block size 8 bytes, 16 texels / block
+	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+		return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
 
-		// ETC2_RGBA(64 bit), Block size 8 bytes, 16 texels / block
-		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-			return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
+	// ETC2_RGBA(64 bit), Block size 8 bytes, 16 texels / block
+	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+		return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
 
-		// ETC2_EAC_RGBA(64 bit), Block size 8 bytes, 16 texels / block
-		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-			return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
+	// ETC2_EAC_RGBA(64 bit), Block size 8 bytes, 16 texels / block
+	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+		return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
 
-		// EAC_R(64 bit), Block size 8 bytes, 16 texels / block
-		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-			return VK_FORMAT_EAC_R11_UNORM_BLOCK;
+	// EAC_R(64 bit), Block size 8 bytes, 16 texels / block
+	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+		return VK_FORMAT_EAC_R11_UNORM_BLOCK;
 
-		// EAC_RG(128 bit), Block size 16 bytes, 16 texels / block
-		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-			return VK_FORMAT_EAC_R11G11_UNORM_BLOCK;
+	// EAC_RG(128 bit), Block size 16 bytes, 16 texels / block
+	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+		return VK_FORMAT_EAC_R11G11_UNORM_BLOCK;
 
-		// ASTC_4x4(128 bit), Block size 16 bytes, 16 texels / block
-		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-			return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
+	// ASTC_4x4(128 bit), Block size 16 bytes, 16 texels / block
+	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+		return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
 
-		// ASTC_5x4(128 bit), Block size 16 bytes, 20 texels / block
-		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-			return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
+	// ASTC_5x4(128 bit), Block size 16 bytes, 20 texels / block
+	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+		return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
 
-		// ASTC_5x5(128 bit), Block size 16 bytes, 25 texels / block
-		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-			return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
+	// ASTC_5x5(128 bit), Block size 16 bytes, 25 texels / block
+	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+		return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
 
-		// ASTC_6x5(128 bit), Block size 16 bytes, 30 texels / block
-		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-			return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
+	// ASTC_6x5(128 bit), Block size 16 bytes, 30 texels / block
+	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+		return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
 
-		// ASTC_6x6(128 bit), Block size 16 bytes, 36 texels / block
-		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-			return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
+	// ASTC_6x6(128 bit), Block size 16 bytes, 36 texels / block
+	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+		return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
 
-		// ASTC_8x5(128 bit), Block size 16 bytes, 40 texels / block
-		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-			return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
+	// ASTC_8x5(128 bit), Block size 16 bytes, 40 texels / block
+	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+		return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
 
-		// ASTC_8x6(128 bit), Block size 16 bytes, 48 texels / block
-		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-			return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
+	// ASTC_8x6(128 bit), Block size 16 bytes, 48 texels / block
+	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+		return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
 
-		// ASTC_8x8(128 bit), Block size 16 bytes, 64 texels / block
-		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-			return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
+	// ASTC_8x8(128 bit), Block size 16 bytes, 64 texels / block
+	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+		return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
 
-		// ASTC_10x5(128 bit), Block size 16 bytes, 50 texels / block
-		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-			return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
+	// ASTC_10x5(128 bit), Block size 16 bytes, 50 texels / block
+	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+		return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
 
-		// ASTC_10x6(128 bit), Block size 16 bytes, 60 texels / block
-		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-			return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
+	// ASTC_10x6(128 bit), Block size 16 bytes, 60 texels / block
+	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+		return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
 
-		// ASTC_10x8(128 bit), Block size 16 bytes, 80 texels / block
-		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-			return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
+	// ASTC_10x8(128 bit), Block size 16 bytes, 80 texels / block
+	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+		return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
 
-		// ASTC_10x10(128 bit), Block size 16 bytes, 100 texels / block
-		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-			return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
+	// ASTC_10x10(128 bit), Block size 16 bytes, 100 texels / block
+	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+		return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
 
-		// ASTC_12x10(128 bit), Block size 16 bytes, 120 texels / block
-		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-			return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
+	// ASTC_12x10(128 bit), Block size 16 bytes, 120 texels / block
+	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+		return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
 
-		// ASTC_12x12(128 bit), Block size 16 bytes, 144 texels / block
-		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-			return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
+	// ASTC_12x12(128 bit), Block size 16 bytes, 144 texels / block
+	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+		return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
 
-		// All other formats are only compatible with themselves
-		default:
-			return format;
+	// All other formats are only compatible with themselves
+	default:
+		return format;
 	}
 }
 
@@ -915,68 +915,68 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-		case VK_FORMAT_BC2_UNORM_BLOCK:
-		case VK_FORMAT_BC2_SRGB_BLOCK:
-		case VK_FORMAT_BC3_UNORM_BLOCK:
-		case VK_FORMAT_BC3_SRGB_BLOCK:
-		case VK_FORMAT_BC4_UNORM_BLOCK:
-		case VK_FORMAT_BC4_SNORM_BLOCK:
-		case VK_FORMAT_BC5_UNORM_BLOCK:
-		case VK_FORMAT_BC5_SNORM_BLOCK:
-		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-		case VK_FORMAT_BC7_UNORM_BLOCK:
-		case VK_FORMAT_BC7_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-			return 4;
-		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-			return 5;
-		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-			return 6;
-		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-			return 8;
-		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-			return 10;
-		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-			return 12;
-		default:
-			return 1;
+	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+	case VK_FORMAT_BC2_UNORM_BLOCK:
+	case VK_FORMAT_BC2_SRGB_BLOCK:
+	case VK_FORMAT_BC3_UNORM_BLOCK:
+	case VK_FORMAT_BC3_SRGB_BLOCK:
+	case VK_FORMAT_BC4_UNORM_BLOCK:
+	case VK_FORMAT_BC4_SNORM_BLOCK:
+	case VK_FORMAT_BC5_UNORM_BLOCK:
+	case VK_FORMAT_BC5_SNORM_BLOCK:
+	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+	case VK_FORMAT_BC7_UNORM_BLOCK:
+	case VK_FORMAT_BC7_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+		return 4;
+	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+		return 5;
+	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+		return 6;
+	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+		return 8;
+	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+		return 10;
+	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+		return 12;
+	default:
+		return 1;
 	}
 }
 
@@ -984,68 +984,68 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-		case VK_FORMAT_BC2_UNORM_BLOCK:
-		case VK_FORMAT_BC2_SRGB_BLOCK:
-		case VK_FORMAT_BC3_UNORM_BLOCK:
-		case VK_FORMAT_BC3_SRGB_BLOCK:
-		case VK_FORMAT_BC4_UNORM_BLOCK:
-		case VK_FORMAT_BC4_SNORM_BLOCK:
-		case VK_FORMAT_BC5_UNORM_BLOCK:
-		case VK_FORMAT_BC5_SNORM_BLOCK:
-		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-		case VK_FORMAT_BC7_UNORM_BLOCK:
-		case VK_FORMAT_BC7_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-			return 4;
-		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-			return 5;
-		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-			return 6;
-		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-			return 8;
-		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-			return 10;
-		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-			return 12;
-		default:
-			return 1;
+	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+	case VK_FORMAT_BC2_UNORM_BLOCK:
+	case VK_FORMAT_BC2_SRGB_BLOCK:
+	case VK_FORMAT_BC3_UNORM_BLOCK:
+	case VK_FORMAT_BC3_SRGB_BLOCK:
+	case VK_FORMAT_BC4_UNORM_BLOCK:
+	case VK_FORMAT_BC4_SNORM_BLOCK:
+	case VK_FORMAT_BC5_UNORM_BLOCK:
+	case VK_FORMAT_BC5_SNORM_BLOCK:
+	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+	case VK_FORMAT_BC7_UNORM_BLOCK:
+	case VK_FORMAT_BC7_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+		return 4;
+	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+		return 5;
+	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+		return 6;
+	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+		return 8;
+	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+		return 10;
+	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+		return 12;
+	default:
+		return 1;
 	}
 }
 
@@ -1053,64 +1053,64 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-		case VK_FORMAT_BC4_UNORM_BLOCK:
-		case VK_FORMAT_BC4_SNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-			return 8;
-		case VK_FORMAT_BC2_UNORM_BLOCK:
-		case VK_FORMAT_BC2_SRGB_BLOCK:
-		case VK_FORMAT_BC3_UNORM_BLOCK:
-		case VK_FORMAT_BC3_SRGB_BLOCK:
-		case VK_FORMAT_BC5_UNORM_BLOCK:
-		case VK_FORMAT_BC5_SNORM_BLOCK:
-		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-		case VK_FORMAT_BC7_UNORM_BLOCK:
-		case VK_FORMAT_BC7_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-			return 16;
-		default:
-			return bytes();
+	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+	case VK_FORMAT_BC4_UNORM_BLOCK:
+	case VK_FORMAT_BC4_SNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+		return 8;
+	case VK_FORMAT_BC2_UNORM_BLOCK:
+	case VK_FORMAT_BC2_SRGB_BLOCK:
+	case VK_FORMAT_BC3_UNORM_BLOCK:
+	case VK_FORMAT_BC3_SRGB_BLOCK:
+	case VK_FORMAT_BC5_UNORM_BLOCK:
+	case VK_FORMAT_BC5_SNORM_BLOCK:
+	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+	case VK_FORMAT_BC7_UNORM_BLOCK:
+	case VK_FORMAT_BC7_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+		return 16;
+	default:
+		return bytes();
 	}
 }
 
@@ -1118,184 +1118,184 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8_USCALED:
-		case VK_FORMAT_R8_SSCALED:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8_SRGB:
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16_USCALED:
-		case VK_FORMAT_R16_SSCALED:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R64_UINT:
-		case VK_FORMAT_R64_SINT:
-		case VK_FORMAT_R64_SFLOAT:
-		case VK_FORMAT_D16_UNORM:
-		case VK_FORMAT_X8_D24_UNORM_PACK32:
-		case VK_FORMAT_D32_SFLOAT:
-		case VK_FORMAT_S8_UINT:
-		case VK_FORMAT_D16_UNORM_S8_UINT:
-		case VK_FORMAT_D24_UNORM_S8_UINT:
-		case VK_FORMAT_D32_SFLOAT_S8_UINT:
-		case VK_FORMAT_BC4_UNORM_BLOCK:
-		case VK_FORMAT_BC4_SNORM_BLOCK:
-		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-			return 1;
-		case VK_FORMAT_R4G4_UNORM_PACK8:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8_USCALED:
-		case VK_FORMAT_R8G8_SSCALED:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8_SRGB:
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R16G16_USCALED:
-		case VK_FORMAT_R16G16_SSCALED:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R64G64_UINT:
-		case VK_FORMAT_R64G64_SINT:
-		case VK_FORMAT_R64G64_SFLOAT:
-		case VK_FORMAT_BC5_UNORM_BLOCK:
-		case VK_FORMAT_BC5_SNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-			return 2;
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-		case VK_FORMAT_B5G6R5_UNORM_PACK16:
-		case VK_FORMAT_R16G16B16_UNORM:
-		case VK_FORMAT_R16G16B16_SNORM:
-		case VK_FORMAT_R16G16B16_USCALED:
-		case VK_FORMAT_R16G16B16_SSCALED:
-		case VK_FORMAT_R16G16B16_UINT:
-		case VK_FORMAT_R16G16B16_SINT:
-		case VK_FORMAT_R16G16B16_SFLOAT:
-		case VK_FORMAT_R32G32B32_UINT:
-		case VK_FORMAT_R32G32B32_SINT:
-		case VK_FORMAT_R32G32B32_SFLOAT:
-		case VK_FORMAT_R64G64B64_UINT:
-		case VK_FORMAT_R64G64B64_SINT:
-		case VK_FORMAT_R64G64B64_SFLOAT:
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-			return 3;
-		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
-		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
-		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_R8G8B8A8_USCALED:
-		case VK_FORMAT_R8G8B8A8_SSCALED:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_SNORM:
-		case VK_FORMAT_B8G8R8A8_USCALED:
-		case VK_FORMAT_B8G8R8A8_SSCALED:
-		case VK_FORMAT_B8G8R8A8_UINT:
-		case VK_FORMAT_B8G8R8A8_SINT:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
-		case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
-		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
-		case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-		case VK_FORMAT_R16G16B16A16_UNORM:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-		case VK_FORMAT_R16G16B16A16_USCALED:
-		case VK_FORMAT_R16G16B16A16_SSCALED:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_R64G64B64A64_UINT:
-		case VK_FORMAT_R64G64B64A64_SINT:
-		case VK_FORMAT_R64G64B64A64_SFLOAT:
-		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-		case VK_FORMAT_BC2_UNORM_BLOCK:
-		case VK_FORMAT_BC2_SRGB_BLOCK:
-		case VK_FORMAT_BC3_UNORM_BLOCK:
-		case VK_FORMAT_BC3_SRGB_BLOCK:
-		case VK_FORMAT_BC7_UNORM_BLOCK:
-		case VK_FORMAT_BC7_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-			return 4;
-		default:
-			UNSUPPORTED("Format: %d", int(format));
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8_USCALED:
+	case VK_FORMAT_R8_SSCALED:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8_SRGB:
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16_USCALED:
+	case VK_FORMAT_R16_SSCALED:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R64_UINT:
+	case VK_FORMAT_R64_SINT:
+	case VK_FORMAT_R64_SFLOAT:
+	case VK_FORMAT_D16_UNORM:
+	case VK_FORMAT_X8_D24_UNORM_PACK32:
+	case VK_FORMAT_D32_SFLOAT:
+	case VK_FORMAT_S8_UINT:
+	case VK_FORMAT_D16_UNORM_S8_UINT:
+	case VK_FORMAT_D24_UNORM_S8_UINT:
+	case VK_FORMAT_D32_SFLOAT_S8_UINT:
+	case VK_FORMAT_BC4_UNORM_BLOCK:
+	case VK_FORMAT_BC4_SNORM_BLOCK:
+	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+		return 1;
+	case VK_FORMAT_R4G4_UNORM_PACK8:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8_USCALED:
+	case VK_FORMAT_R8G8_SSCALED:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8_SRGB:
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R16G16_USCALED:
+	case VK_FORMAT_R16G16_SSCALED:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R64G64_UINT:
+	case VK_FORMAT_R64G64_SINT:
+	case VK_FORMAT_R64G64_SFLOAT:
+	case VK_FORMAT_BC5_UNORM_BLOCK:
+	case VK_FORMAT_BC5_SNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+		return 2;
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_B5G6R5_UNORM_PACK16:
+	case VK_FORMAT_R16G16B16_UNORM:
+	case VK_FORMAT_R16G16B16_SNORM:
+	case VK_FORMAT_R16G16B16_USCALED:
+	case VK_FORMAT_R16G16B16_SSCALED:
+	case VK_FORMAT_R16G16B16_UINT:
+	case VK_FORMAT_R16G16B16_SINT:
+	case VK_FORMAT_R16G16B16_SFLOAT:
+	case VK_FORMAT_R32G32B32_UINT:
+	case VK_FORMAT_R32G32B32_SINT:
+	case VK_FORMAT_R32G32B32_SFLOAT:
+	case VK_FORMAT_R64G64B64_UINT:
+	case VK_FORMAT_R64G64B64_SINT:
+	case VK_FORMAT_R64G64B64_SFLOAT:
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+		return 3;
+	case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+	case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+	case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_R8G8B8A8_USCALED:
+	case VK_FORMAT_R8G8B8A8_SSCALED:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_SNORM:
+	case VK_FORMAT_B8G8R8A8_USCALED:
+	case VK_FORMAT_B8G8R8A8_SSCALED:
+	case VK_FORMAT_B8G8R8A8_UINT:
+	case VK_FORMAT_B8G8R8A8_SINT:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+	case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
+	case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
+	case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+	case VK_FORMAT_R16G16B16A16_UNORM:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+	case VK_FORMAT_R16G16B16A16_USCALED:
+	case VK_FORMAT_R16G16B16A16_SSCALED:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_R64G64B64A64_UINT:
+	case VK_FORMAT_R64G64B64A64_SINT:
+	case VK_FORMAT_R64G64B64A64_SFLOAT:
+	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+	case VK_FORMAT_BC2_UNORM_BLOCK:
+	case VK_FORMAT_BC2_SRGB_BLOCK:
+	case VK_FORMAT_BC3_UNORM_BLOCK:
+	case VK_FORMAT_BC3_SRGB_BLOCK:
+	case VK_FORMAT_BC7_UNORM_BLOCK:
+	case VK_FORMAT_BC7_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+		return 4;
+	default:
+		UNSUPPORTED("Format: %d", int(format));
 	}
 
 	return 1;
@@ -1305,188 +1305,188 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_UNDEFINED:
-		case VK_FORMAT_R4G4_UNORM_PACK8:
-		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
-		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-		case VK_FORMAT_B5G6R5_UNORM_PACK16:
-		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
-		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_SRGB:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8_USCALED:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_SRGB:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_USCALED:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_USCALED:
-		case VK_FORMAT_B8G8R8A8_UINT:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16_USCALED:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16_USCALED:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16B16_UNORM:
-		case VK_FORMAT_R16G16B16_USCALED:
-		case VK_FORMAT_R16G16B16_UINT:
-		case VK_FORMAT_R16G16B16A16_UNORM:
-		case VK_FORMAT_R16G16B16A16_USCALED:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32B32_UINT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R64_UINT:
-		case VK_FORMAT_R64G64_UINT:
-		case VK_FORMAT_R64G64B64_UINT:
-		case VK_FORMAT_R64G64B64A64_UINT:
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-		case VK_FORMAT_D16_UNORM:
-		case VK_FORMAT_X8_D24_UNORM_PACK32:
-		case VK_FORMAT_S8_UINT:
-		case VK_FORMAT_D16_UNORM_S8_UINT:
-		case VK_FORMAT_D24_UNORM_S8_UINT:
-		case VK_FORMAT_D32_SFLOAT:
-		case VK_FORMAT_D32_SFLOAT_S8_UINT:
-		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-		case VK_FORMAT_BC2_UNORM_BLOCK:
-		case VK_FORMAT_BC2_SRGB_BLOCK:
-		case VK_FORMAT_BC3_UNORM_BLOCK:
-		case VK_FORMAT_BC3_SRGB_BLOCK:
-		case VK_FORMAT_BC4_UNORM_BLOCK:
-		case VK_FORMAT_BC5_UNORM_BLOCK:
-		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-		case VK_FORMAT_BC7_UNORM_BLOCK:
-		case VK_FORMAT_BC7_SRGB_BLOCK:
-		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-			return true;
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_R8G8B8A8_SSCALED:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_B8G8R8A8_SNORM:
-		case VK_FORMAT_B8G8R8A8_SSCALED:
-		case VK_FORMAT_B8G8R8A8_SINT:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
-		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
-		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-		case VK_FORMAT_R16G16B16A16_SSCALED:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_R64G64B64A64_SINT:
-		case VK_FORMAT_R64G64B64A64_SFLOAT:
-		case VK_FORMAT_BC4_SNORM_BLOCK:
-		case VK_FORMAT_BC5_SNORM_BLOCK:
-		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-		// YCbCr formats treated as signed because VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY
-		// expects chroma components to be in range [-0.5, 0.5]
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-			return false;
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8_USCALED:
-		case VK_FORMAT_R8_SSCALED:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16_SSCALED:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R64_SINT:
-		case VK_FORMAT_R64_SFLOAT:
-			return component >= 1;
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8_SSCALED:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R16G16_SSCALED:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R64G64_SINT:
-		case VK_FORMAT_R64G64_SFLOAT:
-			return component >= 2;
-		case VK_FORMAT_R16G16B16_SNORM:
-		case VK_FORMAT_R16G16B16_SSCALED:
-		case VK_FORMAT_R16G16B16_SINT:
-		case VK_FORMAT_R16G16B16_SFLOAT:
-		case VK_FORMAT_R32G32B32_SINT:
-		case VK_FORMAT_R32G32B32_SFLOAT:
-		case VK_FORMAT_R64G64B64_SINT:
-		case VK_FORMAT_R64G64B64_SFLOAT:
-			return component >= 3;
-		default:
-			UNSUPPORTED("Format: %d", int(format));
+	case VK_FORMAT_UNDEFINED:
+	case VK_FORMAT_R4G4_UNORM_PACK8:
+	case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_B5G6R5_UNORM_PACK16:
+	case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+	case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_SRGB:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8_USCALED:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_SRGB:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_USCALED:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_USCALED:
+	case VK_FORMAT_B8G8R8A8_UINT:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16_USCALED:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16_USCALED:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16B16_UNORM:
+	case VK_FORMAT_R16G16B16_USCALED:
+	case VK_FORMAT_R16G16B16_UINT:
+	case VK_FORMAT_R16G16B16A16_UNORM:
+	case VK_FORMAT_R16G16B16A16_USCALED:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32B32_UINT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R64_UINT:
+	case VK_FORMAT_R64G64_UINT:
+	case VK_FORMAT_R64G64B64_UINT:
+	case VK_FORMAT_R64G64B64A64_UINT:
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+	case VK_FORMAT_D16_UNORM:
+	case VK_FORMAT_X8_D24_UNORM_PACK32:
+	case VK_FORMAT_S8_UINT:
+	case VK_FORMAT_D16_UNORM_S8_UINT:
+	case VK_FORMAT_D24_UNORM_S8_UINT:
+	case VK_FORMAT_D32_SFLOAT:
+	case VK_FORMAT_D32_SFLOAT_S8_UINT:
+	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+	case VK_FORMAT_BC2_UNORM_BLOCK:
+	case VK_FORMAT_BC2_SRGB_BLOCK:
+	case VK_FORMAT_BC3_UNORM_BLOCK:
+	case VK_FORMAT_BC3_SRGB_BLOCK:
+	case VK_FORMAT_BC4_UNORM_BLOCK:
+	case VK_FORMAT_BC5_UNORM_BLOCK:
+	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+	case VK_FORMAT_BC7_UNORM_BLOCK:
+	case VK_FORMAT_BC7_SRGB_BLOCK:
+	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+		return true;
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_R8G8B8A8_SSCALED:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_B8G8R8A8_SNORM:
+	case VK_FORMAT_B8G8R8A8_SSCALED:
+	case VK_FORMAT_B8G8R8A8_SINT:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
+	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
+	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+	case VK_FORMAT_R16G16B16A16_SSCALED:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_R64G64B64A64_SINT:
+	case VK_FORMAT_R64G64B64A64_SFLOAT:
+	case VK_FORMAT_BC4_SNORM_BLOCK:
+	case VK_FORMAT_BC5_SNORM_BLOCK:
+	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+	// YCbCr formats treated as signed because VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY
+	// expects chroma components to be in range [-0.5, 0.5]
+	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+		return false;
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8_USCALED:
+	case VK_FORMAT_R8_SSCALED:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16_SSCALED:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R64_SINT:
+	case VK_FORMAT_R64_SFLOAT:
+		return component >= 1;
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8_SSCALED:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R16G16_SSCALED:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R64G64_SINT:
+	case VK_FORMAT_R64G64_SFLOAT:
+		return component >= 2;
+	case VK_FORMAT_R16G16B16_SNORM:
+	case VK_FORMAT_R16G16B16_SSCALED:
+	case VK_FORMAT_R16G16B16_SINT:
+	case VK_FORMAT_R16G16B16_SFLOAT:
+	case VK_FORMAT_R32G32B32_SINT:
+	case VK_FORMAT_R32G32B32_SFLOAT:
+	case VK_FORMAT_R64G64B64_SINT:
+	case VK_FORMAT_R64G64B64_SFLOAT:
+		return component >= 3;
+	default:
+		UNSUPPORTED("Format: %d", int(format));
 	}
 
 	return false;
@@ -1496,208 +1496,208 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_UNDEFINED:
-			return 0;
-		case VK_FORMAT_R4G4_UNORM_PACK8:
-			return 1;
-		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
-		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-		case VK_FORMAT_B5G6R5_UNORM_PACK16:
-		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
-		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-			return 2;
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8_USCALED:
-		case VK_FORMAT_R8_SSCALED:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8_SRGB:
-			return 1;
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8_USCALED:
-		case VK_FORMAT_R8G8_SSCALED:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8_SRGB:
-			return 2;
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_R8G8B8A8_USCALED:
-		case VK_FORMAT_R8G8B8A8_SSCALED:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_SNORM:
-		case VK_FORMAT_B8G8R8A8_USCALED:
-		case VK_FORMAT_B8G8R8A8_SSCALED:
-		case VK_FORMAT_B8G8R8A8_UINT:
-		case VK_FORMAT_B8G8R8A8_SINT:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
-		case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
-		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
-		case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-			return 4;
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16_USCALED:
-		case VK_FORMAT_R16_SSCALED:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_SFLOAT:
-			return 2;
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R16G16_USCALED:
-		case VK_FORMAT_R16G16_SSCALED:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_SFLOAT:
-			return 4;
-		case VK_FORMAT_R16G16B16_UNORM:
-		case VK_FORMAT_R16G16B16_SNORM:
-		case VK_FORMAT_R16G16B16_USCALED:
-		case VK_FORMAT_R16G16B16_SSCALED:
-		case VK_FORMAT_R16G16B16_UINT:
-		case VK_FORMAT_R16G16B16_SINT:
-		case VK_FORMAT_R16G16B16_SFLOAT:
-			return 6;
-		case VK_FORMAT_R16G16B16A16_UNORM:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-		case VK_FORMAT_R16G16B16A16_USCALED:
-		case VK_FORMAT_R16G16B16A16_SSCALED:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-			return 8;
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_SFLOAT:
-			return 4;
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_SFLOAT:
-			return 8;
-		case VK_FORMAT_R32G32B32_UINT:
-		case VK_FORMAT_R32G32B32_SINT:
-		case VK_FORMAT_R32G32B32_SFLOAT:
-			return 12;
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-			return 16;
-		case VK_FORMAT_R64_UINT:
-		case VK_FORMAT_R64_SINT:
-		case VK_FORMAT_R64_SFLOAT:
-			return 8;
-		case VK_FORMAT_R64G64_UINT:
-		case VK_FORMAT_R64G64_SINT:
-		case VK_FORMAT_R64G64_SFLOAT:
-			return 16;
-		case VK_FORMAT_R64G64B64_UINT:
-		case VK_FORMAT_R64G64B64_SINT:
-		case VK_FORMAT_R64G64B64_SFLOAT:
-			return 24;
-		case VK_FORMAT_R64G64B64A64_UINT:
-		case VK_FORMAT_R64G64B64A64_SINT:
-		case VK_FORMAT_R64G64B64A64_SFLOAT:
-			return 32;
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32: return 4;
-		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32: return 4;
-		case VK_FORMAT_D16_UNORM: return 2;
-		case VK_FORMAT_X8_D24_UNORM_PACK32: return 4;
-		case VK_FORMAT_D32_SFLOAT: return 4;
-		case VK_FORMAT_S8_UINT: return 1;
-		case VK_FORMAT_D16_UNORM_S8_UINT: return 2;  // Separate depth and stencil planes  // TODO: ASSERT to ensure this is only called per-aspect?
-		case VK_FORMAT_D24_UNORM_S8_UINT: return 4;  // Combined depth and stencil planes  // TODO: ASSERT to ensure this is only called per-aspect?
-		case VK_FORMAT_D32_SFLOAT_S8_UINT:
-			return 4;  // Separate depth and stencil planes  // TODO: ASSERT to ensure this is only called per-aspect?
-		// Note: Compressed formats don't return bytes per pixel,
-		//       since these would be fractional. The returned value
-		//       is bytes per pixel for 1 column, so 2 for 64 bit 4x4
-		//       blocks and 4 for 128 bit 4x4 blocks.
-		case VK_FORMAT_BC1_RGB_UNORM_BLOCK: return 2;
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK: return 2;
-		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK: return 2;
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK: return 2;
-		case VK_FORMAT_BC2_UNORM_BLOCK: return 4;
-		case VK_FORMAT_BC2_SRGB_BLOCK: return 4;
-		case VK_FORMAT_BC3_UNORM_BLOCK: return 4;
-		case VK_FORMAT_BC3_SRGB_BLOCK: return 4;
-		case VK_FORMAT_BC4_UNORM_BLOCK: return 2;
-		case VK_FORMAT_BC4_SNORM_BLOCK: return 2;
-		case VK_FORMAT_BC5_UNORM_BLOCK: return 4;
-		case VK_FORMAT_BC5_SNORM_BLOCK: return 4;
-		case VK_FORMAT_BC6H_UFLOAT_BLOCK: return 4;
-		case VK_FORMAT_BC6H_SFLOAT_BLOCK: return 4;
-		case VK_FORMAT_BC7_UNORM_BLOCK: return 4;
-		case VK_FORMAT_BC7_SRGB_BLOCK: return 4;
-		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK: return 2;
-		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK: return 2;
-		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK: return 2;
-		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK: return 2;
-		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK: return 4;
-		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK: return 4;
-		case VK_FORMAT_EAC_R11_UNORM_BLOCK: return 2;
-		case VK_FORMAT_EAC_R11_SNORM_BLOCK: return 2;
-		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK: return 4;
-		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK: return 4;
-		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK: return 4;
-		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK: return 4;
-		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-			UNSUPPORTED("format: %d", int(format));
-			return 0;
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-			// TODO: ASSERT to ensure this is only called per-aspect?
-			return 1;  // Y plane only
-		default:
-			UNSUPPORTED("Format: %d", int(format));
+	case VK_FORMAT_UNDEFINED:
+		return 0;
+	case VK_FORMAT_R4G4_UNORM_PACK8:
+		return 1;
+	case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_B5G6R5_UNORM_PACK16:
+	case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+	case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		return 2;
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8_USCALED:
+	case VK_FORMAT_R8_SSCALED:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8_SRGB:
+		return 1;
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8_USCALED:
+	case VK_FORMAT_R8G8_SSCALED:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8_SRGB:
+		return 2;
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_R8G8B8A8_USCALED:
+	case VK_FORMAT_R8G8B8A8_SSCALED:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_SNORM:
+	case VK_FORMAT_B8G8R8A8_USCALED:
+	case VK_FORMAT_B8G8R8A8_SSCALED:
+	case VK_FORMAT_B8G8R8A8_UINT:
+	case VK_FORMAT_B8G8R8A8_SINT:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+	case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
+	case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
+	case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+		return 4;
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16_USCALED:
+	case VK_FORMAT_R16_SSCALED:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_SFLOAT:
+		return 2;
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R16G16_USCALED:
+	case VK_FORMAT_R16G16_SSCALED:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_SFLOAT:
+		return 4;
+	case VK_FORMAT_R16G16B16_UNORM:
+	case VK_FORMAT_R16G16B16_SNORM:
+	case VK_FORMAT_R16G16B16_USCALED:
+	case VK_FORMAT_R16G16B16_SSCALED:
+	case VK_FORMAT_R16G16B16_UINT:
+	case VK_FORMAT_R16G16B16_SINT:
+	case VK_FORMAT_R16G16B16_SFLOAT:
+		return 6;
+	case VK_FORMAT_R16G16B16A16_UNORM:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+	case VK_FORMAT_R16G16B16A16_USCALED:
+	case VK_FORMAT_R16G16B16A16_SSCALED:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+		return 8;
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_SFLOAT:
+		return 4;
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_SFLOAT:
+		return 8;
+	case VK_FORMAT_R32G32B32_UINT:
+	case VK_FORMAT_R32G32B32_SINT:
+	case VK_FORMAT_R32G32B32_SFLOAT:
+		return 12;
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+		return 16;
+	case VK_FORMAT_R64_UINT:
+	case VK_FORMAT_R64_SINT:
+	case VK_FORMAT_R64_SFLOAT:
+		return 8;
+	case VK_FORMAT_R64G64_UINT:
+	case VK_FORMAT_R64G64_SINT:
+	case VK_FORMAT_R64G64_SFLOAT:
+		return 16;
+	case VK_FORMAT_R64G64B64_UINT:
+	case VK_FORMAT_R64G64B64_SINT:
+	case VK_FORMAT_R64G64B64_SFLOAT:
+		return 24;
+	case VK_FORMAT_R64G64B64A64_UINT:
+	case VK_FORMAT_R64G64B64A64_SINT:
+	case VK_FORMAT_R64G64B64A64_SFLOAT:
+		return 32;
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32: return 4;
+	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32: return 4;
+	case VK_FORMAT_D16_UNORM: return 2;
+	case VK_FORMAT_X8_D24_UNORM_PACK32: return 4;
+	case VK_FORMAT_D32_SFLOAT: return 4;
+	case VK_FORMAT_S8_UINT: return 1;
+	case VK_FORMAT_D16_UNORM_S8_UINT: return 2;  // Separate depth and stencil planes  // TODO: ASSERT to ensure this is only called per-aspect?
+	case VK_FORMAT_D24_UNORM_S8_UINT: return 4;  // Combined depth and stencil planes  // TODO: ASSERT to ensure this is only called per-aspect?
+	case VK_FORMAT_D32_SFLOAT_S8_UINT:
+		return 4;  // Separate depth and stencil planes  // TODO: ASSERT to ensure this is only called per-aspect?
+	// Note: Compressed formats don't return bytes per pixel,
+	//       since these would be fractional. The returned value
+	//       is bytes per pixel for 1 column, so 2 for 64 bit 4x4
+	//       blocks and 4 for 128 bit 4x4 blocks.
+	case VK_FORMAT_BC1_RGB_UNORM_BLOCK: return 2;
+	case VK_FORMAT_BC1_RGB_SRGB_BLOCK: return 2;
+	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK: return 2;
+	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK: return 2;
+	case VK_FORMAT_BC2_UNORM_BLOCK: return 4;
+	case VK_FORMAT_BC2_SRGB_BLOCK: return 4;
+	case VK_FORMAT_BC3_UNORM_BLOCK: return 4;
+	case VK_FORMAT_BC3_SRGB_BLOCK: return 4;
+	case VK_FORMAT_BC4_UNORM_BLOCK: return 2;
+	case VK_FORMAT_BC4_SNORM_BLOCK: return 2;
+	case VK_FORMAT_BC5_UNORM_BLOCK: return 4;
+	case VK_FORMAT_BC5_SNORM_BLOCK: return 4;
+	case VK_FORMAT_BC6H_UFLOAT_BLOCK: return 4;
+	case VK_FORMAT_BC6H_SFLOAT_BLOCK: return 4;
+	case VK_FORMAT_BC7_UNORM_BLOCK: return 4;
+	case VK_FORMAT_BC7_SRGB_BLOCK: return 4;
+	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK: return 2;
+	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK: return 2;
+	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK: return 2;
+	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK: return 2;
+	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK: return 4;
+	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK: return 4;
+	case VK_FORMAT_EAC_R11_UNORM_BLOCK: return 2;
+	case VK_FORMAT_EAC_R11_SNORM_BLOCK: return 2;
+	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK: return 4;
+	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK: return 4;
+	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK: return 4;
+	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK: return 4;
+	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+		UNSUPPORTED("format: %d", int(format));
+		return 0;
+	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+		// TODO: ASSERT to ensure this is only called per-aspect?
+		return 1;  // Y plane only
+	default:
+		UNSUPPORTED("Format: %d", int(format));
 	}
 
 	return 0;
@@ -1715,72 +1715,72 @@
 
 	switch(format)
 	{
-		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-		case VK_FORMAT_BC4_UNORM_BLOCK:
-		case VK_FORMAT_BC4_SNORM_BLOCK:
-		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-			return 8 * ((width + 3) / 4);  // 64 bit per 4x4 block, computed per 4 rows
-		case VK_FORMAT_BC2_UNORM_BLOCK:
-		case VK_FORMAT_BC2_SRGB_BLOCK:
-		case VK_FORMAT_BC3_UNORM_BLOCK:
-		case VK_FORMAT_BC3_SRGB_BLOCK:
-		case VK_FORMAT_BC5_UNORM_BLOCK:
-		case VK_FORMAT_BC5_SNORM_BLOCK:
-		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-		case VK_FORMAT_BC7_UNORM_BLOCK:
-		case VK_FORMAT_BC7_SRGB_BLOCK:
-		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-			return 16 * ((width + 3) / 4);  // 128 bit per 4x4 block, computed per 4 rows
-		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-			return 16 * ((width + 4) / 5);
-		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-			return 16 * ((width + 5) / 6);
-		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-			return 16 * ((width + 7) / 8);
-		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-			return 16 * ((width + 9) / 10);
-		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-			return 16 * ((width + 11) / 12);
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-			return sw::align<16>(width);  // Y plane only  // TODO: ASSERT to ensure this is only called per-aspect?
-		default:
-			return bytes() * width;
+	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+	case VK_FORMAT_BC4_UNORM_BLOCK:
+	case VK_FORMAT_BC4_SNORM_BLOCK:
+	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+		return 8 * ((width + 3) / 4);  // 64 bit per 4x4 block, computed per 4 rows
+	case VK_FORMAT_BC2_UNORM_BLOCK:
+	case VK_FORMAT_BC2_SRGB_BLOCK:
+	case VK_FORMAT_BC3_UNORM_BLOCK:
+	case VK_FORMAT_BC3_SRGB_BLOCK:
+	case VK_FORMAT_BC5_UNORM_BLOCK:
+	case VK_FORMAT_BC5_SNORM_BLOCK:
+	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+	case VK_FORMAT_BC7_UNORM_BLOCK:
+	case VK_FORMAT_BC7_SRGB_BLOCK:
+	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+		return 16 * ((width + 3) / 4);  // 128 bit per 4x4 block, computed per 4 rows
+	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+		return 16 * ((width + 4) / 5);
+	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+		return 16 * ((width + 5) / 6);
+	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+		return 16 * ((width + 7) / 8);
+	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+		return 16 * ((width + 9) / 10);
+	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+		return 16 * ((width + 11) / 12);
+	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+		return sw::align<16>(width);  // Y plane only  // TODO: ASSERT to ensure this is only called per-aspect?
+	default:
+		return bytes() * width;
 	}
 }
 
@@ -1796,68 +1796,68 @@
 
 	switch(format)
 	{
-		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-		case VK_FORMAT_BC2_UNORM_BLOCK:
-		case VK_FORMAT_BC2_SRGB_BLOCK:
-		case VK_FORMAT_BC3_UNORM_BLOCK:
-		case VK_FORMAT_BC3_SRGB_BLOCK:
-		case VK_FORMAT_BC4_UNORM_BLOCK:
-		case VK_FORMAT_BC4_SNORM_BLOCK:
-		case VK_FORMAT_BC5_UNORM_BLOCK:
-		case VK_FORMAT_BC5_SNORM_BLOCK:
-		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-			return pitchB(width, border, target) * ((height + 3) / 4);  // Pitch computed per 4 rows
-		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-			return pitchB(width, border, target) * ((height + 4) / 5);  // Pitch computed per 5 rows
-		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-			return pitchB(width, border, target) * ((height + 5) / 6);  // Pitch computed per 6 rows
-		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-			return pitchB(width, border, target) * ((height + 7) / 8);  // Pitch computed per 8 rows
-		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-			return pitchB(width, border, target) * ((height + 9) / 10);  // Pitch computed per 10 rows
-		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-			return pitchB(width, border, target) * ((height + 11) / 12);  // Pitch computed per 12 rows
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-			// "Images in this format must be defined with a width and height that is a multiple of two."
-			return pitchB(width, border, target) * (height + height / 2);  // U and V planes are 1/4 size of Y plane.
-		default:
-			return pitchB(width, border, target) * height;  // Pitch computed per row
+	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+	case VK_FORMAT_BC2_UNORM_BLOCK:
+	case VK_FORMAT_BC2_SRGB_BLOCK:
+	case VK_FORMAT_BC3_UNORM_BLOCK:
+	case VK_FORMAT_BC3_SRGB_BLOCK:
+	case VK_FORMAT_BC4_UNORM_BLOCK:
+	case VK_FORMAT_BC4_SNORM_BLOCK:
+	case VK_FORMAT_BC5_UNORM_BLOCK:
+	case VK_FORMAT_BC5_SNORM_BLOCK:
+	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+		return pitchB(width, border, target) * ((height + 3) / 4);  // Pitch computed per 4 rows
+	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+		return pitchB(width, border, target) * ((height + 4) / 5);  // Pitch computed per 5 rows
+	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+		return pitchB(width, border, target) * ((height + 5) / 6);  // Pitch computed per 6 rows
+	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+		return pitchB(width, border, target) * ((height + 7) / 8);  // Pitch computed per 8 rows
+	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+		return pitchB(width, border, target) * ((height + 9) / 10);  // Pitch computed per 10 rows
+	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+		return pitchB(width, border, target) * ((height + 11) / 12);  // Pitch computed per 12 rows
+	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+		// "Images in this format must be defined with a width and height that is a multiple of two."
+		return pitchB(width, border, target) * (height + height / 2);  // U and V planes are 1/4 size of Y plane.
+	default:
+		return pitchB(width, border, target) * height;  // Pitch computed per row
 	}
 }
 
@@ -1870,123 +1870,123 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_R4G4_UNORM_PACK8:
-		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
-		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-			return sw::float4(0xF, 0xF, 0xF, 0xF);
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_R8_SRGB:
-		case VK_FORMAT_R8G8_SRGB:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-			return sw::float4(0xFF, 0xFF, 0xFF, 0xFF);
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_B8G8R8A8_SNORM:
-			return sw::float4(0x7F, 0x7F, 0x7F, 0x7F);
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16B16_UNORM:
-		case VK_FORMAT_R16G16B16A16_UNORM:
-			return sw::float4(0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R16G16B16_SNORM:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-			return sw::float4(0x7FFF, 0x7FFF, 0x7FFF, 0x7FFF);
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_B8G8R8A8_SINT:
-		case VK_FORMAT_B8G8R8A8_UINT:
-		case VK_FORMAT_R8_USCALED:
-		case VK_FORMAT_R8G8_USCALED:
-		case VK_FORMAT_R8G8B8A8_USCALED:
-		case VK_FORMAT_B8G8R8A8_USCALED:
-		case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
-		case VK_FORMAT_R8_SSCALED:
-		case VK_FORMAT_R8G8_SSCALED:
-		case VK_FORMAT_R8G8B8A8_SSCALED:
-		case VK_FORMAT_B8G8R8A8_SSCALED:
-		case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16_SSCALED:
-		case VK_FORMAT_R16G16_SSCALED:
-		case VK_FORMAT_R16G16B16_SSCALED:
-		case VK_FORMAT_R16G16B16A16_SSCALED:
-		case VK_FORMAT_R16_USCALED:
-		case VK_FORMAT_R16G16_USCALED:
-		case VK_FORMAT_R16G16B16_USCALED:
-		case VK_FORMAT_R16G16B16A16_USCALED:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32B32_SINT:
-		case VK_FORMAT_R32G32B32_UINT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_R32G32B32_SFLOAT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		case VK_FORMAT_R16G16B16_SFLOAT:
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
-		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
-		case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-			return sw::float4(1.0f, 1.0f, 1.0f, 1.0f);
-		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
-		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-			return sw::float4(0x1F, 0x1F, 0x1F, 0x01);
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-		case VK_FORMAT_B5G6R5_UNORM_PACK16:
-			return sw::float4(0x1F, 0x3F, 0x1F, 1.0f);
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-			return sw::float4(0x3FF, 0x3FF, 0x3FF, 0x03);
-		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-			return sw::float4(0x1FF, 0x1FF, 0x1FF, 0x01);
-		case VK_FORMAT_D16_UNORM:
-			return sw::float4(0xFFFF, 0.0f, 0.0f, 0.0f);
-		case VK_FORMAT_D24_UNORM_S8_UINT:
-		case VK_FORMAT_X8_D24_UNORM_PACK32:
-			return sw::float4(0xFFFFFF, 0.0f, 0.0f, 0.0f);
-		case VK_FORMAT_D32_SFLOAT:
-		case VK_FORMAT_D32_SFLOAT_S8_UINT:
-		case VK_FORMAT_S8_UINT:
-			return sw::float4(1.0f, 1.0f, 1.0f, 1.0f);
-		default:
-			UNSUPPORTED("format %d", int(format));
-			break;
+	case VK_FORMAT_R4G4_UNORM_PACK8:
+	case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
+	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+		return sw::float4(0xF, 0xF, 0xF, 0xF);
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_R8_SRGB:
+	case VK_FORMAT_R8G8_SRGB:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+		return sw::float4(0xFF, 0xFF, 0xFF, 0xFF);
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_B8G8R8A8_SNORM:
+		return sw::float4(0x7F, 0x7F, 0x7F, 0x7F);
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16B16_UNORM:
+	case VK_FORMAT_R16G16B16A16_UNORM:
+		return sw::float4(0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R16G16B16_SNORM:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+		return sw::float4(0x7FFF, 0x7FFF, 0x7FFF, 0x7FFF);
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_B8G8R8A8_SINT:
+	case VK_FORMAT_B8G8R8A8_UINT:
+	case VK_FORMAT_R8_USCALED:
+	case VK_FORMAT_R8G8_USCALED:
+	case VK_FORMAT_R8G8B8A8_USCALED:
+	case VK_FORMAT_B8G8R8A8_USCALED:
+	case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
+	case VK_FORMAT_R8_SSCALED:
+	case VK_FORMAT_R8G8_SSCALED:
+	case VK_FORMAT_R8G8B8A8_SSCALED:
+	case VK_FORMAT_B8G8R8A8_SSCALED:
+	case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16_SSCALED:
+	case VK_FORMAT_R16G16_SSCALED:
+	case VK_FORMAT_R16G16B16_SSCALED:
+	case VK_FORMAT_R16G16B16A16_SSCALED:
+	case VK_FORMAT_R16_USCALED:
+	case VK_FORMAT_R16G16_USCALED:
+	case VK_FORMAT_R16G16B16_USCALED:
+	case VK_FORMAT_R16G16B16A16_USCALED:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32B32_SINT:
+	case VK_FORMAT_R32G32B32_UINT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_R32G32B32_SFLOAT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	case VK_FORMAT_R16G16B16_SFLOAT:
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+	case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
+	case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
+	case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+		return sw::float4(1.0f, 1.0f, 1.0f, 1.0f);
+	case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
+	case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		return sw::float4(0x1F, 0x1F, 0x1F, 0x01);
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_B5G6R5_UNORM_PACK16:
+		return sw::float4(0x1F, 0x3F, 0x1F, 1.0f);
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		return sw::float4(0x3FF, 0x3FF, 0x3FF, 0x03);
+	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+		return sw::float4(0x1FF, 0x1FF, 0x1FF, 0x01);
+	case VK_FORMAT_D16_UNORM:
+		return sw::float4(0xFFFF, 0.0f, 0.0f, 0.0f);
+	case VK_FORMAT_D24_UNORM_S8_UINT:
+	case VK_FORMAT_X8_D24_UNORM_PACK32:
+		return sw::float4(0xFFFFFF, 0.0f, 0.0f, 0.0f);
+	case VK_FORMAT_D32_SFLOAT:
+	case VK_FORMAT_D32_SFLOAT_S8_UINT:
+	case VK_FORMAT_S8_UINT:
+		return sw::float4(1.0f, 1.0f, 1.0f, 1.0f);
+	default:
+		UNSUPPORTED("format %d", int(format));
+		break;
 	}
 
 	return sw::float4(1.0f, 1.0f, 1.0f, 1.0f);
@@ -1996,30 +1996,30 @@
 {
 	switch(format)
 	{
-		// Vulkan 1.1 mandatory
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		// Optional
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-			return true;
-		default:
-			return false;
+	// Vulkan 1.1 mandatory
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	// Optional
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		return true;
+	default:
+		return false;
 	}
 }
 
@@ -2032,24 +2032,24 @@
 
 	switch(format)
 	{
-		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-			return true;
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8_SRGB:
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_D16_UNORM:
-			return false;
-		default:
-			UNSUPPORTED("Format: %d", int(format));
+	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+		return true;
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8_SRGB:
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_D16_UNORM:
+		return false;
+	default:
+		UNSUPPORTED("Format: %d", int(format));
 	}
 
 	return false;
@@ -2059,72 +2059,72 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8_SRGB:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8_SRGB:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_SNORM:
-		case VK_FORMAT_B8G8R8A8_UINT:
-		case VK_FORMAT_B8G8R8A8_SINT:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_S8_UINT:
-			return true;
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R16G16B16A16_UNORM:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		case VK_FORMAT_D16_UNORM:
-			return false;
-		default:
-			UNSUPPORTED("Format: %d", int(format));
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8_SRGB:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8_SRGB:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_SNORM:
+	case VK_FORMAT_B8G8R8A8_UINT:
+	case VK_FORMAT_B8G8R8A8_SINT:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_S8_UINT:
+		return true;
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R16G16B16A16_UNORM:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+	case VK_FORMAT_D16_UNORM:
+		return false;
+	default:
+		UNSUPPORTED("Format: %d", int(format));
 	}
 
 	return false;
@@ -2134,72 +2134,72 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8_SRGB:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8_SRGB:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_SNORM:
-		case VK_FORMAT_B8G8R8A8_UINT:
-		case VK_FORMAT_B8G8R8A8_SINT:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		case VK_FORMAT_S8_UINT:
-			return false;
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R16G16B16A16_UNORM:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		case VK_FORMAT_D16_UNORM:
-			return true;
-		default:
-			UNSUPPORTED("Format: %d", int(format));
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8_SRGB:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8_SRGB:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_SNORM:
+	case VK_FORMAT_B8G8R8A8_UINT:
+	case VK_FORMAT_B8G8R8A8_SINT:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+	case VK_FORMAT_S8_UINT:
+		return false;
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R16G16B16A16_UNORM:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	case VK_FORMAT_D16_UNORM:
+		return true;
+	default:
+		UNSUPPORTED("Format: %d", int(format));
 	}
 
 	return false;
@@ -2209,72 +2209,72 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8_SRGB:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8_SRGB:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_SNORM:
-		case VK_FORMAT_B8G8R8A8_UINT:
-		case VK_FORMAT_B8G8R8A8_SINT:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R16G16B16A16_UNORM:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		case VK_FORMAT_D16_UNORM:
-		case VK_FORMAT_S8_UINT:
-			return false;
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-			return true;
-		default:
-			UNSUPPORTED("Format: %d", int(format));
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8_SRGB:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8_SRGB:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_SNORM:
+	case VK_FORMAT_B8G8R8A8_UINT:
+	case VK_FORMAT_B8G8R8A8_SINT:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R16G16B16A16_UNORM:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+	case VK_FORMAT_D16_UNORM:
+	case VK_FORMAT_S8_UINT:
+		return false;
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+		return true;
+	default:
+		UNSUPPORTED("Format: %d", int(format));
 	}
 
 	return false;
@@ -2284,70 +2284,70 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8_SRGB:
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_SFLOAT:
-			return component < 1;
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8_SRGB:
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_SFLOAT:
-			return component < 2;
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_SNORM:
-		case VK_FORMAT_B8G8R8A8_UINT:
-		case VK_FORMAT_B8G8R8A8_SINT:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_R16G16B16A16_UNORM:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-			return component < 3;
-		case VK_FORMAT_D32_SFLOAT:
-		case VK_FORMAT_D16_UNORM:
-		case VK_FORMAT_S8_UINT:
-			return false;
-		default:
-			UNSUPPORTED("Format: %d", int(format));
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8_SRGB:
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_SFLOAT:
+		return component < 1;
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8_SRGB:
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_SFLOAT:
+		return component < 2;
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_SNORM:
+	case VK_FORMAT_B8G8R8A8_UINT:
+	case VK_FORMAT_B8G8R8A8_SINT:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_R16G16B16A16_UNORM:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+		return component < 3;
+	case VK_FORMAT_D32_SFLOAT:
+	case VK_FORMAT_D16_UNORM:
+	case VK_FORMAT_S8_UINT:
+		return false;
+	default:
+		UNSUPPORTED("Format: %d", int(format));
 	}
 
 	return false;
diff --git a/src/Vulkan/VkFramebuffer.cpp b/src/Vulkan/VkFramebuffer.cpp
index 4f6a088..132169b 100644
--- a/src/Vulkan/VkFramebuffer.cpp
+++ b/src/Vulkan/VkFramebuffer.cpp
@@ -33,12 +33,12 @@
 	{
 		switch(curInfo->sType)
 		{
-			case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO:
-				attachmentsCreateInfo = reinterpret_cast<const VkFramebufferAttachmentsCreateInfo *>(curInfo);
-				break;
-			default:
-				LOG_TRAP("pFramebufferCreateInfo->pNext->sType = %s", vk::Stringify(curInfo->sType).c_str());
-				break;
+		case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO:
+			attachmentsCreateInfo = reinterpret_cast<const VkFramebufferAttachmentsCreateInfo *>(curInfo);
+			break;
+		default:
+			LOG_TRAP("pFramebufferCreateInfo->pNext->sType = %s", vk::Stringify(curInfo->sType).c_str());
+			break;
 		}
 		curInfo = curInfo->pNext;
 	}
@@ -204,12 +204,12 @@
 	{
 		switch(curInfo->sType)
 		{
-			case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO:
-				attachmentsInfo = reinterpret_cast<const VkFramebufferAttachmentsCreateInfo *>(curInfo);
-				break;
-			default:
-				LOG_TRAP("pFramebufferCreateInfo->pNext->sType = %s", vk::Stringify(curInfo->sType).c_str());
-				break;
+		case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO:
+			attachmentsInfo = reinterpret_cast<const VkFramebufferAttachmentsCreateInfo *>(curInfo);
+			break;
+		default:
+			LOG_TRAP("pFramebufferCreateInfo->pNext->sType = %s", vk::Stringify(curInfo->sType).c_str());
+			break;
 		}
 
 		curInfo = curInfo->pNext;
diff --git a/src/Vulkan/VkImage.cpp b/src/Vulkan/VkImage.cpp
index 596741c..6814eef 100644
--- a/src/Vulkan/VkImage.cpp
+++ b/src/Vulkan/VkImage.cpp
@@ -37,26 +37,26 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-			return ETC_Decoder::ETC_R_UNSIGNED;
-		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-			return ETC_Decoder::ETC_R_SIGNED;
-		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-			return ETC_Decoder::ETC_RG_UNSIGNED;
-		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-			return ETC_Decoder::ETC_RG_SIGNED;
-		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-			return ETC_Decoder::ETC_RGB;
-		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-			return ETC_Decoder::ETC_RGB_PUNCHTHROUGH_ALPHA;
-		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-			return ETC_Decoder::ETC_RGBA;
-		default:
-			UNSUPPORTED("format: %d", int(format));
-			return ETC_Decoder::ETC_RGBA;
+	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+		return ETC_Decoder::ETC_R_UNSIGNED;
+	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+		return ETC_Decoder::ETC_R_SIGNED;
+	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+		return ETC_Decoder::ETC_RG_UNSIGNED;
+	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+		return ETC_Decoder::ETC_RG_SIGNED;
+	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+		return ETC_Decoder::ETC_RGB;
+	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+		return ETC_Decoder::ETC_RGB_PUNCHTHROUGH_ALPHA;
+	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+		return ETC_Decoder::ETC_RGBA;
+	default:
+		UNSUPPORTED("format: %d", int(format));
+		return ETC_Decoder::ETC_RGBA;
 	}
 }
 
@@ -64,32 +64,32 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-			return 1;
-		case VK_FORMAT_BC2_UNORM_BLOCK:
-		case VK_FORMAT_BC2_SRGB_BLOCK:
-			return 2;
-		case VK_FORMAT_BC3_UNORM_BLOCK:
-		case VK_FORMAT_BC3_SRGB_BLOCK:
-			return 3;
-		case VK_FORMAT_BC4_UNORM_BLOCK:
-		case VK_FORMAT_BC4_SNORM_BLOCK:
-			return 4;
-		case VK_FORMAT_BC5_UNORM_BLOCK:
-		case VK_FORMAT_BC5_SNORM_BLOCK:
-			return 5;
-		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-			return 6;
-		case VK_FORMAT_BC7_UNORM_BLOCK:
-		case VK_FORMAT_BC7_SRGB_BLOCK:
-			return 7;
-		default:
-			UNSUPPORTED("format: %d", int(format));
-			return 0;
+	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+		return 1;
+	case VK_FORMAT_BC2_UNORM_BLOCK:
+	case VK_FORMAT_BC2_SRGB_BLOCK:
+		return 2;
+	case VK_FORMAT_BC3_UNORM_BLOCK:
+	case VK_FORMAT_BC3_SRGB_BLOCK:
+		return 3;
+	case VK_FORMAT_BC4_UNORM_BLOCK:
+	case VK_FORMAT_BC4_SNORM_BLOCK:
+		return 4;
+	case VK_FORMAT_BC5_UNORM_BLOCK:
+	case VK_FORMAT_BC5_SNORM_BLOCK:
+		return 5;
+	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+		return 6;
+	case VK_FORMAT_BC7_UNORM_BLOCK:
+	case VK_FORMAT_BC7_SRGB_BLOCK:
+		return 7;
+	default:
+		UNSUPPORTED("format: %d", int(format));
+		return 0;
 	}
 }
 
@@ -100,27 +100,27 @@
 {
 	switch(format)
 	{
-		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-		case VK_FORMAT_BC4_UNORM_BLOCK:
-		case VK_FORMAT_BC5_UNORM_BLOCK:
-		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-			return true;
-		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-		case VK_FORMAT_BC2_UNORM_BLOCK:
-		case VK_FORMAT_BC2_SRGB_BLOCK:
-		case VK_FORMAT_BC3_UNORM_BLOCK:
-		case VK_FORMAT_BC3_SRGB_BLOCK:
-		case VK_FORMAT_BC4_SNORM_BLOCK:
-		case VK_FORMAT_BC5_SNORM_BLOCK:
-		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-		case VK_FORMAT_BC7_SRGB_BLOCK:
-		case VK_FORMAT_BC7_UNORM_BLOCK:
-			return false;
-		default:
-			UNSUPPORTED("format: %d", int(format));
-			return false;
+	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+	case VK_FORMAT_BC4_UNORM_BLOCK:
+	case VK_FORMAT_BC5_UNORM_BLOCK:
+	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+		return true;
+	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+	case VK_FORMAT_BC2_UNORM_BLOCK:
+	case VK_FORMAT_BC2_SRGB_BLOCK:
+	case VK_FORMAT_BC3_UNORM_BLOCK:
+	case VK_FORMAT_BC3_SRGB_BLOCK:
+	case VK_FORMAT_BC4_SNORM_BLOCK:
+	case VK_FORMAT_BC5_SNORM_BLOCK:
+	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+	case VK_FORMAT_BC7_SRGB_BLOCK:
+	case VK_FORMAT_BC7_UNORM_BLOCK:
+		return false;
+	default:
+		UNSUPPORTED("format: %d", int(format));
+		return false;
 	}
 }
 
@@ -132,7 +132,7 @@
 		switch(nextInfo->sType)
 		{
 #ifdef __ANDROID__
-			case VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID:
+		case VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID:
 			{
 				const VkExternalFormatANDROID *externalFormatAndroid = reinterpret_cast<const VkExternalFormatANDROID *>(nextInfo);
 
@@ -148,9 +148,9 @@
 			}
 			break;
 #endif
-			default:
-				LOG_TRAP("pCreateInfo->pNext->sType = %s", vk::Stringify(nextInfo->sType).c_str());
-				break;
+		default:
+			LOG_TRAP("pCreateInfo->pNext->sType = %s", vk::Stringify(nextInfo->sType).c_str());
+			break;
 		}
 
 		nextInfo = nextInfo->pNext;
@@ -519,16 +519,16 @@
 {
 	switch(region.imageSubresource.aspectMask)
 	{
-		case VK_IMAGE_ASPECT_COLOR_BIT:
-		case VK_IMAGE_ASPECT_DEPTH_BIT:
-		case VK_IMAGE_ASPECT_STENCIL_BIT:
-		case VK_IMAGE_ASPECT_PLANE_0_BIT:
-		case VK_IMAGE_ASPECT_PLANE_1_BIT:
-		case VK_IMAGE_ASPECT_PLANE_2_BIT:
-			break;
-		default:
-			UNSUPPORTED("aspectMask %x", int(region.imageSubresource.aspectMask));
-			break;
+	case VK_IMAGE_ASPECT_COLOR_BIT:
+	case VK_IMAGE_ASPECT_DEPTH_BIT:
+	case VK_IMAGE_ASPECT_STENCIL_BIT:
+	case VK_IMAGE_ASPECT_PLANE_0_BIT:
+	case VK_IMAGE_ASPECT_PLANE_1_BIT:
+	case VK_IMAGE_ASPECT_PLANE_2_BIT:
+		break;
+	default:
+		UNSUPPORTED("aspectMask %x", int(region.imageSubresource.aspectMask));
+		break;
 	}
 
 	auto aspect = static_cast<VkImageAspectFlagBits>(region.imageSubresource.aspectMask);
@@ -760,29 +760,29 @@
 
 	switch(aspect)
 	{
-		case VK_IMAGE_ASPECT_COLOR_BIT:
-		case VK_IMAGE_ASPECT_DEPTH_BIT:
-		case VK_IMAGE_ASPECT_STENCIL_BIT:
-		case VK_IMAGE_ASPECT_PLANE_0_BIT:  // Vulkan 1.1 Table 31. Plane Format Compatibility Table: plane 0 of all defined formats is full resolution.
-			break;
-		case VK_IMAGE_ASPECT_PLANE_1_BIT:
-		case VK_IMAGE_ASPECT_PLANE_2_BIT:
-			switch(format)
-			{
-				case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-				case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-					ASSERT(mipLevelExtent.width % 2 == 0 && mipLevelExtent.height % 2 == 0);  // Vulkan 1.1: "Images in this format must be defined with a width and height that is a multiple of two."
-					// Vulkan 1.1 Table 31. Plane Format Compatibility Table:
-					// Half-resolution U and V planes.
-					mipLevelExtent.width /= 2;
-					mipLevelExtent.height /= 2;
-					break;
-				default:
-					UNSUPPORTED("format %d", int(format));
-			}
+	case VK_IMAGE_ASPECT_COLOR_BIT:
+	case VK_IMAGE_ASPECT_DEPTH_BIT:
+	case VK_IMAGE_ASPECT_STENCIL_BIT:
+	case VK_IMAGE_ASPECT_PLANE_0_BIT:  // Vulkan 1.1 Table 31. Plane Format Compatibility Table: plane 0 of all defined formats is full resolution.
+		break;
+	case VK_IMAGE_ASPECT_PLANE_1_BIT:
+	case VK_IMAGE_ASPECT_PLANE_2_BIT:
+		switch(format)
+		{
+		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+			ASSERT(mipLevelExtent.width % 2 == 0 && mipLevelExtent.height % 2 == 0);  // Vulkan 1.1: "Images in this format must be defined with a width and height that is a multiple of two."
+			// Vulkan 1.1 Table 31. Plane Format Compatibility Table:
+			// Half-resolution U and V planes.
+			mipLevelExtent.width /= 2;
+			mipLevelExtent.height /= 2;
 			break;
 		default:
-			UNSUPPORTED("aspect %x", int(aspect));
+			UNSUPPORTED("format %d", int(format));
+		}
+		break;
+	default:
+		UNSUPPORTED("aspect %x", int(aspect));
 	}
 
 	return mipLevelExtent;
@@ -851,37 +851,37 @@
 
 	switch(format)
 	{
-		case VK_FORMAT_D16_UNORM_S8_UINT:
-		case VK_FORMAT_D24_UNORM_S8_UINT:
-		case VK_FORMAT_D32_SFLOAT_S8_UINT:
-			if(aspect == VK_IMAGE_ASPECT_STENCIL_BIT)
-			{
-				// Offset by depth buffer to get to stencil buffer
-				return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_DEPTH_BIT);
-			}
-			break;
+	case VK_FORMAT_D16_UNORM_S8_UINT:
+	case VK_FORMAT_D24_UNORM_S8_UINT:
+	case VK_FORMAT_D32_SFLOAT_S8_UINT:
+		if(aspect == VK_IMAGE_ASPECT_STENCIL_BIT)
+		{
+			// Offset by depth buffer to get to stencil buffer
+			return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_DEPTH_BIT);
+		}
+		break;
 
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-			if(aspect == VK_IMAGE_ASPECT_PLANE_2_BIT)
-			{
-				return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_PLANE_1_BIT) + getStorageSize(VK_IMAGE_ASPECT_PLANE_0_BIT);
-			}
-			// Fall through to 2PLANE case:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-			if(aspect == VK_IMAGE_ASPECT_PLANE_1_BIT)
-			{
-				return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_PLANE_0_BIT);
-			}
-			else
-			{
-				ASSERT(aspect == VK_IMAGE_ASPECT_PLANE_0_BIT);
+	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+		if(aspect == VK_IMAGE_ASPECT_PLANE_2_BIT)
+		{
+			return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_PLANE_1_BIT) + getStorageSize(VK_IMAGE_ASPECT_PLANE_0_BIT);
+		}
+		// Fall through to 2PLANE case:
+	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+		if(aspect == VK_IMAGE_ASPECT_PLANE_1_BIT)
+		{
+			return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_PLANE_0_BIT);
+		}
+		else
+		{
+			ASSERT(aspect == VK_IMAGE_ASPECT_PLANE_0_BIT);
 
-				return memoryOffset;
-			}
-			break;
+			return memoryOffset;
+		}
+		break;
 
-		default:
-			break;
+	default:
+		break;
 	}
 
 	return memoryOffset;
@@ -1231,68 +1231,68 @@
 	{
 		switch(format)
 		{
-			case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-			case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-			case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-			case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
-			case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-			case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-			case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-			case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-			case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-			case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-				decodeETC2(subresource);
-				break;
-			case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-			case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-			case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-			case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-			case VK_FORMAT_BC2_UNORM_BLOCK:
-			case VK_FORMAT_BC2_SRGB_BLOCK:
-			case VK_FORMAT_BC3_UNORM_BLOCK:
-			case VK_FORMAT_BC3_SRGB_BLOCK:
-			case VK_FORMAT_BC4_UNORM_BLOCK:
-			case VK_FORMAT_BC4_SNORM_BLOCK:
-			case VK_FORMAT_BC5_UNORM_BLOCK:
-			case VK_FORMAT_BC5_SNORM_BLOCK:
-			case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-			case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-			case VK_FORMAT_BC7_UNORM_BLOCK:
-			case VK_FORMAT_BC7_SRGB_BLOCK:
-				decodeBC(subresource);
-				break;
-			case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-			case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-			case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-			case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-			case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-			case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-			case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-			case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-			case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-			case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-			case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-			case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-			case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-			case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-			case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-			case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-			case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-			case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-			case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-			case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-			case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-			case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-			case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-			case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-			case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-			case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-			case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-			case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
-				decodeASTC(subresource);
-				break;
-			default:
-				break;
+		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+			decodeETC2(subresource);
+			break;
+		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+		case VK_FORMAT_BC2_UNORM_BLOCK:
+		case VK_FORMAT_BC2_SRGB_BLOCK:
+		case VK_FORMAT_BC3_UNORM_BLOCK:
+		case VK_FORMAT_BC3_SRGB_BLOCK:
+		case VK_FORMAT_BC4_UNORM_BLOCK:
+		case VK_FORMAT_BC4_SNORM_BLOCK:
+		case VK_FORMAT_BC5_UNORM_BLOCK:
+		case VK_FORMAT_BC5_SNORM_BLOCK:
+		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+		case VK_FORMAT_BC7_UNORM_BLOCK:
+		case VK_FORMAT_BC7_SRGB_BLOCK:
+			decodeBC(subresource);
+			break;
+		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+			decodeASTC(subresource);
+			break;
+		default:
+			break;
 		}
 	}
 }
diff --git a/src/Vulkan/VkImageView.cpp b/src/Vulkan/VkImageView.cpp
index e629c77..d6756e9 100644
--- a/src/Vulkan/VkImageView.cpp
+++ b/src/Vulkan/VkImageView.cpp
@@ -122,34 +122,34 @@
 
 	switch(viewType)
 	{
-		case VK_IMAGE_VIEW_TYPE_1D:
-			return (imageType == VK_IMAGE_TYPE_1D) &&
-			       (subresourceRange.layerCount == 1);
-		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
-			return imageType == VK_IMAGE_TYPE_1D;
-		case VK_IMAGE_VIEW_TYPE_2D:
-			return ((imageType == VK_IMAGE_TYPE_2D) ||
-			        ((imageType == VK_IMAGE_TYPE_3D) &&
-			         (imageArrayLayers == 1))) &&
-			       (subresourceRange.layerCount == 1);
-		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
-			return (imageType == VK_IMAGE_TYPE_2D) ||
-			       ((imageType == VK_IMAGE_TYPE_3D) &&
-			        (imageArrayLayers == 1));
-		case VK_IMAGE_VIEW_TYPE_CUBE:
-			return image->isCube() &&
-			       (imageArrayLayers >= subresourceRange.layerCount) &&
-			       (subresourceRange.layerCount == 6);
-		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
-			return image->isCube() &&
-			       (imageArrayLayers >= subresourceRange.layerCount) &&
-			       (subresourceRange.layerCount >= 6);
-		case VK_IMAGE_VIEW_TYPE_3D:
-			return (imageType == VK_IMAGE_TYPE_3D) &&
-			       (imageArrayLayers == 1) &&
-			       (subresourceRange.layerCount == 1);
-		default:
-			UNREACHABLE("Unexpected viewType %d", (int)viewType);
+	case VK_IMAGE_VIEW_TYPE_1D:
+		return (imageType == VK_IMAGE_TYPE_1D) &&
+		       (subresourceRange.layerCount == 1);
+	case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
+		return imageType == VK_IMAGE_TYPE_1D;
+	case VK_IMAGE_VIEW_TYPE_2D:
+		return ((imageType == VK_IMAGE_TYPE_2D) ||
+		        ((imageType == VK_IMAGE_TYPE_3D) &&
+		         (imageArrayLayers == 1))) &&
+		       (subresourceRange.layerCount == 1);
+	case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
+		return (imageType == VK_IMAGE_TYPE_2D) ||
+		       ((imageType == VK_IMAGE_TYPE_3D) &&
+		        (imageArrayLayers == 1));
+	case VK_IMAGE_VIEW_TYPE_CUBE:
+		return image->isCube() &&
+		       (imageArrayLayers >= subresourceRange.layerCount) &&
+		       (subresourceRange.layerCount == 6);
+	case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
+		return image->isCube() &&
+		       (imageArrayLayers >= subresourceRange.layerCount) &&
+		       (subresourceRange.layerCount >= 6);
+	case VK_IMAGE_VIEW_TYPE_3D:
+		return (imageType == VK_IMAGE_TYPE_3D) &&
+		       (imageArrayLayers == 1) &&
+		       (subresourceRange.layerCount == 1);
+	default:
+		UNREACHABLE("Unexpected viewType %d", (int)viewType);
 	}
 
 	return false;
@@ -277,13 +277,13 @@
 {
 	switch(usage)
 	{
-		case RAW:
-			return image;
-		case SAMPLING:
-			return image->getSampledImage(format);
-		default:
-			UNREACHABLE("usage %d", int(usage));
-			return nullptr;
+	case RAW:
+		return image;
+	case SAMPLING:
+		return image->getSampledImage(format);
+	default:
+		UNREACHABLE("usage %d", int(usage));
+		return nullptr;
 	}
 }
 
diff --git a/src/Vulkan/VkImageView.hpp b/src/Vulkan/VkImageView.hpp
index 5bf6b50..5af89e7 100644
--- a/src/Vulkan/VkImageView.hpp
+++ b/src/Vulkan/VkImageView.hpp
@@ -96,11 +96,11 @@
 	{
 		switch(image->getSampleCountFlagBits())
 		{
-			case VK_SAMPLE_COUNT_1_BIT: return 1;
-			case VK_SAMPLE_COUNT_4_BIT: return 4;
-			default:
-				UNSUPPORTED("Sample count flags %d", image->getSampleCountFlagBits());
-				return 1;
+		case VK_SAMPLE_COUNT_1_BIT: return 1;
+		case VK_SAMPLE_COUNT_4_BIT: return 4;
+		default:
+			UNSUPPORTED("Sample count flags %d", image->getSampleCountFlagBits());
+			return 1;
 		}
 	}
 
diff --git a/src/Vulkan/VkPhysicalDevice.cpp b/src/Vulkan/VkPhysicalDevice.cpp
index dc507f8..bd12029 100644
--- a/src/Vulkan/VkPhysicalDevice.cpp
+++ b/src/Vulkan/VkPhysicalDevice.cpp
@@ -328,86 +328,86 @@
 		// Need to switch on an integer since Provoking Vertex isn't a part of the Vulkan spec.
 		switch((int)curExtension->sType)
 		{
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
-				getPhysicalDeviceVulkan11Features(reinterpret_cast<VkPhysicalDeviceVulkan11Features *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
-				getPhysicalDeviceVulkan12Features(reinterpret_cast<VkPhysicalDeviceVulkan12Features *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES:
-				getPhysicalDeviceMultiviewFeatures(reinterpret_cast<VkPhysicalDeviceMultiviewFeatures *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
-				getPhysicalDeviceVariablePointersFeatures(reinterpret_cast<VkPhysicalDeviceVariablePointersFeatures *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES:
-				getPhysicalDevice16BitStorageFeatures(reinterpret_cast<VkPhysicalDevice16BitStorageFeatures *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES:
-				getPhysicalDeviceSamplerYcbcrConversionFeatures(reinterpret_cast<VkPhysicalDeviceSamplerYcbcrConversionFeatures *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
-				getPhysicalDeviceProtectedMemoryFeatures(reinterpret_cast<VkPhysicalDeviceProtectedMemoryFeatures *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
-				getPhysicalDeviceShaderDrawParameterFeatures(reinterpret_cast<VkPhysicalDeviceShaderDrawParameterFeatures *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES:
-				getPhysicalDeviceHostQueryResetFeatures(reinterpret_cast<VkPhysicalDeviceHostQueryResetFeatures *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT:
-				getPhysicalDeviceImageRobustnessFeaturesEXT(reinterpret_cast<VkPhysicalDeviceImageRobustnessFeaturesEXT *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
-				getPhysicalDeviceLineRasterizationFeaturesEXT(reinterpret_cast<VkPhysicalDeviceLineRasterizationFeaturesEXT *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
-				getPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR(reinterpret_cast<VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR:
-				getPhysicalDevice8BitStorageFeaturesKHR(reinterpret_cast<VkPhysicalDevice8BitStorageFeaturesKHR *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
-				getPhysicalDeviceProvokingVertexFeaturesEXT(reinterpret_cast<VkPhysicalDeviceProvokingVertexFeaturesEXT *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES:
-				getPhysicalDeviceImagelessFramebufferFeatures(reinterpret_cast<VkPhysicalDeviceImagelessFramebufferFeatures *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR:
-				getPhysicalDeviceShaderSubgroupExtendedTypesFeatures(reinterpret_cast<VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES:
-				getPhysicalDeviceScalarBlockLayoutFeatures(reinterpret_cast<VkPhysicalDeviceScalarBlockLayoutFeatures *>(curExtension));
-				break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
+			getPhysicalDeviceVulkan11Features(reinterpret_cast<VkPhysicalDeviceVulkan11Features *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
+			getPhysicalDeviceVulkan12Features(reinterpret_cast<VkPhysicalDeviceVulkan12Features *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES:
+			getPhysicalDeviceMultiviewFeatures(reinterpret_cast<VkPhysicalDeviceMultiviewFeatures *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
+			getPhysicalDeviceVariablePointersFeatures(reinterpret_cast<VkPhysicalDeviceVariablePointersFeatures *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES:
+			getPhysicalDevice16BitStorageFeatures(reinterpret_cast<VkPhysicalDevice16BitStorageFeatures *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES:
+			getPhysicalDeviceSamplerYcbcrConversionFeatures(reinterpret_cast<VkPhysicalDeviceSamplerYcbcrConversionFeatures *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
+			getPhysicalDeviceProtectedMemoryFeatures(reinterpret_cast<VkPhysicalDeviceProtectedMemoryFeatures *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
+			getPhysicalDeviceShaderDrawParameterFeatures(reinterpret_cast<VkPhysicalDeviceShaderDrawParameterFeatures *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES:
+			getPhysicalDeviceHostQueryResetFeatures(reinterpret_cast<VkPhysicalDeviceHostQueryResetFeatures *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT:
+			getPhysicalDeviceImageRobustnessFeaturesEXT(reinterpret_cast<VkPhysicalDeviceImageRobustnessFeaturesEXT *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+			getPhysicalDeviceLineRasterizationFeaturesEXT(reinterpret_cast<VkPhysicalDeviceLineRasterizationFeaturesEXT *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
+			getPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR(reinterpret_cast<VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR:
+			getPhysicalDevice8BitStorageFeaturesKHR(reinterpret_cast<VkPhysicalDevice8BitStorageFeaturesKHR *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
+			getPhysicalDeviceProvokingVertexFeaturesEXT(reinterpret_cast<VkPhysicalDeviceProvokingVertexFeaturesEXT *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES:
+			getPhysicalDeviceImagelessFramebufferFeatures(reinterpret_cast<VkPhysicalDeviceImagelessFramebufferFeatures *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR:
+			getPhysicalDeviceShaderSubgroupExtendedTypesFeatures(reinterpret_cast<VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES:
+			getPhysicalDeviceScalarBlockLayoutFeatures(reinterpret_cast<VkPhysicalDeviceScalarBlockLayoutFeatures *>(curExtension));
+			break;
 #ifdef SWIFTSHADER_DEVICE_MEMORY_REPORT
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT:
-				getPhysicalDeviceDeviceMemoryReportFeaturesEXT(reinterpret_cast<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT *>(curExtension));
-				break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT:
+			getPhysicalDeviceDeviceMemoryReportFeaturesEXT(reinterpret_cast<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT *>(curExtension));
+			break;
 #endif  // SWIFTSHADER_DEVICE_MEMORY_REPORT
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES:
-				getPhysicalDeviceUniformBufferStandardLayoutFeatures(reinterpret_cast<VkPhysicalDeviceUniformBufferStandardLayoutFeatures *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
-				getPhysicalDeviceVulkanMemoryModelFeatures(reinterpret_cast<VkPhysicalDeviceVulkanMemoryModelFeatures *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES:
-				getPhysicalDeviceTimelineSemaphoreFeatures(reinterpret_cast<VkPhysicalDeviceTimelineSemaphoreFeatures *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES:
-				getPhysicalDeviceShaderAtomicInt64Features(reinterpret_cast<VkPhysicalDeviceShaderAtomicInt64Features *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES:
-				getPhysicalDeviceShaderFloat16Int8Features(reinterpret_cast<VkPhysicalDeviceShaderFloat16Int8Features *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
-				getPhysicalDeviceBufferDeviceAddressFeatures(reinterpret_cast<VkPhysicalDeviceBufferDeviceAddressFeatures *>(curExtension));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES:
-				getPhysicalDeviceDescriptorIndexingFeatures(reinterpret_cast<VkPhysicalDeviceDescriptorIndexingFeatures *>(curExtension));
-				break;
-			default:
-				LOG_TRAP("curExtension->pNext->sType = %s", vk::Stringify(curExtension->sType).c_str());
-				break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES:
+			getPhysicalDeviceUniformBufferStandardLayoutFeatures(reinterpret_cast<VkPhysicalDeviceUniformBufferStandardLayoutFeatures *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
+			getPhysicalDeviceVulkanMemoryModelFeatures(reinterpret_cast<VkPhysicalDeviceVulkanMemoryModelFeatures *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES:
+			getPhysicalDeviceTimelineSemaphoreFeatures(reinterpret_cast<VkPhysicalDeviceTimelineSemaphoreFeatures *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES:
+			getPhysicalDeviceShaderAtomicInt64Features(reinterpret_cast<VkPhysicalDeviceShaderAtomicInt64Features *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES:
+			getPhysicalDeviceShaderFloat16Int8Features(reinterpret_cast<VkPhysicalDeviceShaderFloat16Int8Features *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
+			getPhysicalDeviceBufferDeviceAddressFeatures(reinterpret_cast<VkPhysicalDeviceBufferDeviceAddressFeatures *>(curExtension));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES:
+			getPhysicalDeviceDescriptorIndexingFeatures(reinterpret_cast<VkPhysicalDeviceDescriptorIndexingFeatures *>(curExtension));
+			break;
+		default:
+			LOG_TRAP("curExtension->pNext->sType = %s", vk::Stringify(curExtension->sType).c_str());
+			break;
 		}
 		curExtension = reinterpret_cast<VkBaseOutStructure *>(curExtension->pNext);
 	}
@@ -822,7 +822,7 @@
 	{
 		switch(nextInfo->sType)
 		{
-			case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
+		case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
 			{
 				const auto *tlsInfo = reinterpret_cast<const VkSemaphoreTypeCreateInfo *>(nextInfo);
 				// Timeline Semaphore does not support external semaphore
@@ -835,9 +835,9 @@
 				}
 			}
 			break;
-			default:
-				WARN("nextInfo->sType = %s", vk::Stringify(nextInfo->sType).c_str());
-				break;
+		default:
+			WARN("nextInfo->sType = %s", vk::Stringify(nextInfo->sType).c_str());
+			break;
 		}
 	}
 
@@ -1039,265 +1039,265 @@
 
 	switch(format)
 	{
-		// Formats which can be sampled *and* filtered
-		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8_SRGB:
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8_SRGB:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R16G16B16A16_UNORM:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
-		case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
-		case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
-		case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
-		case VK_FORMAT_BC2_UNORM_BLOCK:
-		case VK_FORMAT_BC2_SRGB_BLOCK:
-		case VK_FORMAT_BC3_UNORM_BLOCK:
-		case VK_FORMAT_BC3_SRGB_BLOCK:
-		case VK_FORMAT_BC4_UNORM_BLOCK:
-		case VK_FORMAT_BC4_SNORM_BLOCK:
-		case VK_FORMAT_BC5_UNORM_BLOCK:
-		case VK_FORMAT_BC5_SNORM_BLOCK:
-		case VK_FORMAT_BC6H_UFLOAT_BLOCK:
-		case VK_FORMAT_BC6H_SFLOAT_BLOCK:
-		case VK_FORMAT_BC7_UNORM_BLOCK:
-		case VK_FORMAT_BC7_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
-		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
-		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
-		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
+	// Formats which can be sampled *and* filtered
+	case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8_SRGB:
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8_SRGB:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R16G16B16A16_UNORM:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+	case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
+	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
+	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
+	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
+	case VK_FORMAT_BC2_UNORM_BLOCK:
+	case VK_FORMAT_BC2_SRGB_BLOCK:
+	case VK_FORMAT_BC3_UNORM_BLOCK:
+	case VK_FORMAT_BC3_SRGB_BLOCK:
+	case VK_FORMAT_BC4_UNORM_BLOCK:
+	case VK_FORMAT_BC4_SNORM_BLOCK:
+	case VK_FORMAT_BC5_UNORM_BLOCK:
+	case VK_FORMAT_BC5_SNORM_BLOCK:
+	case VK_FORMAT_BC6H_UFLOAT_BLOCK:
+	case VK_FORMAT_BC6H_SFLOAT_BLOCK:
+	case VK_FORMAT_BC7_UNORM_BLOCK:
+	case VK_FORMAT_BC7_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
+	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
+	case VK_FORMAT_EAC_R11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11_SNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
+	case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
 #ifdef SWIFTSHADER_ENABLE_ASTC
-		case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
-		case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
-		case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
+	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
+	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
 #endif
-		case VK_FORMAT_D16_UNORM:
-		case VK_FORMAT_D32_SFLOAT:
-		case VK_FORMAT_D32_SFLOAT_S8_UINT:
-			pFormatProperties->optimalTilingFeatures |=
-			    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
-			// [[fallthrough]]
+	case VK_FORMAT_D16_UNORM:
+	case VK_FORMAT_D32_SFLOAT:
+	case VK_FORMAT_D32_SFLOAT_S8_UINT:
+		pFormatProperties->optimalTilingFeatures |=
+		    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
+		// [[fallthrough]]
 
-		// Formats which can be sampled, but don't support filtering
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_S8_UINT:
-			pFormatProperties->optimalTilingFeatures |=
-			    VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
-			    VK_FORMAT_FEATURE_BLIT_SRC_BIT |
-			    VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
-			    VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
-			break;
+	// Formats which can be sampled, but don't support filtering
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_S8_UINT:
+		pFormatProperties->optimalTilingFeatures |=
+		    VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
+		    VK_FORMAT_FEATURE_BLIT_SRC_BIT |
+		    VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
+		    VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
+		break;
 
-		// YCbCr formats:
-		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-			pFormatProperties->optimalTilingFeatures |=
-			    VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
-			    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT |
-			    VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
-			    VK_FORMAT_FEATURE_TRANSFER_DST_BIT |
-			    VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT;
-			break;
-		default:
-			break;
+	// YCbCr formats:
+	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+		pFormatProperties->optimalTilingFeatures |=
+		    VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
+		    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT |
+		    VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
+		    VK_FORMAT_FEATURE_TRANSFER_DST_BIT |
+		    VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT;
+		break;
+	default:
+		break;
 	}
 
 	switch(format)
 	{
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32_SINT:
-			pFormatProperties->optimalTilingFeatures |=
-			    VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT;
-			pFormatProperties->bufferFeatures |=
-			    VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT;
-			// [[fallthrough]]
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		// shaderStorageImageExtendedFormats
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R16G16B16A16_UNORM:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R8_UINT:
-			pFormatProperties->optimalTilingFeatures |=
-			    VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
-			// [[fallthrough]]
-			pFormatProperties->bufferFeatures |=
-			    VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT;
-			break;
-		default:
-			break;
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32_SINT:
+		pFormatProperties->optimalTilingFeatures |=
+		    VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT;
+		pFormatProperties->bufferFeatures |=
+		    VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT;
+		// [[fallthrough]]
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	// shaderStorageImageExtendedFormats
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R16G16B16A16_UNORM:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R8_UINT:
+		pFormatProperties->optimalTilingFeatures |=
+		    VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
+		// [[fallthrough]]
+		pFormatProperties->bufferFeatures |=
+		    VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT;
+		break;
+	default:
+		break;
 	}
 
 	switch(format)
 	{
-		case VK_FORMAT_R5G6B5_UNORM_PACK16:
-		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SRGB:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_B8G8R8A8_SRGB:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-			pFormatProperties->optimalTilingFeatures |=
-			    VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
-			    VK_FORMAT_FEATURE_BLIT_DST_BIT;
-			break;
-		case VK_FORMAT_S8_UINT:
-		case VK_FORMAT_D16_UNORM:
-		case VK_FORMAT_D32_SFLOAT:          // Note: either VK_FORMAT_D32_SFLOAT or VK_FORMAT_X8_D24_UNORM_PACK32 must be supported
-		case VK_FORMAT_D32_SFLOAT_S8_UINT:  // Note: either VK_FORMAT_D24_UNORM_S8_UINT or VK_FORMAT_D32_SFLOAT_S8_UINT must be supported
-			pFormatProperties->optimalTilingFeatures |=
-			    VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
-			break;
-		default:
-			break;
+	case VK_FORMAT_R5G6B5_UNORM_PACK16:
+	case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SRGB:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_B8G8R8A8_SRGB:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+		pFormatProperties->optimalTilingFeatures |=
+		    VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
+		    VK_FORMAT_FEATURE_BLIT_DST_BIT;
+		break;
+	case VK_FORMAT_S8_UINT:
+	case VK_FORMAT_D16_UNORM:
+	case VK_FORMAT_D32_SFLOAT:          // Note: either VK_FORMAT_D32_SFLOAT or VK_FORMAT_X8_D24_UNORM_PACK32 must be supported
+	case VK_FORMAT_D32_SFLOAT_S8_UINT:  // Note: either VK_FORMAT_D24_UNORM_S8_UINT or VK_FORMAT_D32_SFLOAT_S8_UINT must be supported
+		pFormatProperties->optimalTilingFeatures |=
+		    VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
+		break;
+	default:
+		break;
 	}
 
 	if(format.supportsColorAttachmentBlend())
@@ -1308,114 +1308,114 @@
 
 	switch(format)
 	{
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_SINT_PACK32:
-		case VK_FORMAT_R16_UNORM:
-		case VK_FORMAT_R16_SNORM:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R16G16_UNORM:
-		case VK_FORMAT_R16G16_SNORM:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R16G16B16A16_UNORM:
-		case VK_FORMAT_R16G16B16A16_SNORM:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R32G32B32_UINT:
-		case VK_FORMAT_R32G32B32_SINT:
-		case VK_FORMAT_R32G32B32_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-			pFormatProperties->bufferFeatures |=
-			    VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT;
-			break;
-		default:
-			break;
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+	case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+	case VK_FORMAT_R16_UNORM:
+	case VK_FORMAT_R16_SNORM:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R16G16_UNORM:
+	case VK_FORMAT_R16G16_SNORM:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R16G16B16A16_UNORM:
+	case VK_FORMAT_R16G16B16A16_SNORM:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R32G32B32_UINT:
+	case VK_FORMAT_R32G32B32_SINT:
+	case VK_FORMAT_R32G32B32_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+		pFormatProperties->bufferFeatures |=
+		    VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT;
+		break;
+	default:
+		break;
 	}
 
 	switch(format)
 	{
-		// Vulkan 1.1 mandatory
-		case VK_FORMAT_R8_UNORM:
-		case VK_FORMAT_R8_SNORM:
-		case VK_FORMAT_R8_UINT:
-		case VK_FORMAT_R8_SINT:
-		case VK_FORMAT_R8G8_UNORM:
-		case VK_FORMAT_R8G8_SNORM:
-		case VK_FORMAT_R8G8_UINT:
-		case VK_FORMAT_R8G8_SINT:
-		case VK_FORMAT_R8G8B8A8_UNORM:
-		case VK_FORMAT_R8G8B8A8_SNORM:
-		case VK_FORMAT_R8G8B8A8_UINT:
-		case VK_FORMAT_R8G8B8A8_SINT:
-		case VK_FORMAT_B8G8R8A8_UNORM:
-		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
-		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
-		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
-		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
-		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
-		case VK_FORMAT_R16_UINT:
-		case VK_FORMAT_R16_SINT:
-		case VK_FORMAT_R16_SFLOAT:
-		case VK_FORMAT_R16G16_UINT:
-		case VK_FORMAT_R16G16_SINT:
-		case VK_FORMAT_R16G16_SFLOAT:
-		case VK_FORMAT_R16G16B16A16_UINT:
-		case VK_FORMAT_R16G16B16A16_SINT:
-		case VK_FORMAT_R16G16B16A16_SFLOAT:
-		case VK_FORMAT_R32_UINT:
-		case VK_FORMAT_R32_SINT:
-		case VK_FORMAT_R32_SFLOAT:
-		case VK_FORMAT_R32G32_UINT:
-		case VK_FORMAT_R32G32_SINT:
-		case VK_FORMAT_R32G32_SFLOAT:
-		case VK_FORMAT_R32G32B32A32_UINT:
-		case VK_FORMAT_R32G32B32A32_SINT:
-		case VK_FORMAT_R32G32B32A32_SFLOAT:
-		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
-		// Optional
-		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
-		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
-			pFormatProperties->bufferFeatures |=
-			    VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
-			break;
-		default:
-			break;
+	// Vulkan 1.1 mandatory
+	case VK_FORMAT_R8_UNORM:
+	case VK_FORMAT_R8_SNORM:
+	case VK_FORMAT_R8_UINT:
+	case VK_FORMAT_R8_SINT:
+	case VK_FORMAT_R8G8_UNORM:
+	case VK_FORMAT_R8G8_SNORM:
+	case VK_FORMAT_R8G8_UINT:
+	case VK_FORMAT_R8G8_SINT:
+	case VK_FORMAT_R8G8B8A8_UNORM:
+	case VK_FORMAT_R8G8B8A8_SNORM:
+	case VK_FORMAT_R8G8B8A8_UINT:
+	case VK_FORMAT_R8G8B8A8_SINT:
+	case VK_FORMAT_B8G8R8A8_UNORM:
+	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+	case VK_FORMAT_A8B8G8R8_UINT_PACK32:
+	case VK_FORMAT_A8B8G8R8_SINT_PACK32:
+	case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+	case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+	case VK_FORMAT_R16_UINT:
+	case VK_FORMAT_R16_SINT:
+	case VK_FORMAT_R16_SFLOAT:
+	case VK_FORMAT_R16G16_UINT:
+	case VK_FORMAT_R16G16_SINT:
+	case VK_FORMAT_R16G16_SFLOAT:
+	case VK_FORMAT_R16G16B16A16_UINT:
+	case VK_FORMAT_R16G16B16A16_SINT:
+	case VK_FORMAT_R16G16B16A16_SFLOAT:
+	case VK_FORMAT_R32_UINT:
+	case VK_FORMAT_R32_SINT:
+	case VK_FORMAT_R32_SFLOAT:
+	case VK_FORMAT_R32G32_UINT:
+	case VK_FORMAT_R32G32_SINT:
+	case VK_FORMAT_R32G32_SFLOAT:
+	case VK_FORMAT_R32G32B32A32_UINT:
+	case VK_FORMAT_R32G32B32A32_SINT:
+	case VK_FORMAT_R32G32B32A32_SFLOAT:
+	case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
+	// Optional
+	case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+	case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		pFormatProperties->bufferFeatures |=
+		    VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
+		break;
+	default:
+		break;
 	}
 
 	if(pFormatProperties->optimalTilingFeatures)
@@ -1447,44 +1447,44 @@
 
 	switch(type)
 	{
-		case VK_IMAGE_TYPE_1D:
-			pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_1D;
-			pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_1D - 1);
-			pImageFormatProperties->maxExtent.height = 1;
-			break;
-		case VK_IMAGE_TYPE_2D:
-			if(flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
-			{
-				pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_CUBE;
-				pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_CUBE - 1);
-				pImageFormatProperties->maxExtent.height = 1 << (vk::MAX_IMAGE_LEVELS_CUBE - 1);
-			}
-			else
-			{
-				pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_2D;
-				pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_2D - 1);
-				pImageFormatProperties->maxExtent.height = 1 << (vk::MAX_IMAGE_LEVELS_2D - 1);
+	case VK_IMAGE_TYPE_1D:
+		pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_1D;
+		pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_1D - 1);
+		pImageFormatProperties->maxExtent.height = 1;
+		break;
+	case VK_IMAGE_TYPE_2D:
+		if(flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
+		{
+			pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_CUBE;
+			pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_CUBE - 1);
+			pImageFormatProperties->maxExtent.height = 1 << (vk::MAX_IMAGE_LEVELS_CUBE - 1);
+		}
+		else
+		{
+			pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_2D;
+			pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_2D - 1);
+			pImageFormatProperties->maxExtent.height = 1 << (vk::MAX_IMAGE_LEVELS_2D - 1);
 
-				VkFormatProperties props;
-				GetFormatProperties(format, &props);
-				auto features = tiling == VK_IMAGE_TILING_LINEAR ? props.linearTilingFeatures : props.optimalTilingFeatures;
-				if(features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))
-				{
-					// Only renderable formats make sense for multisample
-					pImageFormatProperties->sampleCounts = getSampleCounts();
-				}
+			VkFormatProperties props;
+			GetFormatProperties(format, &props);
+			auto features = tiling == VK_IMAGE_TILING_LINEAR ? props.linearTilingFeatures : props.optimalTilingFeatures;
+			if(features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))
+			{
+				// Only renderable formats make sense for multisample
+				pImageFormatProperties->sampleCounts = getSampleCounts();
 			}
-			break;
-		case VK_IMAGE_TYPE_3D:
-			pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_3D;
-			pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_3D - 1);
-			pImageFormatProperties->maxExtent.height = 1 << (vk::MAX_IMAGE_LEVELS_3D - 1);
-			pImageFormatProperties->maxExtent.depth = 1 << (vk::MAX_IMAGE_LEVELS_3D - 1);
-			pImageFormatProperties->maxArrayLayers = 1;  // no 3D + layers
-			break;
-		default:
-			UNREACHABLE("VkImageType: %d", int(type));
-			break;
+		}
+		break;
+	case VK_IMAGE_TYPE_3D:
+		pImageFormatProperties->maxMipLevels = vk::MAX_IMAGE_LEVELS_3D;
+		pImageFormatProperties->maxExtent.width = 1 << (vk::MAX_IMAGE_LEVELS_3D - 1);
+		pImageFormatProperties->maxExtent.height = 1 << (vk::MAX_IMAGE_LEVELS_3D - 1);
+		pImageFormatProperties->maxExtent.depth = 1 << (vk::MAX_IMAGE_LEVELS_3D - 1);
+		pImageFormatProperties->maxArrayLayers = 1;  // no 3D + layers
+		break;
+	default:
+		UNREACHABLE("VkImageType: %d", int(type));
+		break;
 	}
 
 	pImageFormatProperties->maxResourceSize = 1u << 31;  // Minimum value for maxResourceSize
diff --git a/src/Vulkan/VkPipeline.cpp b/src/Vulkan/VkPipeline.cpp
index 3c65496..3901672 100644
--- a/src/Vulkan/VkPipeline.cpp
+++ b/src/Vulkan/VkPipeline.cpp
@@ -43,13 +43,13 @@
 	opt.SetMessageConsumer([](spv_message_level_t level, const char *source, const spv_position_t &position, const char *message) {
 		switch(level)
 		{
-			case SPV_MSG_FATAL: sw::warn("SPIR-V FATAL: %d:%d %s\n", int(position.line), int(position.column), message);
-			case SPV_MSG_INTERNAL_ERROR: sw::warn("SPIR-V INTERNAL_ERROR: %d:%d %s\n", int(position.line), int(position.column), message);
-			case SPV_MSG_ERROR: sw::warn("SPIR-V ERROR: %d:%d %s\n", int(position.line), int(position.column), message);
-			case SPV_MSG_WARNING: sw::warn("SPIR-V WARNING: %d:%d %s\n", int(position.line), int(position.column), message);
-			case SPV_MSG_INFO: sw::trace("SPIR-V INFO: %d:%d %s\n", int(position.line), int(position.column), message);
-			case SPV_MSG_DEBUG: sw::trace("SPIR-V DEBUG: %d:%d %s\n", int(position.line), int(position.column), message);
-			default: sw::trace("SPIR-V MESSAGE: %d:%d %s\n", int(position.line), int(position.column), message);
+		case SPV_MSG_FATAL: sw::warn("SPIR-V FATAL: %d:%d %s\n", int(position.line), int(position.column), message); break;
+		case SPV_MSG_INTERNAL_ERROR: sw::warn("SPIR-V INTERNAL_ERROR: %d:%d %s\n", int(position.line), int(position.column), message); break;
+		case SPV_MSG_ERROR: sw::warn("SPIR-V ERROR: %d:%d %s\n", int(position.line), int(position.column), message); break;
+		case SPV_MSG_WARNING: sw::warn("SPIR-V WARNING: %d:%d %s\n", int(position.line), int(position.column), message); break;
+		case SPV_MSG_INFO: sw::trace("SPIR-V INFO: %d:%d %s\n", int(position.line), int(position.column), message); break;
+		case SPV_MSG_DEBUG: sw::trace("SPIR-V DEBUG: %d:%d %s\n", int(position.line), int(position.column), message); break;
+		default: sw::trace("SPIR-V MESSAGE: %d:%d %s\n", int(position.line), int(position.column), message); break;
 		}
 	});
 
@@ -189,19 +189,19 @@
 {
 	switch(stage)
 	{
-		case VK_SHADER_STAGE_VERTEX_BIT:
-			ASSERT(vertexShader.get() == nullptr);
-			vertexShader = spirvShader;
-			break;
+	case VK_SHADER_STAGE_VERTEX_BIT:
+		ASSERT(vertexShader.get() == nullptr);
+		vertexShader = spirvShader;
+		break;
 
-		case VK_SHADER_STAGE_FRAGMENT_BIT:
-			ASSERT(fragmentShader.get() == nullptr);
-			fragmentShader = spirvShader;
-			break;
+	case VK_SHADER_STAGE_FRAGMENT_BIT:
+		ASSERT(fragmentShader.get() == nullptr);
+		fragmentShader = spirvShader;
+		break;
 
-		default:
-			UNSUPPORTED("Unsupported stage");
-			break;
+	default:
+		UNSUPPORTED("Unsupported stage");
+		break;
 	}
 }
 
@@ -209,13 +209,13 @@
 {
 	switch(stage)
 	{
-		case VK_SHADER_STAGE_VERTEX_BIT:
-			return vertexShader;
-		case VK_SHADER_STAGE_FRAGMENT_BIT:
-			return fragmentShader;
-		default:
-			UNSUPPORTED("Unsupported stage");
-			return fragmentShader;
+	case VK_SHADER_STAGE_VERTEX_BIT:
+		return vertexShader;
+	case VK_SHADER_STAGE_FRAGMENT_BIT:
+		return fragmentShader;
+	default:
+		UNSUPPORTED("Unsupported stage");
+		return fragmentShader;
 	}
 }
 
diff --git a/src/Vulkan/VkQueue.cpp b/src/Vulkan/VkQueue.cpp
index c1afd6f..918b467 100644
--- a/src/Vulkan/VkQueue.cpp
+++ b/src/Vulkan/VkQueue.cpp
@@ -46,7 +46,7 @@
 		{
 			switch(extension->sType)
 			{
-				case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
+			case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
 				{
 					const auto *tlsSubmitInfo = reinterpret_cast<const VkTimelineSemaphoreSubmitInfo *>(extension);
 					totalSize += sizeof(VkTimelineSemaphoreSubmitInfo);
@@ -54,9 +54,9 @@
 					totalSize += tlsSubmitInfo->signalSemaphoreValueCount * sizeof(uint64_t);
 				}
 				break;
-				default:
-					WARN("submitInfo[%d]->pNext sType: %s", i, vk::Stringify(extension->sType).c_str());
-					break;
+			default:
+				WARN("submitInfo[%d]->pNext sType: %s", i, vk::Stringify(extension->sType).c_str());
+				break;
 			}
 		}
 	}
@@ -95,7 +95,7 @@
 		{
 			switch(extension->sType)
 			{
-				case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
+			case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
 				{
 					const VkTimelineSemaphoreSubmitInfo *tlsSubmitInfo = reinterpret_cast<const VkTimelineSemaphoreSubmitInfo *>(extension);
 
@@ -119,9 +119,9 @@
 					submits[i].pNext = tlsSubmitInfoCopy;
 				}
 				break;
-				default:
-					WARN("submitInfo[%d]->pNext sType: %s", i, vk::Stringify(extension->sType).c_str());
-					break;
+			default:
+				WARN("submitInfo[%d]->pNext sType: %s", i, vk::Stringify(extension->sType).c_str());
+				break;
 			}
 		}
 	}
@@ -185,12 +185,12 @@
 		{
 			switch(nextInfo->sType)
 			{
-				case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
-					timelineInfo = reinterpret_cast<const VkTimelineSemaphoreSubmitInfo *>(submitInfo.pNext);
-					break;
-				default:
-					WARN("submitInfo.pNext->sType = %s", vk::Stringify(nextInfo->sType).c_str());
-					break;
+			case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
+				timelineInfo = reinterpret_cast<const VkTimelineSemaphoreSubmitInfo *>(submitInfo.pNext);
+				break;
+			default:
+				WARN("submitInfo.pNext->sType = %s", vk::Stringify(nextInfo->sType).c_str());
+				break;
 			}
 		}
 
@@ -267,15 +267,15 @@
 
 		switch(task.type)
 		{
-			case Task::KILL_THREAD:
-				ASSERT_MSG(pending.count() == 0, "queue has remaining work!");
-				return;
-			case Task::SUBMIT_QUEUE:
-				submitQueue(task);
-				break;
-			default:
-				UNREACHABLE("task.type %d", static_cast<int>(task.type));
-				break;
+		case Task::KILL_THREAD:
+			ASSERT_MSG(pending.count() == 0, "queue has remaining work!");
+			return;
+		case Task::SUBMIT_QUEUE:
+			submitQueue(task);
+			break;
+		default:
+			UNREACHABLE("task.type %d", static_cast<int>(task.type));
+			break;
 		}
 	}
 }
diff --git a/src/Vulkan/VkRenderPass.cpp b/src/Vulkan/VkRenderPass.cpp
index dab3d51..d2836c3 100644
--- a/src/Vulkan/VkRenderPass.cpp
+++ b/src/Vulkan/VkRenderPass.cpp
@@ -155,7 +155,7 @@
 		{
 			switch(extension->sType)
 			{
-				case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE:
+			case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE:
 				{
 					const auto *ext = reinterpret_cast<const VkSubpassDescriptionDepthStencilResolve *>(extension);
 					// If any subpass includes depthStencilResolve, allocate a DSR struct for each subpass
@@ -192,10 +192,10 @@
 					}
 				}
 				break;
-				default:
-					LOG_TRAP("VkRenderPassCreateInfo2KHR->subpass[%d]->pNext sType: %s",
-					         i, vk::Stringify(extension->sType).c_str());
-					break;
+			default:
+				LOG_TRAP("VkRenderPassCreateInfo2KHR->subpass[%d]->pNext sType: %s",
+				         i, vk::Stringify(extension->sType).c_str());
+				break;
 			}
 
 			extension = extension->pNext;
@@ -243,7 +243,7 @@
 	{
 		switch(extensionCreateInfo->sType)
 		{
-			case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
+		case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
 			{
 				// Renderpass uses multiview if this structure is present AND some subpass specifies
 				// a nonzero view mask
@@ -257,12 +257,11 @@
 						viewMasks = masks;
 					}
 				}
-
-				break;
 			}
-			default:
-				WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
-				break;
+			break;
+		default:
+			WARN("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+			break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
@@ -387,7 +386,7 @@
 		{
 			switch(extension->sType)
 			{
-				case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE:
+			case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE:
 				{
 					const auto *ext = reinterpret_cast<const VkSubpassDescriptionDepthStencilResolve *>(extension);
 					if(ext->pDepthStencilResolveAttachment != nullptr && ext->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED)
@@ -404,10 +403,10 @@
 					}
 				}
 				break;
-				default:
-					LOG_TRAP("VkRenderPassCreateInfo2KHR->subpass[%d]->pNext sType: %s",
-					         i, vk::Stringify(extension->sType).c_str());
-					break;
+			default:
+				LOG_TRAP("VkRenderPassCreateInfo2KHR->subpass[%d]->pNext sType: %s",
+				         i, vk::Stringify(extension->sType).c_str());
+				break;
 			}
 
 			extension = extension->pNext;
diff --git a/src/Vulkan/VkSemaphore.cpp b/src/Vulkan/VkSemaphore.cpp
index 5f9e7d1..e589e08 100644
--- a/src/Vulkan/VkSemaphore.cpp
+++ b/src/Vulkan/VkSemaphore.cpp
@@ -100,7 +100,7 @@
 	{
 		switch(nextInfo->sType)
 		{
-			case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO:
+		case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO:
 			{
 				const auto *exportInfo = reinterpret_cast<const VkExportSemaphoreCreateInfo *>(nextInfo);
 				exportSemaphore = true;
@@ -113,16 +113,16 @@
 				}
 			}
 			break;
-			case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
+		case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
 			{
 				const auto *tlsInfo = reinterpret_cast<const VkSemaphoreTypeCreateInfo *>(nextInfo);
 				semaphoreType = tlsInfo->semaphoreType;
 				initialPayload = tlsInfo->initialValue;
 			}
 			break;
-			default:
-				WARN("nextInfo->sType = %s", vk::Stringify(nextInfo->sType).c_str());
-				break;
+		default:
+			WARN("nextInfo->sType = %s", vk::Stringify(nextInfo->sType).c_str());
+			break;
 		}
 	}
 }
diff --git a/src/Vulkan/VkStringify.cpp b/src/Vulkan/VkStringify.cpp
index e2007ce..8821187 100644
--- a/src/Vulkan/VkStringify.cpp
+++ b/src/Vulkan/VkStringify.cpp
@@ -30,21 +30,21 @@
 	std::string ret = "";
 	switch(static_cast<int>(value))
 	{
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
-			ret = "PhysicalDeviceProvokingVertexFeaturesEXT";
-			break;
-		case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT:
-			ret = "PipelineRasterizationProvokingVertexStateCreateInfoEXT";
-			break;
-		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_PROPERTIES_EXT:
-			ret = "PhysicalDeviceProvokingVertexPropertiesEXT";
-			break;
-		case VK_STRUCTURE_TYPE_SAMPLER_FILTERING_PRECISION_GOOGLE:
-			ret = "SamplerFilteringPrecisionGOOGLE";
-			break;
-		default:
-			ret = vkhpp::to_string(static_cast<vkhpp::StructureType>(value));
-			break;
+	case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
+		ret = "PhysicalDeviceProvokingVertexFeaturesEXT";
+		break;
+	case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT:
+		ret = "PipelineRasterizationProvokingVertexStateCreateInfoEXT";
+		break;
+	case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_PROPERTIES_EXT:
+		ret = "PhysicalDeviceProvokingVertexPropertiesEXT";
+		break;
+	case VK_STRUCTURE_TYPE_SAMPLER_FILTERING_PRECISION_GOOGLE:
+		ret = "SamplerFilteringPrecisionGOOGLE";
+		break;
+	default:
+		ret = vkhpp::to_string(static_cast<vkhpp::StructureType>(value));
+		break;
 	}
 
 	return ret;
diff --git a/src/Vulkan/libVulkan.cpp b/src/Vulkan/libVulkan.cpp
index f84fe63..7555c69 100644
--- a/src/Vulkan/libVulkan.cpp
+++ b/src/Vulkan/libVulkan.cpp
@@ -191,7 +191,7 @@
 	{
 		switch(extensionCreateInfo->sType)
 		{
-			case VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO:
+		case VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO:
 			{
 				const VkRenderPassInputAttachmentAspectCreateInfo *inputAttachmentAspectCreateInfo = reinterpret_cast<const VkRenderPassInputAttachmentAspectCreateInfo *>(extensionCreateInfo);
 
@@ -219,7 +219,7 @@
 				}
 			}
 			break;
-			case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
+		case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
 			{
 				const VkRenderPassMultiviewCreateInfo *multiviewCreateInfo = reinterpret_cast<const VkRenderPassMultiviewCreateInfo *>(extensionCreateInfo);
 				ASSERT((multiviewCreateInfo->subpassCount == 0) || (multiviewCreateInfo->subpassCount == pCreateInfo->subpassCount));
@@ -259,9 +259,9 @@
 				ASSERT(vk::Cast(device)->getPhysicalDevice()->getProperties().limits.maxFramebufferLayers >= 32);
 			}
 			break;
-			default:
-				LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
-				break;
+		default:
+			LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+			break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
@@ -530,7 +530,7 @@
 		const VkBaseInStructure *createInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
 		switch(createInfo->sType)
 		{
-			case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT:
+		case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT:
 			{
 				const VkDebugUtilsMessengerCreateInfoEXT *debugUtilsMessengerCreateInfoEXT = reinterpret_cast<const VkDebugUtilsMessengerCreateInfoEXT *>(createInfo);
 				VkResult result = vk::DebugUtilsMessenger::Create(pAllocator, debugUtilsMessengerCreateInfoEXT, &messenger);
@@ -540,16 +540,16 @@
 				}
 			}
 			break;
-			case VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO:
-				// According to the Vulkan spec, section 2.7.2. Implicit Valid Usage:
-				// "The values VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO and
-				//  VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO are reserved for
-				//  internal use by the loader, and do not have corresponding
-				//  Vulkan structures in this Specification."
-				break;
-			default:
-				LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(createInfo->sType).c_str());
-				break;
+		case VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO:
+			// According to the Vulkan spec, section 2.7.2. Implicit Valid Usage:
+			// "The values VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO and
+			//  VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO are reserved for
+			//  internal use by the loader, and do not have corresponding
+			//  Vulkan structures in this Specification."
+			break;
+		default:
+			LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(createInfo->sType).c_str());
+			break;
 		}
 	}
 
@@ -709,14 +709,14 @@
 		// are not enumerated in the official Vulkan header
 		switch((long)(extensionCreateInfo->sType))
 		{
-			case VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO:
-				// According to the Vulkan spec, section 2.7.2. Implicit Valid Usage:
-				// "The values VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO and
-				//  VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO are reserved for
-				//  internal use by the loader, and do not have corresponding
-				//  Vulkan structures in this Specification."
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
+		case VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO:
+			// According to the Vulkan spec, section 2.7.2. Implicit Valid Usage:
+			// "The values VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO and
+			//  VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO are reserved for
+			//  internal use by the loader, and do not have corresponding
+			//  Vulkan structures in this Specification."
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
 			{
 				ASSERT(!pCreateInfo->pEnabledFeatures);  // "If the pNext chain includes a VkPhysicalDeviceFeatures2 structure, then pEnabledFeatures must be NULL"
 
@@ -725,7 +725,7 @@
 				enabledFeatures = &physicalDeviceFeatures2->features;
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES:
 			{
 				const VkPhysicalDeviceSamplerYcbcrConversionFeatures *samplerYcbcrConversionFeatures = reinterpret_cast<const VkPhysicalDeviceSamplerYcbcrConversionFeatures *>(extensionCreateInfo);
 
@@ -736,7 +736,7 @@
 				(void)samplerYcbcrConversionFeatures->samplerYcbcrConversion;
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES:
 			{
 				const VkPhysicalDevice16BitStorageFeatures *storage16BitFeatures = reinterpret_cast<const VkPhysicalDevice16BitStorageFeatures *>(extensionCreateInfo);
 
@@ -749,7 +749,7 @@
 				}
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES:
 			{
 				const VkPhysicalDeviceVariablePointerFeatures *variablePointerFeatures = reinterpret_cast<const VkPhysicalDeviceVariablePointerFeatures *>(extensionCreateInfo);
 
@@ -760,7 +760,7 @@
 				}
 			}
 			break;
-			case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO:
+		case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO:
 			{
 				const VkDeviceGroupDeviceCreateInfo *groupDeviceCreateInfo = reinterpret_cast<const VkDeviceGroupDeviceCreateInfo *>(extensionCreateInfo);
 
@@ -771,7 +771,7 @@
 				}
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES:
 			{
 				const VkPhysicalDeviceMultiviewFeatures *multiviewFeatures = reinterpret_cast<const VkPhysicalDeviceMultiviewFeatures *>(extensionCreateInfo);
 
@@ -782,7 +782,7 @@
 				}
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
 			{
 				const VkPhysicalDeviceShaderDrawParametersFeatures *shaderDrawParametersFeatures = reinterpret_cast<const VkPhysicalDeviceShaderDrawParametersFeatures *>(extensionCreateInfo);
 
@@ -792,7 +792,7 @@
 				}
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR:
 			{
 				const VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR *shaderDrawParametersFeatures = reinterpret_cast<const VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR *>(extensionCreateInfo);
 
@@ -800,7 +800,7 @@
 				(void)(shaderDrawParametersFeatures->separateDepthStencilLayouts);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
 			{
 				const VkPhysicalDeviceLineRasterizationFeaturesEXT *lineRasterizationFeatures = reinterpret_cast<const VkPhysicalDeviceLineRasterizationFeaturesEXT *>(extensionCreateInfo);
 				if((lineRasterizationFeatures->smoothLines != VK_FALSE) ||
@@ -812,7 +812,7 @@
 				}
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT:
 			{
 				const VkPhysicalDeviceProvokingVertexFeaturesEXT *provokingVertexFeatures = reinterpret_cast<const VkPhysicalDeviceProvokingVertexFeaturesEXT *>(extensionCreateInfo);
 
@@ -823,7 +823,7 @@
 				(void)provokingVertexFeatures->provokingVertexLast;
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT:
 			{
 				const VkPhysicalDeviceImageRobustnessFeaturesEXT *imageRobustnessFeatures = reinterpret_cast<const VkPhysicalDeviceImageRobustnessFeaturesEXT *>(extensionCreateInfo);
 
@@ -833,18 +833,18 @@
 				(void)imageRobustnessFeatures->robustImageAccess;
 			}
 			break;
-			// For unsupported structures, check that we don't expose the corresponding extension string:
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
-				ASSERT(!hasDeviceExtension(VK_EXT_ROBUSTNESS_2_EXTENSION_NAME));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR:
+		// For unsupported structures, check that we don't expose the corresponding extension string:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+			ASSERT(!hasDeviceExtension(VK_EXT_ROBUSTNESS_2_EXTENSION_NAME));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR:
 			{
 				const VkPhysicalDeviceImagelessFramebufferFeaturesKHR *imagelessFramebufferFeatures = reinterpret_cast<const VkPhysicalDeviceImagelessFramebufferFeaturesKHR *>(extensionCreateInfo);
 				// Always provide Imageless Framebuffers
 				(void)imagelessFramebufferFeatures->imagelessFramebuffer;
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES:
 			{
 				const VkPhysicalDeviceScalarBlockLayoutFeatures *scalarBlockLayoutFeatures = reinterpret_cast<const VkPhysicalDeviceScalarBlockLayoutFeatures *>(extensionCreateInfo);
 
@@ -853,33 +853,33 @@
 			}
 			break;
 #ifdef SWIFTSHADER_DEVICE_MEMORY_REPORT
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT:
 			{
 				const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT *deviceMemoryReportFeatures = reinterpret_cast<const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT *>(extensionCreateInfo);
 				(void)deviceMemoryReportFeatures->deviceMemoryReport;
 			}
 			break;
 #endif  // SWIFTSHADER_DEVICE_MEMORY_REPORT
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES:
 			{
 				const VkPhysicalDeviceHostQueryResetFeatures *hostQueryResetFeatures = reinterpret_cast<const VkPhysicalDeviceHostQueryResetFeatures *>(extensionCreateInfo);
 
 				// VK_EXT_host_query_reset is always enabled.
 				(void)hostQueryResetFeatures->hostQueryReset;
-				break;
 			}
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES:
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES:
 			{
 				const auto *tsFeatures = reinterpret_cast<const VkPhysicalDeviceTimelineSemaphoreFeatures *>(extensionCreateInfo);
 
 				// VK_KHR_timeline_semaphores is always enabled
 				(void)tsFeatures->timelineSemaphore;
-				break;
 			}
-			default:
-				// "the [driver] must skip over, without processing (other than reading the sType and pNext members) any structures in the chain with sType values not defined by [supported extenions]"
-				LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
-				break;
+			break;
+		default:
+			// "the [driver] must skip over, without processing (other than reading the sType and pNext members) any structures in the chain with sType values not defined by [supported extensions]"
+			LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+			break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
@@ -1032,17 +1032,17 @@
 	{
 		switch(allocationInfo->sType)
 		{
-			case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
-				// This can safely be ignored, as the Vulkan spec mentions:
-				// "If the pNext chain includes a VkMemoryDedicatedAllocateInfo structure, then that structure
-				//  includes a handle of the sole buffer or image resource that the memory *can* be bound to."
-				break;
-			case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO:
-				// This extension controls on which physical devices the memory gets allocated.
-				// SwiftShader only has a single physical device, so this extension does nothing in this case.
-				break;
+		case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
+			// This can safely be ignored, as the Vulkan spec mentions:
+			// "If the pNext chain includes a VkMemoryDedicatedAllocateInfo structure, then that structure
+			//  includes a handle of the sole buffer or image resource that the memory *can* be bound to."
+			break;
+		case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO:
+			// This extension controls on which physical devices the memory gets allocated.
+			// SwiftShader only has a single physical device, so this extension does nothing in this case.
+			break;
 #if SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
-			case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
+		case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
 			{
 				auto *importInfo = reinterpret_cast<const VkImportMemoryFdInfoKHR *>(allocationInfo);
 				if(importInfo->handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
@@ -1050,38 +1050,38 @@
 					UNSUPPORTED("importInfo->handleType %u", importInfo->handleType);
 					return VK_ERROR_INVALID_EXTERNAL_HANDLE;
 				}
-				break;
 			}
+			break;
 #endif  // SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
-			case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
+		case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
 			{
 				auto *exportInfo = reinterpret_cast<const VkExportMemoryAllocateInfo *>(allocationInfo);
 				switch(exportInfo->handleTypes)
 				{
 #if SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD
-					case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
-						break;
+				case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
+					break;
 #endif
 #if SWIFTSHADER_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER
-					case VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID:
-						break;
+				case VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID:
+					break;
 #endif
 #if VK_USE_PLATFORM_FUCHSIA
-					case VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA:
-						break;
+				case VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA:
+					break;
 #endif
-					default:
-						UNSUPPORTED("exportInfo->handleTypes %u", exportInfo->handleTypes);
-						return VK_ERROR_INVALID_EXTERNAL_HANDLE;
+				default:
+					UNSUPPORTED("exportInfo->handleTypes %u", exportInfo->handleTypes);
+					return VK_ERROR_INVALID_EXTERNAL_HANDLE;
 				}
-				break;
 			}
+			break;
 #if SWIFTSHADER_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER
-			case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
-				// Ignore
-				break;
+		case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
+			// Ignore
+			break;
 #endif  // SWIFTSHADER_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER
-			case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT:
+		case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT:
 			{
 				auto *importInfo = reinterpret_cast<const VkImportMemoryHostPointerInfoEXT *>(allocationInfo);
 				if(importInfo->handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT && importInfo->handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT)
@@ -1089,10 +1089,10 @@
 					UNSUPPORTED("importInfo->handleType %u", importInfo->handleType);
 					return VK_ERROR_INVALID_EXTERNAL_HANDLE;
 				}
-				break;
 			}
+			break;
 #if VK_USE_PLATFORM_FUCHSIA
-			case VK_STRUCTURE_TYPE_TEMP_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA:
+		case VK_STRUCTURE_TYPE_TEMP_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA:
 			{
 				auto *importInfo = reinterpret_cast<const VkImportMemoryZirconHandleInfoFUCHSIA *>(allocationInfo);
 				if(importInfo->handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA)
@@ -1100,12 +1100,12 @@
 					UNSUPPORTED("importInfo->handleType %u", importInfo->handleType);
 					return VK_ERROR_INVALID_EXTERNAL_HANDLE;
 				}
-				break;
 			}
+			break;
 #endif  // VK_USE_PLATFORM_FUCHSIA
-			default:
-				LOG_TRAP("pAllocateInfo->pNext sType = %s", vk::Stringify(allocationInfo->sType).c_str());
-				break;
+		default:
+			LOG_TRAP("pAllocateInfo->pNext sType = %s", vk::Stringify(allocationInfo->sType).c_str());
+			break;
 		}
 
 		allocationInfo = allocationInfo->pNext;
@@ -1444,18 +1444,18 @@
 	{
 		switch(nextInfo->sType)
 		{
-			case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO:
-				// Let the semaphore constructor handle this
-				break;
-			case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
+		case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO:
+			// Let the semaphore constructor handle this
+			break;
+		case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
 			{
 				const VkSemaphoreTypeCreateInfo *info = reinterpret_cast<const VkSemaphoreTypeCreateInfo *>(nextInfo);
 				type = info->semaphoreType;
 			}
 			break;
-			default:
-				WARN("nextInfo->sType = %s", vk::Stringify(nextInfo->sType).c_str());
-				break;
+		default:
+			WARN("nextInfo->sType = %s", vk::Stringify(nextInfo->sType).c_str());
+			break;
 		}
 	}
 
@@ -1672,12 +1672,12 @@
 	{
 		switch(nextInfo->sType)
 		{
-			case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO:
-				// Do nothing. Should be handled by vk::Buffer::Create().
-				break;
-			default:
-				LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(nextInfo->sType).c_str());
-				break;
+		case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO:
+			// Do nothing. Should be handled by vk::Buffer::Create().
+			break;
+		default:
+			LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(nextInfo->sType).c_str());
+			break;
 		}
 		nextInfo = nextInfo->pNext;
 	}
@@ -1763,13 +1763,13 @@
 		switch((long)(extensionCreateInfo->sType))
 		{
 #ifdef __ANDROID__
-			case VK_STRUCTURE_TYPE_SWAPCHAIN_IMAGE_CREATE_INFO_ANDROID:
+		case VK_STRUCTURE_TYPE_SWAPCHAIN_IMAGE_CREATE_INFO_ANDROID:
 			{
 				const VkSwapchainImageCreateInfoANDROID *swapImageCreateInfo = reinterpret_cast<const VkSwapchainImageCreateInfoANDROID *>(extensionCreateInfo);
 				backmem.androidUsage = swapImageCreateInfo->usage;
 			}
 			break;
-			case VK_STRUCTURE_TYPE_NATIVE_BUFFER_ANDROID:
+		case VK_STRUCTURE_TYPE_NATIVE_BUFFER_ANDROID:
 			{
 				const VkNativeBufferANDROID *nativeBufferInfo = reinterpret_cast<const VkNativeBufferANDROID *>(extensionCreateInfo);
 				backmem.nativeHandle = nativeBufferInfo->handle;
@@ -1777,21 +1777,21 @@
 				swapchainImage = true;
 			}
 			break;
-			case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID:
-				break;
+		case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID:
+			break;
 #endif
-			case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO:
-				// Do nothing. Should be handled by vk::Image::Create()
-				break;
-			case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
-				/* Do nothing. We don't actually need the swapchain handle yet; we'll do all the work in vkBindImageMemory2. */
-				break;
-			case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO:
-				// Do nothing. This extension tells the driver which image formats will be used
-				// by the application. Swiftshader is not impacted from lacking this information,
-				// so we don't need to track the format list.
-				break;
-			case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO:
+		case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO:
+			// Do nothing. Should be handled by vk::Image::Create()
+			break;
+		case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
+			// Do nothing. We don't actually need the swapchain handle yet; we'll do all the work in vkBindImageMemory2.
+			break;
+		case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO:
+			// Do nothing. This extension tells the driver which image formats will be used
+			// by the application. SwiftShader is not affected by the lack of this information,
+			// so we don't need to track the format list.
+			break;
+		case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO:
 			{
 				// SwiftShader does not use an image's usage info for non-debug purposes outside of
 				// vkGetPhysicalDeviceImageFormatProperties2. This also applies to separate stencil usage.
@@ -1799,10 +1799,10 @@
 				(void)stencilUsageInfo->stencilUsage;
 			}
 			break;
-			default:
-				// "the [driver] must skip over, without processing (other than reading the sType and pNext members) any structures in the chain with sType values not defined by [supported extenions]"
-				LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
-				break;
+		default:
+			// "the [driver] must skip over, without processing (other than reading the sType and pNext members) any structures in the chain with sType values not defined by [supported extensions]"
+			LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+			break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
@@ -1884,13 +1884,13 @@
 	{
 		switch(extensionCreateInfo->sType)
 		{
-			case VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR:
+		case VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR:
 			{
 				const VkImageViewUsageCreateInfo *multiviewCreateInfo = reinterpret_cast<const VkImageViewUsageCreateInfo *>(extensionCreateInfo);
 				ASSERT(!(~vk::Cast(pCreateInfo->image)->getUsage() & multiviewCreateInfo->usage));
 			}
 			break;
-			case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
+		case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
 			{
 				const VkSamplerYcbcrConversionInfo *samplerYcbcrConversionInfo = reinterpret_cast<const VkSamplerYcbcrConversionInfo *>(extensionCreateInfo);
 				ycbcrConversion = vk::Cast(samplerYcbcrConversionInfo->conversion);
@@ -1904,9 +1904,9 @@
 				}
 			}
 			break;
-			default:
-				LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
-				break;
+		default:
+			LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+			break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
@@ -2123,14 +2123,14 @@
 	{
 		switch(static_cast<long>(extensionCreateInfo->sType))
 		{
-			case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
+		case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
 			{
 				const VkSamplerYcbcrConversionInfo *samplerYcbcrConversionInfo = reinterpret_cast<const VkSamplerYcbcrConversionInfo *>(extensionCreateInfo);
 				ycbcrConversion = vk::Cast(samplerYcbcrConversionInfo->conversion);
 			}
 			break;
 #if !defined(__ANDROID__)
-			case VK_STRUCTURE_TYPE_SAMPLER_FILTERING_PRECISION_GOOGLE:
+		case VK_STRUCTURE_TYPE_SAMPLER_FILTERING_PRECISION_GOOGLE:
 			{
 				const VkSamplerFilteringPrecisionGOOGLE *filteringInfo =
 				    reinterpret_cast<const VkSamplerFilteringPrecisionGOOGLE *>(extensionCreateInfo);
@@ -2138,9 +2138,9 @@
 			}
 			break;
 #endif
-			default:
-				LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
-				break;
+		default:
+			LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+			break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
@@ -2184,12 +2184,12 @@
 	{
 		switch(extensionCreateInfo->sType)
 		{
-			case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT:
-				ASSERT(!vk::Cast(device)->hasExtension(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME));
-				break;
-			default:
-				LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
-				break;
+		case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT:
+			ASSERT(!vk::Cast(device)->hasExtension(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME));
+			break;
+		default:
+			LOG_TRAP("pCreateInfo->pNext sType = %s", vk::Stringify(extensionCreateInfo->sType).c_str());
+			break;
 		}
 
 		extensionCreateInfo = extensionCreateInfo->pNext;
@@ -2775,17 +2775,17 @@
 	{
 		switch(renderPassBeginInfo->sType)
 		{
-			case VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO:
-				// This extension controls which render area is used on which physical device,
-				// in order to distribute rendering between multiple physical devices.
-				// SwiftShader only has a single physical device, so this extension does nothing in this case.
-				break;
-			case VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO:
-				attachmentBeginInfo = reinterpret_cast<const VkRenderPassAttachmentBeginInfo *>(renderPassBeginInfo);
-				break;
-			default:
-				LOG_TRAP("pRenderPassBegin->pNext sType = %s", vk::Stringify(renderPassBeginInfo->sType).c_str());
-				break;
+		case VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO:
+			// This extension controls which render area is used on which physical device,
+			// in order to distribute rendering between multiple physical devices.
+			// SwiftShader only has a single physical device, so this extension does nothing in this case.
+			break;
+		case VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO:
+			attachmentBeginInfo = reinterpret_cast<const VkRenderPassAttachmentBeginInfo *>(renderPassBeginInfo);
+			break;
+		default:
+			LOG_TRAP("pRenderPassBegin->pNext sType = %s", vk::Stringify(renderPassBeginInfo->sType).c_str());
+			break;
 		}
 
 		renderPassBeginInfo = renderPassBeginInfo->pNext;
@@ -2892,12 +2892,12 @@
 		{
 			switch(extInfo->sType)
 			{
-				case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO:
-					/* Do nothing */
-					break;
+			case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO:
+				/* Do nothing */
+				break;
 
 #ifndef __ANDROID__
-				case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR:
+			case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR:
 				{
 					auto swapchainInfo = reinterpret_cast<VkBindImageMemorySwapchainInfoKHR const *>(extInfo);
 					memory = vk::Cast(swapchainInfo->swapchain)->getImage(swapchainInfo->imageIndex).getImageMemory();
@@ -2906,9 +2906,9 @@
 				break;
 #endif
 
-				default:
-					LOG_TRAP("pBindInfos[%d].pNext sType = %s", i, vk::Stringify(extInfo->sType).c_str());
-					break;
+			default:
+				LOG_TRAP("pBindInfos[%d].pNext sType = %s", i, vk::Stringify(extInfo->sType).c_str());
+				break;
 			}
 			extInfo = extInfo->pNext;
 		}
@@ -2975,7 +2975,7 @@
 	{
 		switch(extensionRequirements->sType)
 		{
-			case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
+		case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
 			{
 				auto requirements = reinterpret_cast<VkMemoryDedicatedRequirements *>(extensionRequirements);
 				vk::Cast(device)->getRequirements(requirements);
@@ -2988,9 +2988,9 @@
 #endif
 			}
 			break;
-			default:
-				LOG_TRAP("pMemoryRequirements->pNext sType = %s", vk::Stringify(extensionRequirements->sType).c_str());
-				break;
+		default:
+			LOG_TRAP("pMemoryRequirements->pNext sType = %s", vk::Stringify(extensionRequirements->sType).c_str());
+			break;
 		}
 
 		extensionRequirements = extensionRequirements->pNext;
@@ -3016,15 +3016,15 @@
 	{
 		switch(extensionRequirements->sType)
 		{
-			case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
+		case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
 			{
 				auto requirements = reinterpret_cast<VkMemoryDedicatedRequirements *>(extensionRequirements);
 				vk::Cast(device)->getRequirements(requirements);
 			}
 			break;
-			default:
-				LOG_TRAP("pMemoryRequirements->pNext sType = %s", vk::Stringify(extensionRequirements->sType).c_str());
-				break;
+		default:
+			LOG_TRAP("pMemoryRequirements->pNext sType = %s", vk::Stringify(extensionRequirements->sType).c_str());
+			break;
 		}
 
 		extensionRequirements = extensionRequirements->pNext;
@@ -3077,124 +3077,124 @@
 		// are not enumerated in the official Vulkan header
 		switch((long)(extensionProperties->sType))
 		{
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceIDProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceMaintenance3Properties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceMultiviewProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDevicePointClippingProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceProtectedMemoryProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceSubgroupProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT:
-				// Explicitly ignored, since VK_EXT_sample_locations is not supported
-				ASSERT(!hasDeviceExtension(VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME));
-				break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT:
+			// Explicitly ignored, since VK_EXT_sample_locations is not supported
+			ASSERT(!hasDeviceExtension(VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME));
+			break;
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceExternalMemoryHostPropertiesEXT *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceDriverPropertiesKHR *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
 #ifdef __ANDROID__
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENTATION_PROPERTIES_ANDROID:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENTATION_PROPERTIES_ANDROID:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDevicePresentationPropertiesANDROID *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
 #endif
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceLineRasterizationPropertiesEXT *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_PROPERTIES_EXT:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_PROPERTIES_EXT:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceProvokingVertexPropertiesEXT *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceFloatControlsProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceVulkan11Properties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceSamplerFilterMinmaxProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceTimelineSemaphoreProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceVulkan12Properties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceDescriptorIndexingProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES:
 			{
 				auto properties = reinterpret_cast<VkPhysicalDeviceDepthStencilResolveProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			default:
-				// "the [driver] must skip over, without processing (other than reading the sType and pNext members) any structures in the chain with sType values not defined by [supported extenions]"
-				LOG_TRAP("pProperties->pNext sType = %s", vk::Stringify(extensionProperties->sType).c_str());
-				break;
+		default:
+			// "the [driver] must skip over, without processing (other than reading the sType and pNext members) any structures in the chain with sType values not defined by [supported extensions]"
+			LOG_TRAP("pProperties->pNext sType = %s", vk::Stringify(extensionProperties->sType).c_str());
+			break;
 		}
 
 		extensionProperties = extensionProperties->pNext;
@@ -3276,33 +3276,33 @@
 	{
 		switch(extensionFormatInfo->sType)
 		{
-			case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR:
+		case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR:
 			{
 				// Explicitly ignored, since VK_KHR_image_format_list is not supported
 				ASSERT(!hasDeviceExtension(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME));
 			}
 			break;
-			case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO:
+		case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO:
 			{
 				const VkImageStencilUsageCreateInfo *stencilUsageInfo = reinterpret_cast<const VkImageStencilUsageCreateInfo *>(extensionFormatInfo);
 				stencilUsage = stencilUsageInfo->stencilUsage;
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO:
 			{
 				const VkPhysicalDeviceExternalImageFormatInfo *imageFormatInfo = reinterpret_cast<const VkPhysicalDeviceExternalImageFormatInfo *>(extensionFormatInfo);
 				handleType = &(imageFormatInfo->handleType);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT:
+		case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT:
 			{
 				// Explicitly ignored, since VK_EXT_image_drm_format_modifier is not supported
 				ASSERT(!hasDeviceExtension(VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME));
 			}
 			break;
-			default:
-				LOG_TRAP("pImageFormatInfo->pNext sType = %s", vk::Stringify(extensionFormatInfo->sType).c_str());
-				break;
+		default:
+			LOG_TRAP("pImageFormatInfo->pNext sType = %s", vk::Stringify(extensionFormatInfo->sType).c_str());
+			break;
 		}
 
 		extensionFormatInfo = extensionFormatInfo->pNext;
@@ -3318,26 +3318,26 @@
 	{
 		switch(extensionProperties->sType)
 		{
-			case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES:
+		case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES:
 			{
 				auto properties = reinterpret_cast<VkExternalImageFormatProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(handleType, properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES:
+		case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES:
 			{
 				auto properties = reinterpret_cast<VkSamplerYcbcrConversionImageFormatProperties *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(properties);
 			}
 			break;
-			case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD:
+		case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD:
 			{
 				// Explicitly ignored, since VK_AMD_texture_gather_bias_lod is not supported
 				ASSERT(!hasDeviceExtension(VK_AMD_TEXTURE_GATHER_BIAS_LOD_EXTENSION_NAME));
 			}
 			break;
 #ifdef __ANDROID__
-			case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID:
+		case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID:
 			{
 				auto properties = reinterpret_cast<VkAndroidHardwareBufferUsageANDROID *>(extensionProperties);
 				vk::Cast(physicalDevice)->getProperties(pImageFormatInfo, properties);
@@ -3345,9 +3345,9 @@
 			}
 			break;
 #endif
-			default:
-				LOG_TRAP("pImageFormatProperties->pNext sType = %s", vk::Stringify(extensionProperties->sType).c_str());
-				break;
+		default:
+			LOG_TRAP("pImageFormatProperties->pNext sType = %s", vk::Stringify(extensionProperties->sType).c_str());
+			break;
 		}
 
 		extensionProperties = extensionProperties->pNext;
@@ -3365,17 +3365,17 @@
 	VkFormatFeatureFlags features;
 	switch(tiling)
 	{
-		case VK_IMAGE_TILING_LINEAR:
-			features = properties.linearTilingFeatures;
-			break;
+	case VK_IMAGE_TILING_LINEAR:
+		features = properties.linearTilingFeatures;
+		break;
 
-		case VK_IMAGE_TILING_OPTIMAL:
-			features = properties.optimalTilingFeatures;
-			break;
+	case VK_IMAGE_TILING_OPTIMAL:
+		features = properties.optimalTilingFeatures;
+		break;
 
-		default:
-			UNSUPPORTED("VkImageTiling %d", int(tiling));
-			features = 0;
+	default:
+		UNSUPPORTED("VkImageTiling %d", int(tiling));
+		features = 0;
 	}
 
 	if(features == 0)
diff --git a/src/Vulkan/main.cpp b/src/Vulkan/main.cpp
index 7037d1a..2179eab 100644
--- a/src/Vulkan/main.cpp
+++ b/src/Vulkan/main.cpp
@@ -25,22 +25,22 @@
 
 	switch(uMsg)
 	{
-		case WM_INITDIALOG:
-			GetWindowRect(GetDesktopWindow(), &rect);
-			SetWindowPos(hwnd, HWND_TOP, rect.right / 2, rect.bottom / 2, 0, 0, SWP_NOSIZE);
-			SetTimer(hwnd, 1, 100, NULL);
-			return TRUE;
-		case WM_COMMAND:
-			if(LOWORD(wParam) == IDCANCEL)
-			{
-				EndDialog(hwnd, 0);
-			}
-			break;
-		case WM_TIMER:
-			if(IsDebuggerPresent())
-			{
-				EndDialog(hwnd, 0);
-			}
+	case WM_INITDIALOG:
+		GetWindowRect(GetDesktopWindow(), &rect);
+		SetWindowPos(hwnd, HWND_TOP, rect.right / 2, rect.bottom / 2, 0, 0, SWP_NOSIZE);
+		SetTimer(hwnd, 1, 100, NULL);
+		return TRUE;
+	case WM_COMMAND:
+		if(LOWORD(wParam) == IDCANCEL)
+		{
+			EndDialog(hwnd, 0);
+		}
+		break;
+	case WM_TIMER:
+		if(IsDebuggerPresent())
+		{
+			EndDialog(hwnd, 0);
+		}
 	}
 
 	return FALSE;
@@ -61,7 +61,7 @@
 {
 	switch(reason)
 	{
-		case DLL_PROCESS_ATTACH:
+	case DLL_PROCESS_ATTACH:
 #	ifdef DEBUGGER_WAIT_DIALOG
 		{
 			char disable_debugger_wait_dialog[] = "0";
@@ -74,11 +74,11 @@
 		}
 #	endif
 		break;
-		case DLL_THREAD_ATTACH:
-		case DLL_THREAD_DETACH:
-		case DLL_PROCESS_DETACH:
-		default:
-			break;
+	case DLL_THREAD_ATTACH:
+	case DLL_THREAD_DETACH:
+	case DLL_PROCESS_DETACH:
+	default:
+		break;
 	}
 
 	return TRUE;
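
For reference, a minimal standalone sketch of the pNext-chain traversal that the hunks above reindent, shown with the new IndentCaseBlocks/IndentCaseLabels settings applied. It assumes only the standard <vulkan/vulkan_core.h> header; the GetSemaphoreType helper is illustrative and not an existing SwiftShader function.

#include <vulkan/vulkan_core.h>

// Walk a semaphore create-info pNext chain and switch on each structure's sType.
static VkSemaphoreType GetSemaphoreType(const VkSemaphoreCreateInfo *pCreateInfo)
{
	VkSemaphoreType type = VK_SEMAPHORE_TYPE_BINARY;

	for(auto *nextInfo = reinterpret_cast<const VkBaseInStructure *>(pCreateInfo->pNext);
	    nextInfo != nullptr; nextInfo = nextInfo->pNext)
	{
		switch(nextInfo->sType)
		{
		case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO:
			// No locals here, so no scope is opened; break directly.
			break;
		case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
			{
				// Scoped block for the local 'info'; the break follows the closing brace.
				auto *info = reinterpret_cast<const VkSemaphoreTypeCreateInfo *>(nextInfo);
				type = info->semaphoreType;
			}
			break;
		default:
			break;
		}
	}

	return type;
}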