A2R10G10B10 Support

A2B10G10R10 (RGBA) was already supported by
SwiftShader, but A2R10G10B10 (BGRA) was not. Most of
this CL is trivial: it adds handling for the new
formats by mirroring the code used for the already
supported formats, with the R and B channels swapped.
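
For reference (not part of the change itself), the two
formats differ only in which color channel occupies the
low 10 bits of the packed 32-bit word. A minimal scalar
sketch of the packing, with illustrative helper names:

  #include <cstdint>

  // A2B10G10R10: A[31:30] B[29:20] G[19:10] R[9:0]
  uint32_t packA2B10G10R10(uint32_t r, uint32_t g, uint32_t b, uint32_t a)
  {
      return (a & 0x3) << 30 | (b & 0x3FF) << 20 | (g & 0x3FF) << 10 | (r & 0x3FF);
  }

  // A2R10G10B10: A[31:30] R[29:20] G[19:10] B[9:0], i.e. R and B swapped
  uint32_t packA2R10G10B10(uint32_t r, uint32_t g, uint32_t b, uint32_t a)
  {
      return (a & 0x3) << 30 | (r & 0x3FF) << 20 | (g & 0x3FF) << 10 | (b & 0x3FF);
  }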

The only genuinely new piece of code is the rounding
for 1010102 formats at the top of
PixelRoutine::writeColor(). Rounding was already
applied for 8-bit formats, but not for 1010102, which
could produce off-by-one errors in the output; the
error is fairly large when it lands on the 2-bit alpha
channel. This fixes one of the
dEQP-VK.pipeline.blend.*a2r10g10b10* tests.
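
The 1010102 rounding mirrors the existing 8-bit one:
color values are held as 16-bit fixed point, and before
the top N bits are extracted, subtracting (v >> N) and
adding half a destination ULP makes the truncation round
to nearest. A scalar sketch of the idea, with an
illustrative helper name (the actual code operates on
Reactor Short4 vectors):

  #include <cstdint>

  // v is a 16-bit unorm value; keep its top 'bits' bits, rounded to nearest.
  uint16_t roundToBits(uint16_t v, int bits)
  {
      // v - (v >> bits) rescales 0xFFFF so it exactly fills the top
      // 'bits' bits; adding half a destination ULP (1 << (15 - bits))
      // turns the later truncation into round-to-nearest.
      return v - (v >> bits) + (1u << (15 - bits));
  }

  // 8-bit channels:  v - (v >> 8)  + 0x0080
  // 10-bit channels: v - (v >> 10) + 0x0020
  //  2-bit alpha:    v - (v >> 2)  + 0x2000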

Tests: dEQP-VK.*a2r10g10b10*
Bug: b/142661203

Change-Id: Ifcae17aecafab3ea7967fdc755391ddd5e651ca5
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/40008
Reviewed-by: Nicolas Capens <nicolascapens@google.com>
Tested-by: Alexis Hétu <sugoi@google.com>
diff --git a/src/Device/Blitter.cpp b/src/Device/Blitter.cpp
index 958da63..d67968f 100644
--- a/src/Device/Blitter.cpp
+++ b/src/Device/Blitter.cpp
@@ -442,6 +442,13 @@
 			c.z = Float(Int((*Pointer<UInt>(element) & UInt(0x3FF00000)) >> 20));
 			c.w = Float(Int((*Pointer<UInt>(element) & UInt(0xC0000000)) >> 30));
 			break;
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+			c.z = Float(Int((*Pointer<UInt>(element) & UInt(0x000003FF))));
+			c.y = Float(Int((*Pointer<UInt>(element) & UInt(0x000FFC00)) >> 10));
+			c.x = Float(Int((*Pointer<UInt>(element) & UInt(0x3FF00000)) >> 20));
+			c.w = Float(Int((*Pointer<UInt>(element) & UInt(0xC0000000)) >> 30));
+			break;
 		case VK_FORMAT_D16_UNORM:
 			c.x = Float(Int((*Pointer<UShort>(element))));
 			break;
@@ -974,6 +981,12 @@
 			c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x3FF00000)) >> 20), 2);
 			c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0xC0000000)) >> 30), 3);
 			break;
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+			c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x000003FF))), 2);
+			c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x000FFC00)) >> 10), 1);
+			c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0x3FF00000)) >> 20), 0);
+			c = Insert(c, Int((*Pointer<UInt>(element) & UInt(0xC0000000)) >> 30), 3);
+			break;
 		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
 		case VK_FORMAT_R8G8B8A8_UINT:
 			c = Insert(c, Int(*Pointer<Byte>(element + 3)), 3);
@@ -1029,6 +1042,7 @@
 	switch(state.destFormat)
 	{
 		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
 			c = Min(As<UInt4>(c), UInt4(0x03FF, 0x03FF, 0x03FF, 0x0003));
 			break;
 		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
diff --git a/src/Pipeline/PixelProgram.cpp b/src/Pipeline/PixelProgram.cpp
index 0bfd0b0..7782f20 100644
--- a/src/Pipeline/PixelProgram.cpp
+++ b/src/Pipeline/PixelProgram.cpp
@@ -231,6 +231,7 @@
 			case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
 			case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
 			case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+			case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
 				for(unsigned int q = 0; q < state.multiSample; q++)
 				{
 					if(state.multiSampleMask & (1 << q))
@@ -276,6 +277,7 @@
 			case VK_FORMAT_A8B8G8R8_UINT_PACK32:
 			case VK_FORMAT_A8B8G8R8_SINT_PACK32:
 			case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+			case VK_FORMAT_A2R10G10B10_UINT_PACK32:
 				for(unsigned int q = 0; q < state.multiSample; q++)
 				{
 					if(state.multiSampleMask & (1 << q))
@@ -320,6 +322,7 @@
 			case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
 			case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
 			case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+			case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
 				oC[index].x = Max(oC[index].x, Float4(0.0f));
 				oC[index].x = Min(oC[index].x, Float4(1.0f));
 				oC[index].y = Max(oC[index].y, Float4(0.0f));
@@ -357,6 +360,7 @@
 			case VK_FORMAT_A8B8G8R8_UINT_PACK32:
 			case VK_FORMAT_A8B8G8R8_SINT_PACK32:
 			case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+			case VK_FORMAT_A2R10G10B10_UINT_PACK32:
 				break;
 			default:
 				UNIMPLEMENTED("VkFormat: %d", int(state.targetFormat[index]));
diff --git a/src/Pipeline/PixelRoutine.cpp b/src/Pipeline/PixelRoutine.cpp
index 82a44f1..c6fd4e3 100644
--- a/src/Pipeline/PixelRoutine.cpp
+++ b/src/Pipeline/PixelRoutine.cpp
@@ -1063,6 +1063,18 @@
 			a2b10g10r10Unpack(v, pixel);
 		}
 		break;
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		{
+			Int4 v = Int4(0);
+			v = Insert(v, *Pointer<Int>(buffer + 4 * x), 0);
+			v = Insert(v, *Pointer<Int>(buffer + 4 * x + 4), 1);
+			buffer += *Pointer<Int>(data + OFFSET(DrawData, colorPitchB[index]));
+			v = Insert(v, *Pointer<Int>(buffer + 4 * x), 2);
+			v = Insert(v, *Pointer<Int>(buffer + 4 * x + 4), 3);
+
+			a2r10g10b10Unpack(v, pixel);
+		}
+		break;
 		default:
 			UNIMPLEMENTED("VkFormat %d", state.targetFormat[index]);
 	}
@@ -1214,6 +1226,13 @@
 			current.z = current.z - As<Short4>(As<UShort4>(current.z) >> 8) + Short4(0x0080);
 			current.w = current.w - As<Short4>(As<UShort4>(current.w) >> 8) + Short4(0x0080);
 			break;
+		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+			current.x = current.x - As<Short4>(As<UShort4>(current.x) >> 10) + Short4(0x0020);
+			current.y = current.y - As<Short4>(As<UShort4>(current.y) >> 10) + Short4(0x0020);
+			current.z = current.z - As<Short4>(As<UShort4>(current.z) >> 10) + Short4(0x0020);
+			current.w = current.w - As<Short4>(As<UShort4>(current.w) >> 2) + Short4(0x2000);
+			break;
 		default:
 			break;
 	}
@@ -1349,6 +1368,19 @@
 			current.y = UnpackHigh(c02, c13);
 			break;
 		}
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		{
+			auto r = (Int4(current.x) >> 6) & Int4(0x3ff);
+			auto g = (Int4(current.y) >> 6) & Int4(0x3ff);
+			auto b = (Int4(current.z) >> 6) & Int4(0x3ff);
+			auto a = (Int4(current.w) >> 14) & Int4(0x3);
+			Int4 packed = (a << 30) | (r << 20) | (g << 10) | b;
+			auto c02 = As<Int2>(Int4(packed.xzzz));  // TODO: auto c02 = packed.xz;
+			auto c13 = As<Int2>(Int4(packed.ywww));  // TODO: auto c13 = packed.yw;
+			current.x = UnpackLow(c02, c13);
+			current.y = UnpackHigh(c02, c13);
+			break;
+		}
 		default:
 			UNIMPLEMENTED("VkFormat: %d", int(state.targetFormat[index]));
 	}
@@ -1635,6 +1667,9 @@
 			}
 		}
 		break;
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+			rgbaWriteMask = bgraWriteMask;
+			// [[fallthrough]]
 		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
 		{
 			buffer += 4 * x;
@@ -2040,6 +2075,7 @@
 		case VK_FORMAT_R8_SINT:
 		case VK_FORMAT_R8_UINT:
 		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
 			break;
 		case VK_FORMAT_R16G16_SFLOAT:
 		case VK_FORMAT_R32G32_SFLOAT:
@@ -2072,6 +2108,7 @@
 	}
 
 	int rgbaWriteMask = state.colorWriteActive(index);
+	int bgraWriteMask = (rgbaWriteMask & 0x0000000A) | (rgbaWriteMask & 0x00000001) << 2 | (rgbaWriteMask & 0x00000004) >> 2;
 
 	Int xMask;  // Combination of all masks
 
@@ -2621,6 +2658,35 @@
 				*Pointer<Int2>(buffer) = (As<Int2>(Int4(packed.zwww)) & mergedMask) | (value & ~mergedMask);
 			}
 			break;
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+			if((bgraWriteMask & 0x0000000F) != 0x0)
+			{
+				Int2 mergedMask, packedCol, value;
+				Int4 packed = ((As<Int4>(oC.w) & Int4(0x3)) << 30) |
+				              ((As<Int4>(oC.x) & Int4(0x3ff)) << 20) |
+				              ((As<Int4>(oC.y) & Int4(0x3ff)) << 10) |
+				              ((As<Int4>(oC.z) & Int4(0x3ff)));
+
+				buffer += 4 * x;
+				value = *Pointer<Int2>(buffer, 16);
+				mergedMask = *Pointer<Int2>(constants + OFFSET(Constants, maskD01Q) + xMask * 8);
+				if(bgraWriteMask != 0xF)
+				{
+					mergedMask &= *Pointer<Int2>(constants + OFFSET(Constants, mask10Q[bgraWriteMask][0]));
+				}
+				*Pointer<Int2>(buffer) = (As<Int2>(packed) & mergedMask) | (value & ~mergedMask);
+
+				buffer += *Pointer<Int>(data + OFFSET(DrawData, colorPitchB[index]));
+
+				value = *Pointer<Int2>(buffer, 16);
+				mergedMask = *Pointer<Int2>(constants + OFFSET(Constants, maskD23Q) + xMask * 8);
+				if(bgraWriteMask != 0xF)
+				{
+					mergedMask &= *Pointer<Int2>(constants + OFFSET(Constants, mask10Q[bgraWriteMask][0]));
+				}
+				*Pointer<Int2>(buffer) = (As<Int2>(Int4(packed.zwww)) & mergedMask) | (value & ~mergedMask);
+			}
+			break;
 		default:
 			UNIMPLEMENTED("VkFormat: %d", int(targetFormat));
 	}
diff --git a/src/Pipeline/SamplerCore.cpp b/src/Pipeline/SamplerCore.cpp
index 63b15f9..3ca28a5 100644
--- a/src/Pipeline/SamplerCore.cpp
+++ b/src/Pipeline/SamplerCore.cpp
@@ -1701,6 +1701,16 @@
 
 		a2b10g10r10Unpack(cc, c);
 	}
+	else if(state.textureFormat == VK_FORMAT_A2R10G10B10_UNORM_PACK32)
+	{
+		Int4 cc;
+		cc = Insert(cc, Pointer<Int>(buffer)[index[0]], 0);
+		cc = Insert(cc, Pointer<Int>(buffer)[index[1]], 1);
+		cc = Insert(cc, Pointer<Int>(buffer)[index[2]], 2);
+		cc = Insert(cc, Pointer<Int>(buffer)[index[3]], 3);
+
+		a2r10g10b10Unpack(cc, c);
+	}
 	else if(state.textureFormat == VK_FORMAT_A2B10G10R10_UINT_PACK32)
 	{
 		Int4 cc;
@@ -1714,6 +1724,19 @@
 		c.z = Short4(((cc >> 20) & Int4(0x3FF)));
 		c.w = Short4(((cc >> 30) & Int4(0x3)));
 	}
+	else if(state.textureFormat == VK_FORMAT_A2R10G10B10_UINT_PACK32)
+	{
+		Int4 cc;
+		cc = Insert(cc, Pointer<Int>(buffer)[index[0]], 0);
+		cc = Insert(cc, Pointer<Int>(buffer)[index[1]], 1);
+		cc = Insert(cc, Pointer<Int>(buffer)[index[2]], 2);
+		cc = Insert(cc, Pointer<Int>(buffer)[index[3]], 3);
+
+		c.z = Short4(((cc) & Int4(0x3FF)));
+		c.y = Short4(((cc >> 10) & Int4(0x3FF)));
+		c.x = Short4(((cc >> 20) & Int4(0x3FF)));
+		c.w = Short4(((cc >> 30) & Int4(0x3)));
+	}
 	else
 		ASSERT(false);
 
diff --git a/src/Pipeline/ShaderCore.cpp b/src/Pipeline/ShaderCore.cpp
index d8e5f02..e6a847d 100644
--- a/src/Pipeline/ShaderCore.cpp
+++ b/src/Pipeline/ShaderCore.cpp
@@ -632,6 +632,22 @@
 	result.w |= As<Short4>(As<UShort4>(result.w) >> 8);
 }
 
+void a2r10g10b10Unpack(Int4 &value, Vector4s &result)
+{
+	result.x = Short4(value >> 14) & Short4(0xFFC0u);
+	result.y = Short4(value >> 4) & Short4(0xFFC0u);
+	result.z = Short4(value << 6) & Short4(0xFFC0u);
+	result.w = Short4(value >> 16) & Short4(0xC000u);
+
+	// Expand to 16 bit range
+	result.x |= As<Short4>(As<UShort4>(result.x) >> 10);
+	result.y |= As<Short4>(As<UShort4>(result.y) >> 10);
+	result.z |= As<Short4>(As<UShort4>(result.z) >> 10);
+	result.w |= As<Short4>(As<UShort4>(result.w) >> 2);
+	result.w |= As<Short4>(As<UShort4>(result.w) >> 4);
+	result.w |= As<Short4>(As<UShort4>(result.w) >> 8);
+}
+
 rr::RValue<rr::Bool> AnyTrue(rr::RValue<sw::SIMD::Int> const &ints)
 {
 	return rr::SignMask(ints) != 0;
diff --git a/src/Pipeline/ShaderCore.hpp b/src/Pipeline/ShaderCore.hpp
index e19fdbd..9a1ce32 100644
--- a/src/Pipeline/ShaderCore.hpp
+++ b/src/Pipeline/ShaderCore.hpp
@@ -209,6 +209,7 @@
 sw::SIMD::Float r11g11b10Unpack(UInt r11g11b10bits);
 UInt r11g11b10Pack(sw::SIMD::Float &value);
 void a2b10g10r10Unpack(Int4 &value, Vector4s &result);
+void a2r10g10b10Unpack(Int4 &value, Vector4s &result);
 
 rr::RValue<rr::Bool> AnyTrue(rr::RValue<sw::SIMD::Int> const &ints);
 
diff --git a/src/Pipeline/SpirvShaderImage.cpp b/src/Pipeline/SpirvShaderImage.cpp
index e632310..40ed41f 100644
--- a/src/Pipeline/SpirvShaderImage.cpp
+++ b/src/Pipeline/SpirvShaderImage.cpp
@@ -805,12 +805,24 @@
 			dst.move(2, (packed[0] >> 20) & SIMD::Int(0x3FF));
 			dst.move(3, (packed[0] >> 30) & SIMD::Int(0x3));
 			break;
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+			dst.move(2, (packed[0]) & SIMD::Int(0x3FF));
+			dst.move(1, (packed[0] >> 10) & SIMD::Int(0x3FF));
+			dst.move(0, (packed[0] >> 20) & SIMD::Int(0x3FF));
+			dst.move(3, (packed[0] >> 30) & SIMD::Int(0x3));
+			break;
 		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
 			dst.move(0, SIMD::Float((packed[0]) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
 			dst.move(1, SIMD::Float((packed[0] >> 10) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
 			dst.move(2, SIMD::Float((packed[0] >> 20) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
 			dst.move(3, SIMD::Float((packed[0] >> 30) & SIMD::Int(0x3)) * SIMD::Float(1.0f / 0x3));
 			break;
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+			dst.move(2, SIMD::Float((packed[0]) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
+			dst.move(1, SIMD::Float((packed[0] >> 10) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
+			dst.move(0, SIMD::Float((packed[0] >> 20) & SIMD::Int(0x3FF)) * SIMD::Float(1.0f / 0x3FF));
+			dst.move(3, SIMD::Float((packed[0] >> 30) & SIMD::Int(0x3)) * SIMD::Float(1.0f / 0x3));
+			break;
 		case VK_FORMAT_R5G6B5_UNORM_PACK16:
 			dst.move(0, SIMD::Float((packed[0] >> 11) & SIMD::Int(0x1F)) * SIMD::Float(1.0f / 0x1F));
 			dst.move(1, SIMD::Float((packed[0] >> 5) & SIMD::Int(0x3F)) * SIMD::Float(1.0f / 0x3F));
diff --git a/src/Vulkan/VkFormat.cpp b/src/Vulkan/VkFormat.cpp
index 7487543..6fb6264 100644
--- a/src/Vulkan/VkFormat.cpp
+++ b/src/Vulkan/VkFormat.cpp
@@ -2024,6 +2024,8 @@
 		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
 		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
 		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
 		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
 		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
 		case VK_FORMAT_D16_UNORM:
@@ -2091,6 +2093,8 @@
 		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
 		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
 		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
 		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
 		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
 		case VK_FORMAT_D16_UNORM:
@@ -2141,6 +2145,8 @@
 		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
 		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
 		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
 		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
 		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
 		case VK_FORMAT_S8_UINT:
@@ -2215,6 +2221,8 @@
 		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
 		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
 		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
 		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
 		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
 		case VK_FORMAT_D16_UNORM:
diff --git a/src/Vulkan/VkPhysicalDevice.cpp b/src/Vulkan/VkPhysicalDevice.cpp
index 129b824..a208fc8 100644
--- a/src/Vulkan/VkPhysicalDevice.cpp
+++ b/src/Vulkan/VkPhysicalDevice.cpp
@@ -477,6 +477,7 @@
 		case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
 		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
 		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
 		case VK_FORMAT_R16_SFLOAT:
 		case VK_FORMAT_R16G16_SFLOAT:
 		case VK_FORMAT_R16G16B16A16_SFLOAT:
@@ -524,6 +525,7 @@
 		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
 		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
 		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
 		case VK_FORMAT_R16_UINT:
 		case VK_FORMAT_R16_SINT:
 		case VK_FORMAT_R16G16_UINT:
@@ -610,6 +612,7 @@
 		case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
 		case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
 		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
 		case VK_FORMAT_R16_SFLOAT:
 		case VK_FORMAT_R16G16_SFLOAT:
 		case VK_FORMAT_R16G16B16A16_SFLOAT:
@@ -629,6 +632,7 @@
 		case VK_FORMAT_A8B8G8R8_UINT_PACK32:
 		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
 		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
 		case VK_FORMAT_R16_UINT:
 		case VK_FORMAT_R16_SINT:
 		case VK_FORMAT_R16G16_UINT:
@@ -731,6 +735,8 @@
 		case VK_FORMAT_A8B8G8R8_SINT_PACK32:
 		case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
 		case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
 		case VK_FORMAT_R16_UINT:
 		case VK_FORMAT_R16_SINT:
 		case VK_FORMAT_R16_SFLOAT: