Fix undefined behavior in minifloat conversion
Shifting a 32-bit value by 32 or more bits is undefined behavior in C++.
This previously happened in this code when converting a 32-bit float
value that is too small to be represented even as a denormal in an 11-
or 10-bit minifloat; such values should produce 0 instead.
Instead of going through the denormal arithmetic for such values, just
test up front whether the input is too small to produce a valid
denormal, and return 0 early.
Bug: chromium:1117433
Change-Id: I8149996fb6d66d328db45725c4cdb81dc7826a10
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/48069
Presubmit-Ready: Nicolas Capens <nicolascapens@google.com>
Tested-by: Nicolas Capens <nicolascapens@google.com>
Kokoro-Result: kokoro <noreply+kokoro@google.com>
Reviewed-by: Alexis Hétu <sugoi@google.com>
diff --git a/src/System/Half.hpp b/src/System/Half.hpp
index 5775141..84025ef 100644
--- a/src/System/Half.hpp
+++ b/src/System/Half.hpp
@@ -174,7 +174,8 @@
const unsigned int float11ExponentBias = 14;
const unsigned int float32Maxfloat11 = 0x477E0000;
- const unsigned int float32Minfloat11 = 0x38800000;
+ const unsigned int float32MinNormfloat11 = 0x38800000;
+ const unsigned int float32MinDenormfloat11 = 0x35000080;
const unsigned int float32Bits = *reinterpret_cast<unsigned int *>(&fp32);
const bool float32Sign = (float32Bits & float32SignMask) == float32SignMask;
@@ -210,9 +211,14 @@
// The number is too large to be represented as a float11, set to max
return float11Max;
}
+ else if(float32Val < float32MinDenormfloat11)
+ {
+ // The number is too small to be represented as a denormalized float11, set to 0
+ return 0;
+ }
else
{
- if(float32Val < float32Minfloat11)
+ if(float32Val < float32MinNormfloat11)
{
// The number is too small to be represented as a normalized float11
// Convert it to a denormalized value.
@@ -247,7 +253,8 @@
const unsigned int float10ExponentBias = 14;
const unsigned int float32Maxfloat10 = 0x477C0000;
- const unsigned int float32Minfloat10 = 0x38800000;
+ const unsigned int float32MinNormfloat10 = 0x38800000;
+ const unsigned int float32MinDenormfloat10 = 0x35800040;
const unsigned int float32Bits = *reinterpret_cast<unsigned int *>(&fp32);
const bool float32Sign = (float32Bits & float32SignMask) == float32SignMask;
@@ -283,9 +290,14 @@
// The number is too large to be represented as a float10, set to max
return float10Max;
}
+ else if(float32Val < float32MinDenormfloat10)
+ {
+ // The number is too small to be represented as a denormalized float10, set to 0
+ return 0;
+ }
else
{
- if(float32Val < float32Minfloat10)
+ if(float32Val < float32MinNormfloat10)
{
// The number is too small to be represented as a normalized float10
// Convert it to a denormalized value.