Fix integer overflow in image size calculation
GLsizei is historically defined as a 32-bit signed integer, whose values
are not allowed to be negative. But arithmetic on such values can
overflow — which is undefined behavior for signed integers in C++ — and
produce garbage results, including negative ones.
Use size_t instead for these image data size calculations. It's defined
as an unsigned integer type large enough to represent the size of any
valid memory buffer. In practice that's 32 bits on 32-bit platforms and
64 bits on 64-bit ones, which is large enough to avoid the overflow
observed in the OutOfMemory test.
Bug: b/140421004
Change-Id: Id05dd64d470f579455fdc9925b0ab303ecaf37fb
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/36968
Tested-by: Nicolas Capens <nicolascapens@google.com>
Reviewed-by: Alexis Hétu <sugoi@google.com>
diff --git a/src/OpenGL/libGLESv2/Context.cpp b/src/OpenGL/libGLESv2/Context.cpp
index e03abc2..35ec488 100644
--- a/src/OpenGL/libGLESv2/Context.cpp
+++ b/src/OpenGL/libGLESv2/Context.cpp
@@ -1567,15 +1567,16 @@
return mState.genericUniformBuffer;
}
-GLsizei Context::getRequiredBufferSize(GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type) const
+size_t Context::getRequiredBufferSize(GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type) const
{
GLsizei inputWidth = (mState.unpackParameters.rowLength == 0) ? width : mState.unpackParameters.rowLength;
GLsizei inputPitch = gl::ComputePitch(inputWidth, format, type, mState.unpackParameters.alignment);
GLsizei inputHeight = (mState.unpackParameters.imageHeight == 0) ? height : mState.unpackParameters.imageHeight;
- return inputPitch * inputHeight * depth;
+
+ return static_cast<size_t>(inputPitch) * inputHeight * depth;
}
-GLenum Context::getPixels(const GLvoid **pixels, GLenum type, GLsizei imageSize) const
+GLenum Context::getPixels(const GLvoid **pixels, GLenum type, size_t imageSize) const
{
if(mState.pixelUnpackBuffer)
{
@@ -1598,7 +1599,7 @@
return GL_INVALID_OPERATION;
}
- if(mState.pixelUnpackBuffer->size() - offset < static_cast<size_t>(imageSize))
+ if(mState.pixelUnpackBuffer->size() - offset < imageSize)
{
return GL_INVALID_OPERATION;
}
diff --git a/src/OpenGL/libGLESv2/Context.h b/src/OpenGL/libGLESv2/Context.h
index 4b37709..5eb28b5 100644
--- a/src/OpenGL/libGLESv2/Context.h
+++ b/src/OpenGL/libGLESv2/Context.h
@@ -637,8 +637,8 @@
Buffer *getPixelPackBuffer() const;
Buffer *getPixelUnpackBuffer() const;
Buffer *getGenericUniformBuffer() const;
- GLsizei getRequiredBufferSize(GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type) const;
- GLenum getPixels(const GLvoid **data, GLenum type, GLsizei imageSize) const;
+ size_t getRequiredBufferSize(GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type) const;
+ GLenum getPixels(const GLvoid **data, GLenum type, size_t imageSize) const;
bool getBuffer(GLenum target, es2::Buffer **buffer) const;
Program *getCurrentProgram() const;
Texture *getTargetTexture(GLenum target) const;