Fix how we calculate potential overflows with PBOs

We were previously checking whether we could read to the end of the
last row in a pixel buffer object without overrunning the buffer.
Instead, we need to check whether we can read up to the last byte
actually referenced by the call's parameters and the pixel unpack
state without overrunning the buffer.

Also added a unit test to ensure that we don't reject valid PBO
operations, and do reject invalid ones.
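
For reference, the new calculation is conceptually the following (a
simplified sketch with illustrative names; the real implementation is
in Context.cpp below):

    // Bytes from the start of the read to the last byte referenced,
    // where the skip* values come from the pixel unpack state.
    size_t requiredSize = (skipImages + depth - 1)  * bytesPerImage
                        + (skipRows   + height - 1) * bytesPerRow
                        + (skipPixels + width)      * bytesPerPixel;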

Bug: 140881221
Change-Id: I9b8879f1be30ec4fe6d60fe43f8ff4078f215868
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/37228
Kokoro-Presubmit: kokoro <noreply+kokoro@google.com>
Tested-by: Sean Risser <srisser@google.com>
Reviewed-by: Alexis Hétu <sugoi@google.com>
diff --git a/src/OpenGL/common/Image.cpp b/src/OpenGL/common/Image.cpp
index 695a825..4f84bcb 100644
--- a/src/OpenGL/common/Image.cpp
+++ b/src/OpenGL/common/Image.cpp
@@ -436,7 +436,7 @@
 
 	// Returns the size, in bytes, of a single client-side pixel.
     // OpenGL ES 3.0.5 table 3.2.
-	static int ComputePixelSize(GLenum format, GLenum type)
+	GLsizei ComputePixelSize(GLenum format, GLenum type)
 	{
 		switch(format)
 		{
diff --git a/src/OpenGL/common/Image.hpp b/src/OpenGL/common/Image.hpp
index 856283f..4dc53f4 100644
--- a/src/OpenGL/common/Image.hpp
+++ b/src/OpenGL/common/Image.hpp
@@ -58,6 +58,7 @@
 GLenum GetBaseInternalFormat(GLint internalformat);
 GLsizei ComputePitch(GLsizei width, GLenum format, GLenum type, GLint alignment);
 GLsizei ComputeCompressedSize(GLsizei width, GLsizei height, GLenum format);
+GLsizei ComputePixelSize(GLenum format, GLenum type);
 size_t ComputePackingOffset(GLenum format, GLenum type, GLsizei width, GLsizei height, const PixelStorageModes &storageModes);
 
 }
diff --git a/src/OpenGL/libGLESv2/Context.cpp b/src/OpenGL/libGLESv2/Context.cpp
index 35ec488..aec386c 100644
--- a/src/OpenGL/libGLESv2/Context.cpp
+++ b/src/OpenGL/libGLESv2/Context.cpp
@@ -1567,13 +1567,31 @@
 	return mState.genericUniformBuffer;
 }
 
+// The "required buffer size" is the number of bytes from the start of the
+// buffer to the last byte referenced within the buffer. If the caller of this
+// function has to worry about offsets within the buffer, it only needs to add
+// that byte offset to this function's return value to get its required buffer
+// size.
 size_t Context::getRequiredBufferSize(GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type) const
 {
-	GLsizei inputWidth = (mState.unpackParameters.rowLength == 0) ? width : mState.unpackParameters.rowLength;
-	GLsizei inputPitch = gl::ComputePitch(inputWidth, format, type, mState.unpackParameters.alignment);
-	GLsizei inputHeight = (mState.unpackParameters.imageHeight == 0) ? height : mState.unpackParameters.imageHeight;
+	// 0-dimensional images have no bytes in them.
+	if(width == 0 || height == 0 || depth == 0)
+	{
+		return 0;
+	}
 
-	return static_cast<size_t>(inputPitch) * inputHeight * depth;
+	GLint pixelsPerRow = (mState.unpackParameters.rowLength > 0) ? mState.unpackParameters.rowLength : width;
+	GLint rowsPerImage = (mState.unpackParameters.imageHeight > 0) ? mState.unpackParameters.imageHeight : height;
+
+	GLint bytesPerPixel = gl::ComputePixelSize(format, type);
+	GLint bytesPerRow = gl::ComputePitch(pixelsPerRow, format, type, mState.unpackParameters.alignment);
+	GLint bytesPerImage = rowsPerImage * bytesPerRow;
+
+	// We subtract 1 from depth and height, but not from width, because we
+	// don't read all of the last image or the last row, but we do read all of
+	// the last pixel.
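+	// Worked example (hypothetical numbers): a 2x2x1 RGBA/UNSIGNED_BYTE upload
+	// with no skips, row length 2 and alignment 1 gives bytesPerPixel = 4,
+	// bytesPerRow = 8 and bytesPerImage = 16, so we require
+	// 0*16 + 1*8 + 2*4 = 16 bytes: the full first row, then both pixels of
+	// the last row.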
+	return (mState.unpackParameters.skipImages + (depth - 1))  * bytesPerImage
+		 + (mState.unpackParameters.skipRows   + (height - 1)) * bytesPerRow
+		 + (mState.unpackParameters.skipPixels + (width))      * bytesPerPixel;
 }
 
 GLenum Context::getPixels(const GLvoid **pixels, GLenum type, size_t imageSize) const
diff --git a/tests/GLESUnitTests/unittests.cpp b/tests/GLESUnitTests/unittests.cpp
index 208fbe3..2b01528 100644
--- a/tests/GLESUnitTests/unittests.cpp
+++ b/tests/GLESUnitTests/unittests.cpp
@@ -952,6 +952,84 @@
 	Uninitialize();
 }
 
+// Tests copying to a texture from a pixel buffer object
+TEST_F(SwiftShaderTest, CopyTexImageFromPixelBuffer)
+{
+	Initialize(3, false);
+	const GLuint red   = 0xff0000ff;
+	const GLuint green = 0x00ff00ff;
+	const GLuint blue  = 0x0000ffff;
+	// Set up texture
+	GLuint texture = 0;
+	glGenTextures(1, &texture);
+	EXPECT_NO_GL_ERROR();
+	GLuint tex_data[4][4] = {
+		{red, red, red, red},
+		{red, red, red, red},
+		{red, red, red, red},
+		{red, red, red, red}
+	};
+	glBindTexture(GL_TEXTURE_2D, texture);
+	EXPECT_NO_GL_ERROR();
+	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 4, 4, 0, GL_RGBA, GL_UNSIGNED_BYTE, (void *) tex_data[0]);
+	EXPECT_NO_GL_ERROR();
+	// Set up Pixel Buffer Object
+	GLuint pixelBuffer = 0;
+	glGenBuffers(1, &pixelBuffer);
+	glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pixelBuffer);
+	EXPECT_NO_GL_ERROR();
+	glPixelStorei(GL_UNPACK_ROW_LENGTH, 4);
+	glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
+	EXPECT_NO_GL_ERROR();
+	GLuint pixel_data[4][4] = {
+		{blue, blue, green, green},
+		{blue, blue, green, green},
+		{blue, blue, green, green},
+		{blue, blue, green, green},
+	};
+	glBufferData(GL_PIXEL_UNPACK_BUFFER, sizeof(pixel_data), (void *) pixel_data, GL_STREAM_DRAW);
+	// This should set the two rightmost columns of the currently bound texture
+	// to the two rightmost columns of the PBO.
+	GLintptr offset = 2 * sizeof(GLuint);
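+	// (With GL_UNPACK_ROW_LENGTH = 4, this read references bytes up to
+	// offset + (4-1)*16 + 2*4 = 8 + 56 = 64, exactly the size of the PBO,
+	// so the operation is valid.)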
+	glTexSubImage2D(GL_TEXTURE_2D, 0, 2, 0, 2, 4, GL_RGBA, GL_UNSIGNED_BYTE, reinterpret_cast<void *>(offset));
+	EXPECT_NO_GL_ERROR();
+	// Create an off-screen framebuffer to render the texture data to.
+	GLuint fbo = 0;
+	glGenFramebuffers(1, &fbo);
+	glBindFramebuffer(GL_FRAMEBUFFER, fbo);
+	glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0);
+	EXPECT_NO_GL_ERROR();
+	unsigned int color[4][4] = {
+		{0, 0, 0, 0},
+		{0, 0, 0, 0},
+		{0, 0, 0, 0},
+		{0, 0, 0, 0}
+	};
+	glReadPixels(0, 0, 4, 4, GL_RGBA, GL_UNSIGNED_BYTE, &color);
+	EXPECT_NO_GL_ERROR();
+	bool allEqual = true;
+	for (int i = 0; i < 4; i++)
+	{
+		for (int j = 0; j < 2; j++)
+		{
+			allEqual = allEqual && (color[i][j] == tex_data[i][j]);
+			allEqual = allEqual && (color[i][j+2] == pixel_data[i][j+2]);
+			if (!allEqual)
+				break;
+		}
+		if (!allEqual)
+			break;
+	}
+	EXPECT_TRUE(allEqual);
+	// We can't use an offset of 3 GLuints or more, because the PBO is not large
+	// enough to satisfy such a request with the current GL_UNPACK_ROW_LENGTH.
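+	// (That offset would reference bytes up to 12 + (4-1)*16 + 2*4 = 68,
+	// past the end of the 64-byte PBO.)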
+	offset = 3 * sizeof(GLuint);
+	glTexSubImage2D(GL_TEXTURE_2D, 0, 2, 0, 2, 4, GL_RGBA, GL_UNSIGNED_BYTE, reinterpret_cast<void *>(offset));
+	GLenum error = glGetError();
+	EXPECT_GLENUM_EQ(GL_INVALID_OPERATION, error);
+	Uninitialize();
+}
+
 // Tests reading of half-float textures.
 TEST_F(SwiftShaderTest, ReadHalfFloat)
 {