[fuchsia] Simplify memory allocation

Previously the Fuchsia-specific memory allocation logic tried to deal
with non-page-aligned addresses returned from zx_vmar_map(), but this
is no longer necessary because zx_vmar_map() is now guaranteed to
return a page-aligned address. It also tried to unmap partial pages,
but zx_vmar_unmap() now requires that len be page-aligned. This change
fixes both issues by removing the two redundant zx_vmar_unmap() calls.
deallocateExecutable() has also been updated to pass a page-aligned
size to zx_vmar_unmap().
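
As a rough sketch of the new contract (not the actual SwiftShader
code: unmapPageAligned() is a hypothetical wrapper, and assert()
stands in for the project's ASSERT macro), deallocation now rounds
the length up to the page size before unmapping:

    #include <cassert>
    #include <cstddef>
    #include <zircon/process.h>   // zx_vmar_root_self()
    #include <zircon/syscalls.h>  // zx_vmar_unmap()

    // Round size up to the next multiple of pageSize (a power of
    // two), mirroring the roundUp() helper in ExecutableMemory.cpp.
    static size_t roundUp(size_t size, size_t pageSize) {
        return (size + pageSize - 1) & ~(pageSize - 1);
    }

    // zx_vmar_unmap() requires len to be page-aligned, so pass the
    // rounded-up length rather than the raw byte count.
    void unmapPageAligned(void* memory, size_t bytes, size_t pageSize) {
        size_t length = roundUp(bytes, pageSize);
        zx_status_t status = zx_vmar_unmap(
            zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(memory),
            length);
        assert(status == ZX_OK);
    }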

This CL resolves test failures in some viz_unittests on Fuchsia.

Bug: chromium:961455
Change-Id: Ib53e46af853802679a55dacc8546c3f67a3486c7
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/34409
Tested-by: Sergey Ulanov <sergeyu@chromium.org>
Kokoro-Presubmit: kokoro <noreply+kokoro@google.com>
Reviewed-by: Nicolas Capens <nicolascapens@google.com>
diff --git a/src/Reactor/ExecutableMemory.cpp b/src/Reactor/ExecutableMemory.cpp
index 056d055..bb1285a 100644
--- a/src/Reactor/ExecutableMemory.cpp
+++ b/src/Reactor/ExecutableMemory.cpp
@@ -233,25 +233,10 @@
 			return nullptr;
 		}
 
-		zx_vaddr_t alignedReservation = roundUp(reservation, pageSize);
-		mapping = reinterpret_cast<void*>(alignedReservation);
+		// zx_vmar_map() returns a page-aligned address.
+		ASSERT(roundUp(reservation, pageSize) == reservation);
 
-		// Unmap extra memory reserved before the block.
-		if (alignedReservation != reservation) {
-			size_t prefix_size = alignedReservation - reservation;
-			status =
-				zx_vmar_unmap(zx_vmar_root_self(), reservation, prefix_size);
-			ASSERT(status == ZX_OK);
-			length -= prefix_size;
-		}
-
-		// Unmap extra memory at the end.
-		if (length > bytes) {
-			status = zx_vmar_unmap(
-				zx_vmar_root_self(), alignedReservation + bytes,
-				length - bytes);
-			ASSERT(status == ZX_OK);
-		}
+		mapping = reinterpret_cast<void*>(reservation);
 	#else
 		mapping = allocate(length, pageSize);
 	#endif
@@ -265,10 +250,12 @@
 		unsigned long oldProtection;
 		VirtualProtect(memory, bytes, PAGE_EXECUTE_READ, &oldProtection);
 	#elif defined(__Fuchsia__)
+		size_t pageSize = memoryPageSize();
+		size_t length = roundUp(bytes, pageSize);
 		zx_status_t status = zx_vmar_protect(
 			zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE,
-			reinterpret_cast<zx_vaddr_t>(memory), bytes);
-	    ASSERT(status != ZX_OK);
+			reinterpret_cast<zx_vaddr_t>(memory), length);
+		ASSERT(status == ZX_OK);
 	#else
 		mprotect(memory, bytes, PROT_READ | PROT_EXEC);
 	#endif
@@ -285,8 +272,11 @@
 		size_t length = (bytes + pageSize - 1) & ~(pageSize - 1);
 		munmap(memory, length);
 	#elif defined(__Fuchsia__)
-	    zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(memory),
-			          bytes);
+		size_t pageSize = memoryPageSize();
+		size_t length = roundUp(bytes, pageSize);
+		zx_status_t status = zx_vmar_unmap(
+		    zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(memory), length);
+		ASSERT(status == ZX_OK);
 	#else
 		mprotect(memory, bytes, PROT_READ | PROT_WRITE);
 		deallocate(memory);