[Fuchsia] Implement ExecutableMemory support on Fuchsia.

Added fuchsia-specific versions of allocateExecutable() and
markExecutable().

Bug: chromium:778467
Change-Id: I027f1409f7b0343e102f98fcc34f2e93c2d626e4
Reviewed-on: https://swiftshader-review.googlesource.com/c/23408
Reviewed-by: Alexis Hétu <sugoi@google.com>
Tested-by: Sergey Ulanov <sergeyu@chromium.org>
diff --git a/src/Reactor/ExecutableMemory.cpp b/src/Reactor/ExecutableMemory.cpp
index 04684ef..78aa72c 100644
--- a/src/Reactor/ExecutableMemory.cpp
+++ b/src/Reactor/ExecutableMemory.cpp
@@ -22,6 +22,10 @@
 	#endif
 	#include <windows.h>
 	#include <intrin.h>
+#elif defined(__Fuchsia__)
+	#include <unistd.h>
+	#include <zircon/process.h>
+	#include <zircon/syscalls.h>
 #else
 	#include <errno.h>
 	#include <sys/mman.h>
@@ -172,10 +176,17 @@
 	#endif
 }
 
+// Rounds |x| up to a multiple of |m|, where |m| is a power of 2.
+inline uintptr_t roundUp(uintptr_t x, uintptr_t m)
+{
+	ASSERT(m > 0 && (m & (m - 1)) == 0); // |m| must be a power of 2.
+	return (x + m - 1) & ~(m - 1);
+}
+
 void *allocateExecutable(size_t bytes)
 {
 	size_t pageSize = memoryPageSize();
-	size_t length = (bytes + pageSize - 1) & ~(pageSize - 1);
+	size_t length = roundUp(bytes, pageSize);
 	void *mapping;
 
 	#if defined(LINUX_ENABLE_NAMED_MMAP)
@@ -198,6 +209,39 @@
 		{
 			mapping = nullptr;
 		}
+	#elif defined(__Fuchsia__)
+		zx_handle_t vmo;
+		if (zx_vmo_create(length, ZX_VMO_NON_RESIZABLE, &vmo) != ZX_OK) {
+			return nullptr;
+		}
+		zx_vaddr_t reservation;
+		zx_status_t status = zx_vmar_map(
+			zx_vmar_root_self(), ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE,
+			0, vmo, 0, length, &reservation);
+		zx_handle_close(vmo);
+		if (status != ZX_OK) {
+			return nullptr;
+		}
+
+		zx_vaddr_t alignedReservation = roundUp(reservation, pageSize);
+		mapping = reinterpret_cast<void*>(alignedReservation);
+
+		// Unmap extra memory reserved before the block.
+		if (alignedReservation != reservation) {
+			size_t prefix_size = alignedReservation - reservation;
+			status =
+				zx_vmar_unmap(zx_vmar_root_self(), reservation, prefix_size);
+			ASSERT(status == ZX_OK);
+			length -= prefix_size;
+		}
+
+		// Unmap extra pages at the end; zx_vmar_unmap needs a page-aligned address.
+		size_t alignedBytes = roundUp(bytes, pageSize);
+		if (length > alignedBytes) {
+			status = zx_vmar_unmap(
+				zx_vmar_root_self(), alignedReservation + alignedBytes, length - alignedBytes);
+			ASSERT(status == ZX_OK);
+		}
 	#else
 		mapping = allocate(length, pageSize);
 	#endif
@@ -210,6 +254,11 @@
 	#if defined(_WIN32)
 		unsigned long oldProtection;
 		VirtualProtect(memory, bytes, PAGE_EXECUTE_READ, &oldProtection);
+	#elif defined(__Fuchsia__)
+		zx_status_t status = zx_vmar_protect(
+			zx_vmar_root_self(), ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_EXECUTE,
+			reinterpret_cast<zx_vaddr_t>(memory), bytes);
+		ASSERT(status == ZX_OK);
 	#else
 		mprotect(memory, bytes, PROT_READ | PROT_EXEC);
 	#endif
@@ -225,6 +274,9 @@
 		size_t pageSize = memoryPageSize();
 		size_t length = (bytes + pageSize - 1) & ~(pageSize - 1);
 		munmap(memory, length);
+	#elif defined(__Fuchsia__)
+		zx_vmar_unmap(zx_vmar_root_self(),
+			reinterpret_cast<zx_vaddr_t>(memory), bytes);
 	#else
 		mprotect(memory, bytes, PROT_READ | PROT_WRITE);
 		deallocate(memory);