Use allocateExecutable() in LLVMReactor

LLVMReactor was using the default memory mapper when creating its
SectionMemoryManager. The default memory mapper calls mmap() to
allocate memory on all platforms except Windows, but it doesn't pass
the MAP_JIT flag required to create executable memory on macOS, and on
Fuchsia the backing VMO must be marked executable with
zx_vmo_replace_as_executable(). As a result the allocated memory is
not executable, which leads to crashes when trying to run the
generated code. Update LLVMReactor to use a custom MemoryMapper
implementation that allocates memory through allocateMemoryPages().
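For reference, the macOS path now requests the JIT region roughly as
follows (illustrative sketch; the real call lives in
ExecutableMemory.cpp and falls back to a plain anonymous mapping on
older macOS versions):

    void *mapping = mmap(nullptr, length, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);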

Bug: chromium:1032622, b/145348318
Change-Id: Id188af269d80371b709e625c29b92ff546e9ba28
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/39408
Kokoro-Presubmit: kokoro <noreply+kokoro@google.com>
Tested-by: Sergey Ulanov <sergeyu@chromium.org>
Reviewed-by: Nicolas Capens <nicolascapens@google.com>
diff --git a/src/Reactor/ExecutableMemory.cpp b/src/Reactor/ExecutableMemory.cpp
index 40acb94..8d5ec55 100644
--- a/src/Reactor/ExecutableMemory.cpp
+++ b/src/Reactor/ExecutableMemory.cpp
@@ -88,6 +88,45 @@
 	#endif
 }
 
+#if defined(_WIN32)
+DWORD permissionsToProtectMode(int permissions)
+{
+	switch (permissions) {
+		case PERMISSION_READ:
+		  return PAGE_READONLY;
+		case PERMISSION_EXECUTE:
+		  return PAGE_EXECUTE;
+		case PERMISSION_READ | PERMISSION_WRITE:
+		  return PAGE_READWRITE;
+		case PERMISSION_READ | PERMISSION_EXECUTE:
+		  return PAGE_EXECUTE_READ;
+		case PERMISSION_READ | PERMISSION_WRITE | PERMISSION_EXECUTE:
+		  return PAGE_EXECUTE_READWRITE;
+	}
+	return PAGE_NOACCESS;
+}
+#endif
+
+#if !defined(_WIN32) && !defined(__Fuchsia__)
+int permissionsToMmapProt(int permissions)
+{
+	int result = 0;
+	if (permissions & PERMISSION_READ)
+	{
+		result |= PROT_READ;
+	}
+	if (permissions & PERMISSION_WRITE)
+	{
+		result |= PROT_WRITE;
+	}
+	if (permissions & PERMISSION_EXECUTE)
+	{
+		result |= PROT_EXEC;
+	}
+	return result;
+}
+#endif  // !defined(_WIN32) && !defined(__Fuchsia__)
+
 #if defined(LINUX_ENABLE_NAMED_MMAP)
 // Create a file descriptor for anonymous memory with the given
 // name. Returns -1 on failure.
@@ -135,6 +174,25 @@
 }
 #endif  // defined(LINUX_ENABLE_NAMED_MMAP)
 
+#if defined(__Fuchsia__)
+zx_vm_option_t permissionsToZxVmOptions(int permissions) {
+	zx_vm_option_t result = 0;
+	if (permissions & PERMISSION_READ)
+	{
+		result |= ZX_VM_PERM_READ;
+	}
+	if (permissions & PERMISSION_WRITE)
+	{
+		result |= ZX_VM_PERM_WRITE;
+	}
+	if (permissions & PERMISSION_EXECUTE)
+	{
+		result |= ZX_VM_PERM_EXECUTE;
+	}
+	return result;
+}
+#endif  // defined(__Fuchsia__)
+
 }  // anonymous namespace
 
 size_t memoryPageSize()
@@ -189,28 +247,30 @@
 	return (x + m - 1) & ~(m - 1);
 }
 
-void *allocateExecutable(size_t bytes)
+void *allocateMemoryPages(size_t bytes, int permissions, bool need_exec)
 {
 	size_t pageSize = memoryPageSize();
 	size_t length = roundUp(bytes, pageSize);
 	void *mapping = nullptr;
 
 	#if defined(LINUX_ENABLE_NAMED_MMAP)
+		int flags = MAP_PRIVATE;
+
 		// Try to name the memory region for the executable code,
 		// to aid profilers.
 		int anonFd = anonymousFd();
 		if(anonFd == -1)
 		{
-			mapping = mmap(nullptr, length, PROT_READ | PROT_WRITE,
-			               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+			flags |= MAP_ANONYMOUS;
 		}
 		else
 		{
 			ensureAnonFileSize(anonFd, length);
-			mapping = mmap(nullptr, length, PROT_READ | PROT_WRITE,
-			               MAP_PRIVATE, anonFd, 0);
 		}
 
+		mapping = mmap(
+			nullptr, length, permissionsToMmapProt(permissions), flags, anonFd, 0);
+
 		if(mapping == MAP_FAILED)
 		{
 			mapping = nullptr;
@@ -220,13 +280,15 @@
 		if (zx_vmo_create(length, 0, &vmo) != ZX_OK) {
 			return nullptr;
 		}
-		if (zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo) != ZX_OK) {
+		if (need_exec &&
+		    zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo) != ZX_OK)
+		{
 			return nullptr;
 		}
 		zx_vaddr_t reservation;
 		zx_status_t status = zx_vmar_map(
-			zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
-			0, vmo, 0, length, &reservation);
+			zx_vmar_root_self(), permissionsToZxVmOptions(permissions), 0, vmo,
+			0, length, &reservation);
 		zx_handle_close(vmo);
 		if (status != ZX_OK) {
 			return nullptr;
@@ -237,18 +299,18 @@
 
 		mapping = reinterpret_cast<void*>(reservation);
 	#elif defined(__APPLE__)
+		int prot = permissionsToMmapProt(permissions);
+		int flags = MAP_PRIVATE | MAP_ANONYMOUS;
 		// On macOS 10.14 and higher, executables that are code signed with the
 		// "runtime" option cannot execute writable memory by default. They can opt
 		// into this capability by specifying the "com.apple.security.cs.allow-jit"
 		// code signing entitlement and allocating the region with the MAP_JIT flag.
-		mapping = mmap(nullptr, length, PROT_READ | PROT_WRITE,
-		               MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
+		mapping = mmap(nullptr, length, prot, flags | MAP_JIT, -1, 0);
 
 		if(mapping == MAP_FAILED)
 		{
 			// Retry without MAP_JIT (for older macOS versions).
-			mapping = mmap(nullptr, length, PROT_READ | PROT_WRITE,
-			               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+			mapping = mmap(nullptr, length, prot, flags, -1, 0);
 		}
 
 		if(mapping == MAP_FAILED)
@@ -257,46 +319,58 @@
 		}
 	#else
 		mapping = allocate(length, pageSize);
+		protectMemoryPages(mapping, length, permissions);
 	#endif
 
 	return mapping;
 }
 
-void markExecutable(void *memory, size_t bytes)
+void protectMemoryPages(void *memory, size_t bytes, int permissions)
 {
+	if (bytes == 0)
+		return;
+	bytes = roundUp(bytes, memoryPageSize());
+
 	#if defined(_WIN32)
 		unsigned long oldProtection;
-		VirtualProtect(memory, bytes, PAGE_EXECUTE_READ, &oldProtection);
+		BOOL result =
+			VirtualProtect(memory, bytes, permissionsToProtectMode(permissions),
+			               &oldProtection);
+		ASSERT(result);
 	#elif defined(__Fuchsia__)
-		size_t pageSize = memoryPageSize();
-		size_t length = roundUp(bytes, pageSize);
 		zx_status_t status = zx_vmar_protect(
-			zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE,
-			reinterpret_cast<zx_vaddr_t>(memory), length);
+			zx_vmar_root_self(), permissionsToZxVmOptions(permissions),
+			reinterpret_cast<zx_vaddr_t>(memory), bytes);
 		ASSERT(status == ZX_OK);
 	#else
-		mprotect(memory, bytes, PROT_READ | PROT_EXEC);
+		int result =
+			mprotect(memory, bytes, permissionsToMmapProt(permissions));
+		ASSERT(result == 0);
 	#endif
 }
 
-void deallocateExecutable(void *memory, size_t bytes)
+void deallocateMemoryPages(void *memory, size_t bytes)
 {
 	#if defined(_WIN32)
 		unsigned long oldProtection;
-		VirtualProtect(memory, bytes, PAGE_READWRITE, &oldProtection);
+		BOOL result =
+			VirtualProtect(memory, bytes, PAGE_READWRITE, &oldProtection);
+		ASSERT(result);
 		deallocate(memory);
 	#elif defined(LINUX_ENABLE_NAMED_MMAP) || defined(__APPLE__)
 		size_t pageSize = memoryPageSize();
 		size_t length = (bytes + pageSize - 1) & ~(pageSize - 1);
-		munmap(memory, length);
+		int result = munmap(memory, length);
+		ASSERT(result == 0);
 	#elif defined(__Fuchsia__)
 		size_t pageSize = memoryPageSize();
 		size_t length = roundUp(bytes, pageSize);
 		zx_status_t status =  zx_vmar_unmap(
-		    zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(memory), length);
+			zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(memory), length);
 		ASSERT(status == ZX_OK);
 	#else
-		mprotect(memory, bytes, PROT_READ | PROT_WRITE);
+		int result = mprotect(memory, bytes, PROT_READ | PROT_WRITE);
+		ASSERT(result == 0);
 		deallocate(memory);
 	#endif
 }
diff --git a/src/Reactor/ExecutableMemory.hpp b/src/Reactor/ExecutableMemory.hpp
index 4c1ef33..b0ca05e 100644
--- a/src/Reactor/ExecutableMemory.hpp
+++ b/src/Reactor/ExecutableMemory.hpp
@@ -23,9 +23,21 @@
 
 size_t memoryPageSize();
 
-void *allocateExecutable(size_t bytes);   // Allocates memory that can be made executable using markExecutable()
-void markExecutable(void *memory, size_t bytes);
-void deallocateExecutable(void *memory, size_t bytes);
+enum MemoryPermission {
+	PERMISSION_READ = 1,
+	PERMISSION_WRITE = 2,
+	PERMISSION_EXECUTE = 4,
+};
+
+// Allocates memory with the specified permissions. If |need_exec| is true then
+// the allocated memory can later be marked executable using protectMemoryPages().
+void* allocateMemoryPages(size_t bytes, int permissions, bool need_exec);
+
+// Sets permissions for memory allocated with allocateMemoryPages().
+void protectMemoryPages(void *memory, size_t bytes, int permissions);
+
+// Releases memory allocated with allocateMemoryPages().
+void deallocateMemoryPages(void *memory, size_t bytes);
 
 template<typename P>
 P unaligned_read(P *address)
diff --git a/src/Reactor/LLVMReactor.cpp b/src/Reactor/LLVMReactor.cpp
index 64ee668..c76ff55 100644
--- a/src/Reactor/LLVMReactor.cpp
+++ b/src/Reactor/LLVMReactor.cpp
@@ -313,6 +313,72 @@
 {
 }
 
+class MemoryMapper : public llvm::SectionMemoryManager::MemoryMapper
+{
+public:
+	MemoryMapper() {}
+	~MemoryMapper() final {}
+
+	llvm::sys::MemoryBlock allocateMappedMemory(
+			llvm::SectionMemoryManager::AllocationPurpose purpose,
+			size_t numBytes, const llvm::sys::MemoryBlock *const nearBlock,
+			unsigned flags, std::error_code &errorCode) final {
+		errorCode = std::error_code();
+
+		// Round up numBytes to page size.
+		size_t pageSize = rr::memoryPageSize();
+		numBytes = (numBytes + pageSize - 1) & ~(pageSize - 1);
+
+		bool need_exec =
+			purpose == llvm::SectionMemoryManager::AllocationPurpose::Code;
+		void* addr = rr::allocateMemoryPages(
+			numBytes, flagsToPermissions(flags), need_exec);
+		if (!addr)
+			return llvm::sys::MemoryBlock();
+		return llvm::sys::MemoryBlock(addr, numBytes);
+	}
+
+	std::error_code protectMappedMemory(const llvm::sys::MemoryBlock &block,
+	                                    unsigned flags) {
+		// Round down base address to align with a page boundary. This matches
+		// DefaultMMapper behavior.
+		void* addr = block.base();
+		size_t size = block.size();
+		size_t pageSize = rr::memoryPageSize();
+		addr = reinterpret_cast<void*>(
+			reinterpret_cast<uintptr_t>(addr) & ~(pageSize - 1));
+		size += reinterpret_cast<uintptr_t>(block.base()) -
+			reinterpret_cast<uintptr_t>(addr);
+
+		rr::protectMemoryPages(addr, size, flagsToPermissions(flags));
+		return std::error_code();
+	}
+
+	std::error_code releaseMappedMemory(llvm::sys::MemoryBlock &block) {
+		rr::deallocateMemoryPages(block.base(), block.size());
+		return std::error_code();
+	}
+
+private:
+	int flagsToPermissions(unsigned flags) {
+		int result = 0;
+		if (flags & llvm::sys::Memory::MF_READ)
+		{
+			result |= rr::PERMISSION_READ;
+		}
+		if (flags & llvm::sys::Memory::MF_WRITE)
+		{
+			result |= rr::PERMISSION_WRITE;
+		}
+		if (flags & llvm::sys::Memory::MF_EXEC)
+		{
+			result |= rr::PERMISSION_EXECUTE;
+		}
+		return result;
+	}
+
+};
+
 // JITRoutine is a rr::Routine that holds a LLVM JIT session, compiler and
 // object layer as each routine may require different target machine
 // settings and no Reactor routine directly links against another.
@@ -355,7 +421,7 @@
 		objLayer(
 			session,
 			[this](llvm::orc::VModuleKey) {
-				return ObjLayer::Resources{std::make_shared<llvm::SectionMemoryManager>(), resolver};
+				return ObjLayer::Resources{std::make_shared<llvm::SectionMemoryManager>(&memoryMapper), resolver};
 			},
 			ObjLayer::NotifyLoadedFtor(),
 			[](llvm::orc::VModuleKey, const llvm::object::ObjectFile &Obj, const llvm::RuntimeDyld::LoadedObjectInfo &L) {
@@ -415,6 +481,7 @@
 	std::shared_ptr<llvm::TargetMachine> targetMachine;
 	llvm::orc::ExecutionSession session;
 	CompileLayer compileLayer;
+	MemoryMapper memoryMapper;
 	ObjLayer objLayer;
 	std::vector<const void *> addresses;
 };
diff --git a/src/Reactor/SubzeroReactor.cpp b/src/Reactor/SubzeroReactor.cpp
index 8683862..4b09748 100644
--- a/src/Reactor/SubzeroReactor.cpp
+++ b/src/Reactor/SubzeroReactor.cpp
@@ -43,11 +43,6 @@
 #define NOMINMAX
 #endif // !NOMINMAX
 #include <Windows.h>
-#else
-#include <sys/mman.h>
-#if !defined(MAP_ANONYMOUS)
-#define MAP_ANONYMOUS MAP_ANON
-#endif
 #endif
 
 #include <mutex>
@@ -474,12 +469,13 @@
 
 	T *allocate(size_type n)
 	{
-		return (T*)allocateExecutable(sizeof(T) * n);
+		return (T*)allocateMemoryPages(
+			sizeof(T) * n, PERMISSION_READ | PERMISSION_WRITE, true);
 	}
 
 	void deallocate(T *p, size_type n)
 	{
-		deallocateExecutable(p, sizeof(T) * n);
+		deallocateMemoryPages(p, sizeof(T) * n);
 	}
 };
 
@@ -497,13 +493,6 @@
 
 	~ELFMemoryStreamer() override
 	{
-		#if defined(_WIN32)
-			if(buffer.size() != 0)
-			{
-				DWORD exeProtection;
-				VirtualProtect(&buffer[0], buffer.size(), oldProtection, &exeProtection);
-			}
-		#endif
 	}
 
 	void write8(uint8_t Value) override
@@ -540,11 +529,10 @@
 		size_t codeSize = 0;
 		const void *entry = loadImage(&buffer[0], codeSize);
 
+		protectMemoryPages(&buffer[0], buffer.size(), PERMISSION_READ | PERMISSION_EXECUTE);
 #if defined(_WIN32)
-		VirtualProtect(&buffer[0], buffer.size(), PAGE_EXECUTE_READ, &oldProtection);
 		FlushInstructionCache(GetCurrentProcess(), NULL, 0);
 #else
-		mprotect(&buffer[0], buffer.size(), PROT_READ | PROT_EXEC);
 		__builtin___clear_cache((char*)entry, (char*)entry + codeSize);
 #endif
 		return entry;
@@ -576,10 +564,6 @@
 	std::vector<uint8_t, ExecutableAllocator<uint8_t>> buffer;
 	std::size_t position;
 	std::vector<std::unique_ptr<uint8_t[]>> constantData;
-
-	#if defined(_WIN32)
-	DWORD oldProtection;
-	#endif
 };
 
 Nucleus::Nucleus()