Subzero: Support non-sequentially-consistent memory orderings for atomic ops.

The actual code lowering is unchanged; only the validation is relaxed so that, in addition to seq_cst, the other orderings PNaCl allows (acquire, release, and acq_rel) are accepted wherever C++11 permits them for the given intrinsic.
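
For reference, the combinations that now pass validation can be summarized as follows. This is an illustrative, standalone C++ sketch of the intended rules, not the Subzero code itself; the helper names are made up here, and it assumes the PNaCl encoding 1=relaxed, 2=consume, 3=acquire, 4=release, 5=acq_rel, 6=seq_cst:

  // Standalone sketch of the relaxed acceptance rules (illustration only).
  #include <cassert>
  #include <cstdint>

  enum Order : uint64_t { Relaxed = 1, Consume, Acquire, Release, AcqRel, SeqCst };

  // PNaCl still only accepts acquire..seq_cst; relaxed, consume, and
  // out-of-range values remain rejected for every atomic intrinsic.
  static bool validPNaCl(uint64_t O) { return O >= Acquire && O <= SeqCst; }

  // Loads may be acquire or seq_cst; stores may be release or seq_cst.
  static bool validLoad(uint64_t O) { return O == Acquire || O == SeqCst; }
  static bool validStore(uint64_t O) { return O == Release || O == SeqCst; }

  // RMW operations and fences take any of acquire, release, acq_rel, seq_cst.
  static bool validRMWOrFence(uint64_t O) { return validPNaCl(O); }

  // Cmpxchg: both orders must pass the PNaCl check, the failure order may not
  // be release or acq_rel, and it may not be stronger than the success order.
  // A release success order is effectively impossible because C++11 would then
  // require a relaxed failure order, which PNaCl rejects.  Net result: only
  // seq_cst/seq_cst, seq_cst/acquire, acq_rel/acquire, and acquire/acquire pass.
  static bool validCmpxchg(uint64_t Success, uint64_t Failure) {
    if (!validPNaCl(Success) || !validPNaCl(Failure))
      return false;
    if (Failure == Release || Failure == AcqRel || Success == Release)
      return false;
    return Failure <= Success;
  }

  int main() {
    assert(validLoad(Acquire) && !validLoad(Release));
    assert(validStore(Release) && !validStore(Acquire));
    assert(validCmpxchg(SeqCst, Acquire) && !validCmpxchg(Acquire, SeqCst));
    return 0;
  }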

BUG= https://code.google.com/p/nativeclient/issues/detail?id=4029
R=jfb@chromium.org

Review URL: https://codereview.chromium.org/1017453007
diff --git a/src/IceGlobalContext.cpp b/src/IceGlobalContext.cpp
index 8f20129..6badc55 100644
--- a/src/IceGlobalContext.cpp
+++ b/src/IceGlobalContext.cpp
@@ -195,7 +195,8 @@
     if (Func->hasError()) {
       getErrorStatus()->assign(EC_Translation);
       OstreamLocker L(this);
-      getStrDump() << "ICE translation error: " << Func->getError() << "\n";
+      getStrDump() << "ICE translation error: " << Func->getFunctionName()
+                   << ": " << Func->getError() << "\n";
       Item = new EmitterWorkItem(Func->getSequenceNumber());
     } else {
       Func->getAssembler<>()->setInternal(Func->getInternal());
diff --git a/src/IceIntrinsics.cpp b/src/IceIntrinsics.cpp
index 26a81de..d528027 100644
--- a/src/IceIntrinsics.cpp
+++ b/src/IceIntrinsics.cpp
@@ -233,9 +233,73 @@
   return &it->second;
 }
 
-bool Intrinsics::VerifyMemoryOrder(uint64_t Order) {
-  // There is only one memory ordering for atomics allowed right now.
-  return Order == Intrinsics::MemoryOrderSequentiallyConsistent;
+namespace {
+
+// Returns whether PNaCl allows the given memory ordering in general.
+bool isMemoryOrderValidPNaCl(uint64_t Order) {
+  switch (Order) {
+  case Intrinsics::MemoryOrderAcquire:
+  case Intrinsics::MemoryOrderRelease:
+  case Intrinsics::MemoryOrderAcquireRelease:
+  case Intrinsics::MemoryOrderSequentiallyConsistent:
+    return true;
+  default:
+    return false;
+  }
+}
+
+} // end of anonymous namespace
+
+bool Intrinsics::isMemoryOrderValid(IntrinsicID ID, uint64_t Order,
+                                    uint64_t OrderOther) {
+  // Reject orderings not allowed in PNaCl.
+  if (!isMemoryOrderValidPNaCl(Order))
+    return false;
+  if (ID == AtomicCmpxchg && !isMemoryOrderValidPNaCl(OrderOther))
+    return false;
+  // Reject orderings not allowed by C++11.
+  switch (ID) {
+  default:
+    llvm_unreachable("isMemoryOrderValid: Unknown IntrinsicID");
+    return false;
+  case AtomicFence:
+  case AtomicFenceAll:
+  case AtomicRMW:
+    return true;
+  case AtomicCmpxchg:
+    // Reject orderings that are disallowed by C++11 as invalid
+    // combinations for cmpxchg.
+    switch (OrderOther) {
+    case MemoryOrderRelaxed:
+    case MemoryOrderConsume:
+    case MemoryOrderAcquire:
+    case MemoryOrderSequentiallyConsistent:
+      if (OrderOther > Order)
+        return false;
+      if (Order == MemoryOrderRelease && OrderOther != MemoryOrderRelaxed)
+        return false;
+      return true;
+    default:
+      return false;
+    }
+  case AtomicLoad:
+    switch (Order) {
+    case MemoryOrderRelease:
+    case MemoryOrderAcquireRelease:
+      return false;
+    default:
+      return true;
+    }
+  case AtomicStore:
+    switch (Order) {
+    case MemoryOrderConsume:
+    case MemoryOrderAcquire:
+    case MemoryOrderAcquireRelease:
+      return false;
+    default:
+      return true;
+    }
+  }
 }
 
 Intrinsics::ValidateCallValue
diff --git a/src/IceIntrinsics.h b/src/IceIntrinsics.h
index fb066b9..74702c2 100644
--- a/src/IceIntrinsics.h
+++ b/src/IceIntrinsics.h
@@ -91,7 +91,14 @@
     MemoryOrderNum // Invalid, keep last.
   };
 
-  static bool VerifyMemoryOrder(uint64_t Order);
+  // Verify memory ordering rules for atomic intrinsics.  For
+  // AtomicCmpxchg, Order is the "success" ordering and OrderOther is
+  // the "failure" ordering.  Returns true if valid, false if invalid.
+  // TODO(stichnot,kschimpf): Perform memory order validation in the
+  // bitcode reader/parser, allowing LLVM and Subzero to share.  See
+  // https://code.google.com/p/nativeclient/issues/detail?id=4126 .
+  static bool isMemoryOrderValid(IntrinsicID ID, uint64_t Order,
+                                 uint64_t OrderOther = MemoryOrderInvalid);
 
   enum SideEffects { SideEffects_F = 0, SideEffects_T = 1 };
 
diff --git a/src/IceTargetLoweringX8632.cpp b/src/IceTargetLoweringX8632.cpp
index ebfc4d2..42f513b 100644
--- a/src/IceTargetLoweringX8632.cpp
+++ b/src/IceTargetLoweringX8632.cpp
@@ -2856,17 +2856,25 @@
   }
 }
 
+namespace {
+
+// Converts a ConstantInteger32 operand into its constant value, or
+// MemoryOrderInvalid if the operand is not a ConstantInteger32.
+uint64_t getConstantMemoryOrder(Operand *Opnd) {
+  if (auto Integer = llvm::dyn_cast<ConstantInteger32>(Opnd))
+    return Integer->getValue();
+  return Intrinsics::MemoryOrderInvalid;
+}
+
+} // end of anonymous namespace
+
 void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
-  switch (Instr->getIntrinsicInfo().ID) {
+  switch (Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID) {
   case Intrinsics::AtomicCmpxchg: {
-    if (!Intrinsics::VerifyMemoryOrder(
-            llvm::cast<ConstantInteger32>(Instr->getArg(3))->getValue())) {
-      Func->setError("Unexpected memory ordering (success) for AtomicCmpxchg");
-      return;
-    }
-    if (!Intrinsics::VerifyMemoryOrder(
-            llvm::cast<ConstantInteger32>(Instr->getArg(4))->getValue())) {
-      Func->setError("Unexpected memory ordering (failure) for AtomicCmpxchg");
+    if (!Intrinsics::isMemoryOrderValid(
+            ID, getConstantMemoryOrder(Instr->getArg(3)),
+            getConstantMemoryOrder(Instr->getArg(4)))) {
+      Func->setError("Unexpected memory ordering for AtomicCmpxchg");
       return;
     }
     Variable *DestPrev = Instr->getDest();
@@ -2879,8 +2887,8 @@
     return;
   }
   case Intrinsics::AtomicFence:
-    if (!Intrinsics::VerifyMemoryOrder(
-            llvm::cast<ConstantInteger32>(Instr->getArg(0))->getValue())) {
+    if (!Intrinsics::isMemoryOrderValid(
+            ID, getConstantMemoryOrder(Instr->getArg(0)))) {
       Func->setError("Unexpected memory ordering for AtomicFence");
       return;
     }
@@ -2925,8 +2933,8 @@
   case Intrinsics::AtomicLoad: {
     // We require the memory address to be naturally aligned.
     // Given that is the case, then normal loads are atomic.
-    if (!Intrinsics::VerifyMemoryOrder(
-            llvm::cast<ConstantInteger32>(Instr->getArg(1))->getValue())) {
+    if (!Intrinsics::isMemoryOrderValid(
+            ID, getConstantMemoryOrder(Instr->getArg(1)))) {
       Func->setError("Unexpected memory ordering for AtomicLoad");
       return;
     }
@@ -2958,8 +2966,8 @@
     return;
   }
   case Intrinsics::AtomicRMW:
-    if (!Intrinsics::VerifyMemoryOrder(
-            llvm::cast<ConstantInteger32>(Instr->getArg(3))->getValue())) {
+    if (!Intrinsics::isMemoryOrderValid(
+            ID, getConstantMemoryOrder(Instr->getArg(3)))) {
       Func->setError("Unexpected memory ordering for AtomicRMW");
       return;
     }
@@ -2969,8 +2977,8 @@
                    Instr->getArg(1), Instr->getArg(2));
     return;
   case Intrinsics::AtomicStore: {
-    if (!Intrinsics::VerifyMemoryOrder(
-            llvm::cast<ConstantInteger32>(Instr->getArg(2))->getValue())) {
+    if (!Intrinsics::isMemoryOrderValid(
+            ID, getConstantMemoryOrder(Instr->getArg(2)))) {
       Func->setError("Unexpected memory ordering for AtomicStore");
       return;
     }
@@ -4485,6 +4493,8 @@
     Constant *Offset = llvm::dyn_cast<Constant>(Operand);
     assert(Base || Offset);
     if (Offset) {
+      // Make sure Offset is not undef.
+      Offset = llvm::cast<Constant>(legalize(Offset));
       assert(llvm::isa<ConstantInteger32>(Offset) ||
              llvm::isa<ConstantRelocatable>(Offset));
     }
diff --git a/tests_lit/llvm2ice_tests/abi-atomics.ll b/tests_lit/llvm2ice_tests/abi-atomics.ll
new file mode 100644
index 0000000..dad86dd
--- /dev/null
+++ b/tests_lit/llvm2ice_tests/abi-atomics.ll
@@ -0,0 +1,534 @@
+; This file is copied/adapted from llvm/test/NaCl/PNaClABI/abi-atomics.ll .
+; TODO(stichnot): Find a way to share the file to avoid divergence.
+
+; RUN: %p2i -i %s --args --verbose none --exit-success -threads=0 2>&1 \
+; RUN:   | FileCheck %s
+
+declare i8 @llvm.nacl.atomic.load.i8(i8*, i32)
+declare i16 @llvm.nacl.atomic.load.i16(i16*, i32)
+declare i32 @llvm.nacl.atomic.load.i32(i32*, i32)
+declare i64 @llvm.nacl.atomic.load.i64(i64*, i32)
+declare void @llvm.nacl.atomic.store.i8(i8, i8*, i32)
+declare void @llvm.nacl.atomic.store.i16(i16, i16*, i32)
+declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32)
+declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32)
+declare i8 @llvm.nacl.atomic.rmw.i8(i32, i8*, i8, i32)
+declare i16 @llvm.nacl.atomic.rmw.i16(i32, i16*, i16, i32)
+declare i32 @llvm.nacl.atomic.rmw.i32(i32, i32*, i32, i32)
+declare i64 @llvm.nacl.atomic.rmw.i64(i32, i64*, i64, i32)
+declare i8 @llvm.nacl.atomic.cmpxchg.i8(i8*, i8, i8, i32, i32)
+declare i16 @llvm.nacl.atomic.cmpxchg.i16(i16*, i16, i16, i32, i32)
+declare i32 @llvm.nacl.atomic.cmpxchg.i32(i32*, i32, i32, i32, i32)
+declare i64 @llvm.nacl.atomic.cmpxchg.i64(i64*, i64, i64, i32, i32)
+declare void @llvm.nacl.atomic.fence(i32)
+declare void @llvm.nacl.atomic.fence.all()
+declare i1 @llvm.nacl.atomic.is.lock.free(i32, i8*)
+
+
+; Load
+
+define internal i32 @test_load_invalid_7() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 7)
+  ret i32 %1
+}
+; CHECK: test_load_invalid_7: Unexpected memory ordering for AtomicLoad
+
+define internal i32 @test_load_invalid_0() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 0)
+  ret i32 %1
+}
+; CHECK: test_load_invalid_0: Unexpected memory ordering for AtomicLoad
+
+define internal i32 @test_load_seqcst() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
+  ret i32 %1
+}
+; CHECK-LABEL: test_load_seqcst
+
+define internal i32 @test_load_acqrel() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 5)
+  ret i32 %1
+}
+; CHECK: test_load_acqrel: Unexpected memory ordering for AtomicLoad
+
+define internal i32 @test_load_release() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 4)
+  ret i32 %1
+}
+; CHECK: test_load_release: Unexpected memory ordering for AtomicLoad
+
+define internal i32 @test_load_acquire() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 3)
+  ret i32 %1
+}
+; CHECK-LABEL: test_load_acquire
+
+define internal i32 @test_load_consume() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 2)
+  ret i32 %1
+}
+; CHECK: test_load_consume: Unexpected memory ordering for AtomicLoad
+
+define internal i32 @test_load_relaxed() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 1)
+  ret i32 %1
+}
+; CHECK: test_load_relaxed: Unexpected memory ordering for AtomicLoad
+
+
+; Store
+
+define internal void @test_store_invalid_7() {
+  %ptr = inttoptr i32 undef to i32*
+  call void @llvm.nacl.atomic.store.i32(i32 undef, i32* %ptr, i32 7)
+  ret void
+}
+; CHECK: test_store_invalid_7: Unexpected memory ordering for AtomicStore
+
+define internal void @test_store_invalid_0() {
+  %ptr = inttoptr i32 undef to i32*
+  call void @llvm.nacl.atomic.store.i32(i32 undef, i32* %ptr, i32 0)
+  ret void
+}
+; CHECK: test_store_invalid_0: Unexpected memory ordering for AtomicStore
+
+define internal void @test_store_seqcst() {
+  %ptr = inttoptr i32 undef to i32*
+  call void @llvm.nacl.atomic.store.i32(i32 undef, i32* %ptr, i32 6)
+  ret void
+}
+; CHECK-LABEL: test_store_seqcst
+
+define internal void @test_store_acqrel() {
+  %ptr = inttoptr i32 undef to i32*
+  call void @llvm.nacl.atomic.store.i32(i32 undef, i32* %ptr, i32 5)
+  ret void
+}
+; CHECK: test_store_acqrel: Unexpected memory ordering for AtomicStore
+
+define internal void @test_store_release() {
+  %ptr = inttoptr i32 undef to i32*
+  call void @llvm.nacl.atomic.store.i32(i32 undef, i32* %ptr, i32 4)
+  ret void
+}
+; CHECK-LABEL: test_store_release
+
+define internal void @test_store_acquire() {
+  %ptr = inttoptr i32 undef to i32*
+  call void @llvm.nacl.atomic.store.i32(i32 undef, i32* %ptr, i32 3)
+  ret void
+}
+; CHECK: test_store_acquire: Unexpected memory ordering for AtomicStore
+
+define internal void @test_store_consume() {
+  %ptr = inttoptr i32 undef to i32*
+  call void @llvm.nacl.atomic.store.i32(i32 undef, i32* %ptr, i32 2)
+  ret void
+}
+; CHECK: test_store_consume: Unexpected memory ordering for AtomicStore
+
+define internal void @test_store_relaxed() {
+  %ptr = inttoptr i32 undef to i32*
+  call void @llvm.nacl.atomic.store.i32(i32 undef, i32* %ptr, i32 1)
+  ret void
+}
+; CHECK: test_store_relaxed: Unexpected memory ordering for AtomicStore
+
+
+; rmw
+
+define internal i32 @test_rmw_invalid_7() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 0, i32 7)
+  ret i32 %1
+}
+; CHECK: test_rmw_invalid_7: Unexpected memory ordering for AtomicRMW
+
+define internal i32 @test_rmw_invalid_0() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 0, i32 0)
+  ret i32 %1
+}
+; CHECK: test_rmw_invalid_0: Unexpected memory ordering for AtomicRMW
+
+define internal i32 @test_rmw_seqcst() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 0, i32 6)
+  ret i32 %1
+}
+; CHECK-LABEL: test_rmw_seqcst
+
+define internal i32 @test_rmw_acqrel() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 0, i32 5)
+  ret i32 %1
+}
+; CHECK-LABEL: test_rmw_acqrel
+
+define internal i32 @test_rmw_release() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 0, i32 4)
+  ret i32 %1
+}
+; CHECK-LABEL: test_rmw_release
+
+define internal i32 @test_rmw_acquire() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 0, i32 3)
+  ret i32 %1
+}
+; CHECK-LABEL: test_rmw_acquire
+
+define internal i32 @test_rmw_consume() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 0, i32 2)
+  ret i32 %1
+}
+; CHECK: test_rmw_consume: Unexpected memory ordering for AtomicRMW
+
+define internal i32 @test_rmw_relaxed() {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 0, i32 1)
+  ret i32 %1
+}
+; CHECK: test_rmw_relaxed: Unexpected memory ordering for AtomicRMW
+
+
+; cmpxchg
+
+define internal i32 @test_cmpxchg_invalid_7(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 7, i32 7)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_invalid_7: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_invalid_0(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 0, i32 0)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_invalid_0: Unexpected memory ordering for AtomicCmpxchg
+
+; seq_cst
+
+define internal i32 @test_cmpxchg_seqcst_seqcst(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 6, i32 6)
+  ret i32 %1
+}
+; CHECK-LABEL: test_cmpxchg_seqcst_seqcst
+
+define internal i32 @test_cmpxchg_seqcst_acqrel(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 6, i32 5)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_seqcst_acqrel: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_seqcst_release(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 6, i32 4)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_seqcst_release: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_seqcst_acquire(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 6, i32 3)
+  ret i32 %1
+}
+; CHECK-LABEL: test_cmpxchg_seqcst_acquire
+
+define internal i32 @test_cmpxchg_seqcst_consume(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 6, i32 2)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_seqcst_consume: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_seqcst_relaxed(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 6, i32 1)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_seqcst_relaxed: Unexpected memory ordering for AtomicCmpxchg
+
+; acq_rel
+
+define internal i32 @test_cmpxchg_acqrel_seqcst(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 5, i32 6)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_acqrel_seqcst: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_acqrel_acqrel(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 5, i32 5)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_acqrel_acqrel: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_acqrel_release(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 5, i32 4)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_acqrel_release: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_acqrel_acquire(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 5, i32 3)
+  ret i32 %1
+}
+; CHECK-LABEL: test_cmpxchg_acqrel_acquire
+
+define internal i32 @test_cmpxchg_acqrel_consume(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 5, i32 2)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_acqrel_consume: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_acqrel_relaxed(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 5, i32 1)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_acqrel_relaxed: Unexpected memory ordering for AtomicCmpxchg
+
+; release
+
+define internal i32 @test_cmpxchg_release_seqcst(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 4, i32 6)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_release_seqcst: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_release_acqrel(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 4, i32 5)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_release_acqrel: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_release_release(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 4, i32 4)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_release_release: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_release_acquire(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 4, i32 3)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_release_acquire: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_release_consume(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 4, i32 2)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_release_consume: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_release_relaxed(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 4, i32 1)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_release_relaxed: Unexpected memory ordering for AtomicCmpxchg
+
+; acquire
+
+define internal i32 @test_cmpxchg_acquire_seqcst(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 3, i32 6)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_acquire_seqcst: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_acquire_acqrel(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 3, i32 5)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_acquire_acqrel: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_acquire_release(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 3, i32 4)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_acquire_release: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_acquire_acquire(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 3, i32 3)
+  ret i32 %1
+}
+; CHECK-LABEL: test_cmpxchg_acquire_acquire
+
+define internal i32 @test_cmpxchg_acquire_consume(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 3, i32 2)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_acquire_consume: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_acquire_relaxed(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 3, i32 1)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_acquire_relaxed: Unexpected memory ordering for AtomicCmpxchg
+
+; consume
+
+define internal i32 @test_cmpxchg_consume_seqcst(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 2, i32 6)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_consume_seqcst: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_consume_acqrel(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 2, i32 5)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_consume_acqrel: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_consume_release(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 2, i32 4)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_consume_release: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_consume_acquire(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 2, i32 3)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_consume_acquire: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_consume_consume(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 2, i32 2)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_consume_consume: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_consume_relaxed(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 2, i32 1)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_consume_relaxed: Unexpected memory ordering for AtomicCmpxchg
+
+; relaxed
+
+define internal i32 @test_cmpxchg_relaxed_seqcst(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 1, i32 6)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_relaxed_seqcst: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_relaxed_acqrel(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 1, i32 5)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_relaxed_acqrel: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_relaxed_release(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 1, i32 4)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_relaxed_release: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_relaxed_acquire(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 1, i32 3)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_relaxed_acquire: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_relaxed_consume(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 1, i32 2)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_relaxed_consume: Unexpected memory ordering for AtomicCmpxchg
+
+define internal i32 @test_cmpxchg_relaxed_relaxed(i32 %oldval, i32 %newval) {
+  %ptr = inttoptr i32 undef to i32*
+  %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 1, i32 1)
+  ret i32 %1
+}
+; CHECK: test_cmpxchg_relaxed_relaxed: Unexpected memory ordering for AtomicCmpxchg
+
+
+; fence
+
+define internal void @test_fence_invalid_7() {
+  call void @llvm.nacl.atomic.fence(i32 7)
+  ret void
+}
+; CHECK: test_fence_invalid_7: Unexpected memory ordering for AtomicFence
+
+define internal void @test_fence_invalid_0() {
+  call void @llvm.nacl.atomic.fence(i32 0)
+  ret void
+}
+; CHECK: test_fence_invalid_0: Unexpected memory ordering for AtomicFence
+
+define internal void @test_fence_seqcst() {
+  call void @llvm.nacl.atomic.fence(i32 6)
+  ret void
+}
+; CHECK-LABEL: test_fence_seqcst
+
+define internal void @test_fence_acqrel() {
+  call void @llvm.nacl.atomic.fence(i32 5)
+  ret void
+}
+; CHECK-LABEL: test_fence_acqrel
+
+define internal void @test_fence_release() {
+  call void @llvm.nacl.atomic.fence(i32 4)
+  ret void
+}
+; CHECK-LABEL: test_fence_release
+
+define internal void @test_fence_acquire() {
+  call void @llvm.nacl.atomic.fence(i32 3)
+  ret void
+}
+; CHECK-LABEL: test_fence_acquire
+
+define internal void @test_fence_consume() {
+  call void @llvm.nacl.atomic.fence(i32 2)
+  ret void
+}
+; CHECK: test_fence_consume: Unexpected memory ordering for AtomicFence
+
+define internal void @test_fence_relaxed() {
+  call void @llvm.nacl.atomic.fence(i32 1)
+  ret void
+}
+; CHECK: test_fence_relaxed: Unexpected memory ordering for AtomicFence
diff --git a/tests_lit/llvm2ice_tests/nacl-atomic-errors.ll b/tests_lit/llvm2ice_tests/nacl-atomic-errors.ll
index 92de7aa..6e58728 100644
--- a/tests_lit/llvm2ice_tests/nacl-atomic-errors.ll
+++ b/tests_lit/llvm2ice_tests/nacl-atomic-errors.ll
@@ -19,8 +19,8 @@
 declare i1 @llvm.nacl.atomic.is.lock.free(i32, i8*)
 
 ;;; Load
-;;; Check unexpected memory order parameter (only sequential
-;;; consistency == 6 is currently allowed).
+;;; Check unexpected memory order parameter (release=4 and acq_rel=5
+;;; are disallowed).
 
 define i32 @error_atomic_load_8(i32 %iptr) {
 entry:
@@ -34,7 +34,7 @@
 define i32 @error_atomic_load_16(i32 %iptr) {
 entry:
   %ptr = inttoptr i32 %iptr to i16*
-  %i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 1)
+  %i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 4)
   %r = zext i16 %i to i32
   ret i32 %r
 }
@@ -43,13 +43,14 @@
 define i64 @error_atomic_load_64(i32 %iptr) {
 entry:
   %ptr = inttoptr i32 %iptr to i64*
-  %r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 2)
+  %r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 5)
   ret i64 %r
 }
 ; CHECK: Unexpected memory ordering for AtomicLoad
 
 
 ;;; Store
+;;; Check that consume=2, acquire=3, and acq_rel=5 are disallowed for stores.
 
 define void @error_atomic_store_32(i32 %iptr, i32 %v) {
 entry:
@@ -70,19 +71,20 @@
 define void @error_atomic_store_64_const(i32 %iptr) {
 entry:
   %ptr = inttoptr i32 %iptr to i64*
-  call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 4)
+  call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 5)
   ret void
 }
 ; CHECK: Unexpected memory ordering for AtomicStore
 
 ;;; RMW
 ;;; Test atomic memory order and operation.
+;;; Orderings 3 through 6 (acquire, release, acq_rel, seq_cst) are allowed.
 
 define i32 @error_atomic_rmw_add_8(i32 %iptr, i32 %v) {
 entry:
   %trunc = trunc i32 %v to i8
   %ptr = inttoptr i32 %iptr to i8*
-  %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 5)
+  %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 1)
   %a_ext = zext i8 %a to i32
   ret i32 %a_ext
 }
@@ -91,7 +93,7 @@
 define i64 @error_atomic_rmw_add_64(i32 %iptr, i64 %v) {
 entry:
   %ptr = inttoptr i32 %iptr to i64*
-  %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 4)
+  %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 7)
   ret i64 %a
 }
 ; CHECK: Unexpected memory ordering for AtomicRMW
@@ -131,7 +133,7 @@
                                                i32 %desired, i32 0, i32 6)
   ret i32 %old
 }
-; CHECK: Unexpected memory ordering (success) for AtomicCmpxchg
+; CHECK: Unexpected memory ordering for AtomicCmpxchg
 
 define i32 @error_atomic_cmpxchg_32_failure(i32 %iptr, i32 %expected, i32 %desired) {
 entry:
@@ -140,22 +142,22 @@
                                                i32 %desired, i32 6, i32 0)
   ret i32 %old
 }
-; CHECK: Unexpected memory ordering (failure) for AtomicCmpxchg
+; CHECK: Unexpected memory ordering for AtomicCmpxchg
 
 define i64 @error_atomic_cmpxchg_64_failure(i32 %iptr, i64 %expected, i64 %desired) {
 entry:
   %ptr = inttoptr i32 %iptr to i64*
   %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
-                                               i64 %desired, i32 6, i32 3)
+                                               i64 %desired, i32 4, i32 1)
   ret i64 %old
 }
-; CHECK: Unexpected memory ordering (failure) for AtomicCmpxchg
+; CHECK: Unexpected memory ordering for AtomicCmpxchg
 
 ;;; Fence and is-lock-free.
 
 define void @error_atomic_fence() {
 entry:
-  call void @llvm.nacl.atomic.fence(i32 1)
+  call void @llvm.nacl.atomic.fence(i32 0)
   ret void
 }
 ; CHECK: Unexpected memory ordering for AtomicFence
@@ -168,3 +170,58 @@
   ret i32 %r
 }
 ; CHECK: AtomicIsLockFree byte size should be compile-time const
+
+
+;;; Test bad non-constant memory ordering values.
+
+define i32 @error_atomic_load_8_nonconst(i32 %iptr) {
+entry:
+  %ptr = inttoptr i32 %iptr to i8*
+  %i = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 %iptr)
+  %r = zext i8 %i to i32
+  ret i32 %r
+}
+; CHECK: Unexpected memory ordering for AtomicLoad
+
+define void @error_atomic_store_32_nonconst(i32 %iptr, i32 %v) {
+entry:
+  %ptr = inttoptr i32 %iptr to i32*
+  call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %ptr, i32 %v)
+  ret void
+}
+; CHECK: Unexpected memory ordering for AtomicStore
+
+define i32 @error_atomic_rmw_add_8_nonconst(i32 %iptr, i32 %v) {
+entry:
+  %trunc = trunc i32 %v to i8
+  %ptr = inttoptr i32 %iptr to i8*
+  %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 %iptr)
+  %a_ext = zext i8 %a to i32
+  ret i32 %a_ext
+}
+; CHECK: Unexpected memory ordering for AtomicRMW
+
+define i32 @error_atomic_cmpxchg_32_success_nonconst_1(i32 %iptr, i32 %expected, i32 %desired) {
+entry:
+  %ptr = inttoptr i32 %iptr to i32*
+  %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
+                                               i32 %desired, i32 %iptr, i32 6)
+  ret i32 %old
+}
+; CHECK: Unexpected memory ordering for AtomicCmpxchg
+
+define i32 @error_atomic_cmpxchg_32_success_nonconst_2(i32 %iptr, i32 %expected, i32 %desired) {
+entry:
+  %ptr = inttoptr i32 %iptr to i32*
+  %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
+                                               i32 %desired, i32 6, i32 %iptr)
+  ret i32 %old
+}
+; CHECK: Unexpected memory ordering for AtomicCmpxchg
+
+define void @error_atomic_fence_nonconst(i32 %v) {
+entry:
+  call void @llvm.nacl.atomic.fence(i32 %v)
+  ret void
+}
+; CHECK: Unexpected memory ordering for AtomicFence