Add a peephole to fuse cmpxchg w/ later cmp+branch.

The cmpxchg instruction already sets ZF by comparing the return value
against the expected value, so there is no need to emit a second
equality compare.

Many pexes in the wild contain this pattern: some compare against
a constant, others against a variable.
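
As an illustration, this is roughly the kind of C source that produces
the pattern (the function and names below are hypothetical, not taken
from any particular pex; the __sync builtin is what the PNaCl toolchain
turns into the llvm.nacl.atomic.cmpxchg intrinsic):

  #include <stdint.h>

  // Returns 1 if the CAS succeeded. The builtin returns the old memory
  // contents, so "success" is exactly old == expected -- the comparison
  // that cmpxchg itself already performed when it set ZF.
  int try_update(volatile int32_t *ptr, int32_t expected,
                 int32_t desired) {
    int32_t old = __sync_val_compare_and_swap(ptr, expected, desired);
    return old == expected;
  }

Without the peephole, the lowered code re-compares "old" against
"expected" and branches; with it, the branch consumes the ZF that the
locked cmpxchg already set.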

BUG=https://code.google.com/p/nativeclient/issues/detail?id=3882
R=stichnot@chromium.org

Review URL: https://codereview.chromium.org/413903002
diff --git a/src/IceCfgNode.cpp b/src/IceCfgNode.cpp
index 2f2897c..22915b2 100644
--- a/src/IceCfgNode.cpp
+++ b/src/IceCfgNode.cpp
@@ -104,7 +104,8 @@
 // added before any branch instruction, and also if the block ends
 // with a compare instruction followed by a branch instruction that we
 // may want to fuse, it's better to insert the new assignments before
-// the compare instruction.
+// the compare instruction. The tryOptimizedCmpxchgCmpBr() method
+// assumes this ordering of instructions.
 //
 // Note that this transformation takes the Phi dest variables out of
 // SSA form, as there may be assignments to the dest variable in
diff --git a/src/IceTargetLowering.cpp b/src/IceTargetLowering.cpp
index 05ae121..2a9b8c4 100644
--- a/src/IceTargetLowering.cpp
+++ b/src/IceTargetLowering.cpp
@@ -38,12 +38,12 @@
   Inst->updateVars(getNode());
 }
 
-void LoweringContext::skipDeleted(InstList::iterator &I) {
+void LoweringContext::skipDeleted(InstList::iterator &I) const {
   while (I != End && (*I)->isDeleted())
     ++I;
 }
 
-void LoweringContext::advance(InstList::iterator &I) {
+void LoweringContext::advance(InstList::iterator &I) const {
   if (I != End) {
     ++I;
     skipDeleted(I);
diff --git a/src/IceTargetLowering.h b/src/IceTargetLowering.h
index da52adf..ec86cff 100644
--- a/src/IceTargetLowering.h
+++ b/src/IceTargetLowering.h
@@ -41,6 +41,15 @@
       return NULL;
     return *Next;
   }
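+  // Variant of getNextInst() that advances the caller's iterator:
+  // move Iter to the next non-deleted instruction and return it,
+  // or NULL if the end of the list is reached.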
+  Inst *getNextInst(InstList::iterator &Iter) const {
+    advance(Iter);
+    if (Iter == End)
+      return NULL;
+    return *Iter;
+  }
   CfgNode *getNode() const { return Node; }
   bool atEnd() const { return Cur == End; }
   InstList::iterator getCur() const { return Cur; }
@@ -68,8 +74,8 @@
   // End is a copy of Insts.end(), used if Next needs to be advanced.
   InstList::iterator End;
 
-  void skipDeleted(InstList::iterator &I);
-  void advance(InstList::iterator &I);
+  void skipDeleted(InstList::iterator &I) const;
+  void advance(InstList::iterator &I) const;
   LoweringContext(const LoweringContext &) LLVM_DELETED_FUNCTION;
   LoweringContext &operator=(const LoweringContext &) LLVM_DELETED_FUNCTION;
 };
diff --git a/src/IceTargetLoweringX8632.cpp b/src/IceTargetLoweringX8632.cpp
index 7c60d08..2db795b 100644
--- a/src/IceTargetLoweringX8632.cpp
+++ b/src/IceTargetLoweringX8632.cpp
@@ -2654,12 +2654,9 @@
     Operand *PtrToMem = Instr->getArg(0);
     Operand *Expected = Instr->getArg(1);
     Operand *Desired = Instr->getArg(2);
+    if (tryOptimizedCmpxchgCmpBr(DestPrev, PtrToMem, Expected, Desired))
+      return;
     lowerAtomicCmpxchg(DestPrev, PtrToMem, Expected, Desired);
-    // TODO(jvoung): If we peek ahead a few instructions and see how
-    // DestPrev is used (typically via another compare and branch),
-    // we may be able to optimize. If the result truly is used by a
-    // compare + branch, and the comparison is for equality, then we can
-    // optimize out the later compare, and fuse with the later branch.
     return;
   }
   case Intrinsics::AtomicFence:
@@ -2975,6 +2972,79 @@
   _mov(DestPrev, T_eax);
 }
 
+bool TargetX8632::tryOptimizedCmpxchgCmpBr(Variable *Dest, Operand *PtrToMem,
+                                           Operand *Expected,
+                                           Operand *Desired) {
+  if (Ctx->getOptLevel() == Opt_m1)
+    return false;
+  // Peek ahead a few instructions and see how Dest is used.
+  // It's very common to have:
+  //
+  // %x = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* ptr, i32 %expected, ...)
+  // [%y_phi = ...] // list of phi stores
+  // %p = icmp eq i32 %x, %expected
+  // br i1 %p, label %l1, label %l2
+  //
+  // which we can optimize into:
+  //
+  // %x = <cmpxchg code>
+  // [%y_phi = ...] // list of phi stores
+  // br eq, %l1, %l2
+  InstList::iterator I = Context.getCur();
+  // I is currently the InstIntrinsicCall. Peek past that.
+  // This assumes that the atomic cmpxchg has not been lowered yet,
+  // so that the instructions seen in the scan from "Cur" are simple.
+  assert(llvm::isa<InstIntrinsicCall>(*I));
+  Inst *NextInst = Context.getNextInst(I);
+  if (!NextInst)
+    return false;
+  // There might be phi assignments right before the compare+branch, since this
+  // could be a backward branch for a loop. This placement of assignments is
+  // determined by placePhiStores().
+  std::vector<InstAssign *> PhiAssigns;
+  while (InstAssign *PhiAssign = llvm::dyn_cast<InstAssign>(NextInst)) {
+    if (PhiAssign->getDest() == Dest)
+      return false;
+    PhiAssigns.push_back(PhiAssign);
+    NextInst = Context.getNextInst(I);
+    if (!NextInst)
+      return false;
+  }
+  if (InstIcmp *NextCmp = llvm::dyn_cast<InstIcmp>(NextInst)) {
+    if (!(NextCmp->getCondition() == InstIcmp::Eq &&
+          ((NextCmp->getSrc(0) == Dest && NextCmp->getSrc(1) == Expected) ||
+           (NextCmp->getSrc(1) == Dest && NextCmp->getSrc(0) == Expected)))) {
+      return false;
+    }
+    NextInst = Context.getNextInst(I);
+    if (!NextInst)
+      return false;
+    if (InstBr *NextBr = llvm::dyn_cast<InstBr>(NextInst)) {
+      if (!NextBr->isUnconditional() &&
+          NextCmp->getDest() == NextBr->getCondition() &&
+          NextBr->isLastUse(NextCmp->getDest())) {
+        lowerAtomicCmpxchg(Dest, PtrToMem, Expected, Desired);
+        for (size_t i = 0; i < PhiAssigns.size(); ++i) {
+          // Lower the phi assignments now, before the branch (same placement
+          // as before).
+          InstAssign *PhiAssign = PhiAssigns[i];
+          lowerAssign(PhiAssign);
+          PhiAssign->setDeleted();
+          Context.advanceNext();
+        }
+        _br(InstX8632::Br_e, NextBr->getTargetTrue(), NextBr->getTargetFalse());
+        // Skip over the old compare and branch by deleting them.
+        NextCmp->setDeleted();
+        NextBr->setDeleted();
+        Context.advanceNext();
+        Context.advanceNext();
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
 void TargetX8632::lowerAtomicRMW(Variable *Dest, uint32_t Operation,
                                  Operand *Ptr, Operand *Val) {
   bool NeedsCmpxchg = false;
diff --git a/src/IceTargetLoweringX8632.h b/src/IceTargetLoweringX8632.h
index 6f09a90..daca0cd 100644
--- a/src/IceTargetLoweringX8632.h
+++ b/src/IceTargetLoweringX8632.h
@@ -97,8 +97,12 @@
   virtual void doAddressOptLoad();
   virtual void doAddressOptStore();
 
+  // Naive lowering of cmpxchg.
   void lowerAtomicCmpxchg(Variable *DestPrev, Operand *Ptr, Operand *Expected,
                           Operand *Desired);
+  // Attempt a more optimized lowering of cmpxchg. Returns true if optimized.
+  bool tryOptimizedCmpxchgCmpBr(Variable *DestPrev, Operand *Ptr,
+                                Operand *Expected, Operand *Desired);
   void lowerAtomicRMW(Variable *Dest, uint32_t Operation, Operand *Ptr,
                       Operand *Val);
   void lowerCountZeros(bool Cttz, Type Ty, Variable *Dest, Operand *FirstVal,