Add SwiftShader dump from Feb 6 2013
diff --git a/src/LLVM/test/Transforms/InstCombine/2002-03-11-InstCombineHang.ll b/src/LLVM/test/Transforms/InstCombine/2002-03-11-InstCombineHang.ll
new file mode 100644
index 0000000..dad5798
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2002-03-11-InstCombineHang.ll
@@ -0,0 +1,9 @@
+; This testcase causes instcombine to hang.
+;
+; RUN: opt < %s -instcombine
+
+define void @test(i32 %X) {
+ %reg117 = add i32 %X, 0 ; <i32> [#uses=0]
+ ret void
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2002-05-14-SubFailure.ll b/src/LLVM/test/Transforms/InstCombine/2002-05-14-SubFailure.ll
new file mode 100644
index 0000000..642d206
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2002-05-14-SubFailure.ll
@@ -0,0 +1,10 @@
+; Instcombine was missing a test that caused it to make illegal transformations
+; sometimes. In this case, it transforms the sub into an add:
+; RUN: opt < %s -instcombine -S | grep sub
+;
+define i32 @test(i32 %i, i32 %j) {
+ %A = mul i32 %i, %j
+ %B = sub i32 2, %A
+ ret i32 %B
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2002-08-02-CastTest.ll b/src/LLVM/test/Transforms/InstCombine/2002-08-02-CastTest.ll
new file mode 100644
index 0000000..5add123
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2002-08-02-CastTest.ll
@@ -0,0 +1,11 @@
+; This testcase is incorrectly getting completely eliminated. There should be
+; SOME instruction named %c here, even if it's a bitwise and.
+;
+; RUN: opt < %s -instcombine -S | grep %c
+;
+define i64 @test3(i64 %A) {
+ %c1 = trunc i64 %A to i8 ; <i8> [#uses=1]
+ %c2 = zext i8 %c1 to i64 ; <i64> [#uses=1]
+ ret i64 %c2
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2002-12-05-MissedConstProp.ll b/src/LLVM/test/Transforms/InstCombine/2002-12-05-MissedConstProp.ll
new file mode 100644
index 0000000..394cdca
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2002-12-05-MissedConstProp.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | not grep add
+
+define i32 @test(i32 %A) {
+ %A.neg = sub i32 0, %A ; <i32> [#uses=1]
+ %.neg = sub i32 0, 1 ; <i32> [#uses=1]
+ %X = add i32 %.neg, 1 ; <i32> [#uses=1]
+ %Y.neg.ra = add i32 %A, %X ; <i32> [#uses=1]
+ %r = add i32 %A.neg, %Y.neg.ra ; <i32> [#uses=1]
+ ret i32 %r
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2003-05-26-CastMiscompile.ll b/src/LLVM/test/Transforms/InstCombine/2003-05-26-CastMiscompile.ll
new file mode 100644
index 0000000..709ebfc
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2003-05-26-CastMiscompile.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep 4294967295
+
+define i64 @test(i64 %Val) {
+ %tmp.3 = trunc i64 %Val to i32 ; <i32> [#uses=1]
+ %tmp.8 = zext i32 %tmp.3 to i64 ; <i64> [#uses=1]
+ ret i64 %tmp.8
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2003-05-27-ConstExprCrash.ll b/src/LLVM/test/Transforms/InstCombine/2003-05-27-ConstExprCrash.ll
new file mode 100644
index 0000000..c3da050
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2003-05-27-ConstExprCrash.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -disable-output
+
+@X = global i32 5 ; <i32*> [#uses=1]
+
+define i64 @test() {
+ %C = add i64 1, 2 ; <i64> [#uses=1]
+ %V = add i64 ptrtoint (i32* @X to i64), %C ; <i64> [#uses=1]
+ ret i64 %V
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2003-06-05-BranchInvertInfLoop.ll b/src/LLVM/test/Transforms/InstCombine/2003-06-05-BranchInvertInfLoop.ll
new file mode 100644
index 0000000..ebcc75f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2003-06-05-BranchInvertInfLoop.ll
@@ -0,0 +1,16 @@
+; This testcase causes an infinite loop in the instruction combiner,
+; because it thinks that the constant value is a not expression... and
+; constantly inverts the branch back and forth.
+;
+; RUN: opt < %s -instcombine -disable-output
+
+define i8 @test19(i1 %c) {
+ br i1 true, label %True, label %False
+
+True: ; preds = %0
+ ret i8 1
+
+False: ; preds = %0
+ ret i8 3
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll b/src/LLVM/test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll
new file mode 100644
index 0000000..eff5f4c
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll
@@ -0,0 +1,44 @@
+;
+; Test: ExternalConstant
+;
+; Description:
+; This regression test helps check whether the instruction combining
+; optimization pass correctly handles global variables which are marked
+; as external and constant.
+;
+; If a problem occurs, we should die on an assert(). Otherwise, we
+; should pass through the optimizer without failure.
+;
+; Extra code:
+; RUN: opt < %s -instcombine
+; END.
+
+target datalayout = "e-p:32:32"
+@silly = external constant i32 ; <i32*> [#uses=1]
+
+declare void @bzero(i8*, i32)
+
+declare void @bcopy(i8*, i8*, i32)
+
+declare i32 @bcmp(i8*, i8*, i32)
+
+declare i32 @fputs(i8*, i8*)
+
+declare i32 @fputs_unlocked(i8*, i8*)
+
+define i32 @function(i32 %a.1) {
+entry:
+ %a.0 = alloca i32 ; <i32*> [#uses=2]
+ %result = alloca i32 ; <i32*> [#uses=2]
+ store i32 %a.1, i32* %a.0
+ %tmp.0 = load i32* %a.0 ; <i32> [#uses=1]
+ %tmp.1 = load i32* @silly ; <i32> [#uses=1]
+ %tmp.2 = add i32 %tmp.0, %tmp.1 ; <i32> [#uses=1]
+ store i32 %tmp.2, i32* %result
+ br label %return
+
+return: ; preds = %entry
+ %tmp.3 = load i32* %result ; <i32> [#uses=1]
+ ret i32 %tmp.3
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2003-08-12-AllocaNonNull.ll b/src/LLVM/test/Transforms/InstCombine/2003-08-12-AllocaNonNull.ll
new file mode 100644
index 0000000..d3db282
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2003-08-12-AllocaNonNull.ll
@@ -0,0 +1,20 @@
+; This testcase can be simplified by "realizing" that alloca can never return
+; null.
+; RUN: opt < %s -instcombine -simplifycfg -S | not grep br
+
+declare i32 @bitmap_clear(...)
+
+define i32 @oof() {
+entry:
+ %live_head = alloca i32 ; <i32*> [#uses=2]
+ %tmp.1 = icmp ne i32* %live_head, null ; <i1> [#uses=1]
+ br i1 %tmp.1, label %then, label %UnifiedExitNode
+
+then: ; preds = %entry
+ %tmp.4 = call i32 (...)* @bitmap_clear( i32* %live_head ) ; <i32> [#uses=0]
+ br label %UnifiedExitNode
+
+UnifiedExitNode: ; preds = %then, %entry
+ ret i32 0
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll b/src/LLVM/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll
new file mode 100644
index 0000000..60dee1f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -S | grep load
+
+define void @test(i32* %P) {
+ ; Dead but not deletable!
+ %X = volatile load i32* %P ; <i32> [#uses=0]
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2003-10-29-CallSiteResolve.ll b/src/LLVM/test/Transforms/InstCombine/2003-10-29-CallSiteResolve.ll
new file mode 100644
index 0000000..09d1421
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2003-10-29-CallSiteResolve.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -disable-output
+
+declare i32* @bar()
+
+define float* @foo() {
+ %tmp.11 = invoke float* bitcast (i32* ()* @bar to float* ()*)( )
+ to label %invoke_cont unwind label %X ; <float*> [#uses=1]
+
+invoke_cont: ; preds = %0
+ ret float* %tmp.11
+
+X: ; preds = %0
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
+ ret float* null
+}
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/src/LLVM/test/Transforms/InstCombine/2003-11-03-VarargsCallBug.ll b/src/LLVM/test/Transforms/InstCombine/2003-11-03-VarargsCallBug.ll
new file mode 100644
index 0000000..fa2dee6
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2003-11-03-VarargsCallBug.ll
@@ -0,0 +1,13 @@
+; The cast in this testcase is not eliminable on a 32-bit target!
+; RUN: opt < %s -instcombine -S | grep inttoptr
+
+target datalayout = "e-p:32:32"
+
+declare void @foo(...)
+
+define void @test(i64 %X) {
+ %Y = inttoptr i64 %X to i32* ; <i32*> [#uses=1]
+ call void (...)* @foo( i32* %Y )
+ ret void
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll b/src/LLVM/test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll
new file mode 100644
index 0000000..bb2d379
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll
@@ -0,0 +1,31 @@
+; Test for a problem afflicting several C++ programs in the testsuite. The
+; instcombine pass is trying to get rid of the cast in the invoke instruction,
+; inserting a cast of the return value after the PHI instruction, but which is
+; used by the PHI instruction. This is bad: because of the semantics of the
+; invoke instruction, we really cannot perform this transformation at all at
+; least without splitting the critical edge.
+;
+; RUN: opt < %s -instcombine -disable-output
+
+declare i8* @test()
+
+define i32 @foo() {
+entry:
+ br i1 true, label %cont, label %call
+
+call: ; preds = %entry
+ %P = invoke i32* bitcast (i8* ()* @test to i32* ()*)( )
+ to label %cont unwind label %N ; <i32*> [#uses=1]
+
+cont: ; preds = %call, %entry
+ %P2 = phi i32* [ %P, %call ], [ null, %entry ] ; <i32*> [#uses=1]
+ %V = load i32* %P2 ; <i32> [#uses=1]
+ ret i32 %V
+
+N: ; preds = %call
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
+ ret i32 0
+}
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/src/LLVM/test/Transforms/InstCombine/2004-02-23-ShiftShiftOverflow.ll b/src/LLVM/test/Transforms/InstCombine/2004-02-23-ShiftShiftOverflow.ll
new file mode 100644
index 0000000..64d9660
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2004-02-23-ShiftShiftOverflow.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine -S | not grep 34
+
+define i32 @test(i32 %X) {
+ ; Do not fold into shr X, 34, as this uses undefined behavior!
+ %Y = ashr i32 %X, 17 ; <i32> [#uses=1]
+ %Z = ashr i32 %Y, 17 ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+define i32 @test2(i32 %X) {
+ ; Do not fold into shl X, 34, as this uses undefined behavior!
+ %Y = shl i32 %X, 17 ; <i32> [#uses=1]
+ %Z = shl i32 %Y, 17 ; <i32> [#uses=1]
+ ret i32 %Z
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2004-03-13-InstCombineInfLoop.ll b/src/LLVM/test/Transforms/InstCombine/2004-03-13-InstCombineInfLoop.ll
new file mode 100644
index 0000000..1ceab8e
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2004-03-13-InstCombineInfLoop.ll
@@ -0,0 +1,13 @@
+; This testcase caused the combiner to go into an infinite loop, moving the
+; cast back and forth, changing the seteq to operate on int vs uint and back.
+
+; RUN: opt < %s -instcombine -disable-output
+
+define i1 @test(i32 %A, i32 %B) {
+ %C = sub i32 0, %A ; <i32> [#uses=1]
+ %Cc = bitcast i32 %C to i32 ; <i32> [#uses=1]
+ %D = sub i32 0, %B ; <i32> [#uses=1]
+ %E = icmp eq i32 %Cc, %D ; <i1> [#uses=1]
+ ret i1 %E
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2004-04-04-InstCombineReplaceAllUsesWith.ll b/src/LLVM/test/Transforms/InstCombine/2004-04-04-InstCombineReplaceAllUsesWith.ll
new file mode 100644
index 0000000..30d2e47
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2004-04-04-InstCombineReplaceAllUsesWith.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define i32 @test() {
+ ret i32 0
+
+Loop: ; preds = %Loop
+ %X = add i32 %X, 1 ; <i32> [#uses=1]
+ br label %Loop
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll b/src/LLVM/test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll
new file mode 100644
index 0000000..df56d07
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -disable-output
+
+%Ty = type opaque
+
+define i32 @test(%Ty* %X) {
+ %Y = bitcast %Ty* %X to i32* ; <i32*> [#uses=1]
+ %Z = load i32* %Y ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2004-07-27-ConstantExprMul.ll b/src/LLVM/test/Transforms/InstCombine/2004-07-27-ConstantExprMul.ll
new file mode 100644
index 0000000..ee19cbc
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2004-07-27-ConstantExprMul.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -disable-output
+
+@p = weak global i32 0 ; <i32*> [#uses=1]
+
+define i32 @test(i32 %x) {
+ %y = mul i32 %x, ptrtoint (i32* @p to i32) ; <i32> [#uses=1]
+ ret i32 %y
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2004-08-09-RemInfLoop.ll b/src/LLVM/test/Transforms/InstCombine/2004-08-09-RemInfLoop.ll
new file mode 100644
index 0000000..da55d85
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2004-08-09-RemInfLoop.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine
+
+; This testcase should not send the instcombiner into an infinite loop!
+
+define i32 @test(i32 %X) {
+ %Y = srem i32 %X, 0 ; <i32> [#uses=1]
+ ret i32 %Y
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2004-08-10-BoolSetCC.ll b/src/LLVM/test/Transforms/InstCombine/2004-08-10-BoolSetCC.ll
new file mode 100644
index 0000000..20b681c
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2004-08-10-BoolSetCC.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {ret i1 false}
+
+define i1 @test(i1 %V) {
+ %Y = icmp ult i1 %V, false ; <i1> [#uses=1]
+ ret i1 %Y
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2004-09-20-BadLoadCombine.ll b/src/LLVM/test/Transforms/InstCombine/2004-09-20-BadLoadCombine.ll
new file mode 100644
index 0000000..30a3f4e
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2004-09-20-BadLoadCombine.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -mem2reg -S | \
+; RUN: not grep {i32 1}
+
+; When propagating the load through the select, make sure that the load is
+; inserted where the original load was, not where the select is. Not doing
+; so could produce incorrect results!
+
+define i32 @test(i1 %C) {
+ %X = alloca i32 ; <i32*> [#uses=3]
+ %X2 = alloca i32 ; <i32*> [#uses=2]
+ store i32 1, i32* %X
+ store i32 2, i32* %X2
+ %Y = select i1 %C, i32* %X, i32* %X2 ; <i32*> [#uses=1]
+ store i32 3, i32* %X
+ %Z = load i32* %Y ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.ll b/src/LLVM/test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.ll
new file mode 100644
index 0000000..8b70d74
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -instcombine -mem2reg -simplifycfg | \
+; RUN: llvm-dis | grep -v store | not grep {i32 1}
+
+; Test to make sure that instcombine does not accidentally propagate the load
+; into the PHI, which would break the program.
+
+define i32 @test(i1 %C) {
+entry:
+ %X = alloca i32 ; <i32*> [#uses=3]
+ %X2 = alloca i32 ; <i32*> [#uses=2]
+ store i32 1, i32* %X
+ store i32 2, i32* %X2
+ br i1 %C, label %cond_true.i, label %cond_continue.i
+
+cond_true.i: ; preds = %entry
+ br label %cond_continue.i
+
+cond_continue.i: ; preds = %cond_true.i, %entry
+ %mem_tmp.i.0 = phi i32* [ %X, %cond_true.i ], [ %X2, %entry ] ; <i32*> [#uses=1]
+ store i32 3, i32* %X
+ %tmp.3 = load i32* %mem_tmp.i.0 ; <i32> [#uses=1]
+ ret i32 %tmp.3
+}
+
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2004-09-28-BadShiftAndSetCC.ll b/src/LLVM/test/Transforms/InstCombine/2004-09-28-BadShiftAndSetCC.ll
new file mode 100644
index 0000000..f8861a9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2004-09-28-BadShiftAndSetCC.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | not grep -- -65536
+
+define i1 @test(i32 %tmp.124) {
+ %tmp.125 = shl i32 %tmp.124, 8 ; <i32> [#uses=1]
+ %tmp.126.mask = and i32 %tmp.125, -16777216 ; <i32> [#uses=1]
+ %tmp.128 = icmp eq i32 %tmp.126.mask, 167772160 ; <i1> [#uses=1]
+ ret i1 %tmp.128
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2004-11-22-Missed-and-fold.ll b/src/LLVM/test/Transforms/InstCombine/2004-11-22-Missed-and-fold.ll
new file mode 100644
index 0000000..b851b98
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2004-11-22-Missed-and-fold.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | not grep and
+
+define i8 @test21(i8 %A) {
+ ;; sign extend
+ %C = ashr i8 %A, 7 ; <i8> [#uses=1]
+ ;; chop off sign
+ %D = and i8 %C, 1 ; <i8> [#uses=1]
+ ret i8 %D
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll b/src/LLVM/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll
new file mode 100644
index 0000000..dfd8c6d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll
@@ -0,0 +1,192 @@
+; This test case tests the InstructionCombining optimization that
+; reduces things like:
+; %Y = sext i8 %X to i32
+; %C = icmp ult i32 %Y, 1024
+; to
+; %C = i1 true
+; It includes test cases for different constant values, signedness of the
+; cast operands, and types of setCC operators. In all cases, the cast should
+; be eliminated. In many cases the setCC is also eliminated based on the
+; constant value and the range of the casted value.
+;
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; END.
+define i1 @lt_signed_to_large_unsigned(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp ult i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C1 = icmp sgt i8 %SB, -1
+; CHECK: ret i1 %C1
+}
+
+define i1 @lt_signed_to_large_signed(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp slt i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 true
+}
+
+define i1 @lt_signed_to_large_negative(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp slt i32 %Y, -1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 false
+}
+
+define i1 @lt_signed_to_small_unsigned(i8 %SB) {
+ %Y = sext i8 %SB to i32
+ %C = icmp ult i32 %Y, 17
+ ret i1 %C
+; CHECK: %C = icmp ult i8 %SB, 17
+; CHECK: ret i1 %C
+}
+
+define i1 @lt_signed_to_small_signed(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp slt i32 %Y, 17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp slt i8 %SB, 17
+; CHECK: ret i1 %C
+}
+define i1 @lt_signed_to_small_negative(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp slt i32 %Y, -17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp slt i8 %SB, -17
+; CHECK: ret i1 %C
+}
+
+define i1 @lt_unsigned_to_large_unsigned(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp ult i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 true
+}
+
+define i1 @lt_unsigned_to_large_signed(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp slt i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 true
+}
+
+define i1 @lt_unsigned_to_large_negative(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp slt i32 %Y, -1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 false
+}
+
+define i1 @lt_unsigned_to_small_unsigned(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp ult i32 %Y, 17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp ult i8 %SB, 17
+; CHECK: ret i1 %C
+}
+
+define i1 @lt_unsigned_to_small_signed(i8 %SB) {
+ %Y = zext i8 %SB to i32
+ %C = icmp slt i32 %Y, 17
+ ret i1 %C
+; CHECK: %C = icmp ult i8 %SB, 17
+; CHECK: ret i1 %C
+}
+
+define i1 @lt_unsigned_to_small_negative(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp slt i32 %Y, -17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 false
+}
+
+define i1 @gt_signed_to_large_unsigned(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp ugt i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp slt i8 %SB, 0
+; CHECK: ret i1 %C
+}
+
+define i1 @gt_signed_to_large_signed(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp sgt i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 false
+}
+
+define i1 @gt_signed_to_large_negative(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp sgt i32 %Y, -1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 true
+}
+
+define i1 @gt_signed_to_small_unsigned(i8 %SB) {
+ %Y = sext i8 %SB to i32
+ %C = icmp ugt i32 %Y, 17
+ ret i1 %C
+; CHECK: %C = icmp ugt i8 %SB, 17
+; CHECK: ret i1 %C
+}
+
+define i1 @gt_signed_to_small_signed(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp sgt i32 %Y, 17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp sgt i8 %SB, 17
+; CHECK: ret i1 %C
+}
+
+define i1 @gt_signed_to_small_negative(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp sgt i32 %Y, -17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp sgt i8 %SB, -17
+; CHECK: ret i1 %C
+}
+
+define i1 @gt_unsigned_to_large_unsigned(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp ugt i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 false
+}
+
+define i1 @gt_unsigned_to_large_signed(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp sgt i32 %Y, 1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 false
+}
+
+define i1 @gt_unsigned_to_large_negative(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp sgt i32 %Y, -1024 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 true
+}
+
+define i1 @gt_unsigned_to_small_unsigned(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp ugt i32 %Y, 17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp ugt i8 %SB, 17
+; CHECK: ret i1 %C
+}
+
+define i1 @gt_unsigned_to_small_signed(i8 %SB) {
+ %Y = zext i8 %SB to i32
+ %C = icmp sgt i32 %Y, 17
+ ret i1 %C
+; CHECK: %C = icmp ugt i8 %SB, 17
+; CHECK: ret i1 %C
+}
+
+define i1 @gt_unsigned_to_small_negative(i8 %SB) {
+ %Y = zext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp sgt i32 %Y, -17 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 true
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2004-12-08-RemInfiniteLoop.ll b/src/LLVM/test/Transforms/InstCombine/2004-12-08-RemInfiniteLoop.ll
new file mode 100644
index 0000000..a5d4e03
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2004-12-08-RemInfiniteLoop.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine
+
+define i32 @test(i32 %X) {
+ %Y = srem i32 %X, undef ; <i32> [#uses=1]
+ ret i32 %Y
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2005-03-04-ShiftOverflow.ll b/src/LLVM/test/Transforms/InstCombine/2005-03-04-ShiftOverflow.ll
new file mode 100644
index 0000000..1842566
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2005-03-04-ShiftOverflow.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep {ret i1 false}
+
+define i1 @test(i64 %tmp.169) {
+ %tmp.1710 = lshr i64 %tmp.169, 1 ; <i64> [#uses=1]
+ %tmp.1912 = icmp ugt i64 %tmp.1710, 0 ; <i1> [#uses=1]
+ ret i1 %tmp.1912
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2005-04-07-UDivSelectCrash.ll b/src/LLVM/test/Transforms/InstCombine/2005-04-07-UDivSelectCrash.ll
new file mode 100644
index 0000000..d709f19
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2005-04-07-UDivSelectCrash.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define i32 @test(i1 %C, i32 %tmp.15) {
+ %tmp.16 = select i1 %C, i32 8, i32 1 ; <i32> [#uses=1]
+ %tmp.18 = udiv i32 %tmp.15, %tmp.16 ; <i32> [#uses=1]
+ ret i32 %tmp.18
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2005-06-15-DivSelectCrash.ll b/src/LLVM/test/Transforms/InstCombine/2005-06-15-DivSelectCrash.ll
new file mode 100644
index 0000000..1cb10a2
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2005-06-15-DivSelectCrash.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define i32 @_Z13func_31585107li(i32 %l_39521025, i32 %l_59244666) {
+ %shortcirc_val = select i1 false, i32 1, i32 0 ; <i32> [#uses=1]
+ %tmp.8 = udiv i32 0, %shortcirc_val ; <i32> [#uses=1]
+ %tmp.9 = icmp eq i32 %tmp.8, 0 ; <i1> [#uses=1]
+ %retval = select i1 %tmp.9, i32 %l_59244666, i32 -1621308501 ; <i32> [#uses=1]
+ ret i32 %retval
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2005-06-15-ShiftSetCCCrash.ll b/src/LLVM/test/Transforms/InstCombine/2005-06-15-ShiftSetCCCrash.ll
new file mode 100644
index 0000000..bfdc0c4
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2005-06-15-ShiftSetCCCrash.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR577
+
+define i1 @test() {
+ %tmp.3 = shl i32 0, 41 ; <i32> [#uses=1]
+ %tmp.4 = icmp ne i32 %tmp.3, 0 ; <i1> [#uses=1]
+ ret i1 %tmp.4
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2005-06-16-RangeCrash.ll b/src/LLVM/test/Transforms/InstCombine/2005-06-16-RangeCrash.ll
new file mode 100644
index 0000000..2ce7aaa
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2005-06-16-RangeCrash.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR585
+
+define i1 @test() {
+ %tmp.26 = sdiv i32 0, -2147483648 ; <i32> [#uses=1]
+ %tmp.27 = icmp eq i32 %tmp.26, 0 ; <i1> [#uses=1]
+ ret i1 %tmp.27
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll b/src/LLVM/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll
new file mode 100644
index 0000000..6d91094
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {ret i1 true}
+; PR586
+
+@g_07918478 = external global i32 ; <i32*> [#uses=1]
+
+define i1 @test() {
+ %tmp.0 = load i32* @g_07918478 ; <i32> [#uses=2]
+ %tmp.1 = icmp ne i32 %tmp.0, 0 ; <i1> [#uses=1]
+ %tmp.4 = icmp ult i32 %tmp.0, 4111 ; <i1> [#uses=1]
+ %bothcond = or i1 %tmp.1, %tmp.4 ; <i1> [#uses=1]
+ ret i1 %bothcond
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2005-07-07-DeadPHILoop.ll b/src/LLVM/test/Transforms/InstCombine/2005-07-07-DeadPHILoop.ll
new file mode 100644
index 0000000..d404b10
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2005-07-07-DeadPHILoop.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -disable-output
+
+; This example caused instcombine to spin into an infinite loop.
+
+define void @test(i32* %P) {
+ ret void
+
+Dead: ; preds = %Dead
+ %X = phi i32 [ %Y, %Dead ] ; <i32> [#uses=1]
+ %Y = sdiv i32 %X, 10 ; <i32> [#uses=2]
+ store i32 %Y, i32* %P
+ br label %Dead
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-02-13-DemandedMiscompile.ll b/src/LLVM/test/Transforms/InstCombine/2006-02-13-DemandedMiscompile.ll
new file mode 100644
index 0000000..8c61efe
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-02-13-DemandedMiscompile.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep undef
+
+define i32 @test(i8 %A) {
+ %B = sext i8 %A to i32 ; <i32> [#uses=1]
+ %C = ashr i32 %B, 8 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-02-28-Crash.ll b/src/LLVM/test/Transforms/InstCombine/2006-02-28-Crash.ll
new file mode 100644
index 0000000..4857880
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-02-28-Crash.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define i32 @test() {
+ %tmp203 = icmp eq i32 1, 2 ; <i1> [#uses=1]
+ %tmp203.upgrd.1 = zext i1 %tmp203 to i32 ; <i32> [#uses=1]
+ ret i32 %tmp203.upgrd.1
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-03-30-ExtractElement.ll b/src/LLVM/test/Transforms/InstCombine/2006-03-30-ExtractElement.ll
new file mode 100644
index 0000000..5decc21
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-03-30-ExtractElement.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define float @test(<4 x float> %V) {
+ %V2 = insertelement <4 x float> %V, float 1.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
+ %R = extractelement <4 x float> %V2, i32 2 ; <float> [#uses=1]
+ ret float %R
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-04-28-ShiftShiftLongLong.ll b/src/LLVM/test/Transforms/InstCombine/2006-04-28-ShiftShiftLongLong.ll
new file mode 100644
index 0000000..28075c0
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-04-28-ShiftShiftLongLong.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; This cannot be turned into a sign extending cast!
+
+define i64 @test(i64 %X) {
+ %Y = shl i64 %X, 16 ; <i64> [#uses=1]
+; CHECK: %Y = shl i64 %X, 16
+ %Z = ashr i64 %Y, 16 ; <i64> [#uses=1]
+; CHECK: %Z = ashr exact i64 %Y, 16
+ ret i64 %Z
+; CHECK: ret i64 %Z
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-05-04-DemandedBitCrash.ll b/src/LLVM/test/Transforms/InstCombine/2006-05-04-DemandedBitCrash.ll
new file mode 100644
index 0000000..b328b7b
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-05-04-DemandedBitCrash.ll
@@ -0,0 +1,51 @@
+; RUN: opt < %s -instcombine -disable-output
+; END.
+
+define void @test() {
+bb38.i:
+ %varspec.0.i1014 = bitcast i64 123814269237067777 to i64 ; <i64> [#uses=1]
+ %locspec.0.i1015 = bitcast i32 1 to i32 ; <i32> [#uses=2]
+ %tmp51391.i1018 = lshr i64 %varspec.0.i1014, 16 ; <i64> [#uses=1]
+ %tmp51392.i1019 = trunc i64 %tmp51391.i1018 to i32 ; <i32> [#uses=2]
+ %tmp51392.mask.i1020 = lshr i32 %tmp51392.i1019, 29 ; <i32> [#uses=1]
+ %tmp7.i1021 = and i32 %tmp51392.mask.i1020, 1 ; <i32> [#uses=2]
+ %tmp18.i1026 = lshr i32 %tmp51392.i1019, 31 ; <i32> [#uses=2]
+ %tmp18.i1027 = trunc i32 %tmp18.i1026 to i8 ; <i8> [#uses=1]
+ br i1 false, label %cond_false1148.i1653, label %bb377.i1259
+
+bb377.i1259: ; preds = %bb38.i
+ br i1 false, label %cond_true541.i1317, label %cond_false1148.i1653
+
+cond_true541.i1317: ; preds = %bb377.i1259
+ %tmp545.i1318 = lshr i32 %locspec.0.i1015, 10 ; <i32> [#uses=1]
+ %tmp550.i1319 = lshr i32 %locspec.0.i1015, 4 ; <i32> [#uses=1]
+ %tmp550551.i1320 = and i32 %tmp550.i1319, 63 ; <i32> [#uses=1]
+ %tmp553.i1321 = icmp ult i32 %tmp550551.i1320, 4 ; <i1> [#uses=1]
+ %tmp558.i1322 = icmp eq i32 %tmp7.i1021, 0 ; <i1> [#uses=1]
+ %bothcond.i1326 = or i1 %tmp553.i1321, false ; <i1> [#uses=1]
+ %bothcond1.i1327 = or i1 %bothcond.i1326, false ; <i1> [#uses=1]
+ %bothcond2.not.i1328 = or i1 %bothcond1.i1327, false ; <i1> [#uses=1]
+ %bothcond3.i1329 = or i1 %bothcond2.not.i1328, %tmp558.i1322 ; <i1> [#uses=0]
+ br i1 false, label %cond_true583.i1333, label %cond_next592.i1337
+
+cond_true583.i1333: ; preds = %cond_true541.i1317
+ br i1 false, label %cond_true586.i1335, label %cond_next592.i1337
+
+cond_true586.i1335: ; preds = %cond_true583.i1333
+ br label %cond_true.i
+
+cond_next592.i1337: ; preds = %cond_true583.i1333, %cond_true541.i1317
+ %mask_z.0.i1339 = phi i32 [ %tmp18.i1026, %cond_true541.i1317 ], [ 0, %cond_true583.i1333 ] ; <i32> [#uses=0]
+ %tmp594.i1340 = and i32 %tmp545.i1318, 15 ; <i32> [#uses=0]
+ br label %cond_true.i
+
+cond_false1148.i1653: ; preds = %bb377.i1259, %bb38.i
+ %tmp1150.i1654 = icmp eq i32 %tmp7.i1021, 0 ; <i1> [#uses=1]
+ %tmp1160.i1656 = icmp eq i8 %tmp18.i1027, 0 ; <i1> [#uses=1]
+ %bothcond8.i1658 = or i1 %tmp1150.i1654, %tmp1160.i1656 ; <i1> [#uses=1]
+ %bothcond9.i1659 = or i1 %bothcond8.i1658, false ; <i1> [#uses=0]
+ br label %cond_true.i
+
+cond_true.i: ; preds = %cond_false1148.i1653, %cond_next592.i1337, %cond_true586.i1335
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-09-15-CastToBool.ll b/src/LLVM/test/Transforms/InstCombine/2006-09-15-CastToBool.ll
new file mode 100644
index 0000000..128bdc4
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-09-15-CastToBool.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep and
+; PR913
+
+define i32 @test(i32* %tmp1) {
+ %tmp.i = load i32* %tmp1 ; <i32> [#uses=1]
+ %tmp = bitcast i32 %tmp.i to i32 ; <i32> [#uses=1]
+ %tmp2.ui = lshr i32 %tmp, 5 ; <i32> [#uses=1]
+ %tmp2 = bitcast i32 %tmp2.ui to i32 ; <i32> [#uses=1]
+ %tmp3 = and i32 %tmp2, 1 ; <i32> [#uses=1]
+ %tmp3.upgrd.1 = icmp ne i32 %tmp3, 0 ; <i1> [#uses=1]
+ %tmp34 = zext i1 %tmp3.upgrd.1 to i32 ; <i32> [#uses=1]
+ ret i32 %tmp34
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst-2.ll b/src/LLVM/test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst-2.ll
new file mode 100644
index 0000000..252786e
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-10-19-SignedToUnsignedCastAndConst-2.ll
@@ -0,0 +1,10 @@
+; The optimizer should be able to remove cast operation here.
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep sext.*i32
+
+define i1 @eq_signed_to_small_unsigned(i8 %SB) {
+ %Y = sext i8 %SB to i32 ; <i32> [#uses=1]
+ %C = icmp eq i32 %Y, 17 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-10-20-mask.ll b/src/LLVM/test/Transforms/InstCombine/2006-10-20-mask.ll
new file mode 100644
index 0000000..2cdcfc7
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-10-20-mask.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep and
+
+define i64 @foo(i64 %tmp, i64 %tmp2) {
+ %tmp.upgrd.1 = trunc i64 %tmp to i32 ; <i32> [#uses=1]
+ %tmp2.upgrd.2 = trunc i64 %tmp2 to i32 ; <i32> [#uses=1]
+ %tmp3 = and i32 %tmp.upgrd.1, %tmp2.upgrd.2 ; <i32> [#uses=1]
+ %tmp4 = zext i32 %tmp3 to i64 ; <i64> [#uses=1]
+ ret i64 %tmp4
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll b/src/LLVM/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll
new file mode 100644
index 0000000..1541191
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-10-26-VectorReassoc.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep mul | count 2
+
+define <4 x float> @test(<4 x float> %V) {
+ %Y = fmul <4 x float> %V, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
+ %Z = fmul <4 x float> %Y, < float 1.000000e+00, float 2.000000e+05, float -3.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
+ ret <4 x float> %Z
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-11-10-ashr-miscompile.ll b/src/LLVM/test/Transforms/InstCombine/2006-11-10-ashr-miscompile.ll
new file mode 100644
index 0000000..cc512d0
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-11-10-ashr-miscompile.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep lshr
+; Verify this is not turned into -1.
+
+define i32 @test(i8 %amt) {
+ %shift.upgrd.1 = zext i8 %amt to i32 ; <i32> [#uses=1]
+ %B = lshr i32 -1, %shift.upgrd.1 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll b/src/LLVM/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll
new file mode 100644
index 0000000..1d3cf2d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-12-01-BadFPVectorXform.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep sub
+; RUN: opt < %s -instcombine -S | grep add
+
+define <4 x float> @test(<4 x float> %tmp26, <4 x float> %tmp53) {
+ ; (X+Y)-Y != X for fp vectors.
+ %tmp64 = fadd <4 x float> %tmp26, %tmp53 ; <<4 x float>> [#uses=1]
+ %tmp75 = fsub <4 x float> %tmp64, %tmp53 ; <<4 x float>> [#uses=1]
+ ret <4 x float> %tmp75
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-12-05-fp-to-int-ext.ll b/src/LLVM/test/Transforms/InstCombine/2006-12-05-fp-to-int-ext.ll
new file mode 100644
index 0000000..682d99d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-12-05-fp-to-int-ext.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | grep zext
+
+; Never merge these two conversions, even though it's possible: this is
+; significantly more expensive than the two conversions on some targets
+; and it causes libgcc to be compile __fixunsdfdi into a recursive
+; function.
+define i64 @test(double %D) {
+ %A = fptoui double %D to i32 ; <i32> [#uses=1]
+ %B = zext i32 %A to i64 ; <i64> [#uses=1]
+ ret i64 %B
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll b/src/LLVM/test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll
new file mode 100644
index 0000000..43e9822
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll
@@ -0,0 +1,51 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {icmp sgt}
+; END.
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+ %struct.point = type { i32, i32 }
+
+define i32 @visible(i32 %direction, i64 %p1.0, i64 %p2.0, i64 %p3.0) {
+entry:
+ %p1_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %p2_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %p3_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %tmp = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp.upgrd.1 = getelementptr { i64 }* %tmp, i64 0, i32 0 ; <i64*> [#uses=1]
+ store i64 %p1.0, i64* %tmp.upgrd.1
+ %tmp1 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp2 = getelementptr { i64 }* %tmp1, i64 0, i32 0 ; <i64*> [#uses=1]
+ store i64 %p2.0, i64* %tmp2
+ %tmp3 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp4 = getelementptr { i64 }* %tmp3, i64 0, i32 0 ; <i64*> [#uses=1]
+ store i64 %p3.0, i64* %tmp4
+ %tmp.upgrd.2 = icmp eq i32 %direction, 0 ; <i1> [#uses=1]
+ %tmp5 = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp6 = getelementptr { i64 }* %tmp5, i64 0, i32 0 ; <i64*> [#uses=1]
+ %tmp.upgrd.3 = load i64* %tmp6 ; <i64> [#uses=1]
+ %tmp7 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp8 = getelementptr { i64 }* %tmp7, i64 0, i32 0 ; <i64*> [#uses=1]
+ %tmp9 = load i64* %tmp8 ; <i64> [#uses=1]
+ %tmp10 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp11 = getelementptr { i64 }* %tmp10, i64 0, i32 0 ; <i64*> [#uses=1]
+ %tmp12 = load i64* %tmp11 ; <i64> [#uses=1]
+ %tmp13 = call i32 @determinant( i64 %tmp.upgrd.3, i64 %tmp9, i64 %tmp12 ) ; <i32> [#uses=2]
+ br i1 %tmp.upgrd.2, label %cond_true, label %cond_false
+
+cond_true: ; preds = %entry
+ %tmp14 = icmp slt i32 %tmp13, 0 ; <i1> [#uses=1]
+ %tmp14.upgrd.4 = zext i1 %tmp14 to i32 ; <i32> [#uses=1]
+ br label %return
+
+cond_false: ; preds = %entry
+ %tmp26 = icmp sgt i32 %tmp13, 0 ; <i1> [#uses=1]
+ %tmp26.upgrd.5 = zext i1 %tmp26 to i32 ; <i32> [#uses=1]
+ br label %return
+
+return: ; preds = %cond_false, %cond_true
+ %retval.0 = phi i32 [ %tmp14.upgrd.4, %cond_true ], [ %tmp26.upgrd.5, %cond_false ] ; <i32> [#uses=1]
+ ret i32 %retval.0
+}
+
+declare i32 @determinant(i64, i64, i64)
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll b/src/LLVM/test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll
new file mode 100644
index 0000000..507bcbc
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll
@@ -0,0 +1,41 @@
+; RUN: opt < %s -instcombine -S | grep select
+; END.
+
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+ %struct.point = type { i32, i32 }
+
+define i32 @visible(i32 %direction, i64 %p1.0, i64 %p2.0, i64 %p3.0) {
+entry:
+ %p1_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %p2_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %p3_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
+ %tmp = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp.upgrd.1 = getelementptr { i64 }* %tmp, i32 0, i32 0 ; <i64*> [#uses=1]
+ store i64 %p1.0, i64* %tmp.upgrd.1
+ %tmp1 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp2 = getelementptr { i64 }* %tmp1, i32 0, i32 0 ; <i64*> [#uses=1]
+ store i64 %p2.0, i64* %tmp2
+ %tmp3 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp4 = getelementptr { i64 }* %tmp3, i32 0, i32 0 ; <i64*> [#uses=1]
+ store i64 %p3.0, i64* %tmp4
+ %tmp.upgrd.2 = icmp eq i32 %direction, 0 ; <i1> [#uses=1]
+ %tmp5 = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp6 = getelementptr { i64 }* %tmp5, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp.upgrd.3 = load i64* %tmp6 ; <i64> [#uses=1]
+ %tmp7 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp8 = getelementptr { i64 }* %tmp7, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp9 = load i64* %tmp8 ; <i64> [#uses=1]
+ %tmp10 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
+ %tmp11 = getelementptr { i64 }* %tmp10, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp12 = load i64* %tmp11 ; <i64> [#uses=1]
+ %tmp13 = call i32 @determinant( i64 %tmp.upgrd.3, i64 %tmp9, i64 %tmp12 ) ; <i32> [#uses=2]
+ %tmp14 = icmp slt i32 %tmp13, 0 ; <i1> [#uses=1]
+ %tmp26 = icmp sgt i32 %tmp13, 0 ; <i1> [#uses=1]
+ %retval.0.in = select i1 %tmp.upgrd.2, i1 %tmp14, i1 %tmp26 ; <i1> [#uses=1]
+ %retval.0 = zext i1 %retval.0.in to i32 ; <i32> [#uses=1]
+ ret i32 %retval.0
+}
+
+declare i32 @determinant(i64, i64, i64)
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-12-15-Range-Test.ll b/src/LLVM/test/Transforms/InstCombine/2006-12-15-Range-Test.ll
new file mode 100644
index 0000000..b6b810a
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-12-15-Range-Test.ll
@@ -0,0 +1,31 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep icmp | count 1
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {icmp ugt} | count 1
+; END.
+
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+@r = external global [17 x i32] ; <[17 x i32]*> [#uses=1]
+
+define i1 @print_pgm_cond_true(i32 %tmp12.reload, i32* %tmp16.out) {
+newFuncRoot:
+ br label %cond_true
+
+bb27.exitStub: ; preds = %cond_true
+ store i32 %tmp16, i32* %tmp16.out
+ ret i1 true
+
+cond_next23.exitStub: ; preds = %cond_true
+ store i32 %tmp16, i32* %tmp16.out
+ ret i1 false
+
+cond_true: ; preds = %newFuncRoot
+ %tmp15 = getelementptr [17 x i32]* @r, i32 0, i32 %tmp12.reload ; <i32*> [#uses=1]
+ %tmp16 = load i32* %tmp15 ; <i32> [#uses=4]
+ %tmp18 = icmp slt i32 %tmp16, -31 ; <i1> [#uses=1]
+ %tmp21 = icmp sgt i32 %tmp16, 31 ; <i1> [#uses=1]
+ %bothcond = or i1 %tmp18, %tmp21 ; <i1> [#uses=1]
+ br i1 %bothcond, label %bb27.exitStub, label %cond_next23.exitStub
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll b/src/LLVM/test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll
new file mode 100644
index 0000000..6c3a06f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll
@@ -0,0 +1,30 @@
+; For PR1065. This causes an assertion in instcombine if a select with two cmp
+; operands is encountered.
+; RUN: opt < %s -instcombine -disable-output
+; END.
+
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+ %struct.internal_state = type { i32 }
+ %struct.mng_data = type { i32, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, i32, i32, i8, i32, i32, i32, i32, i16, i16, i16, i8, i8, double, double, double, i8, i8, i8, i8, i32, i32, i32, i32, i32, i8, i32, i32, i8*, i8* (i32)*, void (i8*, i32)*, void (i8*, i8*, i32)*, i8 (%struct.mng_data*)*, i8 (%struct.mng_data*)*, i8 (%struct.mng_data*, i8*, i32, i32*)*, i8 (%struct.mng_data*, i8*, i32, i32*)*, i8 (%struct.mng_data*, i32, i8, i32, i32, i32, i32, i8*)*, i8 (%struct.mng_data*, i32, i32, i8*)*, i8 (%struct.mng_data*, i32, i32)*, i8 (%struct.mng_data*, i8, i8*, i8*, i8*, i8*)*, i8 (%struct.mng_data*)*, i8 (%struct.mng_data*, i8*)*, i8 (%struct.mng_data*, i8*)*, i8 (%struct.mng_data*, i32, i32)*, i8 (%struct.mng_data*, i32, i32, i8*)*, i8 (%struct.mng_data*, i8, i8, i32, i32)*, i8* (%struct.mng_data*, i32)*, i8* (%struct.mng_data*, i32)*, i8* (%struct.mng_data*, i32)*, i8 (%struct.mng_data*, i32, i32, i32, i32)*, i32 (%struct.mng_data*)*, i8 (%struct.mng_data*, i32)*, i8 (%struct.mng_data*, i32)*, i8 (%struct.mng_data*, i32, i32, i32, i32, i32, i32, i32, i32)*, i8 (%struct.mng_data*, i8)*, i8 (%struct.mng_data*, i32, i8*)*, i8 (%struct.mng_data*, i32, i8, i8*)*, i8, i32, i32, i8*, i8*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i8, i8, i8, i8, i8, i32, i8, i8, i8, i32, i8*, i32, i8*, i32, i8, i8, i8, i32, i8*, i8*, i32, i32, i8*, i8*, %struct.mng_pushdata*, %struct.mng_pushdata*, %struct.mng_pushdata*, %struct.mng_pushdata*, i8, i8, i32, i32, i8*, i8, i8, i32, i32, i32, i32, i32, i32, i8, i8, i8, i8, i32, i32, i8*, i32, i32, i32, i8, i8, i32, i32, i32, i32, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i8*, i8*, i8*, i32, i8*, i8*, i8*, i8*, i8*, %struct.mng_savedata*, i32, i32, i32, i32, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i8*, i8, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i8*, i8*, i8*, i8*, [256 x i8], double, 
void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, i16, i8, i8, i8, i8, i8, i32, i32, i8, i32, i32, i32, i32, i16, i16, i16, i8, i16, i8, i32, i32, i32, i32, i8, i32, i32, i8, i32, i32, i32, i32, i8, i32, i32, i8, i32, i32, i32, i32, i32, i8, i32, i8, i16, i16, i16, i16, i32, [256 x %struct.mng_palette8e], i32, [256 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, i8*, i16, i16, i16, i8*, i8, i8, i32, i32, i32, i32, i8, void ()*, void ()*, void ()*, void ()*, void ()*, void ()*, i8*, i8, i8, i8, i32, i8*, i8*, i16, i16, i16, i16, i32, i32, i8*, %struct.z_stream, i32, i32, i32, i32, i32, i32, i8, i8, [256 x i32], i8 }
+ %struct.mng_palette8e = type { i8, i8, i8 }
+ %struct.mng_pushdata = type { i8*, i8*, i32, i8, i8*, i32 }
+ %struct.mng_savedata = type { i8, i8, i8, i8, i8, i8, i8, i16, i16, i16, i8, i16, i8, i8, i32, i32, i8, i32, i32, i32, i32, i32, [256 x %struct.mng_palette8e], i32, [256 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i32, i8*, i16, i16, i16 }
+ %struct.z_stream = type { i8*, i32, i32, i8*, i32, i32, i8*, %struct.internal_state*, i8* (i8*, i32, i32)*, void (i8*, i8*)*, i8*, i32, i32, i32 }
+
+define void @mng_write_basi() {
+entry:
+ %tmp = load i8* null ; <i8> [#uses=1]
+ %tmp.upgrd.1 = icmp ugt i8 %tmp, 8 ; <i1> [#uses=1]
+ %tmp.upgrd.2 = load i16* null ; <i16> [#uses=2]
+ %tmp3 = icmp eq i16 %tmp.upgrd.2, 255 ; <i1> [#uses=1]
+ %tmp7 = icmp eq i16 %tmp.upgrd.2, -1 ; <i1> [#uses=1]
+ %bOpaque.0.in = select i1 %tmp.upgrd.1, i1 %tmp7, i1 %tmp3 ; <i1> [#uses=1]
+ br i1 %bOpaque.0.in, label %cond_next90, label %bb95
+
+cond_next90: ; preds = %entry
+ ret void
+
+bb95: ; preds = %entry
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll b/src/LLVM/test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll
new file mode 100644
index 0000000..fd00319
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-01-13-ExtCompareMiscompile.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep {icmp ugt}
+; PR1107
+; PR1940
+
+define i1 @test(i8 %A, i8 %B) {
+ %a = zext i8 %A to i32
+ %b = zext i8 %B to i32
+ %c = icmp sgt i32 %a, %b
+ ret i1 %c
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll b/src/LLVM/test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll
new file mode 100644
index 0000000..c02955f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-01-14-FcmpSelf.ll
@@ -0,0 +1,6 @@
+; RUN: opt < %s -instcombine -S | grep {fcmp uno.*0.0}
+; PR1111
+define i1 @test(double %X) {
+ %tmp = fcmp une double %X, %X
+ ret i1 %tmp
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll b/src/LLVM/test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll
new file mode 100644
index 0000000..c0852f1
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-01-18-VectorInfLoop.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define <4 x i32> @test(<4 x i32> %A) {
+ %B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
+ %C = and <4 x i32> %B, < i32 -1, i32 -1, i32 -1, i32 -1 >
+ ret <4 x i32> %C
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-01-27-AndICmp.ll b/src/LLVM/test/Transforms/InstCombine/2007-01-27-AndICmp.ll
new file mode 100644
index 0000000..e6c3768
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-01-27-AndICmp.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep {ugt.*, 1}
+
+define i1 @test(i32 %tmp1030) {
+ %tmp1037 = icmp ne i32 %tmp1030, 40 ; <i1> [#uses=1]
+ %tmp1039 = icmp ne i32 %tmp1030, 41 ; <i1> [#uses=1]
+ %tmp1042 = and i1 %tmp1037, %tmp1039 ; <i1> [#uses=1]
+ ret i1 %tmp1042
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll b/src/LLVM/test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll
new file mode 100644
index 0000000..4c5ceb7
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll
@@ -0,0 +1,45 @@
+; RUN: opt < %s -instcombine -mem2reg -S | grep {%A = alloca}
+; RUN: opt < %s -instcombine -mem2reg -S | \
+; RUN: not grep {%B = alloca}
+; END.
+
+; Ensure that instcombine doesn't sink the loads in entry/cond_true into
+; cond_next. Doing so prevents mem2reg from promoting the B alloca.
+
+define i32 @test2(i32 %C) {
+entry:
+ %A = alloca i32
+ %B = alloca i32
+ %tmp = call i32 (...)* @bar( i32* %A ) ; <i32> [#uses=0]
+ %T = load i32* %A ; <i32> [#uses=1]
+ %tmp2 = icmp eq i32 %C, 0 ; <i1> [#uses=1]
+ br i1 %tmp2, label %cond_next, label %cond_true
+
+cond_true: ; preds = %entry
+ store i32 123, i32* %B
+ call i32 @test2( i32 123 ) ; <i32>:0 [#uses=0]
+ %T1 = load i32* %B ; <i32> [#uses=1]
+ br label %cond_next
+
+cond_next: ; preds = %cond_true, %entry
+ %tmp1.0 = phi i32 [ %T1, %cond_true ], [ %T, %entry ] ; <i32> [#uses=1]
+ %tmp7 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp8 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp9 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp10 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp11 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp12 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp13 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp14 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp15 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp16 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp17 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp18 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp19 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ %tmp20 = call i32 (...)* @baq( ) ; <i32> [#uses=0]
+ ret i32 %tmp1.0
+}
+
+declare i32 @bar(...)
+
+declare i32 @baq(...)
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-02-07-PointerCast.ll b/src/LLVM/test/Transforms/InstCombine/2007-02-07-PointerCast.ll
new file mode 100644
index 0000000..e6ccc22
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-02-07-PointerCast.ll
@@ -0,0 +1,22 @@
+;RUN: opt < %s -instcombine -S | grep zext
+
+; Make sure the uint isn't removed. Instcombine in llvm 1.9 was dropping the
+; uint cast which was causing a sign extend. This only affected code with
+; pointers in the high half of memory, so it wasn't noticed much
+; compile a kernel though...
+
+target datalayout = "e-p:32:32"
+@str = internal constant [6 x i8] c"%llx\0A\00" ; <[6 x i8]*> [#uses=1]
+
+declare i32 @printf(i8*, ...)
+
+define i32 @main(i32 %x, i8** %a) {
+entry:
+ %tmp = getelementptr [6 x i8]* @str, i32 0, i64 0 ; <i8*> [#uses=1]
+ %tmp1 = load i8** %a ; <i8*> [#uses=1]
+ %tmp2 = ptrtoint i8* %tmp1 to i32 ; <i32> [#uses=1]
+ %tmp3 = zext i32 %tmp2 to i64 ; <i64> [#uses=1]
+ %tmp.upgrd.1 = call i32 (i8*, ...)* @printf( i8* %tmp, i64 %tmp3 ) ; <i32> [#uses=0]
+ ret i32 0
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-02-23-PhiFoldInfLoop.ll b/src/LLVM/test/Transforms/InstCombine/2007-02-23-PhiFoldInfLoop.ll
new file mode 100644
index 0000000..b06a851
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-02-23-PhiFoldInfLoop.ll
@@ -0,0 +1,31 @@
+; RUN: opt < %s -instcombine -S | grep ret
+; PR1217
+
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+ %struct.termbox = type { %struct.termbox*, i32, i32, i32, i32, i32 }
+
+
+define void @ggenorien() {
+entry:
+ %tmp68 = icmp eq %struct.termbox* null, null ; <i1> [#uses=1]
+ br i1 %tmp68, label %cond_next448, label %bb80
+
+bb80: ; preds = %entry
+ ret void
+
+cond_next448: ; preds = %entry
+ br i1 false, label %bb756, label %bb595
+
+bb595: ; preds = %cond_next448
+ br label %bb609
+
+bb609: ; preds = %bb756, %bb595
+ %termnum.6240.0 = phi i32 [ 2, %bb595 ], [ %termnum.6, %bb756 ] ; <i32> [#uses=1]
+ %tmp755 = add i32 %termnum.6240.0, 1 ; <i32> [#uses=1]
+ br label %bb756
+
+bb756: ; preds = %bb609, %cond_next448
+ %termnum.6 = phi i32 [ %tmp755, %bb609 ], [ 2, %cond_next448 ] ; <i32> [#uses=1]
+ br label %bb609
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-03-13-CompareMerge.ll b/src/LLVM/test/Transforms/InstCombine/2007-03-13-CompareMerge.ll
new file mode 100644
index 0000000..7f87161
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-03-13-CompareMerge.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {icmp sle}
+; PR1244
+
+define i1 @test(i32 %c.3.i, i32 %d.292.2.i) {
+ %tmp266.i = icmp slt i32 %c.3.i, %d.292.2.i
+ %tmp276.i = icmp eq i32 %c.3.i, %d.292.2.i
+ %sel_tmp80 = or i1 %tmp266.i, %tmp276.i
+ ret i1 %sel_tmp80
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-03-19-BadTruncChangePR1261.ll b/src/LLVM/test/Transforms/InstCombine/2007-03-19-BadTruncChangePR1261.ll
new file mode 100644
index 0000000..75f5d76
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-03-19-BadTruncChangePR1261.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep zext
+; PR1261.
+
+define i16 @test(i31 %zzz) {
+ %A = sext i31 %zzz to i32
+ %B = add i32 %A, 16384
+ %C = lshr i32 %B, 15
+ %D = trunc i32 %C to i16
+ ret i16 %D
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-03-21-SignedRangeTest.ll b/src/LLVM/test/Transforms/InstCombine/2007-03-21-SignedRangeTest.ll
new file mode 100644
index 0000000..d0ac22d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-03-21-SignedRangeTest.ll
@@ -0,0 +1,7 @@
+; For PR1248
+; RUN: opt < %s -instcombine -S | grep {ugt i32 .*, 11}
+define i1 @test(i32 %tmp6) {
+ %tmp7 = sdiv i32 %tmp6, 12 ; <i32> [#uses=1]
+ icmp ne i32 %tmp7, -6 ; <i1>:1 [#uses=1]
+ ret i1 %1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll b/src/LLVM/test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll
new file mode 100644
index 0000000..addcf8a
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll
@@ -0,0 +1,29 @@
+; PR1271
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {icmp eq i32 .tmp.*, 2146435072}
+%struct..0anon = type { i32, i32 }
+%struct..1anon = type { double }
+
+define i32 @main() {
+entry:
+ %u = alloca %struct..1anon, align 8 ; <%struct..1anon*> [#uses=4]
+ %tmp1 = getelementptr %struct..1anon* %u, i32 0, i32 0 ; <double*> [#uses=1]
+ store double 0x7FF0000000000000, double* %tmp1
+ %tmp3 = getelementptr %struct..1anon* %u, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp34 = bitcast double* %tmp3 to %struct..0anon* ; <%struct..0anon*> [#uses=1]
+ %tmp5 = getelementptr %struct..0anon* %tmp34, i32 0, i32 1 ; <i32*> [#uses=1]
+ %tmp6 = load i32* %tmp5 ; <i32> [#uses=1]
+ %tmp7 = shl i32 %tmp6, 1 ; <i32> [#uses=1]
+ %tmp8 = lshr i32 %tmp7, 21 ; <i32> [#uses=1]
+ %tmp89 = trunc i32 %tmp8 to i16 ; <i16> [#uses=1]
+ icmp ne i16 %tmp89, 2047 ; <i1>:0 [#uses=1]
+ zext i1 %0 to i8 ; <i8>:1 [#uses=1]
+ icmp ne i8 %1, 0 ; <i1>:2 [#uses=1]
+ br i1 %2, label %cond_true, label %cond_false
+
+cond_true: ; preds = %entry
+ ret i32 0
+
+cond_false: ; preds = %entry
+ ret i32 1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-03-25-DoubleShift.ll b/src/LLVM/test/Transforms/InstCombine/2007-03-25-DoubleShift.ll
new file mode 100644
index 0000000..ac38dfe
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-03-25-DoubleShift.ll
@@ -0,0 +1,9 @@
+; PR1271
+; RUN: opt < %s -instcombine -S | grep and
+define i1 @test(i32 %tmp13) {
+entry:
+ %tmp14 = shl i32 %tmp13, 12 ; <i32> [#uses=1]
+ %tmp15 = lshr i32 %tmp14, 12 ; <i32> [#uses=1]
+ %res = icmp ne i32 %tmp15, 0 ; <i1>:3 [#uses=1]
+ ret i1 %res
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-03-26-BadShiftMask.ll b/src/LLVM/test/Transforms/InstCombine/2007-03-26-BadShiftMask.ll
new file mode 100644
index 0000000..c3032d9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-03-26-BadShiftMask.ll
@@ -0,0 +1,35 @@
+; PR1271
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {ashr exact i32 %.mp137, 2}
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
+target triple = "i686-pc-linux-gnu"
+
+
+define i1 @test(i32* %tmp141, i32* %tmp145,
+ i32 %b8, i32 %iftmp.430.0, i32* %tmp134.out, i32* %tmp137.out)
+{
+newFuncRoot:
+ %tmp133 = and i32 %b8, 1 ; <i32> [#uses=1]
+ %tmp134 = shl i32 %tmp133, 3 ; <i32> [#uses=3]
+ %tmp136 = ashr i32 %b8, 1 ; <i32> [#uses=1]
+ %tmp137 = shl i32 %tmp136, 3 ; <i32> [#uses=3]
+ %tmp139 = ashr i32 %tmp134, 2 ; <i32> [#uses=1]
+ store i32 %tmp139, i32* %tmp141
+ %tmp143 = ashr i32 %tmp137, 2 ; <i32> [#uses=1]
+ store i32 %tmp143, i32* %tmp145
+ icmp eq i32 %iftmp.430.0, 0 ; <i1>:0 [#uses=1]
+ zext i1 %0 to i8 ; <i8>:1 [#uses=1]
+ icmp ne i8 %1, 0 ; <i1>:2 [#uses=1]
+ br i1 %2, label %cond_true147.exitStub, label %cond_false252.exitStub
+
+cond_true147.exitStub: ; preds = %newFuncRoot
+ store i32 %tmp134, i32* %tmp134.out
+ store i32 %tmp137, i32* %tmp137.out
+ ret i1 true
+
+cond_false252.exitStub: ; preds = %newFuncRoot
+ store i32 %tmp134, i32* %tmp134.out
+ store i32 %tmp137, i32* %tmp137.out
+ ret i1 false
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-04-08-SingleEltVectorCrash.ll b/src/LLVM/test/Transforms/InstCombine/2007-04-08-SingleEltVectorCrash.ll
new file mode 100644
index 0000000..335d707
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-04-08-SingleEltVectorCrash.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR1304
+
+define i64 @bork(<1 x i64> %vec) {
+ %tmp = extractelement <1 x i64> %vec, i32 0
+ ret i64 %tmp
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-05-10-icmp-or.ll b/src/LLVM/test/Transforms/InstCombine/2007-05-10-icmp-or.ll
new file mode 100644
index 0000000..54b60f0
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-05-10-icmp-or.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -disable-output
+define i1 @test(i32 %tmp9) {
+ %tmp20 = icmp ugt i32 %tmp9, 255 ; <i1> [#uses=1]
+ %tmp11.not = icmp sgt i32 %tmp9, 255 ; <i1> [#uses=1]
+ %bothcond = or i1 %tmp20, %tmp11.not ; <i1> [#uses=1]
+ ret i1 %bothcond
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-05-14-Crash.ll b/src/LLVM/test/Transforms/InstCombine/2007-05-14-Crash.ll
new file mode 100644
index 0000000..86b747c
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-05-14-Crash.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -disable-output
+
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
+target triple = "powerpc-apple-darwin8.8.0"
+
+%struct.abc = type { i32, [32 x i8] }
+%struct.def = type { i8**, %struct.abc }
+ %struct.anon = type <{ }>
+
+define i8* @foo(%struct.anon* %deviceRef, %struct.abc* %pCap) {
+entry:
+ %tmp1 = bitcast %struct.anon* %deviceRef to %struct.def*
+ %tmp3 = getelementptr %struct.def* %tmp1, i32 0, i32 1
+ %tmp35 = bitcast %struct.abc* %tmp3 to i8*
+ ret i8* %tmp35
+}
+
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-05-18-CastFoldBug.ll b/src/LLVM/test/Transforms/InstCombine/2007-05-18-CastFoldBug.ll
new file mode 100644
index 0000000..c6f6cc9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-05-18-CastFoldBug.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep {call.*sret}
+; Make sure instcombine doesn't drop the sret attribute.
+
+define void @blah(i16* %tmp10) {
+entry:
+ call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend_stret to void (i16*)*)( i16* sret %tmp10 )
+ ret void
+}
+
+declare i8* @objc_msgSend_stret(i8*, i8*, ...)
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll b/src/LLVM/test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll
new file mode 100644
index 0000000..f9bf725
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine -S | grep {ashr}
+; PR1499
+
+define void @av_cmp_q_cond_true(i32* %retval, i32* %tmp9, i64* %tmp10) {
+newFuncRoot:
+ br label %cond_true
+
+return.exitStub: ; preds = %cond_true
+ ret void
+
+cond_true: ; preds = %newFuncRoot
+ %tmp30 = load i64* %tmp10 ; <i64> [#uses=1]
+ %.cast = zext i32 63 to i64 ; <i64> [#uses=1]
+ %tmp31 = ashr i64 %tmp30, %.cast ; <i64> [#uses=1]
+ %tmp3132 = trunc i64 %tmp31 to i32 ; <i32> [#uses=1]
+ %tmp33 = or i32 %tmp3132, 1 ; <i32> [#uses=1]
+ store i32 %tmp33, i32* %tmp9
+ %tmp34 = load i32* %tmp9 ; <i32> [#uses=1]
+ store i32 %tmp34, i32* %retval
+ br label %return.exitStub
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-06-21-DivCompareMiscomp.ll b/src/LLVM/test/Transforms/InstCombine/2007-06-21-DivCompareMiscomp.ll
new file mode 100644
index 0000000..7a30e6a
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-06-21-DivCompareMiscomp.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {ret i1 true}
+; rdar://5278853
+
+define i1 @test(i32 %tmp468) {
+ %tmp470 = udiv i32 %tmp468, 4 ; <i32> [#uses=2]
+ %tmp475 = icmp ult i32 %tmp470, 1073741824 ; <i1> [#uses=1]
+ ret i1 %tmp475
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-08-02-InfiniteLoop.ll b/src/LLVM/test/Transforms/InstCombine/2007-08-02-InfiniteLoop.ll
new file mode 100644
index 0000000..3f76187
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-08-02-InfiniteLoop.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR1594
+
+define i64 @test(i16 %tmp510, i16 %tmp512) {
+ %W = sext i16 %tmp510 to i32 ; <i32> [#uses=1]
+ %X = sext i16 %tmp512 to i32 ; <i32> [#uses=1]
+ %Y = add i32 %W, %X ; <i32> [#uses=1]
+ %Z = sext i32 %Y to i64 ; <i64> [#uses=1]
+ ret i64 %Z
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll b/src/LLVM/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll
new file mode 100644
index 0000000..c27fe0a
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -instcombine -S | grep icmp
+; PR1646
+
+@__gthrw_pthread_cancel = alias weak i32 (i32)* @pthread_cancel ; <i32 (i32)*> [#uses=1]
+@__gthread_active_ptr.5335 = internal constant i8* bitcast (i32 (i32)* @__gthrw_pthread_cancel to i8*) ; <i8**> [#uses=1]
+declare extern_weak i32 @pthread_cancel(i32)
+
+define i1 @__gthread_active_p() {
+entry:
+ %tmp1 = load i8** @__gthread_active_ptr.5335, align 4 ; <i8*> [#uses=1]
+ %tmp2 = icmp ne i8* %tmp1, null ; <i1> [#uses=1]
+ ret i1 %tmp2
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-09-11-Trampoline.ll b/src/LLVM/test/Transforms/InstCombine/2007-09-11-Trampoline.ll
new file mode 100644
index 0000000..6190aa9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-09-11-Trampoline.ll
@@ -0,0 +1,24 @@
+; RUN: opt < %s -instcombine -S | grep {call i32 @f}
+
+ %struct.FRAME.nest = type { i32, i32 (i32)* }
+ %struct.__builtin_trampoline = type { [10 x i8] }
+
+declare i8* @llvm.init.trampoline(i8*, i8*, i8*)
+
+declare i32 @f(%struct.FRAME.nest* nest , i32 )
+
+define i32 @nest(i32 %n) {
+entry:
+ %FRAME.0 = alloca %struct.FRAME.nest, align 8 ; <%struct.FRAME.nest*> [#uses=3]
+ %TRAMP.216 = alloca [10 x i8], align 16 ; <[10 x i8]*> [#uses=1]
+ %TRAMP.216.sub = getelementptr [10 x i8]* %TRAMP.216, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp3 = getelementptr %struct.FRAME.nest* %FRAME.0, i32 0, i32 0 ; <i32*> [#uses=1]
+ store i32 %n, i32* %tmp3, align 8
+ %FRAME.06 = bitcast %struct.FRAME.nest* %FRAME.0 to i8* ; <i8*> [#uses=1]
+ %tramp = call i8* @llvm.init.trampoline( i8* %TRAMP.216.sub, i8* bitcast (i32 (%struct.FRAME.nest* , i32)* @f to i8*), i8* %FRAME.06 ) ; <i8*> [#uses=1]
+ %tmp7 = getelementptr %struct.FRAME.nest* %FRAME.0, i32 0, i32 1 ; <i32 (i32)**> [#uses=1]
+ %tmp89 = bitcast i8* %tramp to i32 (i32)* ; <i32 (i32)*> [#uses=2]
+ store i32 (i32)* %tmp89, i32 (i32)** %tmp7, align 8
+ %tmp2.i = call i32 %tmp89( i32 1 ) ; <i32> [#uses=1]
+ ret i32 %tmp2.i
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-09-17-AliasConstFold2.ll b/src/LLVM/test/Transforms/InstCombine/2007-09-17-AliasConstFold2.ll
new file mode 100644
index 0000000..23ee12b
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-09-17-AliasConstFold2.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep icmp
+; PR1678
+
+@A = alias weak void ()* @B ; <void ()*> [#uses=1]
+
+declare extern_weak void @B()
+
+define i32 @active() {
+entry:
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %tmp1 = icmp ne void ()* @A, null ; <i1> [#uses=1]
+ %tmp12 = zext i1 %tmp1 to i32 ; <i32> [#uses=1]
+ ret i32 %tmp12
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll b/src/LLVM/test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll
new file mode 100644
index 0000000..fe935f9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll
@@ -0,0 +1,20 @@
+; RUN: opt < %s -instcombine -S | not grep call
+; RUN: opt < %s -std-compile-opts -S | not grep xyz
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+@.str = internal constant [4 x i8] c"xyz\00" ; <[4 x i8]*> [#uses=1]
+
+define void @foo(i8* %P) {
+entry:
+ %P_addr = alloca i8*
+ store i8* %P, i8** %P_addr
+ %tmp = load i8** %P_addr, align 4
+ %tmp1 = getelementptr [4 x i8]* @.str, i32 0, i32 0
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* %tmp1, i32 4, i32 1, i1 false)
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-10-12-Crash.ll b/src/LLVM/test/Transforms/InstCombine/2007-10-12-Crash.ll
new file mode 100644
index 0000000..b3d9f02
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-10-12-Crash.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -instcombine -disable-output
+
+ %struct.Ray = type { %struct.Vec, %struct.Vec }
+ %struct.Scene = type { i32 (...)** }
+ %struct.Vec = type { double, double, double }
+
+declare double @_Z9ray_traceRK3VecRK3RayRK5Scene(%struct.Vec*, %struct.Ray*, %struct.Scene*)
+
+define i32 @main(i32 %argc, i8** %argv) {
+entry:
+ %tmp3 = alloca %struct.Ray, align 4 ; <%struct.Ray*> [#uses=2]
+ %tmp97 = icmp slt i32 0, 512 ; <i1> [#uses=1]
+ br i1 %tmp97, label %bb71, label %bb108
+
+bb29: ; preds = %bb62
+ %tmp322 = bitcast %struct.Ray* %tmp3 to %struct.Vec* ; <%struct.Vec*> [#uses=1]
+ %tmp322.0 = getelementptr %struct.Vec* %tmp322, i32 0, i32 0 ; <double*> [#uses=1]
+ store double 0.000000e+00, double* %tmp322.0
+ %tmp57 = call double @_Z9ray_traceRK3VecRK3RayRK5Scene( %struct.Vec* null, %struct.Ray* %tmp3, %struct.Scene* null ) ; <double> [#uses=0]
+ br label %bb62
+
+bb62: ; preds = %bb71, %bb29
+ %tmp65 = icmp slt i32 0, 4 ; <i1> [#uses=1]
+ br i1 %tmp65, label %bb29, label %bb68
+
+bb68: ; preds = %bb62
+ ret i32 0
+
+bb71: ; preds = %entry
+ %tmp74 = icmp slt i32 0, 4 ; <i1> [#uses=1]
+ br i1 %tmp74, label %bb62, label %bb77
+
+bb77: ; preds = %bb71
+ ret i32 0
+
+bb108: ; preds = %entry
+ ret i32 0
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-10-28-stacksave.ll b/src/LLVM/test/Transforms/InstCombine/2007-10-28-stacksave.ll
new file mode 100644
index 0000000..76bceb6
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-10-28-stacksave.ll
@@ -0,0 +1,47 @@
+; RUN: opt < %s -instcombine -S | grep {call.*stacksave}
+; PR1745
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i686-apple-darwin8"
+@p = weak global i8* null ; <i8**> [#uses=1]
+
+define i32 @main() {
+entry:
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ br label %lab
+
+lab: ; preds = %cleanup31, %entry
+ %n.0 = phi i32 [ 0, %entry ], [ %tmp25, %cleanup31 ] ; <i32> [#uses=2]
+ %tmp2 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=2]
+ %tmp4 = srem i32 %n.0, 47 ; <i32> [#uses=1]
+ %tmp5 = add i32 %tmp4, 1 ; <i32> [#uses=5]
+ %tmp7 = sub i32 %tmp5, 1 ; <i32> [#uses=0]
+ %tmp89 = zext i32 %tmp5 to i64 ; <i64> [#uses=1]
+ %tmp10 = mul i64 %tmp89, 32 ; <i64> [#uses=0]
+ %tmp12 = mul i32 %tmp5, 4 ; <i32> [#uses=0]
+ %tmp1314 = zext i32 %tmp5 to i64 ; <i64> [#uses=1]
+ %tmp15 = mul i64 %tmp1314, 32 ; <i64> [#uses=0]
+ %tmp17 = mul i32 %tmp5, 4 ; <i32> [#uses=1]
+ %tmp18 = alloca i8, i32 %tmp17 ; <i8*> [#uses=1]
+ %tmp1819 = bitcast i8* %tmp18 to i32* ; <i32*> [#uses=2]
+ %tmp21 = getelementptr i32* %tmp1819, i32 0 ; <i32*> [#uses=1]
+ store i32 1, i32* %tmp21, align 4
+ %tmp2223 = bitcast i32* %tmp1819 to i8* ; <i8*> [#uses=1]
+ volatile store i8* %tmp2223, i8** @p, align 4
+ %tmp25 = add i32 %n.0, 1 ; <i32> [#uses=2]
+ %tmp27 = icmp sle i32 %tmp25, 999999 ; <i1> [#uses=1]
+ %tmp2728 = zext i1 %tmp27 to i8 ; <i8> [#uses=1]
+ %toBool = icmp ne i8 %tmp2728, 0 ; <i1> [#uses=1]
+ br i1 %toBool, label %cleanup31, label %cond_next
+
+cond_next: ; preds = %lab
+ call void @llvm.stackrestore( i8* %tmp2 )
+ ret i32 0
+
+cleanup31: ; preds = %lab
+ call void @llvm.stackrestore( i8* %tmp2 )
+ br label %lab
+}
+
+declare i8* @llvm.stacksave()
+
+declare void @llvm.stackrestore(i8*)
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-10-31-RangeCrash.ll b/src/LLVM/test/Transforms/InstCombine/2007-10-31-RangeCrash.ll
new file mode 100644
index 0000000..8105b4b
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-10-31-RangeCrash.ll
@@ -0,0 +1,35 @@
+; RUN: opt < %s -instcombine -disable-output
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
+target triple = "powerpc-apple-darwin8"
+
+define i32 @test() {
+entry:
+ %tmp50.i17 = icmp slt i32 0, 4 ; <i1> [#uses=1]
+ br i1 %tmp50.i17, label %bb.i, label %calculateColorSpecificBlackLevel.exit
+
+bb.i: ; preds = %entry
+ br label %bb51.i.i
+
+bb27.i.i: ; preds = %bb51.i.i
+ %tmp31.i.i = load i16* null, align 2 ; <i16> [#uses=2]
+ %tmp35.i.i = icmp ult i16 %tmp31.i.i, 1 ; <i1> [#uses=1]
+ %tmp41.i.i = icmp ugt i16 %tmp31.i.i, -1 ; <i1> [#uses=1]
+ %bothcond.i.i = or i1 %tmp35.i.i, %tmp41.i.i ; <i1> [#uses=1]
+ %bothcond1.i.i = zext i1 %bothcond.i.i to i32 ; <i32> [#uses=1]
+ %tmp46.i.i = xor i32 %bothcond1.i.i, 1 ; <i32> [#uses=1]
+ %count.0.i.i = add i32 %count.1.i.i, %tmp46.i.i ; <i32> [#uses=1]
+ %tmp50.i.i = add i32 %x.0.i.i, 2 ; <i32> [#uses=1]
+ br label %bb51.i.i
+
+bb51.i.i: ; preds = %bb27.i.i, %bb.i
+ %count.1.i.i = phi i32 [ %count.0.i.i, %bb27.i.i ], [ 0, %bb.i ] ; <i32> [#uses=1]
+ %x.0.i.i = phi i32 [ %tmp50.i.i, %bb27.i.i ], [ 0, %bb.i ] ; <i32> [#uses=2]
+ %tmp54.i.i = icmp slt i32 %x.0.i.i, 0 ; <i1> [#uses=1]
+ br i1 %tmp54.i.i, label %bb27.i.i, label %bb57.i.i
+
+bb57.i.i: ; preds = %bb51.i.i
+ ret i32 0
+
+calculateColorSpecificBlackLevel.exit: ; preds = %entry
+ ret i32 undef
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-10-31-StringCrash.ll b/src/LLVM/test/Transforms/InstCombine/2007-10-31-StringCrash.ll
new file mode 100644
index 0000000..220f3e2
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-10-31-StringCrash.ll
@@ -0,0 +1,21 @@
+; RUN: opt < %s -instcombine -disable-output
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i686-apple-darwin8"
+
+declare void @__darwin_gcc3_preregister_frame_info()
+
+define void @_start(i32 %argc, i8** %argv, i8** %envp) {
+entry:
+ %tmp1 = bitcast void ()* @__darwin_gcc3_preregister_frame_info to i32* ; <i32*> [#uses=1]
+ %tmp2 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
+ %tmp3 = icmp ne i32 %tmp2, 0 ; <i1> [#uses=1]
+ %tmp34 = zext i1 %tmp3 to i8 ; <i8> [#uses=1]
+ %toBool = icmp ne i8 %tmp34, 0 ; <i1> [#uses=1]
+ br i1 %toBool, label %cond_true, label %return
+
+cond_true: ; preds = %entry
+ ret void
+
+return: ; preds = %entry
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-11-07-OpaqueAlignCrash.ll b/src/LLVM/test/Transforms/InstCombine/2007-11-07-OpaqueAlignCrash.ll
new file mode 100644
index 0000000..e6c9bcd
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-11-07-OpaqueAlignCrash.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR1780
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
+target triple = "i686-pc-linux-gnu"
+
+%opaque_t = type opaque
+%opaque2 = type opaque
+%op_ts = type {%opaque2, i32}
+
+@g = external global %opaque_t
+@h = external global %op_ts
+
+define i32 @foo() {
+entry:
+ %x = load i8* bitcast (%opaque_t* @g to i8*)
+ %y = load i32* bitcast (%op_ts* @h to i32*)
+ %z = zext i8 %x to i32
+ %r = add i32 %y, %z
+ ret i32 %r
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-11-15-CompareMiscomp.ll b/src/LLVM/test/Transforms/InstCombine/2007-11-15-CompareMiscomp.ll
new file mode 100644
index 0000000..5282739
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-11-15-CompareMiscomp.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep {icmp eq i32 %In, 1}
+; PR1800
+
+define i1 @test(i32 %In) {
+ %c1 = icmp sgt i32 %In, -1
+ %c2 = icmp eq i32 %In, 1
+ %V = and i1 %c1, %c2
+ ret i1 %V
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-11-22-IcmpCrash.ll b/src/LLVM/test/Transforms/InstCombine/2007-11-22-IcmpCrash.ll
new file mode 100644
index 0000000..f71b99c
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-11-22-IcmpCrash.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR1817
+
+define i1 @test1(i32 %X) {
+ %A = icmp slt i32 %X, 10
+ %B = icmp ult i32 %X, 10
+ %C = and i1 %A, %B
+ ret i1 %C
+}
+
+define i1 @test2(i32 %X) {
+ %A = icmp slt i32 %X, 10
+ %B = icmp ult i32 %X, 10
+ %C = or i1 %A, %B
+ ret i1 %C
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-11-25-CompatibleAttributes.ll b/src/LLVM/test/Transforms/InstCombine/2007-11-25-CompatibleAttributes.ll
new file mode 100644
index 0000000..e3192a9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-11-25-CompatibleAttributes.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | not grep bitcast
+; PR1716
+
+@.str = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
+
+define i32 @main(i32 %argc, i8** %argv) {
+entry:
+ %tmp32 = tail call i32 (i8* , ...) * bitcast (i32 (i8*, ...) * @printf to i32 (i8* , ...) *)( i8* getelementptr ([4 x i8]* @.str, i32 0, i32 0) , i32 0 ) nounwind ; <i32> [#uses=0]
+ ret i32 undef
+}
+
+declare i32 @printf(i8*, ...) nounwind
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-12-10-ConstFoldCompare.ll b/src/LLVM/test/Transforms/InstCombine/2007-12-10-ConstFoldCompare.ll
new file mode 100644
index 0000000..6420537
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-12-10-ConstFoldCompare.ll
@@ -0,0 +1,9 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
+target triple = "i686-pc-linux-gnu"
+; RUN: opt < %s -instcombine -S | not grep {ret i1 0}
+; PR1850
+
+define i1 @test() {
+ %cond = icmp ule i8* inttoptr (i64 4294967297 to i8*), inttoptr (i64 5 to i8*)
+ ret i1 %cond
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-12-12-GEPScale.ll b/src/LLVM/test/Transforms/InstCombine/2007-12-12-GEPScale.ll
new file mode 100644
index 0000000..cea87f2
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-12-12-GEPScale.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | not grep 1431655764
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
+
+define i8* @foo([100 x {i8,i8,i8}]* %x) {
+entry:
+ %p = bitcast [100 x {i8,i8,i8}]* %x to i8*
+ %q = getelementptr i8* %p, i32 -4
+ ret i8* %q
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-12-16-AsmNoUnwind.ll b/src/LLVM/test/Transforms/InstCombine/2007-12-16-AsmNoUnwind.ll
new file mode 100644
index 0000000..85cf9b6
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-12-16-AsmNoUnwind.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -S | grep nounwind
+
+define void @bar() {
+entry:
+ call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"( )
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-12-18-AddSelCmpSub.ll b/src/LLVM/test/Transforms/InstCombine/2007-12-18-AddSelCmpSub.ll
new file mode 100644
index 0000000..cc89f6d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-12-18-AddSelCmpSub.ll
@@ -0,0 +1,29 @@
+; RUN: opt < %s -instcombine -S | grep {add} | count 1
+
+define i32 @foo(i32 %a) {
+entry:
+ %tmp15 = sub i32 99, %a ; <i32> [#uses=2]
+ %tmp16 = icmp slt i32 %tmp15, 0 ; <i1> [#uses=1]
+ %smax = select i1 %tmp16, i32 0, i32 %tmp15 ; <i32> [#uses=1]
+ %tmp12 = add i32 %smax, %a ; <i32> [#uses=1]
+ %tmp13 = add i32 %tmp12, 1 ; <i32> [#uses=1]
+ ret i32 %tmp13
+}
+
+define i32 @bar(i32 %a) {
+entry:
+ %tmp15 = sub i32 99, %a ; <i32> [#uses=2]
+ %tmp16 = icmp slt i32 %tmp15, 0 ; <i1> [#uses=1]
+ %smax = select i1 %tmp16, i32 0, i32 %tmp15 ; <i32> [#uses=1]
+ %tmp12 = add i32 %smax, %a ; <i32> [#uses=1]
+ ret i32 %tmp12
+}
+
+define i32 @fun(i32 %a) {
+entry:
+ %tmp15 = sub i32 99, %a ; <i32> [#uses=1]
+ %tmp16 = icmp slt i32 %a, 0 ; <i1> [#uses=1]
+ %smax = select i1 %tmp16, i32 0, i32 %tmp15 ; <i32> [#uses=1]
+ %tmp12 = add i32 %smax, %a ; <i32> [#uses=1]
+ ret i32 %tmp12
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2007-12-28-IcmpSub2.ll b/src/LLVM/test/Transforms/InstCombine/2007-12-28-IcmpSub2.ll
new file mode 100644
index 0000000..8721c83
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2007-12-28-IcmpSub2.ll
@@ -0,0 +1,89 @@
+; RUN: opt < %s -mem2reg -instcombine -S | grep "ret i32 1" | count 8
+
+define i32 @test1() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp ule i32 %sub, 0
+ %retval = select i1 %cmp, i32 0, i32 1
+ ret i32 %retval
+}
+
+define i32 @test2() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp ugt i32 %sub, 0
+ %retval = select i1 %cmp, i32 1, i32 0
+ ret i32 %retval
+}
+
+define i32 @test3() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp slt i32 %sub, 0
+ %retval = select i1 %cmp, i32 1, i32 0
+ ret i32 %retval
+}
+
+define i32 @test4() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp sle i32 %sub, 0
+ %retval = select i1 %cmp, i32 1, i32 0
+ ret i32 %retval
+}
+
+define i32 @test5() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp sge i32 %sub, 0
+ %retval = select i1 %cmp, i32 0, i32 1
+ ret i32 %retval
+}
+
+define i32 @test6() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp sgt i32 %sub, 0
+ %retval = select i1 %cmp, i32 0, i32 1
+ ret i32 %retval
+}
+
+define i32 @test7() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp eq i32 %sub, 0
+ %retval = select i1 %cmp, i32 0, i32 1
+ ret i32 %retval
+}
+
+define i32 @test8() {
+entry:
+ %z = alloca i32
+ store i32 0, i32* %z
+ %tmp = load i32* %z
+ %sub = sub i32 %tmp, 1
+ %cmp = icmp ne i32 %sub, 0
+ %retval = select i1 %cmp, i32 1, i32 0
+ ret i32 %retval
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-01-06-BitCastAttributes.ll b/src/LLVM/test/Transforms/InstCombine/2008-01-06-BitCastAttributes.ll
new file mode 100644
index 0000000..23b6067
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-01-06-BitCastAttributes.ll
@@ -0,0 +1,23 @@
+; Ignore stderr, we expect warnings there
+; RUN: opt < %s -instcombine 2> /dev/null -S | not grep bitcast
+
+define void @a() {
+ ret void
+}
+
+define signext i32 @b(i32* inreg %x) {
+ ret i32 0
+}
+
+define void @c(...) {
+ ret void
+}
+
+define void @g(i32* %y) {
+ call void bitcast (void ()* @a to void (i32*)*)( i32* noalias %y )
+ call <2 x i32> bitcast (i32 (i32*)* @b to <2 x i32> (i32*)*)( i32* inreg null ) ; <<2 x i32>>:1 [#uses=0]
+ %x = call i64 bitcast (i32 (i32*)* @b to i64 (i32)*)( i32 0 ) ; <i64> [#uses=0]
+ call void bitcast (void (...)* @c to void (i32)*)( i32 0 )
+ call void bitcast (void (...)* @c to void (i32)*)( i32 zeroext 0 )
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-01-06-CastCrash.ll b/src/LLVM/test/Transforms/InstCombine/2008-01-06-CastCrash.ll
new file mode 100644
index 0000000..097a0ce
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-01-06-CastCrash.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define <2 x i32> @f() {
+ ret <2 x i32> undef
+}
+
+define i32 @g() {
+ %x = call i32 bitcast (<2 x i32> ()* @f to i32 ()*)( ) ; <i32> [#uses=1]
+ ret i32 %x
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-01-06-VoidCast.ll b/src/LLVM/test/Transforms/InstCombine/2008-01-06-VoidCast.ll
new file mode 100644
index 0000000..407ff4d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-01-06-VoidCast.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | not grep bitcast
+
+define void @f(i16 %y) {
+ ret void
+}
+
+define i32 @g(i32 %y) {
+ %x = call i32 bitcast (void (i16)* @f to i32 (i32)*)( i32 %y ) ; <i32> [#uses=1]
+ ret i32 %x
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-01-13-AndCmpCmp.ll b/src/LLVM/test/Transforms/InstCombine/2008-01-13-AndCmpCmp.ll
new file mode 100644
index 0000000..fbc8ba9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-01-13-AndCmpCmp.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep and
+; PR1907
+
+define i1 @test(i32 %c84.17) {
+ %tmp2696 = icmp ne i32 %c84.17, 34 ; <i1> [#uses=2]
+ %tmp2699 = icmp sgt i32 %c84.17, -1 ; <i1> [#uses=1]
+ %tmp2703 = and i1 %tmp2696, %tmp2699 ; <i1> [#uses=1]
+ ret i1 %tmp2703
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-01-13-NoBitCastAttributes.ll b/src/LLVM/test/Transforms/InstCombine/2008-01-13-NoBitCastAttributes.ll
new file mode 100644
index 0000000..510a68c
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-01-13-NoBitCastAttributes.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine -S | grep bitcast | count 2
+
+define signext i32 @b(i32* inreg %x) {
+ ret i32 0
+}
+
+define void @c(...) {
+ ret void
+}
+
+define void @g(i32* %y) {
+ call i32 bitcast (i32 (i32*)* @b to i32 (i32)*)( i32 zeroext 0 ) ; <i32>:2 [#uses=0]
+ call void bitcast (void (...)* @c to void (i32*)*)( i32* sret null )
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-01-14-DoubleNest.ll b/src/LLVM/test/Transforms/InstCombine/2008-01-14-DoubleNest.ll
new file mode 100644
index 0000000..6401dfd
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-01-14-DoubleNest.ll
@@ -0,0 +1,24 @@
+; RUN: opt < %s -instcombine -disable-output
+
+ %struct.FRAME.nest = type { i32, i32 (i32*)* }
+ %struct.__builtin_trampoline = type { [10 x i8] }
+
+declare i8* @llvm.init.trampoline(i8*, i8*, i8*) nounwind
+
+declare i32 @f(%struct.FRAME.nest* nest , i32*)
+
+define i32 @nest(i32 %n) {
+entry:
+ %FRAME.0 = alloca %struct.FRAME.nest, align 8 ; <%struct.FRAME.nest*> [#uses=3]
+ %TRAMP.216 = alloca [10 x i8], align 16 ; <[10 x i8]*> [#uses=1]
+ %TRAMP.216.sub = getelementptr [10 x i8]* %TRAMP.216, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp3 = getelementptr %struct.FRAME.nest* %FRAME.0, i32 0, i32 0 ; <i32*> [#uses=1]
+ store i32 %n, i32* %tmp3, align 8
+ %FRAME.06 = bitcast %struct.FRAME.nest* %FRAME.0 to i8* ; <i8*> [#uses=1]
+ %tramp = call i8* @llvm.init.trampoline( i8* %TRAMP.216.sub, i8* bitcast (i32 (%struct.FRAME.nest*, i32*)* @f to i8*), i8* %FRAME.06 ) ; <i8*> [#uses=1]
+ %tmp7 = getelementptr %struct.FRAME.nest* %FRAME.0, i32 0, i32 1 ; <i32 (i32*)**> [#uses=1]
+ %tmp89 = bitcast i8* %tramp to i32 (i32*)* ; <i32 (i32*)*> [#uses=2]
+ store i32 (i32*)* %tmp89, i32 (i32*)** %tmp7, align 8
+ %tmp2.i = call i32 %tmp89( i32* nest null ) ; <i32> [#uses=1]
+ ret i32 %tmp2.i
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-01-14-VarArgTrampoline.ll b/src/LLVM/test/Transforms/InstCombine/2008-01-14-VarArgTrampoline.ll
new file mode 100644
index 0000000..aacea9d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-01-14-VarArgTrampoline.ll
@@ -0,0 +1,26 @@
+; RUN: opt < %s -instcombine -S | grep zeroext
+
+ %struct.FRAME.nest = type { i32, i32 (...)* }
+ %struct.__builtin_trampoline = type { [10 x i8] }
+
+declare void @llvm.init.trampoline(i8*, i8*, i8*) nounwind
+declare i8* @llvm.adjust.trampoline(i8*) nounwind
+
+declare i32 @f(%struct.FRAME.nest* nest , ...)
+
+define i32 @nest(i32 %n) {
+entry:
+ %FRAME.0 = alloca %struct.FRAME.nest, align 8 ; <%struct.FRAME.nest*> [#uses=3]
+ %TRAMP.216 = alloca [10 x i8], align 16 ; <[10 x i8]*> [#uses=1]
+ %TRAMP.216.sub = getelementptr [10 x i8]* %TRAMP.216, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp3 = getelementptr %struct.FRAME.nest* %FRAME.0, i32 0, i32 0 ; <i32*> [#uses=1]
+ store i32 %n, i32* %tmp3, align 8
+ %FRAME.06 = bitcast %struct.FRAME.nest* %FRAME.0 to i8* ; <i8*> [#uses=1]
+ call void @llvm.init.trampoline( i8* %TRAMP.216.sub, i8* bitcast (i32 (%struct.FRAME.nest*, ...)* @f to i8*), i8* %FRAME.06 ) ; <i8*> [#uses=1]
+ %tramp = call i8* @llvm.adjust.trampoline( i8* %TRAMP.216.sub)
+ %tmp7 = getelementptr %struct.FRAME.nest* %FRAME.0, i32 0, i32 1 ; <i32 (...)**> [#uses=1]
+ %tmp89 = bitcast i8* %tramp to i32 (...)* ; <i32 (...)*> [#uses=2]
+ store i32 (...)* %tmp89, i32 (...)** %tmp7, align 8
+ %tmp2.i = call i32 (...)* %tmp89( i32 zeroext 0 ) ; <i32> [#uses=1]
+ ret i32 %tmp2.i
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-01-21-MismatchedCastAndCompare.ll b/src/LLVM/test/Transforms/InstCombine/2008-01-21-MismatchedCastAndCompare.ll
new file mode 100644
index 0000000..5ff23a3
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-01-21-MismatchedCastAndCompare.ll
@@ -0,0 +1,20 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; PR1940
+
+define i1 @test1(i8 %A, i8 %B) {
+ %a = zext i8 %A to i32
+ %b = zext i8 %B to i32
+ %c = icmp sgt i32 %a, %b
+ ret i1 %c
+; CHECK: %c = icmp ugt i8 %A, %B
+; CHECK: ret i1 %c
+}
+
+define i1 @test2(i8 %A, i8 %B) {
+ %a = sext i8 %A to i32
+ %b = sext i8 %B to i32
+ %c = icmp ugt i32 %a, %b
+ ret i1 %c
+; CHECK: %c = icmp ugt i8 %A, %B
+; CHECK: ret i1 %c
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-01-21-MulTrunc.ll b/src/LLVM/test/Transforms/InstCombine/2008-01-21-MulTrunc.ll
new file mode 100644
index 0000000..87c2b75
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-01-21-MulTrunc.ll
@@ -0,0 +1,17 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
+define i16 @test1(i16 %a) {
+ %tmp = zext i16 %a to i32 ; <i32> [#uses=2]
+ %tmp21 = lshr i32 %tmp, 8 ; <i32> [#uses=1]
+; CHECK: %tmp21 = lshr i16 %a, 8
+ %tmp5 = mul i32 %tmp, 5 ; <i32> [#uses=1]
+; CHECK: %tmp5 = mul i16 %a, 5
+ %tmp.upgrd.32 = or i32 %tmp21, %tmp5 ; <i32> [#uses=1]
+; CHECK: %tmp.upgrd.32 = or i16 %tmp21, %tmp5
+ %tmp.upgrd.3 = trunc i32 %tmp.upgrd.32 to i16 ; <i16> [#uses=1]
+ ret i16 %tmp.upgrd.3
+; CHECK: ret i16 %tmp.upgrd.32
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-01-27-FloatSelect.ll b/src/LLVM/test/Transforms/InstCombine/2008-01-27-FloatSelect.ll
new file mode 100644
index 0000000..c161bcc
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-01-27-FloatSelect.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -S | grep select
+
+define double @fold(i1 %a, double %b) {
+%s = select i1 %a, double 0., double 1.
+%c = fdiv double %b, %s
+ret double %c
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-01-29-AddICmp.ll b/src/LLVM/test/Transforms/InstCombine/2008-01-29-AddICmp.ll
new file mode 100644
index 0000000..28a94ce
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-01-29-AddICmp.ll
@@ -0,0 +1,20 @@
+; RUN: opt < %s -instcombine -S | not grep {a.off}
+; PR1949
+
+define i1 @test1(i32 %a) {
+ %a.off = add i32 %a, 4 ; <i32> [#uses=1]
+ %C = icmp ult i32 %a.off, 4 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test2(i32 %a) {
+ %a.off = sub i32 %a, 4 ; <i32> [#uses=1]
+ %C = icmp ugt i32 %a.off, -5 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test3(i32 %a) {
+ %a.off = add i32 %a, 4 ; <i32> [#uses=1]
+ %C = icmp slt i32 %a.off, 2147483652 ; <i1> [#uses=1]
+ ret i1 %C
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-02-13-MulURem.ll b/src/LLVM/test/Transforms/InstCombine/2008-02-13-MulURem.ll
new file mode 100644
index 0000000..a88c510
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-02-13-MulURem.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep rem
+; PR1933
+
+define i32 @fold(i32 %a) {
+ %s = mul i32 %a, 3
+ %c = urem i32 %s, 3
+ ret i32 %c
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-02-16-SDivOverflow.ll b/src/LLVM/test/Transforms/InstCombine/2008-02-16-SDivOverflow.ll
new file mode 100644
index 0000000..af61c15
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-02-16-SDivOverflow.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep {ret i.* 0} | count 2
+; PR2048
+
+define i32 @i(i32 %a) {
+ %tmp1 = sdiv i32 %a, -1431655765
+ %tmp2 = sdiv i32 %tmp1, 3
+ ret i32 %tmp2
+}
+
+define i8 @j(i8 %a) {
+ %tmp1 = sdiv i8 %a, 64
+ %tmp2 = sdiv i8 %tmp1, 3
+ ret i8 %tmp2
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-02-16-SDivOverflow2.ll b/src/LLVM/test/Transforms/InstCombine/2008-02-16-SDivOverflow2.ll
new file mode 100644
index 0000000..d26dec1
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-02-16-SDivOverflow2.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {sdiv i8 \%a, 9}
+; PR2048
+
+define i8 @i(i8 %a) {
+ %tmp1 = sdiv i8 %a, -3
+ %tmp2 = sdiv i8 %tmp1, -3
+ ret i8 %tmp2
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-02-23-MulSub.ll b/src/LLVM/test/Transforms/InstCombine/2008-02-23-MulSub.ll
new file mode 100644
index 0000000..bb21c4b
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-02-23-MulSub.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | not grep mul
+
+define i26 @test(i26 %a) nounwind {
+entry:
+ %_add = mul i26 %a, 2885 ; <i26> [#uses=1]
+ %_shl2 = mul i26 %a, 2884 ; <i26> [#uses=1]
+ %_sub = sub i26 %_add, %_shl2 ; <i26> [#uses=1]
+ ret i26 %_sub
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-02-28-OrFCmpCrash.ll b/src/LLVM/test/Transforms/InstCombine/2008-02-28-OrFCmpCrash.ll
new file mode 100644
index 0000000..7f8bd4f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-02-28-OrFCmpCrash.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine | llvm-dis
+; rdar://5771353
+
+define float @test(float %x, x86_fp80 %y) nounwind readonly {
+entry:
+ %tmp67 = fcmp uno x86_fp80 %y, 0xK00000000000000000000 ; <i1> [#uses=1]
+ %tmp71 = fcmp uno float %x, 0.000000e+00 ; <i1> [#uses=1]
+ %bothcond = or i1 %tmp67, %tmp71 ; <i1> [#uses=1]
+ br i1 %bothcond, label %bb74, label %bb80
+
+bb74: ; preds = %entry
+ ret float 0.000000e+00
+
+bb80: ; preds = %entry
+ ret float 0.000000e+00
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-03-13-IntToPtr.ll b/src/LLVM/test/Transforms/InstCombine/2008-03-13-IntToPtr.ll
new file mode 100644
index 0000000..da7e49e
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-03-13-IntToPtr.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {16} | count 1
+
+define i8* @bork(i8** %qux) {
+ %tmp275 = load i8** %qux, align 1
+ %tmp275276 = ptrtoint i8* %tmp275 to i32
+ %tmp277 = add i32 %tmp275276, 16
+ %tmp277278 = inttoptr i32 %tmp277 to i8*
+ ret i8* %tmp277278
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-04-22-ByValBitcast.ll b/src/LLVM/test/Transforms/InstCombine/2008-04-22-ByValBitcast.ll
new file mode 100644
index 0000000..aa38065
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-04-22-ByValBitcast.ll
@@ -0,0 +1,15 @@
+;; The bitcast cannot be eliminated because byval arguments need
+;; the correct type, or at least a type of the correct size.
+; RUN: opt < %s -instcombine -S | grep bitcast
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9"
+ %struct.NSRect = type { [4 x float] }
+
+define void @foo(i8* %context) nounwind {
+entry:
+ %tmp1 = bitcast i8* %context to %struct.NSRect* ; <%struct.NSRect*> [#uses=1]
+ call void (i32, ...)* @bar( i32 3, %struct.NSRect* byval align 4 %tmp1 ) nounwind
+ ret void
+}
+
+declare void @bar(i32, ...)
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll b/src/LLVM/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll
new file mode 100644
index 0000000..6847f5e
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-04-28-VolatileStore.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep {store volatile}
+
+define void @test() {
+ %votf = alloca <4 x float> ; <<4 x float>*> [#uses=1]
+ volatile store <4 x float> zeroinitializer, <4 x float>* %votf, align 16
+ ret void
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll b/src/LLVM/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll
new file mode 100644
index 0000000..a24f307
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -instcombine -S | grep {load volatile} | count 2
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin8"
+@g_1 = internal global i32 0 ; <i32*> [#uses=3]
+
+define i32 @main() nounwind {
+entry:
+ %tmp93 = icmp slt i32 0, 10 ; <i1> [#uses=0]
+ %tmp34 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1]
+ br label %bb
+
+bb: ; preds = %bb, %entry
+ %b.0.reg2mem.0 = phi i32 [ 0, %entry ], [ %tmp6, %bb ] ; <i32> [#uses=1]
+ %tmp3.reg2mem.0 = phi i32 [ %tmp34, %entry ], [ %tmp3, %bb ] ; <i32> [#uses=1]
+ %tmp4 = add i32 %tmp3.reg2mem.0, 5 ; <i32> [#uses=1]
+ volatile store i32 %tmp4, i32* @g_1, align 4
+ %tmp6 = add i32 %b.0.reg2mem.0, 1 ; <i32> [#uses=2]
+ %tmp9 = icmp slt i32 %tmp6, 10 ; <i1> [#uses=1]
+ %tmp3 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1]
+ br i1 %tmp9, label %bb, label %bb11
+
+bb11: ; preds = %bb
+ ret i32 0
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll b/src/LLVM/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll
new file mode 100644
index 0000000..5fb11ff
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll
@@ -0,0 +1,21 @@
+; RUN: opt < %s -instcombine -S | grep {load volatile} | count 2
+; PR2262
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin8"
+@g_1 = internal global i32 0 ; <i32*> [#uses=3]
+
+define i32 @main(i32 %i) nounwind {
+entry:
+ %tmp93 = icmp slt i32 %i, 10 ; <i1> [#uses=0]
+ %tmp34 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1]
+ br i1 %tmp93, label %bb11, label %bb
+
+bb: ; preds = %bb, %entry
+ %tmp3 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1]
+ br label %bb11
+
+bb11: ; preds = %bb
+ %tmp4 = phi i32 [ %tmp34, %entry ], [ %tmp3, %bb ] ; <i32> [#uses=1]
+ ret i32 %tmp4
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-05-08-LiveStoreDelete.ll b/src/LLVM/test/Transforms/InstCombine/2008-05-08-LiveStoreDelete.ll
new file mode 100644
index 0000000..bbd0042
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-05-08-LiveStoreDelete.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -instcombine -S | grep {store i8} | count 3
+; PR2297
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin8"
+
+define i32 @a() nounwind {
+entry:
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %tmp1 = call i8* @malloc( i32 10 ) nounwind ; <i8*> [#uses=5]
+ %tmp3 = getelementptr i8* %tmp1, i32 1 ; <i8*> [#uses=1]
+ store i8 0, i8* %tmp3, align 1
+ %tmp5 = getelementptr i8* %tmp1, i32 0 ; <i8*> [#uses=1]
+ store i8 1, i8* %tmp5, align 1
+ %tmp7 = call i32 @strlen( i8* %tmp1 ) nounwind readonly ; <i32> [#uses=1]
+ %tmp9 = getelementptr i8* %tmp1, i32 0 ; <i8*> [#uses=1]
+ store i8 0, i8* %tmp9, align 1
+ %tmp11 = call i32 (...)* @b( i8* %tmp1 ) nounwind ; <i32> [#uses=0]
+ ret i32 %tmp7
+}
+
+declare i8* @malloc(i32) nounwind
+
+declare i32 @strlen(i8*) nounwind readonly
+
+declare i32 @b(...)
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-05-08-StrLenSink.ll b/src/LLVM/test/Transforms/InstCombine/2008-05-08-StrLenSink.ll
new file mode 100644
index 0000000..1da2856
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-05-08-StrLenSink.ll
@@ -0,0 +1,32 @@
+; RUN: opt -S -instcombine %s | FileCheck %s
+; PR2297
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin8"
+
+define i32 @a() nounwind {
+entry:
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %tmp1 = call i8* @malloc( i32 10 ) nounwind ; <i8*> [#uses=5]
+ %tmp3 = getelementptr i8* %tmp1, i32 1 ; <i8*> [#uses=1]
+ store i8 0, i8* %tmp3, align 1
+ %tmp5 = getelementptr i8* %tmp1, i32 0 ; <i8*> [#uses=1]
+ store i8 1, i8* %tmp5, align 1
+; CHECK: store
+; CHECK: store
+; CHECK-NEXT: strlen
+; CHECK-NEXT: store
+ %tmp7 = call i32 @strlen( i8* %tmp1 ) nounwind readonly ; <i32> [#uses=1]
+ %tmp9 = getelementptr i8* %tmp1, i32 0 ; <i8*> [#uses=1]
+ store i8 0, i8* %tmp9, align 1
+ %tmp11 = call i32 (...)* @b( i8* %tmp1 ) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret i32 %tmp7
+}
+
+declare i8* @malloc(i32) nounwind
+
+declare i32 @strlen(i8*) nounwind readonly
+
+declare i32 @b(...)
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-05-09-SinkOfInvoke.ll b/src/LLVM/test/Transforms/InstCombine/2008-05-09-SinkOfInvoke.ll
new file mode 100644
index 0000000..f6eb248
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-05-09-SinkOfInvoke.ll
@@ -0,0 +1,37 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR2303
+ %"struct.std::ctype<char>" = type { %"struct.std::locale::facet", i32*, i8, i32*, i32*, i16*, i8, [256 x i8], [256 x i8], i8 }
+ %"struct.std::locale::facet" = type { i32 (...)**, i32 }
+
+declare i32* @_ZNSt6locale5facet15_S_get_c_localeEv()
+
+declare i32** @__ctype_toupper_loc() readnone
+
+declare i32** @__ctype_tolower_loc() readnone
+
+define void @_ZNSt5ctypeIcEC2EPiPKtbm(%"struct.std::ctype<char>"* %this, i32* %unnamed_arg, i16* %__table, i8 zeroext %__del, i64 %__refs) {
+entry:
+ %tmp8 = invoke i32* @_ZNSt6locale5facet15_S_get_c_localeEv( )
+ to label %invcont unwind label %lpad ; <i32*> [#uses=0]
+
+invcont: ; preds = %entry
+ %tmp32 = invoke i32** @__ctype_toupper_loc( ) readnone
+ to label %invcont31 unwind label %lpad ; <i32**> [#uses=0]
+
+invcont31: ; preds = %invcont
+ %tmp38 = invoke i32** @__ctype_tolower_loc( ) readnone
+ to label %invcont37 unwind label %lpad ; <i32**> [#uses=1]
+
+invcont37: ; preds = %invcont31
+ %tmp39 = load i32** %tmp38, align 8 ; <i32*> [#uses=1]
+ %tmp41 = getelementptr %"struct.std::ctype<char>"* %this, i32 0, i32 4 ; <i32**> [#uses=1]
+ store i32* %tmp39, i32** %tmp41, align 8
+ ret void
+
+lpad: ; preds = %invcont31, %invcont, %entry
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
+ unreachable
+}
+
+declare i32 @__gxx_personality_v0(...)
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-05-17-InfLoop.ll b/src/LLVM/test/Transforms/InstCombine/2008-05-17-InfLoop.ll
new file mode 100644
index 0000000..2939a48
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-05-17-InfLoop.ll
@@ -0,0 +1,23 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR2339
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-s0:0:64-f80:32:32"
+target triple = "i686-pc-linux-gnu"
+
+declare void @BZALLOC(i32)
+
+define void @f(i32) {
+entry:
+ %blockSize100k = alloca i32 ; <i32*> [#uses=2]
+ store i32 %0, i32* %blockSize100k
+ %n = alloca i32 ; <i32*> [#uses=2]
+ load i32* %blockSize100k ; <i32>:1 [#uses=1]
+ store i32 %1, i32* %n
+ load i32* %n ; <i32>:2 [#uses=1]
+ add i32 %2, 2 ; <i32>:3 [#uses=1]
+ mul i32 %3, ptrtoint (i32* getelementptr (i32* null, i32 1) to i32) ; <i32>:4 [#uses=1]
+ call void @BZALLOC( i32 %4 )
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-05-18-FoldIntToPtr.ll b/src/LLVM/test/Transforms/InstCombine/2008-05-18-FoldIntToPtr.ll
new file mode 100644
index 0000000..b34fc1e
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-05-18-FoldIntToPtr.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -instcombine -S | grep {ret i1 false} | count 2
+; PR2329
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
+target triple = "i386-pc-linux-gnu"
+
+define i1 @f1() {
+ ret i1 icmp eq (i8* inttoptr (i32 1 to i8*), i8* inttoptr (i32 2 to i8*))
+}
+
+define i1 @f2() {
+ ret i1 icmp eq (i8* inttoptr (i16 1 to i8*), i8* inttoptr (i16 2 to i8*))
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-05-22-IDivVector.ll b/src/LLVM/test/Transforms/InstCombine/2008-05-22-IDivVector.ll
new file mode 100644
index 0000000..f7ba99c
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-05-22-IDivVector.ll
@@ -0,0 +1,6 @@
+; RUN: opt < %s -instcombine -disable-output
+
+define <3 x i8> @f(<3 x i8> %i) {
+ %A = sdiv <3 x i8> %i, %i
+ ret <3 x i8> %A
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-05-22-NegValVector.ll b/src/LLVM/test/Transforms/InstCombine/2008-05-22-NegValVector.ll
new file mode 100644
index 0000000..bf92faf
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-05-22-NegValVector.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | not grep sub
+
+define <3 x i8> @f(<3 x i8> %a) {
+ %A = sub <3 x i8> zeroinitializer, %a
+ %B = mul <3 x i8> %A, <i8 5, i8 5, i8 5>
+ ret <3 x i8> %B
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-05-23-CompareFold.ll b/src/LLVM/test/Transforms/InstCombine/2008-05-23-CompareFold.ll
new file mode 100644
index 0000000..2de5af7
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-05-23-CompareFold.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | grep {ret i1 false}
+; PR2359
+define i1 @f(i8* %x) {
+entry:
+ %tmp462 = load i8* %x, align 1 ; <i8> [#uses=1]
+ %tmp462463 = sitofp i8 %tmp462 to float ; <float> [#uses=1]
+ %tmp464 = fcmp ugt float %tmp462463, 0x47EFFFFFE0000000 ; <i1>
+ ret i1 %tmp464
+}
+
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-05-31-AddBool.ll b/src/LLVM/test/Transforms/InstCombine/2008-05-31-AddBool.ll
new file mode 100644
index 0000000..5416693
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-05-31-AddBool.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -S | grep {xor}
+; PR2389
+
+define i1 @test(i1 %a, i1 %b) {
+ %A = add i1 %a, %b
+ ret i1 %A
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-05-31-Bools.ll b/src/LLVM/test/Transforms/InstCombine/2008-05-31-Bools.ll
new file mode 100644
index 0000000..a0fe47a
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-05-31-Bools.ll
@@ -0,0 +1,24 @@
+; RUN: opt < %s -instcombine -S > %t
+; RUN: grep {xor} %t
+; RUN: grep {and} %t
+; RUN: not grep {div} %t
+
+define i1 @foo1(i1 %a, i1 %b) {
+ %A = sub i1 %a, %b
+ ret i1 %A
+}
+
+define i1 @foo2(i1 %a, i1 %b) {
+ %A = mul i1 %a, %b
+ ret i1 %A
+}
+
+define i1 @foo3(i1 %a, i1 %b) {
+ %A = udiv i1 %a, %b
+ ret i1 %A
+}
+
+define i1 @foo4(i1 %a, i1 %b) {
+ %A = sdiv i1 %a, %b
+ ret i1 %A
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-06-05-ashr-crash.ll b/src/LLVM/test/Transforms/InstCombine/2008-06-05-ashr-crash.ll
new file mode 100644
index 0000000..5e4a9d0
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-06-05-ashr-crash.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine
+
+define i65 @foo(i65 %x) nounwind {
+entry:
+ %tmp2 = ashr i65 %x, 65 ; <i65> [#uses=1]
+ ret i65 %tmp2
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-06-08-ICmpPHI.ll b/src/LLVM/test/Transforms/InstCombine/2008-06-08-ICmpPHI.ll
new file mode 100644
index 0000000..917d3ae
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-06-08-ICmpPHI.ll
@@ -0,0 +1,47 @@
+; RUN: opt < %s -instcombine -S | grep {phi i32} | count 2
+
+define void @test() nounwind {
+entry:
+ br label %bb
+
+bb: ; preds = %bb16, %entry
+ %i.0 = phi i32 [ 0, %entry ], [ %indvar.next, %somebb ] ; <i32> [#uses=1]
+ %x.0 = phi i32 [ 37, %entry ], [ %tmp17, %somebb ] ; <i32> [#uses=1]
+ %tmp = tail call i32 (...)* @bork( ) nounwind ; <i32> [#uses=0]
+ %tmp1 = tail call i32 (...)* @bork( ) nounwind ; <i32> [#uses=0]
+ %tmp2 = tail call i32 (...)* @bork( ) nounwind ; <i32> [#uses=1]
+ %tmp3 = icmp eq i32 %tmp2, 0 ; <i1> [#uses=1]
+ br i1 %tmp3, label %bb7, label %bb5
+
+bb5: ; preds = %bb
+ %tmp6 = tail call i32 (...)* @bork( ) nounwind ; <i32> [#uses=0]
+ br label %bb7
+
+bb7: ; preds = %bb5, %bb
+ %tmp8 = tail call i32 (...)* @bork( ) nounwind ; <i32> [#uses=0]
+ %tmp9 = tail call i32 (...)* @bork( ) nounwind ; <i32> [#uses=0]
+ %tmp11 = icmp eq i32 %x.0, 37 ; <i1> [#uses=1]
+ br i1 %tmp11, label %bb14, label %bb16
+
+bb14: ; preds = %bb7
+ %tmp15 = tail call i32 (...)* @bar( ) nounwind ; <i32> [#uses=0]
+ br label %bb16
+
+bb16: ; preds = %bb14, %bb7
+ %tmp17 = tail call i32 (...)* @zap( ) nounwind ; <i32> [#uses=1]
+ %indvar.next = add i32 %i.0, 1 ; <i32> [#uses=2]
+ %exitcond = icmp eq i32 %indvar.next, 42 ; <i1> [#uses=1]
+ br i1 %exitcond, label %return, label %somebb
+
+somebb:
+ br label %bb
+
+return: ; preds = %bb16
+ ret void
+}
+
+declare i32 @bork(...)
+
+declare i32 @bar(...)
+
+declare i32 @zap(...)
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-06-13-InfiniteLoopStore.ll b/src/LLVM/test/Transforms/InstCombine/2008-06-13-InfiniteLoopStore.ll
new file mode 100644
index 0000000..08959c9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-06-13-InfiniteLoopStore.ll
@@ -0,0 +1,20 @@
+; RUN: opt < %s -instcombine -S | grep {store i32} | count 2
+
+@g_139 = global i32 0 ; <i32*> [#uses=2]
+
+define void @func_56(i32 %p_60) nounwind {
+entry:
+ store i32 1, i32* @g_139, align 4
+ %tmp1 = icmp ne i32 %p_60, 0 ; <i1> [#uses=1]
+ %tmp12 = zext i1 %tmp1 to i8 ; <i8> [#uses=1]
+ %toBool = icmp ne i8 %tmp12, 0 ; <i1> [#uses=1]
+ br i1 %toBool, label %bb, label %return
+
+bb: ; preds = %bb, %entry
+ store i32 1, i32* @g_139, align 4
+ br label %bb
+
+return: ; preds = %entry
+ ret void
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-06-13-ReadOnlyCallStore.ll b/src/LLVM/test/Transforms/InstCombine/2008-06-13-ReadOnlyCallStore.ll
new file mode 100644
index 0000000..aed1b14
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-06-13-ReadOnlyCallStore.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine -S | grep {store i8} | count 2
+
+define i32 @a(i8* %s) nounwind {
+entry:
+ store i8 0, i8* %s, align 1 ; This store cannot be eliminated!
+ %tmp3 = call i32 @strlen( i8* %s ) nounwind readonly
+ %tmp5 = icmp ne i32 %tmp3, 0
+ br i1 %tmp5, label %bb, label %bb8
+
+bb: ; preds = %entry
+ store i8 0, i8* %s, align 1
+ br label %bb8
+
+bb8:
+ ret i32 %tmp3
+}
+
+declare i32 @strlen(i8*) nounwind readonly
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-06-19-UncondLoad.ll b/src/LLVM/test/Transforms/InstCombine/2008-06-19-UncondLoad.ll
new file mode 100644
index 0000000..05f1c52
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-06-19-UncondLoad.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine -S | grep load | count 3
+; PR2471
+
+declare i32 @x(i32*)
+define i32 @b(i32* %a, i32* %b) {
+entry:
+ %tmp1 = load i32* %a
+ %tmp3 = load i32* %b
+ %add = add i32 %tmp1, %tmp3
+ %call = call i32 @x( i32* %a )
+ %tobool = icmp ne i32 %add, 0
+ ; not safe to turn into an uncond load
+ %cond = select i1 %tobool, i32* %b, i32* %a
+ %tmp8 = load i32* %cond
+ ret i32 %tmp8
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-06-21-CompareMiscomp.ll b/src/LLVM/test/Transforms/InstCombine/2008-06-21-CompareMiscomp.ll
new file mode 100644
index 0000000..c3371c6
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-06-21-CompareMiscomp.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | grep {icmp eq i32 %In, 15}
+; PR2479
+; (See also PR1800.)
+
+define i1 @test(i32 %In) {
+ %c1 = icmp ugt i32 %In, 13
+ %c2 = icmp eq i32 %In, 15
+ %V = and i1 %c1, %c2
+ ret i1 %V
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-06-24-StackRestore.ll b/src/LLVM/test/Transforms/InstCombine/2008-06-24-StackRestore.ll
new file mode 100644
index 0000000..8307834
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-06-24-StackRestore.ll
@@ -0,0 +1,39 @@
+; RUN: opt < %s -instcombine -S | grep {call.*llvm.stackrestore}
+; PR2488
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
+target triple = "i386-pc-linux-gnu"
+@p = weak global i8* null ; <i8**> [#uses=2]
+
+define i32 @main() nounwind {
+entry:
+ %tmp248 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
+ %tmp2752 = alloca i32 ; <i32*> [#uses=2]
+ %tmpcast53 = bitcast i32* %tmp2752 to i8* ; <i8*> [#uses=1]
+ store i32 2, i32* %tmp2752, align 4
+ volatile store i8* %tmpcast53, i8** @p, align 4
+ br label %bb44
+
+bb: ; preds = %bb44
+ ret i32 0
+
+bb44: ; preds = %bb44, %entry
+ %indvar = phi i32 [ 0, %entry ], [ %tmp3857, %bb44 ] ; <i32> [#uses=1]
+ %tmp249 = phi i8* [ %tmp248, %entry ], [ %tmp2, %bb44 ] ; <i8*> [#uses=1]
+ %tmp3857 = add i32 %indvar, 1 ; <i32> [#uses=3]
+ call void @llvm.stackrestore( i8* %tmp249 )
+ %tmp2 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
+ %tmp4 = srem i32 %tmp3857, 1000 ; <i32> [#uses=2]
+ %tmp5 = add i32 %tmp4, 1 ; <i32> [#uses=1]
+ %tmp27 = alloca i32, i32 %tmp5 ; <i32*> [#uses=3]
+ %tmpcast = bitcast i32* %tmp27 to i8* ; <i8*> [#uses=1]
+ store i32 1, i32* %tmp27, align 4
+ %tmp34 = getelementptr i32* %tmp27, i32 %tmp4 ; <i32*> [#uses=1]
+ store i32 2, i32* %tmp34, align 4
+ volatile store i8* %tmpcast, i8** @p, align 4
+ %exitcond = icmp eq i32 %tmp3857, 999999 ; <i1> [#uses=1]
+ br i1 %exitcond, label %bb, label %bb44
+}
+
+declare i8* @llvm.stacksave() nounwind
+
+declare void @llvm.stackrestore(i8*) nounwind
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-07-08-AndICmp.ll b/src/LLVM/test/Transforms/InstCombine/2008-07-08-AndICmp.ll
new file mode 100644
index 0000000..a12f4bd
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-07-08-AndICmp.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep icmp | count 1
+; PR2330
+
+define i1 @foo(i32 %a, i32 %b) nounwind {
+entry:
+ icmp ult i32 %a, 8 ; <i1>:0 [#uses=1]
+ icmp ult i32 %b, 8 ; <i1>:1 [#uses=1]
+ and i1 %1, %0 ; <i1>:2 [#uses=1]
+ ret i1 %2
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-07-08-ShiftOneAndOne.ll b/src/LLVM/test/Transforms/InstCombine/2008-07-08-ShiftOneAndOne.ll
new file mode 100644
index 0000000..8245b4d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-07-08-ShiftOneAndOne.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep {icmp ne i32 \%a}
+; PR2330
+
+define i1 @foo(i32 %a) nounwind {
+entry:
+ %tmp15 = shl i32 1, %a ; <i32> [#uses=1]
+ %tmp237 = and i32 %tmp15, 1 ; <i32> [#uses=1]
+ %toBool = icmp eq i32 %tmp237, 0 ; <i1> [#uses=1]
+ ret i1 %toBool
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-07-08-SubAnd.ll b/src/LLVM/test/Transforms/InstCombine/2008-07-08-SubAnd.ll
new file mode 100644
index 0000000..0091159
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-07-08-SubAnd.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep -v {i32 8}
+; PR2330
+
+define i32 @a(i32 %a) nounwind {
+entry:
+ %tmp2 = sub i32 8, %a ; <i32> [#uses=1]
+ %tmp3 = and i32 %tmp2, 7 ; <i32> [#uses=1]
+ ret i32 %tmp3
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll b/src/LLVM/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll
new file mode 100644
index 0000000..8104408
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll
@@ -0,0 +1,26 @@
+; RUN: opt < %s -instcombine -S | grep {load volatile} | count 2
+; PR2496
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin8"
+@g_1 = internal global i32 0 ; <i32*> [#uses=3]
+
+define i32 @main() nounwind {
+entry:
+ %tmp93 = icmp slt i32 0, 10 ; <i1> [#uses=0]
+ %tmp34 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1]
+ br label %bb
+
+bb: ; preds = %bb, %entry
+ %b.0.reg2mem.0 = phi i32 [ 0, %entry ], [ %tmp6, %bb ] ; <i32> [#uses=1]
+ %tmp3.reg2mem.0 = phi i32 [ %tmp3, %bb ], [ %tmp34, %entry ]
+ %tmp4 = add i32 %tmp3.reg2mem.0, 5 ; <i32> [#uses=1]
+ volatile store i32 %tmp4, i32* @g_1, align 4
+ %tmp6 = add i32 %b.0.reg2mem.0, 1 ; <i32> [#uses=2]
+ %tmp9 = icmp slt i32 %tmp6, 10 ; <i1> [#uses=1]
+ %tmp3 = volatile load i32* @g_1, align 4 ; <i32> [#uses=1]
+ br i1 %tmp9, label %bb, label %bb11
+
+bb11: ; preds = %bb
+ ret i32 0
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-07-09-SubAndError.ll b/src/LLVM/test/Transforms/InstCombine/2008-07-09-SubAndError.ll
new file mode 100644
index 0000000..47a7590
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-07-09-SubAndError.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | not grep {sub i32 0}
+; PR2330
+
+define i32 @foo(i32 %a) nounwind {
+entry:
+ %A = sub i32 5, %a
+ %B = and i32 %A, 2
+ ret i32 %B
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-07-10-CastSextBool.ll b/src/LLVM/test/Transforms/InstCombine/2008-07-10-CastSextBool.ll
new file mode 100644
index 0000000..e911532
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-07-10-CastSextBool.ll
@@ -0,0 +1,17 @@
+; RUN: opt < %s -instcombine -S | grep {%C = xor i1 %A, true}
+; RUN: opt < %s -instcombine -S | grep {ret i1 false}
+; PR2539
+
+define i1 @test1(i1 %A) {
+ %B = zext i1 %A to i32
+ %C = icmp slt i32 %B, 1
+ ret i1 %C
+}
+
+
+define i1 @test2(i1 zeroext %b) {
+entry:
+ %cmptmp = icmp slt i1 %b, true ; <i1> [#uses=1]
+ ret i1 %cmptmp
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-07-10-ICmpBinOp.ll b/src/LLVM/test/Transforms/InstCombine/2008-07-10-ICmpBinOp.ll
new file mode 100644
index 0000000..76e3039
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-07-10-ICmpBinOp.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine -S | not grep add
+; RUN: opt < %s -instcombine -S | not grep mul
+; PR2330
+
+define i1 @f(i32 %x, i32 %y) nounwind {
+entry:
+ %A = add i32 %x, 5
+ %B = add i32 %y, 5
+ %C = icmp eq i32 %A, %B
+ ret i1 %C
+}
+
+define i1 @g(i32 %x, i32 %y) nounwind {
+entry:
+ %A = mul i32 %x, 5
+ %B = mul i32 %y, 5
+ %C = icmp eq i32 %A, %B
+ ret i1 %C
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-07-11-RemAnd.ll b/src/LLVM/test/Transforms/InstCombine/2008-07-11-RemAnd.ll
new file mode 100644
index 0000000..bf53451
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-07-11-RemAnd.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | not grep rem
+; PR2330
+
+define i32 @a(i32 %b) nounwind {
+entry:
+ srem i32 %b, 8 ; <i32>:0 [#uses=1]
+ and i32 %0, 1 ; <i32>:1 [#uses=1]
+ ret i32 %1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-07-13-DivZero.ll b/src/LLVM/test/Transforms/InstCombine/2008-07-13-DivZero.ll
new file mode 100644
index 0000000..be1f8c2
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-07-13-DivZero.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine -S | grep {lshr.*3}
+; RUN: opt < %s -instcombine -S | grep {call .*%cond}
+; PR2506
+
+; We can simplify the operand of udiv to '8', but not the operand to the
+; call. If the callee never returns, we can't assume the div is reachable.
+define i32 @a(i32 %x, i32 %y) {
+entry:
+ %tobool = icmp ne i32 %y, 0 ; <i1> [#uses=1]
+ %cond = select i1 %tobool, i32 8, i32 0 ; <i32> [#uses=2]
+ %call = call i32 @b( i32 %cond ) ; <i32> [#uses=0]
+ %div = udiv i32 %x, %cond ; <i32> [#uses=1]
+ ret i32 %div
+}
+
+declare i32 @b(i32)
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-07-16-fsub.ll b/src/LLVM/test/Transforms/InstCombine/2008-07-16-fsub.ll
new file mode 100644
index 0000000..672b4e9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-07-16-fsub.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep sub
+; PR2553
+
+define double @test(double %X) nounwind {
+ ; fsub of self can't be optimized away.
+ %Y = fsub double %X, %X
+ ret double %Y
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-07-16-sse2_storel_dq.ll b/src/LLVM/test/Transforms/InstCombine/2008-07-16-sse2_storel_dq.ll
new file mode 100644
index 0000000..501d8a6
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-07-16-sse2_storel_dq.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -instcombine -S | not grep {store }
+; PR2296
+
+@G = common global double 0.000000e+00, align 16
+
+define void @x(<2 x i64> %y) nounwind {
+entry:
+ bitcast <2 x i64> %y to <4 x i32>
+ call void @llvm.x86.sse2.storel.dq( i8* bitcast (double* @G to i8*), <4 x i32> %0 ) nounwind
+ ret void
+}
+
+declare void @llvm.x86.sse2.storel.dq(i8*, <4 x i32>) nounwind
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-08-05-And.ll b/src/LLVM/test/Transforms/InstCombine/2008-08-05-And.ll
new file mode 100644
index 0000000..9773c2d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-08-05-And.ll
@@ -0,0 +1,23 @@
+; RUN: opt < %s -instcombine -S | not grep or
+; PR2629
+
+define void @f(i8* %x) nounwind {
+entry:
+ br label %bb
+
+bb:
+ %g1 = getelementptr i8* %x, i32 0
+ %l1 = load i8* %g1, align 1
+ %s1 = sub i8 %l1, 6
+ %c1 = icmp ugt i8 %s1, 2
+ %s2 = sub i8 %l1, 10
+ %c2 = icmp ugt i8 %s2, 2
+ %a1 = and i1 %c1, %c2
+ br i1 %a1, label %incompatible, label %okay
+
+okay:
+ ret void
+
+incompatible:
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-08-17-ICmpXorSignbit.ll b/src/LLVM/test/Transforms/InstCombine/2008-08-17-ICmpXorSignbit.ll
new file mode 100644
index 0000000..e9081f0
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-08-17-ICmpXorSignbit.ll
@@ -0,0 +1,41 @@
+; RUN: opt < %s -instcombine -S | not grep xor
+
+define i1 @test1(i8 %x, i8 %y) {
+ %X = xor i8 %x, 128
+ %Y = xor i8 %y, 128
+ %tmp = icmp slt i8 %X, %Y
+ ret i1 %tmp
+}
+
+define i1 @test2(i8 %x, i8 %y) {
+ %X = xor i8 %x, 128
+ %Y = xor i8 %y, 128
+ %tmp = icmp ult i8 %X, %Y
+ ret i1 %tmp
+}
+
+define i1 @test3(i8 %x) {
+ %X = xor i8 %x, 128
+ %tmp = icmp uge i8 %X, 15
+ ret i1 %tmp
+}
+
+define i1 @test4(i8 %x, i8 %y) {
+ %X = xor i8 %x, 127
+ %Y = xor i8 %y, 127
+ %tmp = icmp slt i8 %X, %Y
+ ret i1 %tmp
+}
+
+define i1 @test5(i8 %x, i8 %y) {
+ %X = xor i8 %x, 127
+ %Y = xor i8 %y, 127
+ %tmp = icmp ult i8 %X, %Y
+ ret i1 %tmp
+}
+
+define i1 @test6(i8 %x) {
+ %X = xor i8 %x, 127
+ %tmp = icmp uge i8 %X, 15
+ ret i1 %tmp
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-09-02-VectorCrash.ll b/src/LLVM/test/Transforms/InstCombine/2008-09-02-VectorCrash.ll
new file mode 100644
index 0000000..7c50141
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-09-02-VectorCrash.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -instcombine
+
+define void @entry(i32 %m_task_id, i32 %start_x, i32 %end_x, i32 %start_y, i32 %end_y) {
+ br label %1
+
+; <label>:1 ; preds = %4, %0
+ %2 = icmp slt i32 0, %end_y ; <i1> [#uses=1]
+ br i1 %2, label %4, label %3
+
+; <label>:3 ; preds = %1
+ ret void
+
+; <label>:4 ; preds = %6, %1
+ %5 = icmp slt i32 0, %end_x ; <i1> [#uses=1]
+ br i1 %5, label %6, label %1
+
+; <label>:6 ; preds = %4
+ %7 = srem <2 x i32> zeroinitializer, zeroinitializer ; <<2 x i32>> [#uses=1]
+ %8 = extractelement <2 x i32> %7, i32 1 ; <i32> [#uses=1]
+ %9 = select i1 false, i32 0, i32 %8 ; <i32> [#uses=1]
+ %10 = insertelement <2 x i32> zeroinitializer, i32 %9, i32 1 ; <<2 x i32>> [#uses=1]
+ %11 = extractelement <2 x i32> %10, i32 1 ; <i32> [#uses=1]
+ %12 = insertelement <4 x i32> zeroinitializer, i32 %11, i32 3 ; <<4 x i32>> [#uses=1]
+ %13 = sitofp <4 x i32> %12 to <4 x float> ; <<4 x float>> [#uses=1]
+ store <4 x float> %13, <4 x float>* null
+ br label %4
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-09-29-FoldingOr.ll b/src/LLVM/test/Transforms/InstCombine/2008-09-29-FoldingOr.ll
new file mode 100644
index 0000000..31ea94a
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-09-29-FoldingOr.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep {or i1}
+; PR2844
+
+define i32 @test(i32 %p_74) {
+ %A = icmp eq i32 %p_74, 0 ; <i1> [#uses=1]
+ %B = icmp slt i32 %p_74, -638208501 ; <i1> [#uses=1]
+ %or.cond = or i1 %A, %B ; <i1> [#uses=1]
+ %iftmp.10.0 = select i1 %or.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %iftmp.10.0
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-10-11-DivCompareFold.ll b/src/LLVM/test/Transforms/InstCombine/2008-10-11-DivCompareFold.ll
new file mode 100644
index 0000000..fd36d86
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-10-11-DivCompareFold.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep {ret i1 false}
+; PR2697
+
+define i1 @x(i32 %x) nounwind {
+ %div = sdiv i32 %x, 65536 ; <i32> [#uses=1]
+ %cmp = icmp slt i32 %div, -65536
+ ret i1 %cmp
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-10-23-ConstFoldWithoutMask.ll b/src/LLVM/test/Transforms/InstCombine/2008-10-23-ConstFoldWithoutMask.ll
new file mode 100644
index 0000000..d70d052
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-10-23-ConstFoldWithoutMask.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine
+; PR2940
+
+define i32 @tstid() {
+ %var0 = inttoptr i32 1 to i8* ; <i8*> [#uses=1]
+ %var2 = ptrtoint i8* %var0 to i32 ; <i32> [#uses=1]
+ ret i32 %var2
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-11-01-SRemDemandedBits.ll b/src/LLVM/test/Transforms/InstCombine/2008-11-01-SRemDemandedBits.ll
new file mode 100644
index 0000000..aa077e2
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-11-01-SRemDemandedBits.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep {ret i1 true}
+; PR2993
+
+define i1 @foo(i32 %x) {
+ %1 = srem i32 %x, -1
+ %2 = icmp eq i32 %1, 0
+ ret i1 %2
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-11-08-FCmp.ll b/src/LLVM/test/Transforms/InstCombine/2008-11-08-FCmp.ll
new file mode 100644
index 0000000..c636288
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-11-08-FCmp.ll
@@ -0,0 +1,47 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; PR3021
+
+; When inst combining an FCMP with the LHS coming from a uitofp instruction, we
+; can't lower it to signed ICMP instructions.
+
+define i1 @test1(i32 %val) {
+ %1 = uitofp i32 %val to double
+ %2 = fcmp ole double %1, 0.000000e+00
+; CHECK: icmp eq i32 %val, 0
+ ret i1 %2
+}
+
+define i1 @test2(i32 %val) {
+ %1 = uitofp i32 %val to double
+ %2 = fcmp olt double %1, 0.000000e+00
+ ret i1 %2
+; CHECK: ret i1 false
+}
+
+define i1 @test3(i32 %val) {
+ %1 = uitofp i32 %val to double
+ %2 = fcmp oge double %1, 0.000000e+00
+ ret i1 %2
+; CHECK: ret i1 true
+}
+
+define i1 @test4(i32 %val) {
+ %1 = uitofp i32 %val to double
+ %2 = fcmp ogt double %1, 0.000000e+00
+; CHECK: icmp ne i32 %val, 0
+ ret i1 %2
+}
+
+define i1 @test5(i32 %val) {
+ %1 = uitofp i32 %val to double
+ %2 = fcmp ogt double %1, -4.400000e+00
+ ret i1 %2
+; CHECK: ret i1 true
+}
+
+define i1 @test6(i32 %val) {
+ %1 = uitofp i32 %val to double
+ %2 = fcmp olt double %1, -4.400000e+00
+ ret i1 %2
+; CHECK: ret i1 false
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-11-20-DivMulRem.ll b/src/LLVM/test/Transforms/InstCombine/2008-11-20-DivMulRem.ll
new file mode 100644
index 0000000..43af190
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-11-20-DivMulRem.ll
@@ -0,0 +1,67 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; PR3103
+
+define i8 @test1(i8 %x, i8 %y) {
+; CHECK: @test1
+ %A = udiv i8 %x, %y
+; CHECK-NEXT: urem
+ %B = mul i8 %A, %y
+ %C = sub i8 %x, %B
+ ret i8 %C
+; CHECK-NEXT: ret
+}
+
+define i8 @test2(i8 %x, i8 %y) {
+; CHECK: @test2
+ %A = sdiv i8 %x, %y
+; CHECK-NEXT: srem
+ %B = mul i8 %A, %y
+ %C = sub i8 %x, %B
+ ret i8 %C
+; CHECK-NEXT: ret
+}
+
+define i8 @test3(i8 %x, i8 %y) {
+; CHECK: @test3
+ %A = udiv i8 %x, %y
+; CHECK-NEXT: urem
+ %B = mul i8 %A, %y
+ %C = sub i8 %B, %x
+; CHECK-NEXT: sub
+ ret i8 %C
+; CHECK-NEXT: ret
+}
+
+define i8 @test4(i8 %x) {
+; CHECK: @test4
+ %A = udiv i8 %x, 3
+; CHECK-NEXT: urem
+ %B = mul i8 %A, -3
+; CHECK-NEXT: sub
+ %C = sub i8 %x, %B
+; CHECK-NEXT: add
+ ret i8 %C
+; CHECK-NEXT: ret
+}
+
+define i32 @test5(i32 %x, i32 %y) {
+; CHECK: @test5
+; (((X / Y) * Y) / Y) -> X / Y
+ %div = sdiv i32 %x, %y
+; CHECK-NEXT: sdiv
+ %mul = mul i32 %div, %y
+ %r = sdiv i32 %mul, %y
+ ret i32 %r
+; CHECK-NEXT: ret
+}
+
+define i32 @test6(i32 %x, i32 %y) {
+; CHECK: @test6
+; (((X / Y) * Y) / Y) -> X / Y
+ %div = udiv i32 %x, %y
+; CHECK-NEXT: udiv
+ %mul = mul i32 %div, %y
+ %r = udiv i32 %mul, %y
+ ret i32 %r
+; CHECK-NEXT: ret
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-11-27-IDivVector.ll b/src/LLVM/test/Transforms/InstCombine/2008-11-27-IDivVector.ll
new file mode 100644
index 0000000..318a80c
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-11-27-IDivVector.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | not grep div
+
+define <2 x i8> @f(<2 x i8> %x) {
+ %A = udiv <2 x i8> %x, <i8 1, i8 1>
+ ret <2 x i8> %A
+}
+
+define <2 x i8> @g(<2 x i8> %x) {
+ %A = sdiv <2 x i8> %x, <i8 1, i8 1>
+ ret <2 x i8> %A
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-11-27-MultiplyIntVec.ll b/src/LLVM/test/Transforms/InstCombine/2008-11-27-MultiplyIntVec.ll
new file mode 100644
index 0000000..d8c53fa
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-11-27-MultiplyIntVec.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | not grep mul
+
+define <2 x i8> @f(<2 x i8> %x) {
+ %A = mul <2 x i8> %x, <i8 1, i8 1>
+ ret <2 x i8> %A
+}
+
+define <2 x i8> @g(<2 x i8> %x) {
+ %A = mul <2 x i8> %x, <i8 -1, i8 -1>
+ ret <2 x i8> %A
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-11-27-UDivNegative.ll b/src/LLVM/test/Transforms/InstCombine/2008-11-27-UDivNegative.ll
new file mode 100644
index 0000000..fc90bba
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-11-27-UDivNegative.ll
@@ -0,0 +1,6 @@
+; RUN: opt < %s -instcombine -S | not grep div
+
+define i8 @test(i8 %x) readnone nounwind {
+ %A = udiv i8 %x, 250
+ ret i8 %A
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2008-12-17-SRemNegConstVec.ll b/src/LLVM/test/Transforms/InstCombine/2008-12-17-SRemNegConstVec.ll
new file mode 100644
index 0000000..e4c7ebc
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2008-12-17-SRemNegConstVec.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -S | grep {i8 2, i8 2}
+; PR2756
+
+define <2 x i8> @foo(<2 x i8> %x) {
+ %A = srem <2 x i8> %x, <i8 2, i8 -2>
+ ret <2 x i8> %A
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-01-05-i128-crash.ll b/src/LLVM/test/Transforms/InstCombine/2009-01-05-i128-crash.ll
new file mode 100644
index 0000000..d355e0a
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-01-05-i128-crash.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -instcombine | llvm-dis
+; PR3235
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define hidden i128 @"\01_gfortrani_max_value"(i32 %length, i32 %signed_flag) nounwind {
+entry:
+ switch i32 %length, label %bb13 [
+ i32 1, label %bb17
+ i32 4, label %bb9
+ i32 8, label %bb5
+ ]
+
+bb5: ; preds = %entry
+ %0 = icmp eq i32 %signed_flag, 0 ; <i1> [#uses=1]
+ %iftmp.28.0 = select i1 %0, i128 18446744073709551615, i128 9223372036854775807 ; <i128> [#uses=1]
+ ret i128 %iftmp.28.0
+
+bb9: ; preds = %entry
+ ret i128 0
+
+bb13: ; preds = %entry
+ ret i128 0
+
+bb17: ; preds = %entry
+ ret i128 0
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll b/src/LLVM/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
new file mode 100644
index 0000000..a61a94e
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
@@ -0,0 +1,28 @@
+; RUN: opt < %s -instcombine -S > %t
+; RUN: grep {, align 4} %t | count 3
+; RUN: grep {, align 8} %t | count 3
+; rdar://6480438
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.6"
+ %struct.Key = type { { i32, i32 } }
+ %struct.anon = type <{ i8, [3 x i8], i32 }>
+
+define i32 @bar(i64 %key_token2) nounwind {
+entry:
+ %iospec = alloca %struct.Key ; <%struct.Key*> [#uses=3]
+ %ret = alloca i32 ; <i32*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %0 = getelementptr %struct.Key* %iospec, i32 0, i32 0 ; <{ i32, i32 }*> [#uses=2]
+ %1 = getelementptr { i32, i32 }* %0, i32 0, i32 0 ; <i32*> [#uses=1]
+ store i32 0, i32* %1, align 4
+ %2 = getelementptr { i32, i32 }* %0, i32 0, i32 1 ; <i32*> [#uses=1]
+ store i32 0, i32* %2, align 4
+ %3 = getelementptr %struct.Key* %iospec, i32 0, i32 0 ; <{ i32, i32 }*> [#uses=1]
+ %4 = bitcast { i32, i32 }* %3 to i64* ; <i64*> [#uses=1]
+ store i64 %key_token2, i64* %4, align 4
+ %5 = call i32 (...)* @foo(%struct.Key* byval align 4 %iospec, i32* %ret) nounwind ; <i32> [#uses=0]
+ %6 = load i32* %ret, align 4 ; <i32> [#uses=1]
+ ret i32 %6
+}
+
+declare i32 @foo(...)
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-01-16-PointerAddrSpace.ll b/src/LLVM/test/Transforms/InstCombine/2009-01-16-PointerAddrSpace.ll
new file mode 100644
index 0000000..ce62f35
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-01-16-PointerAddrSpace.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | grep {store.*addrspace(1)}
+; PR3335
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.6"
+
+define i32 @test(i32* %P) nounwind {
+entry:
+ %Q = bitcast i32* %P to i32 addrspace(1)*
+ store i32 0, i32 addrspace(1)* %Q, align 4
+ ret i32 0
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-01-19-fmod-constant-float-specials.ll b/src/LLVM/test/Transforms/InstCombine/2009-01-19-fmod-constant-float-specials.ll
new file mode 100644
index 0000000..1421347
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-01-19-fmod-constant-float-specials.ll
@@ -0,0 +1,315 @@
+; RUN: opt < %s -simplifycfg -instcombine -S | grep 0x7FF8000000000000 | count 12
+; RUN: opt < %s -simplifycfg -instcombine -S | grep {0\\.0} | count 3
+; RUN: opt < %s -simplifycfg -instcombine -S | grep {3\\.5} | count 1
+;
+
+; ModuleID = 'apf.c'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.6"
+@"\01LC" = internal constant [4 x i8] c"%f\0A\00" ; <[4 x i8]*> [#uses=1]
+
+define void @foo1() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF0000000000000, float* %x, align 4
+ store float 0x7FF8000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+declare i32 @printf(i8*, ...) nounwind
+
+define void @foo2() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF0000000000000, float* %x, align 4
+ store float 0.000000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo3() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF0000000000000, float* %x, align 4
+ store float 3.500000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo4() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF0000000000000, float* %x, align 4
+ store float 0x7FF0000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo5() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF8000000000000, float* %x, align 4
+ store float 0x7FF0000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo6() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF8000000000000, float* %x, align 4
+ store float 0.000000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo7() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF8000000000000, float* %x, align 4
+ store float 3.500000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo8() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0x7FF8000000000000, float* %x, align 4
+ store float 0x7FF8000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo9() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0.000000e+00, float* %x, align 4
+ store float 0x7FF8000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo10() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0.000000e+00, float* %x, align 4
+ store float 0x7FF0000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo11() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0.000000e+00, float* %x, align 4
+ store float 0.000000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo12() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 0.000000e+00, float* %x, align 4
+ store float 3.500000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo13() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 3.500000e+00, float* %x, align 4
+ store float 0x7FF8000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo14() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 3.500000e+00, float* %x, align 4
+ store float 0x7FF0000000000000, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo15() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 3.500000e+00, float* %x, align 4
+ store float 0.000000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
+
+define void @foo16() nounwind {
+entry:
+ %y = alloca float ; <float*> [#uses=2]
+ %x = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store float 3.500000e+00, float* %x, align 4
+ store float 3.500000e+00, float* %y, align 4
+ %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %1 = fpext float %0 to double ; <double> [#uses=1]
+ %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %3 = fpext float %2 to double ; <double> [#uses=1]
+ %4 = frem double %3, %1 ; <double> [#uses=1]
+ %5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-01-19-fmod-constant-float.ll b/src/LLVM/test/Transforms/InstCombine/2009-01-19-fmod-constant-float.ll
new file mode 100644
index 0000000..6bc7ce3
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-01-19-fmod-constant-float.ll
@@ -0,0 +1,75 @@
+; RUN: opt < %s -simplifycfg -instcombine -S | grep 0x3FB99999A0000000 | count 2
+; RUN: opt < %s -simplifycfg -instcombine -S | grep 0xBFB99999A0000000 | count 2
+; check constant folding for 'frem'. PR 3316.
+
+; ModuleID = 'tt.c'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.6"
+
+define float @test1() nounwind {
+entry:
+ %retval = alloca float ; <float*> [#uses=2]
+ %0 = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %1 = frem double 1.000000e-01, 1.000000e+00 ; <double> [#uses=1]
+ %2 = fptrunc double %1 to float ; <float> [#uses=1]
+ store float %2, float* %0, align 4
+ %3 = load float* %0, align 4 ; <float> [#uses=1]
+ store float %3, float* %retval, align 4
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load float* %retval ; <float> [#uses=1]
+ ret float %retval1
+}
+
+define float @test2() nounwind {
+entry:
+ %retval = alloca float ; <float*> [#uses=2]
+ %0 = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %1 = frem double -1.000000e-01, 1.000000e+00 ; <double> [#uses=1]
+ %2 = fptrunc double %1 to float ; <float> [#uses=1]
+ store float %2, float* %0, align 4
+ %3 = load float* %0, align 4 ; <float> [#uses=1]
+ store float %3, float* %retval, align 4
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load float* %retval ; <float> [#uses=1]
+ ret float %retval1
+}
+
+define float @test3() nounwind {
+entry:
+ %retval = alloca float ; <float*> [#uses=2]
+ %0 = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %1 = frem double 1.000000e-01, -1.000000e+00 ; <double> [#uses=1]
+ %2 = fptrunc double %1 to float ; <float> [#uses=1]
+ store float %2, float* %0, align 4
+ %3 = load float* %0, align 4 ; <float> [#uses=1]
+ store float %3, float* %retval, align 4
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load float* %retval ; <float> [#uses=1]
+ ret float %retval1
+}
+
+define float @test4() nounwind {
+entry:
+ %retval = alloca float ; <float*> [#uses=2]
+ %0 = alloca float ; <float*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %1 = frem double -1.000000e-01, -1.000000e+00 ; <double> [#uses=1]
+ %2 = fptrunc double %1 to float ; <float> [#uses=1]
+ store float %2, float* %0, align 4
+ %3 = load float* %0, align 4 ; <float> [#uses=1]
+ store float %3, float* %retval, align 4
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load float* %retval ; <float> [#uses=1]
+ ret float %retval1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-01-24-EmptyStruct.ll b/src/LLVM/test/Transforms/InstCombine/2009-01-24-EmptyStruct.ll
new file mode 100644
index 0000000..4b64b48
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-01-24-EmptyStruct.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine
+; PR3381
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+target triple = "x86_64-unknown-linux-gnu"
+ %struct.atomic_t = type { i32 }
+ %struct.inode = type { i32, %struct.mutex }
+ %struct.list_head = type { %struct.list_head*, %struct.list_head* }
+ %struct.lock_class_key = type { }
+ %struct.mutex = type { %struct.atomic_t, %struct.rwlock_t, %struct.list_head }
+ %struct.rwlock_t = type { %struct.lock_class_key }
+
+define void @handle_event(%struct.inode* %bar) nounwind {
+entry:
+ %0 = getelementptr %struct.inode* %bar, i64 -1, i32 1, i32 1 ; <%struct.rwlock_t*> [#uses=1]
+ %1 = bitcast %struct.rwlock_t* %0 to i32* ; <i32*> [#uses=1]
+ store i32 1, i32* %1, align 4
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-01-31-InfIterate.ll b/src/LLVM/test/Transforms/InstCombine/2009-01-31-InfIterate.ll
new file mode 100644
index 0000000..815c1a9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-01-31-InfIterate.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine | llvm-dis
+; PR3452
+define i128 @test(i64 %A, i64 %B, i1 %C, i128 %Z, i128 %Y, i64* %P, i64* %Q) {
+entry:
+ %tmp2 = trunc i128 %Z to i64
+ %tmp4 = trunc i128 %Y to i64
+ store i64 %tmp2, i64* %P
+ store i64 %tmp4, i64* %Q
+ %x = sub i64 %tmp2, %tmp4
+ %c = sub i64 %tmp2, %tmp4
+ %tmp137 = zext i1 %C to i64
+ %tmp138 = sub i64 %c, %tmp137
+ br label %T
+
+T:
+ %G = phi i64 [%tmp138, %entry], [%tmp2, %Fal]
+ %F = zext i64 %G to i128
+ ret i128 %F
+
+Fal:
+ br label %T
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-01-31-Pressure.ll b/src/LLVM/test/Transforms/InstCombine/2009-01-31-Pressure.ll
new file mode 100644
index 0000000..c3ee9a3
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-01-31-Pressure.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine -S | grep {%B = add i8 %b, %x}
+; PR2698
+
+declare void @use1(i1)
+declare void @use8(i8)
+
+define void @test1(i8 %a, i8 %b, i8 %x) {
+ %A = add i8 %a, %x
+ %B = add i8 %b, %x
+ %C = icmp eq i8 %A, %B
+ call void @use1(i1 %C)
+ ret void
+}
+
+define void @test2(i8 %a, i8 %b, i8 %x) {
+ %A = add i8 %a, %x
+ %B = add i8 %b, %x
+ %C = icmp eq i8 %A, %B
+ call void @use1(i1 %C)
+ call void @use8(i8 %A)
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-02-04-FPBitcast.ll b/src/LLVM/test/Transforms/InstCombine/2009-02-04-FPBitcast.ll
new file mode 100644
index 0000000..bc6a204
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-02-04-FPBitcast.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine
+; PR3468
+
+define x86_fp80 @cast() {
+ %tmp = bitcast i80 0 to x86_fp80 ; <x86_fp80> [#uses=1]
+ ret x86_fp80 %tmp
+}
+
+define i80 @invcast() {
+ %tmp = bitcast x86_fp80 0xK00000000000000000000 to i80 ; <i80> [#uses=1]
+ ret i80 %tmp
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll b/src/LLVM/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
new file mode 100644
index 0000000..a51c47d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
@@ -0,0 +1,279 @@
+; RUN: opt < %s -instcombine -scalarrepl -S | not grep { = alloca}
+; rdar://6417724
+; Instcombine shouldn't do anything to this function that prevents promoting the allocas inside it.
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.6"
+
+%"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >" = type { i32* }
+%"struct.std::_Vector_base<int,std::allocator<int> >" = type { %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl" }
+%"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl" = type { i32*, i32*, i32* }
+%"struct.std::bidirectional_iterator_tag" = type <{ i8 }>
+%"struct.std::forward_iterator_tag" = type <{ i8 }>
+%"struct.std::input_iterator_tag" = type <{ i8 }>
+%"struct.std::random_access_iterator_tag" = type <{ i8 }>
+%"struct.std::vector<int,std::allocator<int> >" = type { %"struct.std::_Vector_base<int,std::allocator<int> >" }
+
+define i32* @_Z3fooRSt6vectorIiSaIiEE(%"struct.std::vector<int,std::allocator<int> >"* %X) {
+entry:
+ %0 = alloca %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"
+ %__first_addr.i.i = alloca %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"
+ %__last_addr.i.i = alloca %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"
+ %unnamed_arg.i = alloca %"struct.std::bidirectional_iterator_tag", align 8
+ %1 = alloca %"struct.std::bidirectional_iterator_tag"
+ %__first_addr.i = alloca %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"
+ %2 = alloca %"struct.std::bidirectional_iterator_tag"
+ %3 = alloca %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"
+ %4 = alloca i32
+ %"alloca point" = bitcast i32 0 to i32
+ store i32 42, i32* %4, align 4
+ %5 = getelementptr %"struct.std::vector<int,std::allocator<int> >"* %X, i32 0, i32 0
+ %6 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >"* %5, i32 0, i32 0
+ %7 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl"* %6, i32 0, i32 1
+ %8 = load i32** %7, align 4
+ %9 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %3, i32 0, i32 0
+ store i32* %8, i32** %9, align 4
+ %10 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %3, i32 0, i32 0
+ %11 = load i32** %10, align 4
+ %tmp2.i = ptrtoint i32* %11 to i32
+ %tmp1.i = inttoptr i32 %tmp2.i to i32*
+ %tmp3 = ptrtoint i32* %tmp1.i to i32
+ %tmp2 = inttoptr i32 %tmp3 to i32*
+ %12 = getelementptr %"struct.std::vector<int,std::allocator<int> >"* %X, i32 0, i32 0
+ %13 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >"* %12, i32 0, i32 0
+ %14 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl"* %13, i32 0, i32 0
+ %15 = load i32** %14, align 4
+ %16 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %0, i32 0, i32 0
+ store i32* %15, i32** %16, align 4
+ %17 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %0, i32 0, i32 0
+ %18 = load i32** %17, align 4
+ %tmp2.i17 = ptrtoint i32* %18 to i32
+ %tmp1.i18 = inttoptr i32 %tmp2.i17 to i32*
+ %tmp8 = ptrtoint i32* %tmp1.i18 to i32
+ %tmp6 = inttoptr i32 %tmp8 to i32*
+ %19 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i, i32 0, i32 0
+ store i32* %tmp6, i32** %19
+ %20 = getelementptr %"struct.std::bidirectional_iterator_tag"* %1, i32 0, i32 0
+ %21 = load i8* %20, align 1
+ %22 = or i8 %21, 0
+ %23 = or i8 %22, 0
+ %24 = or i8 %23, 0
+ %25 = getelementptr %"struct.std::bidirectional_iterator_tag"* %2, i32 0, i32 0
+ store i8 0, i8* %25, align 1
+ %elt.i = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i, i32 0, i32 0
+ %val.i = load i32** %elt.i
+ %tmp.i = bitcast %"struct.std::bidirectional_iterator_tag"* %unnamed_arg.i to i8*
+ %tmp9.i = bitcast %"struct.std::bidirectional_iterator_tag"* %2 to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp.i, i8* %tmp9.i, i64 1, i32 1, i1 false)
+ %26 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ store i32* %val.i, i32** %26
+ %27 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
+ store i32* %tmp2, i32** %27
+ %28 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
+ %29 = load i32** %28, align 4
+ %30 = ptrtoint i32* %29 to i32
+ %31 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %32 = load i32** %31, align 4
+ %33 = ptrtoint i32* %32 to i32
+ %34 = sub i32 %30, %33
+ %35 = ashr i32 %34, 2
+ %36 = ashr i32 %35, 2
+ br label %bb12.i.i
+
+bb.i.i: ; preds = %bb12.i.i
+ %37 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %38 = load i32** %37, align 4
+ %39 = load i32* %38, align 4
+ %40 = load i32* %4, align 4
+ %41 = icmp eq i32 %39, %40
+ %42 = zext i1 %41 to i8
+ %toBool.i.i = icmp ne i8 %42, 0
+ br i1 %toBool.i.i, label %bb1.i.i, label %bb2.i.i
+
+bb1.i.i: ; preds = %bb.i.i
+ %43 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %44 = load i32** %43, align 4
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+bb2.i.i: ; preds = %bb.i.i
+ %45 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %46 = load i32** %45, align 4
+ %47 = getelementptr i32* %46, i64 1
+ %48 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ store i32* %47, i32** %48, align 4
+ %49 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %50 = load i32** %49, align 4
+ %51 = load i32* %50, align 4
+ %52 = load i32* %4, align 4
+ %53 = icmp eq i32 %51, %52
+ %54 = zext i1 %53 to i8
+ %toBool3.i.i = icmp ne i8 %54, 0
+ br i1 %toBool3.i.i, label %bb4.i.i, label %bb5.i.i
+
+bb4.i.i: ; preds = %bb2.i.i
+ %55 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %56 = load i32** %55, align 4
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+bb5.i.i: ; preds = %bb2.i.i
+ %57 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %58 = load i32** %57, align 4
+ %59 = getelementptr i32* %58, i64 1
+ %60 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ store i32* %59, i32** %60, align 4
+ %61 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %62 = load i32** %61, align 4
+ %63 = load i32* %62, align 4
+ %64 = load i32* %4, align 4
+ %65 = icmp eq i32 %63, %64
+ %66 = zext i1 %65 to i8
+ %toBool6.i.i = icmp ne i8 %66, 0
+ br i1 %toBool6.i.i, label %bb7.i.i, label %bb8.i.i
+
+bb7.i.i: ; preds = %bb5.i.i
+ %67 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %68 = load i32** %67, align 4
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+bb8.i.i: ; preds = %bb5.i.i
+ %69 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %70 = load i32** %69, align 4
+ %71 = getelementptr i32* %70, i64 1
+ %72 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ store i32* %71, i32** %72, align 4
+ %73 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %74 = load i32** %73, align 4
+ %75 = load i32* %74, align 4
+ %76 = load i32* %4, align 4
+ %77 = icmp eq i32 %75, %76
+ %78 = zext i1 %77 to i8
+ %toBool9.i.i = icmp ne i8 %78, 0
+ br i1 %toBool9.i.i, label %bb10.i.i, label %bb11.i.i
+
+bb10.i.i: ; preds = %bb8.i.i
+ %79 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %80 = load i32** %79, align 4
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+bb11.i.i: ; preds = %bb8.i.i
+ %81 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %82 = load i32** %81, align 4
+ %83 = getelementptr i32* %82, i64 1
+ %84 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ store i32* %83, i32** %84, align 4
+ %85 = sub i32 %__trip_count.0.i.i, 1
+ br label %bb12.i.i
+
+bb12.i.i: ; preds = %bb11.i.i, %entry
+ %__trip_count.0.i.i = phi i32 [ %36, %entry ], [ %85, %bb11.i.i ]
+ %86 = icmp sgt i32 %__trip_count.0.i.i, 0
+ br i1 %86, label %bb.i.i, label %bb13.i.i
+
+bb13.i.i: ; preds = %bb12.i.i
+ %87 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
+ %88 = load i32** %87, align 4
+ %89 = ptrtoint i32* %88 to i32
+ %90 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %91 = load i32** %90, align 4
+ %92 = ptrtoint i32* %91 to i32
+ %93 = sub i32 %89, %92
+ %94 = ashr i32 %93, 2
+ switch i32 %94, label %bb26.i.i [
+ i32 1, label %bb22.i.i
+ i32 2, label %bb18.i.i
+ i32 3, label %bb14.i.i
+ ]
+
+bb14.i.i: ; preds = %bb13.i.i
+ %95 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %96 = load i32** %95, align 4
+ %97 = load i32* %96, align 4
+ %98 = load i32* %4, align 4
+ %99 = icmp eq i32 %97, %98
+ %100 = zext i1 %99 to i8
+ %toBool15.i.i = icmp ne i8 %100, 0
+ br i1 %toBool15.i.i, label %bb16.i.i, label %bb17.i.i
+
+bb16.i.i: ; preds = %bb14.i.i
+ %101 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %102 = load i32** %101, align 4
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+bb17.i.i: ; preds = %bb14.i.i
+ %103 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %104 = load i32** %103, align 4
+ %105 = getelementptr i32* %104, i64 1
+ %106 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ store i32* %105, i32** %106, align 4
+ br label %bb18.i.i
+
+bb18.i.i: ; preds = %bb17.i.i, %bb13.i.i
+ %107 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %108 = load i32** %107, align 4
+ %109 = load i32* %108, align 4
+ %110 = load i32* %4, align 4
+ %111 = icmp eq i32 %109, %110
+ %112 = zext i1 %111 to i8
+ %toBool19.i.i = icmp ne i8 %112, 0
+ br i1 %toBool19.i.i, label %bb20.i.i, label %bb21.i.i
+
+bb20.i.i: ; preds = %bb18.i.i
+ %113 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %114 = load i32** %113, align 4
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+bb21.i.i: ; preds = %bb18.i.i
+ %115 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %116 = load i32** %115, align 4
+ %117 = getelementptr i32* %116, i64 1
+ %118 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ store i32* %117, i32** %118, align 4
+ br label %bb22.i.i
+
+bb22.i.i: ; preds = %bb21.i.i, %bb13.i.i
+ %119 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %120 = load i32** %119, align 4
+ %121 = load i32* %120, align 4
+ %122 = load i32* %4, align 4
+ %123 = icmp eq i32 %121, %122
+ %124 = zext i1 %123 to i8
+ %toBool23.i.i = icmp ne i8 %124, 0
+ br i1 %toBool23.i.i, label %bb24.i.i, label %bb25.i.i
+
+bb24.i.i: ; preds = %bb22.i.i
+ %125 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %126 = load i32** %125, align 4
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+bb25.i.i: ; preds = %bb22.i.i
+ %127 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %128 = load i32** %127, align 4
+ %129 = getelementptr i32* %128, i64 1
+ %130 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ store i32* %129, i32** %130, align 4
+ br label %bb26.i.i
+
+bb26.i.i: ; preds = %bb25.i.i, %bb13.i.i
+ %131 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
+ %132 = load i32** %131, align 4
+ br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+
+_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit: ; preds = %bb26.i.i, %bb24.i.i, %bb20.i.i, %bb16.i.i, %bb10.i.i, %bb7.i.i, %bb4.i.i, %bb1.i.i
+ %.0.0.i.i = phi i32* [ %132, %bb26.i.i ], [ %126, %bb24.i.i ], [ %114, %bb20.i.i ], [ %102, %bb16.i.i ], [ %80, %bb10.i.i ], [ %68, %bb7.i.i ], [ %56, %bb4.i.i ], [ %44, %bb1.i.i ]
+ %tmp2.i.i = ptrtoint i32* %.0.0.i.i to i32
+ %tmp1.i.i = inttoptr i32 %tmp2.i.i to i32*
+ %tmp4.i = ptrtoint i32* %tmp1.i.i to i32
+ %tmp3.i = inttoptr i32 %tmp4.i to i32*
+ %tmp8.i = ptrtoint i32* %tmp3.i to i32
+ %tmp6.i = inttoptr i32 %tmp8.i to i32*
+ %tmp12 = ptrtoint i32* %tmp6.i to i32
+ %tmp10 = inttoptr i32 %tmp12 to i32*
+ %tmp16 = ptrtoint i32* %tmp10 to i32
+ br label %return
+
+return: ; preds = %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
+ %tmp14 = inttoptr i32 %tmp16 to i32*
+ ret i32* %tmp14
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-02-21-LoadCST.ll b/src/LLVM/test/Transforms/InstCombine/2009-02-21-LoadCST.ll
new file mode 100644
index 0000000..f56fc38
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-02-21-LoadCST.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | grep {ret i32 3679669}
+; PR3595
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
+target triple = "i386-pc-linux-gnu"
+
+@.str1 = internal constant [4 x i8] c"\B5%8\00"
+
+define i32 @test() {
+ %rhsv = load i32* bitcast ([4 x i8]* @.str1 to i32*), align 1
+ ret i32 %rhsv
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-02-25-CrashZeroSizeArray.ll b/src/LLVM/test/Transforms/InstCombine/2009-02-25-CrashZeroSizeArray.ll
new file mode 100644
index 0000000..a8349f0
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-02-25-CrashZeroSizeArray.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -instcombine | llvm-dis
+; PR3667
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
+target triple = "i386-pc-linux-gnu"
+
+define void @_ada_c32001b(i32 %tmp5) {
+entry:
+ %max289 = select i1 false, i32 %tmp5, i32 0 ; <i32> [#uses=1]
+ %tmp6 = mul i32 %max289, 4 ; <i32> [#uses=1]
+ %tmp7 = alloca i8, i32 0 ; <i8*> [#uses=1]
+ %tmp8 = bitcast i8* %tmp7 to [0 x [0 x i32]]* ; <[0 x [0 x i32]]*> [#uses=1]
+ %tmp11 = load i32* null, align 1 ; <i32> [#uses=1]
+ %tmp12 = icmp eq i32 %tmp11, 3 ; <i1> [#uses=1]
+ %tmp13 = zext i1 %tmp12 to i8 ; <i8> [#uses=1]
+ %tmp14 = ashr i32 %tmp6, 2 ; <i32> [#uses=1]
+ %tmp15 = bitcast [0 x [0 x i32]]* %tmp8 to i8* ; <i8*> [#uses=1]
+ %tmp16 = mul i32 %tmp14, 4 ; <i32> [#uses=1]
+ %tmp17 = mul i32 1, %tmp16 ; <i32> [#uses=1]
+ %tmp18 = getelementptr i8* %tmp15, i32 %tmp17 ; <i8*> [#uses=1]
+ %tmp19 = bitcast i8* %tmp18 to [0 x i32]* ; <[0 x i32]*> [#uses=1]
+ %tmp20 = bitcast [0 x i32]* %tmp19 to i32* ; <i32*> [#uses=1]
+ %tmp21 = getelementptr i32* %tmp20, i32 0 ; <i32*> [#uses=1]
+ %tmp22 = load i32* %tmp21, align 1 ; <i32> [#uses=1]
+ %tmp23 = icmp eq i32 %tmp22, 4 ; <i1> [#uses=1]
+ %tmp24 = zext i1 %tmp23 to i8 ; <i8> [#uses=1]
+ %toBool709 = icmp ne i8 %tmp13, 0 ; <i1> [#uses=1]
+ %toBool710 = icmp ne i8 %tmp24, 0 ; <i1> [#uses=1]
+ %tmp25 = and i1 %toBool709, %toBool710 ; <i1> [#uses=1]
+ %tmp26 = zext i1 %tmp25 to i8 ; <i8> [#uses=1]
+ %toBool711 = icmp ne i8 %tmp26, 0 ; <i1> [#uses=1]
+ br i1 %toBool711, label %a, label %b
+
+a: ; preds = %entry
+ ret void
+
+b: ; preds = %entry
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-03-18-vector-ashr-crash.ll b/src/LLVM/test/Transforms/InstCombine/2009-03-18-vector-ashr-crash.ll
new file mode 100644
index 0000000..c617ca4
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-03-18-vector-ashr-crash.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine | llvm-dis
+; PR3826
+
+define void @0(<4 x i16>*, <4 x i16>*) {
+ %3 = alloca <4 x i16>* ; <<4 x i16>**> [#uses=1]
+ %4 = load <4 x i16>* null, align 1 ; <<4 x i16>> [#uses=1]
+ %5 = ashr <4 x i16> %4, <i16 5, i16 5, i16 5, i16 5> ; <<4 x i16>> [#uses=1]
+ %6 = load <4 x i16>** %3 ; <<4 x i16>*> [#uses=1]
+ store <4 x i16> %5, <4 x i16>* %6, align 1
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-03-20-AShrOverShift.ll b/src/LLVM/test/Transforms/InstCombine/2009-03-20-AShrOverShift.ll
new file mode 100644
index 0000000..0a07bf3
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-03-20-AShrOverShift.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {ashr i32 %val, 31}
+; PR3851
+
+define i32 @foo2(i32 %val) nounwind {
+entry:
+ %shr = ashr i32 %val, 15 ; <i32> [#uses=3]
+ %shr4 = ashr i32 %shr, 17 ; <i32> [#uses=1]
+ ret i32 %shr4
+ }
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-03-24-InfLoop.ll b/src/LLVM/test/Transforms/InstCombine/2009-03-24-InfLoop.ll
new file mode 100644
index 0000000..4ce04a1
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-03-24-InfLoop.ll
@@ -0,0 +1,9 @@
+; PR3874
+; RUN: opt < %s -instcombine | llvm-dis
+ define i1 @test(i32 %x) {
+ %A = lshr i32 3968, %x
+ %B = and i32 %A, 1
+ %C = icmp eq i32 %B, 0
+ ret i1 %C
+ }
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-04-07-MulPromoteToI96.ll b/src/LLVM/test/Transforms/InstCombine/2009-04-07-MulPromoteToI96.ll
new file mode 100644
index 0000000..244b22a
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-04-07-MulPromoteToI96.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -instcombine -S | grep {mul i64}
+; rdar://6762288
+
+; Instcombine should not promote the mul to i96 because it is definitely
+; not a legal type for the target, and we don't want a libcall.
+
+define i96 @test(i96 %a.4, i96 %b.2) {
+ %tmp1086 = trunc i96 %a.4 to i64 ; <i64> [#uses=1]
+ %tmp836 = trunc i96 %b.2 to i64 ; <i64> [#uses=1]
+ %mul185 = mul i64 %tmp1086, %tmp836 ; <i64> [#uses=1]
+ %tmp544 = zext i64 %mul185 to i96 ; <i96> [#uses=1]
+ ret i96 %tmp544
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-05-23-FCmpToICmp.ll b/src/LLVM/test/Transforms/InstCombine/2009-05-23-FCmpToICmp.ll
new file mode 100644
index 0000000..dd14c6b
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-05-23-FCmpToICmp.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | not grep cmp
+; rdar://6903175
+
+define i1 @f0(i32 *%a) nounwind {
+ %b = load i32* %a, align 4
+ %c = uitofp i32 %b to double
+ %d = fcmp ogt double %c, 0x41EFFFFFFFE00000
+ ret i1 %d
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-06-11-StoreAddrSpace.ll b/src/LLVM/test/Transforms/InstCombine/2009-06-11-StoreAddrSpace.ll
new file mode 100644
index 0000000..e5355b8
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-06-11-StoreAddrSpace.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -S | grep {store i32 0,}
+; PR4366
+
+define void @a() {
+ store i32 0, i32 addrspace(1)* null
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-06-16-SRemDemandedBits.ll b/src/LLVM/test/Transforms/InstCombine/2009-06-16-SRemDemandedBits.ll
new file mode 100644
index 0000000..6beedf8
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-06-16-SRemDemandedBits.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep srem
+; PR3439
+
+define i32 @a(i32 %x) nounwind {
+entry:
+ %rem = srem i32 %x, 2
+ %and = and i32 %rem, 2
+ ret i32 %and
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-07-02-MaskedIntVector.ll b/src/LLVM/test/Transforms/InstCombine/2009-07-02-MaskedIntVector.ll
new file mode 100644
index 0000000..41940fe
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-07-02-MaskedIntVector.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine | llvm-dis
+; PR4495
+
+define i32 @test(i64 %test) {
+entry:
+ %0 = bitcast <4 x i32> undef to <16 x i8> ; <<16 x i8>> [#uses=1]
+ %t12 = shufflevector <16 x i8> %0, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 16, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0> ; <<16 x i8>> [#uses=1]
+ %t11 = bitcast <16 x i8> %t12 to <2 x i64> ; <<2 x i64>> [#uses=1]
+ %t9 = extractelement <2 x i64> %t11, i32 0 ; <i64> [#uses=1]
+ %t10 = bitcast i64 %t9 to <2 x i32> ; <<2 x i32>> [#uses=1]
+ %t7 = bitcast i64 %test to <2 x i32> ; <<2 x i32>> [#uses=1]
+ %t6 = xor <2 x i32> %t10, %t7 ; <<2 x i32>> [#uses=1]
+ %t1 = extractelement <2 x i32> %t6, i32 0 ; <i32> [#uses=1]
+ ret i32 %t1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2009-12-17-CmpSelectNull.ll b/src/LLVM/test/Transforms/InstCombine/2009-12-17-CmpSelectNull.ll
new file mode 100644
index 0000000..fb7497b
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2009-12-17-CmpSelectNull.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+@.str254 = internal constant [2 x i8] c".\00"
+@.str557 = internal constant [3 x i8] c"::\00"
+
+define i8* @demangle_qualified(i32 %isfuncname) nounwind {
+entry:
+ %tobool272 = icmp ne i32 %isfuncname, 0
+ %cond276 = select i1 %tobool272, i8* getelementptr inbounds ([2 x i8]* @.str254, i32 0, i32 0), i8* getelementptr inbounds ([3 x i8]* @.str557, i32 0, i32 0) ; <i8*> [#uses=4]
+ %cmp.i504 = icmp eq i8* %cond276, null
+ %rval = getelementptr i8* %cond276, i1 %cmp.i504
+ ret i8* %rval
+}
+
+; CHECK: %cond276 = select i1
+; CHECK: ret i8* %cond276
diff --git a/src/LLVM/test/Transforms/InstCombine/2010-01-28-NegativeSRem.ll b/src/LLVM/test/Transforms/InstCombine/2010-01-28-NegativeSRem.ll
new file mode 100644
index 0000000..4ab9bf0
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2010-01-28-NegativeSRem.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; PR6165
+
+define i32 @f() {
+entry:
+ br label %BB1
+
+BB1: ; preds = %BB1, %entry
+; CHECK: BB1:
+ %x = phi i32 [ -29, %entry ], [ 0, %BB1 ] ; <i32> [#uses=2]
+ %rem = srem i32 %x, 2 ; <i32> [#uses=1]
+ %t = icmp eq i32 %rem, -1 ; <i1> [#uses=1]
+ br i1 %t, label %BB2, label %BB1
+; CHECK-NOT: br i1 false
+
+BB2: ; preds = %BB1
+; CHECK: BB2:
+ ret i32 %x
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2010-03-03-ExtElim.ll b/src/LLVM/test/Transforms/InstCombine/2010-03-03-ExtElim.ll
new file mode 100644
index 0000000..2df12d6
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2010-03-03-ExtElim.ll
@@ -0,0 +1,18 @@
+; RUN: opt -instcombine -S %s | FileCheck %s
+; PR6486
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
+target triple = "i386-unknown-linux-gnu"
+
+@g_92 = common global [2 x i32*] zeroinitializer, align 4 ; <[2 x i32*]*> [#uses=1]
+@g_177 = constant i32** bitcast (i8* getelementptr (i8* bitcast ([2 x i32*]* @g_92 to i8*), i64 4) to i32**), align 4 ; <i32***> [#uses=1]
+
+define i1 @test() nounwind {
+; CHECK: @test
+ %tmp = load i32*** @g_177 ; <i32**> [#uses=1]
+ %cmp = icmp ne i32** null, %tmp ; <i1> [#uses=1]
+ %conv = zext i1 %cmp to i32 ; <i32> [#uses=1]
+ %cmp1 = icmp sle i32 0, %conv ; <i1> [#uses=1]
+ ret i1 %cmp1
+; CHECK: ret i1 true
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2010-11-01-lshr-mask.ll b/src/LLVM/test/Transforms/InstCombine/2010-11-01-lshr-mask.ll
new file mode 100644
index 0000000..441d5f9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2010-11-01-lshr-mask.ll
@@ -0,0 +1,46 @@
+; RUN: opt -instcombine -S < %s | FileCheck %s
+
+; <rdar://problem/8606771>
+; CHECK: @main
+define i32 @main(i32 %argc) nounwind ssp {
+entry:
+ %tmp3151 = trunc i32 %argc to i8
+; CHECK: %tmp3162 = shl i8 %tmp3151, 5
+; CHECK: and i8 %tmp3162, 64
+; CHECK-NOT: shl
+; CHECK-NOT: shr
+ %tmp3161 = or i8 %tmp3151, -17
+ %tmp3162 = and i8 %tmp3151, 122
+ %tmp3163 = xor i8 %tmp3162, -17
+ %tmp4114 = shl i8 %tmp3163, 6
+ %tmp4115 = xor i8 %tmp4114, %tmp3163
+ %tmp4120 = xor i8 %tmp3161, %tmp4115
+ %tmp4126 = lshr i8 %tmp4120, 7
+ %tmp4127 = mul i8 %tmp4126, 64
+ %tmp4086 = zext i8 %tmp4127 to i32
+; CHECK: ret i32
+ ret i32 %tmp4086
+}
+
+; rdar://8739316
+; CHECK: @foo
+define i8 @foo(i8 %arg, i8 %arg1) nounwind {
+bb:
+ %tmp = shl i8 %arg, 7
+ %tmp2 = and i8 %arg1, 84
+ %tmp3 = and i8 %arg1, -118
+ %tmp4 = and i8 %arg1, 33
+ %tmp5 = sub i8 -88, %tmp2
+ %tmp6 = and i8 %tmp5, 84
+ %tmp7 = or i8 %tmp4, %tmp6
+ %tmp8 = xor i8 %tmp, %tmp3
+ %tmp9 = or i8 %tmp7, %tmp8
+ %tmp10 = lshr i8 %tmp8, 7
+ %tmp11 = shl i8 %tmp10, 5
+
+; CHECK: %0 = lshr i8 %tmp8, 2
+; CHECK: %tmp11 = and i8 %0, 32
+
+ %tmp12 = xor i8 %tmp11, %tmp9
+ ret i8 %tmp12
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll b/src/LLVM/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll
new file mode 100644
index 0000000..720365c
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2010-11-21-SizeZeroTypeGEP.ll
@@ -0,0 +1,17 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
+define {}* @foo({}* %x, i32 %n) {
+; CHECK: @foo
+; CHECK-NOT: getelementptr
+ %p = getelementptr {}* %x, i32 %n
+ ret {}* %p
+}
+
+define i8* @bar(i64 %n, {{}, [0 x {[0 x i8]}]}* %p) {
+; CHECK: @bar
+ %g = getelementptr {{}, [0 x {[0 x i8]}]}* %p, i64 %n, i32 1, i64 %n, i32 0, i64 %n
+; CHECK: %p, i64 0, i32 1, i64 0, i32 0, i64 %n
+ ret i8* %g
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2010-11-23-Distributed.ll b/src/LLVM/test/Transforms/InstCombine/2010-11-23-Distributed.ll
new file mode 100644
index 0000000..4f8e8dc
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2010-11-23-Distributed.ll
@@ -0,0 +1,23 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+define i32 @foo(i32 %x, i32 %y) {
+; CHECK: @foo
+ %add = add nsw i32 %y, %x
+ %mul = mul nsw i32 %add, %y
+ %square = mul nsw i32 %y, %y
+ %res = sub i32 %mul, %square
+ ret i32 %res
+; CHECK-NEXT: mul i32 %x, %y
+; CHECK-NEXT: ret i32
+}
+
+define i1 @bar(i64 %x, i64 %y) {
+; CHECK: @bar
+ %a = and i64 %y, %x
+; CHECK: and
+; CHECK-NOT: and
+ %not = xor i64 %a, -1
+ %b = and i64 %y, %not
+ %r = icmp eq i64 %b, 0
+ ret i1 %r
+; CHECK: ret i1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2011-02-14-InfLoop.ll b/src/LLVM/test/Transforms/InstCombine/2011-02-14-InfLoop.ll
new file mode 100644
index 0000000..14fa1a8
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2011-02-14-InfLoop.ll
@@ -0,0 +1,19 @@
+; This testcase causes an infinite loop in the instruction combiner,
+; because it changes a pattern and the original pattern is almost
+; identical to the newly-generated pattern.
+; RUN: opt < %s -instcombine -disable-output
+
+; PR9216
+
+target triple = "x86_64-unknown-linux-gnu"
+
+define <4 x float> @m_387(i8* noalias nocapture %A, i8* nocapture %B, <4 x i1> %C) nounwind {
+entry:
+ %movcsext20 = sext <4 x i1> %C to <4 x i32>
+ %tmp2389 = xor <4 x i32> %movcsext20, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %movcand25 = and <4 x i32> %tmp2389, <i32 undef, i32 undef, i32 undef, i32 -1>
+ %movcor26 = or <4 x i32> %movcand25, zeroinitializer
+ %L2 = bitcast <4 x i32> %movcor26 to <4 x float>
+ %L3 = shufflevector <4 x float> zeroinitializer, <4 x float> %L2, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
+ ret <4 x float> %L3
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2011-03-08-SRemMinusOneBadOpt.ll b/src/LLVM/test/Transforms/InstCombine/2011-03-08-SRemMinusOneBadOpt.ll
new file mode 100644
index 0000000..6a3e3e4
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2011-03-08-SRemMinusOneBadOpt.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; PR9346
+
+define i32 @test(i64 %x) nounwind {
+; CHECK: ret i32 0
+entry:
+ %or = or i64 %x, 4294967294
+ %conv = trunc i64 %or to i32
+ %rem.i = srem i32 %conv, -1
+ ret i32 %rem.i
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2011-05-02-VectorBoolean.ll b/src/LLVM/test/Transforms/InstCombine/2011-05-02-VectorBoolean.ll
new file mode 100644
index 0000000..02b64e3
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2011-05-02-VectorBoolean.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine
+; PR9579
+
+define <2 x i16> @entry(<2 x i16> %a) nounwind {
+entry:
+ %a.addr = alloca <2 x i16>, align 4
+ %.compoundliteral = alloca <2 x i16>, align 4
+ store <2 x i16> %a, <2 x i16>* %a.addr, align 4
+ %tmp = load <2 x i16>* %a.addr, align 4
+ store <2 x i16> zeroinitializer, <2 x i16>* %.compoundliteral
+ %tmp1 = load <2 x i16>* %.compoundliteral
+ %cmp = icmp uge <2 x i16> %tmp, %tmp1
+ %sext = sext <2 x i1> %cmp to <2 x i16>
+ ret <2 x i16> %sext
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2011-05-13-InBoundsGEP.ll b/src/LLVM/test/Transforms/InstCombine/2011-05-13-InBoundsGEP.ll
new file mode 100644
index 0000000..fba7239
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2011-05-13-InBoundsGEP.ll
@@ -0,0 +1,21 @@
+; RUN: opt < %s -S -instcombine | FileCheck %s
+; rdar://problem/9267970
+; ideally this test will run on a 32-bit host
+; must not discard GEPs that might overflow at runtime (aren't inbounds)
+
+define i32 @main(i32 %argc) {
+entry:
+ %tmp1 = add i32 %argc, -2
+ %tmp2 = add i32 %argc, 1879048192
+ %p = alloca i8
+; CHECK: getelementptr
+ %p1 = getelementptr i8* %p, i32 %tmp1
+; CHECK: getelementptr
+ %p2 = getelementptr i8* %p, i32 %tmp2
+ %cmp = icmp ult i8* %p1, %p2
+ br i1 %cmp, label %bbtrue, label %bbfalse
+bbtrue: ; preds = %entry
+ ret i32 -1
+bbfalse: ; preds = %entry
+ ret i32 0
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2011-05-28-swapmulsub.ll b/src/LLVM/test/Transforms/InstCombine/2011-05-28-swapmulsub.ll
new file mode 100644
index 0000000..b096d1f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2011-05-28-swapmulsub.ll
@@ -0,0 +1,57 @@
+; ModuleID = 'test1.c'
+; RUN: opt -S -instcombine < %s | FileCheck %s
+target triple = "x86_64-apple-macosx10.6.6"
+
+define zeroext i16 @foo1(i32 %on_off) nounwind uwtable ssp {
+entry:
+ %on_off.addr = alloca i32, align 4
+ %a = alloca i32, align 4
+ store i32 %on_off, i32* %on_off.addr, align 4
+ %tmp = load i32* %on_off.addr, align 4
+ %sub = sub i32 1, %tmp
+; CHECK-NOT: mul i32
+ %mul = mul i32 %sub, -2
+; CHECK: shl
+; CHECK-NEXT: add
+ store i32 %mul, i32* %a, align 4
+ %tmp1 = load i32* %a, align 4
+ %conv = trunc i32 %tmp1 to i16
+ ret i16 %conv
+}
+
+define zeroext i16 @foo2(i32 %on_off, i32 %q) nounwind uwtable ssp {
+entry:
+ %on_off.addr = alloca i32, align 4
+ %q.addr = alloca i32, align 4
+ %a = alloca i32, align 4
+ store i32 %on_off, i32* %on_off.addr, align 4
+ store i32 %q, i32* %q.addr, align 4
+ %tmp = load i32* %q.addr, align 4
+ %tmp1 = load i32* %on_off.addr, align 4
+ %sub = sub i32 %tmp, %tmp1
+; CHECK-NOT: mul i32
+ %mul = mul i32 %sub, -4
+; CHECK: sub i32
+; CHECK-NEXT: shl
+ store i32 %mul, i32* %a, align 4
+ %tmp2 = load i32* %a, align 4
+ %conv = trunc i32 %tmp2 to i16
+ ret i16 %conv
+}
+
+define zeroext i16 @foo3(i32 %on_off) nounwind uwtable ssp {
+entry:
+ %on_off.addr = alloca i32, align 4
+ %a = alloca i32, align 4
+ store i32 %on_off, i32* %on_off.addr, align 4
+ %tmp = load i32* %on_off.addr, align 4
+ %sub = sub i32 7, %tmp
+; CHECK-NOT: mul i32
+ %mul = mul i32 %sub, -4
+; CHECK: shl
+; CHECK-NEXT: add
+ store i32 %mul, i32* %a, align 4
+ %tmp1 = load i32* %a, align 4
+ %conv = trunc i32 %tmp1 to i16
+ ret i16 %conv
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2011-06-13-nsw-alloca.ll b/src/LLVM/test/Transforms/InstCombine/2011-06-13-nsw-alloca.ll
new file mode 100644
index 0000000..2f72b73
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2011-06-13-nsw-alloca.ll
@@ -0,0 +1,60 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
+target triple = "i386-apple-darwin10.0.0"
+
+define void @fu1(i32 %parm) nounwind ssp {
+ %1 = alloca i32, align 4
+ %ptr = alloca double*, align 4
+ store i32 %parm, i32* %1, align 4
+ store double* null, double** %ptr, align 4
+ %2 = load i32* %1, align 4
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %10
+
+; <label>:4 ; preds = %0
+ %5 = load i32* %1, align 4
+ %6 = mul nsw i32 %5, 8
+; With "nsw", the alloca and its bitcast can be fused:
+ %7 = add nsw i32 %6, 2048
+; CHECK: alloca double*
+ %8 = alloca i8, i32 %7
+ %9 = bitcast i8* %8 to double*
+ store double* %9, double** %ptr, align 4
+ br label %10
+
+; <label>:10 ; preds = %4, %0
+ %11 = load double** %ptr, align 4
+ call void @bar(double* %11)
+; CHECK: ret
+ ret void
+}
+
+declare void @bar(double*)
+
+define void @fu2(i32 %parm) nounwind ssp {
+ %1 = alloca i32, align 4
+ %ptr = alloca double*, align 4
+ store i32 %parm, i32* %1, align 4
+ store double* null, double** %ptr, align 4
+ %2 = load i32* %1, align 4
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %10
+
+; <label>:4 ; preds = %0
+ %5 = load i32* %1, align 4
+ %6 = mul nsw i32 %5, 8
+; Without "nsw", the alloca and its bitcast cannot be fused:
+ %7 = add i32 %6, 2048
+; CHECK: alloca i8
+ %8 = alloca i8, i32 %7
+; CHECK-NEXT: bitcast i8*
+ %9 = bitcast i8* %8 to double*
+ store double* %9, double** %ptr, align 4
+ br label %10
+
+; <label>:10 ; preds = %4, %0
+ %11 = load double** %ptr, align 4
+ call void @bar(double* %11)
+ ret void
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/2011-09-03-Trampoline.ll b/src/LLVM/test/Transforms/InstCombine/2011-09-03-Trampoline.ll
new file mode 100644
index 0000000..5456e03
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2011-09-03-Trampoline.ll
@@ -0,0 +1,87 @@
+; RUN: opt -instcombine -S < %s | FileCheck %s
+
+declare void @llvm.init.trampoline(i8*, i8*, i8*)
+declare i8* @llvm.adjust.trampoline(i8*)
+declare i32 @f(i8 * nest, i32)
+
+; Most common case
+define i32 @test0(i32 %n) {
+ %alloca = alloca [10 x i8], align 16
+ %gep = getelementptr [10 x i8]* %alloca, i32 0, i32 0
+ call void @llvm.init.trampoline(i8* %gep, i8* bitcast (i32 (i8*, i32)* @f to i8*),
+ i8* null)
+ %tramp = call i8* @llvm.adjust.trampoline(i8* %gep)
+ %function = bitcast i8* %tramp to i32(i32)*
+ %ret = call i32 %function(i32 %n)
+ ret i32 %ret
+
+; CHECK: define i32 @test0(i32 %n) {
+; CHECK: %ret = call i32 @f(i8* nest null, i32 %n)
+}
+
+define i32 @test1(i32 %n, i8* %trampmem) {
+ call void @llvm.init.trampoline(i8* %trampmem,
+ i8* bitcast (i32 (i8*, i32)* @f to i8*),
+ i8* null)
+ %tramp = call i8* @llvm.adjust.trampoline(i8* %trampmem)
+ %function = bitcast i8* %tramp to i32(i32)*
+ %ret = call i32 %function(i32 %n)
+ ret i32 %ret
+; CHECK: define i32 @test1(i32 %n, i8* %trampmem) {
+; CHECK: %ret = call i32 @f(i8* nest null, i32 %n)
+}
+
+define i32 @test2(i32 %n, i8* %trampmem) {
+ %tramp = call i8* @llvm.adjust.trampoline(i8* %trampmem)
+ %functiona = bitcast i8* %tramp to i32(i32)*
+ %ret = call i32 %functiona(i32 %n)
+ ret i32 %ret
+; CHECK: define i32 @test2(i32 %n, i8* %trampmem) {
+; CHECK: %ret = call i32 %functiona(i32 %n)
+}
+
+define i32 @test3(i32 %n, i8* %trampmem) {
+ call void @llvm.init.trampoline(i8* %trampmem,
+ i8* bitcast (i32 (i8*, i32)* @f to i8*),
+ i8* null)
+
+; CHECK: define i32 @test3(i32 %n, i8* %trampmem) {
+; CHECK: %ret0 = call i32 @f(i8* nest null, i32 %n)
+ %tramp0 = call i8* @llvm.adjust.trampoline(i8* %trampmem)
+ %function0 = bitcast i8* %tramp0 to i32(i32)*
+ %ret0 = call i32 %function0(i32 %n)
+
+ ;; Not optimized since previous call could be writing.
+ %tramp1 = call i8* @llvm.adjust.trampoline(i8* %trampmem)
+ %function1 = bitcast i8* %tramp1 to i32(i32)*
+ %ret1 = call i32 %function1(i32 %n)
+; CHECK: %ret1 = call i32 %function1(i32 %n)
+
+ ret i32 %ret1
+}
+
+define i32 @test4(i32 %n) {
+ %alloca = alloca [10 x i8], align 16
+ %gep = getelementptr [10 x i8]* %alloca, i32 0, i32 0
+ call void @llvm.init.trampoline(i8* %gep, i8* bitcast (i32 (i8*, i32)* @f to i8*),
+ i8* null)
+
+ %tramp0 = call i8* @llvm.adjust.trampoline(i8* %gep)
+ %function0 = bitcast i8* %tramp0 to i32(i32)*
+ %ret0 = call i32 %function0(i32 %n)
+
+ %tramp1 = call i8* @llvm.adjust.trampoline(i8* %gep)
+ %function1 = bitcast i8* %tramp0 to i32(i32)*
+ %ret1 = call i32 %function1(i32 %n)
+
+ %tramp2 = call i8* @llvm.adjust.trampoline(i8* %gep)
+ %function2 = bitcast i8* %tramp2 to i32(i32)*
+ %ret2 = call i32 %function2(i32 %n)
+
+ ret i32 %ret2
+
+; CHECK: define i32 @test4(i32 %n) {
+; CHECK: %ret0 = call i32 @f(i8* nest null, i32 %n)
+; CHECK: %ret1 = call i32 @f(i8* nest null, i32 %n)
+; CHECK: %ret2 = call i32 @f(i8* nest null, i32 %n)
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/2011-10-07-AlignPromotion.ll b/src/LLVM/test/Transforms/InstCombine/2011-10-07-AlignPromotion.ll
new file mode 100644
index 0000000..22061b2
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/2011-10-07-AlignPromotion.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+; rdar://problem/10063307
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+target triple = "thumbv7-apple-ios5.0.0"
+
+%0 = type { [2 x i32] }
+%struct.CGPoint = type { float, float }
+
+define void @t(%struct.CGPoint* %a) nounwind {
+ %Point = alloca %struct.CGPoint, align 4
+ %1 = bitcast %struct.CGPoint* %a to i64*
+ %2 = bitcast %struct.CGPoint* %Point to i64*
+ %3 = load i64* %1, align 4
+ store i64 %3, i64* %2, align 4
+ call void @foo(i64* %2) nounwind
+ ret void
+; CHECK: %Point = alloca i64, align 4
+}
+
+declare void @foo(i64*)
diff --git a/src/LLVM/test/Transforms/InstCombine/CPP_min_max.ll b/src/LLVM/test/Transforms/InstCombine/CPP_min_max.ll
new file mode 100644
index 0000000..6bb0ba9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/CPP_min_max.ll
@@ -0,0 +1,34 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep select | not grep {i32\\*}
+
+; This testcase corresponds to PR362, which notices that this horrible code
+; is generated by the C++ front-end and LLVM optimizers, which has lots of
+; loads and other stuff that are unneeded.
+;
+; Instcombine should propagate the load through the select instructions to
+; allow elimination of the extra stuff by the mem2reg pass.
+
+define void @_Z5test1RiS_(i32* %x, i32* %y) {
+entry:
+ %tmp.1.i = load i32* %y ; <i32> [#uses=1]
+ %tmp.3.i = load i32* %x ; <i32> [#uses=1]
+ %tmp.4.i = icmp slt i32 %tmp.1.i, %tmp.3.i ; <i1> [#uses=1]
+ %retval.i = select i1 %tmp.4.i, i32* %y, i32* %x ; <i32*> [#uses=1]
+ %tmp.4 = load i32* %retval.i ; <i32> [#uses=1]
+ store i32 %tmp.4, i32* %x
+ ret void
+}
+
+define void @_Z5test2RiS_(i32* %x, i32* %y) {
+entry:
+ %tmp.0 = alloca i32 ; <i32*> [#uses=2]
+ %tmp.2 = load i32* %x ; <i32> [#uses=2]
+ store i32 %tmp.2, i32* %tmp.0
+ %tmp.3.i = load i32* %y ; <i32> [#uses=1]
+ %tmp.4.i = icmp slt i32 %tmp.2, %tmp.3.i ; <i1> [#uses=1]
+ %retval.i = select i1 %tmp.4.i, i32* %y, i32* %tmp.0 ; <i32*> [#uses=1]
+ %tmp.6 = load i32* %retval.i ; <i32> [#uses=1]
+ store i32 %tmp.6, i32* %y
+ ret void
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/ExtractCast.ll b/src/LLVM/test/Transforms/InstCombine/ExtractCast.ll
new file mode 100644
index 0000000..5ebbefd
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/ExtractCast.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -instcombine -S -o - | FileCheck %s
+
+; CHECK: @a
+define i32 @a(<4 x i64> %I) {
+entry:
+; CHECK-NOT: trunc <4 x i64>
+ %J = trunc <4 x i64> %I to <4 x i32>
+ %K = extractelement <4 x i32> %J, i32 3
+; CHECK: extractelement <4 x i64>
+; CHECK: trunc i64
+; CHECK: ret
+ ret i32 %K
+}
+
+
+; CHECK: @b
+define i32 @b(<4 x float> %I) {
+entry:
+; CHECK-NOT: fptosi <4 x float>
+ %J = fptosi <4 x float> %I to <4 x i32>
+ %K = extractelement <4 x i32> %J, i32 3
+; CHECK: extractelement <4 x float>
+; CHECK: fptosi float
+; CHECK: ret
+ ret i32 %K
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/IntPtrCast.ll b/src/LLVM/test/Transforms/InstCombine/IntPtrCast.ll
new file mode 100644
index 0000000..ef43048
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/IntPtrCast.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-p:32:32"
+
+define i32* @test(i32* %P) {
+ %V = ptrtoint i32* %P to i32 ; <i32> [#uses=1]
+ %P2 = inttoptr i32 %V to i32* ; <i32*> [#uses=1]
+ ret i32* %P2
+; CHECK: ret i32* %P
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/JavaCompare.ll b/src/LLVM/test/Transforms/InstCombine/JavaCompare.ll
new file mode 100644
index 0000000..266c4ad
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/JavaCompare.ll
@@ -0,0 +1,14 @@
+; This is the sequence of stuff that the Java front-end expands for a single
+; <= comparison. Check to make sure we turn it into a <= (only)
+
+; RUN: opt < %s -instcombine -S | grep {icmp sle i32 %A, %B}
+
+define i1 @le(i32 %A, i32 %B) {
+ %c1 = icmp sgt i32 %A, %B ; <i1> [#uses=1]
+ %tmp = select i1 %c1, i32 1, i32 0 ; <i32> [#uses=1]
+ %c2 = icmp slt i32 %A, %B ; <i1> [#uses=1]
+ %result = select i1 %c2, i32 -1, i32 %tmp ; <i32> [#uses=1]
+ %c3 = icmp sle i32 %result, 0 ; <i1> [#uses=1]
+ ret i1 %c3
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/LandingPadClauses.ll b/src/LLVM/test/Transforms/InstCombine/LandingPadClauses.ll
new file mode 100644
index 0000000..055bdcc
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/LandingPadClauses.ll
@@ -0,0 +1,181 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+@T1 = external constant i32
+@T2 = external constant i32
+@T3 = external constant i32
+
+declare i32 @generic_personality(i32, i64, i8*, i8*)
+declare i32 @__gxx_personality_v0(i32, i64, i8*, i8*)
+
+declare void @bar()
+
+define void @foo_generic() {
+; CHECK: @foo_generic
+ invoke void @bar()
+ to label %cont.a unwind label %lpad.a
+cont.a:
+ invoke void @bar()
+ to label %cont.b unwind label %lpad.b
+cont.b:
+ invoke void @bar()
+ to label %cont.c unwind label %lpad.c
+cont.c:
+ invoke void @bar()
+ to label %cont.d unwind label %lpad.d
+cont.d:
+ invoke void @bar()
+ to label %cont.e unwind label %lpad.e
+cont.e:
+ invoke void @bar()
+ to label %cont.f unwind label %lpad.f
+cont.f:
+ invoke void @bar()
+ to label %cont.g unwind label %lpad.g
+cont.g:
+ invoke void @bar()
+ to label %cont.h unwind label %lpad.h
+cont.h:
+ invoke void @bar()
+ to label %cont.i unwind label %lpad.i
+cont.i:
+ ret void
+
+lpad.a:
+ %a = landingpad { i8*, i32 } personality i32 (i32, i64, i8*, i8*)* @generic_personality
+ catch i32* @T1
+ catch i32* @T2
+ catch i32* @T1
+ catch i32* @T2
+ unreachable
+; CHECK: %a = landingpad
+; CHECK-NEXT: @T1
+; CHECK-NEXT: @T2
+; CHECK-NEXT: unreachable
+
+lpad.b:
+ %b = landingpad { i8*, i32 } personality i32 (i32, i64, i8*, i8*)* @generic_personality
+ filter [0 x i32*] zeroinitializer
+ catch i32* @T1
+ unreachable
+; CHECK: %b = landingpad
+; CHECK-NEXT: filter
+; CHECK-NEXT: unreachable
+
+lpad.c:
+ %c = landingpad { i8*, i32 } personality i32 (i32, i64, i8*, i8*)* @generic_personality
+ catch i32* @T1
+ filter [1 x i32*] [i32* @T1]
+ catch i32* @T2
+ unreachable
+; CHECK: %c = landingpad
+; CHECK-NEXT: @T1
+; CHECK-NEXT: filter [0 x i32*]
+; CHECK-NEXT: unreachable
+
+lpad.d:
+ %d = landingpad { i8*, i32 } personality i32 (i32, i64, i8*, i8*)* @generic_personality
+ filter [3 x i32*] zeroinitializer
+ unreachable
+; CHECK: %d = landingpad
+; CHECK-NEXT: filter [1 x i32*] zeroinitializer
+; CHECK-NEXT: unreachable
+
+lpad.e:
+ %e = landingpad { i8*, i32 } personality i32 (i32, i64, i8*, i8*)* @generic_personality
+ catch i32* @T1
+ filter [3 x i32*] [i32* @T1, i32* @T2, i32* @T2]
+ unreachable
+; CHECK: %e = landingpad
+; CHECK-NEXT: @T1
+; CHECK-NEXT: filter [1 x i32*] [i32* @T2]
+; CHECK-NEXT: unreachable
+
+lpad.f:
+ %f = landingpad { i8*, i32 } personality i32 (i32, i64, i8*, i8*)* @generic_personality
+ filter [2 x i32*] [i32* @T2, i32* @T1]
+ filter [1 x i32*] [i32* @T1]
+ unreachable
+; CHECK: %f = landingpad
+; CHECK-NEXT: filter [1 x i32*] [i32* @T1]
+; CHECK-NEXT: unreachable
+
+lpad.g:
+ %g = landingpad { i8*, i32 } personality i32 (i32, i64, i8*, i8*)* @generic_personality
+ filter [1 x i32*] [i32* @T1]
+ catch i32* @T3
+ filter [2 x i32*] [i32* @T2, i32* @T1]
+ unreachable
+; CHECK: %g = landingpad
+; CHECK-NEXT: filter [1 x i32*] [i32* @T1]
+; CHECK-NEXT: catch i32* @T3
+; CHECK-NEXT: unreachable
+
+lpad.h:
+ %h = landingpad { i8*, i32 } personality i32 (i32, i64, i8*, i8*)* @generic_personality
+ filter [2 x i32*] [i32* @T1, i32* null]
+ filter [1 x i32*] zeroinitializer
+ unreachable
+; CHECK: %h = landingpad
+; CHECK-NEXT: filter [1 x i32*] zeroinitializer
+; CHECK-NEXT: unreachable
+
+lpad.i:
+ %i = landingpad { i8*, i32 } personality i32 (i32, i64, i8*, i8*)* @generic_personality
+ cleanup
+ filter [0 x i32*] zeroinitializer
+ unreachable
+; CHECK: %i = landingpad
+; CHECK-NEXT: filter
+; CHECK-NEXT: unreachable
+}
+
+define void @foo_cxx() {
+; CHECK: @foo_cxx
+ invoke void @bar()
+ to label %cont.a unwind label %lpad.a
+cont.a:
+ invoke void @bar()
+ to label %cont.b unwind label %lpad.b
+cont.b:
+ invoke void @bar()
+ to label %cont.c unwind label %lpad.c
+cont.c:
+ invoke void @bar()
+ to label %cont.d unwind label %lpad.d
+cont.d:
+ ret void
+
+lpad.a:
+ %a = landingpad { i8*, i32 } personality i32 (i32, i64, i8*, i8*)* @__gxx_personality_v0
+ catch i32* null
+ catch i32* @T1
+ unreachable
+; CHECK: %a = landingpad
+; CHECK-NEXT: null
+; CHECK-NEXT: unreachable
+
+lpad.b:
+ %b = landingpad { i8*, i32 } personality i32 (i32, i64, i8*, i8*)* @__gxx_personality_v0
+ filter [1 x i32*] zeroinitializer
+ unreachable
+; CHECK: %b = landingpad
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: unreachable
+
+lpad.c:
+ %c = landingpad { i8*, i32 } personality i32 (i32, i64, i8*, i8*)* @__gxx_personality_v0
+ filter [2 x i32*] [i32* @T1, i32* null]
+ unreachable
+; CHECK: %c = landingpad
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: unreachable
+
+lpad.d:
+ %d = landingpad { i8*, i32 } personality i32 (i32, i64, i8*, i8*)* @__gxx_personality_v0
+ cleanup
+ catch i32* null
+ unreachable
+; CHECK: %d = landingpad
+; CHECK-NEXT: null
+; CHECK-NEXT: unreachable
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/README.txt b/src/LLVM/test/Transforms/InstCombine/README.txt
new file mode 100644
index 0000000..46ac500
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/README.txt
@@ -0,0 +1,4 @@
+This directory contains test cases for the instcombine transformation. The
+dated tests are actual bug tests, whereas the named tests are used to test
+for features that the this pass should be capable of performing.
+
diff --git a/src/LLVM/test/Transforms/InstCombine/add-shrink.ll b/src/LLVM/test/Transforms/InstCombine/add-shrink.ll
new file mode 100644
index 0000000..cc57478
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/add-shrink.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep {add nsw i32}
+; RUN: opt < %s -instcombine -S | grep sext | count 1
+
+; Should only have one sext and the add should be i32 instead of i64.
+
+define i64 @test1(i32 %A) {
+ %B = ashr i32 %A, 7 ; <i32> [#uses=1]
+ %C = ashr i32 %A, 9 ; <i32> [#uses=1]
+ %D = sext i32 %B to i64 ; <i64> [#uses=1]
+ %E = sext i32 %C to i64 ; <i64> [#uses=1]
+ %F = add i64 %D, %E ; <i64> [#uses=1]
+ ret i64 %F
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/add-sitofp.ll b/src/LLVM/test/Transforms/InstCombine/add-sitofp.ll
new file mode 100644
index 0000000..98a8cb4
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/add-sitofp.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {add nsw i32}
+
+define double @x(i32 %a, i32 %b) nounwind {
+ %m = lshr i32 %a, 24
+ %n = and i32 %m, %b
+ %o = sitofp i32 %n to double
+ %p = fadd double %o, 1.0
+ ret double %p
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/add.ll b/src/LLVM/test/Transforms/InstCombine/add.ll
new file mode 100644
index 0000000..af85686
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/add.ll
@@ -0,0 +1,301 @@
+; This test makes sure that add instructions are properly eliminated.
+
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep -v OK | not grep add
+
+define i32 @test1(i32 %A) {
+ %B = add i32 %A, 0 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test2(i32 %A) {
+ %B = add i32 %A, 5 ; <i32> [#uses=1]
+ %C = add i32 %B, -5 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test3(i32 %A) {
+ %B = add i32 %A, 5 ; <i32> [#uses=1]
+ ;; This should get converted to an add
+ %C = sub i32 %B, 5 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test4(i32 %A, i32 %B) {
+ %C = sub i32 0, %A ; <i32> [#uses=1]
+ ; D = B + -A = B - A
+ %D = add i32 %B, %C ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test5(i32 %A, i32 %B) {
+ %C = sub i32 0, %A ; <i32> [#uses=1]
+ ; D = -A + B = B - A
+ %D = add i32 %C, %B ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test6(i32 %A) {
+ %B = mul i32 7, %A ; <i32> [#uses=1]
+ ; C = 7*A+A == 8*A == A << 3
+ %C = add i32 %B, %A ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test7(i32 %A) {
+ %B = mul i32 7, %A ; <i32> [#uses=1]
+ ; C = A+7*A == 8*A == A << 3
+ %C = add i32 %A, %B ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+; (A & C1)+(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
+define i32 @test8(i32 %A, i32 %B) {
+ %A1 = and i32 %A, 7 ; <i32> [#uses=1]
+ %B1 = and i32 %B, 128 ; <i32> [#uses=1]
+ %C = add i32 %A1, %B1 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test9(i32 %A) {
+ %B = shl i32 %A, 4 ; <i32> [#uses=2]
+ ; === shl int %A, 5
+ %C = add i32 %B, %B ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i1 @test10(i8 %A, i8 %b) {
+ %B = add i8 %A, %b ; <i8> [#uses=1]
+ ; === A != -b
+ %c = icmp ne i8 %B, 0 ; <i1> [#uses=1]
+ ret i1 %c
+}
+
+define i1 @test11(i8 %A) {
+ %B = add i8 %A, -1 ; <i8> [#uses=1]
+ ; === A != 1
+ %c = icmp ne i8 %B, 0 ; <i1> [#uses=1]
+ ret i1 %c
+}
+
+define i32 @test12(i32 %A, i32 %B) {
+ ; Should be transformed into shl A, 1
+ %C_OK = add i32 %B, %A ; <i32> [#uses=1]
+ br label %X
+
+X: ; preds = %0
+ %D = add i32 %C_OK, %A ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test13(i32 %A, i32 %B, i32 %C) {
+ %D_OK = add i32 %A, %B ; <i32> [#uses=1]
+ %E_OK = add i32 %D_OK, %C ; <i32> [#uses=1]
+ ;; shl A, 1
+ %F = add i32 %E_OK, %A ; <i32> [#uses=1]
+ ret i32 %F
+}
+
+define i32 @test14(i32 %offset, i32 %difference) {
+ %tmp.2 = and i32 %difference, 3 ; <i32> [#uses=1]
+ %tmp.3_OK = add i32 %tmp.2, %offset ; <i32> [#uses=1]
+ %tmp.5.mask = and i32 %difference, -4 ; <i32> [#uses=1]
+ ; == add %offset, %difference
+ %tmp.8 = add i32 %tmp.3_OK, %tmp.5.mask ; <i32> [#uses=1]
+ ret i32 %tmp.8
+}
+
+define i8 @test15(i8 %A) {
+ ; Does not effect result
+ %B = add i8 %A, -64 ; <i8> [#uses=1]
+ ; Only one bit set
+ %C = and i8 %B, 16 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i8 @test16(i8 %A) {
+ ; Turn this into a XOR
+ %B = add i8 %A, 16 ; <i8> [#uses=1]
+ ; Only one bit set
+ %C = and i8 %B, 16 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i32 @test17(i32 %A) {
+ %B = xor i32 %A, -1 ; <i32> [#uses=1]
+ ; == sub int 0, %A
+ %C = add i32 %B, 1 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i8 @test18(i8 %A) {
+ %B = xor i8 %A, -1 ; <i8> [#uses=1]
+ ; == sub ubyte 16, %A
+ %C = add i8 %B, 17 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i32 @test19(i1 %C) {
+ %A = select i1 %C, i32 1000, i32 10 ; <i32> [#uses=1]
+ %V = add i32 %A, 123 ; <i32> [#uses=1]
+ ret i32 %V
+}
+
+define i32 @test20(i32 %x) {
+ %tmp.2 = xor i32 %x, -2147483648 ; <i32> [#uses=1]
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i32 %tmp.2, -2147483648 ; <i32> [#uses=1]
+ ret i32 %tmp.4
+}
+
+define i1 @test21(i32 %x) {
+ %t = add i32 %x, 4 ; <i32> [#uses=1]
+ %y = icmp eq i32 %t, 123 ; <i1> [#uses=1]
+ ret i1 %y
+}
+
+define i32 @test22(i32 %V) {
+ %V2 = add i32 %V, 10 ; <i32> [#uses=1]
+ switch i32 %V2, label %Default [
+ i32 20, label %Lab1
+ i32 30, label %Lab2
+ ]
+
+Default: ; preds = %0
+ ret i32 123
+
+Lab1: ; preds = %0
+ ret i32 12312
+
+Lab2: ; preds = %0
+ ret i32 1231231
+}
+
+define i32 @test23(i1 %C, i32 %a) {
+entry:
+ br i1 %C, label %endif, label %else
+
+else: ; preds = %entry
+ br label %endif
+
+endif: ; preds = %else, %entry
+ %b.0 = phi i32 [ 0, %entry ], [ 1, %else ] ; <i32> [#uses=1]
+ %tmp.4 = add i32 %b.0, 1 ; <i32> [#uses=1]
+ ret i32 %tmp.4
+}
+
+define i32 @test24(i32 %A) {
+ %B = add i32 %A, 1 ; <i32> [#uses=1]
+ %C = shl i32 %B, 1 ; <i32> [#uses=1]
+ %D = sub i32 %C, 2 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i64 @test25(i64 %Y) {
+ %tmp.4 = shl i64 %Y, 2 ; <i64> [#uses=1]
+ %tmp.12 = shl i64 %Y, 2 ; <i64> [#uses=1]
+ %tmp.8 = add i64 %tmp.4, %tmp.12 ; <i64> [#uses=1]
+ ret i64 %tmp.8
+}
+
+define i32 @test26(i32 %A, i32 %B) {
+ %C = add i32 %A, %B ; <i32> [#uses=1]
+ %D = sub i32 %C, %B ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test27(i1 %C, i32 %X, i32 %Y) {
+ %A = add i32 %X, %Y ; <i32> [#uses=1]
+ %B = add i32 %Y, 123 ; <i32> [#uses=1]
+ ;; Fold add through select.
+ %C.upgrd.1 = select i1 %C, i32 %A, i32 %B ; <i32> [#uses=1]
+ %D = sub i32 %C.upgrd.1, %Y ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test28(i32 %X) {
+ %Y = add i32 %X, 1234 ; <i32> [#uses=1]
+ %Z = sub i32 42, %Y ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+define i32 @test29(i32 %X, i32 %x) {
+ %tmp.2 = sub i32 %X, %x ; <i32> [#uses=2]
+ %tmp.2.mask = and i32 %tmp.2, 63 ; <i32> [#uses=1]
+ %tmp.6 = add i32 %tmp.2.mask, %x ; <i32> [#uses=1]
+ %tmp.7 = and i32 %tmp.6, 63 ; <i32> [#uses=1]
+ %tmp.9 = and i32 %tmp.2, -64 ; <i32> [#uses=1]
+ %tmp.10 = or i32 %tmp.7, %tmp.9 ; <i32> [#uses=1]
+ ret i32 %tmp.10
+}
+
+define i64 @test30(i64 %x) {
+ %tmp.2 = xor i64 %x, -9223372036854775808 ; <i64> [#uses=1]
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i64 %tmp.2, -9223372036854775808 ; <i64> [#uses=1]
+ ret i64 %tmp.4
+}
+
+define i32 @test31(i32 %A) {
+ %B = add i32 %A, 4 ; <i32> [#uses=1]
+ %C = mul i32 %B, 5 ; <i32> [#uses=1]
+ %D = sub i32 %C, 20 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test32(i32 %A) {
+ %B = add i32 %A, 4 ; <i32> [#uses=1]
+ %C = shl i32 %B, 2 ; <i32> [#uses=1]
+ %D = sub i32 %C, 16 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i8 @test33(i8 %A) {
+ %B = and i8 %A, -2 ; <i8> [#uses=1]
+ %C = add i8 %B, 1 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i8 @test34(i8 %A) {
+ %B = add i8 %A, 64 ; <i8> [#uses=1]
+ %C = and i8 %B, 12 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i32 @test35(i32 %a) {
+ %tmpnot = xor i32 %a, -1 ; <i32> [#uses=1]
+ %tmp2 = add i32 %tmpnot, %a ; <i32> [#uses=1]
+ ret i32 %tmp2
+}
+
+define i32 @test36(i32 %a) {
+ %x = and i32 %a, -2
+ %y = and i32 %a, -126
+ %z = add i32 %x, %y
+ %q = and i32 %z, 1 ; always zero
+ ret i32 %q
+}
+
+define i1 @test37(i32 %a, i32 %b) nounwind readnone {
+ %add = add i32 %a, %b
+ %cmp = icmp eq i32 %add, %a
+ ret i1 %cmp
+}
+
+define i1 @test38(i32 %a, i32 %b) nounwind readnone {
+ %add = add i32 %a, %b
+ %cmp = icmp eq i32 %add, %b
+ ret i1 %cmp
+}
+
+define i1 @test39(i32 %a, i32 %b) nounwind readnone {
+ %add = add i32 %b, %a
+ %cmp = icmp eq i32 %add, %a
+ ret i1 %cmp
+}
+
+define i1 @test40(i32 %a, i32 %b) nounwind readnone {
+ %add = add i32 %b, %a
+ %cmp = icmp eq i32 %add, %b
+ ret i1 %cmp
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/add2.ll b/src/LLVM/test/Transforms/InstCombine/add2.ll
new file mode 100644
index 0000000..2050ee8
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/add2.ll
@@ -0,0 +1,43 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i64 @test1(i64 %A, i32 %B) {
+ %tmp12 = zext i32 %B to i64
+ %tmp3 = shl i64 %tmp12, 32
+ %tmp5 = add i64 %tmp3, %A
+ %tmp6 = and i64 %tmp5, 123
+ ret i64 %tmp6
+; CHECK: @test1
+; CHECK-NEXT: and i64 %A, 123
+; CHECK-NEXT: ret i64
+}
+
+define i32 @test2(i32 %A) {
+ %B = and i32 %A, 7
+ %C = and i32 %A, 32
+ %F = add i32 %B, %C
+ ret i32 %F
+; CHECK: @test2
+; CHECK-NEXT: and i32 %A, 39
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test3(i32 %A) {
+ %B = and i32 %A, 128
+ %C = lshr i32 %A, 30
+ %F = add i32 %B, %C
+ ret i32 %F
+; CHECK: @test3
+; CHECK-NEXT: and
+; CHECK-NEXT: lshr
+; CHECK-NEXT: or i32 %B, %C
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test4(i32 %A) {
+ %B = add nuw i32 %A, %A
+ ret i32 %B
+; CHECK: @test4
+; CHECK-NEXT: %B = shl nuw i32 %A, 1
+; CHECK-NEXT: ret i32 %B
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/add3.ll b/src/LLVM/test/Transforms/InstCombine/add3.ll
new file mode 100644
index 0000000..53a7b43
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/add3.ll
@@ -0,0 +1,21 @@
+; RUN: opt < %s -instcombine -S | grep inttoptr | count 2
+
+;; Target triple for gep raising case below.
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i686-apple-darwin8"
+
+; PR1795
+define void @test2(i32 %.val24) {
+EntryBlock:
+ add i32 %.val24, -12
+ inttoptr i32 %0 to i32*
+ store i32 1, i32* %1
+ add i32 %.val24, -16
+ inttoptr i32 %2 to i32*
+ getelementptr i32* %3, i32 1
+ load i32* %4
+ tail call i32 @callee( i32 %5 )
+ ret void
+}
+
+declare i32 @callee(i32)
diff --git a/src/LLVM/test/Transforms/InstCombine/addnegneg.ll b/src/LLVM/test/Transforms/InstCombine/addnegneg.ll
new file mode 100644
index 0000000..a3a09f2
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/addnegneg.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | grep { sub } | count 1
+; PR2047
+
+define i32 @l(i32 %a, i32 %b, i32 %c, i32 %d) {
+entry:
+ %b.neg = sub i32 0, %b ; <i32> [#uses=1]
+ %c.neg = sub i32 0, %c ; <i32> [#uses=1]
+ %sub4 = add i32 %c.neg, %b.neg ; <i32> [#uses=1]
+ %sub6 = add i32 %sub4, %d ; <i32> [#uses=1]
+ ret i32 %sub6
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/adjust-for-sminmax.ll b/src/LLVM/test/Transforms/InstCombine/adjust-for-sminmax.ll
new file mode 100644
index 0000000..b9b6f70
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/adjust-for-sminmax.ll
@@ -0,0 +1,85 @@
+; RUN: opt < %s -instcombine -S | grep {icmp s\[lg\]t i32 %n, 0} | count 16
+
+; Instcombine should recognize that this code can be adjusted
+; to fit the canonical smax/smin pattern.
+
+define i32 @floor_a(i32 %n) {
+ %t = icmp sgt i32 %n, -1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_a(i32 %n) {
+ %t = icmp slt i32 %n, 1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @floor_b(i32 %n) {
+ %t = icmp sgt i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_b(i32 %n) {
+ %t = icmp slt i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @floor_c(i32 %n) {
+ %t = icmp sge i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_c(i32 %n) {
+ %t = icmp sle i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @floor_d(i32 %n) {
+ %t = icmp sge i32 %n, 1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_d(i32 %n) {
+ %t = icmp sle i32 %n, -1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @floor_e(i32 %n) {
+ %t = icmp sgt i32 %n, -1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_e(i32 %n) {
+ %t = icmp slt i32 %n, 1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @floor_f(i32 %n) {
+ %t = icmp sgt i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_f(i32 %n) {
+ %t = icmp slt i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @floor_g(i32 %n) {
+ %t = icmp sge i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_g(i32 %n) {
+ %t = icmp sle i32 %n, 0
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @floor_h(i32 %n) {
+ %t = icmp sge i32 %n, 1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
+define i32 @ceil_h(i32 %n) {
+ %t = icmp sle i32 %n, -1
+ %m = select i1 %t, i32 %n, i32 0
+ ret i32 %m
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/align-2d-gep.ll b/src/LLVM/test/Transforms/InstCombine/align-2d-gep.ll
new file mode 100644
index 0000000..eeca5c0
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/align-2d-gep.ll
@@ -0,0 +1,44 @@
+; RUN: opt < %s -instcombine -S | grep {align 16} | count 1
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+; A multi-dimensional array in a nested loop doing vector stores that
+; aren't yet aligned. Instcombine can understand the addressing in the
+; Nice case to prove 16 byte alignment. In the Awkward case, the inner
+; array dimension is not even, so the stores to it won't always be
+; aligned. Instcombine should prove alignment in exactly one of the two
+; stores.
+
+@Nice = global [1001 x [20000 x double]] zeroinitializer, align 32
+@Awkward = global [1001 x [20001 x double]] zeroinitializer, align 32
+
+define void @foo() nounwind {
+entry:
+ br label %bb7.outer
+
+bb7.outer:
+ %i = phi i64 [ 0, %entry ], [ %indvar.next26, %bb11 ]
+ br label %bb1
+
+bb1:
+ %j = phi i64 [ 0, %bb7.outer ], [ %indvar.next, %bb1 ]
+
+ %t4 = getelementptr [1001 x [20000 x double]]* @Nice, i64 0, i64 %i, i64 %j
+ %q = bitcast double* %t4 to <2 x double>*
+ store <2 x double><double 0.0, double 0.0>, <2 x double>* %q, align 8
+
+ %s4 = getelementptr [1001 x [20001 x double]]* @Awkward, i64 0, i64 %i, i64 %j
+ %r = bitcast double* %s4 to <2 x double>*
+ store <2 x double><double 0.0, double 0.0>, <2 x double>* %r, align 8
+
+ %indvar.next = add i64 %j, 2
+ %exitcond = icmp eq i64 %indvar.next, 557
+ br i1 %exitcond, label %bb11, label %bb1
+
+bb11:
+ %indvar.next26 = add i64 %i, 1
+ %exitcond27 = icmp eq i64 %indvar.next26, 991
+ br i1 %exitcond27, label %return.split, label %bb7.outer
+
+return.split:
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/align-addr.ll b/src/LLVM/test/Transforms/InstCombine/align-addr.ll
new file mode 100644
index 0000000..27916b9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/align-addr.ll
@@ -0,0 +1,60 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+; Instcombine should be able to prove vector alignment in the
+; presence of a few mild address computation tricks.
+
+; CHECK: @test0(
+; CHECK: align 16
+
+define void @test0(i8* %b, i64 %n, i64 %u, i64 %y) nounwind {
+entry:
+ %c = ptrtoint i8* %b to i64
+ %d = and i64 %c, -16
+ %e = inttoptr i64 %d to double*
+ %v = mul i64 %u, 2
+ %z = and i64 %y, -2
+ %t1421 = icmp eq i64 %n, 0
+ br i1 %t1421, label %return, label %bb
+
+bb:
+ %i = phi i64 [ %indvar.next, %bb ], [ 20, %entry ]
+ %j = mul i64 %i, %v
+ %h = add i64 %j, %z
+ %t8 = getelementptr double* %e, i64 %h
+ %p = bitcast double* %t8 to <2 x double>*
+ store <2 x double><double 0.0, double 0.0>, <2 x double>* %p, align 8
+ %indvar.next = add i64 %i, 1
+ %exitcond = icmp eq i64 %indvar.next, %n
+ br i1 %exitcond, label %return, label %bb
+
+return:
+ ret void
+}
+
+; When we see a unaligned load from an insufficiently aligned global or
+; alloca, increase the alignment of the load, turning it into an aligned load.
+
+; CHECK: @test1(
+; CHECK: tmp = load
+; CHECK: GLOBAL{{.*}}align 16
+
+@GLOBAL = internal global [4 x i32] zeroinitializer
+
+define <16 x i8> @test1(<2 x i64> %x) {
+entry:
+ %tmp = load <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*), align 1
+ ret <16 x i8> %tmp
+}
+
+; When a load or store lacks an explicit alignment, add one.
+
+; CHECK: @test2(
+; CHECK: load double* %p, align 8
+; CHECK: store double %n, double* %p, align 8
+
+define double @test2(double* %p, double %n) nounwind {
+ %t = load double* %p
+ store double %n, double* %p
+ ret double %t
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/align-external.ll b/src/LLVM/test/Transforms/InstCombine/align-external.ll
new file mode 100644
index 0000000..6e8ad87
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/align-external.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; Don't assume that external global variables have their preferred
+; alignment. They may only have the ABI minimum alignment.
+
+; CHECK: %s = shl i64 %a, 3
+; CHECK: %r = or i64 %s, ptrtoint (i32* @A to i64)
+; CHECK: %q = add i64 %r, 1
+; CHECK: ret i64 %q
+
+target datalayout = "-i32:8:32"
+
+@A = external global i32
+@B = external global i32
+
+define i64 @foo(i64 %a) {
+ %t = ptrtoint i32* @A to i64
+ %s = shl i64 %a, 3
+ %r = or i64 %t, %s
+ %q = add i64 %r, 1
+ ret i64 %q
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/alloca.ll b/src/LLVM/test/Transforms/InstCombine/alloca.ll
new file mode 100644
index 0000000..9873198
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/alloca.ll
@@ -0,0 +1,46 @@
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; END.
+
+declare void @use(...)
+
+; Zero byte allocas should be deleted.
+; CHECK: @test
+; CHECK-NOT: alloca
+define void @test() {
+ %X = alloca [0 x i32] ; <[0 x i32]*> [#uses=1]
+ call void (...)* @use( [0 x i32]* %X )
+ %Y = alloca i32, i32 0 ; <i32*> [#uses=1]
+ call void (...)* @use( i32* %Y )
+ %Z = alloca { } ; <{ }*> [#uses=1]
+ call void (...)* @use( { }* %Z )
+ ret void
+}
+
+; Zero byte allocas should be deleted.
+; CHECK: @test2
+; CHECK-NOT: alloca
+define void @test2() {
+ %A = alloca i32 ; <i32*> [#uses=1]
+ store i32 123, i32* %A
+ ret void
+}
+
+; Zero byte allocas should be deleted.
+; CHECK: @test3
+; CHECK-NOT: alloca
+define void @test3() {
+ %A = alloca { i32 } ; <{ i32 }*> [#uses=1]
+ %B = getelementptr { i32 }* %A, i32 0, i32 0 ; <i32*> [#uses=1]
+ store i32 123, i32* %B
+ ret void
+}
+
+; CHECK: @test4
+; CHECK: = zext i32 %n to i64
+; CHECK: %A = alloca i32, i64 %
+define i32* @test4(i32 %n) {
+ %A = alloca i32, i32 %n
+ ret i32* %A
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/and-compare.ll b/src/LLVM/test/Transforms/InstCombine/and-compare.ll
new file mode 100644
index 0000000..67bd605
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/and-compare.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep and | count 1
+
+; Should be optimized to one and.
+define i1 @test1(i32 %a, i32 %b) {
+ %tmp1 = and i32 %a, 65280 ; <i32> [#uses=1]
+ %tmp3 = and i32 %b, 65280 ; <i32> [#uses=1]
+ %tmp = icmp ne i32 %tmp1, %tmp3 ; <i1> [#uses=1]
+ ret i1 %tmp
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/and-fcmp.ll b/src/LLVM/test/Transforms/InstCombine/and-fcmp.ll
new file mode 100644
index 0000000..91868d1
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/and-fcmp.ll
@@ -0,0 +1,34 @@
+; RUN: opt < %s -instcombine -S | grep fcmp | count 3
+; RUN: opt < %s -instcombine -S | grep ret | grep 0
+
+define zeroext i8 @t1(float %x, float %y) nounwind {
+ %a = fcmp ueq float %x, %y
+ %b = fcmp ord float %x, %y
+ %c = and i1 %a, %b
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
+
+define zeroext i8 @t2(float %x, float %y) nounwind {
+ %a = fcmp olt float %x, %y
+ %b = fcmp ord float %x, %y
+ %c = and i1 %a, %b
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
+
+define zeroext i8 @t3(float %x, float %y) nounwind {
+ %a = fcmp oge float %x, %y
+ %b = fcmp uno float %x, %y
+ %c = and i1 %a, %b
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
+
+define zeroext i8 @t4(float %x, float %y) nounwind {
+ %a = fcmp one float %y, %x
+ %b = fcmp ord float %x, %y
+ %c = and i1 %a, %b
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/and-not-or.ll b/src/LLVM/test/Transforms/InstCombine/and-not-or.ll
new file mode 100644
index 0000000..9dce7b4
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/and-not-or.ll
@@ -0,0 +1,34 @@
+; RUN: opt < %s -instcombine -S | grep {and i32 %x, %y} | count 4
+; RUN: opt < %s -instcombine -S | not grep {or}
+
+define i32 @func1(i32 %x, i32 %y) nounwind {
+entry:
+ %n = xor i32 %y, -1
+ %o = or i32 %n, %x
+ %a = and i32 %o, %y
+ ret i32 %a
+}
+
+define i32 @func2(i32 %x, i32 %y) nounwind {
+entry:
+ %n = xor i32 %y, -1
+ %o = or i32 %x, %n
+ %a = and i32 %o, %y
+ ret i32 %a
+}
+
+define i32 @func3(i32 %x, i32 %y) nounwind {
+entry:
+ %n = xor i32 %y, -1
+ %o = or i32 %n, %x
+ %a = and i32 %y, %o
+ ret i32 %a
+}
+
+define i32 @func4(i32 %x, i32 %y) nounwind {
+entry:
+ %n = xor i32 %y, -1
+ %o = or i32 %x, %n
+ %a = and i32 %y, %o
+ ret i32 %a
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/and-or-and.ll b/src/LLVM/test/Transforms/InstCombine/and-or-and.ll
new file mode 100644
index 0000000..36ce672
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/and-or-and.ll
@@ -0,0 +1,61 @@
+; If we have an 'and' of the result of an 'or', and one of the 'or' operands
+; cannot have contributed any of the resultant bits, delete the or. This
+; occurs for very common C/C++ code like this:
+;
+; struct foo { int A : 16; int B : 16; };
+; void test(struct foo *F, int X, int Y) {
+; F->A = X; F->B = Y;
+; }
+;
+; Which corresponds to test1.
+
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep {or }
+
+define i32 @test1(i32 %X, i32 %Y) {
+ %A = and i32 %X, 7 ; <i32> [#uses=1]
+ %B = and i32 %Y, 8 ; <i32> [#uses=1]
+ %C = or i32 %A, %B ; <i32> [#uses=1]
+ ;; This cannot include any bits from %Y!
+ %D = and i32 %C, 7 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test2(i32 %X, i8 %Y) {
+ %B = zext i8 %Y to i32 ; <i32> [#uses=1]
+ %C = or i32 %X, %B ; <i32> [#uses=1]
+ ;; This cannot include any bits from %Y!
+ %D = and i32 %C, 65536 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test3(i32 %X, i32 %Y) {
+ %B = shl i32 %Y, 1 ; <i32> [#uses=1]
+ %C = or i32 %X, %B ; <i32> [#uses=1]
+ ;; This cannot include any bits from %Y!
+ %D = and i32 %C, 1 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test4(i32 %X, i32 %Y) {
+ %B = lshr i32 %Y, 31 ; <i32> [#uses=1]
+ %C = or i32 %X, %B ; <i32> [#uses=1]
+ ;; This cannot include any bits from %Y!
+ %D = and i32 %C, 2 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @or_test1(i32 %X, i32 %Y) {
+ %A = and i32 %X, 1 ; <i32> [#uses=1]
+ ;; This cannot include any bits from X!
+ %B = or i32 %A, 1 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i8 @or_test2(i8 %X, i8 %Y) {
+ %A = shl i8 %X, 7 ; <i8> [#uses=1]
+ ;; This cannot include any bits from X!
+ %B = or i8 %A, -128 ; <i8> [#uses=1]
+ ret i8 %B
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/and-or-not.ll b/src/LLVM/test/Transforms/InstCombine/and-or-not.ll
new file mode 100644
index 0000000..a57871f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/and-or-not.ll
@@ -0,0 +1,46 @@
+; RUN: opt < %s -instcombine -S | grep xor | count 4
+; RUN: opt < %s -instcombine -S | not grep and
+; RUN: opt < %s -instcombine -S | not grep { or}
+
+; PR1510
+
+; These are all equivalent to A^B
+
+define i32 @test1(i32 %a, i32 %b) {
+entry:
+ %tmp3 = or i32 %b, %a ; <i32> [#uses=1]
+ %tmp3not = xor i32 %tmp3, -1 ; <i32> [#uses=1]
+ %tmp6 = and i32 %b, %a ; <i32> [#uses=1]
+ %tmp7 = or i32 %tmp6, %tmp3not ; <i32> [#uses=1]
+ %tmp7not = xor i32 %tmp7, -1 ; <i32> [#uses=1]
+ ret i32 %tmp7not
+}
+
+define i32 @test2(i32 %a, i32 %b) {
+entry:
+ %tmp3 = or i32 %b, %a ; <i32> [#uses=1]
+ %tmp6 = and i32 %b, %a ; <i32> [#uses=1]
+ %tmp6not = xor i32 %tmp6, -1 ; <i32> [#uses=1]
+ %tmp7 = and i32 %tmp3, %tmp6not ; <i32> [#uses=1]
+ ret i32 %tmp7
+}
+
+define <4 x i32> @test3(<4 x i32> %a, <4 x i32> %b) {
+entry:
+ %tmp3 = or <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
+ %tmp3not = xor <4 x i32> %tmp3, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
+ %tmp6 = and <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
+ %tmp7 = or <4 x i32> %tmp6, %tmp3not ; <<4 x i32>> [#uses=1]
+ %tmp7not = xor <4 x i32> %tmp7, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
+ ret <4 x i32> %tmp7not
+}
+
+define <4 x i32> @test4(<4 x i32> %a, <4 x i32> %b) {
+entry:
+ %tmp3 = or <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
+ %tmp6 = and <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
+ %tmp6not = xor <4 x i32> %tmp6, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
+ %tmp7 = and <4 x i32> %tmp3, %tmp6not ; <<4 x i32>> [#uses=1]
+ ret <4 x i32> %tmp7
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/and-or.ll b/src/LLVM/test/Transforms/InstCombine/and-or.ll
new file mode 100644
index 0000000..b4224b3
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/and-or.ll
@@ -0,0 +1,39 @@
+; RUN: opt < %s -instcombine -S | grep {and i32 %a, 1} | count 4
+; RUN: opt < %s -instcombine -S | grep {or i32 %0, %b} | count 4
+
+
+define i32 @func1(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %0 = or i32 %b, %a ; <i32> [#uses=1]
+ %1 = and i32 %0, 1 ; <i32> [#uses=1]
+ %2 = and i32 %b, -2 ; <i32> [#uses=1]
+ %3 = or i32 %1, %2 ; <i32> [#uses=1]
+ ret i32 %3
+}
+
+define i32 @func2(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %0 = or i32 %a, %b ; <i32> [#uses=1]
+ %1 = and i32 1, %0 ; <i32> [#uses=1]
+ %2 = and i32 -2, %b ; <i32> [#uses=1]
+ %3 = or i32 %1, %2 ; <i32> [#uses=1]
+ ret i32 %3
+}
+
+define i32 @func3(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %0 = or i32 %b, %a ; <i32> [#uses=1]
+ %1 = and i32 %0, 1 ; <i32> [#uses=1]
+ %2 = and i32 %b, -2 ; <i32> [#uses=1]
+ %3 = or i32 %2, %1 ; <i32> [#uses=1]
+ ret i32 %3
+}
+
+define i32 @func4(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %0 = or i32 %a, %b ; <i32> [#uses=1]
+ %1 = and i32 1, %0 ; <i32> [#uses=1]
+ %2 = and i32 -2, %b ; <i32> [#uses=1]
+ %3 = or i32 %2, %1 ; <i32> [#uses=1]
+ ret i32 %3
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/and-xor-merge.ll b/src/LLVM/test/Transforms/InstCombine/and-xor-merge.ll
new file mode 100644
index 0000000..3c76838
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/and-xor-merge.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine -S | grep and | count 1
+; RUN: opt < %s -instcombine -S | grep xor | count 2
+
+; (x&z) ^ (y&z) -> (x^y)&z
+define i32 @test1(i32 %x, i32 %y, i32 %z) {
+ %tmp3 = and i32 %z, %x
+ %tmp6 = and i32 %z, %y
+ %tmp7 = xor i32 %tmp3, %tmp6
+ ret i32 %tmp7
+}
+
+; (x & y) ^ (x|y) -> x^y
+define i32 @test2(i32 %x, i32 %y, i32 %z) {
+ %tmp3 = and i32 %y, %x
+ %tmp6 = or i32 %y, %x
+ %tmp7 = xor i32 %tmp3, %tmp6
+ ret i32 %tmp7
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/and.ll b/src/LLVM/test/Transforms/InstCombine/and.ll
new file mode 100644
index 0000000..1702e63
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/and.ll
@@ -0,0 +1,255 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+
+; RUN: opt < %s -instcombine -S | not grep and
+
+define i32 @test1(i32 %A) {
+ ; zero result
+ %B = and i32 %A, 0 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test2(i32 %A) {
+ ; noop
+ %B = and i32 %A, -1 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i1 @test3(i1 %A) {
+ ; always = false
+ %B = and i1 %A, false ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test4(i1 %A) {
+ ; noop
+ %B = and i1 %A, true ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i32 @test5(i32 %A) {
+ %B = and i32 %A, %A ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i1 @test6(i1 %A) {
+ %B = and i1 %A, %A ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+; A & ~A == 0
+define i32 @test7(i32 %A) {
+ %NotA = xor i32 %A, -1 ; <i32> [#uses=1]
+ %B = and i32 %A, %NotA ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+; AND associates
+define i8 @test8(i8 %A) {
+ %B = and i8 %A, 3 ; <i8> [#uses=1]
+ %C = and i8 %B, 4 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i1 @test9(i32 %A) {
+ ; Test of sign bit, convert to setle %A, 0
+ %B = and i32 %A, -2147483648 ; <i32> [#uses=1]
+ %C = icmp ne i32 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test9a(i32 %A) {
+ ; Test of sign bit, convert to setle %A, 0
+ %B = and i32 %A, -2147483648 ; <i32> [#uses=1]
+ %C = icmp ne i32 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i32 @test10(i32 %A) {
+ %B = and i32 %A, 12 ; <i32> [#uses=1]
+ %C = xor i32 %B, 15 ; <i32> [#uses=1]
+ ; (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
+ %D = and i32 %C, 1 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test11(i32 %A, i32* %P) {
+ %B = or i32 %A, 3 ; <i32> [#uses=1]
+ %C = xor i32 %B, 12 ; <i32> [#uses=2]
+ ; additional use of C
+ store i32 %C, i32* %P
+ ; %C = and uint %B, 3 --> 3
+ %D = and i32 %C, 3 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i1 @test12(i32 %A, i32 %B) {
+ %C1 = icmp ult i32 %A, %B ; <i1> [#uses=1]
+ %C2 = icmp ule i32 %A, %B ; <i1> [#uses=1]
+ ; (A < B) & (A <= B) === (A < B)
+ %D = and i1 %C1, %C2 ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test13(i32 %A, i32 %B) {
+ %C1 = icmp ult i32 %A, %B ; <i1> [#uses=1]
+ %C2 = icmp ugt i32 %A, %B ; <i1> [#uses=1]
+ ; (A < B) & (A > B) === false
+ %D = and i1 %C1, %C2 ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test14(i8 %A) {
+ %B = and i8 %A, -128 ; <i8> [#uses=1]
+ %C = icmp ne i8 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i8 @test15(i8 %A) {
+ %B = lshr i8 %A, 7 ; <i8> [#uses=1]
+ ; Always equals zero
+ %C = and i8 %B, 2 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i8 @test16(i8 %A) {
+ %B = shl i8 %A, 2 ; <i8> [#uses=1]
+ %C = and i8 %B, 3 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+;; ~(~X & Y) --> (X | ~Y)
+define i8 @test17(i8 %X, i8 %Y) {
+ %B = xor i8 %X, -1 ; <i8> [#uses=1]
+ %C = and i8 %B, %Y ; <i8> [#uses=1]
+ %D = xor i8 %C, -1 ; <i8> [#uses=1]
+ ret i8 %D
+}
+
+define i1 @test18(i32 %A) {
+ %B = and i32 %A, -128 ; <i32> [#uses=1]
+ ;; C >= 128
+ %C = icmp ne i32 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test18a(i8 %A) {
+ %B = and i8 %A, -2 ; <i8> [#uses=1]
+ %C = icmp eq i8 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i32 @test19(i32 %A) {
+ %B = shl i32 %A, 3 ; <i32> [#uses=1]
+ ;; Clearing a zero bit
+ %C = and i32 %B, -2 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i8 @test20(i8 %A) {
+ %C = lshr i8 %A, 7 ; <i8> [#uses=1]
+ ;; Unneeded
+ %D = and i8 %C, 1 ; <i8> [#uses=1]
+ ret i8 %D
+}
+
+define i1 @test22(i32 %A) {
+ %B = icmp eq i32 %A, 1 ; <i1> [#uses=1]
+ %C = icmp sge i32 %A, 3 ; <i1> [#uses=1]
+ ;; false
+ %D = and i1 %B, %C ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test23(i32 %A) {
+ %B = icmp sgt i32 %A, 1 ; <i1> [#uses=1]
+ %C = icmp sle i32 %A, 2 ; <i1> [#uses=1]
+ ;; A == 2
+ %D = and i1 %B, %C ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test24(i32 %A) {
+ %B = icmp sgt i32 %A, 1 ; <i1> [#uses=1]
+ %C = icmp ne i32 %A, 2 ; <i1> [#uses=1]
+ ;; A > 2
+ %D = and i1 %B, %C ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test25(i32 %A) {
+ %B = icmp sge i32 %A, 50 ; <i1> [#uses=1]
+ %C = icmp slt i32 %A, 100 ; <i1> [#uses=1]
+ ;; (A-50) <u 50
+ %D = and i1 %B, %C ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test26(i32 %A) {
+ %B = icmp ne i32 %A, 50 ; <i1> [#uses=1]
+ %C = icmp ne i32 %A, 51 ; <i1> [#uses=1]
+ ;; (A-50) > 1
+ %D = and i1 %B, %C ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i8 @test27(i8 %A) {
+ %B = and i8 %A, 4 ; <i8> [#uses=1]
+ %C = sub i8 %B, 16 ; <i8> [#uses=1]
+ ;; 0xF0
+ %D = and i8 %C, -16 ; <i8> [#uses=1]
+ %E = add i8 %D, 16 ; <i8> [#uses=1]
+ ret i8 %E
+}
+
+;; This is just a zero extending shr.
+define i32 @test28(i32 %X) {
+ ;; Sign extend
+ %Y = ashr i32 %X, 24 ; <i32> [#uses=1]
+ ;; Mask out sign bits
+ %Z = and i32 %Y, 255 ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+define i32 @test29(i8 %X) {
+ %Y = zext i8 %X to i32 ; <i32> [#uses=1]
+ ;; Zero extend makes this unneeded.
+ %Z = and i32 %Y, 255 ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+define i32 @test30(i1 %X) {
+ %Y = zext i1 %X to i32 ; <i32> [#uses=1]
+ %Z = and i32 %Y, 1 ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+define i32 @test31(i1 %X) {
+ %Y = zext i1 %X to i32 ; <i32> [#uses=1]
+ %Z = shl i32 %Y, 4 ; <i32> [#uses=1]
+ %A = and i32 %Z, 16 ; <i32> [#uses=1]
+ ret i32 %A
+}
+
+define i32 @test32(i32 %In) {
+ %Y = and i32 %In, 16 ; <i32> [#uses=1]
+ %Z = lshr i32 %Y, 2 ; <i32> [#uses=1]
+ %A = and i32 %Z, 1 ; <i32> [#uses=1]
+ ret i32 %A
+}
+
+;; Code corresponding to one-bit bitfield ^1.
+define i32 @test33(i32 %b) {
+ %tmp.4.mask = and i32 %b, 1 ; <i32> [#uses=1]
+ %tmp.10 = xor i32 %tmp.4.mask, 1 ; <i32> [#uses=1]
+ %tmp.12 = and i32 %b, -2 ; <i32> [#uses=1]
+ %tmp.13 = or i32 %tmp.12, %tmp.10 ; <i32> [#uses=1]
+ ret i32 %tmp.13
+}
+
+define i32 @test34(i32 %A, i32 %B) {
+ %tmp.2 = or i32 %B, %A ; <i32> [#uses=1]
+ %tmp.4 = and i32 %tmp.2, %B ; <i32> [#uses=1]
+ ret i32 %tmp.4
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/and2.ll b/src/LLVM/test/Transforms/InstCombine/and2.ll
new file mode 100644
index 0000000..531aedb
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/and2.ll
@@ -0,0 +1,44 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; PR1738
+define i1 @test1(double %X, double %Y) {
+ %tmp9 = fcmp ord double %X, 0.000000e+00
+ %tmp13 = fcmp ord double %Y, 0.000000e+00
+ %bothcond = and i1 %tmp13, %tmp9
+ ret i1 %bothcond
+; CHECK: fcmp ord double %Y, %X
+}
+
+define i1 @test2(i1 %X, i1 %Y) {
+ %a = and i1 %X, %Y
+ %b = and i1 %a, %X
+ ret i1 %b
+; CHECK: @test2
+; CHECK-NEXT: and i1 %X, %Y
+; CHECK-NEXT: ret
+}
+
+define i32 @test3(i32 %X, i32 %Y) {
+ %a = and i32 %X, %Y
+ %b = and i32 %Y, %a
+ ret i32 %b
+; CHECK: @test3
+; CHECK-NEXT: and i32 %X, %Y
+; CHECK-NEXT: ret
+}
+
+define i1 @test4(i32 %X) {
+ %a = icmp ult i32 %X, 31
+ %b = icmp slt i32 %X, 0
+ %c = and i1 %a, %b
+ ret i1 %c
+; CHECK: @test4
+; CHECK-NEXT: ret i1 false
+}
+
+; Make sure we don't go into an infinite loop with this test
+define <4 x i32> @test5(<4 x i32> %A) {
+ %1 = xor <4 x i32> %A, <i32 1, i32 2, i32 3, i32 4>
+ %2 = and <4 x i32> <i32 1, i32 2, i32 3, i32 4>, %1
+ ret <4 x i32> %2
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-add1.ll b/src/LLVM/test/Transforms/InstCombine/apint-add1.ll
new file mode 100644
index 0000000..734e15d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-add1.ll
@@ -0,0 +1,34 @@
+; This test makes sure that add instructions are properly eliminated.
+; This test is for Integer BitWidth <= 64 && BitWidth % 8 != 0.
+
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep -v OK | not grep add
+
+
+define i1 @test1(i1 %x) {
+ %tmp.2 = xor i1 %x, 1
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i1 %tmp.2, 1
+ ret i1 %tmp.4
+}
+
+define i47 @test2(i47 %x) {
+ %tmp.2 = xor i47 %x, 70368744177664
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i47 %tmp.2, 70368744177664
+ ret i47 %tmp.4
+}
+
+define i15 @test3(i15 %x) {
+ %tmp.2 = xor i15 %x, 16384
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i15 %tmp.2, 16384
+ ret i15 %tmp.4
+}
+
+define i49 @test6(i49 %x) {
+ ;; (x & 254)+1 -> (x & 254)|1
+ %tmp.2 = and i49 %x, 562949953421310
+ %tmp.4 = add i49 %tmp.2, 1
+ ret i49 %tmp.4
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-add2.ll b/src/LLVM/test/Transforms/InstCombine/apint-add2.ll
new file mode 100644
index 0000000..3de13ac
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-add2.ll
@@ -0,0 +1,46 @@
+; This test makes sure that add instructions are properly eliminated.
+; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
+
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep -v OK | not grep add
+; END.
+
+define i111 @test1(i111 %x) {
+ %tmp.2 = shl i111 1, 110
+ %tmp.4 = xor i111 %x, %tmp.2
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.6 = add i111 %tmp.4, %tmp.2
+ ret i111 %tmp.6
+}
+
+define i65 @test2(i65 %x) {
+ %tmp.0 = shl i65 1, 64
+ %tmp.2 = xor i65 %x, %tmp.0
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i65 %tmp.2, %tmp.0
+ ret i65 %tmp.4
+}
+
+define i1024 @test3(i1024 %x) {
+ %tmp.0 = shl i1024 1, 1023
+ %tmp.2 = xor i1024 %x, %tmp.0
+ ;; Add of sign bit -> xor of sign bit.
+ %tmp.4 = add i1024 %tmp.2, %tmp.0
+ ret i1024 %tmp.4
+}
+
+define i128 @test4(i128 %x) {
+ ;; If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
+ %tmp.5 = shl i128 1, 127
+ %tmp.1 = ashr i128 %tmp.5, 120
+ %tmp.2 = xor i128 %x, %tmp.1
+ %tmp.4 = add i128 %tmp.2, %tmp.5
+ ret i128 %tmp.4
+}
+
+define i77 @test6(i77 %x) {
+ ;; (x & 254)+1 -> (x & 254)|1
+ %tmp.2 = and i77 %x, 562949953421310
+ %tmp.4 = add i77 %tmp.2, 1
+ ret i77 %tmp.4
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-and-compare.ll b/src/LLVM/test/Transforms/InstCombine/apint-and-compare.ll
new file mode 100644
index 0000000..143327d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-and-compare.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine -S | grep and | count 2
+
+; Should be optimized to one and.
+define i1 @test1(i33 %a, i33 %b) {
+ %tmp1 = and i33 %a, 65280
+ %tmp3 = and i33 %b, 65280
+ %tmp = icmp ne i33 %tmp1, %tmp3
+ ret i1 %tmp
+}
+
+define i1 @test2(i999 %a, i999 %b) {
+ %tmp1 = and i999 %a, 65280
+ %tmp3 = and i999 %b, 65280
+ %tmp = icmp ne i999 %tmp1, %tmp3
+ ret i1 %tmp
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-and-or-and.ll b/src/LLVM/test/Transforms/InstCombine/apint-and-or-and.ll
new file mode 100644
index 0000000..6a9d4b2
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-and-or-and.ll
@@ -0,0 +1,50 @@
+; If we have an 'and' of the result of an 'or', and one of the 'or' operands
+; cannot have contributed any of the resultant bits, delete the or. This
+; occurs for very common C/C++ code like this:
+;
+; struct foo { int A : 16; int B : 16; };
+; void test(struct foo *F, int X, int Y) {
+; F->A = X; F->B = Y;
+; }
+;
+; Which corresponds to test1.
+;
+; This tests arbitrary precision integers.
+
+; RUN: opt < %s -instcombine -S | not grep {or }
+; END.
+
+define i17 @test1(i17 %X, i17 %Y) {
+ %A = and i17 %X, 7
+ %B = and i17 %Y, 8
+ %C = or i17 %A, %B
+ %D = and i17 %C, 7 ;; This cannot include any bits from %Y!
+ ret i17 %D
+}
+
+define i49 @test3(i49 %X, i49 %Y) {
+ %B = shl i49 %Y, 1
+ %C = or i49 %X, %B
+ %D = and i49 %C, 1 ;; This cannot include any bits from %Y!
+ ret i49 %D
+}
+
+define i67 @test4(i67 %X, i67 %Y) {
+ %B = lshr i67 %Y, 66
+ %C = or i67 %X, %B
+ %D = and i67 %C, 2 ;; This cannot include any bits from %Y!
+ ret i67 %D
+}
+
+define i231 @or_test1(i231 %X, i231 %Y) {
+ %A = and i231 %X, 1
+ %B = or i231 %A, 1 ;; This cannot include any bits from X!
+ ret i231 %B
+}
+
+define i7 @or_test2(i7 %X, i7 %Y) {
+ %A = shl i7 %X, 6
+ %B = or i7 %A, 64 ;; This cannot include any bits from X!
+ ret i7 %B
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-and-xor-merge.ll b/src/LLVM/test/Transforms/InstCombine/apint-and-xor-merge.ll
new file mode 100644
index 0000000..cac5119
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-and-xor-merge.ll
@@ -0,0 +1,22 @@
+; This test case checks that the merge of and/xor can work on arbitrary
+; precision integers.
+
+; RUN: opt < %s -instcombine -S | grep and | count 1
+; RUN: opt < %s -instcombine -S | grep xor | count 2
+
+; (x & z) ^ (y & z) -> (x ^ y) & z
+define i57 @test1(i57 %x, i57 %y, i57 %z) {
+ %tmp3 = and i57 %z, %x
+ %tmp6 = and i57 %z, %y
+ %tmp7 = xor i57 %tmp3, %tmp6
+ ret i57 %tmp7
+}
+
+; (x & y) ^ (x | y) -> x ^ y
+define i23 @test2(i23 %x, i23 %y, i23 %z) {
+ %tmp3 = and i23 %y, %x
+ %tmp6 = or i23 %y, %x
+ %tmp7 = xor i23 %tmp3, %tmp6
+ ret i23 %tmp7
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-and1.ll b/src/LLVM/test/Transforms/InstCombine/apint-and1.ll
new file mode 100644
index 0000000..7f7bc23
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-and1.ll
@@ -0,0 +1,57 @@
+; This test makes sure that and instructions are properly eliminated.
+; This test is for Integer BitWidth <= 64 && BitWidth % 8 != 0.
+
+; RUN: opt < %s -instcombine -S | not grep {and }
+; END.
+
+define i39 @test0(i39 %A) {
+ %B = and i39 %A, 0 ; zero result
+ ret i39 %B
+}
+
+define i47 @test1(i47 %A, i47 %B) {
+ ;; (~A & ~B) == (~(A | B)) - De Morgan's Law
+ %NotA = xor i47 %A, -1
+ %NotB = xor i47 %B, -1
+ %C1 = and i47 %NotA, %NotB
+ ret i47 %C1
+}
+
+define i15 @test2(i15 %x) {
+ %tmp.2 = and i15 %x, -1 ; noop
+ ret i15 %tmp.2
+}
+
+define i23 @test3(i23 %x) {
+ %tmp.0 = and i23 %x, 127
+ %tmp.2 = and i23 %tmp.0, 128
+ ret i23 %tmp.2
+}
+
+define i1 @test4(i37 %x) {
+ %A = and i37 %x, -2147483648
+ %B = icmp ne i37 %A, 0
+ ret i1 %B
+}
+
+define i7 @test5(i7 %A, i7* %P) {
+ %B = or i7 %A, 3
+ %C = xor i7 %B, 12
+ store i7 %C, i7* %P
+ %r = and i7 %C, 3
+ ret i7 %r
+}
+
+define i7 @test6(i7 %A, i7 %B) {
+ ;; ~(~X & Y) --> (X | ~Y)
+ %t0 = xor i7 %A, -1
+ %t1 = and i7 %t0, %B
+ %r = xor i7 %t1, -1
+ ret i7 %r
+}
+
+define i47 @test7(i47 %A) {
+ %X = ashr i47 %A, 39 ;; sign extend
+ %C1 = and i47 %X, 255
+ ret i47 %C1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-and2.ll b/src/LLVM/test/Transforms/InstCombine/apint-and2.ll
new file mode 100644
index 0000000..145f81a
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-and2.ll
@@ -0,0 +1,82 @@
+; This test makes sure that and instructions are properly eliminated.
+; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
+
+; RUN: opt < %s -instcombine -S | not grep {and }
+; END.
+
+
+define i999 @test0(i999 %A) {
+ %B = and i999 %A, 0 ; zero result
+ ret i999 %B
+}
+
+define i477 @test1(i477 %A, i477 %B) {
+ ;; (~A & ~B) == (~(A | B)) - De Morgan's Law
+ %NotA = xor i477 %A, -1
+ %NotB = xor i477 %B, -1
+ %C1 = and i477 %NotA, %NotB
+ ret i477 %C1
+}
+
+define i129 @tst(i129 %A, i129 %B) {
+ ;; (~A & ~B) == (~(A | B)) - De Morgan's Law
+ %NotA = xor i129 %A, -1
+ %NotB = xor i129 %B, -1
+ %C1 = and i129 %NotA, %NotB
+ ret i129 %C1
+}
+
+define i65 @test(i65 %A, i65 %B) {
+ ;; (~A & ~B) == (~(A | B)) - De Morgan's Law
+ %NotA = xor i65 %A, -1
+ %NotB = xor i65 -1, %B
+ %C1 = and i65 %NotA, %NotB
+ ret i65 %C1
+}
+
+define i66 @tes(i66 %A, i66 %B) {
+ ;; (~A & ~B) == (~(A | B)) - De Morgan's Law
+ %NotA = xor i66 %A, -1
+ %NotB = xor i66 %B, -1
+ %C1 = and i66 %NotA, %NotB
+ ret i66 %C1
+}
+
+define i1005 @test2(i1005 %x) {
+ %tmp.2 = and i1005 %x, -1 ; noop
+ ret i1005 %tmp.2
+}
+
+define i123 @test3(i123 %x) {
+ %tmp.0 = and i123 %x, 127
+ %tmp.2 = and i123 %tmp.0, 128
+ ret i123 %tmp.2
+}
+
+define i1 @test4(i737 %x) {
+ %A = and i737 %x, -2147483648
+ %B = icmp ne i737 %A, 0
+ ret i1 %B
+}
+
+define i117 @test5(i117 %A, i117* %P) {
+ %B = or i117 %A, 3
+ %C = xor i117 %B, 12
+ store i117 %C, i117* %P
+ %r = and i117 %C, 3
+ ret i117 %r
+}
+
+define i117 @test6(i117 %A, i117 %B) {
+ ;; ~(~X & Y) --> (X | ~Y)
+ %t0 = xor i117 %A, -1
+ %t1 = and i117 %t0, %B
+ %r = xor i117 %t1, -1
+ ret i117 %r
+}
+
+define i1024 @test7(i1024 %A) {
+ %X = ashr i1024 %A, 1016 ;; sign extend
+ %C1 = and i1024 %X, 255
+ ret i1024 %C1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-call-cast-target.ll b/src/LLVM/test/Transforms/InstCombine/apint-call-cast-target.ll
new file mode 100644
index 0000000..72f7570
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-call-cast-target.ll
@@ -0,0 +1,17 @@
+; RUN: opt < %s -instcombine -S | grep call | not grep bitcast
+
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+
+
+define i32 @main() {
+entry:
+ %tmp = call i32 bitcast (i7* (i999*)* @ctime to i32 (i99*)*)( i99* null )
+ ret i32 %tmp
+}
+
+define i7* @ctime(i999*) {
+entry:
+ %tmp = call i7* bitcast (i32 ()* @main to i7* ()*)( )
+ ret i7* %tmp
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-cast-and-cast.ll b/src/LLVM/test/Transforms/InstCombine/apint-cast-and-cast.ll
new file mode 100644
index 0000000..fe87928
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-cast-and-cast.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine -S | not grep bitcast
+
+define i19 @test1(i43 %val) {
+ %t1 = bitcast i43 %val to i43
+ %t2 = and i43 %t1, 1
+ %t3 = trunc i43 %t2 to i19
+ ret i19 %t3
+}
+
+define i73 @test2(i677 %val) {
+ %t1 = bitcast i677 %val to i677
+ %t2 = and i677 %t1, 1
+ %t3 = trunc i677 %t2 to i73
+ ret i73 %t3
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-cast-cast-to-and.ll b/src/LLVM/test/Transforms/InstCombine/apint-cast-cast-to-and.ll
new file mode 100644
index 0000000..9194970
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-cast-cast-to-and.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | not grep i41
+
+define i61 @test1(i61 %X) {
+ %Y = trunc i61 %X to i41 ;; Turns into an AND
+ %Z = zext i41 %Y to i61
+ ret i61 %Z
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-cast.ll b/src/LLVM/test/Transforms/InstCombine/apint-cast.ll
new file mode 100644
index 0000000..161a2c5
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-cast.ll
@@ -0,0 +1,30 @@
+; Tests to make sure elimination of casts is working correctly
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
+define i17 @test1(i17 %a) {
+ %tmp = zext i17 %a to i37 ; <i37> [#uses=2]
+ %tmp21 = lshr i37 %tmp, 8 ; <i37> [#uses=1]
+; CHECK: %tmp21 = lshr i17 %a, 8
+ %tmp5 = shl i37 %tmp, 8 ; <i37> [#uses=1]
+; CHECK: %tmp5 = shl i17 %a, 8
+ %tmp.upgrd.32 = or i37 %tmp21, %tmp5 ; <i37> [#uses=1]
+; CHECK: %tmp.upgrd.32 = or i17 %tmp21, %tmp5
+ %tmp.upgrd.3 = trunc i37 %tmp.upgrd.32 to i17 ; <i17> [#uses=1]
+ ret i17 %tmp.upgrd.3
+; CHECK: ret i17 %tmp.upgrd.32
+}
+
+define i167 @test2(i167 %a) {
+ %tmp = zext i167 %a to i577 ; <i577> [#uses=2]
+ %tmp21 = lshr i577 %tmp, 9 ; <i577> [#uses=1]
+; CHECK: %tmp21 = lshr i167 %a, 9
+ %tmp5 = shl i577 %tmp, 8 ; <i577> [#uses=1]
+; CHECK: %tmp5 = shl i167 %a, 8
+ %tmp.upgrd.32 = or i577 %tmp21, %tmp5 ; <i577> [#uses=1]
+; CHECK: %tmp.upgrd.32 = or i167 %tmp21, %tmp5
+ %tmp.upgrd.3 = trunc i577 %tmp.upgrd.32 to i167 ; <i167> [#uses=1]
+ ret i167 %tmp.upgrd.3
+; CHECK: ret i167 %tmp.upgrd.32
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-div1.ll b/src/LLVM/test/Transforms/InstCombine/apint-div1.ll
new file mode 100644
index 0000000..f015c89
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-div1.ll
@@ -0,0 +1,22 @@
+; This test makes sure that div instructions are properly eliminated.
+; This test is for Integer BitWidth < 64 && BitWidth % 2 != 0.
+;
+; RUN: opt < %s -instcombine -S | not grep div
+
+
+define i33 @test1(i33 %X) {
+ %Y = udiv i33 %X, 4096
+ ret i33 %Y
+}
+
+define i49 @test2(i49 %X) {
+ %tmp.0 = shl i49 4096, 17
+ %Y = udiv i49 %X, %tmp.0
+ ret i49 %Y
+}
+
+define i59 @test3(i59 %X, i1 %C) {
+ %V = select i1 %C, i59 1024, i59 4096
+ %R = udiv i59 %X, %V
+ ret i59 %R
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-div2.ll b/src/LLVM/test/Transforms/InstCombine/apint-div2.ll
new file mode 100644
index 0000000..61a5ae8
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-div2.ll
@@ -0,0 +1,22 @@
+; This test makes sure that div instructions are properly eliminated.
+; This test is for Integer BitWidth >= 64 && BitWidth <= 1024.
+;
+; RUN: opt < %s -instcombine -S | not grep div
+
+
+define i333 @test1(i333 %X) {
+ %Y = udiv i333 %X, 70368744177664
+ ret i333 %Y
+}
+
+define i499 @test2(i499 %X) {
+ %tmp.0 = shl i499 4096, 197
+ %Y = udiv i499 %X, %tmp.0
+ ret i499 %Y
+}
+
+define i599 @test3(i599 %X, i1 %C) {
+ %V = select i1 %C, i599 70368744177664, i599 4096
+ %R = udiv i599 %X, %V
+ ret i599 %R
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-mul1.ll b/src/LLVM/test/Transforms/InstCombine/apint-mul1.ll
new file mode 100644
index 0000000..7516d1f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-mul1.ll
@@ -0,0 +1,11 @@
+; This test makes sure that mul instructions are properly eliminated.
+; This test is for Integer BitWidth < 64 && BitWidth % 2 != 0.
+;
+
+; RUN: opt < %s -instcombine -S | not grep mul
+
+
+define i17 @test1(i17 %X) {
+ %Y = mul i17 %X, 1024
+ ret i17 %Y
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-mul2.ll b/src/LLVM/test/Transforms/InstCombine/apint-mul2.ll
new file mode 100644
index 0000000..e495f45
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-mul2.ll
@@ -0,0 +1,12 @@
+; This test makes sure that mul instructions are properly eliminated.
+; This test is for Integer BitWidth >= 64 && BitWidth % 2 >= 1024.
+;
+
+; RUN: opt < %s -instcombine -S | not grep mul
+
+
+define i177 @test1(i177 %X) {
+ %C = shl i177 1, 155
+ %Y = mul i177 %X, %C
+ ret i177 %Y
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-not.ll b/src/LLVM/test/Transforms/InstCombine/apint-not.ll
new file mode 100644
index 0000000..aabac21
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-not.ll
@@ -0,0 +1,42 @@
+; This test makes sure that the xor instructions are properly eliminated
+; when arbitrary precision integers are used.
+
+; RUN: opt < %s -instcombine -S | not grep xor
+
+define i33 @test1(i33 %A) {
+ %B = xor i33 %A, -1
+ %C = xor i33 %B, -1
+ ret i33 %C
+}
+
+define i1 @test2(i52 %A, i52 %B) {
+ %cond = icmp ule i52 %A, %B ; Can change into uge
+ %Ret = xor i1 %cond, true
+ ret i1 %Ret
+}
+
+; Test that De Morgan's law can be instcombined
+define i47 @test3(i47 %A, i47 %B) {
+ %a = xor i47 %A, -1
+ %b = xor i47 %B, -1
+ %c = and i47 %a, %b
+ %d = xor i47 %c, -1
+ ret i47 %d
+}
+
+; Test that De Morgan's law can work with constants
+define i61 @test4(i61 %A, i61 %B) {
+ %a = xor i61 %A, -1
+ %c = and i61 %a, 5 ; 5 = ~c2
+ %d = xor i61 %c, -1
+ ret i61 %d
+}
+
+; Test the mirror of De Morgan's law...
+define i71 @test5(i71 %A, i71 %B) {
+ %a = xor i71 %A, -1
+ %b = xor i71 %B, -1
+ %c = or i71 %a, %b
+ %d = xor i71 %c, -1
+ ret i71 %d
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-or1.ll b/src/LLVM/test/Transforms/InstCombine/apint-or1.ll
new file mode 100644
index 0000000..f4b37da
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-or1.ll
@@ -0,0 +1,36 @@
+; This test makes sure that or instructions are properly eliminated.
+; This test is for Integer BitWidth <= 64 && BitWidth % 2 != 0.
+;
+
+; RUN: opt < %s -instcombine -S | not grep or
+
+
+define i7 @test0(i7 %X) {
+ %Y = or i7 %X, 0
+ ret i7 %Y
+}
+
+define i17 @test1(i17 %X) {
+ %Y = or i17 %X, -1
+ ret i17 %Y
+}
+
+define i23 @test2(i23 %A) {
+ ;; A | ~A == -1
+ %NotA = xor i23 -1, %A
+ %B = or i23 %A, %NotA
+ ret i23 %B
+}
+
+define i39 @test3(i39 %V, i39 %M) {
+ ;; If we have: ((V + N) & C1) | (V & C2)
+ ;; .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
+ ;; replace with V+N.
+ %C1 = xor i39 274877906943, -1 ;; C2 = 274877906943
+ %N = and i39 %M, 274877906944
+ %A = add i39 %V, %N
+ %B = and i39 %A, %C1
+ %D = and i39 %V, 274877906943
+ %R = or i39 %B, %D
+ ret i39 %R
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-or2.ll b/src/LLVM/test/Transforms/InstCombine/apint-or2.ll
new file mode 100644
index 0000000..702776f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-or2.ll
@@ -0,0 +1,35 @@
+; This test makes sure that or instructions are properly eliminated.
+; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
+;
+; RUN: opt < %s -instcombine -S | not grep or
+
+
+define i777 @test0(i777 %X) {
+ %Y = or i777 %X, 0
+ ret i777 %Y
+}
+
+define i117 @test1(i117 %X) {
+ %Y = or i117 %X, -1
+ ret i117 %Y
+}
+
+define i1023 @test2(i1023 %A) {
+ ;; A | ~A == -1
+ %NotA = xor i1023 -1, %A
+ %B = or i1023 %A, %NotA
+ ret i1023 %B
+}
+
+define i399 @test3(i399 %V, i399 %M) {
+ ;; If we have: ((V + N) & C1) | (V & C2)
+ ;; .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
+ ;; replace with V+N.
+ %C1 = xor i399 274877906943, -1 ;; C2 = 274877906943
+ %N = and i399 %M, 18446742974197923840
+ %A = add i399 %V, %N
+ %B = and i399 %A, %C1
+ %D = and i399 %V, 274877906943
+ %R = or i399 %B, %D
+ ret i399 %R
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-rem1.ll b/src/LLVM/test/Transforms/InstCombine/apint-rem1.ll
new file mode 100644
index 0000000..245d860
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-rem1.ll
@@ -0,0 +1,22 @@
+; This test makes sure that these instructions are properly eliminated.
+; This test is for Integer BitWidth < 64 && BitWidth % 2 != 0.
+;
+; RUN: opt < %s -instcombine -S | not grep rem
+
+
+define i33 @test1(i33 %A) {
+ %B = urem i33 %A, 4096
+ ret i33 %B
+}
+
+define i49 @test2(i49 %A) {
+ %B = shl i49 4096, 11
+ %Y = urem i49 %A, %B
+ ret i49 %Y
+}
+
+define i59 @test3(i59 %X, i1 %C) {
+ %V = select i1 %C, i59 70368744177664, i59 4096
+ %R = urem i59 %X, %V
+ ret i59 %R
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-rem2.ll b/src/LLVM/test/Transforms/InstCombine/apint-rem2.ll
new file mode 100644
index 0000000..72807b9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-rem2.ll
@@ -0,0 +1,22 @@
+; This test makes sure that these instructions are properly eliminated.
+; This test is for Integer BitWidth >= 64 && BitWidth <= 1024.
+;
+; RUN: opt < %s -instcombine -S | not grep rem
+
+
+define i333 @test1(i333 %A) {
+ %B = urem i333 %A, 70368744177664
+ ret i333 %B
+}
+
+define i499 @test2(i499 %A) {
+ %B = shl i499 4096, 111
+ %Y = urem i499 %A, %B
+ ret i499 %Y
+}
+
+define i599 @test3(i599 %X, i1 %C) {
+ %V = select i1 %C, i599 70368744177664, i599 4096
+ %R = urem i599 %X, %V
+ ret i599 %R
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-select.ll b/src/LLVM/test/Transforms/InstCombine/apint-select.ll
new file mode 100644
index 0000000..2802b53
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-select.ll
@@ -0,0 +1,44 @@
+; This test makes sure that these instructions are properly eliminated.
+
+; RUN: opt < %s -instcombine -S | not grep select
+
+
+define i41 @test1(i1 %C) {
+ %V = select i1 %C, i41 1, i41 0 ; V = C
+ ret i41 %V
+}
+
+define i999 @test2(i1 %C) {
+ %V = select i1 %C, i999 0, i999 1 ; V = C
+ ret i999 %V
+}
+
+define i41 @test3(i41 %X) {
+ ;; (x <s 0) ? -1 : 0 -> ashr x, 31
+ %t = icmp slt i41 %X, 0
+ %V = select i1 %t, i41 -1, i41 0
+ ret i41 %V
+}
+
+define i1023 @test4(i1023 %X) {
+ ;; (x <s 0) ? -1 : 0 -> ashr x, 31
+ %t = icmp slt i1023 %X, 0
+ %V = select i1 %t, i1023 -1, i1023 0
+ ret i1023 %V
+}
+
+define i41 @test5(i41 %X) {
+ ;; ((X & 32) ? 32 : 0)
+ %Y = and i41 %X, 32
+ %t = icmp ne i41 %Y, 0
+ %V = select i1 %t, i41 32, i41 0
+ ret i41 %V
+}
+
+define i1023 @test6(i1023 %X) {
+ ;; ((X & 64) ? 64 : 0)
+ %Y = and i1023 %X, 64
+ %t = icmp ne i1023 %Y, 0
+ %V = select i1 %t, i1023 64, i1023 0
+ ret i1023 %V
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-shift-simplify.ll b/src/LLVM/test/Transforms/InstCombine/apint-shift-simplify.ll
new file mode 100644
index 0000000..f9e3af7
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-shift-simplify.ll
@@ -0,0 +1,23 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: egrep {shl|lshr|ashr} | count 3
+
+define i41 @test0(i41 %A, i41 %B, i41 %C) {
+ %X = shl i41 %A, %C
+ %Y = shl i41 %B, %C
+ %Z = and i41 %X, %Y
+ ret i41 %Z
+}
+
+define i57 @test1(i57 %A, i57 %B, i57 %C) {
+ %X = lshr i57 %A, %C
+ %Y = lshr i57 %B, %C
+ %Z = or i57 %X, %Y
+ ret i57 %Z
+}
+
+define i49 @test2(i49 %A, i49 %B, i49 %C) {
+ %X = ashr i49 %A, %C
+ %Y = ashr i49 %B, %C
+ %Z = xor i49 %X, %Y
+ ret i49 %Z
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-shift.ll b/src/LLVM/test/Transforms/InstCombine/apint-shift.ll
new file mode 100644
index 0000000..4f4f1dc
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-shift.ll
@@ -0,0 +1,184 @@
+; This test makes sure that shift instructions are properly eliminated
+; even with arbitrary precision integers.
+; RUN: opt < %s -instcombine -S | not grep sh
+; END.
+
+define i47 @test1(i47 %A) {
+ %B = shl i47 %A, 0 ; <i47> [#uses=1]
+ ret i47 %B
+}
+
+define i41 @test2(i7 %X) {
+ %A = zext i7 %X to i41 ; <i41> [#uses=1]
+ %B = shl i41 0, %A ; <i41> [#uses=1]
+ ret i41 %B
+}
+
+define i41 @test3(i41 %A) {
+ %B = ashr i41 %A, 0 ; <i41> [#uses=1]
+ ret i41 %B
+}
+
+define i39 @test4(i7 %X) {
+ %A = zext i7 %X to i39 ; <i39> [#uses=1]
+ %B = ashr i39 0, %A ; <i39> [#uses=1]
+ ret i39 %B
+}
+
+define i55 @test5(i55 %A) {
+ %B = lshr i55 %A, 55 ; <i55> [#uses=1]
+ ret i55 %B
+}
+
+define i32 @test5a(i32 %A) {
+ %B = shl i32 %A, 32 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i55 @test6(i55 %A) {
+ %B = shl i55 %A, 1 ; <i55> [#uses=1]
+ %C = mul i55 %B, 3 ; <i55> [#uses=1]
+ ret i55 %C
+}
+
+define i29 @test7(i8 %X) {
+ %A = zext i8 %X to i29 ; <i29> [#uses=1]
+ %B = ashr i29 -1, %A ; <i29> [#uses=1]
+ ret i29 %B
+}
+
+define i7 @test8(i7 %A) {
+ %B = shl i7 %A, 4 ; <i7> [#uses=1]
+ %C = shl i7 %B, 3 ; <i7> [#uses=1]
+ ret i7 %C
+}
+
+define i17 @test9(i17 %A) {
+ %B = shl i17 %A, 16 ; <i17> [#uses=1]
+ %C = lshr i17 %B, 16 ; <i17> [#uses=1]
+ ret i17 %C
+}
+
+define i19 @test10(i19 %A) {
+ %B = lshr i19 %A, 18 ; <i19> [#uses=1]
+ %C = shl i19 %B, 18 ; <i19> [#uses=1]
+ ret i19 %C
+}
+
+define i23 @test11(i23 %A) {
+ %a = mul i23 %A, 3 ; <i23> [#uses=1]
+ %B = lshr i23 %a, 11 ; <i23> [#uses=1]
+ %C = shl i23 %B, 12 ; <i23> [#uses=1]
+ ret i23 %C
+}
+
+define i47 @test12(i47 %A) {
+ %B = ashr i47 %A, 8 ; <i47> [#uses=1]
+ %C = shl i47 %B, 8 ; <i47> [#uses=1]
+ ret i47 %C
+}
+
+define i18 @test13(i18 %A) {
+ %a = mul i18 %A, 3 ; <i18> [#uses=1]
+ %B = ashr i18 %a, 8 ; <i18> [#uses=1]
+ %C = shl i18 %B, 9 ; <i18> [#uses=1]
+ ret i18 %C
+}
+
+define i35 @test14(i35 %A) {
+ %B = lshr i35 %A, 4 ; <i35> [#uses=1]
+ %C = or i35 %B, 1234 ; <i35> [#uses=1]
+ %D = shl i35 %C, 4 ; <i35> [#uses=1]
+ ret i35 %D
+}
+
+define i79 @test14a(i79 %A) {
+ %B = shl i79 %A, 4 ; <i79> [#uses=1]
+ %C = and i79 %B, 1234 ; <i79> [#uses=1]
+ %D = lshr i79 %C, 4 ; <i79> [#uses=1]
+ ret i79 %D
+}
+
+define i45 @test15(i1 %C) {
+ %A = select i1 %C, i45 3, i45 1 ; <i45> [#uses=1]
+ %V = shl i45 %A, 2 ; <i45> [#uses=1]
+ ret i45 %V
+}
+
+define i53 @test15a(i1 %X) {
+ %A = select i1 %X, i8 3, i8 1 ; <i8> [#uses=1]
+ %B = zext i8 %A to i53 ; <i53> [#uses=1]
+ %V = shl i53 64, %B ; <i53> [#uses=1]
+ ret i53 %V
+}
+
+define i1 @test16(i84 %X) {
+ %tmp.3 = ashr i84 %X, 4 ; <i84> [#uses=1]
+ %tmp.6 = and i84 %tmp.3, 1 ; <i84> [#uses=1]
+ %tmp.7 = icmp ne i84 %tmp.6, 0 ; <i1> [#uses=1]
+ ret i1 %tmp.7
+}
+
+define i1 @test17(i106 %A) {
+ %B = lshr i106 %A, 3 ; <i106> [#uses=1]
+ %C = icmp eq i106 %B, 1234 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test18(i11 %A) {
+ %B = lshr i11 %A, 10 ; <i11> [#uses=1]
+ %C = icmp eq i11 %B, 123 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test19(i37 %A) {
+ %B = ashr i37 %A, 2 ; <i37> [#uses=1]
+ %C = icmp eq i37 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test19a(i39 %A) {
+ %B = ashr i39 %A, 2 ; <i39> [#uses=1]
+ %C = icmp eq i39 %B, -1 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test20(i13 %A) {
+ %B = ashr i13 %A, 12 ; <i13> [#uses=1]
+ %C = icmp eq i13 %B, 123 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test21(i12 %A) {
+ %B = shl i12 %A, 6 ; <i12> [#uses=1]
+ %C = icmp eq i12 %B, -128 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test22(i14 %A) {
+ %B = shl i14 %A, 7 ; <i14> [#uses=1]
+ %C = icmp eq i14 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i11 @test23(i44 %A) {
+ %B = shl i44 %A, 33 ; <i44> [#uses=1]
+ %C = ashr i44 %B, 33 ; <i44> [#uses=1]
 %D = trunc i44 %C to i11 ; <i11> [#uses=1]
+ ret i11 %D
+}
+
+define i37 @test25(i37 %tmp.2, i37 %AA) {
+ %x = lshr i37 %AA, 17 ; <i37> [#uses=1]
+ %tmp.3 = lshr i37 %tmp.2, 17 ; <i37> [#uses=1]
+ %tmp.5 = add i37 %tmp.3, %x ; <i37> [#uses=1]
+ %tmp.6 = shl i37 %tmp.5, 17 ; <i37> [#uses=1]
+ ret i37 %tmp.6
+}
+
+define i40 @test26(i40 %A) {
+ %B = lshr i40 %A, 1 ; <i40> [#uses=1]
+ %C = bitcast i40 %B to i40 ; <i40> [#uses=1]
+ %D = shl i40 %C, 1 ; <i40> [#uses=1]
+ ret i40 %D
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-shl-trunc.ll b/src/LLVM/test/Transforms/InstCombine/apint-shl-trunc.ll
new file mode 100644
index 0000000..40efb83
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-shl-trunc.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep shl
+; END.
+
+define i1 @test0(i39 %X, i39 %A) {
+ %B = lshr i39 %X, %A
+ %D = trunc i39 %B to i1
+ ret i1 %D
+}
+
+define i1 @test1(i799 %X, i799 %A) {
+ %B = lshr i799 %X, %A
+ %D = trunc i799 %B to i1
+ ret i1 %D
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-sub.ll b/src/LLVM/test/Transforms/InstCombine/apint-sub.ll
new file mode 100644
index 0000000..ade1688
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-sub.ll
@@ -0,0 +1,141 @@
+; This test makes sure that sub instructions are properly eliminated
+; even with arbitrary precision integers.
+;
+
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep -v {sub i19 %Cok, %Bok} | grep -v {sub i25 0, %Aok} | not grep sub
+; END.
+
+define i23 @test1(i23 %A) {
+ %B = sub i23 %A, %A ; <i23> [#uses=1]
+ ret i23 %B
+}
+
+define i47 @test2(i47 %A) {
+ %B = sub i47 %A, 0 ; <i47> [#uses=1]
+ ret i47 %B
+}
+
+define i97 @test3(i97 %A) {
+ %B = sub i97 0, %A ; <i97> [#uses=1]
+ %C = sub i97 0, %B ; <i97> [#uses=1]
+ ret i97 %C
+}
+
+define i108 @test4(i108 %A, i108 %x) {
+ %B = sub i108 0, %A ; <i108> [#uses=1]
+ %C = sub i108 %x, %B ; <i108> [#uses=1]
+ ret i108 %C
+}
+
+define i19 @test5(i19 %A, i19 %Bok, i19 %Cok) {
+ %D = sub i19 %Bok, %Cok ; <i19> [#uses=1]
+ %E = sub i19 %A, %D ; <i19> [#uses=1]
+ ret i19 %E
+}
+
+define i57 @test6(i57 %A, i57 %B) {
+ %C = and i57 %A, %B ; <i57> [#uses=1]
+ %D = sub i57 %A, %C ; <i57> [#uses=1]
+ ret i57 %D
+}
+
+define i77 @test7(i77 %A) {
+ %B = sub i77 -1, %A ; <i77> [#uses=1]
+ ret i77 %B
+}
+
+define i27 @test8(i27 %A) {
+ %B = mul i27 9, %A ; <i27> [#uses=1]
+ %C = sub i27 %B, %A ; <i27> [#uses=1]
+ ret i27 %C
+}
+
+define i42 @test9(i42 %A) {
+ %B = mul i42 3, %A ; <i42> [#uses=1]
+ %C = sub i42 %A, %B ; <i42> [#uses=1]
+ ret i42 %C
+}
+
+define i124 @test10(i124 %A, i124 %B) {
+ %C = sub i124 0, %A ; <i124> [#uses=1]
+ %D = sub i124 0, %B ; <i124> [#uses=1]
+ %E = mul i124 %C, %D ; <i124> [#uses=1]
+ ret i124 %E
+}
+
+define i55 @test10a(i55 %A) {
+ %C = sub i55 0, %A ; <i55> [#uses=1]
+ %E = mul i55 %C, 7 ; <i55> [#uses=1]
+ ret i55 %E
+}
+
+define i1 @test11(i9 %A, i9 %B) {
+ %C = sub i9 %A, %B ; <i9> [#uses=1]
+ %cD = icmp ne i9 %C, 0 ; <i1> [#uses=1]
+ ret i1 %cD
+}
+
+define i43 @test12(i43 %A) {
+ %B = ashr i43 %A, 42 ; <i43> [#uses=1]
+ %C = sub i43 0, %B ; <i43> [#uses=1]
+ ret i43 %C
+}
+
+define i79 @test13(i79 %A) {
+ %B = lshr i79 %A, 78 ; <i79> [#uses=1]
+ %C = sub i79 0, %B ; <i79> [#uses=1]
+ ret i79 %C
+}
+
+define i1024 @test14(i1024 %A) {
+ %B = lshr i1024 %A, 1023 ; <i1024> [#uses=1]
+ %C = bitcast i1024 %B to i1024 ; <i1024> [#uses=1]
+ %D = sub i1024 0, %C ; <i1024> [#uses=1]
+ ret i1024 %D
+}
+
+define i14 @test15(i14 %A, i14 %B) {
+ %C = sub i14 0, %A ; <i14> [#uses=1]
+ %D = srem i14 %B, %C ; <i14> [#uses=1]
+ ret i14 %D
+}
+
+define i51 @test16(i51 %A) {
+ %X = sdiv i51 %A, 1123 ; <i51> [#uses=1]
+ %Y = sub i51 0, %X ; <i51> [#uses=1]
+ ret i51 %Y
+}
+
+; Can't fold the subtract here because the negation might overflow.
+; PR3142
+define i25 @test17(i25 %Aok) {
+ %B = sub i25 0, %Aok ; <i25> [#uses=1]
+ %C = sdiv i25 %B, 1234 ; <i25> [#uses=1]
+ ret i25 %C
+}
+
+define i128 @test18(i128 %Y) {
+ %tmp.4 = shl i128 %Y, 2 ; <i128> [#uses=1]
+ %tmp.12 = shl i128 %Y, 2 ; <i128> [#uses=1]
+ %tmp.8 = sub i128 %tmp.4, %tmp.12 ; <i128> [#uses=1]
+ ret i128 %tmp.8
+}
+
+define i39 @test19(i39 %X, i39 %Y) {
+ %Z = sub i39 %X, %Y ; <i39> [#uses=1]
+ %Q = add i39 %Z, %Y ; <i39> [#uses=1]
+ ret i39 %Q
+}
+
+define i1 @test20(i33 %g, i33 %h) {
+ %tmp.2 = sub i33 %g, %h ; <i33> [#uses=1]
+ %tmp.4 = icmp ne i33 %tmp.2, %g ; <i1> [#uses=1]
+ ret i1 %tmp.4
+}
+
+define i1 @test21(i256 %g, i256 %h) {
+ %tmp.2 = sub i256 %g, %h ; <i256> [#uses=1]
+ %tmp.4 = icmp ne i256 %tmp.2, %g; <i1> [#uses=1]
+ ret i1 %tmp.4
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-xor1.ll b/src/LLVM/test/Transforms/InstCombine/apint-xor1.ll
new file mode 100644
index 0000000..33a6ef4
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-xor1.ll
@@ -0,0 +1,50 @@
+; This test makes sure that xor instructions are properly eliminated.
+; This test is for Integer BitWidth <= 64 && BitWidth % 8 != 0.
+
+; RUN: opt < %s -instcombine -S | not grep {xor }
+
+
+define i47 @test1(i47 %A, i47 %B) {
+ ;; (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
+ %A1 = and i47 %A, 70368744177664
+ %B1 = and i47 %B, 70368744177661
+ %C1 = xor i47 %A1, %B1
+ ret i47 %C1
+}
+
+define i15 @test2(i15 %x) {
+ %tmp.2 = xor i15 %x, 0
+ ret i15 %tmp.2
+}
+
+define i23 @test3(i23 %x) {
+ %tmp.2 = xor i23 %x, %x
+ ret i23 %tmp.2
+}
+
+define i37 @test4(i37 %x) {
+ ; x ^ ~x == -1
+ %NotX = xor i37 -1, %x
+ %B = xor i37 %x, %NotX
+ ret i37 %B
+}
+
+define i7 @test5(i7 %A) {
+ ;; (A|B)^B == A & (~B)
+ %t1 = or i7 %A, 23
+ %r = xor i7 %t1, 23
+ ret i7 %r
+}
+
+define i7 @test6(i7 %A) {
+ %t1 = xor i7 %A, 23
+ %r = xor i7 %t1, 23
+ ret i7 %r
+}
+
+define i47 @test7(i47 %A) {
+ ;; (A | C1) ^ C2 -> (A | C1) & ~C2 iff (C1&C2) == C2
+ %B1 = or i47 %A, 70368744177663
+ %C1 = xor i47 %B1, 703687463
+ ret i47 %C1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-xor2.ll b/src/LLVM/test/Transforms/InstCombine/apint-xor2.ll
new file mode 100644
index 0000000..28ed9e9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-xor2.ll
@@ -0,0 +1,51 @@
+; This test makes sure that xor instructions are properly eliminated.
+; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
+
+; RUN: opt < %s -instcombine -S | not grep {xor }
+; END.
+
+
+define i447 @test1(i447 %A, i447 %B) {
+ ;; (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
+ %A1 = and i447 %A, 70368744177664
+ %B1 = and i447 %B, 70368744177663
+ %C1 = xor i447 %A1, %B1
+ ret i447 %C1
+}
+
+define i1005 @test2(i1005 %x) {
+ %tmp.2 = xor i1005 %x, 0
+ ret i1005 %tmp.2
+}
+
+define i123 @test3(i123 %x) {
+ %tmp.2 = xor i123 %x, %x
+ ret i123 %tmp.2
+}
+
+define i737 @test4(i737 %x) {
+ ; x ^ ~x == -1
+ %NotX = xor i737 -1, %x
+ %B = xor i737 %x, %NotX
+ ret i737 %B
+}
+
+define i700 @test5(i700 %A) {
+ ;; (A|B)^B == A & (~B)
+ %t1 = or i700 %A, 288230376151711743
+ %r = xor i700 %t1, 288230376151711743
+ ret i700 %r
+}
+
+define i77 @test6(i77 %A) {
+ %t1 = xor i77 %A, 23
+ %r = xor i77 %t1, 23
+ ret i77 %r
+}
+
+define i1023 @test7(i1023 %A) {
+ ;; (A | C1) ^ C2 -> (A | C1) & ~C2 iff (C1&C2) == C2
+ %B1 = or i1023 %A, 70368744177663
+ %C1 = xor i1023 %B1, 703687463
+ ret i1023 %C1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-zext1.ll b/src/LLVM/test/Transforms/InstCombine/apint-zext1.ll
new file mode 100644
index 0000000..a05470c
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-zext1.ll
@@ -0,0 +1,11 @@
+; Tests to make sure elimination of casts is working correctly
+; This test is for Integer BitWidth <= 64 && BitWidth % 2 != 0.
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i47 @test_sext_zext(i11 %A) {
+ %c1 = zext i11 %A to i39
+ %c2 = sext i39 %c1 to i47
+ ret i47 %c2
+; CHECK: %c2 = zext i11 %A to i47
+; CHECK: ret i47 %c2
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/apint-zext2.ll b/src/LLVM/test/Transforms/InstCombine/apint-zext2.ll
new file mode 100644
index 0000000..94b9ca9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/apint-zext2.ll
@@ -0,0 +1,11 @@
+; Tests to make sure elimination of casts is working correctly
+; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i1024 @test_sext_zext(i77 %A) {
+ %c1 = zext i77 %A to i533
+ %c2 = sext i533 %c1 to i1024
+ ret i1024 %c2
+; CHECK: %c2 = zext i77 %A to i1024
+; CHECK: ret i1024 %c2
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/ashr-nop.ll b/src/LLVM/test/Transforms/InstCombine/ashr-nop.ll
new file mode 100644
index 0000000..870ede3
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/ashr-nop.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | not grep ashr
+
+define i32 @foo(i32 %x) {
+ %o = and i32 %x, 1
+ %n = add i32 %o, -1
+ %t = ashr i32 %n, 17
+ ret i32 %t
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/atomic.ll b/src/LLVM/test/Transforms/InstCombine/atomic.ll
new file mode 100644
index 0000000..097cf5e
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/atomic.ll
@@ -0,0 +1,24 @@
+; RUN: opt -S < %s -instcombine | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-macosx10.7.0"
+
+; Check transforms involving atomic operations
+
+define i32* @test1(i8** %p) {
+; CHECK: define i32* @test1
+; CHECK: load atomic i8** %p monotonic, align 8
+ %c = bitcast i8** %p to i32**
+ %r = load atomic i32** %c monotonic, align 8
+ ret i32* %r
+}
+
+define i32 @test2(i32* %p) {
+; CHECK: define i32 @test2
+; CHECK: %x = load atomic i32* %p seq_cst, align 4
+; CHECK: shl i32 %x, 1
+ %x = load atomic i32* %p seq_cst, align 4
+ %y = load i32* %p, align 4
+ %z = add i32 %x, %y
+ ret i32 %z
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/badmalloc.ll b/src/LLVM/test/Transforms/InstCombine/badmalloc.ll
new file mode 100644
index 0000000..f5a623d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/badmalloc.ll
@@ -0,0 +1,20 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+target triple = "x86_64-apple-darwin10.0"
+
+declare noalias i8* @malloc(i64) nounwind
+declare void @free(i8*)
+
+; PR5130
+define i1 @test1() {
+ %A = call noalias i8* @malloc(i64 4) nounwind
+ %B = icmp eq i8* %A, null
+ store i8 0, i8* %A
+
+ call void @free(i8* %A)
+ ret i1 %B
+
+; CHECK: @test1
+; CHECK: ret i1 %B
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/binop-cast.ll b/src/LLVM/test/Transforms/InstCombine/binop-cast.ll
new file mode 100644
index 0000000..27b544d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/binop-cast.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @testAdd(i32 %X, i32 %Y) {
+ %tmp = add i32 %X, %Y
+; CHECK: %tmp = add i32 %X, %Y
+ %tmp.l = bitcast i32 %tmp to i32
+ ret i32 %tmp.l
+; CHECK: ret i32 %tmp
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/bit-checks.ll b/src/LLVM/test/Transforms/InstCombine/bit-checks.ll
new file mode 100644
index 0000000..79a096f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/bit-checks.ll
@@ -0,0 +1,372 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep {tobool}
+; END.
+define i32 @main(i32 %argc, i8** %argv) nounwind ssp {
+entry:
+ %and = and i32 %argc, 1 ; <i32> [#uses=1]
+ %tobool = icmp ne i32 %and, 0 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, 2 ; <i32> [#uses=1]
+ %tobool3 = icmp ne i32 %and2, 0 ; <i1> [#uses=1]
+ %or.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %retval.0 = select i1 %or.cond, i32 2, i32 1 ; <i32> [#uses=1]
+ ret i32 %retval.0
+}
+
+define i32 @main2(i32 %argc, i8** nocapture %argv) nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, 1 ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %and, 0 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, 2 ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %and2, 0 ; <i1> [#uses=1]
+ %or.cond = or i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %or.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+; tests to check combining (icmp eq (A & B), C) & (icmp eq (A & D), E)
+; tests to check if (icmp eq (A & B), 0) is treated like (icmp eq (A & B), B)
+; if B is a single bit constant
+
+; (icmp eq (A & B), 0) & (icmp eq (A & D), 0) -> (icmp eq (A & (B|D)), 0)
+define i32 @main3(i32 %argc, i8** nocapture %argv) nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, 7 ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %and, 0 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, 48 ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %and2, 0 ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+define i32 @main3b(i32 %argc, i8** nocapture %argv) nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, 7 ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %and, 0 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, 16 ; <i32> [#uses=1]
+ %tobool3 = icmp ne i32 %and2, 16 ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+define i32 @main3e_like(i32 %argc, i32 %argc2, i32 %argc3, i8** nocapture %argv)
+ nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, %argc2 ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %and, 0 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, %argc3 ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %and2, 0 ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+; (icmp ne (A & B), 0) | (icmp ne (A & D), 0) -> (icmp ne (A & (B|D)), 0)
+define i32 @main3c(i32 %argc, i8** nocapture %argv) nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, 7 ; <i32> [#uses=1]
+ %tobool = icmp ne i32 %and, 0 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, 48 ; <i32> [#uses=1]
+ %tobool3 = icmp ne i32 %and2, 0 ; <i1> [#uses=1]
+ %or.cond = or i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %or.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+define i32 @main3d(i32 %argc, i8** nocapture %argv) nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, 7 ; <i32> [#uses=1]
+ %tobool = icmp ne i32 %and, 0 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, 16 ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %and2, 16 ; <i1> [#uses=1]
+ %or.cond = or i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %or.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+define i32 @main3f_like(i32 %argc, i32 %argc2, i32 %argc3, i8** nocapture %argv)
+ nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, %argc2 ; <i32> [#uses=1]
+ %tobool = icmp ne i32 %and, 0 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, %argc3 ; <i32> [#uses=1]
+ %tobool3 = icmp ne i32 %and2, 0 ; <i1> [#uses=1]
+ %or.cond = or i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %or.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+; (icmp eq (A & B), B) & (icmp eq (A & D), D) -> (icmp eq (A & (B|D)), (B|D))
+define i32 @main4(i32 %argc, i8** nocapture %argv) nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, 7 ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %and, 7 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, 48 ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %and2, 48 ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+define i32 @main4b(i32 %argc, i8** nocapture %argv) nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, 7 ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %and, 7 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, 16 ; <i32> [#uses=1]
+ %tobool3 = icmp ne i32 %and2, 0 ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+define i32 @main4e_like(i32 %argc, i32 %argc2, i32 %argc3, i8** nocapture %argv)
+ nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, %argc2 ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %and, %argc2 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, %argc3 ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %and2, %argc3 ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+; (icmp ne (A & B), B) | (icmp ne (A & D), D) -> (icmp ne (A & (B|D)), (B|D))
+define i32 @main4c(i32 %argc, i8** nocapture %argv) nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, 7 ; <i32> [#uses=1]
+ %tobool = icmp ne i32 %and, 7 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, 48 ; <i32> [#uses=1]
+ %tobool3 = icmp ne i32 %and2, 48 ; <i1> [#uses=1]
+ %or.cond = or i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %or.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+define i32 @main4d(i32 %argc, i8** nocapture %argv) nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, 7 ; <i32> [#uses=1]
+ %tobool = icmp ne i32 %and, 7 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, 16 ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %and2, 0 ; <i1> [#uses=1]
+ %or.cond = or i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %or.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+define i32 @main4f_like(i32 %argc, i32 %argc2, i32 %argc3, i8** nocapture %argv)
+ nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, %argc2 ; <i32> [#uses=1]
+ %tobool = icmp ne i32 %and, %argc2 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, %argc3 ; <i32> [#uses=1]
+ %tobool3 = icmp ne i32 %and2, %argc3 ; <i1> [#uses=1]
+ %or.cond = or i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %or.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+; (icmp eq (A & B), A) & (icmp eq (A & D), A) -> (icmp eq (A & (B&D)), A)
+define i32 @main5_like(i32 %argc, i32 %argc2, i8** nocapture %argv)
+ nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, 7 ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %and, 7 ; <i1> [#uses=1]
+ %and2 = and i32 %argc2, 7 ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %and2, 7 ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+define i32 @main5e_like(i32 %argc, i32 %argc2, i32 %argc3, i8** nocapture %argv)
+ nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, %argc2 ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %and, %argc ; <i1> [#uses=1]
+ %and2 = and i32 %argc, %argc3 ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %and2, %argc ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+; (icmp ne (A & B), A) | (icmp ne (A & D), A) -> (icmp ne (A & (B&D)), A)
+define i32 @main5c_like(i32 %argc, i32 %argc2, i8** nocapture %argv)
+ nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, 7 ; <i32> [#uses=1]
+ %tobool = icmp ne i32 %and, 7 ; <i1> [#uses=1]
+ %and2 = and i32 %argc2, 7 ; <i32> [#uses=1]
+ %tobool3 = icmp ne i32 %and2, 7 ; <i1> [#uses=1]
+ %or.cond = or i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %or.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+define i32 @main5f_like(i32 %argc, i32 %argc2, i32 %argc3, i8** nocapture %argv)
+ nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, %argc2 ; <i32> [#uses=1]
+ %tobool = icmp ne i32 %and, %argc ; <i1> [#uses=1]
+ %and2 = and i32 %argc, %argc3 ; <i32> [#uses=1]
+ %tobool3 = icmp ne i32 %and2, %argc ; <i1> [#uses=1]
+ %or.cond = or i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %or.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+; (icmp eq (A & B), C) & (icmp eq (A & D), E) -> (icmp eq (A & (B|D)), (C|E))
+; if B, C, D, E are constant, and it's possible
+define i32 @main6(i32 %argc, i8** nocapture %argv) nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, 7 ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %and, 3 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, 48 ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %and2, 16 ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+define i32 @main6b(i32 %argc, i8** nocapture %argv) nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, 7 ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %and, 3 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, 16 ; <i32> [#uses=1]
+ %tobool3 = icmp ne i32 %and2, 0 ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+; (icmp ne (A & B), C) | (icmp ne (A & D), E) -> (icmp ne (A & (B|D)), (C|E))
+; if B, C, D, E are constant, and it's possible
+define i32 @main6c(i32 %argc, i8** nocapture %argv) nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, 7 ; <i32> [#uses=1]
+ %tobool = icmp ne i32 %and, 3 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, 48 ; <i32> [#uses=1]
+ %tobool3 = icmp ne i32 %and2, 16 ; <i1> [#uses=1]
+ %or.cond = or i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %or.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+define i32 @main6d(i32 %argc, i8** nocapture %argv) nounwind readnone ssp {
+entry:
+ %and = and i32 %argc, 7 ; <i32> [#uses=1]
+ %tobool = icmp ne i32 %and, 3 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, 16 ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %and2, 0 ; <i1> [#uses=1]
+ %or.cond = or i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %or.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+; test parameter permutations
+; (B & A) == B & (D & A) == D
+define i32 @main7a(i32 %argc, i32 %argc2, i32 %argc3, i8** nocapture %argv)
+ nounwind readnone ssp {
+entry:
+ %and1 = and i32 %argc2, %argc ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %and1, %argc2 ; <i1> [#uses=1]
+ %and2 = and i32 %argc3, %argc ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %and2, %argc3 ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+; B == (A & B) & D == (A & D)
+define i32 @main7b(i32 %argc, i32 %argc2, i32 %argc3, i8** nocapture %argv)
+ nounwind readnone ssp {
+entry:
+ %and1 = and i32 %argc, %argc2 ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %argc2, %and1 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, %argc3 ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %argc3, %and2 ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+; B == (B & A) & D == (D & A)
+define i32 @main7c(i32 %argc, i32 %argc2, i32 %argc3, i8** nocapture %argv)
+ nounwind readnone ssp {
+entry:
+ %and1 = and i32 %argc2, %argc ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %argc2, %and1 ; <i1> [#uses=1]
+ %and2 = and i32 %argc3, %argc ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %argc3, %and2 ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+; (A & (B & C)) == (B & C) & (A & (D & E)) == (D & E)
+define i32 @main7d(i32 %argc, i32 %argc2, i32 %argc3,
+ i32 %argc4, i32 %argc5, i8** nocapture %argv)
+ nounwind readnone ssp {
+entry:
+ %bc = and i32 %argc2, %argc4 ; <i32> [#uses=1]
+ %de = and i32 %argc3, %argc5 ; <i32> [#uses=1]
+ %and1 = and i32 %argc, %bc ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %and1, %bc ; <i1> [#uses=1]
+ %and2 = and i32 %argc, %de ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %and2, %de ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+; ((B & C) & A) == (B & C) & ((D & E) & A) == (D & E)
+define i32 @main7e(i32 %argc, i32 %argc2, i32 %argc3,
+ i32 %argc4, i32 %argc5, i8** nocapture %argv)
+ nounwind readnone ssp {
+entry:
+ %bc = and i32 %argc2, %argc4 ; <i32> [#uses=1]
+ %de = and i32 %argc3, %argc5 ; <i32> [#uses=1]
+ %and1 = and i32 %bc, %argc ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %and1, %bc ; <i1> [#uses=1]
+ %and2 = and i32 %de, %argc ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %and2, %de ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+; (B & C) == (A & (B & C)) & (D & E) == (A & (D & E))
+define i32 @main7f(i32 %argc, i32 %argc2, i32 %argc3,
+ i32 %argc4, i32 %argc5, i8** nocapture %argv)
+ nounwind readnone ssp {
+entry:
+ %bc = and i32 %argc2, %argc4 ; <i32> [#uses=1]
+ %de = and i32 %argc3, %argc5 ; <i32> [#uses=1]
+ %and1 = and i32 %argc, %bc ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %bc, %and1 ; <i1> [#uses=1]
+ %and2 = and i32 %argc, %de ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %de, %and2 ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
+
+; (B & C) == ((B & C) & A) & (D & E) == ((D & E) & A)
+define i32 @main7g(i32 %argc, i32 %argc2, i32 %argc3,
+ i32 %argc4, i32 %argc5, i8** nocapture %argv)
+ nounwind readnone ssp {
+entry:
+ %bc = and i32 %argc2, %argc4 ; <i32> [#uses=1]
+ %de = and i32 %argc3, %argc5 ; <i32> [#uses=1]
+ %and1 = and i32 %bc, %argc ; <i32> [#uses=1]
+ %tobool = icmp eq i32 %bc, %and1 ; <i1> [#uses=1]
+ %and2 = and i32 %de, %argc ; <i32> [#uses=1]
+ %tobool3 = icmp eq i32 %de, %and2 ; <i1> [#uses=1]
+ %and.cond = and i1 %tobool, %tobool3 ; <i1> [#uses=1]
+ %storemerge = select i1 %and.cond, i32 0, i32 1 ; <i32> [#uses=1]
+ ret i32 %storemerge
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/bit-tracking.ll b/src/LLVM/test/Transforms/InstCombine/bit-tracking.ll
new file mode 100644
index 0000000..755fc4d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/bit-tracking.ll
@@ -0,0 +1,26 @@
+; This file contains various testcases that require tracking whether bits are
+; set or cleared by various instructions.
+; RUN: opt < %s -instcombine -instcombine -S |\
+; RUN: not grep %ELIM
+
+; Reduce down to a single XOR
+define i32 @test3(i32 %B) {
+ %ELIMinc = and i32 %B, 1 ; <i32> [#uses=1]
+ %tmp.5 = xor i32 %ELIMinc, 1 ; <i32> [#uses=1]
+ %ELIM7 = and i32 %B, -2 ; <i32> [#uses=1]
+ %tmp.8 = or i32 %tmp.5, %ELIM7 ; <i32> [#uses=1]
+ ret i32 %tmp.8
+}
+
+; Finally, a bigger case where we chain things together. This corresponds to
+; incrementing a single-bit bitfield, which should become just an xor.
+define i32 @test4(i32 %B) {
+ %ELIM3 = shl i32 %B, 31 ; <i32> [#uses=1]
+ %ELIM4 = ashr i32 %ELIM3, 31 ; <i32> [#uses=1]
+ %inc = add i32 %ELIM4, 1 ; <i32> [#uses=1]
+ %ELIM5 = and i32 %inc, 1 ; <i32> [#uses=1]
+ %ELIM7 = and i32 %B, -2 ; <i32> [#uses=1]
+ %tmp.8 = or i32 %ELIM5, %ELIM7 ; <i32> [#uses=1]
+ ret i32 %tmp.8
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/bitcast-sext-vector.ll b/src/LLVM/test/Transforms/InstCombine/bitcast-sext-vector.ll
new file mode 100644
index 0000000..d70bdba
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/bitcast-sext-vector.ll
@@ -0,0 +1,11 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; CHECK: sext
+; Don't fold zero/sign extensions with a bitcast between a vector and scalar.
+
+define i32 @t(<4 x i8> %src1, <4 x i8> %src2) nounwind readonly {
+entry:
+ %cmp = icmp eq <4 x i8> %src1, %src2; <<4 x i1>> [#uses=1]
+ %sext = sext <4 x i1> %cmp to <4 x i8>
+ %val = bitcast <4 x i8> %sext to i32
+ ret i32 %val
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/bitcast-store.ll b/src/LLVM/test/Transforms/InstCombine/bitcast-store.ll
new file mode 100644
index 0000000..e4a61e9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/bitcast-store.ll
@@ -0,0 +1,21 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+; Instcombine should preserve metadata and alignment while
+; folding a bitcast into a store.
+
+; CHECK: store i32 (...)** bitcast (i8** getelementptr inbounds ([5 x i8*]* @G, i64 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 16, !tag !0
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+
+%struct.A = type { i32 (...)** }
+
+@G = external constant [5 x i8*]
+
+define void @foo(%struct.A* %a) nounwind {
+entry:
+ %0 = bitcast %struct.A* %a to i8***
+ store i8** getelementptr inbounds ([5 x i8*]* @G, i64 0, i64 2), i8*** %0, align 16, !tag !0
+ ret void
+}
+
+!0 = metadata !{metadata !"hello"}
diff --git a/src/LLVM/test/Transforms/InstCombine/bitcast-vec-canon.ll b/src/LLVM/test/Transforms/InstCombine/bitcast-vec-canon.ll
new file mode 100644
index 0000000..d27765e
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/bitcast-vec-canon.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine -S | grep element | count 4
+
+define double @a(<1 x i64> %y) {
+ %c = bitcast <1 x i64> %y to double
+ ret double %c
+}
+
+define i64 @b(<1 x i64> %y) {
+ %c = bitcast <1 x i64> %y to i64
+ ret i64 %c
+}
+
+define <1 x i64> @c(double %y) {
+ %c = bitcast double %y to <1 x i64>
+ ret <1 x i64> %c
+}
+
+define <1 x i64> @d(i64 %y) {
+ %c = bitcast i64 %y to <1 x i64>
+ ret <1 x i64> %c
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/bitcast-vec-uniform.ll b/src/LLVM/test/Transforms/InstCombine/bitcast-vec-uniform.ll
new file mode 100644
index 0000000..2f42d1e
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/bitcast-vec-uniform.ll
@@ -0,0 +1,70 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; CHECK: @a
+; CHECK-NOT: bitcast
+; CHECK: ret
+define <4 x i32> @a(<1 x i64> %y) {
+ %c = bitcast <2 x i64> <i64 0, i64 0> to <4 x i32>
+ ret <4 x i32> %c
+}
+
+; CHECK: @b
+; CHECK-NOT: bitcast
+; CHECK: ret
+
+define <4 x i32> @b(<1 x i64> %y) {
+ %c = bitcast <2 x i64> <i64 -1, i64 -1> to <4 x i32>
+ ret <4 x i32> %c
+}
+
+; CHECK: @foo
+; CHECK-NOT: bitcast
+; CHECK: ret
+
+; from MultiSource/Benchmarks/Bullet
+define <2 x float> @foo() {
+ %cast = bitcast i64 -1 to <2 x float>
+ ret <2 x float> %cast
+}
+
+
+; CHECK: @foo2
+; CHECK-NOT: bitcast
+; CHECK: ret
+define <2 x double> @foo2() {
+ %cast = bitcast i128 -1 to <2 x double>
+ ret <2 x double> %cast
+}
+
+; CHECK: @foo3
+; CHECK-NOT: bitcast
+; CHECK: ret
+define <1 x float> @foo3() {
+ %cast = bitcast i32 -1 to <1 x float>
+ ret <1 x float> %cast
+}
+
+; CHECK: @foo4
+; CHECK-NOT: bitcast
+; CHECK: ret
+define float @foo4() {
+ %cast = bitcast <1 x i32 ><i32 -1> to float
+ ret float %cast
+}
+
+; CHECK: @foo5
+; CHECK-NOT: bitcast
+; CHECK: ret
+define double @foo5() {
+ %cast = bitcast <2 x i32 ><i32 -1, i32 -1> to double
+ ret double %cast
+}
+
+
+; CHECK: @foo6
+; CHECK-NOT: bitcast
+; CHECK: ret
+define <2 x double> @foo6() {
+ %cast = bitcast <4 x i32><i32 -1, i32 -1, i32 -1, i32 -1> to <2 x double>
+ ret <2 x double> %cast
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/bitcast-vector-fold.ll b/src/LLVM/test/Transforms/InstCombine/bitcast-vector-fold.ll
new file mode 100644
index 0000000..8feec22
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/bitcast-vector-fold.ll
@@ -0,0 +1,33 @@
+; RUN: opt < %s -instcombine -S | not grep bitcast
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i686-apple-darwin8"
+
+define <2 x i64> @test1() {
+ %tmp3 = bitcast <4 x i32> < i32 0, i32 1, i32 2, i32 3 > to <2 x i64>
+ ret <2 x i64> %tmp3
+}
+
+define <4 x i32> @test2() {
+ %tmp3 = bitcast <2 x i64> < i64 0, i64 1 > to <4 x i32>
+ ret <4 x i32> %tmp3
+}
+
+define <2 x double> @test3() {
+ %tmp3 = bitcast <4 x i32> < i32 0, i32 1, i32 2, i32 3 > to <2 x double>
+ ret <2 x double> %tmp3
+}
+
+define <4 x float> @test4() {
+ %tmp3 = bitcast <2 x i64> < i64 0, i64 1 > to <4 x float>
+ ret <4 x float> %tmp3
+}
+
+define <2 x i64> @test5() {
+ %tmp3 = bitcast <4 x float> <float 0.0, float 1.0, float 2.0, float 3.0> to <2 x i64>
+ ret <2 x i64> %tmp3
+}
+
+define <4 x i32> @test6() {
+ %tmp3 = bitcast <2 x double> <double 0.5, double 1.0> to <4 x i32>
+ ret <4 x i32> %tmp3
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/bitcast.ll b/src/LLVM/test/Transforms/InstCombine/bitcast.ll
new file mode 100644
index 0000000..8f6ae7d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/bitcast.ll
@@ -0,0 +1,139 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-darwin10.0.0"
+
+; Bitcasts between vectors and scalars are valid.
+; PR4487
+define i32 @test1(i64 %a) {
+ %t1 = bitcast i64 %a to <2 x i32>
+ %t2 = bitcast i64 %a to <2 x i32>
+ %t3 = xor <2 x i32> %t1, %t2
+ %t4 = extractelement <2 x i32> %t3, i32 0
+ ret i32 %t4
+
+; CHECK: @test1
+; CHECK: ret i32 0
+}
+
+; Optimize bitcasts that are extracting low element of vector. This happens
+; because of SRoA.
+; rdar://7892780
+define float @test2(<2 x float> %A, <2 x i32> %B) {
+ %tmp28 = bitcast <2 x float> %A to i64 ; <i64> [#uses=2]
+ %tmp23 = trunc i64 %tmp28 to i32 ; <i32> [#uses=1]
+ %tmp24 = bitcast i32 %tmp23 to float ; <float> [#uses=1]
+
+ %tmp = bitcast <2 x i32> %B to i64
+ %tmp2 = trunc i64 %tmp to i32 ; <i32> [#uses=1]
+ %tmp4 = bitcast i32 %tmp2 to float ; <float> [#uses=1]
+
+ %add = fadd float %tmp24, %tmp4
+ ret float %add
+
+; CHECK: @test2
+; CHECK-NEXT: %tmp24 = extractelement <2 x float> %A, i32 0
+; CHECK-NEXT: bitcast <2 x i32> %B to <2 x float>
+; CHECK-NEXT: %tmp4 = extractelement <2 x float> {{.*}}, i32 0
+; CHECK-NEXT: %add = fadd float %tmp24, %tmp4
+; CHECK-NEXT: ret float %add
+}
+
+; Optimize bitcasts that are extracting other elements of a vector. This
+; happens because of SRoA.
+; rdar://7892780
+define float @test3(<2 x float> %A, <2 x i64> %B) {
+ %tmp28 = bitcast <2 x float> %A to i64
+ %tmp29 = lshr i64 %tmp28, 32
+ %tmp23 = trunc i64 %tmp29 to i32
+ %tmp24 = bitcast i32 %tmp23 to float
+
+ %tmp = bitcast <2 x i64> %B to i128
+ %tmp1 = lshr i128 %tmp, 64
+ %tmp2 = trunc i128 %tmp1 to i32
+ %tmp4 = bitcast i32 %tmp2 to float
+
+ %add = fadd float %tmp24, %tmp4
+ ret float %add
+
+; CHECK: @test3
+; CHECK-NEXT: %tmp24 = extractelement <2 x float> %A, i32 1
+; CHECK-NEXT: bitcast <2 x i64> %B to <4 x float>
+; CHECK-NEXT: %tmp4 = extractelement <4 x float> {{.*}}, i32 2
+; CHECK-NEXT: %add = fadd float %tmp24, %tmp4
+; CHECK-NEXT: ret float %add
+}
+
+
+define <2 x i32> @test4(i32 %A, i32 %B){
+ %tmp38 = zext i32 %A to i64
+ %tmp32 = zext i32 %B to i64
+ %tmp33 = shl i64 %tmp32, 32
+ %ins35 = or i64 %tmp33, %tmp38
+ %tmp43 = bitcast i64 %ins35 to <2 x i32>
+ ret <2 x i32> %tmp43
+ ; CHECK: @test4
+ ; CHECK-NEXT: insertelement <2 x i32> undef, i32 %A, i32 0
+ ; CHECK-NEXT: insertelement <2 x i32> {{.*}}, i32 %B, i32 1
+ ; CHECK-NEXT: ret <2 x i32>
+
+}
+
+; rdar://8360454
+define <2 x float> @test5(float %A, float %B) {
+ %tmp37 = bitcast float %A to i32
+ %tmp38 = zext i32 %tmp37 to i64
+ %tmp31 = bitcast float %B to i32
+ %tmp32 = zext i32 %tmp31 to i64
+ %tmp33 = shl i64 %tmp32, 32
+ %ins35 = or i64 %tmp33, %tmp38
+ %tmp43 = bitcast i64 %ins35 to <2 x float>
+ ret <2 x float> %tmp43
+ ; CHECK: @test5
+ ; CHECK-NEXT: insertelement <2 x float> undef, float %A, i32 0
+ ; CHECK-NEXT: insertelement <2 x float> {{.*}}, float %B, i32 1
+ ; CHECK-NEXT: ret <2 x float>
+}
+
+define <2 x float> @test6(float %A){
+ %tmp23 = bitcast float %A to i32 ; <i32> [#uses=1]
+ %tmp24 = zext i32 %tmp23 to i64 ; <i64> [#uses=1]
+ %tmp25 = shl i64 %tmp24, 32 ; <i64> [#uses=1]
+ %mask20 = or i64 %tmp25, 1109917696 ; <i64> [#uses=1]
+ %tmp35 = bitcast i64 %mask20 to <2 x float> ; <<2 x float>> [#uses=1]
+ ret <2 x float> %tmp35
+; CHECK: @test6
+; CHECK-NEXT: insertelement <2 x float> <float 4.200000e+01, float undef>, float %A, i32 1
+; CHECK: ret
+}
+
+define i64 @ISPC0(i64 %in) {
+ %out = and i64 %in, xor (i64 bitcast (<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1> to i64), i64 -1)
+ ret i64 %out
+; CHECK: @ISPC0
+; CHECK: ret i64 0
+}
+
+
+define i64 @Vec2(i64 %in) {
+ %out = and i64 %in, xor (i64 bitcast (<4 x i16> <i16 0, i16 0, i16 0, i16 0> to i64), i64 0)
+ ret i64 %out
+; CHECK: @Vec2
+; CHECK: ret i64 0
+}
+
+define i64 @All11(i64 %in) {
+ %out = and i64 %in, xor (i64 bitcast (<2 x float> bitcast (i64 -1 to <2 x float>) to i64), i64 -1)
+ ret i64 %out
+; CHECK: @All11
+; CHECK: ret i64 0
+}
+
+
+define i32 @All111(i32 %in) {
+ %out = and i32 %in, xor (i32 bitcast (<1 x float> bitcast (i32 -1 to <1 x float>) to i32), i32 -1)
+ ret i32 %out
+; CHECK: @All111
+; CHECK: ret i32 0
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/bitcount.ll b/src/LLVM/test/Transforms/InstCombine/bitcount.ll
new file mode 100644
index 0000000..d46eef5
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/bitcount.ll
@@ -0,0 +1,19 @@
+; Tests to make sure bit counts of constants are folded
+; RUN: opt < %s -instcombine -S | grep {ret i32 19}
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep -v declare | not grep llvm.ct
+
+declare i31 @llvm.ctpop.i31(i31 %val)
+declare i32 @llvm.cttz.i32(i32 %val)
+declare i33 @llvm.ctlz.i33(i33 %val)
+
+define i32 @test(i32 %A) {
+ %c1 = call i31 @llvm.ctpop.i31(i31 12415124)
+ %c2 = call i32 @llvm.cttz.i32(i32 87359874)
+ %c3 = call i33 @llvm.ctlz.i33(i33 87359874)
+ %t1 = zext i31 %c1 to i32
+ %t3 = trunc i33 %c3 to i32
+ %r1 = add i32 %t1, %c2
+ %r2 = add i32 %r1, %t3
+ ret i32 %r2
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/bittest.ll b/src/LLVM/test/Transforms/InstCombine/bittest.ll
new file mode 100644
index 0000000..1f5a427
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/bittest.ll
@@ -0,0 +1,30 @@
+; RUN: opt < %s -instcombine -simplifycfg -S |\
+; RUN: not grep {call void @abort}
+
+@b_rec.0 = external global i32 ; <i32*> [#uses=2]
+
+define void @_Z12h000007_testv(i32* %P) {
+entry:
+ %tmp.2 = load i32* @b_rec.0 ; <i32> [#uses=1]
+ %tmp.9 = or i32 %tmp.2, -989855744 ; <i32> [#uses=2]
+ %tmp.16 = and i32 %tmp.9, -805306369 ; <i32> [#uses=2]
+ %tmp.17 = and i32 %tmp.9, -973078529 ; <i32> [#uses=1]
+ store i32 %tmp.17, i32* @b_rec.0
+ %tmp.17.shrunk = bitcast i32 %tmp.16 to i32 ; <i32> [#uses=1]
+ %tmp.22 = and i32 %tmp.17.shrunk, -1073741824 ; <i32> [#uses=1]
+ %tmp.23 = icmp eq i32 %tmp.22, -1073741824 ; <i1> [#uses=1]
+ br i1 %tmp.23, label %endif.0, label %then.0
+
+then.0: ; preds = %entry
+ tail call void @abort( )
+ unreachable
+
+endif.0: ; preds = %entry
+ %tmp.17.shrunk2 = bitcast i32 %tmp.16 to i32 ; <i32> [#uses=1]
+ %tmp.27.mask = and i32 %tmp.17.shrunk2, 100663295 ; <i32> [#uses=1]
+ store i32 %tmp.27.mask, i32* %P
+ ret void
+}
+
+declare void @abort()
+
diff --git a/src/LLVM/test/Transforms/InstCombine/bswap-fold.ll b/src/LLVM/test/Transforms/InstCombine/bswap-fold.ll
new file mode 100644
index 0000000..52f4a40
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/bswap-fold.ll
@@ -0,0 +1,71 @@
+; RUN: opt < %s -instcombine -S | not grep call.*bswap
+
+define i1 @test1(i16 %tmp2) {
+ %tmp10 = call i16 @llvm.bswap.i16( i16 %tmp2 )
+ %tmp = icmp eq i16 %tmp10, 1
+ ret i1 %tmp
+}
+
+define i1 @test2(i32 %tmp) {
+ %tmp34 = tail call i32 @llvm.bswap.i32( i32 %tmp )
+ %tmp.upgrd.1 = icmp eq i32 %tmp34, 1
+ ret i1 %tmp.upgrd.1
+}
+
+declare i32 @llvm.bswap.i32(i32)
+
+define i1 @test3(i64 %tmp) {
+ %tmp34 = tail call i64 @llvm.bswap.i64( i64 %tmp )
+ %tmp.upgrd.2 = icmp eq i64 %tmp34, 1
+ ret i1 %tmp.upgrd.2
+}
+
+declare i64 @llvm.bswap.i64(i64)
+
+declare i16 @llvm.bswap.i16(i16)
+
+; rdar://5992453
+; A & 255
+define i32 @test4(i32 %a) nounwind {
+entry:
+ %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
+ %tmp4 = lshr i32 %tmp2, 24
+ ret i32 %tmp4
+}
+
+; A
+define i32 @test5(i32 %a) nounwind {
+entry:
+ %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
+ %tmp4 = tail call i32 @llvm.bswap.i32( i32 %tmp2 )
+ ret i32 %tmp4
+}
+
+; a >> 24
+define i32 @test6(i32 %a) nounwind {
+entry:
+ %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
+ %tmp4 = and i32 %tmp2, 255
+ ret i32 %tmp4
+}
+
+; PR5284
+define i16 @test7(i32 %A) {
+ %B = tail call i32 @llvm.bswap.i32(i32 %A) nounwind
+ %C = trunc i32 %B to i16
+ %D = tail call i16 @llvm.bswap.i16(i16 %C) nounwind
+ ret i16 %D
+}
+
+define i16 @test8(i64 %A) {
+ %B = tail call i64 @llvm.bswap.i64(i64 %A) nounwind
+ %C = trunc i64 %B to i16
+ %D = tail call i16 @llvm.bswap.i16(i16 %C) nounwind
+ ret i16 %D
+}
+
+; Misc: Fold bswap(undef) to undef.
+define i64 @foo() {
+ %a = call i64 @llvm.bswap.i64(i64 undef)
+ ret i64 %a
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/bswap.ll b/src/LLVM/test/Transforms/InstCombine/bswap.ll
new file mode 100644
index 0000000..9ae1481
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/bswap.ll
@@ -0,0 +1,74 @@
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
+
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {call.*llvm.bswap} | count 6
+
+define i32 @test1(i32 %i) {
+ %tmp1 = lshr i32 %i, 24 ; <i32> [#uses=1]
+ %tmp3 = lshr i32 %i, 8 ; <i32> [#uses=1]
+ %tmp4 = and i32 %tmp3, 65280 ; <i32> [#uses=1]
+ %tmp5 = or i32 %tmp1, %tmp4 ; <i32> [#uses=1]
+ %tmp7 = shl i32 %i, 8 ; <i32> [#uses=1]
+ %tmp8 = and i32 %tmp7, 16711680 ; <i32> [#uses=1]
+ %tmp9 = or i32 %tmp5, %tmp8 ; <i32> [#uses=1]
+ %tmp11 = shl i32 %i, 24 ; <i32> [#uses=1]
+ %tmp12 = or i32 %tmp9, %tmp11 ; <i32> [#uses=1]
+ ret i32 %tmp12
+}
+
+define i32 @test2(i32 %arg) {
+ %tmp2 = shl i32 %arg, 24 ; <i32> [#uses=1]
+ %tmp4 = shl i32 %arg, 8 ; <i32> [#uses=1]
+ %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
+ %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
+ %tmp8 = lshr i32 %arg, 8 ; <i32> [#uses=1]
+ %tmp9 = and i32 %tmp8, 65280 ; <i32> [#uses=1]
+ %tmp10 = or i32 %tmp6, %tmp9 ; <i32> [#uses=1]
+ %tmp12 = lshr i32 %arg, 24 ; <i32> [#uses=1]
+ %tmp14 = or i32 %tmp10, %tmp12 ; <i32> [#uses=1]
+ ret i32 %tmp14
+}
+
+define i16 @test3(i16 %s) {
+ %tmp2 = lshr i16 %s, 8 ; <i16> [#uses=1]
+ %tmp4 = shl i16 %s, 8 ; <i16> [#uses=1]
+ %tmp5 = or i16 %tmp2, %tmp4 ; <i16> [#uses=1]
+ ret i16 %tmp5
+}
+
+define i16 @test4(i16 %s) {
+ %tmp2 = lshr i16 %s, 8 ; <i16> [#uses=1]
+ %tmp4 = shl i16 %s, 8 ; <i16> [#uses=1]
+ %tmp5 = or i16 %tmp4, %tmp2 ; <i16> [#uses=1]
+ ret i16 %tmp5
+}
+
+define i16 @test5(i16 %a) {
+ %tmp = zext i16 %a to i32 ; <i32> [#uses=2]
+ %tmp1 = and i32 %tmp, 65280 ; <i32> [#uses=1]
+ %tmp2 = ashr i32 %tmp1, 8 ; <i32> [#uses=1]
+ %tmp2.upgrd.1 = trunc i32 %tmp2 to i16 ; <i16> [#uses=1]
+ %tmp4 = and i32 %tmp, 255 ; <i32> [#uses=1]
+ %tmp5 = shl i32 %tmp4, 8 ; <i32> [#uses=1]
+ %tmp5.upgrd.2 = trunc i32 %tmp5 to i16 ; <i16> [#uses=1]
+ %tmp.upgrd.3 = or i16 %tmp2.upgrd.1, %tmp5.upgrd.2 ; <i16> [#uses=1]
+ %tmp6 = bitcast i16 %tmp.upgrd.3 to i16 ; <i16> [#uses=1]
+ %tmp6.upgrd.4 = zext i16 %tmp6 to i32 ; <i32> [#uses=1]
+ %retval = trunc i32 %tmp6.upgrd.4 to i16 ; <i16> [#uses=1]
+ ret i16 %retval
+}
+
+; PR2842
+define i32 @test6(i32 %x) nounwind readnone {
+ %tmp = shl i32 %x, 16 ; <i32> [#uses=1]
+ %x.mask = and i32 %x, 65280 ; <i32> [#uses=1]
+ %tmp1 = lshr i32 %x, 16 ; <i32> [#uses=1]
+ %tmp2 = and i32 %tmp1, 255 ; <i32> [#uses=1]
+ %tmp3 = or i32 %x.mask, %tmp ; <i32> [#uses=1]
+ %tmp4 = or i32 %tmp3, %tmp2 ; <i32> [#uses=1]
+ %tmp5 = shl i32 %tmp4, 8 ; <i32> [#uses=1]
+ %tmp6 = lshr i32 %x, 24 ; <i32> [#uses=1]
+ %tmp7 = or i32 %tmp5, %tmp6 ; <i32> [#uses=1]
+ ret i32 %tmp7
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/call-cast-target.ll b/src/LLVM/test/Transforms/InstCombine/call-cast-target.ll
new file mode 100644
index 0000000..47661f1
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/call-cast-target.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep call | not grep bitcast
+
+target datalayout = "e-p:32:32"
+target triple = "i686-pc-linux-gnu"
+
+define i32 @main() {
+entry:
+ %tmp = call i32 bitcast (i8* (i32*)* @ctime to i32 (i32*)*)( i32* null ) ; <i32> [#uses=1]
+ ret i32 %tmp
+}
+
+declare i8* @ctime(i32*)
+
diff --git a/src/LLVM/test/Transforms/InstCombine/call-intrinsics.ll b/src/LLVM/test/Transforms/InstCombine/call-intrinsics.ll
new file mode 100644
index 0000000..4bfc2ce
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/call-intrinsics.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine | llvm-dis
+
+@X = global i8 0 ; <i8*> [#uses=3]
+@Y = global i8 12 ; <i8*> [#uses=2]
+
+declare void @llvm.memmove.i32(i8*, i8*, i32, i32)
+
+declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
+
+declare void @llvm.memset.i32(i8*, i8, i32, i32)
+
+define void @zero_byte_test() {
+ ; These process zero bytes, so they are a noop.
+ call void @llvm.memmove.i32( i8* @X, i8* @Y, i32 0, i32 100 )
+ call void @llvm.memcpy.i32( i8* @X, i8* @Y, i32 0, i32 100 )
+ call void @llvm.memset.i32( i8* @X, i8 123, i32 0, i32 100 )
+ ret void
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/call.ll b/src/LLVM/test/Transforms/InstCombine/call.ll
new file mode 100644
index 0000000..55bcac9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/call.ll
@@ -0,0 +1,136 @@
+; Ignore stderr, we expect warnings there
+; RUN: opt < %s -instcombine 2> /dev/null -S | FileCheck %s
+
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+; Simple case, argument translatable without changing the value
+declare void @test1a(i8*)
+
+define void @test1(i32* %A) {
+ call void bitcast (void (i8*)* @test1a to void (i32*)*)( i32* %A )
+ ret void
+; CHECK: %1 = bitcast i32* %A to i8*
+; CHECK: call void @test1a(i8* %1)
+; CHECK: ret void
+}
+
+; More complex case, translate argument because of resolution. This is safe
+; because we have the body of the function
+define void @test2a(i8 %A) {
+ ret void
+; CHECK: ret void
+}
+
+define i32 @test2(i32 %A) {
+ call void bitcast (void (i8)* @test2a to void (i32)*)( i32 %A )
+ ret i32 %A
+; CHECK: %1 = trunc i32 %A to i8
+; CHECK: call void @test2a(i8 %1)
+; CHECK: ret i32 %A
+}
+
+
+; Resolving this should insert a cast from sbyte to int, following the C
+; promotion rules.
+define void @test3a(i8, ...) {unreachable }
+
+define void @test3(i8 %A, i8 %B) {
+ call void bitcast (void (i8, ...)* @test3a to void (i8, i8)*)( i8 %A, i8 %B
+)
+ ret void
+; CHECK: %1 = zext i8 %B to i32
+; CHECK: call void (i8, ...)* @test3a(i8 %A, i32 %1)
+; CHECK: ret void
+}
+
+
+; test conversion of return value...
+define i8 @test4a() {
+ ret i8 0
+; CHECK: ret i8 0
+}
+
+define i32 @test4() {
+ %X = call i32 bitcast (i8 ()* @test4a to i32 ()*)( ) ; <i32> [#uses=1]
+ ret i32 %X
+; CHECK: %X = call i8 @test4a()
+; CHECK: %1 = zext i8 %X to i32
+; CHECK: ret i32 %1
+}
+
+
+; test conversion of return value... no value conversion occurs so we can do
+; this with just a prototype...
+declare i32 @test5a()
+
+define i32 @test5() {
+ %X = call i32 @test5a( ) ; <i32> [#uses=1]
+ ret i32 %X
+; CHECK: %X = call i32 @test5a()
+; CHECK: ret i32 %X
+}
+
+
+; test addition of new arguments...
+declare i32 @test6a(i32)
+
+define i32 @test6() {
+ %X = call i32 bitcast (i32 (i32)* @test6a to i32 ()*)( )
+ ret i32 %X
+; CHECK: %X = call i32 @test6a(i32 0)
+; CHECK: ret i32 %X
+}
+
+
+; test removal of arguments, only can happen with a function body
+define void @test7a() {
+ ret void
+; CHECK: ret void
+}
+
+define void @test7() {
+ call void bitcast (void ()* @test7a to void (i32)*)( i32 5 )
+ ret void
+; CHECK: call void @test7a()
+; CHECK: ret void
+}
+
+
+; rdar://7590304
+declare void @test8a()
+
+define i8* @test8() {
+ invoke void @test8a()
+ to label %invoke.cont unwind label %try.handler
+
+invoke.cont: ; preds = %entry
+ unreachable
+
+try.handler: ; preds = %entry
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
+ ret i8* null
+}
+
+declare i32 @__gxx_personality_v0(...)
+
+; Don't turn this into "unreachable": the callee and caller don't agree in
+; calling conv, but the implementation of test8a may actually end up using the
+; right calling conv.
+; CHECK: @test8() {
+; CHECK-NEXT: invoke void @test8a()
+
+
+
+; Don't turn this into a direct call, because test9x is just a prototype and
+; doing so will make it varargs.
+; rdar://9038601
+declare i8* @test9x(i8*, i8*, ...) noredzone
+define i8* @test9(i8* %arg, i8* %tmp3) nounwind ssp noredzone {
+entry:
+ %call = call i8* bitcast (i8* (i8*, i8*, ...)* @test9x to i8* (i8*, i8*)*)(i8* %arg, i8* %tmp3) noredzone
+ ret i8* %call
+; CHECK: @test9(
+; CHECK: call i8* bitcast
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/call2.ll b/src/LLVM/test/Transforms/InstCombine/call2.ll
new file mode 100644
index 0000000..b7af1cd
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/call2.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -instcombine | llvm-dis
+
+; This used to crash trying to do a double-to-pointer conversion
+define i32 @bar() {
+entry:
+ %retval = alloca i32, align 4 ; <i32*> [#uses=1]
+ %tmp = call i32 (...)* bitcast (i32 (i8*)* @f to i32 (...)*)( double 3.000000e+00 ) ; <i32> [#uses=0]
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load i32* %retval ; <i32> [#uses=1]
+ ret i32 %retval1
+}
+
+define i32 @f(i8* %p) {
+entry:
+ %p_addr = alloca i8* ; <i8**> [#uses=1]
+ %retval = alloca i32, align 4 ; <i32*> [#uses=1]
+ store i8* %p, i8** %p_addr
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load i32* %retval ; <i32> [#uses=1]
+ ret i32 %retval1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/canonicalize_branch.ll b/src/LLVM/test/Transforms/InstCombine/canonicalize_branch.ll
new file mode 100644
index 0000000..56ad5c9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/canonicalize_branch.ll
@@ -0,0 +1,69 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; Test an already canonical branch to make sure we don't flip those.
+define i32 @test0(i32 %X, i32 %Y) {
+ %C = icmp eq i32 %X, %Y
+ br i1 %C, label %T, label %F, !prof !0
+
+; CHECK: @test0
+; CHECK: %C = icmp eq i32 %X, %Y
+; CHECK: br i1 %C, label %T, label %F
+
+T:
+ ret i32 12
+F:
+ ret i32 123
+}
+
+define i32 @test1(i32 %X, i32 %Y) {
+ %C = icmp ne i32 %X, %Y
+ br i1 %C, label %T, label %F, !prof !1
+
+; CHECK: @test1
+; CHECK: %C = icmp eq i32 %X, %Y
+; CHECK: br i1 %C, label %F, label %T
+
+T:
+ ret i32 12
+F:
+ ret i32 123
+}
+
+define i32 @test2(i32 %X, i32 %Y) {
+ %C = icmp ule i32 %X, %Y
+ br i1 %C, label %T, label %F, !prof !2
+
+; CHECK: @test2
+; CHECK: %C = icmp ugt i32 %X, %Y
+; CHECK: br i1 %C, label %F, label %T
+
+T:
+ ret i32 12
+F:
+ ret i32 123
+}
+
+define i32 @test3(i32 %X, i32 %Y) {
+ %C = icmp uge i32 %X, %Y
+ br i1 %C, label %T, label %F, !prof !3
+
+; CHECK: @test3
+; CHECK: %C = icmp ult i32 %X, %Y
+; CHECK: br i1 %C, label %F, label %T
+
+T:
+ ret i32 12
+F:
+ ret i32 123
+}
+
+!0 = metadata !{metadata !"branch_weights", i32 1, i32 2}
+!1 = metadata !{metadata !"branch_weights", i32 3, i32 4}
+!2 = metadata !{metadata !"branch_weights", i32 5, i32 6}
+!3 = metadata !{metadata !"branch_weights", i32 7, i32 8}
+; Base case shouldn't change.
+; CHECK: !0 = {{.*}} i32 1, i32 2}
+; Ensure that the branch metadata is reversed to match the reversals above.
+; CHECK: !1 = {{.*}} i32 4, i32 3}
+; CHECK: !2 = {{.*}} i32 6, i32 5}
+; CHECK: !3 = {{.*}} i32 8, i32 7}
diff --git a/src/LLVM/test/Transforms/InstCombine/cast-mul-select.ll b/src/LLVM/test/Transforms/InstCombine/cast-mul-select.ll
new file mode 100644
index 0000000..f55423c
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/cast-mul-select.ll
@@ -0,0 +1,41 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32"
+
+define i32 @mul(i32 %x, i32 %y) {
+ %A = trunc i32 %x to i8
+ %B = trunc i32 %y to i8
+ %C = mul i8 %A, %B
+ %D = zext i8 %C to i32
+ ret i32 %D
+; CHECK: %C = mul i32 %x, %y
+; CHECK: %D = and i32 %C, 255
+; CHECK: ret i32 %D
+}
+
+define i32 @select1(i1 %cond, i32 %x, i32 %y, i32 %z) {
+ %A = trunc i32 %x to i8
+ %B = trunc i32 %y to i8
+ %C = trunc i32 %z to i8
+ %D = add i8 %A, %B
+ %E = select i1 %cond, i8 %C, i8 %D
+ %F = zext i8 %E to i32
+ ret i32 %F
+; CHECK: %D = add i32 %x, %y
+; CHECK: %E = select i1 %cond, i32 %z, i32 %D
+; CHECK: %F = and i32 %E, 255
+; CHECK: ret i32 %F
+}
+
+define i8 @select2(i1 %cond, i8 %x, i8 %y, i8 %z) {
+ %A = zext i8 %x to i32
+ %B = zext i8 %y to i32
+ %C = zext i8 %z to i32
+ %D = add i32 %A, %B
+ %E = select i1 %cond, i32 %C, i32 %D
+ %F = trunc i32 %E to i8
+ ret i8 %F
+; CHECK: %D = add i8 %x, %y
+; CHECK: %E = select i1 %cond, i8 %z, i8 %D
+; CHECK: ret i8 %E
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/cast-set.ll b/src/LLVM/test/Transforms/InstCombine/cast-set.ll
new file mode 100644
index 0000000..a0b4aea
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/cast-set.ll
@@ -0,0 +1,65 @@
+; This tests for various complex cast elimination cases instcombine should
+; handle.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i1 @test1(i32 %X) {
+ %A = bitcast i32 %X to i32 ; <i32> [#uses=1]
+ ; Convert to setne int %X, 12
+ %c = icmp ne i32 %A, 12 ; <i1> [#uses=1]
+ ret i1 %c
+; CHECK: %c = icmp ne i32 %X, 12
+; CHECK: ret i1 %c
+}
+
+define i1 @test2(i32 %X, i32 %Y) {
+ %A = bitcast i32 %X to i32 ; <i32> [#uses=1]
+ %B = bitcast i32 %Y to i32 ; <i32> [#uses=1]
+ ; Convert to setne int %X, %Y
+ %c = icmp ne i32 %A, %B ; <i1> [#uses=1]
+ ret i1 %c
+; CHECK: %c = icmp ne i32 %X, %Y
+; CHECK: ret i1 %c
+}
+
+define i32 @test4(i32 %A) {
+ %B = bitcast i32 %A to i32 ; <i32> [#uses=1]
+ %C = shl i32 %B, 2 ; <i32> [#uses=1]
+ %D = bitcast i32 %C to i32 ; <i32> [#uses=1]
+ ret i32 %D
+; CHECK: %C = shl i32 %A, 2
+; CHECK: ret i32 %C
+}
+
+define i16 @test5(i16 %A) {
+ %B = sext i16 %A to i32 ; <i32> [#uses=1]
+ %C = and i32 %B, 15 ; <i32> [#uses=1]
+ %D = trunc i32 %C to i16 ; <i16> [#uses=1]
+ ret i16 %D
+; CHECK: %C = and i16 %A, 15
+; CHECK: ret i16 %C
+}
+
+define i1 @test6(i1 %A) {
+ %B = zext i1 %A to i32 ; <i32> [#uses=1]
+ %C = icmp ne i32 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 %A
+}
+
+define i1 @test6a(i1 %A) {
+ %B = zext i1 %A to i32 ; <i32> [#uses=1]
+ %C = icmp ne i32 %B, -1 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: ret i1 true
+}
+
+define i1 @test7(i8* %A) {
+ %B = bitcast i8* %A to i32* ; <i32*> [#uses=1]
+ %C = icmp eq i32* %B, null ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: %C = icmp eq i8* %A, null
+; CHECK: ret i1 %C
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/cast.ll b/src/LLVM/test/Transforms/InstCombine/cast.ll
new file mode 100644
index 0000000..e5a5b1a
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/cast.ll
@@ -0,0 +1,679 @@
+; Tests to make sure elimination of casts is working correctly
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128-n8:16:32:64"
+
+@inbuf = external global [32832 x i8] ; <[32832 x i8]*> [#uses=1]
+
+define i32 @test1(i32 %A) {
+ %c1 = bitcast i32 %A to i32 ; <i32> [#uses=1]
+ %c2 = bitcast i32 %c1 to i32 ; <i32> [#uses=1]
+ ret i32 %c2
+; CHECK: ret i32 %A
+}
+
+define i64 @test2(i8 %A) {
+ %c1 = zext i8 %A to i16 ; <i16> [#uses=1]
+ %c2 = zext i16 %c1 to i32 ; <i32> [#uses=1]
+ %Ret = zext i32 %c2 to i64 ; <i64> [#uses=1]
+ ret i64 %Ret
+; CHECK: %Ret = zext i8 %A to i64
+; CHECK: ret i64 %Ret
+}
+
+; This function should just use bitwise AND
+define i64 @test3(i64 %A) {
+ %c1 = trunc i64 %A to i8 ; <i8> [#uses=1]
+ %c2 = zext i8 %c1 to i64 ; <i64> [#uses=1]
+ ret i64 %c2
+; CHECK: %c2 = and i64 %A, 255
+; CHECK: ret i64 %c2
+}
+
+define i32 @test4(i32 %A, i32 %B) {
+ %COND = icmp slt i32 %A, %B ; <i1> [#uses=1]
+ ; Booleans are unsigned integrals
+ %c = zext i1 %COND to i8 ; <i8> [#uses=1]
+ ; for the cast elim purpose
+ %result = zext i8 %c to i32 ; <i32> [#uses=1]
+ ret i32 %result
+; CHECK: %COND = icmp slt i32 %A, %B
+; CHECK: %result = zext i1 %COND to i32
+; CHECK: ret i32 %result
+}
+
+define i32 @test5(i1 %B) {
+ ; This cast should get folded into
+ %c = zext i1 %B to i8 ; <i8> [#uses=1]
+ ; this cast
+ %result = zext i8 %c to i32 ; <i32> [#uses=1]
+ ret i32 %result
+; CHECK: %result = zext i1 %B to i32
+; CHECK: ret i32 %result
+}
+
+define i32 @test6(i64 %A) {
+ %c1 = trunc i64 %A to i32 ; <i32> [#uses=1]
+ %res = bitcast i32 %c1 to i32 ; <i32> [#uses=1]
+ ret i32 %res
+; CHECK: trunc i64 %A to i32
+; CHECK-NEXT: ret i32
+}
+
+define i64 @test7(i1 %A) {
+ %c1 = zext i1 %A to i32 ; <i32> [#uses=1]
+ %res = sext i32 %c1 to i64 ; <i64> [#uses=1]
+ ret i64 %res
+; CHECK: %res = zext i1 %A to i64
+; CHECK: ret i64 %res
+}
+
+define i64 @test8(i8 %A) {
+ %c1 = sext i8 %A to i64 ; <i64> [#uses=1]
+ %res = bitcast i64 %c1 to i64 ; <i64> [#uses=1]
+ ret i64 %res
+; CHECK: = sext i8 %A to i64
+; CHECK-NEXT: ret i64
+}
+
+define i16 @test9(i16 %A) {
+ %c1 = sext i16 %A to i32 ; <i32> [#uses=1]
+ %c2 = trunc i32 %c1 to i16 ; <i16> [#uses=1]
+ ret i16 %c2
+; CHECK: ret i16 %A
+}
+
+define i16 @test10(i16 %A) {
+ %c1 = sext i16 %A to i32 ; <i32> [#uses=1]
+ %c2 = trunc i32 %c1 to i16 ; <i16> [#uses=1]
+ ret i16 %c2
+; CHECK: ret i16 %A
+}
+
+declare void @varargs(i32, ...)
+
+define void @test11(i32* %P) {
+ %c = bitcast i32* %P to i16* ; <i16*> [#uses=1]
+ call void (i32, ...)* @varargs( i32 5, i16* %c )
+ ret void
+; CHECK: call void (i32, ...)* @varargs(i32 5, i32* %P)
+; CHECK: ret void
+}
+
+define i8* @test13(i64 %A) {
+ %c = getelementptr [0 x i8]* bitcast ([32832 x i8]* @inbuf to [0 x i8]*), i64 0, i64 %A ; <i8*> [#uses=1]
+ ret i8* %c
+; CHECK: %c = getelementptr [32832 x i8]* @inbuf, i64 0, i64 %A
+; CHECK: ret i8* %c
+}
+
+define i1 @test14(i8 %A) {
+ %c = bitcast i8 %A to i8 ; <i8> [#uses=1]
+ %X = icmp ult i8 %c, -128 ; <i1> [#uses=1]
+ ret i1 %X
+; CHECK: %X = icmp sgt i8 %A, -1
+; CHECK: ret i1 %X
+}
+
+
+; This just won't occur when there's no difference between ubyte and sbyte
+;bool %test15(ubyte %A) {
+; %c = cast ubyte %A to sbyte
+; %X = setlt sbyte %c, 0 ; setgt %A, 127
+; ret bool %X
+;}
+
+define i1 @test16(i32* %P) {
+ %c = icmp ne i32* %P, null ; <i1> [#uses=1]
+ ret i1 %c
+; CHECK: %c = icmp ne i32* %P, null
+; CHECK: ret i1 %c
+}
+
+define i16 @test17(i1 %tmp3) {
+ %c = zext i1 %tmp3 to i32 ; <i32> [#uses=1]
+ %t86 = trunc i32 %c to i16 ; <i16> [#uses=1]
+ ret i16 %t86
+; CHECK: %t86 = zext i1 %tmp3 to i16
+; CHECK: ret i16 %t86
+}
+
+define i16 @test18(i8 %tmp3) {
+ %c = sext i8 %tmp3 to i32 ; <i32> [#uses=1]
+ %t86 = trunc i32 %c to i16 ; <i16> [#uses=1]
+ ret i16 %t86
+; CHECK: %t86 = sext i8 %tmp3 to i16
+; CHECK: ret i16 %t86
+}
+
+define i1 @test19(i32 %X) {
+ %c = sext i32 %X to i64 ; <i64> [#uses=1]
+ %Z = icmp slt i64 %c, 12345 ; <i1> [#uses=1]
+ ret i1 %Z
+; CHECK: %Z = icmp slt i32 %X, 12345
+; CHECK: ret i1 %Z
+}
+
+define i1 @test20(i1 %B) {
+ %c = zext i1 %B to i32 ; <i32> [#uses=1]
+ %D = icmp slt i32 %c, -1 ; <i1> [#uses=1]
+ ;; false
+ ret i1 %D
+; CHECK: ret i1 false
+}
+
+define i32 @test21(i32 %X) {
+ %c1 = trunc i32 %X to i8 ; <i8> [#uses=1]
+ ;; sext -> zext -> and -> nop
+ %c2 = sext i8 %c1 to i32 ; <i32> [#uses=1]
+ %RV = and i32 %c2, 255 ; <i32> [#uses=1]
+ ret i32 %RV
+; CHECK: %c21 = and i32 %X, 255
+; CHECK: ret i32 %c21
+}
+
+define i32 @test22(i32 %X) {
+ %c1 = trunc i32 %X to i8 ; <i8> [#uses=1]
+ ;; sext -> zext -> and -> nop
+ %c2 = sext i8 %c1 to i32 ; <i32> [#uses=1]
+ %RV = shl i32 %c2, 24 ; <i32> [#uses=1]
+ ret i32 %RV
+; CHECK: shl i32 %X, 24
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test23(i32 %X) {
+ ;; Turn into an AND even though X
+ %c1 = trunc i32 %X to i16 ; <i16> [#uses=1]
+ ;; and Z are signed.
+ %c2 = zext i16 %c1 to i32 ; <i32> [#uses=1]
+ ret i32 %c2
+; CHECK: %c2 = and i32 %X, 65535
+; CHECK: ret i32 %c2
+}
+
+define i1 @test24(i1 %C) {
+ %X = select i1 %C, i32 14, i32 1234 ; <i32> [#uses=1]
+ ;; Fold cast into select
+ %c = icmp ne i32 %X, 0 ; <i1> [#uses=1]
+ ret i1 %c
+; CHECK: ret i1 true
+}
+
+define void @test25(i32** %P) {
+ %c = bitcast i32** %P to float** ; <float**> [#uses=1]
+ ;; Fold cast into null
+ store float* null, float** %c
+ ret void
+; CHECK: store i32* null, i32** %P
+; CHECK: ret void
+}
+
+define i32 @test26(float %F) {
+ ;; no need to cast from float->double.
+ %c = fpext float %F to double ; <double> [#uses=1]
+ %D = fptosi double %c to i32 ; <i32> [#uses=1]
+ ret i32 %D
+; CHECK: %D = fptosi float %F to i32
+; CHECK: ret i32 %D
+}
+
+define [4 x float]* @test27([9 x [4 x float]]* %A) {
+ %c = bitcast [9 x [4 x float]]* %A to [4 x float]* ; <[4 x float]*> [#uses=1]
+ ret [4 x float]* %c
+; CHECK: %c = getelementptr inbounds [9 x [4 x float]]* %A, i64 0, i64 0
+; CHECK: ret [4 x float]* %c
+}
+
+define float* @test28([4 x float]* %A) {
+ %c = bitcast [4 x float]* %A to float* ; <float*> [#uses=1]
+ ret float* %c
+; CHECK: %c = getelementptr inbounds [4 x float]* %A, i64 0, i64 0
+; CHECK: ret float* %c
+}
+
+define i32 @test29(i32 %c1, i32 %c2) {
+ %tmp1 = trunc i32 %c1 to i8 ; <i8> [#uses=1]
+ %tmp4.mask = trunc i32 %c2 to i8 ; <i8> [#uses=1]
+ %tmp = or i8 %tmp4.mask, %tmp1 ; <i8> [#uses=1]
+ %tmp10 = zext i8 %tmp to i32 ; <i32> [#uses=1]
+ ret i32 %tmp10
+; CHECK: %tmp2 = or i32 %c2, %c1
+; CHECK: %tmp10 = and i32 %tmp2, 255
+; CHECK: ret i32 %tmp10
+}
+
+define i32 @test30(i32 %c1) {
+ %c2 = trunc i32 %c1 to i8 ; <i8> [#uses=1]
+ %c3 = xor i8 %c2, 1 ; <i8> [#uses=1]
+ %c4 = zext i8 %c3 to i32 ; <i32> [#uses=1]
+ ret i32 %c4
+; CHECK: %c3 = and i32 %c1, 255
+; CHECK: %c4 = xor i32 %c3, 1
+; CHECK: ret i32 %c4
+}
+
+define i1 @test31(i64 %A) {
+ %B = trunc i64 %A to i32 ; <i32> [#uses=1]
+ %C = and i32 %B, 42 ; <i32> [#uses=1]
+ %D = icmp eq i32 %C, 10 ; <i1> [#uses=1]
+ ret i1 %D
+; CHECK: %C = and i64 %A, 42
+; CHECK: %D = icmp eq i64 %C, 10
+; CHECK: ret i1 %D
+}
+
+define i32 @test33(i32 %c1) {
+ %x = bitcast i32 %c1 to float ; <float> [#uses=1]
+ %y = bitcast float %x to i32 ; <i32> [#uses=1]
+ ret i32 %y
+; CHECK: ret i32 %c1
+}
+
+define i16 @test34(i16 %a) {
+ %c1 = zext i16 %a to i32 ; <i32> [#uses=1]
+ %tmp21 = lshr i32 %c1, 8 ; <i32> [#uses=1]
+ %c2 = trunc i32 %tmp21 to i16 ; <i16> [#uses=1]
+ ret i16 %c2
+; CHECK: %tmp21 = lshr i16 %a, 8
+; CHECK: ret i16 %tmp21
+}
+
+define i16 @test35(i16 %a) {
+ %c1 = bitcast i16 %a to i16 ; <i16> [#uses=1]
+ %tmp2 = lshr i16 %c1, 8 ; <i16> [#uses=1]
+ %c2 = bitcast i16 %tmp2 to i16 ; <i16> [#uses=1]
+ ret i16 %c2
+; CHECK: %tmp2 = lshr i16 %a, 8
+; CHECK: ret i16 %tmp2
+}
+
+; icmp sgt i32 %a, -1
+; rdar://6480391
+define i1 @test36(i32 %a) {
+ %b = lshr i32 %a, 31
+ %c = trunc i32 %b to i8
+ %d = icmp eq i8 %c, 0
+ ret i1 %d
+; CHECK: %d = icmp sgt i32 %a, -1
+; CHECK: ret i1 %d
+}
+
+; ret i1 false
+define i1 @test37(i32 %a) {
+ %b = lshr i32 %a, 31
+ %c = or i32 %b, 512
+ %d = trunc i32 %c to i8
+ %e = icmp eq i8 %d, 11
+ ret i1 %e
+; CHECK: ret i1 false
+}
+
+define i64 @test38(i32 %a) {
+ %1 = icmp eq i32 %a, -2
+ %2 = zext i1 %1 to i8
+ %3 = xor i8 %2, 1
+ %4 = zext i8 %3 to i64
+ ret i64 %4
+; CHECK: %1 = icmp ne i32 %a, -2
+; CHECK: %2 = zext i1 %1 to i64
+; CHECK: ret i64 %2
+}
+
+define i16 @test39(i16 %a) {
+ %tmp = zext i16 %a to i32
+ %tmp21 = lshr i32 %tmp, 8
+ %tmp5 = shl i32 %tmp, 8
+ %tmp.upgrd.32 = or i32 %tmp21, %tmp5
+ %tmp.upgrd.3 = trunc i32 %tmp.upgrd.32 to i16
+ ret i16 %tmp.upgrd.3
+; CHECK: @test39
+; CHECK: %tmp.upgrd.32 = call i16 @llvm.bswap.i16(i16 %a)
+; CHECK: ret i16 %tmp.upgrd.32
+}
+
+define i16 @test40(i16 %a) {
+ %tmp = zext i16 %a to i32
+ %tmp21 = lshr i32 %tmp, 9
+ %tmp5 = shl i32 %tmp, 8
+ %tmp.upgrd.32 = or i32 %tmp21, %tmp5
+ %tmp.upgrd.3 = trunc i32 %tmp.upgrd.32 to i16
+ ret i16 %tmp.upgrd.3
+; CHECK: @test40
+; CHECK: %tmp21 = lshr i16 %a, 9
+; CHECK: %tmp5 = shl i16 %a, 8
+; CHECK: %tmp.upgrd.32 = or i16 %tmp21, %tmp5
+; CHECK: ret i16 %tmp.upgrd.32
+}
+
+; PR1263
+define i32* @test41(i32* %tmp1) {
+ %tmp64 = bitcast i32* %tmp1 to { i32 }*
+ %tmp65 = getelementptr { i32 }* %tmp64, i32 0, i32 0
+ ret i32* %tmp65
+; CHECK: @test41
+; CHECK: ret i32* %tmp1
+}
+
+define i32 @test42(i32 %X) {
+ %Y = trunc i32 %X to i8 ; <i8> [#uses=1]
+ %Z = zext i8 %Y to i32 ; <i32> [#uses=1]
+ ret i32 %Z
+; CHECK: @test42
+; CHECK: %Z = and i32 %X, 255
+}
+
+; rdar://6598839
+define zeroext i64 @test43(i8 zeroext %on_off) nounwind readonly {
+ %A = zext i8 %on_off to i32
+ %B = add i32 %A, -1
+ %C = sext i32 %B to i64
+ ret i64 %C ;; Should be (add (zext i8 -> i64), -1)
+; CHECK: @test43
+; CHECK-NEXT: %A = zext i8 %on_off to i64
+; CHECK-NEXT: %B = add i64 %A, -1
+; CHECK-NEXT: ret i64 %B
+}
+
+define i64 @test44(i8 %T) {
+ %A = zext i8 %T to i16
+ %B = or i16 %A, 1234
+ %C = zext i16 %B to i64
+ ret i64 %C
+; CHECK: @test44
+; CHECK-NEXT: %A = zext i8 %T to i64
+; CHECK-NEXT: %B = or i64 %A, 1234
+; CHECK-NEXT: ret i64 %B
+}
+
+define i64 @test45(i8 %A, i64 %Q) {
+ %D = trunc i64 %Q to i32 ;; should be removed
+ %B = sext i8 %A to i32
+ %C = or i32 %B, %D
+ %E = zext i32 %C to i64
+ ret i64 %E
+; CHECK: @test45
+; CHECK-NEXT: %B = sext i8 %A to i64
+; CHECK-NEXT: %C = or i64 %B, %Q
+; CHECK-NEXT: %E = and i64 %C, 4294967295
+; CHECK-NEXT: ret i64 %E
+}
+
+
+define i64 @test46(i64 %A) {
+ %B = trunc i64 %A to i32
+ %C = and i32 %B, 42
+ %D = shl i32 %C, 8
+ %E = zext i32 %D to i64
+ ret i64 %E
+; CHECK: @test46
+; CHECK-NEXT: %C = shl i64 %A, 8
+; CHECK-NEXT: %D = and i64 %C, 10752
+; CHECK-NEXT: ret i64 %D
+}
+
+define i64 @test47(i8 %A) {
+ %B = sext i8 %A to i32
+ %C = or i32 %B, 42
+ %E = zext i32 %C to i64
+ ret i64 %E
+; CHECK: @test47
+; CHECK-NEXT: %B = sext i8 %A to i64
+; CHECK-NEXT: %C = and i64 %B, 4294967253
+; CHECK-NEXT: %E = or i64 %C, 42
+; CHECK-NEXT: ret i64 %E
+}
+
+define i64 @test48(i8 %A, i8 %a) {
+ %b = zext i8 %a to i32
+ %B = zext i8 %A to i32
+ %C = shl i32 %B, 8
+ %D = or i32 %C, %b
+ %E = zext i32 %D to i64
+ ret i64 %E
+; CHECK: @test48
+; CHECK-NEXT: %b = zext i8 %a to i64
+; CHECK-NEXT: %B = zext i8 %A to i64
+; CHECK-NEXT: %C = shl nuw nsw i64 %B, 8
+; CHECK-NEXT: %D = or i64 %C, %b
+; CHECK-NEXT: ret i64 %D
+}
+
+define i64 @test49(i64 %A) {
+ %B = trunc i64 %A to i32
+ %C = or i32 %B, 1
+ %D = sext i32 %C to i64
+ ret i64 %D
+; CHECK: @test49
+; CHECK-NEXT: %C = shl i64 %A, 32
+; CHECK-NEXT: ashr exact i64 %C, 32
+; CHECK-NEXT: %D = or i64 {{.*}}, 1
+; CHECK-NEXT: ret i64 %D
+}
+
+define i64 @test50(i64 %A) {
+ %a = lshr i64 %A, 2
+ %B = trunc i64 %a to i32
+ %D = add i32 %B, -1
+ %E = sext i32 %D to i64
+ ret i64 %E
+; CHECK: @test50
+; CHECK-NEXT: shl i64 %A, 30
+; CHECK-NEXT: add i64 {{.*}}, -4294967296
+; CHECK-NEXT: %sext = ashr i64 {{.*}}, 32
+; CHECK-NEXT: ret i64 %sext
+}
+
+define i64 @test51(i64 %A, i1 %cond) {
+ %B = trunc i64 %A to i32
+ %C = and i32 %B, -2
+ %D = or i32 %B, 1
+ %E = select i1 %cond, i32 %C, i32 %D
+ %F = sext i32 %E to i64
+ ret i64 %F
+; CHECK: @test51
+
+; FIXME: disabled, see PR5997
+; HECK-NEXT: %C = and i64 %A, 4294967294
+; HECK-NEXT: %D = or i64 %A, 1
+; HECK-NEXT: %E = select i1 %cond, i64 %C, i64 %D
+; HECK-NEXT: %sext = shl i64 %E, 32
+; HECK-NEXT: %F = ashr i64 %sext, 32
+; HECK-NEXT: ret i64 %F
+}
+
+define i32 @test52(i64 %A) {
+ %B = trunc i64 %A to i16
+ %C = or i16 %B, -32574
+ %D = and i16 %C, -25350
+ %E = zext i16 %D to i32
+ ret i32 %E
+; CHECK: @test52
+; CHECK-NEXT: %B = trunc i64 %A to i32
+; CHECK-NEXT: %C = and i32 %B, 7224
+; CHECK-NEXT: %D = or i32 %C, 32962
+; CHECK-NEXT: ret i32 %D
+}
+
+define i64 @test53(i32 %A) {
+ %B = trunc i32 %A to i16
+ %C = or i16 %B, -32574
+ %D = and i16 %C, -25350
+ %E = zext i16 %D to i64
+ ret i64 %E
+; CHECK: @test53
+; CHECK-NEXT: %B = zext i32 %A to i64
+; CHECK-NEXT: %C = and i64 %B, 7224
+; CHECK-NEXT: %D = or i64 %C, 32962
+; CHECK-NEXT: ret i64 %D
+}
+
+define i32 @test54(i64 %A) {
+ %B = trunc i64 %A to i16
+ %C = or i16 %B, -32574
+ %D = and i16 %C, -25350
+ %E = sext i16 %D to i32
+ ret i32 %E
+; CHECK: @test54
+; CHECK-NEXT: %B = trunc i64 %A to i32
+; CHECK-NEXT: %C = and i32 %B, 7224
+; CHECK-NEXT: %D = or i32 %C, -32574
+; CHECK-NEXT: ret i32 %D
+}
+
+define i64 @test55(i32 %A) {
+ %B = trunc i32 %A to i16
+ %C = or i16 %B, -32574
+ %D = and i16 %C, -25350
+ %E = sext i16 %D to i64
+ ret i64 %E
+; CHECK: @test55
+; CHECK-NEXT: %B = zext i32 %A to i64
+; CHECK-NEXT: %C = and i64 %B, 7224
+; CHECK-NEXT: %D = or i64 %C, -32574
+; CHECK-NEXT: ret i64 %D
+}
+
+define i64 @test56(i16 %A) nounwind {
+ %tmp353 = sext i16 %A to i32
+ %tmp354 = lshr i32 %tmp353, 5
+ %tmp355 = zext i32 %tmp354 to i64
+ ret i64 %tmp355
+; CHECK: @test56
+; CHECK-NEXT: %tmp353 = sext i16 %A to i64
+; CHECK-NEXT: %tmp354 = lshr i64 %tmp353, 5
+; CHECK-NEXT: %tmp355 = and i64 %tmp354, 134217727
+; CHECK-NEXT: ret i64 %tmp355
+}
+
+define i64 @test57(i64 %A) nounwind {
+ %B = trunc i64 %A to i32
+ %C = lshr i32 %B, 8
+ %E = zext i32 %C to i64
+ ret i64 %E
+; CHECK: @test57
+; CHECK-NEXT: %C = lshr i64 %A, 8
+; CHECK-NEXT: %E = and i64 %C, 16777215
+; CHECK-NEXT: ret i64 %E
+}
+
+define i64 @test58(i64 %A) nounwind {
+ %B = trunc i64 %A to i32
+ %C = lshr i32 %B, 8
+ %D = or i32 %C, 128
+ %E = zext i32 %D to i64
+ ret i64 %E
+
+; CHECK: @test58
+; CHECK-NEXT: %C = lshr i64 %A, 8
+; CHECK-NEXT: %D = and i64 %C, 16777087
+; CHECK-NEXT: %E = or i64 %D, 128
+; CHECK-NEXT: ret i64 %E
+}
+
+define i64 @test59(i8 %A, i8 %B) nounwind {
+ %C = zext i8 %A to i32
+ %D = shl i32 %C, 4
+ %E = and i32 %D, 48
+ %F = zext i8 %B to i32
+ %G = lshr i32 %F, 4
+ %H = or i32 %G, %E
+ %I = zext i32 %H to i64
+ ret i64 %I
+; CHECK: @test59
+; CHECK-NEXT: %C = zext i8 %A to i64
+; CHECK-NOT: i32
+; CHECK: %F = zext i8 %B to i64
+; CHECK-NOT: i32
+; CHECK: ret i64 %H
+}
+
+define <3 x i32> @test60(<4 x i32> %call4) nounwind {
+ %tmp11 = bitcast <4 x i32> %call4 to i128
+ %tmp9 = trunc i128 %tmp11 to i96
+ %tmp10 = bitcast i96 %tmp9 to <3 x i32>
+ ret <3 x i32> %tmp10
+
+; CHECK: @test60
+; CHECK-NEXT: shufflevector
+; CHECK-NEXT: ret
+}
+
+define <4 x i32> @test61(<3 x i32> %call4) nounwind {
+ %tmp11 = bitcast <3 x i32> %call4 to i96
+ %tmp9 = zext i96 %tmp11 to i128
+ %tmp10 = bitcast i128 %tmp9 to <4 x i32>
+ ret <4 x i32> %tmp10
+; CHECK: @test61
+; CHECK-NEXT: shufflevector
+; CHECK-NEXT: ret
+}
+
+define <4 x i32> @test62(<3 x float> %call4) nounwind {
+ %tmp11 = bitcast <3 x float> %call4 to i96
+ %tmp9 = zext i96 %tmp11 to i128
+ %tmp10 = bitcast i128 %tmp9 to <4 x i32>
+ ret <4 x i32> %tmp10
+; CHECK: @test62
+; CHECK-NEXT: bitcast
+; CHECK-NEXT: shufflevector
+; CHECK-NEXT: ret
+}
+
+; PR7311 - Don't create invalid IR on scalar->vector cast.
+define <2 x float> @test63(i64 %tmp8) nounwind {
+entry:
+ %a = bitcast i64 %tmp8 to <2 x i32>
+ %vcvt.i = uitofp <2 x i32> %a to <2 x float>
+ ret <2 x float> %vcvt.i
+; CHECK: @test63
+; CHECK: bitcast
+; CHECK: uitofp
+}
+
+define <4 x float> @test64(<4 x float> %c) nounwind {
+ %t0 = bitcast <4 x float> %c to <4 x i32>
+ %t1 = bitcast <4 x i32> %t0 to <4 x float>
+ ret <4 x float> %t1
+; CHECK: @test64
+; CHECK-NEXT: ret <4 x float> %c
+}
+
+define <4 x float> @test65(<4 x float> %c) nounwind {
+ %t0 = bitcast <4 x float> %c to <2 x double>
+ %t1 = bitcast <2 x double> %t0 to <4 x float>
+ ret <4 x float> %t1
+; CHECK: @test65
+; CHECK-NEXT: ret <4 x float> %c
+}
+
+define <2 x float> @test66(<2 x float> %c) nounwind {
+ %t0 = bitcast <2 x float> %c to double
+ %t1 = bitcast double %t0 to <2 x float>
+ ret <2 x float> %t1
+; CHECK: @test66
+; CHECK-NEXT: ret <2 x float> %c
+}
+
+define float @test2c() {
+ ret float extractelement (<2 x float> bitcast (double bitcast (<2 x float> <float -1.000000e+00, float -1.000000e+00> to double) to <2 x float>), i32 0)
+; CHECK: @test2c
+; CHECK-NOT: extractelement
+}
+
+define i64 @test_mmx(<2 x i32> %c) nounwind {
+ %A = bitcast <2 x i32> %c to x86_mmx
+ %B = bitcast x86_mmx %A to <2 x i32>
+ %C = bitcast <2 x i32> %B to i64
+ ret i64 %C
+; CHECK: @test_mmx
+; CHECK-NOT: x86_mmx
+}
+
+define i64 @test_mmx_const(<2 x i32> %c) nounwind {
+ %A = bitcast <2 x i32> zeroinitializer to x86_mmx
+ %B = bitcast x86_mmx %A to <2 x i32>
+ %C = bitcast <2 x i32> %B to i64
+ ret i64 %C
+; CHECK: @test_mmx_const
+; CHECK-NOT: x86_mmx
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/cast_ptr.ll b/src/LLVM/test/Transforms/InstCombine/cast_ptr.ll
new file mode 100644
index 0000000..b524511
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/cast_ptr.ll
@@ -0,0 +1,79 @@
+; Tests to make sure elimination of casts is working correctly
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "p:32:32"
+
+; This shouldn't convert to getelementptr because the relationship
+; between the arithmetic and the layout of allocated memory is
+; entirely unknown.
+; CHECK: @test1
+; CHECK: ptrtoint
+; CHECK: add
+; CHECK: inttoptr
+define i8* @test1(i8* %t) {
+ %tmpc = ptrtoint i8* %t to i32 ; <i32> [#uses=1]
+ %tmpa = add i32 %tmpc, 32 ; <i32> [#uses=1]
+ %tv = inttoptr i32 %tmpa to i8* ; <i8*> [#uses=1]
+ ret i8* %tv
+}
+
+; These casts should be folded away.
+; CHECK: @test2
+; CHECK: icmp eq i8* %a, %b
+define i1 @test2(i8* %a, i8* %b) {
+ %tmpa = ptrtoint i8* %a to i32 ; <i32> [#uses=1]
+ %tmpb = ptrtoint i8* %b to i32 ; <i32> [#uses=1]
+ %r = icmp eq i32 %tmpa, %tmpb ; <i1> [#uses=1]
+ ret i1 %r
+}
+
+; These casts should also be folded away.
+; CHECK: @test3
+; CHECK: icmp eq i8* %a, @global
+@global = global i8 0
+define i1 @test3(i8* %a) {
+ %tmpa = ptrtoint i8* %a to i32
+ %r = icmp eq i32 %tmpa, ptrtoint (i8* @global to i32)
+ ret i1 %r
+}
+
+define i1 @test4(i32 %A) {
+ %B = inttoptr i32 %A to i8*
+ %C = icmp eq i8* %B, null
+ ret i1 %C
+; CHECK: @test4
+; CHECK-NEXT: %C = icmp eq i32 %A, 0
+; CHECK-NEXT: ret i1 %C
+}
+
+
+; Pulling the cast out of the load allows us to eliminate the load, and then
+; the whole array.
+
+ %op = type { float }
+ %unop = type { i32 }
+@Array = internal constant [1 x %op* (%op*)*] [ %op* (%op*)* @foo ] ; <[1 x %op* (%op*)*]*> [#uses=1]
+
+declare %op* @foo(%op* %X)
+
+define %unop* @test5(%op* %O) {
+ %tmp = load %unop* (%op*)** bitcast ([1 x %op* (%op*)*]* @Array to %unop* (%op*)**); <%unop* (%op*)*> [#uses=1]
+ %tmp.2 = call %unop* %tmp( %op* %O ) ; <%unop*> [#uses=1]
+ ret %unop* %tmp.2
+; CHECK: @test5
+; CHECK: call %op* @foo(%op* %O)
+}
+
+
+
+; InstCombine can not 'load (cast P)' -> cast (load P)' if the cast changes
+; the address space.
+
+define i8 @test6(i8 addrspace(1)* %source) {
+entry:
+ %arrayidx223 = bitcast i8 addrspace(1)* %source to i8*
+ %tmp4 = load i8* %arrayidx223
+ ret i8 %tmp4
+; CHECK: @test6
+; CHECK: load i8* %arrayidx223
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/compare-signs.ll b/src/LLVM/test/Transforms/InstCombine/compare-signs.ll
new file mode 100644
index 0000000..f8e4911
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/compare-signs.ll
@@ -0,0 +1,58 @@
+; RUN: opt %s -instcombine -S | FileCheck %s
+; PR5438
+
+; TODO: This should also optimize down.
+;define i32 @test1(i32 %a, i32 %b) nounwind readnone {
+;entry:
+; %0 = icmp sgt i32 %a, -1 ; <i1> [#uses=1]
+; %1 = icmp slt i32 %b, 0 ; <i1> [#uses=1]
+; %2 = xor i1 %1, %0 ; <i1> [#uses=1]
+; %3 = zext i1 %2 to i32 ; <i32> [#uses=1]
+; ret i32 %3
+;}
+
+; TODO: This optimizes partially but not all the way.
+;define i32 @test2(i32 %a, i32 %b) nounwind readnone {
+;entry:
+; %0 = and i32 %a, 8 ;<i32> [#uses=1]
+; %1 = and i32 %b, 8 ;<i32> [#uses=1]
+; %2 = icmp eq i32 %0, %1 ;<i1> [#uses=1]
+; %3 = zext i1 %2 to i32 ;<i32> [#uses=1]
+; ret i32 %3
+;}
+
+define i32 @test3(i32 %a, i32 %b) nounwind readnone {
+; CHECK: @test3
+entry:
+; CHECK: xor i32 %a, %b
+; CHECK: lshr i32 %0, 31
+; CHECK: xor i32 %1, 1
+ %0 = lshr i32 %a, 31 ; <i32> [#uses=1]
+ %1 = lshr i32 %b, 31 ; <i32> [#uses=1]
+ %2 = icmp eq i32 %0, %1 ; <i1> [#uses=1]
+ %3 = zext i1 %2 to i32 ; <i32> [#uses=1]
+ ret i32 %3
+; CHECK-NOT: icmp
+; CHECK-NOT: zext
+; CHECK: ret i32 %2
+}
+
+; Variation on @test3: checking the 2nd bit in a situation where the 5th bit
+; is one, not zero.
+define i32 @test3i(i32 %a, i32 %b) nounwind readnone {
+; CHECK: @test3i
+entry:
+; CHECK: xor i32 %a, %b
+; CHECK: lshr i32 %0, 31
+; CHECK: xor i32 %1, 1
+ %0 = lshr i32 %a, 29 ; <i32> [#uses=1]
+ %1 = lshr i32 %b, 29 ; <i32> [#uses=1]
+ %2 = or i32 %0, 35
+ %3 = or i32 %1, 35
+ %4 = icmp eq i32 %2, %3 ; <i1> [#uses=1]
+ %5 = zext i1 %4 to i32 ; <i32> [#uses=1]
+ ret i32 %5
+; CHECK-NOT: icmp
+; CHECK-NOT: zext
+; CHECK: ret i32 %2
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/constant-fold-compare.ll b/src/LLVM/test/Transforms/InstCombine/constant-fold-compare.ll
new file mode 100644
index 0000000..6e41e2f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/constant-fold-compare.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
+
+define i32 @a() nounwind readnone {
+entry:
+ ret i32 zext (i1 icmp eq (i32 0, i32 ptrtoint (i32 ()* @a to i32)) to i32)
+}
+; CHECK: ret i32 0
diff --git a/src/LLVM/test/Transforms/InstCombine/constant-fold-gep.ll b/src/LLVM/test/Transforms/InstCombine/constant-fold-gep.ll
new file mode 100644
index 0000000..c679226
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/constant-fold-gep.ll
@@ -0,0 +1,74 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
+
+; Constant folding should fix notionally out-of-bounds indices
+; and add inbounds keywords.
+
+%struct.X = type { [3 x i32], [3 x i32] }
+
+@Y = internal global [3 x %struct.X] zeroinitializer
+
+define void @frob() {
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 0), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 0), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 1), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 1), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 2), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 2), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 0), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 3), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 1), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 4), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 2), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 5), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 1, i32 0, i64 0), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 6), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 1, i32 0, i64 1), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 7), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 1, i32 0, i64 2), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 8), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 0), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 9), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 1), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 10), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 2), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 11), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 2, i32 0, i64 0), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 12), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 2, i32 0, i64 1), align 4
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 13), align 4
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 2, i32 0, i64 2), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 14), align 8
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 0), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 15), align 8
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 1), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 16), align 8
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 2), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 17), align 8
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 1, i64 0, i32 0, i64 0), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 18), align 8
+; CHECK: store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 2, i64 0, i32 0, i64 0), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 36), align 8
+; CHECK: store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 1, i64 0, i32 0, i64 1), align 8
+ store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 19), align 8
+ ret void
+}
+
+
+; PR8883 - Constant fold exotic gep subtract
+; CHECK: @test2
+@X = global [1000 x i8] zeroinitializer, align 16
+
+define i64 @test2() {
+entry:
+ %A = bitcast i8* getelementptr inbounds ([1000 x i8]* @X, i64 1, i64 0) to i8*
+ %B = bitcast i8* getelementptr inbounds ([1000 x i8]* @X, i64 0, i64 0) to i8*
+
+ %B2 = ptrtoint i8* %B to i64
+ %C = sub i64 0, %B2
+ %D = getelementptr i8* %A, i64 %C
+ %E = ptrtoint i8* %D to i64
+
+ ret i64 %E
+ ; CHECK: ret i64 1000
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/crash.ll b/src/LLVM/test/Transforms/InstCombine/crash.ll
new file mode 100644
index 0000000..54a77aa
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/crash.ll
@@ -0,0 +1,401 @@
+; RUN: opt < %s -instcombine -S
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128:n8:16:32"
+target triple = "i386-apple-darwin10.0"
+
+define i32 @test0(i8 %tmp2) ssp {
+entry:
+ %tmp3 = zext i8 %tmp2 to i32
+ %tmp8 = lshr i32 %tmp3, 6
+ %tmp9 = lshr i32 %tmp3, 7
+ %tmp10 = xor i32 %tmp9, 67108858
+ %tmp11 = xor i32 %tmp10, %tmp8
+ %tmp12 = xor i32 %tmp11, 0
+ ret i32 %tmp12
+}
+
+; PR4905
+define <2 x i64> @test1(<2 x i64> %x, <2 x i64> %y) nounwind {
+entry:
+ %conv.i94 = bitcast <2 x i64> %y to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %sub.i97 = sub <4 x i32> %conv.i94, undef ; <<4 x i32>> [#uses=1]
+ %conv3.i98 = bitcast <4 x i32> %sub.i97 to <2 x i64> ; <<2 x i64>> [#uses=2]
+ %conv2.i86 = bitcast <2 x i64> %conv3.i98 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %cmp.i87 = icmp sgt <4 x i32> undef, %conv2.i86 ; <<4 x i1>> [#uses=1]
+ %sext.i88 = sext <4 x i1> %cmp.i87 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %conv3.i89 = bitcast <4 x i32> %sext.i88 to <2 x i64> ; <<2 x i64>> [#uses=1]
+ %and.i = and <2 x i64> %conv3.i89, %conv3.i98 ; <<2 x i64>> [#uses=1]
+ %or.i = or <2 x i64> zeroinitializer, %and.i ; <<2 x i64>> [#uses=1]
+ %conv2.i43 = bitcast <2 x i64> %or.i to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %sub.i = sub <4 x i32> zeroinitializer, %conv2.i43 ; <<4 x i32>> [#uses=1]
+ %conv3.i44 = bitcast <4 x i32> %sub.i to <2 x i64> ; <<2 x i64>> [#uses=1]
+ ret <2 x i64> %conv3.i44
+}
+
+
+; PR4908
+define void @test2(<1 x i16>* nocapture %b, i32* nocapture %c) nounwind ssp {
+entry:
+ %arrayidx = getelementptr inbounds <1 x i16>* %b, i64 undef ; <<1 x i16>*>
+ %tmp2 = load <1 x i16>* %arrayidx ; <<1 x i16>> [#uses=1]
+ %tmp6 = bitcast <1 x i16> %tmp2 to i16 ; <i16> [#uses=1]
+ %tmp7 = zext i16 %tmp6 to i32 ; <i32> [#uses=1]
+ %ins = or i32 0, %tmp7 ; <i32> [#uses=1]
+ %arrayidx20 = getelementptr inbounds i32* %c, i64 undef ; <i32*> [#uses=1]
+ store i32 %ins, i32* %arrayidx20
+ ret void
+}
+
+; PR5262
+@tmp2 = global i64 0 ; <i64*> [#uses=1]
+
+declare void @use(i64) nounwind
+
+define void @foo(i1) nounwind align 2 {
+; <label>:1
+ br i1 %0, label %2, label %3
+
+; <label>:2 ; preds = %1
+ br label %3
+
+; <label>:3 ; preds = %2, %1
+ %4 = phi i8 [ 1, %2 ], [ 0, %1 ] ; <i8> [#uses=1]
+ %5 = icmp eq i8 %4, 0 ; <i1> [#uses=1]
+ %6 = load i64* @tmp2, align 8 ; <i64> [#uses=1]
+ %7 = select i1 %5, i64 0, i64 %6 ; <i64> [#uses=1]
+ br label %8
+
+; <label>:8 ; preds = %3
+ call void @use(i64 %7)
+ ret void
+}
+
+%t0 = type { i32, i32 }
+%t1 = type { i32, i32, i32, i32, i32* }
+
+declare %t0* @bar2(i64)
+
+define void @bar3(i1, i1) nounwind align 2 {
+; <label>:2
+ br i1 %1, label %10, label %3
+
+; <label>:3 ; preds = %2
+ %4 = getelementptr inbounds %t0* null, i64 0, i32 1 ; <i32*> [#uses=0]
+ %5 = getelementptr inbounds %t1* null, i64 0, i32 4 ; <i32**> [#uses=1]
+ %6 = load i32** %5, align 8 ; <i32*> [#uses=1]
+ %7 = icmp ne i32* %6, null ; <i1> [#uses=1]
+ %8 = zext i1 %7 to i32 ; <i32> [#uses=1]
+ %9 = add i32 %8, 0 ; <i32> [#uses=1]
+ br label %10
+
+; <label>:10 ; preds = %3, %2
+ %11 = phi i32 [ %9, %3 ], [ 0, %2 ] ; <i32> [#uses=1]
+ br i1 %1, label %12, label %13
+
+; <label>:12 ; preds = %10
+ br label %13
+
+; <label>:13 ; preds = %12, %10
+ %14 = zext i32 %11 to i64 ; <i64> [#uses=1]
+ %15 = tail call %t0* @bar2(i64 %14) nounwind ; <%0*> [#uses=0]
+ ret void
+}
+
+
+
+
+; PR5262
+; Make sure the PHI node gets put in a place where all of its operands dominate
+; it.
+define i64 @test4(i1 %c, i64* %P) nounwind align 2 {
+BB0:
+ br i1 %c, label %BB1, label %BB2
+
+BB1:
+ br label %BB2
+
+BB2:
+ %v5_ = phi i1 [ true, %BB0], [false, %BB1]
+ %v6 = load i64* %P
+ br label %l8
+
+l8:
+ br label %l10
+
+l10:
+ %v11 = select i1 %v5_, i64 0, i64 %v6
+ ret i64 %v11
+}
+
+; PR5471
+define i32 @test5a() {
+ ret i32 0
+}
+
+define void @test5() {
+ store i1 true, i1* undef
+ %1 = invoke i32 @test5a() to label %exit unwind label %exit
+exit:
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ cleanup
+ ret void
+}
+
+
+; PR5673
+
+@test6g = external global i32*
+
+define arm_aapcs_vfpcc i32 @test6(i32 %argc, i8** %argv) nounwind {
+entry:
+ store i32* getelementptr (i32* bitcast (i32 (i32, i8**)* @test6 to i32*), i32 -2048), i32** @test6g, align 4
+ unreachable
+}
+
+
+; PR5827
+
+%class.RuleBasedBreakIterator = type { i64 ()* }
+%class.UStack = type { i8** }
+
+define i32 @_ZN22RuleBasedBreakIterator15checkDictionaryEi(%class.RuleBasedBreakIterator* %this, i32 %x) align 2 {
+entry:
+ %breaks = alloca %class.UStack, align 4 ; <%class.UStack*> [#uses=3]
+ call void @_ZN6UStackC1Ei(%class.UStack* %breaks, i32 0)
+ %tobool = icmp ne i32 %x, 0 ; <i1> [#uses=1]
+ br i1 %tobool, label %cond.end, label %cond.false
+
+terminate.handler: ; preds = %ehcleanup
+ %exc = call i8* @llvm.eh.exception() ; <i8*> [#uses=1]
+ %0 = call i32 (i8*, i8*, ...)* @llvm.eh.selector(i8* %exc, i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*), i32 1) ; <i32> [#uses=0]
+ call void @_ZSt9terminatev() noreturn nounwind
+ unreachable
+
+ehcleanup: ; preds = %cond.false
+ %exc1 = call i8* @llvm.eh.exception() ; <i8*> [#uses=2]
+ %1 = call i32 (i8*, i8*, ...)* @llvm.eh.selector(i8* %exc1, i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*), i8* null) ; <i32> [#uses=0]
+ invoke void @_ZN6UStackD1Ev(%class.UStack* %breaks)
+ to label %cont unwind label %terminate.handler
+
+cont: ; preds = %ehcleanup
+ call void @_Unwind_Resume_or_Rethrow(i8* %exc1)
+ unreachable
+
+cond.false: ; preds = %entry
+ %tmp4 = getelementptr inbounds %class.RuleBasedBreakIterator* %this, i32 0, i32 0 ; <i64 ()**> [#uses=1]
+ %tmp5 = load i64 ()** %tmp4 ; <i64 ()*> [#uses=1]
+ %call = invoke i64 %tmp5()
+ to label %cond.end unwind label %ehcleanup ; <i64> [#uses=1]
+
+cond.end: ; preds = %cond.false, %entry
+ %cond = phi i64 [ 0, %entry ], [ %call, %cond.false ] ; <i64> [#uses=1]
+ %conv = trunc i64 %cond to i32 ; <i32> [#uses=1]
+ call void @_ZN6UStackD1Ev(%class.UStack* %breaks)
+ ret i32 %conv
+}
+
+declare void @_ZN6UStackC1Ei(%class.UStack*, i32)
+
+declare void @_ZN6UStackD1Ev(%class.UStack*)
+
+declare i32 @__gxx_personality_v0(...)
+
+declare i8* @llvm.eh.exception() nounwind readonly
+
+declare i32 @llvm.eh.selector(i8*, i8*, ...) nounwind
+
+declare void @_ZSt9terminatev()
+
+declare void @_Unwind_Resume_or_Rethrow(i8*)
+
+
+
+; rdar://7590304
+define i8* @test10(i8* %self, i8* %tmp3) {
+entry:
+ store i1 true, i1* undef
+ store i1 true, i1* undef
+ invoke void @test10a()
+ to label %invoke.cont unwind label %try.handler ; <i8*> [#uses=0]
+
+invoke.cont: ; preds = %entry
+ unreachable
+
+try.handler: ; preds = %entry
+ %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
+ catch i8* null
+ ret i8* %self
+}
+
+define void @test10a() {
+ ret void
+}
+
+
+; PR6193
+define i32 @test11(i32 %aMaskWidth, i8 %aStride) nounwind {
+entry:
+ %conv41 = sext i8 %aStride to i32
+ %neg = xor i32 %conv41, -1
+ %and42 = and i32 %aMaskWidth, %neg
+ %and47 = and i32 130, %conv41
+ %or = or i32 %and42, %and47
+ ret i32 %or
+}
+
+; PR6503
+define void @test12(i32* %A) nounwind {
+entry:
+ %tmp1 = load i32* %A
+ %cmp = icmp ugt i32 1, %tmp1 ; <i1> [#uses=1]
+ %conv = zext i1 %cmp to i32 ; <i32> [#uses=1]
+ %tmp2 = load i32* %A
+ %cmp3 = icmp ne i32 %tmp2, 0 ; <i1> [#uses=1]
+ %conv4 = zext i1 %cmp3 to i32 ; <i32> [#uses=1]
+ %or = or i32 %conv, %conv4 ; <i32> [#uses=1]
+ %cmp5 = icmp ugt i32 undef, %or ; <i1> [#uses=1]
+ %conv6 = zext i1 %cmp5 to i32 ; <i32> [#uses=0]
+ ret void
+}
+
+%s1 = type { %s2, %s2, [6 x %s2], i32, i32, i32, [1 x i32], [0 x i8] }
+%s2 = type { i64 }
+define void @test13() nounwind ssp {
+entry:
+ %0 = getelementptr inbounds %s1* null, i64 0, i32 2, i64 0, i32 0
+ %1 = bitcast i64* %0 to i32*
+ %2 = getelementptr inbounds %s1* null, i64 0, i32 2, i64 1, i32 0
+ %.pre = load i32* %1, align 8
+ %3 = lshr i32 %.pre, 19
+ %brmerge = or i1 undef, undef
+ %4 = and i32 %3, 3
+ %5 = add nsw i32 %4, 1
+ %6 = shl i32 %5, 19
+ %7 = add i32 %6, 1572864
+ %8 = and i32 %7, 1572864
+ %9 = load i64* %2, align 8
+ %trunc156 = trunc i64 %9 to i32
+ %10 = and i32 %trunc156, -1537
+ %11 = and i32 %10, -6145
+ %12 = or i32 %11, 2048
+ %13 = and i32 %12, -24577
+ %14 = or i32 %13, 16384
+ %15 = or i32 %14, 98304
+ store i32 %15, i32* undef, align 8
+ %16 = and i32 %15, -1572865
+ %17 = or i32 %16, %8
+ store i32 %17, i32* undef, align 8
+ %18 = and i32 %17, -449
+ %19 = or i32 %18, 64
+ store i32 %19, i32* undef, align 8
+ unreachable
+}
+
+
+; PR8807
+declare i32 @test14f(i8* (i8*)*) nounwind
+
+define void @test14() nounwind readnone {
+entry:
+ %tmp = bitcast i32 (i8* (i8*)*)* @test14f to i32 (i32*)*
+ %call10 = call i32 %tmp(i32* byval undef)
+ ret void
+}
+
+
+; PR8896
+@g_54 = external global [7 x i16]
+
+define void @test15(i32* %p_92) nounwind {
+entry:
+%0 = load i32* %p_92, align 4
+%1 = icmp ne i32 %0, 0
+%2 = zext i1 %1 to i32
+%3 = call i32 @func_14() nounwind
+%4 = trunc i32 %3 to i16
+%5 = sext i16 %4 to i32
+%6 = trunc i32 %5 to i16
+br i1 undef, label %"3", label %"5"
+
+"3": ; preds = %entry
+%7 = sext i16 %6 to i32
+%8 = ashr i32 %7, -1649554541
+%9 = trunc i32 %8 to i16
+br label %"5"
+
+"5": ; preds = %"3", %entry
+%10 = phi i16 [ %9, %"3" ], [ %6, %entry ]
+%11 = sext i16 %10 to i32
+%12 = xor i32 %2, %11
+%13 = sext i32 %12 to i64
+%14 = icmp ne i64 %13, 0
+br i1 %14, label %return, label %"7"
+
+"7": ; preds = %"5"
+ret void
+
+return: ; preds = %"5"
+ret void
+}
+
+declare i32 @func_14()
+
+
+define double @test16(i32 %a) nounwind {
+ %cmp = icmp slt i32 %a, 2
+ %select = select i1 %cmp, double 2.000000e+00, double 3.141592e+00
+ ret double %select
+}
+
+
+; PR8983
+%struct.basic_ios = type { i8 }
+
+define %struct.basic_ios *@test17() ssp {
+entry:
+ %add.ptr.i = getelementptr i8* null, i64 undef
+ %0 = bitcast i8* %add.ptr.i to %struct.basic_ios*
+ ret %struct.basic_ios* %0
+}
+
+; PR9013
+define void @test18() nounwind ssp {
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %l_197.0 = phi i32 [ 0, %entry ], [ %sub.i, %for.inc ]
+ br label %for.inc
+
+for.inc: ; preds = %for.cond
+ %conv = and i32 %l_197.0, 255
+ %sub.i = add nsw i32 %conv, -1
+ br label %for.cond
+
+return: ; No predecessors!
+ ret void
+}
+
+; PR11275
+declare void @test18b() noreturn
+declare void @test18foo(double**)
+declare void @test18a() noreturn
+define fastcc void @test18x(i8* %t0, i1 %b) uwtable align 2 {
+entry:
+ br i1 %b, label %e1, label %e2
+e1:
+ %t2 = bitcast i8* %t0 to double**
+ invoke void @test18b() noreturn
+ to label %u unwind label %lpad
+e2:
+ %t4 = bitcast i8* %t0 to double**
+ invoke void @test18a() noreturn
+ to label %u unwind label %lpad
+lpad:
+ %t5 = phi double** [ %t2, %e1 ], [ %t4, %e2 ]
+ %lpad.nonloopexit262 = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ cleanup
+ call void @test18foo(double** %t5)
+ unreachable
+u:
+ unreachable
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/dce-iterate.ll b/src/LLVM/test/Transforms/InstCombine/dce-iterate.ll
new file mode 100644
index 0000000..1d2cc53
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/dce-iterate.ll
@@ -0,0 +1,24 @@
+; RUN: opt < %s -instcombine -S | grep {ret double .sy}
+
+define internal double @ScaleObjectAdd(double %sx, double %sy, double %sz) nounwind {
+entry:
+ %sx34 = bitcast double %sx to i64 ; <i64> [#uses=1]
+ %sx3435 = zext i64 %sx34 to i960 ; <i960> [#uses=1]
+ %sy22 = bitcast double %sy to i64 ; <i64> [#uses=1]
+ %sy2223 = zext i64 %sy22 to i960 ; <i960> [#uses=1]
+ %sy222324 = shl i960 %sy2223, 320 ; <i960> [#uses=1]
+ %sy222324.ins = or i960 %sx3435, %sy222324 ; <i960> [#uses=1]
+ %sz10 = bitcast double %sz to i64 ; <i64> [#uses=1]
+ %sz1011 = zext i64 %sz10 to i960 ; <i960> [#uses=1]
+ %sz101112 = shl i960 %sz1011, 640 ; <i960> [#uses=1]
+ %sz101112.ins = or i960 %sy222324.ins, %sz101112
+
+ %a = trunc i960 %sz101112.ins to i64 ; <i64> [#uses=1]
+ %b = bitcast i64 %a to double ; <double> [#uses=1]
+ %c = lshr i960 %sz101112.ins, 320 ; <i960> [#uses=1]
+ %d = trunc i960 %c to i64 ; <i64> [#uses=1]
+ %e = bitcast i64 %d to double ; <double> [#uses=1]
+ %f = fadd double %b, %e
+
+ ret double %e
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/deadcode.ll b/src/LLVM/test/Transforms/InstCombine/deadcode.ll
new file mode 100644
index 0000000..f6620c7
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/deadcode.ll
@@ -0,0 +1,33 @@
+; RUN: opt < %s -instcombine -S | grep {ret i32 %A}
+; RUN: opt < %s -die -S | not grep call.*llvm
+
+define i32 @test(i32 %A) {
+ %X = or i1 false, false
+ br i1 %X, label %T, label %C
+
+T: ; preds = %0
+ %B = add i32 %A, 1
+ br label %C
+
+C: ; preds = %T, %0
+ %C.upgrd.1 = phi i32 [ %B, %T ], [ %A, %0 ]
+ ret i32 %C.upgrd.1
+}
+
+define i32* @test2(i32 %width) {
+ %tmp = call i8* @llvm.stacksave( )
+ %tmp14 = alloca i32, i32 %width
+ ret i32* %tmp14
+}
+
+declare i8* @llvm.stacksave()
+
+declare void @llvm.lifetime.start(i64, i8*)
+declare void @llvm.lifetime.end(i64, i8*)
+
+define void @test3() {
+ call void @llvm.lifetime.start(i64 -1, i8* undef)
+ call void @llvm.lifetime.end(i64 -1, i8* undef)
+ ret void
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/debuginfo.ll b/src/LLVM/test/Transforms/InstCombine/debuginfo.ll
new file mode 100644
index 0000000..f6892fc
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/debuginfo.ll
@@ -0,0 +1,57 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
+
+declare i64 @llvm.objectsize.i64(i8*, i1) nounwind readnone
+
+declare i8* @foo(i8*, i32, i64, i64) nounwind
+
+define hidden i8* @foobar(i8* %__dest, i32 %__val, i64 %__len) nounwind inlinehint ssp {
+entry:
+ %__dest.addr = alloca i8*, align 8
+ %__val.addr = alloca i32, align 4
+ %__len.addr = alloca i64, align 8
+ store i8* %__dest, i8** %__dest.addr, align 8, !tbaa !1
+; CHECK-NOT: call void @llvm.dbg.declare
+; CHECK: call void @llvm.dbg.value
+ call void @llvm.dbg.declare(metadata !{i8** %__dest.addr}, metadata !0), !dbg !16
+ store i32 %__val, i32* %__val.addr, align 4, !tbaa !17
+ call void @llvm.dbg.declare(metadata !{i32* %__val.addr}, metadata !7), !dbg !18
+ store i64 %__len, i64* %__len.addr, align 8, !tbaa !19
+ call void @llvm.dbg.declare(metadata !{i64* %__len.addr}, metadata !9), !dbg !20
+ %tmp = load i8** %__dest.addr, align 8, !dbg !21, !tbaa !13
+ %tmp1 = load i32* %__val.addr, align 4, !dbg !21, !tbaa !17
+ %tmp2 = load i64* %__len.addr, align 8, !dbg !21, !tbaa !19
+ %tmp3 = load i8** %__dest.addr, align 8, !dbg !21, !tbaa !13
+ %0 = call i64 @llvm.objectsize.i64(i8* %tmp3, i1 false), !dbg !21
+ %call = call i8* @foo(i8* %tmp, i32 %tmp1, i64 %tmp2, i64 %0), !dbg !21
+ ret i8* %call, !dbg !21
+}
+
+!llvm.dbg.lv.foobar = !{!0, !7, !9}
+!llvm.dbg.sp = !{!1}
+
+!0 = metadata !{i32 590081, metadata !1, metadata !"__dest", metadata !2, i32 16777294, metadata !6, i32 0} ; [ DW_TAG_arg_variable ]
+!1 = metadata !{i32 589870, i32 0, metadata !2, metadata !"foobar", metadata !"foobar", metadata !"", metadata !2, i32 79, metadata !4, i1 true, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i8* (i8*, i32, i64)* @foobar} ; [ DW_TAG_subprogram ]
+!2 = metadata !{i32 589865, metadata !"string.h", metadata !"Game", metadata !3} ; [ DW_TAG_file_type ]
+!3 = metadata !{i32 589841, i32 0, i32 12, metadata !"bits.c", metadata !"Game", metadata !"clang version 3.0 (trunk 127710)", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
+!4 = metadata !{i32 589845, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !5, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!5 = metadata !{metadata !6}
+!6 = metadata !{i32 589839, metadata !3, metadata !"", null, i32 0, i64 64, i64 64, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ]
+!7 = metadata !{i32 590081, metadata !1, metadata !"__val", metadata !2, i32 33554510, metadata !8, i32 0} ; [ DW_TAG_arg_variable ]
+!8 = metadata !{i32 589860, metadata !3, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
+!9 = metadata !{i32 590081, metadata !1, metadata !"__len", metadata !2, i32 50331726, metadata !10, i32 0} ; [ DW_TAG_arg_variable ]
+!10 = metadata !{i32 589846, metadata !3, metadata !"size_t", metadata !2, i32 80, i64 0, i64 0, i64 0, i32 0, metadata !11} ; [ DW_TAG_typedef ]
+!11 = metadata !{i32 589846, metadata !3, metadata !"__darwin_size_t", metadata !2, i32 90, i64 0, i64 0, i64 0, i32 0, metadata !12} ; [ DW_TAG_typedef ]
+!12 = metadata !{i32 589860, metadata !3, metadata !"long unsigned int", null, i32 0, i64 64, i64 64, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ]
+!13 = metadata !{metadata !"any pointer", metadata !14}
+!14 = metadata !{metadata !"omnipotent char", metadata !15}
+!15 = metadata !{metadata !"Simple C/C++ TBAA", null}
+!16 = metadata !{i32 78, i32 28, metadata !1, null}
+!17 = metadata !{metadata !"int", metadata !14}
+!18 = metadata !{i32 78, i32 40, metadata !1, null}
+!19 = metadata !{metadata !"long", metadata !14}
+!20 = metadata !{i32 78, i32 54, metadata !1, null}
+!21 = metadata !{i32 80, i32 3, metadata !22, null}
+!22 = metadata !{i32 589835, metadata !23, i32 80, i32 3, metadata !2, i32 7} ; [ DW_TAG_lexical_block ]
+!23 = metadata !{i32 589835, metadata !1, i32 79, i32 1, metadata !2, i32 6} ; [ DW_TAG_lexical_block ]
diff --git a/src/LLVM/test/Transforms/InstCombine/devirt.ll b/src/LLVM/test/Transforms/InstCombine/devirt.ll
new file mode 100644
index 0000000..6189dc2
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/devirt.ll
@@ -0,0 +1,39 @@
+; RUN: opt -instcombine -S -o - %s | FileCheck %s
+
+; CHECK-NOT: getelementptr
+; CHECK-NOT: ptrtoint
+; CHECK: bitcast i8*
+%struct.S = type { i32 (...)** }
+
+@_ZL1p = internal constant { i64, i64 } { i64 1, i64 0 }, align 8
+
+define void @_Z1g1S(%struct.S* %s) nounwind {
+entry:
+ %tmp = load { i64, i64 }* @_ZL1p, align 8
+ %memptr.adj = extractvalue { i64, i64 } %tmp, 1
+ %0 = bitcast %struct.S* %s to i8*
+ %1 = getelementptr inbounds i8* %0, i64 %memptr.adj
+ %this.adjusted = bitcast i8* %1 to %struct.S*
+ %memptr.ptr = extractvalue { i64, i64 } %tmp, 0
+ %2 = and i64 %memptr.ptr, 1
+ %memptr.isvirtual = icmp ne i64 %2, 0
+ br i1 %memptr.isvirtual, label %memptr.virtual, label %memptr.nonvirtual
+
+memptr.virtual: ; preds = %entry
+ %3 = bitcast %struct.S* %this.adjusted to i8**
+ %memptr.vtable = load i8** %3
+ %4 = sub i64 %memptr.ptr, 1
+ %5 = getelementptr i8* %memptr.vtable, i64 %4
+ %6 = bitcast i8* %5 to void (%struct.S*)**
+ %memptr.virtualfn = load void (%struct.S*)** %6
+ br label %memptr.end
+
+memptr.nonvirtual: ; preds = %entry
+ %memptr.nonvirtualfn = inttoptr i64 %memptr.ptr to void (%struct.S*)*
+ br label %memptr.end
+
+memptr.end: ; preds = %memptr.nonvirtual, %memptr.virtual
+ %7 = phi void (%struct.S*)* [ %memptr.virtualfn, %memptr.virtual ], [ %memptr.nonvirtualfn, %memptr.nonvirtual ]
+ call void %7(%struct.S* %this.adjusted)
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/dg.exp b/src/LLVM/test/Transforms/InstCombine/dg.exp
new file mode 100644
index 0000000..f2e8f3b
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/dg.exp
@@ -0,0 +1,3 @@
+load_lib llvm.exp
+
+RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
diff --git a/src/LLVM/test/Transforms/InstCombine/div.ll b/src/LLVM/test/Transforms/InstCombine/div.ll
new file mode 100644
index 0000000..9933325
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/div.ll
@@ -0,0 +1,134 @@
+; This test makes sure that div instructions are properly eliminated.
+
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @test1(i32 %A) {
+ %B = sdiv i32 %A, 1 ; <i32> [#uses=1]
+ ret i32 %B
+; CHECK: @test1
+; CHECK-NEXT: ret i32 %A
+}
+
+define i32 @test2(i32 %A) {
+ ; => Shift
+ %B = udiv i32 %A, 8 ; <i32> [#uses=1]
+ ret i32 %B
+; CHECK: @test2
+; CHECK-NEXT: lshr i32 %A, 3
+}
+
+define i32 @test3(i32 %A) {
+ ; => 0, don't need to keep traps
+ %B = sdiv i32 0, %A ; <i32> [#uses=1]
+ ret i32 %B
+; CHECK: @test3
+; CHECK-NEXT: ret i32 0
+}
+
+define i32 @test4(i32 %A) {
+ ; 0-A
+ %B = sdiv i32 %A, -1 ; <i32> [#uses=1]
+ ret i32 %B
+; CHECK: @test4
+; CHECK-NEXT: sub i32 0, %A
+}
+
+define i32 @test5(i32 %A) {
+ %B = udiv i32 %A, -16 ; <i32> [#uses=1]
+ %C = udiv i32 %B, -4 ; <i32> [#uses=1]
+ ret i32 %C
+; CHECK: @test5
+; CHECK-NEXT: ret i32 0
+}
+
+define i1 @test6(i32 %A) {
+ %B = udiv i32 %A, 123 ; <i32> [#uses=1]
+ ; A < 123
+ %C = icmp eq i32 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: @test6
+; CHECK-NEXT: icmp ult i32 %A, 123
+}
+
+define i1 @test7(i32 %A) {
+ %B = udiv i32 %A, 10 ; <i32> [#uses=1]
+ ; A >= 20 && A < 30
+ %C = icmp eq i32 %B, 2 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: @test7
+; CHECK-NEXT: add i32 %A, -20
+; CHECK-NEXT: icmp ult i32
+}
+
+define i1 @test8(i8 %A) {
+ %B = udiv i8 %A, 123 ; <i8> [#uses=1]
+ ; A >= 246
+ %C = icmp eq i8 %B, 2 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: @test8
+; CHECK-NEXT: icmp ugt i8 %A, -11
+}
+
+define i1 @test9(i8 %A) {
+ %B = udiv i8 %A, 123 ; <i8> [#uses=1]
+ ; A < 246
+ %C = icmp ne i8 %B, 2 ; <i1> [#uses=1]
+ ret i1 %C
+; CHECK: @test9
+; CHECK-NEXT: icmp ult i8 %A, -10
+}
+
+define i32 @test10(i32 %X, i1 %C) {
+ %V = select i1 %C, i32 64, i32 8 ; <i32> [#uses=1]
+ %R = udiv i32 %X, %V ; <i32> [#uses=1]
+ ret i32 %R
+; CHECK: @test10
+; CHECK-NEXT: select i1 %C, i32 6, i32 3
+; CHECK-NEXT: lshr i32 %X
+}
+
+define i32 @test11(i32 %X, i1 %C) {
+ %A = select i1 %C, i32 1024, i32 32 ; <i32> [#uses=1]
+ %B = udiv i32 %X, %A ; <i32> [#uses=1]
+ ret i32 %B
+; CHECK: @test11
+; CHECK-NEXT: select i1 %C, i32 10, i32 5
+; CHECK-NEXT: lshr i32 %X
+}
+
+; PR2328
+define i32 @test12(i32 %x) nounwind {
+ %tmp3 = udiv i32 %x, %x ; 1
+ ret i32 %tmp3
+; CHECK: @test12
+; CHECK-NEXT: ret i32 1
+}
+
+define i32 @test13(i32 %x) nounwind {
+ %tmp3 = sdiv i32 %x, %x ; 1
+ ret i32 %tmp3
+; CHECK: @test13
+; CHECK-NEXT: ret i32 1
+}
+
+define i32 @test14(i8 %x) nounwind {
+ %zext = zext i8 %x to i32
+ %div = udiv i32 %zext, 257 ; 0
+ ret i32 %div
+; CHECK: @test14
+; CHECK-NEXT: ret i32 0
+}
+
+; PR9814
+define i32 @test15(i32 %a, i32 %b) nounwind {
+ %shl = shl i32 1, %b
+ %div = lshr i32 %shl, 2
+ %div2 = udiv i32 %a, %div
+ ret i32 %div2
+; CHECK: @test15
+; CHECK-NEXT: add i32 %b, -2
+; CHECK-NEXT: lshr i32 %a,
+; CHECK-NEXT: ret i32
+}
+
+
diff --git a/src/LLVM/test/Transforms/InstCombine/enforce-known-alignment.ll b/src/LLVM/test/Transforms/InstCombine/enforce-known-alignment.ll
new file mode 100644
index 0000000..9e9be7f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/enforce-known-alignment.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -S | grep alloca | grep {align 16}
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.6"
+
+define void @foo(i32) {
+ %2 = alloca [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>], align 16 ; <[3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>]*> [#uses=1]
+ %3 = getelementptr [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>]* %2, i32 0, i32 0 ; <<{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>*> [#uses=1]
+ %4 = getelementptr <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>* %3, i32 0, i32 0 ; <{ { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }*> [#uses=1]
+ %5 = getelementptr { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }* %4, i32 0, i32 0 ; <{ [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 }*> [#uses=1]
+ %6 = bitcast { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 }* %5 to { [8 x i16] }* ; <{ [8 x i16] }*> [#uses=1]
+ %7 = getelementptr { [8 x i16] }* %6, i32 0, i32 0 ; <[8 x i16]*> [#uses=1]
+ %8 = getelementptr [8 x i16]* %7, i32 0, i32 0 ; <i16*> [#uses=1]
+ store i16 0, i16* %8, align 16
+ call void @bar(i16* %8)
+ ret void
+}
+
+declare void @bar(i16*)
diff --git a/src/LLVM/test/Transforms/InstCombine/exact.ll b/src/LLVM/test/Transforms/InstCombine/exact.ll
new file mode 100644
index 0000000..14741e3
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/exact.ll
@@ -0,0 +1,170 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; CHECK: @sdiv1
+; CHECK: sdiv i32 %x, 8
+define i32 @sdiv1(i32 %x) {
+ %y = sdiv i32 %x, 8
+ ret i32 %y
+}
+
+; CHECK: @sdiv2
+; CHECK: ashr exact i32 %x, 3
+define i32 @sdiv2(i32 %x) {
+ %y = sdiv exact i32 %x, 8
+ ret i32 %y
+}
+
+; CHECK: @sdiv3
+; CHECK: %y = srem i32 %x, 3
+; CHECK: %z = sub i32 %x, %y
+; CHECK: ret i32 %z
+define i32 @sdiv3(i32 %x) {
+ %y = sdiv i32 %x, 3
+ %z = mul i32 %y, 3
+ ret i32 %z
+}
+
+; CHECK: @sdiv4
+; CHECK: ret i32 %x
+define i32 @sdiv4(i32 %x) {
+ %y = sdiv exact i32 %x, 3
+ %z = mul i32 %y, 3
+ ret i32 %z
+}
+
+; CHECK: i32 @sdiv5
+; CHECK: %y = srem i32 %x, 3
+; CHECK: %z = sub i32 %y, %x
+; CHECK: ret i32 %z
+define i32 @sdiv5(i32 %x) {
+ %y = sdiv i32 %x, 3
+ %z = mul i32 %y, -3
+ ret i32 %z
+}
+
+; CHECK: @sdiv6
+; CHECK: %z = sub i32 0, %x
+; CHECK: ret i32 %z
+define i32 @sdiv6(i32 %x) {
+ %y = sdiv exact i32 %x, 3
+ %z = mul i32 %y, -3
+ ret i32 %z
+}
+
+; CHECK: @udiv1
+; CHECK: ret i32 %x
+define i32 @udiv1(i32 %x, i32 %w) {
+ %y = udiv exact i32 %x, %w
+ %z = mul i32 %y, %w
+ ret i32 %z
+}
+
+; CHECK: @udiv2
+; CHECK: %z = lshr exact i32 %x, %w
+; CHECK: ret i32 %z
+define i32 @udiv2(i32 %x, i32 %w) {
+ %y = shl i32 1, %w
+ %z = udiv exact i32 %x, %y
+ ret i32 %z
+}
+
+; CHECK: @ashr1
+; CHECK: %B = ashr exact i64 %A, 2
+; CHECK: ret i64 %B
+define i64 @ashr1(i64 %X) nounwind {
+ %A = shl i64 %X, 8
+ %B = ashr i64 %A, 2 ; X/4
+ ret i64 %B
+}
+
+; PR9120
+; CHECK: @ashr_icmp1
+; CHECK: %B = icmp eq i64 %X, 0
+; CHECK: ret i1 %B
+define i1 @ashr_icmp1(i64 %X) nounwind {
+ %A = ashr exact i64 %X, 2 ; X/4
+ %B = icmp eq i64 %A, 0
+ ret i1 %B
+}
+
+; CHECK: @ashr_icmp2
+; CHECK: %Z = icmp slt i64 %X, 16
+; CHECK: ret i1 %Z
+define i1 @ashr_icmp2(i64 %X) nounwind {
+ %Y = ashr exact i64 %X, 2 ; x / 4
+ %Z = icmp slt i64 %Y, 4 ; x < 16
+ ret i1 %Z
+}
+
+; PR9998
+; Make sure we don't transform the ashr here into an sdiv
+; CHECK: @pr9998
+; CHECK: = and i32 %V, 1
+; CHECK: %Z = icmp ne
+; CHECK: ret i1 %Z
+define i1 @pr9998(i32 %V) nounwind {
+entry:
+ %W = shl i32 %V, 31
+ %X = ashr exact i32 %W, 31
+ %Y = sext i32 %X to i64
+ %Z = icmp ugt i64 %Y, 7297771788697658747
+ ret i1 %Z
+}
+
+
+; CHECK: @udiv_icmp1
+; CHECK: icmp ne i64 %X, 0
+define i1 @udiv_icmp1(i64 %X) nounwind {
+ %A = udiv exact i64 %X, 5 ; X/5
+ %B = icmp ne i64 %A, 0
+ ret i1 %B
+}
+
+; CHECK: @sdiv_icmp1
+; CHECK: icmp eq i64 %X, 0
+define i1 @sdiv_icmp1(i64 %X) nounwind {
+ %A = sdiv exact i64 %X, 5 ; X/5 == 0 --> x == 0
+ %B = icmp eq i64 %A, 0
+ ret i1 %B
+}
+
+; CHECK: @sdiv_icmp2
+; CHECK: icmp eq i64 %X, 5
+define i1 @sdiv_icmp2(i64 %X) nounwind {
+ %A = sdiv exact i64 %X, 5 ; X/5 == 1 --> x == 5
+ %B = icmp eq i64 %A, 1
+ ret i1 %B
+}
+
+; CHECK: @sdiv_icmp3
+; CHECK: icmp eq i64 %X, -5
+define i1 @sdiv_icmp3(i64 %X) nounwind {
+ %A = sdiv exact i64 %X, 5 ; X/5 == -1 --> x == -5
+ %B = icmp eq i64 %A, -1
+ ret i1 %B
+}
+
+; CHECK: @sdiv_icmp4
+; CHECK: icmp eq i64 %X, 0
+define i1 @sdiv_icmp4(i64 %X) nounwind {
+ %A = sdiv exact i64 %X, -5 ; X/-5 == 0 --> x == 0
+ %B = icmp eq i64 %A, 0
+ ret i1 %B
+}
+
+; CHECK: @sdiv_icmp5
+; CHECK: icmp eq i64 %X, -5
+define i1 @sdiv_icmp5(i64 %X) nounwind {
+ %A = sdiv exact i64 %X, -5 ; X/-5 == 1 --> x == -5
+ %B = icmp eq i64 %A, 1
+ ret i1 %B
+}
+
+; CHECK: @sdiv_icmp6
+; CHECK: icmp eq i64 %X, 5
+define i1 @sdiv_icmp6(i64 %X) nounwind {
+ %A = sdiv exact i64 %X, -5 ; X/-5 == 1 --> x == 5
+ %B = icmp eq i64 %A, -1
+ ret i1 %B
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/extractvalue.ll b/src/LLVM/test/Transforms/InstCombine/extractvalue.ll
new file mode 100644
index 0000000..cf36b8f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/extractvalue.ll
@@ -0,0 +1,107 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+declare void @bar({i32, i32} %a)
+declare i32 @baz(i32 %a)
+
+; CHECK: define i32 @foo
+; CHECK-NOT: extractvalue
+define i32 @foo(i32 %a, i32 %b) {
+; Instcombine should fold various combinations of insertvalue and extractvalue
+; together
+ ; Build a simple struct and pull values out again
+ %s1.1 = insertvalue {i32, i32} undef, i32 %a, 0
+ %s1 = insertvalue {i32, i32} %s1.1, i32 %b, 1
+ %v1 = extractvalue {i32, i32} %s1, 0
+ %v2 = extractvalue {i32, i32} %s1, 1
+
+ ; Build a nested struct and pull a sub struct out of it
+ ; This requires instcombine to insert a few insertvalue instructions
+ %ns1.1 = insertvalue {i32, {i32, i32}} undef, i32 %v1, 0
+ %ns1.2 = insertvalue {i32, {i32, i32}} %ns1.1, i32 %v1, 1, 0
+ %ns1 = insertvalue {i32, {i32, i32}} %ns1.2, i32 %v2, 1, 1
+ %s2 = extractvalue {i32, {i32, i32}} %ns1, 1
+ %v3 = extractvalue {i32, {i32, i32}} %ns1, 1, 1
+ call void @bar({i32, i32} %s2)
+
+ ; Use nested extractvalues to get to a value
+ %s3 = extractvalue {i32, {i32, i32}} %ns1, 1
+ %v4 = extractvalue {i32, i32} %s3, 1
+ call void @bar({i32, i32} %s3)
+
+ ; Use nested insertvalues to build a nested struct
+ %s4.1 = insertvalue {i32, i32} undef, i32 %v3, 0
+ %s4 = insertvalue {i32, i32} %s4.1, i32 %v4, 1
+ %ns2 = insertvalue {i32, {i32, i32}} undef, {i32, i32} %s4, 1
+
+ ; And now extract a single value from there
+ %v5 = extractvalue {i32, {i32, i32}} %ns2, 1, 1
+
+ ret i32 %v5
+}
+
+; CHECK: define i32 @extract2gep
+; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}* %pair, i32 0, i32 1
+; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i32* [[GEP]]
+; CHECK-NEXT: store
+; CHECK-NEXT: br label %loop
+; CHECK-NOT: extractvalue
+; CHECK: call {{.*}}(i32 [[LOAD]])
+; CHECK-NOT: extractvalue
+; CHECK: ret i32 [[LOAD]]
+define i32 @extract2gep({i32, i32}* %pair, i32* %P) {
+ ; The load + extractvalue should be converted
+ ; to an inbounds gep + smaller load.
+ ; The new load should be in the same spot as the old load.
+ %L = load {i32, i32}* %pair
+ store i32 0, i32* %P
+ br label %loop
+
+loop:
+ %E = extractvalue {i32, i32} %L, 1
+ %C = call i32 @baz(i32 %E)
+ store i32 %C, i32* %P
+ %cond = icmp eq i32 %C, 0
+ br i1 %cond, label %end, label %loop
+
+end:
+ ret i32 %E
+}
+
+; CHECK: define i32 @doubleextract2gep
+; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}* %arg, i32 0, i32 1, i32 1
+; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i32* [[GEP]]
+; CHECK-NEXT: ret i32 [[LOAD]]
+define i32 @doubleextract2gep({i32, {i32, i32}}* %arg) {
+ ; The load + extractvalues should be converted
+ ; to a 3-index inbounds gep + smaller load.
+ %L = load {i32, {i32, i32}}* %arg
+ %E1 = extractvalue {i32, {i32, i32}} %L, 1
+ %E2 = extractvalue {i32, i32} %E1, 1
+ ret i32 %E2
+}
+
+; CHECK: define i32 @nogep-multiuse
+; CHECK-NEXT: load {{.*}} %pair
+; CHECK-NEXT: extractvalue
+; CHECK-NEXT: extractvalue
+; CHECK-NEXT: add
+; CHECK-NEXT: ret
+define i32 @nogep-multiuse({i32, i32}* %pair) {
+ ; The load should be left unchanged since both parts are needed.
+ %L = volatile load {i32, i32}* %pair
+ %LHS = extractvalue {i32, i32} %L, 0
+ %RHS = extractvalue {i32, i32} %L, 1
+ %R = add i32 %LHS, %RHS
+ ret i32 %R
+}
+
+; CHECK: define i32 @nogep-volatile
+; CHECK-NEXT: load volatile {{.*}} %pair
+; CHECK-NEXT: extractvalue
+; CHECK-NEXT: ret
+define i32 @nogep-volatile({i32, i32}* %pair) {
+ ; The volatile load should be left unchanged.
+ %L = volatile load {i32, i32}* %pair
+ %E = extractvalue {i32, i32} %L, 1
+ ret i32 %E
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/fcmp-select.ll b/src/LLVM/test/Transforms/InstCombine/fcmp-select.ll
new file mode 100644
index 0000000..e04ab3e
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/fcmp-select.ll
@@ -0,0 +1,53 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; x != y ? x : y -> x if it's the right kind of != and at least
+; one of x and y is not negative zero.
+
+; CHECK: f0
+; CHECK: ret double %x
+define double @f0(double %x) nounwind readnone {
+entry:
+ %cmp = fcmp une double %x, -1.0
+ %cond = select i1 %cmp, double %x, double -1.0
+ ret double %cond
+}
+; CHECK: f1
+; CHECK: ret double -1.000000e+00
+define double @f1(double %x) nounwind readnone {
+entry:
+ %cmp = fcmp une double %x, -1.0
+ %cond = select i1 %cmp, double -1.0, double %x
+ ret double %cond
+}
+; CHECK: f2
+; CHECK: ret double %cond
+define double @f2(double %x, double %y) nounwind readnone {
+entry:
+ %cmp = fcmp une double %x, %y
+ %cond = select i1 %cmp, double %x, double %y
+ ret double %cond
+}
+; CHECK: f3
+; CHECK: ret double %cond
+define double @f3(double %x, double %y) nounwind readnone {
+entry:
+ %cmp = fcmp une double %x, %y
+ %cond = select i1 %cmp, double %y, double %x
+ ret double %cond
+}
+; CHECK: f4
+; CHECK: ret double %cond
+define double @f4(double %x) nounwind readnone {
+entry:
+ %cmp = fcmp one double %x, -1.0
+ %cond = select i1 %cmp, double %x, double -1.0
+ ret double %cond
+}
+; CHECK: f5
+; CHECK: ret double %cond
+define double @f5(double %x) nounwind readnone {
+entry:
+ %cmp = fcmp one double %x, -1.0
+ %cond = select i1 %cmp, double -1.0, double %x
+ ret double %cond
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/fcmp-special.ll b/src/LLVM/test/Transforms/InstCombine/fcmp-special.ll
new file mode 100644
index 0000000..a39021e
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/fcmp-special.ll
@@ -0,0 +1,155 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; Infinity
+
+; CHECK: inf0
+; CHECK: ret i1 false
+define i1 @inf0(double %arg) nounwind readnone {
+ %tmp = fcmp ogt double %arg, 0x7FF0000000000000
+ ret i1 %tmp
+}
+
+; CHECK: inf1
+; CHECK: ret i1 true
+define i1 @inf1(double %arg) nounwind readnone {
+ %tmp = fcmp ule double %arg, 0x7FF0000000000000
+ ret i1 %tmp
+}
+
+; Negative infinity
+
+; CHECK: ninf0
+; CHECK: ret i1 false
+define i1 @ninf0(double %arg) nounwind readnone {
+ %tmp = fcmp olt double %arg, 0xFFF0000000000000
+ ret i1 %tmp
+}
+
+; CHECK: ninf1
+; CHECK: ret i1 true
+define i1 @ninf1(double %arg) nounwind readnone {
+ %tmp = fcmp uge double %arg, 0xFFF0000000000000
+ ret i1 %tmp
+}
+
+; NaNs
+
+; CHECK: nan0
+; CHECK: ret i1 false
+define i1 @nan0(double %arg) nounwind readnone {
+ %tmp = fcmp ord double %arg, 0x7FF00000FFFFFFFF
+ ret i1 %tmp
+}
+
+; CHECK: nan1
+; CHECK: ret i1 false
+define i1 @nan1(double %arg) nounwind readnone {
+ %tmp = fcmp oeq double %arg, 0x7FF00000FFFFFFFF
+ ret i1 %tmp
+}
+
+; CHECK: nan2
+; CHECK: ret i1 false
+define i1 @nan2(double %arg) nounwind readnone {
+ %tmp = fcmp olt double %arg, 0x7FF00000FFFFFFFF
+ ret i1 %tmp
+}
+
+; CHECK: nan3
+; CHECK: ret i1 true
+define i1 @nan3(double %arg) nounwind readnone {
+ %tmp = fcmp uno double %arg, 0x7FF00000FFFFFFFF
+ ret i1 %tmp
+}
+
+; CHECK: nan4
+; CHECK: ret i1 true
+define i1 @nan4(double %arg) nounwind readnone {
+ %tmp = fcmp une double %arg, 0x7FF00000FFFFFFFF
+ ret i1 %tmp
+}
+
+; CHECK: nan5
+; CHECK: ret i1 true
+define i1 @nan5(double %arg) nounwind readnone {
+ %tmp = fcmp ult double %arg, 0x7FF00000FFFFFFFF
+ ret i1 %tmp
+}
+
+; Negative NaN.
+
+; CHECK: nnan0
+; CHECK: ret i1 false
+define i1 @nnan0(double %arg) nounwind readnone {
+ %tmp = fcmp ord double %arg, 0xFFF00000FFFFFFFF
+ ret i1 %tmp
+}
+
+; CHECK: nnan1
+; CHECK: ret i1 false
+define i1 @nnan1(double %arg) nounwind readnone {
+ %tmp = fcmp oeq double %arg, 0xFFF00000FFFFFFFF
+ ret i1 %tmp
+}
+
+; CHECK: nnan2
+; CHECK: ret i1 false
+define i1 @nnan2(double %arg) nounwind readnone {
+ %tmp = fcmp olt double %arg, 0xFFF00000FFFFFFFF
+ ret i1 %tmp
+}
+
+; CHECK: nnan3
+; CHECK: ret i1 true
+define i1 @nnan3(double %arg) nounwind readnone {
+ %tmp = fcmp uno double %arg, 0xFFF00000FFFFFFFF
+ ret i1 %tmp
+}
+
+; CHECK: nnan4
+; CHECK: ret i1 true
+define i1 @nnan4(double %arg) nounwind readnone {
+ %tmp = fcmp une double %arg, 0xFFF00000FFFFFFFF
+ ret i1 %tmp
+}
+
+; CHECK: nnan5
+; CHECK: ret i1 true
+define i1 @nnan5(double %arg) nounwind readnone {
+ %tmp = fcmp ult double %arg, 0xFFF00000FFFFFFFF
+ ret i1 %tmp
+}
+
+; Negative zero.
+
+; CHECK: nzero0
+; CHECK: ret i1 true
+define i1 @nzero0() {
+ %tmp = fcmp oeq double 0.0, -0.0
+ ret i1 %tmp
+}
+
+; CHECK: nzero1
+; CHECK: ret i1 false
+define i1 @nzero1() {
+ %tmp = fcmp ogt double 0.0, -0.0
+ ret i1 %tmp
+}
+
+; Misc.
+
+; CHECK: misc0
+; CHECK: %tmp = fcmp ord double %arg, 0.000000e+00
+; CHECK: ret i1 %tmp
+define i1 @misc0(double %arg) {
+ %tmp = fcmp oeq double %arg, %arg
+ ret i1 %tmp
+}
+
+; CHECK: misc1
+; CHECK: ret i1 false
+define i1 @misc1(double %arg) {
+ %tmp = fcmp one double %arg, %arg
+ ret i1 %tmp
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/fcmp.ll b/src/LLVM/test/Transforms/InstCombine/fcmp.ll
new file mode 100644
index 0000000..d08cbf5
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/fcmp.ll
@@ -0,0 +1,71 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+define i1 @test1(float %x, float %y) nounwind {
+ %ext1 = fpext float %x to double
+ %ext2 = fpext float %y to double
+ %cmp = fcmp ogt double %ext1, %ext2
+ ret i1 %cmp
+; CHECK: @test1
+; CHECK-NEXT: fcmp ogt float %x, %y
+}
+
+define i1 @test2(float %a) nounwind {
+ %ext = fpext float %a to double
+ %cmp = fcmp ogt double %ext, 1.000000e+00
+ ret i1 %cmp
+; CHECK: @test2
+; CHECK-NEXT: fcmp ogt float %a, 1.0
+}
+
+define i1 @test3(float %a) nounwind {
+ %ext = fpext float %a to double
+ %cmp = fcmp ogt double %ext, 0x3FF0000000000001 ; more precision than float.
+ ret i1 %cmp
+; CHECK: @test3
+; CHECK-NEXT: fpext float %a to double
+}
+
+define i1 @test4(float %a) nounwind {
+ %ext = fpext float %a to double
+ %cmp = fcmp ogt double %ext, 0x36A0000000000000 ; denormal in float.
+ ret i1 %cmp
+; CHECK: @test4
+; CHECK-NEXT: fpext float %a to double
+}
+
+define i1 @test5(float %a) nounwind {
+ %neg = fsub float -0.000000e+00, %a
+ %cmp = fcmp ogt float %neg, 1.000000e+00
+ ret i1 %cmp
+; CHECK: @test5
+; CHECK-NEXT: fcmp olt float %a, -1.0
+}
+
+define i1 @test6(float %x, float %y) nounwind {
+ %neg1 = fsub float -0.000000e+00, %x
+ %neg2 = fsub float -0.000000e+00, %y
+ %cmp = fcmp olt float %neg1, %neg2
+ ret i1 %cmp
+; CHECK: @test6
+; CHECK-NEXT: fcmp ogt float %x, %y
+}
+
+define i1 @test7(float %x) nounwind readnone ssp noredzone {
+ %ext = fpext float %x to ppc_fp128
+ %cmp = fcmp ogt ppc_fp128 %ext, 0xM00000000000000000000000000000000
+ ret i1 %cmp
+; Can't convert ppc_fp128
+; CHECK: @test7
+; CHECK-NEXT: fpext float %x to ppc_fp128
+}
+
+define float @test8(float %x) nounwind readnone optsize ssp {
+ %conv = fpext float %x to double
+ %cmp = fcmp olt double %conv, 0.000000e+00
+ %conv1 = zext i1 %cmp to i32
+ %conv2 = sitofp i32 %conv1 to float
+ ret float %conv2
+; Float comparison to zero shouldn't cast to double.
+; CHECK: @test8
+; CHECK-NEXT: fcmp olt float %x, 0.000000e+00
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/fdiv.ll b/src/LLVM/test/Transforms/InstCombine/fdiv.ll
new file mode 100644
index 0000000..a2cce01
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/fdiv.ll
@@ -0,0 +1,25 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+define float @test1(float %x) nounwind readnone ssp {
+ %div = fdiv float %x, 0x3810000000000000
+ ret float %div
+
+; CHECK: @test1
+; CHECK-NEXT: fmul float %x, 0x47D0000000000000
+}
+
+define float @test2(float %x) nounwind readnone ssp {
+ %div = fdiv float %x, 0x47E0000000000000
+ ret float %div
+
+; CHECK: @test2
+; CHECK-NEXT: fdiv float %x, 0x47E0000000000000
+}
+
+define float @test3(float %x) nounwind readnone ssp {
+ %div = fdiv float %x, 0x36A0000000000000
+ ret float %div
+
+; CHECK: @test3
+; CHECK-NEXT: fdiv float %x, 0x36A0000000000000
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/fold-bin-operand.ll b/src/LLVM/test/Transforms/InstCombine/fold-bin-operand.ll
new file mode 100644
index 0000000..a8bad0d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/fold-bin-operand.ll
@@ -0,0 +1,17 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+define i1 @f(i1 %x) {
+; CHECK: @f
+; CHECK: ret i1 false
+ %b = and i1 %x, icmp eq (i8* inttoptr (i32 1 to i8*), i8* inttoptr (i32 2 to i8*))
+ ret i1 %b
+}
+
+define i32 @g(i32 %x) {
+; CHECK: @g
+; CHECK: ret i32 %x
+ %b = add i32 %x, zext (i1 icmp eq (i8* inttoptr (i32 1000000 to i8*), i8* inttoptr (i32 2000000 to i8*)) to i32)
+ ret i32 %b
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/fold-calls.ll b/src/LLVM/test/Transforms/InstCombine/fold-calls.ll
new file mode 100644
index 0000000..504f874
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/fold-calls.ll
@@ -0,0 +1,19 @@
+; RUN: opt -instcombine -S < %s | FileCheck %s
+
+; This shouldn't fold, because sin(inf) is invalid.
+; CHECK: @foo
+; CHECK: %t = call double @sin(double 0x7FF0000000000000)
+define double @foo() {
+ %t = call double @sin(double 0x7FF0000000000000)
+ ret double %t
+}
+
+; This should fold.
+; CHECK: @bar
+; CHECK: ret double 0.0
+define double @bar() {
+ %t = call double @sin(double 0.0)
+ ret double %t
+}
+
+declare double @sin(double)
diff --git a/src/LLVM/test/Transforms/InstCombine/fold-vector-select.ll b/src/LLVM/test/Transforms/InstCombine/fold-vector-select.ll
new file mode 100644
index 0000000..4af2872
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/fold-vector-select.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -instcombine -S | not grep select
+
+define void @foo(<4 x i32> *%A, <4 x i32> *%B, <4 x i32> *%C, <4 x i32> *%D) {
+ %r = select <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> zeroinitializer
+ %g = select <4 x i1> <i1 false, i1 false, i1 false, i1 false>, <4 x i32> zeroinitializer, <4 x i32> <i32 3, i32 6, i32 9, i32 1>
+ %b = select <4 x i1> <i1 false, i1 true, i1 false, i1 true>, <4 x i32> zeroinitializer, <4 x i32> <i32 7, i32 1, i32 4, i32 9>
+ %a = select <4 x i1> zeroinitializer, <4 x i32> zeroinitializer, <4 x i32> <i32 3, i32 2, i32 8, i32 5>
+ store <4 x i32> %r, <4 x i32>* %A
+ store <4 x i32> %g, <4 x i32>* %B
+ store <4 x i32> %b, <4 x i32>* %C
+ store <4 x i32> %a, <4 x i32>* %D
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/fold-vector-zero.ll b/src/LLVM/test/Transforms/InstCombine/fold-vector-zero.ll
new file mode 100644
index 0000000..e1d86b6
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/fold-vector-zero.ll
@@ -0,0 +1,35 @@
+; RUN: opt < %s -instcombine -S | not grep zeroinitializer
+
+define void @foo(i64 %A, i64 %B) {
+bb8:
+ br label %bb30
+
+bb30:
+ %s0 = phi i64 [ 0, %bb8 ], [ %r21, %bb30 ]
+ %l0 = phi i64 [ -2222, %bb8 ], [ %r23, %bb30 ]
+ %r2 = add i64 %s0, %B
+ %r3 = inttoptr i64 %r2 to <2 x double>*
+ %r4 = load <2 x double>* %r3, align 8
+ %r6 = bitcast <2 x double> %r4 to <2 x i64>
+ %r7 = bitcast <2 x double> zeroinitializer to <2 x i64>
+ %r8 = insertelement <2 x i64> undef, i64 9223372036854775807, i32 0
+ %r9 = insertelement <2 x i64> undef, i64 -9223372036854775808, i32 0
+ %r10 = insertelement <2 x i64> %r8, i64 9223372036854775807, i32 1
+ %r11 = insertelement <2 x i64> %r9, i64 -9223372036854775808, i32 1
+ %r12 = and <2 x i64> %r6, %r10
+ %r13 = and <2 x i64> %r7, %r11
+ %r14 = or <2 x i64> %r12, %r13
+ %r15 = bitcast <2 x i64> %r14 to <2 x double>
+ %r18 = add i64 %s0, %A
+ %r19 = inttoptr i64 %r18 to <2 x double>*
+ store <2 x double> %r15, <2 x double>* %r19, align 8
+ %r21 = add i64 16, %s0
+ %r23 = add i64 1, %l0
+ %r25 = icmp slt i64 %r23, 0
+ %r26 = zext i1 %r25 to i64
+ %r27 = icmp ne i64 %r26, 0
+ br i1 %r27, label %bb30, label %bb5
+
+bb5:
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/fp-ret-bitcast.ll b/src/LLVM/test/Transforms/InstCombine/fp-ret-bitcast.ll
new file mode 100644
index 0000000..35ece42
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/fp-ret-bitcast.ll
@@ -0,0 +1,28 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {call float bitcast} | count 1
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+ %struct.NSObject = type { %struct.objc_class* }
+ %struct.NSArray = type { %struct.NSObject }
+ %struct.objc_class = type opaque
+ %struct.objc_selector = type opaque
+
+@"\01L_OBJC_METH_VAR_NAME_112" = internal global [15 x i8] c"whiteComponent\00", section "__TEXT,__cstring,cstring_literals"
+@"\01L_OBJC_SELECTOR_REFERENCES_81" = internal global %struct.objc_selector* bitcast ([15 x i8]* @"\01L_OBJC_METH_VAR_NAME_112" to %struct.objc_selector*), section "__OBJC,__message_refs,literal_pointers,no_dead_strip"
+
+define void @bork() nounwind {
+entry:
+ %color = alloca %struct.NSArray*
+ %color.466 = alloca %struct.NSObject*
+ %tmp103 = load %struct.NSArray** %color, align 4
+ %tmp103104 = getelementptr %struct.NSArray* %tmp103, i32 0, i32 0
+ store %struct.NSObject* %tmp103104, %struct.NSObject** %color.466, align 4
+ %tmp105 = load %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_81", align 4
+ %tmp106 = load %struct.NSObject** %color.466, align 4
+ %tmp107 = call float bitcast (void (%struct.NSObject*, ...)* @objc_msgSend_fpret to float (%struct.NSObject*, %struct.objc_selector*)*)( %struct.NSObject* %tmp106, %struct.objc_selector* %tmp105 ) nounwind
+ br label %exit
+
+exit:
+ ret void
+}
+
+declare void @objc_msgSend_fpret(%struct.NSObject*, ...)
diff --git a/src/LLVM/test/Transforms/InstCombine/fpcast.ll b/src/LLVM/test/Transforms/InstCombine/fpcast.ll
new file mode 100644
index 0000000..f3f42cf
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/fpcast.ll
@@ -0,0 +1,15 @@
+; Test some floating point casting cases
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i8 @test1() {
+ %x = fptoui float 2.550000e+02 to i8 ; <i8> [#uses=1]
+ ret i8 %x
+; CHECK: ret i8 -1
+}
+
+define i8 @test2() {
+ %x = fptosi float -1.000000e+00 to i8 ; <i8> [#uses=1]
+ ret i8 %x
+; CHECK: ret i8 -1
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/fpextend.ll b/src/LLVM/test/Transforms/InstCombine/fpextend.ll
new file mode 100644
index 0000000..70e0c62
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/fpextend.ll
@@ -0,0 +1,36 @@
+; RUN: opt < %s -instcombine -S | not grep fpext
+@X = external global float
+@Y = external global float
+
+define void @test() nounwind {
+entry:
+ %tmp = load float* @X, align 4 ; <float> [#uses=1]
+ %tmp1 = fpext float %tmp to double ; <double> [#uses=1]
+ %tmp3 = fadd double %tmp1, 0.000000e+00 ; <double> [#uses=1]
+ %tmp34 = fptrunc double %tmp3 to float ; <float> [#uses=1]
+ store float %tmp34, float* @X, align 4
+ ret void
+}
+
+define void @test3() nounwind {
+entry:
+ %tmp = load float* @X, align 4 ; <float> [#uses=1]
+ %tmp1 = fpext float %tmp to double ; <double> [#uses=1]
+ %tmp2 = load float* @Y, align 4 ; <float> [#uses=1]
+ %tmp23 = fpext float %tmp2 to double ; <double> [#uses=1]
+ %tmp5 = fdiv double %tmp1, %tmp23 ; <double> [#uses=1]
+ %tmp56 = fptrunc double %tmp5 to float ; <float> [#uses=1]
+ store float %tmp56, float* @X, align 4
+ ret void
+}
+
+define void @test4() nounwind {
+entry:
+ %tmp = load float* @X, align 4 ; <float> [#uses=1]
+ %tmp1 = fpext float %tmp to double ; <double> [#uses=1]
+ %tmp2 = fsub double -0.000000e+00, %tmp1 ; <double> [#uses=1]
+ %tmp34 = fptrunc double %tmp2 to float ; <float> [#uses=1]
+ store float %tmp34, float* @X, align 4
+ ret void
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/fsub.ll b/src/LLVM/test/Transforms/InstCombine/fsub.ll
new file mode 100644
index 0000000..af2fadd
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/fsub.ll
@@ -0,0 +1,23 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; PR4374
+define float @test1(float %a, float %b) nounwind {
+ %t1 = fsub float %a, %b
+ %t2 = fsub float -0.000000e+00, %t1
+
+; CHECK: %t1 = fsub float %a, %b
+; CHECK-NEXT: %t2 = fsub float -0.000000e+00, %t1
+
+ ret float %t2
+}
+
+; <rdar://problem/7530098>
+define double @test2(double %x, double %y) nounwind {
+ %t1 = fadd double %x, %y
+ %t2 = fsub double %x, %t1
+
+; CHECK: %t1 = fadd double %x, %y
+; CHECK-NEXT: %t2 = fsub double %x, %t1
+
+ ret double %t2
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/gep-addrspace.ll b/src/LLVM/test/Transforms/InstCombine/gep-addrspace.ll
new file mode 100644
index 0000000..dfe12db
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/gep-addrspace.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine -S
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-pc-win32"
+
+%myStruct = type { float, [3 x float], [4 x float], i32 }
+
+; make sure that we are not crashing when creating an illegal type
+define void @func(%myStruct addrspace(1)* nocapture %p) nounwind {
+ST:
+ %A = getelementptr inbounds %myStruct addrspace(1)* %p, i64 0
+ %B = bitcast %myStruct addrspace(1)* %A to %myStruct*
+ %C = getelementptr inbounds %myStruct* %B, i32 0, i32 1
+ %D = getelementptr inbounds [3 x float]* %C, i32 0, i32 2
+ %E = load float* %D, align 4
+ %F = fsub float %E, undef
+ ret void
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/gepgep.ll b/src/LLVM/test/Transforms/InstCombine/gepgep.ll
new file mode 100644
index 0000000..9e681d2
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/gepgep.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -instcombine -disable-output
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+@buffer = external global [64 x float]
+
+declare void @use(i8*)
+
+define void @f() {
+ call void @use(i8* getelementptr (i8* getelementptr (i8* bitcast ([64 x float]* @buffer to i8*), i64 and (i64 sub (i64 0, i64 ptrtoint ([64 x float]* @buffer to i64)), i64 63)), i64 64))
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/getelementptr.ll b/src/LLVM/test/Transforms/InstCombine/getelementptr.ll
new file mode 100644
index 0000000..6bb2035
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/getelementptr.ll
@@ -0,0 +1,494 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64"
+%intstruct = type { i32 }
+%pair = type { i32, i32 }
+%struct.B = type { double }
+%struct.A = type { %struct.B, i32, i32 }
+
+
+@Global = constant [10 x i8] c"helloworld"
+
+; Test noop elimination
+define i32* @test1(i32* %I) {
+ %A = getelementptr i32* %I, i64 0
+ ret i32* %A
+; CHECK: @test1
+; CHECK: ret i32* %I
+}
+
+; Test noop elimination
+define i32* @test2(i32* %I) {
+ %A = getelementptr i32* %I
+ ret i32* %A
+; CHECK: @test2
+; CHECK: ret i32* %I
+}
+
+; Test that two array indexing geps fold
+define i32* @test3(i32* %I) {
+ %A = getelementptr i32* %I, i64 17
+ %B = getelementptr i32* %A, i64 4
+ ret i32* %B
+; CHECK: @test3
+; CHECK: getelementptr i32* %I, i64 21
+}
+
+; Test that two getelementptr insts fold
+define i32* @test4({ i32 }* %I) {
+ %A = getelementptr { i32 }* %I, i64 1
+ %B = getelementptr { i32 }* %A, i64 0, i32 0
+ ret i32* %B
+; CHECK: @test4
+; CHECK: getelementptr { i32 }* %I, i64 1, i32 0
+}
+
+define void @test5(i8 %B) {
+ ; This should be turned into a constexpr instead of being an instruction
+ %A = getelementptr [10 x i8]* @Global, i64 0, i64 4
+ store i8 %B, i8* %A
+ ret void
+; CHECK: @test5
+; CHECK: store i8 %B, i8* getelementptr inbounds ([10 x i8]* @Global, i64 0, i64 4)
+}
+
+
+define i32* @test7(i32* %I, i64 %C, i64 %D) {
+ %A = getelementptr i32* %I, i64 %C
+ %B = getelementptr i32* %A, i64 %D
+ ret i32* %B
+; CHECK: @test7
+; CHECK: %A.sum = add i64 %C, %D
+; CHECK: getelementptr i32* %I, i64 %A.sum
+}
+
+define i8* @test8([10 x i32]* %X) {
+ ;; Fold into the cast.
+ %A = getelementptr [10 x i32]* %X, i64 0, i64 0
+ %B = bitcast i32* %A to i8*
+ ret i8* %B
+; CHECK: @test8
+; CHECK: bitcast [10 x i32]* %X to i8*
+}
+
+define i32 @test9() {
+ %A = getelementptr { i32, double }* null, i32 0, i32 1
+ %B = ptrtoint double* %A to i32
+ ret i32 %B
+; CHECK: @test9
+; CHECK: ret i32 8
+}
+
+define i1 @test10({ i32, i32 }* %x, { i32, i32 }* %y) {
+ %tmp.1 = getelementptr { i32, i32 }* %x, i32 0, i32 1
+ %tmp.3 = getelementptr { i32, i32 }* %y, i32 0, i32 1
+ ;; seteq x, y
+ %tmp.4 = icmp eq i32* %tmp.1, %tmp.3
+ ret i1 %tmp.4
+; CHECK: @test10
+; CHECK: icmp eq { i32, i32 }* %x, %y
+}
+
+define i1 @test11({ i32, i32 }* %X) {
+ %P = getelementptr { i32, i32 }* %X, i32 0, i32 0
+ %Q = icmp eq i32* %P, null
+ ret i1 %Q
+; CHECK: @test11
+; CHECK: icmp eq { i32, i32 }* %X, null
+}
+
+
+; PR4748
+define i32 @test12(%struct.A* %a) {
+entry:
+ %g3 = getelementptr %struct.A* %a, i32 0, i32 1
+ store i32 10, i32* %g3, align 4
+
+ %g4 = getelementptr %struct.A* %a, i32 0, i32 0
+
+ %new_a = bitcast %struct.B* %g4 to %struct.A*
+
+ %g5 = getelementptr %struct.A* %new_a, i32 0, i32 1
+ %a_a = load i32* %g5, align 4
+ ret i32 %a_a
+; CHECK: @test12
+; CHECK: getelementptr %struct.A* %a, i64 0, i32 1
+; CHECK-NEXT: store i32 10, i32* %g3
+; CHECK-NEXT: ret i32 10
+}
+
+
+; PR2235
+%S = type { i32, [ 100 x i32] }
+define i1 @test13(i64 %X, %S* %P) {
+ %A = getelementptr inbounds %S* %P, i32 0, i32 1, i64 %X
+ %B = getelementptr inbounds %S* %P, i32 0, i32 0
+ %C = icmp eq i32* %A, %B
+ ret i1 %C
+; CHECK: @test13
+; CHECK: %C = icmp eq i64 %X, -1
+}
+
+
+@G = external global [3 x i8]
+define i8* @test14(i32 %Idx) {
+ %idx = zext i32 %Idx to i64
+ %tmp = getelementptr i8* getelementptr ([3 x i8]* @G, i32 0, i32 0), i64 %idx
+ ret i8* %tmp
+; CHECK: @test14
+; CHECK: getelementptr [3 x i8]* @G, i64 0, i64 %idx
+}
+
+
+; Test folding of constantexpr geps into normal geps.
+@Array = external global [40 x i32]
+define i32 *@test15(i64 %X) {
+ %A = getelementptr i32* getelementptr ([40 x i32]* @Array, i64 0, i64 0), i64 %X
+ ret i32* %A
+; CHECK: @test15
+; CHECK: getelementptr [40 x i32]* @Array, i64 0, i64 %X
+}
+
+
+define i32* @test16(i32* %X, i32 %Idx) {
+ %R = getelementptr i32* %X, i32 %Idx
+ ret i32* %R
+; CHECK: @test16
+; CHECK: sext i32 %Idx to i64
+}
+
+
+define i1 @test17(i16* %P, i32 %I, i32 %J) {
+ %X = getelementptr inbounds i16* %P, i32 %I
+ %Y = getelementptr inbounds i16* %P, i32 %J
+ %C = icmp ult i16* %X, %Y
+ ret i1 %C
+; CHECK: @test17
+; CHECK: %C = icmp slt i32 %I, %J
+}
+
+define i1 @test18(i16* %P, i32 %I) {
+ %X = getelementptr inbounds i16* %P, i32 %I
+ %C = icmp ult i16* %X, %P
+ ret i1 %C
+; CHECK: @test18
+; CHECK: %C = icmp slt i32 %I, 0
+}
+
+define i32 @test19(i32* %P, i32 %A, i32 %B) {
+ %tmp.4 = getelementptr inbounds i32* %P, i32 %A
+ %tmp.9 = getelementptr inbounds i32* %P, i32 %B
+ %tmp.10 = icmp eq i32* %tmp.4, %tmp.9
+ %tmp.11 = zext i1 %tmp.10 to i32
+ ret i32 %tmp.11
+; CHECK: @test19
+; CHECK: icmp eq i32 %A, %B
+}
+
+define i32 @test20(i32* %P, i32 %A, i32 %B) {
+ %tmp.4 = getelementptr inbounds i32* %P, i32 %A
+ %tmp.6 = icmp eq i32* %tmp.4, %P
+ %tmp.7 = zext i1 %tmp.6 to i32
+ ret i32 %tmp.7
+; CHECK: @test20
+; CHECK: icmp eq i32 %A, 0
+}
+
+
+define i32 @test21() {
+ %pbob1 = alloca %intstruct
+ %pbob2 = getelementptr %intstruct* %pbob1
+ %pbobel = getelementptr %intstruct* %pbob2, i64 0, i32 0
+ %rval = load i32* %pbobel
+ ret i32 %rval
+; CHECK: @test21
+; CHECK: getelementptr %intstruct* %pbob1, i64 0, i32 0
+}
+
+
+@A = global i32 1 ; <i32*> [#uses=1]
+@B = global i32 2 ; <i32*> [#uses=1]
+
+define i1 @test22() {
+ %C = icmp ult i32* getelementptr (i32* @A, i64 1),
+ getelementptr (i32* @B, i64 2)
+ ret i1 %C
+; CHECK: @test22
+; CHECK: icmp ult (i32* getelementptr inbounds (i32* @A, i64 1), i32* getelementptr (i32* @B, i64 2))
+}
+
+
+%X = type { [10 x i32], float }
+
+define i1 @test23() {
+ %A = getelementptr %X* null, i64 0, i32 0, i64 0 ; <i32*> [#uses=1]
+ %B = icmp ne i32* %A, null ; <i1> [#uses=1]
+ ret i1 %B
+; CHECK: @test23
+; CHECK: ret i1 false
+}
+
+define void @test25() {
+entry:
+ %tmp = getelementptr { i64, i64, i64, i64 }* null, i32 0, i32 3 ; <i64*> [#uses=1]
+ %tmp.upgrd.1 = load i64* %tmp ; <i64> [#uses=1]
+ %tmp8.ui = load i64* null ; <i64> [#uses=1]
+ %tmp8 = bitcast i64 %tmp8.ui to i64 ; <i64> [#uses=1]
+ %tmp9 = and i64 %tmp8, %tmp.upgrd.1 ; <i64> [#uses=1]
+ %sext = trunc i64 %tmp9 to i32 ; <i32> [#uses=1]
+ %tmp27.i = sext i32 %sext to i64 ; <i64> [#uses=1]
+ tail call void @foo25( i32 0, i64 %tmp27.i )
+ unreachable
+; CHECK: @test25
+}
+
+declare void @foo25(i32, i64)
+
+
+; PR1637
+define i1 @test26(i8* %arr) {
+ %X = getelementptr i8* %arr, i32 1
+ %Y = getelementptr i8* %arr, i32 1
+ %test = icmp uge i8* %X, %Y
+ ret i1 %test
+; CHECK: @test26
+; CHECK: ret i1 true
+}
+
+ %struct.__large_struct = type { [100 x i64] }
+ %struct.compat_siginfo = type { i32, i32, i32, { [29 x i32] } }
+ %struct.siginfo_t = type { i32, i32, i32, { { i32, i32, [0 x i8], %struct.sigval_t, i32 }, [88 x i8] } }
+ %struct.sigval_t = type { i8* }
+
+define i32 @test27(%struct.compat_siginfo* %to, %struct.siginfo_t* %from) {
+entry:
+ %from_addr = alloca %struct.siginfo_t*
+ %tmp344 = load %struct.siginfo_t** %from_addr, align 8
+ %tmp345 = getelementptr %struct.siginfo_t* %tmp344, i32 0, i32 3
+ %tmp346 = getelementptr { { i32, i32, [0 x i8], %struct.sigval_t, i32 }, [88 x i8] }* %tmp345, i32 0, i32 0
+ %tmp346347 = bitcast { i32, i32, [0 x i8], %struct.sigval_t, i32 }* %tmp346 to { i32, i32, %struct.sigval_t }*
+ %tmp348 = getelementptr { i32, i32, %struct.sigval_t }* %tmp346347, i32 0, i32 2
+ %tmp349 = getelementptr %struct.sigval_t* %tmp348, i32 0, i32 0
+ %tmp349350 = bitcast i8** %tmp349 to i32*
+ %tmp351 = load i32* %tmp349350, align 8
+ %tmp360 = call i32 asm sideeffect "...",
+ "=r,ir,*m,i,0,~{dirflag},~{fpsr},~{flags}"( i32 %tmp351,
+ %struct.__large_struct* null, i32 -14, i32 0 )
+ unreachable
+; CHECK: @test27
+}
+
+; PR1978
+ %struct.x = type <{ i8 }>
+@.str = internal constant [6 x i8] c"Main!\00"
+@.str1 = internal constant [12 x i8] c"destroy %p\0A\00"
+
+define i32 @test28() nounwind {
+entry:
+ %orientations = alloca [1 x [1 x %struct.x]]
+ %tmp3 = call i32 @puts( i8* getelementptr ([6 x i8]* @.str, i32 0, i32 0) ) nounwind
+ %tmp45 = getelementptr inbounds [1 x [1 x %struct.x]]* %orientations, i32 1, i32 0, i32 0
+ %orientations62 = getelementptr [1 x [1 x %struct.x]]* %orientations, i32 0, i32 0, i32 0
+ br label %bb10
+
+bb10:
+ %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb10 ]
+ %tmp.0.reg2mem.0.rec = mul i32 %indvar, -1
+ %tmp12.rec = add i32 %tmp.0.reg2mem.0.rec, -1
+ %tmp12 = getelementptr inbounds %struct.x* %tmp45, i32 %tmp12.rec
+ %tmp16 = call i32 (i8*, ...)* @printf( i8* getelementptr ([12 x i8]* @.str1, i32 0, i32 0), %struct.x* %tmp12 ) nounwind
+ %tmp84 = icmp eq %struct.x* %tmp12, %orientations62
+ %indvar.next = add i32 %indvar, 1
+ br i1 %tmp84, label %bb17, label %bb10
+
+bb17:
+ ret i32 0
+; CHECK: @test28
+; CHECK: icmp eq i32 %indvar, 0
+}
+
+declare i32 @puts(i8*)
+
+declare i32 @printf(i8*, ...)
+
+
+
+
+; rdar://6762290
+ %T = type <{ i64, i64, i64 }>
+define i32 @test29(i8* %start, i32 %X) nounwind {
+entry:
+ %tmp3 = load i64* null
+ %add.ptr = getelementptr i8* %start, i64 %tmp3
+ %tmp158 = load i32* null
+ %add.ptr159 = getelementptr %T* null, i32 %tmp158
+ %add.ptr209 = getelementptr i8* %start, i64 0
+ %add.ptr212 = getelementptr i8* %add.ptr209, i32 %X
+ %cmp214 = icmp ugt i8* %add.ptr212, %add.ptr
+ br i1 %cmp214, label %if.then216, label %if.end363
+
+if.then216:
+ ret i32 1
+
+if.end363:
+ ret i32 0
+; CHECK: @test29
+}
+
+
+; PR3694
+define i32 @test30(i32 %m, i32 %n) nounwind {
+entry:
+ %0 = alloca i32, i32 %n, align 4
+ %1 = bitcast i32* %0 to [0 x i32]*
+ call void @test30f(i32* %0) nounwind
+ %2 = getelementptr [0 x i32]* %1, i32 0, i32 %m
+ %3 = load i32* %2, align 4
+ ret i32 %3
+; CHECK: @test30
+; CHECK: getelementptr i32
+}
+
+declare void @test30f(i32*)
+
+
+
+define i1 @test31(i32* %A) {
+ %B = getelementptr i32* %A, i32 1
+ %C = getelementptr i32* %A, i64 1
+ %V = icmp eq i32* %B, %C
+ ret i1 %V
+; CHECK: @test31
+; CHECK: ret i1 true
+}
+
+
+; PR1345
+define i8* @test32(i8* %v) {
+ %A = alloca [4 x i8*], align 16
+ %B = getelementptr [4 x i8*]* %A, i32 0, i32 0
+ store i8* null, i8** %B
+ %C = bitcast [4 x i8*]* %A to { [16 x i8] }*
+ %D = getelementptr { [16 x i8] }* %C, i32 0, i32 0, i32 8
+ %E = bitcast i8* %D to i8**
+ store i8* %v, i8** %E
+ %F = getelementptr [4 x i8*]* %A, i32 0, i32 2
+ %G = load i8** %F
+ ret i8* %G
+; CHECK: @test32
+; CHECK: %D = getelementptr [4 x i8*]* %A, i64 0, i64 1
+; CHECK: %F = getelementptr [4 x i8*]* %A, i64 0, i64 2
+}
+
+; PR3290
+%struct.Key = type { { i32, i32 } }
+%struct.anon = type <{ i8, [3 x i8], i32 }>
+
+define i32 *@test33(%struct.Key *%A) {
+ %B = bitcast %struct.Key* %A to %struct.anon*
+ %C = getelementptr %struct.anon* %B, i32 0, i32 2
+ ret i32 *%C
+; CHECK: @test33
+; CHECK: getelementptr %struct.Key* %A, i64 0, i32 0, i32 1
+}
+
+
+
+ %T2 = type { i8*, i8 }
+define i8* @test34(i8* %Val, i64 %V) nounwind {
+entry:
+ %A = alloca %T2, align 8
+ %mrv_gep = bitcast %T2* %A to i64*
+ %B = getelementptr %T2* %A, i64 0, i32 0
+
+ store i64 %V, i64* %mrv_gep
+ %C = load i8** %B, align 8
+ ret i8* %C
+; CHECK: @test34
+; CHECK: %V.c = inttoptr i64 %V to i8*
+; CHECK: ret i8* %V.c
+}
+
+%t0 = type { i8*, [19 x i8] }
+%t1 = type { i8*, [0 x i8] }
+
+@array = external global [11 x i8]
+
+@s = external global %t0
+@"\01LC8" = external constant [17 x i8]
+
+; Instcombine should be able to fold this getelementptr.
+
+define i32 @test35() nounwind {
+ call i32 (i8*, ...)* @printf(i8* getelementptr ([17 x i8]* @"\01LC8", i32 0, i32 0),
+ i8* getelementptr (%t1* bitcast (%t0* @s to %t1*), i32 0, i32 1, i32 0)) nounwind
+ ret i32 0
+; CHECK: @test35
+; CHECK: call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([17 x i8]* @"\01LC8", i64 0, i64 0), i8* getelementptr inbounds (%t0* @s, i64 0, i32 1, i64 0)) nounwind
+}
+
+; Instcombine should constant-fold the GEP so that indices that have
+; static array extents are within bounds of those array extents.
+; In the below, -1 is not in the range [0,11). After the transformation,
+; the same address is computed, but 3 is in the range of [0,11).
+
+define i8* @test36() nounwind {
+ ret i8* getelementptr ([11 x i8]* @array, i32 0, i64 -1)
+; CHECK: @test36
+; CHECK: ret i8* getelementptr ([11 x i8]* @array, i64 1676976733973595601, i64 4)
+}
+
+; Instcombine shouldn't assume that gep(A,0,1) != gep(A,1,0).
+@A37 = external constant [1 x i8]
+define i1 @test37() nounwind {
+; CHECK: @test37
+; CHECK: ret i1 true
+ %t = icmp eq i8* getelementptr ([1 x i8]* @A37, i64 0, i64 1),
+ getelementptr ([1 x i8]* @A37, i64 1, i64 0)
+ ret i1 %t
+}
+
+; Test index promotion
+define i32* @test38(i32* %I, i32 %n) {
+ %A = getelementptr i32* %I, i32 %n
+ ret i32* %A
+; CHECK: @test38
+; CHECK: = sext i32 %n to i64
+; CHECK: %A = getelementptr i32* %I, i64 %
+}
+
+; Test that we don't duplicate work when the second gep is a "bitcast".
+%pr10322_t = type { i8* }
+declare void @pr10322_f2(%pr10322_t*)
+declare void @pr10322_f3(i8**)
+define void @pr10322_f1(%pr10322_t* %foo) {
+entry:
+ %arrayidx8 = getelementptr inbounds %pr10322_t* %foo, i64 2
+ call void @pr10322_f2(%pr10322_t* %arrayidx8) nounwind
+ %tmp2 = getelementptr inbounds %pr10322_t* %arrayidx8, i64 0, i32 0
+ call void @pr10322_f3(i8** %tmp2) nounwind
+ ret void
+
+; CHECK: @pr10322_f1
+; CHECK: %tmp2 = getelementptr inbounds %pr10322_t* %arrayidx8, i64 0, i32 0
+}
+
+; Test that we combine the last two geps in this sequence, before we
+; would wait for gep1 and gep2 to be combined and never combine 2 and 3.
+%three_gep_t = type {i32}
+%three_gep_t2 = type {%three_gep_t}
+
+define void @three_gep_f(%three_gep_t2* %x) {
+ %gep1 = getelementptr %three_gep_t2* %x, i64 2
+ call void @three_gep_h(%three_gep_t2* %gep1)
+ %gep2 = getelementptr %three_gep_t2* %gep1, i64 0, i32 0
+ %gep3 = getelementptr %three_gep_t* %gep2, i64 0, i32 0
+ call void @three_gep_g(i32* %gep3)
+
+; CHECK: @three_gep_f
+; CHECK: %gep3 = getelementptr %three_gep_t2* %gep1, i64 0, i32 0, i32 0
+ ret void
+}
+
+declare void @three_gep_g(i32*)
+declare void @three_gep_h(%three_gep_t2*)
diff --git a/src/LLVM/test/Transforms/InstCombine/hoist_instr.ll b/src/LLVM/test/Transforms/InstCombine/hoist_instr.ll
new file mode 100644
index 0000000..3bd34d0
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/hoist_instr.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+;; This tests that the div is hoisted into the then block.
+define i32 @foo(i1 %C, i32 %A, i32 %B) {
+entry:
+ br i1 %C, label %then, label %endif
+
+then: ; preds = %entry
+; CHECK: then:
+; CHECK-NEXT: sdiv i32
+ br label %endif
+
+endif: ; preds = %then, %entry
+ %X = phi i32 [ %A, %then ], [ 15, %entry ] ; <i32> [#uses=1]
+ %Y = sdiv i32 %X, 42 ; <i32> [#uses=1]
+ ret i32 %Y
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/icmp.ll b/src/LLVM/test/Transforms/InstCombine/icmp.ll
new file mode 100644
index 0000000..bdf72ac
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/icmp.ll
@@ -0,0 +1,561 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout =
+"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+
+define i32 @test1(i32 %X) {
+entry:
+ icmp slt i32 %X, 0 ; <i1>:0 [#uses=1]
+ zext i1 %0 to i32 ; <i32>:1 [#uses=1]
+ ret i32 %1
+; CHECK: @test1
+; CHECK: lshr i32 %X, 31
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test2(i32 %X) {
+entry:
+ icmp ult i32 %X, -2147483648 ; <i1>:0 [#uses=1]
+ zext i1 %0 to i32 ; <i32>:1 [#uses=1]
+ ret i32 %1
+; CHECK: @test2
+; CHECK: lshr i32 %X, 31
+; CHECK-NEXT: xor i32
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test3(i32 %X) {
+entry:
+ icmp slt i32 %X, 0 ; <i1>:0 [#uses=1]
+ sext i1 %0 to i32 ; <i32>:1 [#uses=1]
+ ret i32 %1
+; CHECK: @test3
+; CHECK: ashr i32 %X, 31
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test4(i32 %X) {
+entry:
+ icmp ult i32 %X, -2147483648 ; <i1>:0 [#uses=1]
+ sext i1 %0 to i32 ; <i32>:1 [#uses=1]
+ ret i32 %1
+; CHECK: @test4
+; CHECK: ashr i32 %X, 31
+; CHECK-NEXT: xor i32
+; CHECK-NEXT: ret i32
+}
+
+; PR4837
+define <2 x i1> @test5(<2 x i64> %x) {
+entry:
+ %V = icmp eq <2 x i64> %x, undef
+ ret <2 x i1> %V
+; CHECK: @test5
+; CHECK: ret <2 x i1> <i1 true, i1 true>
+}
+
+define i32 @test6(i32 %a, i32 %b) {
+ %c = icmp sle i32 %a, -1
+ %d = zext i1 %c to i32
+ %e = sub i32 0, %d
+ %f = and i32 %e, %b
+ ret i32 %f
+; CHECK: @test6
+; CHECK-NEXT: ashr i32 %a, 31
+; CHECK-NEXT: %f = and i32 %e, %b
+; CHECK-NEXT: ret i32 %f
+}
+
+
+define i1 @test7(i32 %x) {
+entry:
+ %a = add i32 %x, -1
+ %b = icmp ult i32 %a, %x
+ ret i1 %b
+; CHECK: @test7
+; CHECK: %b = icmp ne i32 %x, 0
+; CHECK: ret i1 %b
+}
+
+define i1 @test8(i32 %x){
+entry:
+ %a = add i32 %x, -1
+ %b = icmp eq i32 %a, %x
+ ret i1 %b
+; CHECK: @test8
+; CHECK: ret i1 false
+}
+
+define i1 @test9(i32 %x) {
+entry:
+ %a = add i32 %x, -2
+ %b = icmp ugt i32 %x, %a
+ ret i1 %b
+; CHECK: @test9
+; CHECK: icmp ugt i32 %x, 1
+; CHECK: ret i1 %b
+}
+
+define i1 @test10(i32 %x){
+entry:
+ %a = add i32 %x, -1
+ %b = icmp slt i32 %a, %x
+ ret i1 %b
+
+; CHECK: @test10
+; CHECK: %b = icmp ne i32 %x, -2147483648
+; CHECK: ret i1 %b
+}
+
+define i1 @test11(i32 %x) {
+ %a = add nsw i32 %x, 8
+ %b = icmp slt i32 %x, %a
+ ret i1 %b
+; CHECK: @test11
+; CHECK: ret i1 true
+}
+
+; PR6195
+define i1 @test12(i1 %A) {
+ %S = select i1 %A, i64 -4294967295, i64 8589934591
+ %B = icmp ne i64 bitcast (<2 x i32> <i32 1, i32 -1> to i64), %S
+ ret i1 %B
+; CHECK: @test12
+; CHECK-NEXT: %B = select i1
+; CHECK-NEXT: ret i1 %B
+}
+
+; PR6481
+define i1 @test13(i8 %X) nounwind readnone {
+entry:
+ %cmp = icmp slt i8 undef, %X
+ ret i1 %cmp
+; CHECK: @test13
+; CHECK: ret i1 false
+}
+
+define i1 @test14(i8 %X) nounwind readnone {
+entry:
+ %cmp = icmp slt i8 undef, -128
+ ret i1 %cmp
+; CHECK: @test14
+; CHECK: ret i1 false
+}
+
+define i1 @test15() nounwind readnone {
+entry:
+ %cmp = icmp eq i8 undef, -128
+ ret i1 %cmp
+; CHECK: @test15
+; CHECK: ret i1 undef
+}
+
+define i1 @test16() nounwind readnone {
+entry:
+ %cmp = icmp ne i8 undef, -128
+ ret i1 %cmp
+; CHECK: @test16
+; CHECK: ret i1 undef
+}
+
+define i1 @test17(i32 %x) nounwind {
+ %shl = shl i32 1, %x
+ %and = and i32 %shl, 8
+ %cmp = icmp eq i32 %and, 0
+ ret i1 %cmp
+; CHECK: @test17
+; CHECK-NEXT: %cmp = icmp ne i32 %x, 3
+}
+
+
+define i1 @test18(i32 %x) nounwind {
+ %sh = lshr i32 8, %x
+ %and = and i32 %sh, 1
+ %cmp = icmp eq i32 %and, 0
+ ret i1 %cmp
+; CHECK: @test18
+; CHECK-NEXT: %cmp = icmp ne i32 %x, 3
+}
+
+define i1 @test19(i32 %x) nounwind {
+ %shl = shl i32 1, %x
+ %and = and i32 %shl, 8
+ %cmp = icmp eq i32 %and, 8
+ ret i1 %cmp
+; CHECK: @test19
+; CHECK-NEXT: %cmp = icmp eq i32 %x, 3
+}
+
+define i1 @test20(i32 %x) nounwind {
+ %shl = shl i32 1, %x
+ %and = and i32 %shl, 8
+ %cmp = icmp ne i32 %and, 0
+ ret i1 %cmp
+; CHECK: @test20
+; CHECK-NEXT: %cmp = icmp eq i32 %x, 3
+}
+
+define i1 @test21(i8 %x, i8 %y) {
+; CHECK: @test21
+; CHECK-NOT: or i8
+; CHECK: icmp ugt
+ %A = or i8 %x, 1
+ %B = icmp ugt i8 %A, 3
+ ret i1 %B
+}
+
+define i1 @test22(i8 %x, i8 %y) {
+; CHECK: @test22
+; CHECK-NOT: or i8
+; CHECK: icmp ult
+ %A = or i8 %x, 1
+ %B = icmp ult i8 %A, 4
+ ret i1 %B
+}
+
+; PR2740
+; CHECK: @test23
+; CHECK: icmp sgt i32 %x, 1328634634
+define i1 @test23(i32 %x) nounwind {
+ %i3 = sdiv i32 %x, -1328634635
+ %i4 = icmp eq i32 %i3, -1
+ ret i1 %i4
+}
+
+@X = global [1000 x i32] zeroinitializer
+
+; PR8882
+; CHECK: @test24
+; CHECK: %cmp = icmp eq i64 %i, 1000
+; CHECK: ret i1 %cmp
+define i1 @test24(i64 %i) {
+ %p1 = getelementptr inbounds i32* getelementptr inbounds ([1000 x i32]* @X, i64 0, i64 0), i64 %i
+ %cmp = icmp eq i32* %p1, getelementptr inbounds ([1000 x i32]* @X, i64 1, i64 0)
+ ret i1 %cmp
+}
+
+; CHECK: @test25
+; X + Z > Y + Z -> X > Y if there is no overflow.
+; CHECK: %c = icmp sgt i32 %x, %y
+; CHECK: ret i1 %c
+define i1 @test25(i32 %x, i32 %y, i32 %z) {
+ %lhs = add nsw i32 %x, %z
+ %rhs = add nsw i32 %y, %z
+ %c = icmp sgt i32 %lhs, %rhs
+ ret i1 %c
+}
+
+; CHECK: @test26
+; X + Z > Y + Z -> X > Y if there is no overflow.
+; CHECK: %c = icmp ugt i32 %x, %y
+; CHECK: ret i1 %c
+define i1 @test26(i32 %x, i32 %y, i32 %z) {
+ %lhs = add nuw i32 %x, %z
+ %rhs = add nuw i32 %y, %z
+ %c = icmp ugt i32 %lhs, %rhs
+ ret i1 %c
+}
+
+; CHECK: @test27
+; X - Z > Y - Z -> X > Y if there is no overflow.
+; CHECK: %c = icmp sgt i32 %x, %y
+; CHECK: ret i1 %c
+define i1 @test27(i32 %x, i32 %y, i32 %z) {
+ %lhs = sub nsw i32 %x, %z
+ %rhs = sub nsw i32 %y, %z
+ %c = icmp sgt i32 %lhs, %rhs
+ ret i1 %c
+}
+
+; CHECK: @test28
+; X - Z > Y - Z -> X > Y if there is no overflow.
+; CHECK: %c = icmp ugt i32 %x, %y
+; CHECK: ret i1 %c
+define i1 @test28(i32 %x, i32 %y, i32 %z) {
+ %lhs = sub nuw i32 %x, %z
+ %rhs = sub nuw i32 %y, %z
+ %c = icmp ugt i32 %lhs, %rhs
+ ret i1 %c
+}
+
+; CHECK: @test29
+; X + Y > X -> Y > 0 if there is no overflow.
+; CHECK: %c = icmp sgt i32 %y, 0
+; CHECK: ret i1 %c
+define i1 @test29(i32 %x, i32 %y) {
+ %lhs = add nsw i32 %x, %y
+ %c = icmp sgt i32 %lhs, %x
+ ret i1 %c
+}
+
+; CHECK: @test30
+; X + Y > X -> Y > 0 if there is no overflow.
+; CHECK: %c = icmp ne i32 %y, 0
+; CHECK: ret i1 %c
+define i1 @test30(i32 %x, i32 %y) {
+ %lhs = add nuw i32 %x, %y
+ %c = icmp ugt i32 %lhs, %x
+ ret i1 %c
+}
+
+; CHECK: @test31
+; X > X + Y -> 0 > Y if there is no overflow.
+; CHECK: %c = icmp slt i32 %y, 0
+; CHECK: ret i1 %c
+define i1 @test31(i32 %x, i32 %y) {
+ %rhs = add nsw i32 %x, %y
+ %c = icmp sgt i32 %x, %rhs
+ ret i1 %c
+}
+
+; CHECK: @test32
+; X > X + Y -> 0 > Y if there is no overflow.
+; CHECK: ret i1 false
+define i1 @test32(i32 %x, i32 %y) {
+ %rhs = add nuw i32 %x, %y
+ %c = icmp ugt i32 %x, %rhs
+ ret i1 %c
+}
+
+; CHECK: @test33
+; X - Y > X -> 0 > Y if there is no overflow.
+; CHECK: %c = icmp slt i32 %y, 0
+; CHECK: ret i1 %c
+define i1 @test33(i32 %x, i32 %y) {
+ %lhs = sub nsw i32 %x, %y
+ %c = icmp sgt i32 %lhs, %x
+ ret i1 %c
+}
+
+; CHECK: @test34
+; X - Y > X -> 0 > Y if there is no overflow.
+; CHECK: ret i1 false
+define i1 @test34(i32 %x, i32 %y) {
+ %lhs = sub nuw i32 %x, %y
+ %c = icmp ugt i32 %lhs, %x
+ ret i1 %c
+}
+
+; CHECK: @test35
+; X > X - Y -> Y > 0 if there is no overflow.
+; CHECK: %c = icmp sgt i32 %y, 0
+; CHECK: ret i1 %c
+define i1 @test35(i32 %x, i32 %y) {
+ %rhs = sub nsw i32 %x, %y
+ %c = icmp sgt i32 %x, %rhs
+ ret i1 %c
+}
+
+; CHECK: @test36
+; X > X - Y -> Y > 0 if there is no overflow.
+; CHECK: %c = icmp ne i32 %y, 0
+; CHECK: ret i1 %c
+define i1 @test36(i32 %x, i32 %y) {
+ %rhs = sub nuw i32 %x, %y
+ %c = icmp ugt i32 %x, %rhs
+ ret i1 %c
+}
+
+; CHECK: @test37
+; X - Y > X - Z -> Z > Y if there is no overflow.
+; CHECK: %c = icmp sgt i32 %z, %y
+; CHECK: ret i1 %c
+define i1 @test37(i32 %x, i32 %y, i32 %z) {
+ %lhs = sub nsw i32 %x, %y
+ %rhs = sub nsw i32 %x, %z
+ %c = icmp sgt i32 %lhs, %rhs
+ ret i1 %c
+}
+
+; CHECK: @test38
+; X - Y > X - Z -> Z > Y if there is no overflow.
+; CHECK: %c = icmp ugt i32 %z, %y
+; CHECK: ret i1 %c
+define i1 @test38(i32 %x, i32 %y, i32 %z) {
+ %lhs = sub nuw i32 %x, %y
+ %rhs = sub nuw i32 %x, %z
+ %c = icmp ugt i32 %lhs, %rhs
+ ret i1 %c
+}
+
+; PR9343 #1
+; CHECK: @test39
+; CHECK: %B = icmp eq i32 %X, 0
+define i1 @test39(i32 %X, i32 %Y) {
+ %A = ashr exact i32 %X, %Y
+ %B = icmp eq i32 %A, 0
+ ret i1 %B
+}
+
+; CHECK: @test40
+; CHECK: %B = icmp ne i32 %X, 0
+define i1 @test40(i32 %X, i32 %Y) {
+ %A = lshr exact i32 %X, %Y
+ %B = icmp ne i32 %A, 0
+ ret i1 %B
+}
+
+; PR9343 #3
+; CHECK: @test41
+; CHECK: ret i1 true
+define i1 @test41(i32 %X, i32 %Y) {
+ %A = urem i32 %X, %Y
+ %B = icmp ugt i32 %Y, %A
+ ret i1 %B
+}
+
+; CHECK: @test42
+; CHECK: %B = icmp sgt i32 %Y, -1
+define i1 @test42(i32 %X, i32 %Y) {
+ %A = srem i32 %X, %Y
+ %B = icmp slt i32 %A, %Y
+ ret i1 %B
+}
+
+; CHECK: @test43
+; CHECK: %B = icmp slt i32 %Y, 0
+define i1 @test43(i32 %X, i32 %Y) {
+ %A = srem i32 %X, %Y
+ %B = icmp slt i32 %Y, %A
+ ret i1 %B
+}
+
+; CHECK: @test44
+; CHECK: %B = icmp sgt i32 %Y, -1
+define i1 @test44(i32 %X, i32 %Y) {
+ %A = srem i32 %X, %Y
+ %B = icmp slt i32 %A, %Y
+ ret i1 %B
+}
+
+; CHECK: @test45
+; CHECK: %B = icmp slt i32 %Y, 0
+define i1 @test45(i32 %X, i32 %Y) {
+ %A = srem i32 %X, %Y
+ %B = icmp slt i32 %Y, %A
+ ret i1 %B
+}
+
+; PR9343 #4
+; CHECK: @test46
+; CHECK: %C = icmp ult i32 %X, %Y
+define i1 @test46(i32 %X, i32 %Y, i32 %Z) {
+ %A = ashr exact i32 %X, %Z
+ %B = ashr exact i32 %Y, %Z
+ %C = icmp ult i32 %A, %B
+ ret i1 %C
+}
+
+; PR9343 #5
+; CHECK: @test47
+; CHECK: %C = icmp ugt i32 %X, %Y
+define i1 @test47(i32 %X, i32 %Y, i32 %Z) {
+ %A = ashr exact i32 %X, %Z
+ %B = ashr exact i32 %Y, %Z
+ %C = icmp ugt i32 %A, %B
+ ret i1 %C
+}
+
+; PR9343 #8
+; CHECK: @test48
+; CHECK: %C = icmp eq i32 %X, %Y
+define i1 @test48(i32 %X, i32 %Y, i32 %Z) {
+ %A = sdiv exact i32 %X, %Z
+ %B = sdiv exact i32 %Y, %Z
+ %C = icmp eq i32 %A, %B
+ ret i1 %C
+}
+
+; PR8469
+; CHECK: @test49
+; CHECK: ret <2 x i1> <i1 true, i1 true>
+define <2 x i1> @test49(<2 x i32> %tmp3) {
+entry:
+ %tmp11 = and <2 x i32> %tmp3, <i32 3, i32 3>
+ %cmp = icmp ult <2 x i32> %tmp11, <i32 4, i32 4>
+ ret <2 x i1> %cmp
+}
+
+; PR9343 #7
+; CHECK: @test50
+; CHECK: ret i1 true
+define i1 @test50(i16 %X, i32 %Y) {
+ %A = zext i16 %X to i32
+ %B = srem i32 %A, %Y
+ %C = icmp sgt i32 %B, -1
+ ret i1 %C
+}
+
+; CHECK: @test51
+; CHECK: ret i1 %C
+define i1 @test51(i32 %X, i32 %Y) {
+ %A = and i32 %X, 2147483648
+ %B = srem i32 %A, %Y
+ %C = icmp sgt i32 %B, -1
+ ret i1 %C
+}
+
+; CHECK: @test52
+; CHECK-NEXT: and i32 %x1, 16711935
+; CHECK-NEXT: icmp eq i32 {{.*}}, 4980863
+; CHECK-NEXT: ret i1
+define i1 @test52(i32 %x1) nounwind {
+ %conv = and i32 %x1, 255
+ %cmp = icmp eq i32 %conv, 127
+ %tmp2 = lshr i32 %x1, 16
+ %tmp3 = trunc i32 %tmp2 to i8
+ %cmp15 = icmp eq i8 %tmp3, 76
+
+ %A = and i1 %cmp, %cmp15
+ ret i1 %A
+}
+
+; PR9838
+; CHECK: @test53
+; CHECK-NEXT: ashr exact
+; CHECK-NEXT: ashr
+; CHECK-NEXT: icmp
+define i1 @test53(i32 %a, i32 %b) nounwind {
+ %x = ashr exact i32 %a, 30
+ %y = ashr i32 %b, 30
+ %z = icmp eq i32 %x, %y
+ ret i1 %z
+}
+
+; CHECK: @test54
+; CHECK-NEXT: %and = and i8 %a, -64
+; CHECK-NEXT: icmp eq i8 %and, -128
+define i1 @test54(i8 %a) nounwind {
+ %ext = zext i8 %a to i32
+ %and = and i32 %ext, 192
+ %ret = icmp eq i32 %and, 128
+ ret i1 %ret
+}
+
+; CHECK: @test55
+; CHECK-NEXT: icmp eq i32 %a, -123
+define i1 @test55(i32 %a) {
+ %sub = sub i32 0, %a
+ %cmp = icmp eq i32 %sub, 123
+ ret i1 %cmp
+}
+
+; CHECK: @test56
+; CHECK-NEXT: icmp eq i32 %a, -113
+define i1 @test56(i32 %a) {
+ %sub = sub i32 10, %a
+ %cmp = icmp eq i32 %sub, 123
+ ret i1 %cmp
+}
+
+; PR10267 Don't make icmps more expensive when no other inst is subsumed.
+declare void @foo(i32)
+; CHECK: @test57
+; CHECK: %and = and i32 %a, -2
+; CHECK: %cmp = icmp ne i32 %and, 0
+define i1 @test57(i32 %a) {
+ %and = and i32 %a, -2
+ %cmp = icmp ne i32 %and, 0
+ call void @foo(i32 %and)
+ ret i1 %cmp
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/idioms.ll b/src/LLVM/test/Transforms/InstCombine/idioms.ll
new file mode 100644
index 0000000..6b3567f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/idioms.ll
@@ -0,0 +1,32 @@
+; RUN: opt -instcombine %s -S | FileCheck %s
+
+; Check that code corresponding to the following C function is
+; simplified into a single ASR operation:
+;
+; int test_asr(int a, int b) {
+; return a < 0 ? -(-a - 1 >> b) - 1 : a >> b;
+; }
+;
+define i32 @test_asr(i32 %a, i32 %b) {
+entry:
+ %c = icmp slt i32 %a, 0
+ br i1 %c, label %bb2, label %bb3
+
+bb2:
+ %t1 = sub i32 0, %a
+ %not = sub i32 %t1, 1
+ %d = ashr i32 %not, %b
+ %t2 = sub i32 0, %d
+ %not2 = sub i32 %t2, 1
+ br label %bb4
+bb3:
+ %e = ashr i32 %a, %b
+ br label %bb4
+bb4:
+ %f = phi i32 [ %not2, %bb2 ], [ %e, %bb3 ]
+ ret i32 %f
+; CHECK: @test_asr
+; CHECK: bb4:
+; CHECK: %f = ashr i32 %a, %b
+; CHECK: ret i32 %f
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/intrinsics.ll b/src/LLVM/test/Transforms/InstCombine/intrinsics.ll
new file mode 100644
index 0000000..f033e51
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/intrinsics.ll
@@ -0,0 +1,216 @@
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+%overflow.result = type {i8, i1}
+
+declare %overflow.result @llvm.uadd.with.overflow.i8(i8, i8)
+declare %overflow.result @llvm.umul.with.overflow.i8(i8, i8)
+declare double @llvm.powi.f64(double, i32) nounwind readonly
+declare i32 @llvm.cttz.i32(i32) nounwind readnone
+declare i32 @llvm.ctlz.i32(i32) nounwind readnone
+declare i32 @llvm.ctpop.i32(i32) nounwind readnone
+declare i8 @llvm.ctlz.i8(i8) nounwind readnone
+
+define i8 @uaddtest1(i8 %A, i8 %B) {
+ %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
+ %y = extractvalue %overflow.result %x, 0
+ ret i8 %y
+; CHECK: @uaddtest1
+; CHECK-NEXT: %y = add i8 %A, %B
+; CHECK-NEXT: ret i8 %y
+}
+
+define i8 @uaddtest2(i8 %A, i8 %B, i1* %overflowPtr) {
+ %and.A = and i8 %A, 127
+ %and.B = and i8 %B, 127
+ %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %and.A, i8 %and.B)
+ %y = extractvalue %overflow.result %x, 0
+ %z = extractvalue %overflow.result %x, 1
+ store i1 %z, i1* %overflowPtr
+ ret i8 %y
+; CHECK: @uaddtest2
+; CHECK-NEXT: %and.A = and i8 %A, 127
+; CHECK-NEXT: %and.B = and i8 %B, 127
+; CHECK-NEXT: %x = add nuw i8 %and.A, %and.B
+; CHECK-NEXT: store i1 false, i1* %overflowPtr
+; CHECK-NEXT: ret i8 %x
+}
+
+define i8 @uaddtest3(i8 %A, i8 %B, i1* %overflowPtr) {
+ %or.A = or i8 %A, -128
+ %or.B = or i8 %B, -128
+ %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %or.A, i8 %or.B)
+ %y = extractvalue %overflow.result %x, 0
+ %z = extractvalue %overflow.result %x, 1
+ store i1 %z, i1* %overflowPtr
+ ret i8 %y
+; CHECK: @uaddtest3
+; CHECK-NEXT: %or.A = or i8 %A, -128
+; CHECK-NEXT: %or.B = or i8 %B, -128
+; CHECK-NEXT: %x = add i8 %or.A, %or.B
+; CHECK-NEXT: store i1 true, i1* %overflowPtr
+; CHECK-NEXT: ret i8 %x
+}
+
+define i8 @uaddtest4(i8 %A, i1* %overflowPtr) {
+ %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 undef, i8 %A)
+ %y = extractvalue %overflow.result %x, 0
+ %z = extractvalue %overflow.result %x, 1
+ store i1 %z, i1* %overflowPtr
+ ret i8 %y
+; CHECK: @uaddtest4
+; CHECK-NEXT: ret i8 undef
+}
+
+define i8 @uaddtest5(i8 %A, i1* %overflowPtr) {
+ %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 0, i8 %A)
+ %y = extractvalue %overflow.result %x, 0
+ %z = extractvalue %overflow.result %x, 1
+ store i1 %z, i1* %overflowPtr
+ ret i8 %y
+; CHECK: @uaddtest5
+; CHECK: ret i8 %A
+}
+
+define i1 @uaddtest6(i8 %A, i8 %B) {
+ %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 -4)
+ %z = extractvalue %overflow.result %x, 1
+ ret i1 %z
+; CHECK: @uaddtest6
+; CHECK-NEXT: %z = icmp ugt i8 %A, 3
+; CHECK-NEXT: ret i1 %z
+}
+
+define i8 @uaddtest7(i8 %A, i8 %B) {
+ %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
+ %z = extractvalue %overflow.result %x, 0
+ ret i8 %z
+; CHECK: @uaddtest7
+; CHECK-NEXT: %z = add i8 %A, %B
+; CHECK-NEXT: ret i8 %z
+}
+
+
+define i8 @umultest1(i8 %A, i1* %overflowPtr) {
+ %x = call %overflow.result @llvm.umul.with.overflow.i8(i8 0, i8 %A)
+ %y = extractvalue %overflow.result %x, 0
+ %z = extractvalue %overflow.result %x, 1
+ store i1 %z, i1* %overflowPtr
+ ret i8 %y
+; CHECK: @umultest1
+; CHECK-NEXT: store i1 false, i1* %overflowPtr
+; CHECK-NEXT: ret i8 0
+}
+
+define i8 @umultest2(i8 %A, i1* %overflowPtr) {
+ %x = call %overflow.result @llvm.umul.with.overflow.i8(i8 1, i8 %A)
+ %y = extractvalue %overflow.result %x, 0
+ %z = extractvalue %overflow.result %x, 1
+ store i1 %z, i1* %overflowPtr
+ ret i8 %y
+; CHECK: @umultest2
+; CHECK-NEXT: store i1 false, i1* %overflowPtr
+; CHECK-NEXT: ret i8 %A
+}
+
+%ov.result.32 = type { i32, i1 }
+declare %ov.result.32 @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
+
+define i32 @umultest3(i32 %n) nounwind {
+ %shr = lshr i32 %n, 2
+ %mul = call %ov.result.32 @llvm.umul.with.overflow.i32(i32 %shr, i32 3)
+ %ov = extractvalue %ov.result.32 %mul, 1
+ %res = extractvalue %ov.result.32 %mul, 0
+ %ret = select i1 %ov, i32 -1, i32 %res
+ ret i32 %ret
+; CHECK: @umultest3
+; CHECK-NEXT: shr
+; CHECK-NEXT: mul nuw
+; CHECK-NEXT: ret
+}
+
+define i32 @umultest4(i32 %n) nounwind {
+ %shr = lshr i32 %n, 1
+ %mul = call %ov.result.32 @llvm.umul.with.overflow.i32(i32 %shr, i32 4)
+ %ov = extractvalue %ov.result.32 %mul, 1
+ %res = extractvalue %ov.result.32 %mul, 0
+ %ret = select i1 %ov, i32 -1, i32 %res
+ ret i32 %ret
+; CHECK: @umultest4
+; CHECK: umul.with.overflow
+}
+
+define void @powi(double %V, double *%P) {
+entry:
+ %A = tail call double @llvm.powi.f64(double %V, i32 -1) nounwind
+ volatile store double %A, double* %P
+
+ %B = tail call double @llvm.powi.f64(double %V, i32 0) nounwind
+ volatile store double %B, double* %P
+
+ %C = tail call double @llvm.powi.f64(double %V, i32 1) nounwind
+ volatile store double %C, double* %P
+ ret void
+; CHECK: @powi
+; CHECK: %A = fdiv double 1.0{{.*}}, %V
+; CHECK: store volatile double %A,
+; CHECK: store volatile double 1.0
+; CHECK: store volatile double %V
+}
+
+define i32 @cttz(i32 %a) {
+entry:
+ %or = or i32 %a, 8
+ %and = and i32 %or, -8
+ %count = tail call i32 @llvm.cttz.i32(i32 %and) nounwind readnone
+ ret i32 %count
+; CHECK: @cttz
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret i32 3
+}
+
+define i8 @ctlz(i8 %a) {
+entry:
+ %or = or i8 %a, 32
+ %and = and i8 %or, 63
+ %count = tail call i8 @llvm.ctlz.i8(i8 %and) nounwind readnone
+ ret i8 %count
+; CHECK: @ctlz
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret i8 2
+}
+
+define void @cmp.simplify(i32 %a, i32 %b, i1* %c) {
+entry:
+ %lz = tail call i32 @llvm.ctlz.i32(i32 %a) nounwind readnone
+ %lz.cmp = icmp eq i32 %lz, 32
+ volatile store i1 %lz.cmp, i1* %c
+ %tz = tail call i32 @llvm.cttz.i32(i32 %a) nounwind readnone
+ %tz.cmp = icmp ne i32 %tz, 32
+ volatile store i1 %tz.cmp, i1* %c
+ %pop = tail call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone
+ %pop.cmp = icmp eq i32 %pop, 0
+ volatile store i1 %pop.cmp, i1* %c
+ ret void
+; CHECK: @cmp.simplify
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %lz.cmp = icmp eq i32 %a, 0
+; CHECK-NEXT: store volatile i1 %lz.cmp, i1* %c
+; CHECK-NEXT: %tz.cmp = icmp ne i32 %a, 0
+; CHECK-NEXT: store volatile i1 %tz.cmp, i1* %c
+; CHECK-NEXT: %pop.cmp = icmp eq i32 %b, 0
+; CHECK-NEXT: store volatile i1 %pop.cmp, i1* %c
+}
+
+
+define i32 @cttz_simplify1(i32 %x) nounwind readnone ssp {
+ %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %x) ; <i32> [#uses=1]
+ %shr3 = lshr i32 %tmp1, 5 ; <i32> [#uses=1]
+ ret i32 %shr3
+
+; CHECK: @cttz_simplify1
+; CHECK: icmp eq i32 %x, 0
+; CHECK-NEXT: zext i1
+; CHECK-NEXT: ret i32
+}
+
+
diff --git a/src/LLVM/test/Transforms/InstCombine/invariant.ll b/src/LLVM/test/Transforms/InstCombine/invariant.ll
new file mode 100644
index 0000000..3832380
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/invariant.ll
@@ -0,0 +1,16 @@
+; Test to make sure unused llvm.invariant.start calls are not trivially eliminated
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+declare void @g(i8*)
+
+declare {}* @llvm.invariant.start(i64, i8* nocapture) nounwind readonly
+
+define i8 @f() {
+ %a = alloca i8 ; <i8*> [#uses=4]
+ store i8 0, i8* %a
+ %i = call {}* @llvm.invariant.start(i64 1, i8* %a) ; <{}*> [#uses=0]
+ ; CHECK: call {}* @llvm.invariant.start
+ call void @g(i8* %a)
+ %r = load i8* %a ; <i8> [#uses=1]
+ ret i8 %r
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/known_align.ll b/src/LLVM/test/Transforms/InstCombine/known_align.ll
new file mode 100644
index 0000000..5382abf
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/known_align.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -instcombine -S | grep {align 1}
+; END.
+
+ %struct.p = type <{ i8, i32 }>
+@t = global %struct.p <{ i8 1, i32 10 }> ; <%struct.p*> [#uses=1]
+@u = weak global %struct.p zeroinitializer ; <%struct.p*> [#uses=1]
+
+define i32 @main() {
+entry:
+ %retval = alloca i32, align 4 ; <i32*> [#uses=2]
+ %tmp = alloca i32, align 4 ; <i32*> [#uses=2]
+ %tmp1 = alloca i32, align 4 ; <i32*> [#uses=3]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ %tmp3 = load i32* getelementptr (%struct.p* @t, i32 0, i32 1), align 1 ; <i32> [#uses=1]
+ store i32 %tmp3, i32* %tmp1, align 4
+ %tmp5 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
+ store i32 %tmp5, i32* getelementptr (%struct.p* @u, i32 0, i32 1), align 1
+ %tmp6 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
+ store i32 %tmp6, i32* %tmp, align 4
+ %tmp7 = load i32* %tmp, align 4 ; <i32> [#uses=1]
+ store i32 %tmp7, i32* %retval, align 4
+ br label %return
+
+return: ; preds = %entry
+ %retval8 = load i32* %retval ; <i32> [#uses=1]
+ ret i32 %retval8
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/load-cmp.ll b/src/LLVM/test/Transforms/InstCombine/load-cmp.ll
new file mode 100644
index 0000000..5cafb77
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/load-cmp.ll
@@ -0,0 +1,112 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+@G16 = internal constant [10 x i16] [i16 35, i16 82, i16 69, i16 81, i16 85,
+ i16 73, i16 82, i16 69, i16 68, i16 0]
+@GD = internal constant [6 x double]
+ [double -10.0, double 1.0, double 4.0, double 2.0, double -20.0, double -40.0]
+
+define i1 @test1(i32 %X) {
+ %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %Q = load i16* %P
+ %R = icmp eq i16 %Q, 0
+ ret i1 %R
+; CHECK: @test1
+; CHECK-NEXT: %R = icmp eq i32 %X, 9
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test2(i32 %X) {
+ %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %Q = load i16* %P
+ %R = icmp slt i16 %Q, 85
+ ret i1 %R
+; CHECK: @test2
+; CHECK-NEXT: %R = icmp ne i32 %X, 4
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test3(i32 %X) {
+ %P = getelementptr inbounds [6 x double]* @GD, i32 0, i32 %X
+ %Q = load double* %P
+ %R = fcmp oeq double %Q, 1.0
+ ret i1 %R
+; CHECK: @test3
+; CHECK-NEXT: %R = icmp eq i32 %X, 1
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test4(i32 %X) {
+ %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %Q = load i16* %P
+ %R = icmp sle i16 %Q, 73
+ ret i1 %R
+; CHECK: @test4
+; CHECK-NEXT: lshr i32 933, %X
+; CHECK-NEXT: and i32 {{.*}}, 1
+; CHECK-NEXT: %R = icmp ne i32 {{.*}}, 0
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test5(i32 %X) {
+ %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %Q = load i16* %P
+ %R = icmp eq i16 %Q, 69
+ ret i1 %R
+; CHECK: @test5
+; CHECK-NEXT: icmp eq i32 %X, 2
+; CHECK-NEXT: icmp eq i32 %X, 7
+; CHECK-NEXT: %R = or i1
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test6(i32 %X) {
+ %P = getelementptr inbounds [6 x double]* @GD, i32 0, i32 %X
+ %Q = load double* %P
+ %R = fcmp ogt double %Q, 0.0
+ ret i1 %R
+; CHECK: @test6
+; CHECK-NEXT: add i32 %X, -1
+; CHECK-NEXT: %R = icmp ult i32 {{.*}}, 3
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test7(i32 %X) {
+ %P = getelementptr inbounds [6 x double]* @GD, i32 0, i32 %X
+ %Q = load double* %P
+ %R = fcmp olt double %Q, 0.0
+ ret i1 %R
+; CHECK: @test7
+; CHECK-NEXT: add i32 %X, -1
+; CHECK-NEXT: %R = icmp ugt i32 {{.*}}, 2
+; CHECK-NEXT: ret i1 %R
+}
+
+define i1 @test8(i32 %X) {
+ %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %Q = load i16* %P
+ %R = and i16 %Q, 3
+ %S = icmp eq i16 %R, 0
+ ret i1 %S
+; CHECK: @test8
+; CHECK-NEXT: add i32 %X, -8
+; CHECK-NEXT: icmp ult i32 {{.*}}, 2
+; CHECK-NEXT: ret i1
+}
+
+@GA = internal constant [4 x { i32, i32 } ] [
+ { i32, i32 } { i32 1, i32 0 },
+ { i32, i32 } { i32 2, i32 1 },
+ { i32, i32 } { i32 3, i32 1 },
+ { i32, i32 } { i32 4, i32 0 }
+]
+
+define i1 @test9(i32 %X) {
+ %P = getelementptr inbounds [4 x { i32, i32 } ]* @GA, i32 0, i32 %X, i32 1
+ %Q = load i32* %P
+ %R = icmp eq i32 %Q, 1
+ ret i1 %R
+; CHECK: @test9
+; CHECK-NEXT: add i32 %X, -1
+; CHECK-NEXT: icmp ult i32 {{.*}}, 2
+; CHECK-NEXT: ret i1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/load-select.ll b/src/LLVM/test/Transforms/InstCombine/load-select.ll
new file mode 100644
index 0000000..f3d83dc
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/load-select.ll
@@ -0,0 +1,16 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
+
+@a = constant [2 x i32] [i32 3, i32 6] ; <[2 x i32]*> [#uses=2]
+
+define i32 @b(i32 %y) nounwind readonly {
+; CHECK: @b
+; CHECK-NOT: load
+; CHECK: ret i32
+entry:
+ %0 = icmp eq i32 %y, 0 ; <i1> [#uses=1]
+ %storemerge = select i1 %0, i32* getelementptr inbounds ([2 x i32]* @a, i32 0, i32 1), i32* getelementptr inbounds ([2 x i32]* @a, i32 0, i32 0) ; <i32*> [#uses=1]
+ %1 = load i32* %storemerge, align 4 ; <i32> [#uses=1]
+ ret i32 %1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/load.ll b/src/LLVM/test/Transforms/InstCombine/load.ll
new file mode 100644
index 0000000..2f95409
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/load.ll
@@ -0,0 +1,98 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | not grep load
+
+@X = constant i32 42 ; <i32*> [#uses=2]
+@X2 = constant i32 47 ; <i32*> [#uses=1]
+@Y = constant [2 x { i32, float }] [ { i32, float } { i32 12, float 1.000000e+00 }, { i32, float } { i32 37, float 0x3FF3B2FEC0000000 } ] ; <[2 x { i32, float }]*> [#uses=2]
+@Z = constant [2 x { i32, float }] zeroinitializer ; <[2 x { i32, float }]*> [#uses=1]
+
+@GLOBAL = internal constant [4 x i32] zeroinitializer
+
+
+define i32 @test1() {
+ %B = load i32* @X ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define float @test2() {
+ %A = getelementptr [2 x { i32, float }]* @Y, i64 0, i64 1, i32 1 ; <float*> [#uses=1]
+ %B = load float* %A ; <float> [#uses=1]
+ ret float %B
+}
+
+define i32 @test3() {
+ %A = getelementptr [2 x { i32, float }]* @Y, i64 0, i64 0, i32 0 ; <i32*> [#uses=1]
+ %B = load i32* %A ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test4() {
+ %A = getelementptr [2 x { i32, float }]* @Z, i64 0, i64 1, i32 0 ; <i32*> [#uses=1]
+ %B = load i32* %A ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test5(i1 %C) {
+ %Y = select i1 %C, i32* @X, i32* @X2 ; <i32*> [#uses=1]
+ %Z = load i32* %Y ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+define i32 @test7(i32 %X) {
+ %V = getelementptr i32* null, i32 %X ; <i32*> [#uses=1]
+ %R = load i32* %V ; <i32> [#uses=1]
+ ret i32 %R
+}
+
+define i32 @test8(i32* %P) {
+ store i32 1, i32* %P
+ %X = load i32* %P ; <i32> [#uses=1]
+ ret i32 %X
+}
+
+define i32 @test9(i32* %P) {
+ %X = load i32* %P ; <i32> [#uses=1]
+ %Y = load i32* %P ; <i32> [#uses=1]
+ %Z = sub i32 %X, %Y ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+define i32 @test10(i1 %C.upgrd.1, i32* %P, i32* %Q) {
+ br i1 %C.upgrd.1, label %T, label %F
+T: ; preds = %0
+ store i32 1, i32* %Q
+ store i32 0, i32* %P
+ br label %C
+F: ; preds = %0
+ store i32 0, i32* %P
+ br label %C
+C: ; preds = %F, %T
+ %V = load i32* %P ; <i32> [#uses=1]
+ ret i32 %V
+}
+
+define double @test11(double* %p) {
+ %t0 = getelementptr double* %p, i32 1
+ store double 2.0, double* %t0
+ %t1 = getelementptr double* %p, i32 1
+ %x = load double* %t1
+ ret double %x
+}
+
+define i32 @test12(i32* %P) {
+ %A = alloca i32
+ store i32 123, i32* %A
+ ; Cast the result of the load not the source
+ %Q = bitcast i32* %A to i32*
+ %V = load i32* %Q
+ ret i32 %V
+}
+
+define <16 x i8> @test13(<2 x i64> %x) {
+entry:
+ %tmp = load <16 x i8> * bitcast ([4 x i32]* @GLOBAL to <16 x i8>*)
+ ret <16 x i8> %tmp
+}
+
+
diff --git a/src/LLVM/test/Transforms/InstCombine/load3.ll b/src/LLVM/test/Transforms/InstCombine/load3.ll
new file mode 100644
index 0000000..35398e1
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/load3.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-darwin10.0.0"
+
+; Instcombine should be able to do trivial CSE of loads.
+
+define i32 @test1(i32* %p) {
+ %t0 = getelementptr i32* %p, i32 1
+ %y = load i32* %t0
+ %t1 = getelementptr i32* %p, i32 1
+ %x = load i32* %t1
+ %a = sub i32 %y, %x
+ ret i32 %a
+; CHECK: @test1
+; CHECK: ret i32 0
+}
+
+
+; PR7429
+@.str = private constant [4 x i8] c"XYZ\00"
+define float @test2() {
+ %tmp = load float* bitcast ([4 x i8]* @.str to float*), align 1
+ ret float %tmp
+
+; CHECK: @test2
+; CHECK: ret float 0x3806965600000000
+}
\ No newline at end of file
diff --git a/src/LLVM/test/Transforms/InstCombine/loadstore-alignment.ll b/src/LLVM/test/Transforms/InstCombine/loadstore-alignment.ll
new file mode 100644
index 0000000..1d932d2
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/loadstore-alignment.ll
@@ -0,0 +1,67 @@
+; RUN: opt < %s -instcombine -S | grep {, align 16} | count 14
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+@x = external global <2 x i64>, align 16
+@xx = external global [13 x <2 x i64>], align 16
+
+define <2 x i64> @static_hem() {
+ %t = getelementptr <2 x i64>* @x, i32 7
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @hem(i32 %i) {
+ %t = getelementptr <2 x i64>* @x, i32 %i
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @hem_2d(i32 %i, i32 %j) {
+ %t = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @foo() {
+ %tmp1 = load <2 x i64>* @x, align 1
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @bar() {
+ %t = alloca <2 x i64>
+ call void @kip(<2 x i64>* %t)
+ %tmp1 = load <2 x i64>* %t, align 1
+ ret <2 x i64> %tmp1
+}
+
+define void @static_hem_store(<2 x i64> %y) {
+ %t = getelementptr <2 x i64>* @x, i32 7
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
+}
+
+define void @hem_store(i32 %i, <2 x i64> %y) {
+ %t = getelementptr <2 x i64>* @x, i32 %i
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
+}
+
+define void @hem_2d_store(i32 %i, i32 %j, <2 x i64> %y) {
+ %t = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
+}
+
+define void @foo_store(<2 x i64> %y) {
+ store <2 x i64> %y, <2 x i64>* @x, align 1
+ ret void
+}
+
+define void @bar_store(<2 x i64> %y) {
+ %t = alloca <2 x i64>
+ call void @kip(<2 x i64>* %t)
+ store <2 x i64> %y, <2 x i64>* %t, align 1
+ ret void
+}
+
+declare void @kip(<2 x i64>* %t)
diff --git a/src/LLVM/test/Transforms/InstCombine/logical-select.ll b/src/LLVM/test/Transforms/InstCombine/logical-select.ll
new file mode 100644
index 0000000..bb59817
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/logical-select.ll
@@ -0,0 +1,68 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+
+define i32 @foo(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+ %e = icmp slt i32 %a, %b
+ %f = sext i1 %e to i32
+ %g = and i32 %c, %f
+ %h = xor i32 %f, -1
+ %i = and i32 %d, %h
+ %j = or i32 %g, %i
+ ret i32 %j
+; CHECK: %e = icmp slt i32 %a, %b
+; CHECK: %j = select i1 %e, i32 %c, i32 %d
+; CHECK: ret i32 %j
+}
+define i32 @bar(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+ %e = icmp slt i32 %a, %b
+ %f = sext i1 %e to i32
+ %g = and i32 %c, %f
+ %h = xor i32 %f, -1
+ %i = and i32 %d, %h
+ %j = or i32 %i, %g
+ ret i32 %j
+; CHECK: %e = icmp slt i32 %a, %b
+; CHECK: %j = select i1 %e, i32 %c, i32 %d
+; CHECK: ret i32 %j
+}
+
+define i32 @goo(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+entry:
+ %0 = icmp slt i32 %a, %b
+ %iftmp.0.0 = select i1 %0, i32 -1, i32 0
+ %1 = and i32 %iftmp.0.0, %c
+ %not = xor i32 %iftmp.0.0, -1
+ %2 = and i32 %not, %d
+ %3 = or i32 %1, %2
+ ret i32 %3
+; CHECK: %0 = icmp slt i32 %a, %b
+; CHECK: %1 = select i1 %0, i32 %c, i32 %d
+; CHECK: ret i32 %1
+}
+define i32 @poo(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+entry:
+ %0 = icmp slt i32 %a, %b
+ %iftmp.0.0 = select i1 %0, i32 -1, i32 0
+ %1 = and i32 %iftmp.0.0, %c
+ %iftmp = select i1 %0, i32 0, i32 -1
+ %2 = and i32 %iftmp, %d
+ %3 = or i32 %1, %2
+ ret i32 %3
+; CHECK: %0 = icmp slt i32 %a, %b
+; CHECK: %1 = select i1 %0, i32 %c, i32 %d
+; CHECK: ret i32 %1
+}
+
+define i32 @par(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
+entry:
+ %0 = icmp slt i32 %a, %b
+ %iftmp.1.0 = select i1 %0, i32 -1, i32 0
+ %1 = and i32 %iftmp.1.0, %c
+ %not = xor i32 %iftmp.1.0, -1
+ %2 = and i32 %not, %d
+ %3 = or i32 %1, %2
+ ret i32 %3
+; CHECK: %0 = icmp slt i32 %a, %b
+; CHECK: %1 = select i1 %0, i32 %c, i32 %d
+; CHECK: ret i32 %1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/lshr-phi.ll b/src/LLVM/test/Transforms/InstCombine/lshr-phi.ll
new file mode 100644
index 0000000..76a113f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/lshr-phi.ll
@@ -0,0 +1,35 @@
+; RUN: opt < %s -instcombine -S > %t
+; RUN: not grep lshr %t
+; RUN: grep add %t | count 1
+
+; Instcombine should be able to eliminate the lshr, because only
+; bits in the operand which might be non-zero will be shifted
+; off the end.
+
+define i32 @hash_string(i8* nocapture %key) nounwind readonly {
+entry:
+ %t0 = load i8* %key, align 1 ; <i8> [#uses=1]
+ %t1 = icmp eq i8 %t0, 0 ; <i1> [#uses=1]
+ br i1 %t1, label %bb2, label %bb
+
+bb: ; preds = %bb, %entry
+ %indvar = phi i64 [ 0, %entry ], [ %tmp, %bb ] ; <i64> [#uses=2]
+ %k.04 = phi i32 [ 0, %entry ], [ %t8, %bb ] ; <i32> [#uses=2]
+ %cp.05 = getelementptr i8* %key, i64 %indvar ; <i8*> [#uses=1]
+ %t2 = shl i32 %k.04, 1 ; <i32> [#uses=1]
+ %t3 = lshr i32 %k.04, 14 ; <i32> [#uses=1]
+ %t4 = add i32 %t2, %t3 ; <i32> [#uses=1]
+ %t5 = load i8* %cp.05, align 1 ; <i8> [#uses=1]
+ %t6 = sext i8 %t5 to i32 ; <i32> [#uses=1]
+ %t7 = xor i32 %t6, %t4 ; <i32> [#uses=1]
+ %t8 = and i32 %t7, 16383 ; <i32> [#uses=2]
+ %tmp = add i64 %indvar, 1 ; <i64> [#uses=2]
+ %scevgep = getelementptr i8* %key, i64 %tmp ; <i8*> [#uses=1]
+ %t9 = load i8* %scevgep, align 1 ; <i8> [#uses=1]
+ %t10 = icmp eq i8 %t9, 0 ; <i1> [#uses=1]
+ br i1 %t10, label %bb2, label %bb
+
+bb2: ; preds = %bb, %entry
+ %k.0.lcssa = phi i32 [ 0, %entry ], [ %t8, %bb ] ; <i32> [#uses=1]
+ ret i32 %k.0.lcssa
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/malloc-free-delete.ll b/src/LLVM/test/Transforms/InstCombine/malloc-free-delete.ll
new file mode 100644
index 0000000..0112607
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/malloc-free-delete.ll
@@ -0,0 +1,48 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; PR1201
+define i32 @main(i32 %argc, i8** %argv) {
+ %c_19 = alloca i8*
+ %malloc_206 = tail call i8* @malloc(i32 mul (i32 ptrtoint (i8* getelementptr (i8* null, i32 1) to i32), i32 10))
+ store i8* %malloc_206, i8** %c_19
+ %tmp_207 = load i8** %c_19
+ tail call void @free(i8* %tmp_207)
+ ret i32 0
+; CHECK-NOT: malloc
+; CHECK-NOT: free
+; CHECK: ret i32 0
+}
+
+declare noalias i8* @malloc(i32)
+declare void @free(i8*)
+
+define i1 @foo() {
+; CHECK: @foo
+; CHECK-NEXT: ret i1 false
+ %m = call i8* @malloc(i32 1)
+ %z = icmp eq i8* %m, null
+ call void @free(i8* %m)
+ ret i1 %z
+}
+
+declare void @llvm.lifetime.start(i64, i8*)
+declare void @llvm.lifetime.end(i64, i8*)
+
+define void @test3() {
+; CHECK: @test3
+; CHECK-NEXT: ret void
+ %a = call noalias i8* @malloc(i32 10)
+ call void @llvm.lifetime.start(i64 10, i8* %a)
+ call void @llvm.lifetime.end(i64 10, i8* %a)
+ ret void
+}
+
+;; This used to crash.
+define void @test4() {
+; CHECK: @test4
+; CHECK-NEXT: ret void
+ %A = call i8* @malloc(i32 16000)
+ %B = bitcast i8* %A to double*
+ %C = bitcast double* %B to i8*
+ call void @free(i8* %C)
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/memcpy-to-load.ll b/src/LLVM/test/Transforms/InstCombine/memcpy-to-load.ll
new file mode 100644
index 0000000..04aac98
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/memcpy-to-load.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -instcombine -S | grep {load double}
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i686-apple-darwin8"
+
+define void @foo(double* %X, double* %Y) {
+entry:
+ %tmp2 = bitcast double* %X to i8*
+ %tmp13 = bitcast double* %Y to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* %tmp13, i32 8, i32 1, i1 false)
+ ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
diff --git a/src/LLVM/test/Transforms/InstCombine/memcpy.ll b/src/LLVM/test/Transforms/InstCombine/memcpy.ll
new file mode 100644
index 0000000..8a2e3aa
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/memcpy.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
+
+define void @test1(i8* %a) {
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %a, i32 100, i32 1, i1 false)
+ ret void
+; CHECK: define void @test1
+; CHECK-NEXT: ret void
+}
+
+
+; PR8267
+define void @test2(i8* %a) {
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %a, i32 100, i32 1, i1 true)
+ ret void
+; CHECK: define void @test2
+; CHECK-NEXT: call void @llvm.memcpy
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/memmove.ll b/src/LLVM/test/Transforms/InstCombine/memmove.ll
new file mode 100644
index 0000000..4ed4300
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/memmove.ll
@@ -0,0 +1,39 @@
+; This test makes sure that memmove instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | not grep {call void @llvm.memmove}
+
+@S = internal constant [33 x i8] c"panic: restorelist inconsistency\00" ; <[33 x i8]*> [#uses=1]
+@h = constant [2 x i8] c"h\00" ; <[2 x i8]*> [#uses=1]
+@hel = constant [4 x i8] c"hel\00" ; <[4 x i8]*> [#uses=1]
+@hello_u = constant [8 x i8] c"hello_u\00" ; <[8 x i8]*> [#uses=1]
+
+define void @test1(i8* %A, i8* %B, i32 %N) {
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %A, i8* %B, i32 0, i32 1, i1 false)
+ ret void
+}
+
+define void @test2(i8* %A, i32 %N) {
+ ;; dest can't alias source since we can't write to source!
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %A, i8* getelementptr inbounds ([33 x i8]* @S, i32 0, i32 0), i32 %N, i32 1, i1 false)
+ ret void
+}
+
+define i32 @test3() {
+ %h_p = getelementptr [2 x i8]* @h, i32 0, i32 0 ; <i8*> [#uses=1]
+ %hel_p = getelementptr [4 x i8]* @hel, i32 0, i32 0 ; <i8*> [#uses=1]
+ %hello_u_p = getelementptr [8 x i8]* @hello_u, i32 0, i32 0 ; <i8*> [#uses=1]
+ %target = alloca [1024 x i8] ; <[1024 x i8]*> [#uses=1]
+ %target_p = getelementptr [1024 x i8]* %target, i32 0, i32 0 ; <i8*> [#uses=3]
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %target_p, i8* %h_p, i32 2, i32 2, i1 false)
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %target_p, i8* %hel_p, i32 4, i32 4, i1 false)
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* %target_p, i8* %hello_u_p, i32 8, i32 8, i1 false)
+ ret i32 0
+}
+
+; PR2370
+define void @test4(i8* %a) {
+ tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %a, i8* %a, i32 100, i32 1, i1 false)
+ ret void
+}
+
+declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
diff --git a/src/LLVM/test/Transforms/InstCombine/memset.ll b/src/LLVM/test/Transforms/InstCombine/memset.ll
new file mode 100644
index 0000000..7f7bc9f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/memset.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | not grep {call.*llvm.memset}
+
+define i32 @main() {
+ %target = alloca [1024 x i8]
+ %target_p = getelementptr [1024 x i8]* %target, i32 0, i32 0
+ call void @llvm.memset.p0i8.i32(i8* %target_p, i8 1, i32 0, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* %target_p, i8 1, i32 1, i32 1, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* %target_p, i8 1, i32 2, i32 2, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* %target_p, i8 1, i32 4, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i32(i8* %target_p, i8 1, i32 8, i32 8, i1 false)
+ ret i32 0
+}
+
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
diff --git a/src/LLVM/test/Transforms/InstCombine/memset2.ll b/src/LLVM/test/Transforms/InstCombine/memset2.ll
new file mode 100644
index 0000000..87639f0
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/memset2.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; Test to check that instcombine doesn't drop the address space when optimizing
+; memset.
+%struct.Moves = type { [9 x i8], i8, i8, i8, [5 x i8] }
+
+define i32 @test(%struct.Moves addrspace(1)* nocapture %moves) {
+entry:
+; CHECK: bitcast i8 addrspace(1)* %gep to i64 addrspace(1)*
+ %gep = getelementptr inbounds %struct.Moves addrspace(1)* %moves, i32 1, i32 0, i32 9
+ call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %gep, i8 0, i64 8, i32 1, i1 false)
+ ret i32 0
+}
+
+declare void @llvm.memset.p1i8.i64(i8 addrspace(1)* nocapture, i8, i64, i32, i1) nounwind
diff --git a/src/LLVM/test/Transforms/InstCombine/memset_chk.ll b/src/LLVM/test/Transforms/InstCombine/memset_chk.ll
new file mode 100644
index 0000000..58ecda5
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/memset_chk.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; rdar://7719085
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+
+%struct.data = type { [100 x i32], [100 x i32], [1024 x i8] }
+
+define i32 @t() nounwind ssp {
+; CHECK: @t
+; CHECK: @llvm.memset.p0i8.i64
+entry:
+ %0 = alloca %struct.data, align 8 ; <%struct.data*> [#uses=1]
+ %1 = bitcast %struct.data* %0 to i8* ; <i8*> [#uses=1]
+ %2 = call i8* @__memset_chk(i8* %1, i32 0, i64 1824, i64 1824) nounwind ; <i8*> [#uses=0]
+ ret i32 0
+}
+
+declare i8* @__memset_chk(i8*, i32, i64, i64) nounwind
diff --git a/src/LLVM/test/Transforms/InstCombine/merge-icmp.ll b/src/LLVM/test/Transforms/InstCombine/merge-icmp.ll
new file mode 100644
index 0000000..00020b1
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/merge-icmp.ll
@@ -0,0 +1,29 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+define i1 @test1(i16* %x) {
+ %load = load i16* %x, align 4
+ %trunc = trunc i16 %load to i8
+ %cmp1 = icmp eq i8 %trunc, 127
+ %and = and i16 %load, -256
+ %cmp2 = icmp eq i16 %and, 17664
+ %or = and i1 %cmp1, %cmp2
+ ret i1 %or
+; CHECK: @test1
+; CHECK-NEXT: load i16
+; CHECK-NEXT: icmp eq i16 %load, 17791
+; CHECK-NEXT: ret i1
+}
+
+define i1 @test2(i16* %x) {
+ %load = load i16* %x, align 4
+ %and = and i16 %load, -256
+ %cmp1 = icmp eq i16 %and, 32512
+ %trunc = trunc i16 %load to i8
+ %cmp2 = icmp eq i8 %trunc, 69
+ %or = and i1 %cmp1, %cmp2
+ ret i1 %or
+; CHECK: @test2
+; CHECK-NEXT: load i16
+; CHECK-NEXT: icmp eq i16 %load, 32581
+; CHECK-NEXT: ret i1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/mul-masked-bits.ll b/src/LLVM/test/Transforms/InstCombine/mul-masked-bits.ll
new file mode 100644
index 0000000..a43d5f2
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/mul-masked-bits.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep ashr
+
+define i32 @foo(i32 %x, i32 %y) {
+ %a = and i32 %x, 7
+ %b = and i32 %y, 7
+ %c = mul i32 %a, %b
+ %d = shl i32 %c, 26
+ %e = ashr i32 %d, 26
+ ret i32 %e
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/mul.ll b/src/LLVM/test/Transforms/InstCombine/mul.ll
new file mode 100644
index 0000000..5601c20
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/mul.ll
@@ -0,0 +1,116 @@
+; This test makes sure that mul instructions are properly eliminated.
+; RUN: opt < %s -instcombine -S | not grep mul
+
+define i32 @test1(i32 %A) {
+ %B = mul i32 %A, 1 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test2(i32 %A) {
+ ; Should convert to an add instruction
+ %B = mul i32 %A, 2 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test3(i32 %A) {
+ ; This should disappear entirely
+ %B = mul i32 %A, 0 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define double @test4(double %A) {
+ ; This is safe for FP
+ %B = fmul double 1.000000e+00, %A ; <double> [#uses=1]
+ ret double %B
+}
+
+define i32 @test5(i32 %A) {
+ %B = mul i32 %A, 8 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i8 @test6(i8 %A) {
+ %B = mul i8 %A, 8 ; <i8> [#uses=1]
+ %C = mul i8 %B, 8 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i32 @test7(i32 %i) {
+ %tmp = mul i32 %i, -1 ; <i32> [#uses=1]
+ ret i32 %tmp
+}
+
+define i64 @test8(i64 %i) {
+ ; tmp = sub 0, %i
+ %j = mul i64 %i, -1 ; <i64> [#uses=1]
+ ret i64 %j
+}
+
+define i32 @test9(i32 %i) {
+ ; %j = sub 0, %i
+ %j = mul i32 %i, -1 ; <i32> [#uses=1]
+ ret i32 %j
+}
+
+define i32 @test10(i32 %a, i32 %b) {
+ %c = icmp slt i32 %a, 0 ; <i1> [#uses=1]
+ %d = zext i1 %c to i32 ; <i32> [#uses=1]
+ ; e = b & (a >> 31)
+ %e = mul i32 %d, %b ; <i32> [#uses=1]
+ ret i32 %e
+}
+
+define i32 @test11(i32 %a, i32 %b) {
+ %c = icmp sle i32 %a, -1 ; <i1> [#uses=1]
+ %d = zext i1 %c to i32 ; <i32> [#uses=1]
+ ; e = b & (a >> 31)
+ %e = mul i32 %d, %b ; <i32> [#uses=1]
+ ret i32 %e
+}
+
+define i32 @test12(i8 %a, i32 %b) {
+ %c = icmp ugt i8 %a, 127 ; <i1> [#uses=1]
+ %d = zext i1 %c to i32 ; <i32> [#uses=1]
+ ; e = b & (a >> 31)
+ %e = mul i32 %d, %b ; <i32> [#uses=1]
+ ret i32 %e
+}
+
+; PR2642
+define internal void @test13(<4 x float>*) {
+ load <4 x float>* %0, align 1
+ fmul <4 x float> %2, < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >
+ store <4 x float> %3, <4 x float>* %0, align 1
+ ret void
+}
+
+define <16 x i8> @test14(<16 x i8> %a) {
+ %b = mul <16 x i8> %a, zeroinitializer
+ ret <16 x i8> %b
+}
+
+; rdar://7293527
+define i32 @test15(i32 %A, i32 %B) {
+entry:
+ %shl = shl i32 1, %B
+ %m = mul i32 %shl, %A
+ ret i32 %m
+}
+
+; X * Y (when Y is 0 or 1) --> x & (0-Y)
+define i32 @test16(i32 %b, i1 %c) {
+ %d = zext i1 %c to i32 ; <i32> [#uses=1]
+ ; e = b & (a >> 31)
+ %e = mul i32 %d, %b ; <i32> [#uses=1]
+ ret i32 %e
+}
+
+; X * Y (when Y is 0 or 1) --> x & (0-Y)
+define i32 @test17(i32 %a, i32 %b) {
+ %a.lobit = lshr i32 %a, 31
+ %e = mul i32 %a.lobit, %b
+ ret i32 %e
+}
+
+
+
diff --git a/src/LLVM/test/Transforms/InstCombine/multi-use-or.ll b/src/LLVM/test/Transforms/InstCombine/multi-use-or.ll
new file mode 100644
index 0000000..8c6a0e0
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/multi-use-or.ll
@@ -0,0 +1,24 @@
+; RUN: opt < %s -instcombine -S | grep {fadd double .sx, .sy}
+; The 'or' has multiple uses, make sure that this doesn't prevent instcombine
+; from propagating the extends to the truncs.
+
+define double @ScaleObjectAdd(double %sx, double %sy, double %sz) nounwind {
+entry:
+ %sx34 = bitcast double %sx to i64 ; <i64> [#uses=1]
+ %sx3435 = zext i64 %sx34 to i192 ; <i192> [#uses=1]
+ %sy22 = bitcast double %sy to i64 ; <i64> [#uses=1]
+ %sy2223 = zext i64 %sy22 to i192 ; <i192> [#uses=1]
+ %sy222324 = shl i192 %sy2223, 128 ; <i192> [#uses=1]
+ %sy222324.ins = or i192 %sx3435, %sy222324 ; <i192> [#uses=1]
+
+
+ %a = trunc i192 %sy222324.ins to i64 ; <i64> [#uses=1]
+ %b = bitcast i64 %a to double ; <double> [#uses=1]
+ %c = lshr i192 %sy222324.ins, 128 ; <i192> [#uses=1]
+ %d = trunc i192 %c to i64 ; <i64> [#uses=1]
+ %e = bitcast i64 %d to double ; <double> [#uses=1]
+ %f = fadd double %b, %e
+
+; ret double %e
+ ret double %f
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/narrow.ll b/src/LLVM/test/Transforms/InstCombine/narrow.ll
new file mode 100644
index 0000000..439ad29
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/narrow.ll
@@ -0,0 +1,18 @@
+; This file contains various testcases that check to see that instcombine
+; is narrowing computations when possible.
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {ret i1 false}
+
+; test1 - Eliminating the casts in this testcase (by narrowing the AND
+; operation) allows instcombine to realize the function always returns false.
+;
+define i1 @test1(i32 %A, i32 %B) {
+ %C1 = icmp slt i32 %A, %B ; <i1> [#uses=1]
+ %ELIM1 = zext i1 %C1 to i32 ; <i32> [#uses=1]
+ %C2 = icmp sgt i32 %A, %B ; <i1> [#uses=1]
+ %ELIM2 = zext i1 %C2 to i32 ; <i32> [#uses=1]
+ %C3 = and i32 %ELIM1, %ELIM2 ; <i32> [#uses=1]
+ %ELIM3 = trunc i32 %C3 to i1 ; <i1> [#uses=1]
+ ret i1 %ELIM3
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/neon-intrinsics.ll b/src/LLVM/test/Transforms/InstCombine/neon-intrinsics.ll
new file mode 100644
index 0000000..3ad09cc
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/neon-intrinsics.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; The alignment arguments for NEON load/store intrinsics can be increased
+; by instcombine. Check for this.
+
+; CHECK: vld4.v2i32({{.*}}, i32 32)
+; CHECK: vst4.v2i32({{.*}}, i32 16)
+
+@x = common global [8 x i32] zeroinitializer, align 32
+@y = common global [8 x i32] zeroinitializer, align 16
+
+%struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }
+
+define void @test() nounwind ssp {
+ %tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8* bitcast ([8 x i32]* @x to i8*), i32 1)
+ %tmp2 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 1
+ %tmp4 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 2
+ %tmp5 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 3
+ call void @llvm.arm.neon.vst4.v2i32(i8* bitcast ([8 x i32]* @y to i8*), <2 x i32> %tmp2, <2 x i32> %tmp3, <2 x i32> %tmp4, <2 x i32> %tmp5, i32 1)
+ ret void
+}
+
+declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8*, i32) nounwind readonly
+declare void @llvm.arm.neon.vst4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
diff --git a/src/LLVM/test/Transforms/InstCombine/no-negzero.ll b/src/LLVM/test/Transforms/InstCombine/no-negzero.ll
new file mode 100644
index 0000000..f295130
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/no-negzero.ll
@@ -0,0 +1,33 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; ModuleID = '3555a.c'
+; sqrt(fabs) cannot be negative zero, so we should eliminate the fadd.
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.8"
+
+; CHECK: @mysqrt
+; CHECK-NOT: fadd
+; CHECK: ret
+define double @mysqrt(double %x) nounwind {
+entry:
+ %x_addr = alloca double ; <double*> [#uses=2]
+ %retval = alloca double, align 8 ; <double*> [#uses=2]
+ %0 = alloca double, align 8 ; <double*> [#uses=2]
+ %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
+ store double %x, double* %x_addr
+ %1 = load double* %x_addr, align 8 ; <double> [#uses=1]
+ %2 = call double @fabs(double %1) nounwind readnone ; <double> [#uses=1]
+ %3 = call double @sqrt(double %2) nounwind readonly ; <double> [#uses=1]
+ %4 = fadd double %3, 0.000000e+00 ; <double> [#uses=1]
+ store double %4, double* %0, align 8
+ %5 = load double* %0, align 8 ; <double> [#uses=1]
+ store double %5, double* %retval, align 8
+ br label %return
+
+return: ; preds = %entry
+ %retval1 = load double* %retval ; <double> [#uses=1]
+ ret double %retval1
+}
+
+declare double @fabs(double)
+
+declare double @sqrt(double) nounwind readonly
diff --git a/src/LLVM/test/Transforms/InstCombine/not-fcmp.ll b/src/LLVM/test/Transforms/InstCombine/not-fcmp.ll
new file mode 100644
index 0000000..ad01a6b
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/not-fcmp.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep "fcmp uge"
+; PR1570
+
+define i1 @f(float %X, float %Y) {
+entry:
+ %tmp3 = fcmp olt float %X, %Y ; <i1> [#uses=1]
+ %toBoolnot5 = xor i1 %tmp3, true ; <i1> [#uses=1]
+ ret i1 %toBoolnot5
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/not.ll b/src/LLVM/test/Transforms/InstCombine/not.ll
new file mode 100644
index 0000000..c3134c1
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/not.ll
@@ -0,0 +1,54 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+
+; RUN: opt < %s -instcombine -S | not grep xor
+
+define i32 @test1(i32 %A) {
+ %B = xor i32 %A, -1 ; <i32> [#uses=1]
+ %C = xor i32 %B, -1 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i1 @test2(i32 %A, i32 %B) {
+ ; Can change into setge
+ %cond = icmp sle i32 %A, %B ; <i1> [#uses=1]
+ %Ret = xor i1 %cond, true ; <i1> [#uses=1]
+ ret i1 %Ret
+}
+
+; Test that demorgans law can be instcombined
+define i32 @test3(i32 %A, i32 %B) {
+ %a = xor i32 %A, -1 ; <i32> [#uses=1]
+ %b = xor i32 %B, -1 ; <i32> [#uses=1]
+ %c = and i32 %a, %b ; <i32> [#uses=1]
+ %d = xor i32 %c, -1 ; <i32> [#uses=1]
+ ret i32 %d
+}
+
+; Test that demorgens law can work with constants
+define i32 @test4(i32 %A, i32 %B) {
+ %a = xor i32 %A, -1 ; <i32> [#uses=1]
+ %c = and i32 %a, 5 ; <i32> [#uses=1]
+ %d = xor i32 %c, -1 ; <i32> [#uses=1]
+ ret i32 %d
+}
+
+; test the mirror of demorgans law...
+define i32 @test5(i32 %A, i32 %B) {
+ %a = xor i32 %A, -1 ; <i32> [#uses=1]
+ %b = xor i32 %B, -1 ; <i32> [#uses=1]
+ %c = or i32 %a, %b ; <i32> [#uses=1]
+ %d = xor i32 %c, -1 ; <i32> [#uses=1]
+ ret i32 %d
+}
+
+; PR2298
+define zeroext i8 @test6(i32 %a, i32 %b) nounwind {
+entry:
+ %tmp1not = xor i32 %a, -1 ; <i32> [#uses=1]
+ %tmp2not = xor i32 %b, -1 ; <i32> [#uses=1]
+ %tmp3 = icmp slt i32 %tmp1not, %tmp2not ; <i1> [#uses=1]
+ %retval67 = zext i1 %tmp3 to i8 ; <i8> [#uses=1]
+ ret i8 %retval67
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/nothrow.ll b/src/LLVM/test/Transforms/InstCombine/nothrow.ll
new file mode 100644
index 0000000..08d90bf
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/nothrow.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | not grep call
+; rdar://6880732
+declare double @t1(i32) readonly
+
+define void @t2() nounwind {
+ call double @t1(i32 42) ;; dead call even though callee is not nothrow.
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/nsw.ll b/src/LLVM/test/Transforms/InstCombine/nsw.ll
new file mode 100644
index 0000000..0140c2f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/nsw.ll
@@ -0,0 +1,83 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; CHECK: @sub1
+; CHECK: %y = sub i32 0, %x
+; CHECK: %z = sdiv i32 %y, 337
+; CHECK: ret i32 %z
+define i32 @sub1(i32 %x) {
+ %y = sub i32 0, %x
+ %z = sdiv i32 %y, 337
+ ret i32 %z
+}
+
+; CHECK: @sub2
+; CHECK: %z = sdiv i32 %x, -337
+; CHECK: ret i32 %z
+define i32 @sub2(i32 %x) {
+ %y = sub nsw i32 0, %x
+ %z = sdiv i32 %y, 337
+ ret i32 %z
+}
+
+; CHECK: @shl_icmp
+; CHECK: %B = icmp eq i64 %X, 0
+; CHECK: ret i1 %B
+define i1 @shl_icmp(i64 %X) nounwind {
+ %A = shl nuw i64 %X, 2 ; X/4
+ %B = icmp eq i64 %A, 0
+ ret i1 %B
+}
+
+; CHECK: @shl1
+; CHECK: %B = shl nuw nsw i64 %A, 8
+; CHECK: ret i64 %B
+define i64 @shl1(i64 %X, i64* %P) nounwind {
+ %A = and i64 %X, 312
+ store i64 %A, i64* %P ; multiple uses of A.
+ %B = shl i64 %A, 8
+ ret i64 %B
+}
+
+; CHECK: @preserve1
+; CHECK: add nsw i32 %x, 5
+define i32 @preserve1(i32 %x) nounwind {
+ %add = add nsw i32 %x, 2
+ %add3 = add nsw i32 %add, 3
+ ret i32 %add3
+}
+
+; CHECK: @nopreserve1
+; CHECK: add i8 %x, -126
+define i8 @nopreserve1(i8 %x) nounwind {
+ %add = add nsw i8 %x, 127
+ %add3 = add nsw i8 %add, 3
+ ret i8 %add3
+}
+
+; CHECK: @nopreserve2
+; CHECK: add i8 %x, 3
+define i8 @nopreserve2(i8 %x) nounwind {
+ %add = add i8 %x, 1
+ %add3 = add nsw i8 %add, 2
+ ret i8 %add3
+}
+
+; CHECK: @nopreserve3
+; CHECK: add i8 %A, %B
+; CHECK: add i8
+define i8 @nopreserve3(i8 %A, i8 %B) nounwind {
+ %x = add i8 %A, 10
+ %y = add i8 %B, 10
+ %add = add nsw i8 %x, %y
+ ret i8 %add
+}
+
+; CHECK: @nopreserve4
+; CHECK: add i8 %A, %B
+; CHECK: add i8
+define i8 @nopreserve4(i8 %A, i8 %B) nounwind {
+ %x = add nsw i8 %A, 10
+ %y = add nsw i8 %B, 10
+ %add = add nsw i8 %x, %y
+ ret i8 %add
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/objsize.ll b/src/LLVM/test/Transforms/InstCombine/objsize.ll
new file mode 100644
index 0000000..28ceb68
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/objsize.ll
@@ -0,0 +1,160 @@
+; Test a pile of objectsize bounds checking.
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; We need target data to get the sizes of the arrays and structures.
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+
+@a = private global [60 x i8] zeroinitializer, align 1 ; <[60 x i8]*>
+@.str = private constant [8 x i8] c"abcdefg\00" ; <[8 x i8]*>
+
+define i32 @foo() nounwind {
+; CHECK: @foo
+; CHECK-NEXT: ret i32 60
+ %1 = call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i1 false)
+ ret i32 %1
+}
+
+define i8* @bar() nounwind {
+; CHECK: @bar
+entry:
+ %retval = alloca i8*
+ %0 = call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i1 false)
+ %cmp = icmp ne i32 %0, -1
+; CHECK: br i1 true
+ br i1 %cmp, label %cond.true, label %cond.false
+
+cond.true:
+ %1 = load i8** %retval
+ ret i8* %1
+
+cond.false:
+ %2 = load i8** %retval
+ ret i8* %2
+}
+
+define i32 @f() nounwind {
+; CHECK: @f
+; CHECK-NEXT: ret i32 0
+ %1 = call i32 @llvm.objectsize.i32(i8* getelementptr ([60 x i8]* @a, i32 1, i32 0), i1 false)
+ ret i32 %1
+}
+
+@window = external global [0 x i8]
+
+define i1 @baz() nounwind {
+; CHECK: @baz
+; CHECK-NEXT: ret i1 true
+ %1 = tail call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([0 x i8]* @window, i32 0, i32 0), i1 false)
+ %2 = icmp eq i32 %1, -1
+ ret i1 %2
+}
+
+define void @test1(i8* %q, i32 %x) nounwind noinline {
+; CHECK: @test1
+; CHECK: objectsize.i32
+entry:
+ %0 = call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([0 x i8]* @window, i32 0, i32 10), i1 false) ; <i64> [#uses=1]
+ %1 = icmp eq i32 %0, -1 ; <i1> [#uses=1]
+ br i1 %1, label %"47", label %"46"
+
+"46": ; preds = %entry
+ unreachable
+
+"47": ; preds = %entry
+ unreachable
+}
+
+@.str5 = private constant [9 x i32] [i32 97, i32 98, i32 99, i32 100, i32 0, i32
+ 101, i32 102, i32 103, i32 0], align 4
+define i32 @test2() nounwind {
+; CHECK: @test2
+; CHECK-NEXT: ret i32 34
+ %1 = call i32 @llvm.objectsize.i32(i8* getelementptr (i8* bitcast ([9 x i32]* @.str5 to i8*), i32 2), i1 false)
+ ret i32 %1
+}
+
+; rdar://7674946
+@array = internal global [480 x float] zeroinitializer ; <[480 x float]*> [#uses=1]
+
+declare i8* @__memcpy_chk(i8*, i8*, i32, i32) nounwind
+
+declare i32 @llvm.objectsize.i32(i8*, i1) nounwind readonly
+
+declare i8* @__inline_memcpy_chk(i8*, i8*, i32) nounwind inlinehint
+
+define void @test3() nounwind {
+; CHECK: @test3
+entry:
+ br i1 undef, label %bb11, label %bb12
+
+bb11:
+ %0 = getelementptr inbounds float* getelementptr inbounds ([480 x float]* @array, i32 0, i32 128), i32 -127 ; <float*> [#uses=1]
+ %1 = bitcast float* %0 to i8* ; <i8*> [#uses=1]
+ %2 = call i32 @llvm.objectsize.i32(i8* %1, i1 false) ; <i32> [#uses=1]
+ %3 = call i8* @__memcpy_chk(i8* undef, i8* undef, i32 512, i32 %2) nounwind ; <i8*> [#uses=0]
+; CHECK: unreachable
+ unreachable
+
+bb12:
+ %4 = getelementptr inbounds float* getelementptr inbounds ([480 x float]* @array, i32 0, i32 128), i32 -127 ; <float*> [#uses=1]
+ %5 = bitcast float* %4 to i8* ; <i8*> [#uses=1]
+ %6 = call i8* @__inline_memcpy_chk(i8* %5, i8* undef, i32 512) nounwind inlinehint ; <i8*> [#uses=0]
+; CHECK: @__inline_memcpy_chk
+ unreachable
+}
+
+; rdar://7718857
+
+%struct.data = type { [100 x i32], [100 x i32], [1024 x i8] }
+
+define i32 @test4() nounwind ssp {
+; CHECK: @test4
+entry:
+ %0 = alloca %struct.data, align 8
+ %1 = bitcast %struct.data* %0 to i8*
+ %2 = call i32 @llvm.objectsize.i32(i8* %1, i1 false) nounwind
+; CHECK-NOT: @llvm.objectsize
+; CHECK: @llvm.memset.p0i8.i32(i8* %1, i8 0, i32 1824, i32 8, i1 false)
+ %3 = call i8* @__memset_chk(i8* %1, i32 0, i32 1824, i32 %2) nounwind
+ ret i32 0
+}
+
+; rdar://7782496
+@s = external global i8*
+
+define void @test5(i32 %n) nounwind ssp {
+; CHECK: @test5
+entry:
+ %0 = tail call noalias i8* @malloc(i32 20) nounwind
+ %1 = tail call i32 @llvm.objectsize.i32(i8* %0, i1 false)
+ %2 = load i8** @s, align 8
+; CHECK-NOT: @llvm.objectsize
+; CHECK: @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 10, i32 1, i1 false)
+ %3 = tail call i8* @__memcpy_chk(i8* %0, i8* %2, i32 10, i32 %1) nounwind
+ ret void
+}
+
+define void @test6(i32 %n) nounwind ssp {
+; CHECK: @test6
+entry:
+ %0 = tail call noalias i8* @malloc(i32 20) nounwind
+ %1 = tail call i32 @llvm.objectsize.i32(i8* %0, i1 false)
+ %2 = load i8** @s, align 8
+; CHECK-NOT: @llvm.objectsize
+; CHECK: @__memcpy_chk(i8* %0, i8* %1, i32 30, i32 20)
+ %3 = tail call i8* @__memcpy_chk(i8* %0, i8* %2, i32 30, i32 %1) nounwind
+ ret void
+}
+
+declare i8* @__memset_chk(i8*, i32, i32, i32) nounwind
+
+declare noalias i8* @malloc(i32) nounwind
+
+define i32 @test7() {
+; CHECK: @test7
+ %alloc = call noalias i8* @malloc(i32 48) nounwind
+ %gep = getelementptr inbounds i8* %alloc, i32 16
+ %objsize = call i32 @llvm.objectsize.i32(i8* %gep, i1 false) nounwind readonly
+; CHECK-NEXT: ret i32 32
+ ret i32 %objsize
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/odr-linkage.ll b/src/LLVM/test/Transforms/InstCombine/odr-linkage.ll
new file mode 100644
index 0000000..61365b4
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/odr-linkage.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -instcombine -S | grep {ret i32 10}
+
+@g1 = available_externally constant i32 1
+@g2 = linkonce_odr constant i32 2
+@g3 = weak_odr constant i32 3
+@g4 = internal constant i32 4
+
+define i32 @test() {
+ %A = load i32* @g1
+ %B = load i32* @g2
+ %C = load i32* @g3
+ %D = load i32* @g4
+
+ %a = add i32 %A, %B
+ %b = add i32 %a, %C
+ %c = add i32 %b, %D
+ ret i32 %c
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/or-fcmp.ll b/src/LLVM/test/Transforms/InstCombine/or-fcmp.ll
new file mode 100644
index 0000000..09a3c99
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/or-fcmp.ll
@@ -0,0 +1,58 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; CHECK: @t1
+define zeroext i8 @t1(float %x, float %y) nounwind {
+ %a = fcmp ueq float %x, %y ; <i1> [#uses=1]
+ %b = fcmp uno float %x, %y ; <i1> [#uses=1]
+ %c = or i1 %a, %b
+; CHECK-NOT: fcmp uno
+; CHECK: fcmp ueq
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
+
+; CHECK: @t2
+define zeroext i8 @t2(float %x, float %y) nounwind {
+ %a = fcmp olt float %x, %y ; <i1> [#uses=1]
+ %b = fcmp oeq float %x, %y ; <i1> [#uses=1]
+; CHECK-NOT: fcmp olt
+; CHECK-NOT: fcmp oeq
+; CHECK: fcmp ole
+ %c = or i1 %a, %b
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
+
+; CHECK: @t3
+define zeroext i8 @t3(float %x, float %y) nounwind {
+ %a = fcmp ult float %x, %y ; <i1> [#uses=1]
+ %b = fcmp uge float %x, %y ; <i1> [#uses=1]
+ %c = or i1 %a, %b
+ %retval = zext i1 %c to i8
+; CHECK: ret i8 1
+ ret i8 %retval
+}
+
+; CHECK: @t4
+define zeroext i8 @t4(float %x, float %y) nounwind {
+ %a = fcmp ult float %x, %y ; <i1> [#uses=1]
+ %b = fcmp ugt float %x, %y ; <i1> [#uses=1]
+ %c = or i1 %a, %b
+; CHECK-NOT: fcmp ult
+; CHECK-NOT: fcmp ugt
+; CHECK: fcmp une
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
+
+; CHECK: @t5
+define zeroext i8 @t5(float %x, float %y) nounwind {
+ %a = fcmp olt float %x, %y ; <i1> [#uses=1]
+ %b = fcmp oge float %x, %y ; <i1> [#uses=1]
+ %c = or i1 %a, %b
+; CHECK-NOT: fcmp olt
+; CHECK-NOT: fcmp oge
+; CHECK: fcmp ord
+ %retval = zext i1 %c to i8
+ ret i8 %retval
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/or-to-xor.ll b/src/LLVM/test/Transforms/InstCombine/or-to-xor.ll
new file mode 100644
index 0000000..1495ee4
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/or-to-xor.ll
@@ -0,0 +1,42 @@
+; RUN: opt < %s -instcombine -S | grep {xor i32 %a, %b} | count 4
+; RUN: opt < %s -instcombine -S | not grep {and}
+
+define i32 @func1(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %b_not = xor i32 %b, -1
+ %0 = and i32 %a, %b_not
+ %a_not = xor i32 %a, -1
+ %1 = and i32 %a_not, %b
+ %2 = or i32 %0, %1
+ ret i32 %2
+}
+
+define i32 @func2(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %b_not = xor i32 %b, -1
+ %0 = and i32 %b_not, %a
+ %a_not = xor i32 %a, -1
+ %1 = and i32 %a_not, %b
+ %2 = or i32 %0, %1
+ ret i32 %2
+}
+
+define i32 @func3(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %b_not = xor i32 %b, -1
+ %0 = and i32 %a, %b_not
+ %a_not = xor i32 %a, -1
+ %1 = and i32 %b, %a_not
+ %2 = or i32 %0, %1
+ ret i32 %2
+}
+
+define i32 @func4(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %b_not = xor i32 %b, -1
+ %0 = and i32 %b_not, %a
+ %a_not = xor i32 %a, -1
+ %1 = and i32 %b, %a_not
+ %2 = or i32 %0, %1
+ ret i32 %2
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/or-xor.ll b/src/LLVM/test/Transforms/InstCombine/or-xor.ll
new file mode 100644
index 0000000..f496dd4
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/or-xor.ll
@@ -0,0 +1,94 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+define i32 @test1(i32 %x, i32 %y) nounwind {
+ %or = or i32 %x, %y
+ %not = xor i32 %or, -1
+ %z = or i32 %x, %not
+ ret i32 %z
+; CHECK: @test1
+; CHECK-NEXT: %y.not = xor i32 %y, -1
+; CHECK-NEXT: %z = or i32 %y.not, %x
+; CHECK-NEXT: ret i32 %z
+}
+
+define i32 @test2(i32 %x, i32 %y) nounwind {
+ %or = or i32 %x, %y
+ %not = xor i32 %or, -1
+ %z = or i32 %y, %not
+ ret i32 %z
+; CHECK: @test2
+; CHECK-NEXT: %x.not = xor i32 %x, -1
+; CHECK-NEXT: %z = or i32 %x.not, %y
+; CHECK-NEXT: ret i32 %z
+}
+
+define i32 @test3(i32 %x, i32 %y) nounwind {
+ %xor = xor i32 %x, %y
+ %not = xor i32 %xor, -1
+ %z = or i32 %x, %not
+ ret i32 %z
+; CHECK: @test3
+; CHECK-NEXT: %y.not = xor i32 %y, -1
+; CHECK-NEXT: %z = or i32 %y.not, %x
+; CHECK-NEXT: ret i32 %z
+}
+
+define i32 @test4(i32 %x, i32 %y) nounwind {
+ %xor = xor i32 %x, %y
+ %not = xor i32 %xor, -1
+ %z = or i32 %y, %not
+ ret i32 %z
+; CHECK: @test4
+; CHECK-NEXT: %x.not = xor i32 %x, -1
+; CHECK-NEXT: %z = or i32 %x.not, %y
+; CHECK-NEXT: ret i32 %z
+}
+
+define i32 @test5(i32 %x, i32 %y) nounwind {
+ %and = and i32 %x, %y
+ %not = xor i32 %and, -1
+ %z = or i32 %x, %not
+ ret i32 %z
+; CHECK: @test5
+; CHECK-NEXT: ret i32 -1
+}
+
+define i32 @test6(i32 %x, i32 %y) nounwind {
+ %and = and i32 %x, %y
+ %not = xor i32 %and, -1
+ %z = or i32 %y, %not
+ ret i32 %z
+; CHECK: @test6
+; CHECK-NEXT: ret i32 -1
+}
+
+define i32 @test7(i32 %x, i32 %y) nounwind {
+ %xor = xor i32 %x, %y
+ %z = or i32 %y, %xor
+ ret i32 %z
+; CHECK: @test7
+; CHECK-NEXT: %z = or i32 %x, %y
+; CHECK-NEXT: ret i32 %z
+}
+
+define i32 @test8(i32 %x, i32 %y) nounwind {
+ %not = xor i32 %y, -1
+ %xor = xor i32 %x, %not
+ %z = or i32 %y, %xor
+ ret i32 %z
+; CHECK: @test8
+; CHECK-NEXT: %x.not = xor i32 %x, -1
+; CHECK-NEXT: %z = or i32 %x.not, %y
+; CHECK-NEXT: ret i32 %z
+}
+
+define i32 @test9(i32 %x, i32 %y) nounwind {
+ %not = xor i32 %x, -1
+ %xor = xor i32 %not, %y
+ %z = or i32 %x, %xor
+ ret i32 %z
+; CHECK: @test9
+; CHECK-NEXT: %y.not = xor i32 %y, -1
+; CHECK-NEXT: %z = or i32 %y.not, %x
+; CHECK-NEXT: ret i32 %z
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/or.ll b/src/LLVM/test/Transforms/InstCombine/or.ll
new file mode 100644
index 0000000..693fa77
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/or.ll
@@ -0,0 +1,411 @@
+; This test makes sure that these instructions are properly eliminated.
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+
+define i32 @test1(i32 %A) {
+ %B = or i32 %A, 0
+ ret i32 %B
+; CHECK: @test1
+; CHECK: ret i32 %A
+}
+
+define i32 @test2(i32 %A) {
+ %B = or i32 %A, -1
+ ret i32 %B
+; CHECK: @test2
+; CHECK: ret i32 -1
+}
+
+define i8 @test2a(i8 %A) {
+ %B = or i8 %A, -1
+ ret i8 %B
+; CHECK: @test2a
+; CHECK: ret i8 -1
+}
+
+define i1 @test3(i1 %A) {
+ %B = or i1 %A, false
+ ret i1 %B
+; CHECK: @test3
+; CHECK: ret i1 %A
+}
+
+define i1 @test4(i1 %A) {
+ %B = or i1 %A, true
+ ret i1 %B
+; CHECK: @test4
+; CHECK: ret i1 true
+}
+
+define i1 @test5(i1 %A) {
+ %B = or i1 %A, %A
+ ret i1 %B
+; CHECK: @test5
+; CHECK: ret i1 %A
+}
+
+define i32 @test6(i32 %A) {
+ %B = or i32 %A, %A
+ ret i32 %B
+; CHECK: @test6
+; CHECK: ret i32 %A
+}
+
+; A | ~A == -1
+define i32 @test7(i32 %A) {
+ %NotA = xor i32 -1, %A
+ %B = or i32 %A, %NotA
+ ret i32 %B
+; CHECK: @test7
+; CHECK: ret i32 -1
+}
+
+define i8 @test8(i8 %A) {
+ %B = or i8 %A, -2
+ %C = or i8 %B, 1
+ ret i8 %C
+; CHECK: @test8
+; CHECK: ret i8 -1
+}
+
+; Test that (A|c1)|(B|c2) == (A|B)|(c1|c2)
+define i8 @test9(i8 %A, i8 %B) {
+ %C = or i8 %A, 1
+ %D = or i8 %B, -2
+ %E = or i8 %C, %D
+ ret i8 %E
+; CHECK: @test9
+; CHECK: ret i8 -1
+}
+
+define i8 @test10(i8 %A) {
+ %B = or i8 %A, 1
+ %C = and i8 %B, -2
+ ; (X & C1) | C2 --> (X | C2) & (C1|C2)
+ %D = or i8 %C, -2
+ ret i8 %D
+; CHECK: @test10
+; CHECK: ret i8 -2
+}
+
+define i8 @test11(i8 %A) {
+ %B = or i8 %A, -2
+ %C = xor i8 %B, 13
+ ; (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
+ %D = or i8 %C, 1
+ %E = xor i8 %D, 12
+ ret i8 %E
+; CHECK: @test11
+; CHECK: ret i8 -1
+}
+
+define i32 @test12(i32 %A) {
+ ; Should be eliminated
+ %B = or i32 %A, 4
+ %C = and i32 %B, 8
+ ret i32 %C
+; CHECK: @test12
+; CHECK: %C = and i32 %A, 8
+; CHECK: ret i32 %C
+}
+
+define i32 @test13(i32 %A) {
+ %B = or i32 %A, 12
+ ; Always equal to 8
+ %C = and i32 %B, 8
+ ret i32 %C
+; CHECK: @test13
+; CHECK: ret i32 8
+}
+
+define i1 @test14(i32 %A, i32 %B) {
+ %C1 = icmp ult i32 %A, %B
+ %C2 = icmp ugt i32 %A, %B
+ ; (A < B) | (A > B) === A != B
+ %D = or i1 %C1, %C2
+ ret i1 %D
+; CHECK: @test14
+; CHECK: icmp ne i32 %A, %B
+; CHECK: ret i1
+}
+
+define i1 @test15(i32 %A, i32 %B) {
+ %C1 = icmp ult i32 %A, %B
+ %C2 = icmp eq i32 %A, %B
+ ; (A < B) | (A == B) === A <= B
+ %D = or i1 %C1, %C2
+ ret i1 %D
+; CHECK: @test15
+; CHECK: icmp ule i32 %A, %B
+; CHECK: ret i1
+}
+
+define i32 @test16(i32 %A) {
+ %B = and i32 %A, 1
+ ; -2 = ~1
+ %C = and i32 %A, -2
+ ; %D = and int %B, -1 == %B
+ %D = or i32 %B, %C
+ ret i32 %D
+; CHECK: @test16
+; CHECK: ret i32 %A
+}
+
+define i32 @test17(i32 %A) {
+ %B = and i32 %A, 1
+ %C = and i32 %A, 4
+ ; %D = and int %B, 5
+ %D = or i32 %B, %C
+ ret i32 %D
+; CHECK: @test17
+; CHECK: %D = and i32 %A, 5
+; CHECK: ret i32 %D
+}
+
+define i1 @test18(i32 %A) {
+ %B = icmp sge i32 %A, 100
+ %C = icmp slt i32 %A, 50
+ ;; (A-50) >u 50
+ %D = or i1 %B, %C
+ ret i1 %D
+; CHECK: @test18
+; CHECK: add i32
+; CHECK: icmp ugt
+; CHECK: ret i1
+}
+
+define i1 @test19(i32 %A) {
+ %B = icmp eq i32 %A, 50
+ %C = icmp eq i32 %A, 51
+ ;; (A-50) < 2
+ %D = or i1 %B, %C
+ ret i1 %D
+; CHECK: @test19
+; CHECK: add i32
+; CHECK: icmp ult
+; CHECK: ret i1
+}
+
+define i32 @test20(i32 %x) {
+ %y = and i32 %x, 123
+ %z = or i32 %y, %x
+ ret i32 %z
+; CHECK: @test20
+; CHECK: ret i32 %x
+}
+
+define i32 @test21(i32 %tmp.1) {
+ %tmp.1.mask1 = add i32 %tmp.1, 2
+ %tmp.3 = and i32 %tmp.1.mask1, -2
+ %tmp.5 = and i32 %tmp.1, 1
+ ;; add tmp.1, 2
+ %tmp.6 = or i32 %tmp.5, %tmp.3
+ ret i32 %tmp.6
+; CHECK: @test21
+; CHECK: add i32 %{{[^,]*}}, 2
+; CHECK: ret i32
+}
+
+define i32 @test22(i32 %B) {
+ %ELIM41 = and i32 %B, 1
+ %ELIM7 = and i32 %B, -2
+ %ELIM5 = or i32 %ELIM41, %ELIM7
+ ret i32 %ELIM5
+; CHECK: @test22
+; CHECK: ret i32 %B
+}
+
+define i16 @test23(i16 %A) {
+ %B = lshr i16 %A, 1
+ ;; fold or into xor
+ %C = or i16 %B, -32768
+ %D = xor i16 %C, 8193
+ ret i16 %D
+; CHECK: @test23
+; CHECK: %B = lshr i16 %A, 1
+; CHECK: %D = xor i16 %B, -24575
+; CHECK: ret i16 %D
+}
+
+; PR1738
+define i1 @test24(double %X, double %Y) {
+ %tmp9 = fcmp uno double %X, 0.000000e+00 ; <i1> [#uses=1]
+ %tmp13 = fcmp uno double %Y, 0.000000e+00 ; <i1> [#uses=1]
+ %bothcond = or i1 %tmp13, %tmp9 ; <i1> [#uses=1]
+ ret i1 %bothcond
+
+; CHECK: @test24
+; CHECK: = fcmp uno double %Y, %X
+; CHECK: ret i1
+}
+
+; PR3266 & PR5276
+define i1 @test25(i32 %A, i32 %B) {
+ %C = icmp eq i32 %A, 0
+ %D = icmp eq i32 %B, 57
+ %E = or i1 %C, %D
+ %F = xor i1 %E, -1
+ ret i1 %F
+
+; CHECK: @test25
+; CHECK: icmp ne i32 %A, 0
+; CHECK-NEXT: icmp ne i32 %B, 57
+; CHECK-NEXT: %F = and i1
+; CHECK-NEXT: ret i1 %F
+}
+
+; PR5634
+define i1 @test26(i32 %A, i32 %B) {
+ %C1 = icmp eq i32 %A, 0
+ %C2 = icmp eq i32 %B, 0
+ ; (A == 0) & (A == 0) --> (A|B) == 0
+ %D = and i1 %C1, %C2
+ ret i1 %D
+; CHECK: @test26
+; CHECK: or i32 %A, %B
+; CHECK: icmp eq i32 {{.*}}, 0
+; CHECK: ret i1
+}
+
+define i1 @test27(i32* %A, i32* %B) {
+ %C1 = ptrtoint i32* %A to i32
+ %C2 = ptrtoint i32* %B to i32
+ %D = or i32 %C1, %C2
+ %E = icmp eq i32 %D, 0
+ ret i1 %E
+; CHECK: @test27
+; CHECK: icmp eq i32* %A, null
+; CHECK: icmp eq i32* %B, null
+; CHECK: and i1
+; CHECK: ret i1
+}
+
+; PR5634
+define i1 @test28(i32 %A, i32 %B) {
+ %C1 = icmp ne i32 %A, 0
+ %C2 = icmp ne i32 %B, 0
+ ; (A != 0) | (A != 0) --> (A|B) != 0
+ %D = or i1 %C1, %C2
+ ret i1 %D
+; CHECK: @test28
+; CHECK: or i32 %A, %B
+; CHECK: icmp ne i32 {{.*}}, 0
+; CHECK: ret i1
+}
+
+define i1 @test29(i32* %A, i32* %B) {
+ %C1 = ptrtoint i32* %A to i32
+ %C2 = ptrtoint i32* %B to i32
+ %D = or i32 %C1, %C2
+ %E = icmp ne i32 %D, 0
+ ret i1 %E
+; CHECK: @test29
+; CHECK: icmp ne i32* %A, null
+; CHECK: icmp ne i32* %B, null
+; CHECK: or i1
+; CHECK: ret i1
+}
+
+; PR4216
+define i32 @test30(i32 %A) {
+entry:
+ %B = or i32 %A, 32962
+ %C = and i32 %A, -65536
+ %D = and i32 %B, 40186
+ %E = or i32 %D, %C
+ ret i32 %E
+; CHECK: @test30
+; CHECK: %D = and i32 %A, -58312
+; CHECK: %E = or i32 %D, 32962
+; CHECK: ret i32 %E
+}
+
+; PR4216
+define i64 @test31(i64 %A) nounwind readnone ssp noredzone {
+ %B = or i64 %A, 194
+ %D = and i64 %B, 250
+
+ %C = or i64 %A, 32768
+ %E = and i64 %C, 4294941696
+
+ %F = or i64 %D, %E
+ ret i64 %F
+; CHECK: @test31
+; CHECK-NEXT: %E = and i64 %A, 4294908984
+; CHECK-NEXT: %F = or i64 %E, 32962
+; CHECK-NEXT: ret i64 %F
+}
+
+define <4 x i32> @test32(<4 x i1> %and.i1352, <4 x i32> %vecinit6.i176, <4 x i32> %vecinit6.i191) {
+ %and.i135 = sext <4 x i1> %and.i1352 to <4 x i32> ; <<4 x i32>> [#uses=2]
+ %and.i129 = and <4 x i32> %vecinit6.i176, %and.i135 ; <<4 x i32>> [#uses=1]
+ %neg.i = xor <4 x i32> %and.i135, <i32 -1, i32 -1, i32 -1, i32 -1> ; <<4 x i32>> [#uses=1]
+ %and.i = and <4 x i32> %vecinit6.i191, %neg.i ; <<4 x i32>> [#uses=1]
+ %or.i = or <4 x i32> %and.i, %and.i129 ; <<4 x i32>> [#uses=1]
+ ret <4 x i32> %or.i
+; Don't turn this into a vector select until codegen matures to handle them
+; better.
+; CHECK: @test32
+; CHECK: or <4 x i32> %and.i, %and.i129
+}
+
+define i1 @test33(i1 %X, i1 %Y) {
+ %a = or i1 %X, %Y
+ %b = or i1 %a, %X
+ ret i1 %b
+; CHECK: @test33
+; CHECK-NEXT: or i1 %X, %Y
+; CHECK-NEXT: ret
+}
+
+define i32 @test34(i32 %X, i32 %Y) {
+ %a = or i32 %X, %Y
+ %b = or i32 %Y, %a
+ ret i32 %b
+; CHECK: @test34
+; CHECK-NEXT: or i32 %X, %Y
+; CHECK-NEXT: ret
+}
+
+define i32 @test35(i32 %a, i32 %b) {
+ %1 = or i32 %a, 1135
+ %2 = or i32 %1, %b
+ ret i32 %2
+ ; CHECK: @test35
+ ; CHECK-NEXT: or i32 %a, %b
+ ; CHECK-NEXT: or i32 %1, 1135
+}
+
+define i1 @test36(i32 %x) {
+ %cmp1 = icmp eq i32 %x, 23
+ %cmp2 = icmp eq i32 %x, 24
+ %ret1 = or i1 %cmp1, %cmp2
+ %cmp3 = icmp eq i32 %x, 25
+ %ret2 = or i1 %ret1, %cmp3
+ ret i1 %ret2
+; CHECK: @test36
+; CHECK-NEXT: %x.off = add i32 %x, -23
+; CHECK-NEXT: icmp ult i32 %x.off, 3
+; CHECK-NEXT: ret i1
+}
+
+define i32 @test37(i32* %xp, i32 %y) {
+; CHECK: @test37
+; CHECK: select i1 %tobool, i32 -1, i32 %x
+ %tobool = icmp ne i32 %y, 0
+ %sext = sext i1 %tobool to i32
+ %x = load i32* %xp
+ %or = or i32 %sext, %x
+ ret i32 %or
+}
+
+define i32 @test38(i32* %xp, i32 %y) {
+; CHECK: @test38
+; CHECK: select i1 %tobool, i32 -1, i32 %x
+ %tobool = icmp ne i32 %y, 0
+ %sext = sext i1 %tobool to i32
+ %x = load i32* %xp
+ %or = or i32 %x, %sext
+ ret i32 %or
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/overflow.ll b/src/LLVM/test/Transforms/InstCombine/overflow.ll
new file mode 100644
index 0000000..9123283
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/overflow.ll
@@ -0,0 +1,133 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+; <rdar://problem/8558713>
+
+declare void @throwAnExceptionOrWhatever()
+
+; CHECK: @test1
+define i32 @test1(i32 %a, i32 %b) nounwind ssp {
+entry:
+; CHECK-NOT: sext
+ %conv = sext i32 %a to i64
+ %conv2 = sext i32 %b to i64
+ %add = add nsw i64 %conv2, %conv
+ %add.off = add i64 %add, 2147483648
+; CHECK: llvm.sadd.with.overflow.i32
+ %0 = icmp ugt i64 %add.off, 4294967295
+ br i1 %0, label %if.then, label %if.end
+
+if.then:
+ tail call void @throwAnExceptionOrWhatever() nounwind
+ br label %if.end
+
+if.end:
+; CHECK-NOT: trunc
+ %conv9 = trunc i64 %add to i32
+; CHECK: ret i32
+ ret i32 %conv9
+}
+
+; CHECK: @test2
+; This form should not be promoted for two reasons: 1) it is unprofitable to
+; promote it since the add.off instruction has another use, and 2) it is unsafe
+; because the add-with-off makes the high bits of the original add live.
+define i32 @test2(i32 %a, i32 %b, i64* %P) nounwind ssp {
+entry:
+ %conv = sext i32 %a to i64
+ %conv2 = sext i32 %b to i64
+ %add = add nsw i64 %conv2, %conv
+ %add.off = add i64 %add, 2147483648
+
+ store i64 %add.off, i64* %P
+
+; CHECK-NOT: llvm.sadd.with.overflow
+ %0 = icmp ugt i64 %add.off, 4294967295
+ br i1 %0, label %if.then, label %if.end
+
+if.then:
+ tail call void @throwAnExceptionOrWhatever() nounwind
+ br label %if.end
+
+if.end:
+ %conv9 = trunc i64 %add to i32
+; CHECK: ret i32
+ ret i32 %conv9
+}
+
+; CHECK: test3
+; PR8816
+; This is illegal to transform because the high bits of the original add are
+; live out.
+define i64 @test3(i32 %a, i32 %b) nounwind ssp {
+entry:
+ %conv = sext i32 %a to i64
+ %conv2 = sext i32 %b to i64
+ %add = add nsw i64 %conv2, %conv
+ %add.off = add i64 %add, 2147483648
+; CHECK-NOT: llvm.sadd.with.overflow
+ %0 = icmp ugt i64 %add.off, 4294967295
+ br i1 %0, label %if.then, label %if.end
+
+if.then:
+ tail call void @throwAnExceptionOrWhatever() nounwind
+ br label %if.end
+
+if.end:
+ ret i64 %add
+; CHECK: ret i64
+}
+
+; CHECK: @test4
+; Should be able to form an i8 sadd computed in an i32.
+define zeroext i8 @test4(i8 signext %a, i8 signext %b) nounwind ssp {
+entry:
+ %conv = sext i8 %a to i32
+ %conv2 = sext i8 %b to i32
+ %add = add nsw i32 %conv2, %conv
+ %add4 = add nsw i32 %add, 128
+ %cmp = icmp ugt i32 %add4, 255
+ br i1 %cmp, label %if.then, label %if.end
+; CHECK: llvm.sadd.with.overflow.i8
+if.then: ; preds = %entry
+ tail call void @throwAnExceptionOrWhatever() nounwind
+ unreachable
+
+if.end: ; preds = %entry
+ %conv7 = trunc i32 %add to i8
+ ret i8 %conv7
+; CHECK: ret i8
+}
+
+; CHECK: @test5
+; CHECK: llvm.uadd.with.overflow
+; CHECK: ret i64
+define i64 @test5(i64 %a, i64 %b) nounwind ssp {
+entry:
+ %add = add i64 %b, %a
+ %cmp = icmp ult i64 %add, %a
+ %Q = select i1 %cmp, i64 %b, i64 42
+ ret i64 %Q
+}
+
+; CHECK: @test6
+; CHECK: llvm.uadd.with.overflow
+; CHECK: ret i64
+define i64 @test6(i64 %a, i64 %b) nounwind ssp {
+entry:
+ %add = add i64 %b, %a
+ %cmp = icmp ult i64 %add, %b
+ %Q = select i1 %cmp, i64 %b, i64 42
+ ret i64 %Q
+}
+
+; CHECK: @test7
+; CHECK: llvm.uadd.with.overflow
+; CHECK: ret i64
+define i64 @test7(i64 %a, i64 %b) nounwind ssp {
+entry:
+ %add = add i64 %b, %a
+ %cmp = icmp ugt i64 %b, %add
+ %Q = select i1 %cmp, i64 %b, i64 42
+ ret i64 %Q
+}
+
+
diff --git a/src/LLVM/test/Transforms/InstCombine/phi-merge-gep.ll b/src/LLVM/test/Transforms/InstCombine/phi-merge-gep.ll
new file mode 100644
index 0000000..2671749
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/phi-merge-gep.ll
@@ -0,0 +1,102 @@
+; RUN: opt < %s -S -instcombine > %t
+; RUN: grep {= getelementptr} %t | count 20
+; RUN: grep {= phi} %t | count 13
+
+; Don't push the geps through these phis, because they would require
+; two phis each, which burdens the loop with high register pressure.
+
+define void @foo(float* %Ar, float* %Ai, i64 %As, float* %Cr, float* %Ci, i64 %Cs, i64 %n) nounwind {
+entry:
+ %0 = getelementptr inbounds float* %Ar, i64 0 ; <float*> [#uses=1]
+ %1 = getelementptr inbounds float* %Ai, i64 0 ; <float*> [#uses=1]
+ %2 = mul i64 %n, %As ; <i64> [#uses=1]
+ %3 = getelementptr inbounds float* %Ar, i64 %2 ; <float*> [#uses=1]
+ %4 = mul i64 %n, %As ; <i64> [#uses=1]
+ %5 = getelementptr inbounds float* %Ai, i64 %4 ; <float*> [#uses=1]
+ %6 = mul i64 %n, 2 ; <i64> [#uses=1]
+ %7 = mul i64 %6, %As ; <i64> [#uses=1]
+ %8 = getelementptr inbounds float* %Ar, i64 %7 ; <float*> [#uses=1]
+ %9 = mul i64 %n, 2 ; <i64> [#uses=1]
+ %10 = mul i64 %9, %As ; <i64> [#uses=1]
+ %11 = getelementptr inbounds float* %Ai, i64 %10 ; <float*> [#uses=1]
+ %12 = getelementptr inbounds float* %Cr, i64 0 ; <float*> [#uses=1]
+ %13 = getelementptr inbounds float* %Ci, i64 0 ; <float*> [#uses=1]
+ %14 = mul i64 %n, %Cs ; <i64> [#uses=1]
+ %15 = getelementptr inbounds float* %Cr, i64 %14 ; <float*> [#uses=1]
+ %16 = mul i64 %n, %Cs ; <i64> [#uses=1]
+ %17 = getelementptr inbounds float* %Ci, i64 %16 ; <float*> [#uses=1]
+ %18 = mul i64 %n, 2 ; <i64> [#uses=1]
+ %19 = mul i64 %18, %Cs ; <i64> [#uses=1]
+ %20 = getelementptr inbounds float* %Cr, i64 %19 ; <float*> [#uses=1]
+ %21 = mul i64 %n, 2 ; <i64> [#uses=1]
+ %22 = mul i64 %21, %Cs ; <i64> [#uses=1]
+ %23 = getelementptr inbounds float* %Ci, i64 %22 ; <float*> [#uses=1]
+ br label %bb13
+
+bb: ; preds = %bb13
+ %24 = load float* %A0r.0, align 4 ; <float> [#uses=1]
+ %25 = load float* %A0i.0, align 4 ; <float> [#uses=1]
+ %26 = load float* %A1r.0, align 4 ; <float> [#uses=2]
+ %27 = load float* %A1i.0, align 4 ; <float> [#uses=2]
+ %28 = load float* %A2r.0, align 4 ; <float> [#uses=2]
+ %29 = load float* %A2i.0, align 4 ; <float> [#uses=2]
+ %30 = fadd float %26, %28 ; <float> [#uses=2]
+ %31 = fadd float %27, %29 ; <float> [#uses=2]
+ %32 = fsub float %26, %28 ; <float> [#uses=1]
+ %33 = fsub float %27, %29 ; <float> [#uses=1]
+ %34 = fadd float %24, %30 ; <float> [#uses=2]
+ %35 = fadd float %25, %31 ; <float> [#uses=2]
+ %36 = fmul float %30, -1.500000e+00 ; <float> [#uses=1]
+ %37 = fmul float %31, -1.500000e+00 ; <float> [#uses=1]
+ %38 = fadd float %34, %36 ; <float> [#uses=2]
+ %39 = fadd float %35, %37 ; <float> [#uses=2]
+ %40 = fmul float %32, 0x3FEBB67AE0000000 ; <float> [#uses=2]
+ %41 = fmul float %33, 0x3FEBB67AE0000000 ; <float> [#uses=2]
+ %42 = fadd float %38, %41 ; <float> [#uses=1]
+ %43 = fsub float %39, %40 ; <float> [#uses=1]
+ %44 = fsub float %38, %41 ; <float> [#uses=1]
+ %45 = fadd float %39, %40 ; <float> [#uses=1]
+ store float %34, float* %C0r.0, align 4
+ store float %35, float* %C0i.0, align 4
+ store float %42, float* %C1r.0, align 4
+ store float %43, float* %C1i.0, align 4
+ store float %44, float* %C2r.0, align 4
+ store float %45, float* %C2i.0, align 4
+ %46 = getelementptr inbounds float* %A0r.0, i64 %As ; <float*> [#uses=1]
+ %47 = getelementptr inbounds float* %A0i.0, i64 %As ; <float*> [#uses=1]
+ %48 = getelementptr inbounds float* %A1r.0, i64 %As ; <float*> [#uses=1]
+ %49 = getelementptr inbounds float* %A1i.0, i64 %As ; <float*> [#uses=1]
+ %50 = getelementptr inbounds float* %A2r.0, i64 %As ; <float*> [#uses=1]
+ %51 = getelementptr inbounds float* %A2i.0, i64 %As ; <float*> [#uses=1]
+ %52 = getelementptr inbounds float* %C0r.0, i64 %Cs ; <float*> [#uses=1]
+ %53 = getelementptr inbounds float* %C0i.0, i64 %Cs ; <float*> [#uses=1]
+ %54 = getelementptr inbounds float* %C1r.0, i64 %Cs ; <float*> [#uses=1]
+ %55 = getelementptr inbounds float* %C1i.0, i64 %Cs ; <float*> [#uses=1]
+ %56 = getelementptr inbounds float* %C2r.0, i64 %Cs ; <float*> [#uses=1]
+ %57 = getelementptr inbounds float* %C2i.0, i64 %Cs ; <float*> [#uses=1]
+ %58 = add nsw i64 %i.0, 1 ; <i64> [#uses=1]
+ br label %bb13
+
+bb13: ; preds = %bb, %entry
+ %i.0 = phi i64 [ 0, %entry ], [ %58, %bb ] ; <i64> [#uses=2]
+ %C2i.0 = phi float* [ %23, %entry ], [ %57, %bb ] ; <float*> [#uses=2]
+ %C2r.0 = phi float* [ %20, %entry ], [ %56, %bb ] ; <float*> [#uses=2]
+ %C1i.0 = phi float* [ %17, %entry ], [ %55, %bb ] ; <float*> [#uses=2]
+ %C1r.0 = phi float* [ %15, %entry ], [ %54, %bb ] ; <float*> [#uses=2]
+ %C0i.0 = phi float* [ %13, %entry ], [ %53, %bb ] ; <float*> [#uses=2]
+ %C0r.0 = phi float* [ %12, %entry ], [ %52, %bb ] ; <float*> [#uses=2]
+ %A2i.0 = phi float* [ %11, %entry ], [ %51, %bb ] ; <float*> [#uses=2]
+ %A2r.0 = phi float* [ %8, %entry ], [ %50, %bb ] ; <float*> [#uses=2]
+ %A1i.0 = phi float* [ %5, %entry ], [ %49, %bb ] ; <float*> [#uses=2]
+ %A1r.0 = phi float* [ %3, %entry ], [ %48, %bb ] ; <float*> [#uses=2]
+ %A0i.0 = phi float* [ %1, %entry ], [ %47, %bb ] ; <float*> [#uses=2]
+ %A0r.0 = phi float* [ %0, %entry ], [ %46, %bb ] ; <float*> [#uses=2]
+ %59 = icmp slt i64 %i.0, %n ; <i1> [#uses=1]
+ br i1 %59, label %bb, label %bb14
+
+bb14: ; preds = %bb13
+ br label %return
+
+return: ; preds = %bb14
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/phi.ll b/src/LLVM/test/Transforms/InstCombine/phi.ll
new file mode 100644
index 0000000..67e93b0
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/phi.ll
@@ -0,0 +1,622 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128:n8:16:32:64"
+
+define i32 @test1(i32 %A, i1 %b) {
+BB0:
+ br i1 %b, label %BB1, label %BB2
+
+BB1:
+ ; Combine away one argument PHI nodes
+ %B = phi i32 [ %A, %BB0 ]
+ ret i32 %B
+
+BB2:
+ ret i32 %A
+; CHECK: @test1
+; CHECK: BB1:
+; CHECK-NEXT: ret i32 %A
+}
+
+define i32 @test2(i32 %A, i1 %b) {
+BB0:
+ br i1 %b, label %BB1, label %BB2
+
+BB1:
+ br label %BB2
+
+BB2:
+ ; Combine away PHI nodes with same values
+ %B = phi i32 [ %A, %BB0 ], [ %A, %BB1 ]
+ ret i32 %B
+; CHECK: @test2
+; CHECK: BB2:
+; CHECK-NEXT: ret i32 %A
+}
+
+define i32 @test3(i32 %A, i1 %b) {
+BB0:
+ br label %Loop
+
+Loop:
+ ; PHI has same value always.
+ %B = phi i32 [ %A, %BB0 ], [ %B, %Loop ]
+ br i1 %b, label %Loop, label %Exit
+
+Exit:
+ ret i32 %B
+; CHECK: @test3
+; CHECK: Exit:
+; CHECK-NEXT: ret i32 %A
+}
+
+define i32 @test4(i1 %b) {
+BB0:
+ ; Loop is unreachable
+ ret i32 7
+
+Loop: ; preds = %L2, %Loop
+ ; PHI has same value always.
+ %B = phi i32 [ %B, %L2 ], [ %B, %Loop ]
+ br i1 %b, label %L2, label %Loop
+
+L2: ; preds = %Loop
+ br label %Loop
+; CHECK: @test4
+; CHECK: Loop:
+; CHECK-NEXT: br i1 %b
+}
+
+define i32 @test5(i32 %A, i1 %b) {
+BB0:
+ br label %Loop
+
+Loop: ; preds = %Loop, %BB0
+ ; PHI has same value always.
+ %B = phi i32 [ %A, %BB0 ], [ undef, %Loop ]
+ br i1 %b, label %Loop, label %Exit
+
+Exit: ; preds = %Loop
+ ret i32 %B
+; CHECK: @test5
+; CHECK: Loop:
+; CHECK-NEXT: br i1 %b
+; CHECK: Exit:
+; CHECK-NEXT: ret i32 %A
+}
+
+define i32 @test6(i16 %A, i1 %b) {
+BB0:
+ %X = zext i16 %A to i32
+ br i1 %b, label %BB1, label %BB2
+
+BB1:
+ %Y = zext i16 %A to i32
+ br label %BB2
+
+BB2:
+ ;; Suck casts into phi
+ %B = phi i32 [ %X, %BB0 ], [ %Y, %BB1 ]
+ ret i32 %B
+; CHECK: @test6
+; CHECK: BB2:
+; CHECK: zext i16 %A to i32
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test7(i32 %A, i1 %b) {
+BB0:
+ br label %Loop
+
+Loop: ; preds = %Loop, %BB0
+ ; PHI is dead.
+ %B = phi i32 [ %A, %BB0 ], [ %C, %Loop ]
+ %C = add i32 %B, 123
+ br i1 %b, label %Loop, label %Exit
+
+Exit: ; preds = %Loop
+ ret i32 0
+; CHECK: @test7
+; CHECK: Loop:
+; CHECK-NEXT: br i1 %b
+}
+
+define i32* @test8({ i32, i32 } *%A, i1 %b) {
+BB0:
+ %X = getelementptr inbounds { i32, i32 } *%A, i32 0, i32 1
+ br i1 %b, label %BB1, label %BB2
+
+BB1:
+ %Y = getelementptr { i32, i32 } *%A, i32 0, i32 1
+ br label %BB2
+
+BB2:
+ ;; Suck GEPs into phi
+ %B = phi i32* [ %X, %BB0 ], [ %Y, %BB1 ]
+ ret i32* %B
+; CHECK: @test8
+; CHECK-NOT: phi
+; CHECK: BB2:
+; CHECK-NEXT: %B = getelementptr { i32, i32 }* %A
+; CHECK-NEXT: ret i32* %B
+}
+
+define i32 @test9(i32* %A, i32* %B) {
+entry:
+ %c = icmp eq i32* %A, null
+ br i1 %c, label %bb1, label %bb
+
+bb:
+ %C = load i32* %B, align 1
+ br label %bb2
+
+bb1:
+ %D = load i32* %A, align 1
+ br label %bb2
+
+bb2:
+ %E = phi i32 [ %C, %bb ], [ %D, %bb1 ]
+ ret i32 %E
+; CHECK: @test9
+; CHECK: bb2:
+; CHECK-NEXT: phi i32* [ %B, %bb ], [ %A, %bb1 ]
+; CHECK-NEXT: %E = load i32* %{{[^,]*}}, align 1
+; CHECK-NEXT: ret i32 %E
+
+}
+
+define i32 @test10(i32* %A, i32* %B) {
+entry:
+ %c = icmp eq i32* %A, null
+ br i1 %c, label %bb1, label %bb
+
+bb:
+ %C = load i32* %B, align 16
+ br label %bb2
+
+bb1:
+ %D = load i32* %A, align 32
+ br label %bb2
+
+bb2:
+ %E = phi i32 [ %C, %bb ], [ %D, %bb1 ]
+ ret i32 %E
+; CHECK: @test10
+; CHECK: bb2:
+; CHECK-NEXT: phi i32* [ %B, %bb ], [ %A, %bb1 ]
+; CHECK-NEXT: %E = load i32* %{{[^,]*}}, align 16
+; CHECK-NEXT: ret i32 %E
+}
+
+
+; PR1777
+declare i1 @test11a()
+
+define i1 @test11() {
+entry:
+ %a = alloca i32
+ %i = ptrtoint i32* %a to i64
+ %b = call i1 @test11a()
+ br i1 %b, label %one, label %two
+
+one:
+ %x = phi i64 [%i, %entry], [%y, %two]
+ %c = call i1 @test11a()
+ br i1 %c, label %two, label %end
+
+two:
+ %y = phi i64 [%i, %entry], [%x, %one]
+ %d = call i1 @test11a()
+ br i1 %d, label %one, label %end
+
+end:
+ %f = phi i64 [ %x, %one], [%y, %two]
+ ; Change the %f to %i, and the optimizer suddenly becomes a lot smarter
+ ; even though %f must equal %i at this point
+ %g = inttoptr i64 %f to i32*
+ store i32 10, i32* %g
+ %z = call i1 @test11a()
+ ret i1 %z
+; CHECK: @test11
+; CHECK-NOT: phi i32
+; CHECK: ret i1 %z
+}
+
+
+define i64 @test12(i1 %cond, i8* %Ptr, i64 %Val) {
+entry:
+ %tmp41 = ptrtoint i8* %Ptr to i64
+ %tmp42 = zext i64 %tmp41 to i128
+ br i1 %cond, label %end, label %two
+
+two:
+ %tmp36 = zext i64 %Val to i128 ; <i128> [#uses=1]
+ %tmp37 = shl i128 %tmp36, 64 ; <i128> [#uses=1]
+ %ins39 = or i128 %tmp42, %tmp37 ; <i128> [#uses=1]
+ br label %end
+
+end:
+ %tmp869.0 = phi i128 [ %tmp42, %entry ], [ %ins39, %two ]
+ %tmp32 = trunc i128 %tmp869.0 to i64 ; <i64> [#uses=1]
+ %tmp29 = lshr i128 %tmp869.0, 64 ; <i128> [#uses=1]
+ %tmp30 = trunc i128 %tmp29 to i64 ; <i64> [#uses=1]
+
+ %tmp2 = add i64 %tmp32, %tmp30
+ ret i64 %tmp2
+; CHECK: @test12
+; CHECK-NOT: zext
+; CHECK: end:
+; CHECK-NEXT: phi i64 [ 0, %entry ], [ %Val, %two ]
+; CHECK-NOT: phi
+; CHECK: ret i64
+}
+
+declare void @test13f(double, i32)
+
+define void @test13(i1 %cond, i32 %V1, double %Vald) {
+entry:
+ %tmp42 = zext i32 %V1 to i128
+ br i1 %cond, label %end, label %two
+
+two:
+ %Val = bitcast double %Vald to i64
+ %tmp36 = zext i64 %Val to i128 ; <i128> [#uses=1]
+ %tmp37 = shl i128 %tmp36, 64 ; <i128> [#uses=1]
+ %ins39 = or i128 %tmp42, %tmp37 ; <i128> [#uses=1]
+ br label %end
+
+end:
+ %tmp869.0 = phi i128 [ %tmp42, %entry ], [ %ins39, %two ]
+ %tmp32 = trunc i128 %tmp869.0 to i32
+ %tmp29 = lshr i128 %tmp869.0, 64 ; <i128> [#uses=1]
+ %tmp30 = trunc i128 %tmp29 to i64 ; <i64> [#uses=1]
+ %tmp31 = bitcast i64 %tmp30 to double
+
+ call void @test13f(double %tmp31, i32 %tmp32)
+ ret void
+; CHECK: @test13
+; CHECK-NOT: zext
+; CHECK: end:
+; CHECK-NEXT: phi double [ 0.000000e+00, %entry ], [ %Vald, %two ]
+; CHECK-NEXT: call void @test13f(double {{[^,]*}}, i32 %V1)
+; CHECK: ret void
+}
+
+define i640 @test14a(i320 %A, i320 %B, i1 %b1) {
+BB0:
+ %a = zext i320 %A to i640
+ %b = zext i320 %B to i640
+ br label %Loop
+
+Loop:
+ %C = phi i640 [ %a, %BB0 ], [ %b, %Loop ]
+ br i1 %b1, label %Loop, label %Exit
+
+Exit: ; preds = %Loop
+ ret i640 %C
+; CHECK: @test14a
+; CHECK: Loop:
+; CHECK-NEXT: phi i320
+}
+
+define i160 @test14b(i320 %A, i320 %B, i1 %b1) {
+BB0:
+ %a = trunc i320 %A to i160
+ %b = trunc i320 %B to i160
+ br label %Loop
+
+Loop:
+ %C = phi i160 [ %a, %BB0 ], [ %b, %Loop ]
+ br i1 %b1, label %Loop, label %Exit
+
+Exit: ; preds = %Loop
+ ret i160 %C
+; CHECK: @test14b
+; CHECK: Loop:
+; CHECK-NEXT: phi i160
+}
+
+declare i64 @test15a(i64)
+
+define i64 @test15b(i64 %A, i1 %b) {
+; CHECK: @test15b
+entry:
+ %i0 = zext i64 %A to i128
+ %i1 = shl i128 %i0, 64
+ %i = or i128 %i1, %i0
+ br i1 %b, label %one, label %two
+; CHECK: entry:
+; CHECK-NEXT: br i1 %b
+
+one:
+ %x = phi i128 [%i, %entry], [%y, %two]
+ %x1 = lshr i128 %x, 64
+ %x2 = trunc i128 %x1 to i64
+ %c = call i64 @test15a(i64 %x2)
+ %c1 = zext i64 %c to i128
+ br label %two
+
+; CHECK: one:
+; CHECK-NEXT: phi i64
+; CHECK-NEXT: %c = call i64 @test15a
+
+two:
+ %y = phi i128 [%i, %entry], [%c1, %one]
+ %y1 = lshr i128 %y, 64
+ %y2 = trunc i128 %y1 to i64
+ %d = call i64 @test15a(i64 %y2)
+ %d1 = trunc i64 %d to i1
+ br i1 %d1, label %one, label %end
+
+; CHECK: two:
+; CHECK-NEXT: phi i64
+; CHECK-NEXT: phi i64
+; CHECK-NEXT: %d = call i64 @test15a
+
+end:
+ %g = trunc i128 %y to i64
+ ret i64 %g
+; CHECK: end:
+; CHECK-NEXT: ret i64
+}
+
+; PR6512 - Shouldn't merge loads from different addr spaces.
+define i32 @test16(i32 addrspace(1)* %pointer1, i32 %flag, i32* %pointer2)
+nounwind {
+entry:
+ %retval = alloca i32, align 4 ; <i32*> [#uses=2]
+ %pointer1.addr = alloca i32 addrspace(1)*, align 4 ; <i32 addrspace(1)**>
+ %flag.addr = alloca i32, align 4 ; <i32*> [#uses=2]
+ %pointer2.addr = alloca i32*, align 4 ; <i32**> [#uses=2]
+ %res = alloca i32, align 4 ; <i32*> [#uses=4]
+ store i32 addrspace(1)* %pointer1, i32 addrspace(1)** %pointer1.addr
+ store i32 %flag, i32* %flag.addr
+ store i32* %pointer2, i32** %pointer2.addr
+ store i32 10, i32* %res
+ %tmp = load i32* %flag.addr ; <i32> [#uses=1]
+ %tobool = icmp ne i32 %tmp, 0 ; <i1> [#uses=1]
+ br i1 %tobool, label %if.then, label %if.else
+
+return: ; preds = %if.end
+ %tmp7 = load i32* %retval ; <i32> [#uses=1]
+ ret i32 %tmp7
+
+if.end: ; preds = %if.else, %if.then
+ %tmp6 = load i32* %res ; <i32> [#uses=1]
+ store i32 %tmp6, i32* %retval
+ br label %return
+
+if.then: ; preds = %entry
+ %tmp1 = load i32 addrspace(1)** %pointer1.addr ; <i32 addrspace(1)*>
+ %arrayidx = getelementptr i32 addrspace(1)* %tmp1, i32 0 ; <i32 addrspace(1)*> [#uses=1]
+ %tmp2 = load i32 addrspace(1)* %arrayidx ; <i32> [#uses=1]
+ store i32 %tmp2, i32* %res
+ br label %if.end
+
+if.else: ; preds = %entry
+ %tmp3 = load i32** %pointer2.addr ; <i32*> [#uses=1]
+ %arrayidx4 = getelementptr i32* %tmp3, i32 0 ; <i32*> [#uses=1]
+ %tmp5 = load i32* %arrayidx4 ; <i32> [#uses=1]
+ store i32 %tmp5, i32* %res
+ br label %if.end
+}
+
+; PR4413
+declare i32 @ext()
+; CHECK: @test17
+define i32 @test17(i1 %a) {
+entry:
+ br i1 %a, label %bb1, label %bb2
+
+bb1: ; preds = %entry
+ %0 = tail call i32 @ext() ; <i32> [#uses=1]
+ br label %bb2
+
+bb2: ; preds = %bb1, %entry
+ %cond = phi i1 [ true, %bb1 ], [ false, %entry ] ; <i1> [#uses=1]
+; CHECK-NOT: %val = phi i32 [ %0, %bb1 ], [ 0, %entry ]
+ %val = phi i32 [ %0, %bb1 ], [ 0, %entry ] ; <i32> [#uses=1]
+ %res = select i1 %cond, i32 %val, i32 0 ; <i32> [#uses=1]
+; CHECK: ret i32 %cond
+ ret i32 %res
+}
+
+define i1 @test18(i1 %cond) {
+ %zero = alloca i32
+ %one = alloca i32
+ br i1 %cond, label %true, label %false
+true:
+ br label %ret
+false:
+ br label %ret
+ret:
+ %ptr = phi i32* [ %zero, %true ] , [ %one, %false ]
+ %isnull = icmp eq i32* %ptr, null
+ ret i1 %isnull
+; CHECK: @test18
+; CHECK: ret i1 false
+}
+
+define i1 @test19(i1 %cond, double %x) {
+ br i1 %cond, label %true, label %false
+true:
+ br label %ret
+false:
+ br label %ret
+ret:
+ %p = phi double [ %x, %true ], [ 0x7FF0000000000000, %false ]; RHS = +infty
+ %cmp = fcmp ule double %x, %p
+ ret i1 %cmp
+; CHECK: @test19
+; CHECK: ret i1 true
+}
+
+define i1 @test20(i1 %cond) {
+ %a = alloca i32
+ %b = alloca i32
+ %c = alloca i32
+ br i1 %cond, label %true, label %false
+true:
+ br label %ret
+false:
+ br label %ret
+ret:
+ %p = phi i32* [ %a, %true ], [ %b, %false ]
+ %r = icmp eq i32* %p, %c
+ ret i1 %r
+; CHECK: @test20
+; CHECK: ret i1 false
+}
+
+define i1 @test21(i1 %c1, i1 %c2) {
+ %a = alloca i32
+ %b = alloca i32
+ %c = alloca i32
+ br i1 %c1, label %true, label %false
+true:
+ br label %loop
+false:
+ br label %loop
+loop:
+ %p = phi i32* [ %a, %true ], [ %b, %false ], [ %p, %loop ]
+ %r = icmp eq i32* %p, %c
+ br i1 %c2, label %ret, label %loop
+ret:
+ ret i1 %r
+; CHECK: @test21
+; CHECK: ret i1 false
+}
+
+define void @test22() {
+; CHECK: @test22
+entry:
+ br label %loop
+loop:
+ %phi = phi i32 [ 0, %entry ], [ %y, %loop ]
+ %y = add i32 %phi, 1
+ %o = or i32 %y, %phi
+ %e = icmp eq i32 %o, %y
+ br i1 %e, label %loop, label %ret
+; CHECK: br i1 %e
+ret:
+ ret void
+}
+
+define i32 @test23(i32 %A, i1 %b, i32 * %P) {
+BB0:
+ br label %Loop
+
+Loop: ; preds = %Loop, %BB0
+ ; PHI has same value always.
+ %B = phi i32 [ %A, %BB0 ], [ 42, %Loop ]
+ %D = add i32 %B, 19
+ store i32 %D, i32* %P
+ br i1 %b, label %Loop, label %Exit
+
+Exit: ; preds = %Loop
+ %E = add i32 %B, 19
+ ret i32 %E
+; CHECK: @test23
+; CHECK: %phitmp = add i32 %A, 19
+; CHECK: Loop:
+; CHECK-NEXT: %B = phi i32 [ %phitmp, %BB0 ], [ 61, %Loop ]
+; CHECK: Exit:
+; CHECK-NEXT: ret i32 %B
+}
+
+define i32 @test24(i32 %A, i1 %cond) {
+BB0:
+ %X = add nuw nsw i32 %A, 1
+ br i1 %cond, label %BB1, label %BB2
+
+BB1:
+ %Y = add nuw i32 %A, 1
+ br label %BB2
+
+BB2:
+ %C = phi i32 [ %X, %BB0 ], [ %Y, %BB1 ]
+ ret i32 %C
+; CHECK: @test24
+; CHECK-NOT: phi
+; CHECK: BB2:
+; CHECK-NEXT: %C = add nuw i32 %A, 1
+; CHECK-NEXT: ret i32 %C
+}
+
+; Same as test11, but used to be missed due to a bug.
+declare i1 @test25a()
+
+define i1 @test25() {
+entry:
+ %a = alloca i32
+ %i = ptrtoint i32* %a to i64
+ %b = call i1 @test25a()
+ br i1 %b, label %one, label %two
+
+one:
+ %x = phi i64 [%y, %two], [%i, %entry]
+ %c = call i1 @test25a()
+ br i1 %c, label %two, label %end
+
+two:
+ %y = phi i64 [%x, %one], [%i, %entry]
+ %d = call i1 @test25a()
+ br i1 %d, label %one, label %end
+
+end:
+ %f = phi i64 [ %x, %one], [%y, %two]
+ ; Change the %f to %i, and the optimizer suddenly becomes a lot smarter
+ ; even though %f must equal %i at this point
+ %g = inttoptr i64 %f to i32*
+ store i32 10, i32* %g
+ %z = call i1 @test25a()
+ ret i1 %z
+; CHECK: @test25
+; CHECK-NOT: phi i32
+; CHECK: ret i1 %z
+}
+
+declare i1 @test26a()
+
+define i1 @test26(i32 %n) {
+entry:
+ %a = alloca i32
+ %i = ptrtoint i32* %a to i64
+ %b = call i1 @test26a()
+ br label %one
+
+one:
+ %x = phi i64 [%y, %two], [%w, %three], [%i, %entry]
+ %c = call i1 @test26a()
+ switch i32 %n, label %end [
+ i32 2, label %two
+ i32 3, label %three
+ ]
+
+two:
+ %y = phi i64 [%x, %one], [%w, %three]
+ %d = call i1 @test26a()
+ switch i32 %n, label %end [
+ i32 10, label %one
+ i32 30, label %three
+ ]
+
+three:
+ %w = phi i64 [%y, %two], [%x, %one]
+ %e = call i1 @test26a()
+ br i1 %e, label %one, label %two
+
+end:
+ %f = phi i64 [ %x, %one], [%y, %two]
+ ; Change the %f to %i, and the optimizer suddenly becomes a lot smarter
+ ; even though %f must equal %i at this point
+ %g = inttoptr i64 %f to i32*
+ store i32 10, i32* %g
+ %z = call i1 @test26a()
+ ret i1 %z
+; CHECK: @test26
+; CHECK-NOT: phi i32
+; CHECK: ret i1 %z
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/pr2645-0.ll b/src/LLVM/test/Transforms/InstCombine/pr2645-0.ll
new file mode 100644
index 0000000..9bcaa43
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/pr2645-0.ll
@@ -0,0 +1,33 @@
+; RUN: opt < %s -instcombine -S | grep {insertelement <4 x float> undef}
+
+; Instcombine should be able to prove that none of the
+; insertelement's first operand's elements are needed.
+
+define internal void @""(i8*) {
+; <label>:1
+ bitcast i8* %0 to i32* ; <i32*>:2 [#uses=1]
+ load i32* %2, align 1 ; <i32>:3 [#uses=1]
+ getelementptr i8* %0, i32 4 ; <i8*>:4 [#uses=1]
+ bitcast i8* %4 to i32* ; <i32*>:5 [#uses=1]
+ load i32* %5, align 1 ; <i32>:6 [#uses=1]
+ br label %7
+
+; <label>:7 ; preds = %9, %1
+ %.01 = phi <4 x float> [ undef, %1 ], [ %12, %9 ] ; <<4 x float>> [#uses=1]
+ %.0 = phi i32 [ %3, %1 ], [ %15, %9 ] ; <i32> [#uses=3]
+ icmp slt i32 %.0, %6 ; <i1>:8 [#uses=1]
+ br i1 %8, label %9, label %16
+
+; <label>:9 ; preds = %7
+ sitofp i32 %.0 to float ; <float>:10 [#uses=1]
+ insertelement <4 x float> %.01, float %10, i32 0 ; <<4 x float>>:11 [#uses=1]
+ shufflevector <4 x float> %11, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:12 [#uses=2]
+ getelementptr i8* %0, i32 48 ; <i8*>:13 [#uses=1]
+ bitcast i8* %13 to <4 x float>* ; <<4 x float>*>:14 [#uses=1]
+ store <4 x float> %12, <4 x float>* %14, align 16
+ add i32 %.0, 2 ; <i32>:15 [#uses=1]
+ br label %7
+
+; <label>:16 ; preds = %7
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/pr2645-1.ll b/src/LLVM/test/Transforms/InstCombine/pr2645-1.ll
new file mode 100644
index 0000000..d320daf
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/pr2645-1.ll
@@ -0,0 +1,39 @@
+; RUN: opt < %s -instcombine -S | grep shufflevector
+; PR2645
+
+; instcombine shouldn't delete the shufflevector.
+
+define internal void @""(i8*, i32, i8*) {
+; <label>:3
+ br label %4
+
+; <label>:4 ; preds = %6, %3
+ %.0 = phi i32 [ 0, %3 ], [ %19, %6 ] ; <i32> [#uses=4]
+ %5 = icmp slt i32 %.0, %1 ; <i1> [#uses=1]
+ br i1 %5, label %6, label %20
+
+; <label>:6 ; preds = %4
+ %7 = getelementptr i8* %2, i32 %.0 ; <i8*> [#uses=1]
+ %8 = bitcast i8* %7 to <4 x i16>* ; <<4 x i16>*> [#uses=1]
+ %9 = load <4 x i16>* %8, align 1 ; <<4 x i16>> [#uses=1]
+ %10 = bitcast <4 x i16> %9 to <1 x i64> ; <<1 x i64>> [#uses=1]
+ %11 = call <2 x i64> @foo(<1 x i64> %10)
+; <<2 x i64>> [#uses=1]
+ %12 = bitcast <2 x i64> %11 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %13 = bitcast <4 x i32> %12 to <8 x i16> ; <<8 x i16>> [#uses=2]
+ %14 = shufflevector <8 x i16> %13, <8 x i16> %13, <8 x i32> < i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3 > ; <<8 x i16>> [#uses=1]
+ %15 = bitcast <8 x i16> %14 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %16 = sitofp <4 x i32> %15 to <4 x float> ; <<4 x float>> [#uses=1]
+ %17 = getelementptr i8* %0, i32 %.0 ; <i8*> [#uses=1]
+ %18 = bitcast i8* %17 to <4 x float>* ; <<4 x float>*> [#uses=1]
+ store <4 x float> %16, <4 x float>* %18, align 1
+ %19 = add i32 %.0, 1 ; <i32> [#uses=1]
+ br label %4
+
+; <label>:20 ; preds = %4
+ call void @llvm.x86.mmx.emms( )
+ ret void
+}
+
+declare <2 x i64> @foo(<1 x i64>)
+declare void @llvm.x86.mmx.emms( )
diff --git a/src/LLVM/test/Transforms/InstCombine/pr2996.ll b/src/LLVM/test/Transforms/InstCombine/pr2996.ll
new file mode 100644
index 0000000..ff3245d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/pr2996.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine
+; PR2996
+
+define void @func_53(i16 signext %p_56) nounwind {
+entry:
+ %0 = icmp sgt i16 %p_56, -1 ; <i1> [#uses=1]
+ %iftmp.0.0 = select i1 %0, i32 -1, i32 0 ; <i32> [#uses=1]
+ %1 = call i32 (...)* @func_4(i32 %iftmp.0.0) nounwind ; <i32> [#uses=0]
+ ret void
+}
+
+declare i32 @func_4(...)
diff --git a/src/LLVM/test/Transforms/InstCombine/pr8547.ll b/src/LLVM/test/Transforms/InstCombine/pr8547.ll
new file mode 100644
index 0000000..485f4d9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/pr8547.ll
@@ -0,0 +1,26 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; Converting the 2 shifts to SHL 6 without the AND is wrong. PR 8547.
+
+@g_2 = global i32 0, align 4
+@.str = constant [10 x i8] c"g_2 = %d\0A\00"
+
+declare i32 @printf(i8*, ...)
+
+define i32 @main() nounwind {
+codeRepl:
+ br label %for.cond
+
+for.cond: ; preds = %for.cond, %codeRepl
+ %storemerge = phi i32 [ 0, %codeRepl ], [ 5, %for.cond ]
+ store i32 %storemerge, i32* @g_2, align 4
+ %shl = shl i32 %storemerge, 30
+ %conv2 = lshr i32 %shl, 24
+; CHECK: %0 = shl nuw nsw i32 %storemerge, 6
+; CHECK: %conv2 = and i32 %0, 64
+ %tobool = icmp eq i32 %conv2, 0
+ br i1 %tobool, label %for.cond, label %codeRepl2
+
+codeRepl2: ; preds = %for.cond
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i64 0, i64 0), i32 %conv2) nounwind
+ ret i32 0
+}
\ No newline at end of file
diff --git a/src/LLVM/test/Transforms/InstCombine/preserve-sminmax.ll b/src/LLVM/test/Transforms/InstCombine/preserve-sminmax.ll
new file mode 100644
index 0000000..00232cc
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/preserve-sminmax.ll
@@ -0,0 +1,32 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; Instcombine normally would fold the sdiv into the comparison,
+; making "icmp slt i32 %h, 2", but in this case the sdiv has
+; another use, so it wouldn't be a big win, and it would also
+; obfuscate an otherwise obvious smax pattern to the point where
+; other analyses wouldn't recognize it.
+
+define i32 @foo(i32 %h) {
+ %sd = sdiv i32 %h, 2
+ %t = icmp slt i32 %sd, 1
+ %r = select i1 %t, i32 %sd, i32 1
+ ret i32 %r
+}
+
+; CHECK: %sd = sdiv i32 %h, 2
+; CHECK: %t = icmp slt i32 %sd, 1
+; CHECK: %r = select i1 %t, i32 %sd, i32 1
+; CHECK: ret i32 %r
+
+define i32 @bar(i32 %h) {
+ %sd = sdiv i32 %h, 2
+ %t = icmp sgt i32 %sd, 1
+ %r = select i1 %t, i32 %sd, i32 1
+ ret i32 %r
+}
+
+; CHECK: %sd = sdiv i32 %h, 2
+; CHECK: %t = icmp sgt i32 %sd, 1
+; CHECK: %r = select i1 %t, i32 %sd, i32 1
+; CHECK: ret i32 %r
+
diff --git a/src/LLVM/test/Transforms/InstCombine/ptr-int-cast.ll b/src/LLVM/test/Transforms/InstCombine/ptr-int-cast.ll
new file mode 100644
index 0000000..9524d44
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/ptr-int-cast.ll
@@ -0,0 +1,29 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+
+define i1 @test1(i32 *%x) nounwind {
+entry:
+; CHECK: test1
+; CHECK: ptrtoint i32* %x to i64
+ %0 = ptrtoint i32* %x to i1
+ ret i1 %0
+}
+
+define i32* @test2(i128 %x) nounwind {
+entry:
+; CHECK: test2
+; CHECK: inttoptr i64 %0 to i32*
+ %0 = inttoptr i128 %x to i32*
+ ret i32* %0
+}
+
+; PR3574
+; CHECK: f0
+; CHECK: %1 = zext i32 %a0 to i64
+; CHECK: ret i64 %1
+define i64 @f0(i32 %a0) nounwind {
+ %t0 = inttoptr i32 %a0 to i8*
+ %t1 = ptrtoint i8* %t0 to i64
+ ret i64 %t1
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/rem.ll b/src/LLVM/test/Transforms/InstCombine/rem.ll
new file mode 100644
index 0000000..7a1b3bd
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/rem.ll
@@ -0,0 +1,88 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | not grep rem
+; END.
+
+define i32 @test1(i32 %A) {
+ %B = srem i32 %A, 1 ; is a constant 0
+ ret i32 %B
+}
+
+define i32 @test2(i32 %A) { ; 0 % X = 0, we don't need to preserve traps
+ %B = srem i32 0, %A
+ ret i32 %B
+}
+
+define i32 @test3(i32 %A) {
+ %B = urem i32 %A, 8
+ ret i32 %B
+}
+
+define i1 @test3a(i32 %A) {
+ %B = srem i32 %A, -8
+ %C = icmp ne i32 %B, 0
+ ret i1 %C
+}
+
+define i32 @test4(i32 %X, i1 %C) {
+ %V = select i1 %C, i32 1, i32 8
+ %R = urem i32 %X, %V
+ ret i32 %R
+}
+
+define i32 @test5(i32 %X, i8 %B) {
+ %shift.upgrd.1 = zext i8 %B to i32
+ %Amt = shl i32 32, %shift.upgrd.1
+ %V = urem i32 %X, %Amt
+ ret i32 %V
+}
+
+define i32 @test6(i32 %A) {
+ %B = srem i32 %A, 0 ;; undef
+ ret i32 %B
+}
+
+define i32 @test7(i32 %A) {
+ %B = mul i32 %A, 8
+ %C = srem i32 %B, 4
+ ret i32 %C
+}
+
+define i32 @test8(i32 %A) {
+ %B = shl i32 %A, 4
+ %C = srem i32 %B, 8
+ ret i32 %C
+}
+
+define i32 @test9(i32 %A) {
+ %B = mul i32 %A, 64
+ %C = urem i32 %B, 32
+ ret i32 %C
+}
+
+define i32 @test10(i8 %c) {
+ %tmp.1 = zext i8 %c to i32
+ %tmp.2 = mul i32 %tmp.1, 4
+ %tmp.3 = sext i32 %tmp.2 to i64
+ %tmp.5 = urem i64 %tmp.3, 4
+ %tmp.6 = trunc i64 %tmp.5 to i32
+ ret i32 %tmp.6
+}
+
+define i32 @test11(i32 %i) {
+ %tmp.1 = and i32 %i, -2
+ %tmp.3 = mul i32 %tmp.1, 2
+ %tmp.5 = urem i32 %tmp.3, 4
+ ret i32 %tmp.5
+}
+
+define i32 @test12(i32 %i) {
+ %tmp.1 = and i32 %i, -4
+ %tmp.5 = srem i32 %tmp.1, 2
+ ret i32 %tmp.5
+}
+
+define i32 @test13(i32 %i) {
+ %x = srem i32 %i, %i
+ ret i32 %x
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/sdiv-1.ll b/src/LLVM/test/Transforms/InstCombine/sdiv-1.ll
new file mode 100644
index 0000000..c46b5ea
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/sdiv-1.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine -inline -S | not grep '-715827882'
+; PR3142
+
+define i32 @a(i32 %X) nounwind readnone {
+entry:
+ %0 = sub i32 0, %X
+ %1 = sdiv i32 %0, -3
+ ret i32 %1
+}
+
+define i32 @b(i32 %X) nounwind readnone {
+entry:
+ %0 = call i32 @a(i32 -2147483648)
+ ret i32 %0
+}
+
+define i32 @c(i32 %X) nounwind readnone {
+entry:
+ %0 = sub i32 0, -2147483648
+ %1 = sdiv i32 %0, -3
+ ret i32 %1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/sdiv-2.ll b/src/LLVM/test/Transforms/InstCombine/sdiv-2.ll
new file mode 100644
index 0000000..0e4c008
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/sdiv-2.ll
@@ -0,0 +1,28 @@
+; RUN: opt < %s -instcombine -disable-output
+; PR3144
+
+define fastcc i32 @func(i32 %length) nounwind {
+entry:
+ %0 = icmp ne i32 %length, -1 ; <i1> [#uses=1]
+ %iftmp.13.0 = select i1 %0, i128 0, i128 200000000 ; <i128> [#uses=2]
+ %1 = sdiv i128 %iftmp.13.0, 10 ; <i128> [#uses=1]
+ br label %bb5
+
+bb5: ; preds = %bb8, %entry
+ %v.0 = phi i128 [ 0, %entry ], [ %6, %bb8 ] ; <i128> [#uses=2]
+ %2 = icmp sgt i128 %v.0, %1 ; <i1> [#uses=1]
+ br i1 %2, label %overflow, label %bb7
+
+bb7: ; preds = %bb5
+ %3 = mul i128 %v.0, 10 ; <i128> [#uses=2]
+ %4 = sub i128 %iftmp.13.0, 0 ; <i128> [#uses=1]
+ %5 = icmp slt i128 %4, %3 ; <i1> [#uses=1]
+ br i1 %5, label %overflow, label %bb8
+
+bb8: ; preds = %bb7
+ %6 = add i128 0, %3 ; <i128> [#uses=1]
+ br label %bb5
+
+overflow: ; preds = %bb7, %bb5
+ ret i32 1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/sdiv-shift.ll b/src/LLVM/test/Transforms/InstCombine/sdiv-shift.ll
new file mode 100644
index 0000000..f4d2b36
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/sdiv-shift.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | not grep div
+
+define i32 @a(i16 zeroext %x, i32 %y) nounwind {
+entry:
+ %conv = zext i16 %x to i32
+ %s = shl i32 2, %y
+ %d = sdiv i32 %conv, %s
+ ret i32 %d
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/select-2.ll b/src/LLVM/test/Transforms/InstCombine/select-2.ll
new file mode 100644
index 0000000..a76addc
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/select-2.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -S | grep select | count 2
+
+; Make sure instcombine doesn't fold select into operands. We don't want to emit
+; select of two integers unless it's selecting 0 / 1.
+
+define i32 @t1(i32 %c, i32 %x) nounwind {
+ %t1 = icmp eq i32 %c, 0
+ %t2 = lshr i32 %x, 18
+ %t3 = select i1 %t1, i32 %t2, i32 %x
+ ret i32 %t3
+}
+
+define i32 @t2(i32 %c, i32 %x) nounwind {
+ %t1 = icmp eq i32 %c, 0
+ %t2 = and i32 %x, 18
+ %t3 = select i1 %t1, i32 %t2, i32 %x
+ ret i32 %t3
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/select-crash.ll b/src/LLVM/test/Transforms/InstCombine/select-crash.ll
new file mode 100644
index 0000000..18af152
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/select-crash.ll
@@ -0,0 +1,32 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+; Formerly crashed, PR8490.
+
+define fastcc double @gimp_operation_color_balance_map(float %value, double %highlights) nounwind readnone inlinehint {
+entry:
+; CHECK: gimp_operation_color_balance_map
+; CHECK: fsub double -0.000000
+ %conv = fpext float %value to double
+ %div = fdiv double %conv, 1.600000e+01
+ %add = fadd double %div, 1.000000e+00
+ %div1 = fdiv double 1.000000e+00, %add
+ %sub = fsub double 1.075000e+00, %div1
+ %sub24 = fsub double 1.000000e+00, %sub
+ %add26 = fadd double %sub, 1.000000e+00
+ %cmp86 = fcmp ogt double %highlights, 0.000000e+00
+ %cond90 = select i1 %cmp86, double %sub24, double %add26
+ %mul91 = fmul double %highlights, %cond90
+ %add94 = fadd double undef, %mul91
+ ret double %add94
+}
+
+; PR10180: same crash, but with vectors
+define <4 x float> @foo(i1 %b, <4 x float> %x, <4 x float> %y, <4 x float> %z) {
+; CHECK: @foo
+; CHECK: fsub <4 x float>
+; CHECK: select
+; CHECK: fadd <4 x float>
+ %a = fadd <4 x float> %x, %y
+ %sub = fsub <4 x float> %x, %z
+ %sel = select i1 %b, <4 x float> %a, <4 x float> %sub
+ ret <4 x float> %sel
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/select-load-call.ll b/src/LLVM/test/Transforms/InstCombine/select-load-call.ll
new file mode 100644
index 0000000..bef0cf8
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/select-load-call.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine -S | grep {ret i32 1}
+
+declare void @test2()
+
+define i32 @test(i1 %cond, i32 *%P) {
+ %A = alloca i32
+ store i32 1, i32* %P
+ store i32 1, i32* %A
+
+ call void @test2() readonly
+
+ %P2 = select i1 %cond, i32 *%P, i32* %A
+ %V = load i32* %P2
+ ret i32 %V
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/select.ll b/src/LLVM/test/Transforms/InstCombine/select.ll
new file mode 100644
index 0000000..11f9b5b
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/select.ll
@@ -0,0 +1,811 @@
+; This test makes sure that these instructions are properly eliminated.
+; PR1822
+
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @test1(i32 %A, i32 %B) {
+ %C = select i1 false, i32 %A, i32 %B
+ ret i32 %C
+; CHECK: @test1
+; CHECK: ret i32 %B
+}
+
+define i32 @test2(i32 %A, i32 %B) {
+ %C = select i1 true, i32 %A, i32 %B
+ ret i32 %C
+; CHECK: @test2
+; CHECK: ret i32 %A
+}
+
+
+define i32 @test3(i1 %C, i32 %I) {
+ ; V = I
+ %V = select i1 %C, i32 %I, i32 %I
+ ret i32 %V
+; CHECK: @test3
+; CHECK: ret i32 %I
+}
+
+define i1 @test4(i1 %C) {
+ ; V = C
+ %V = select i1 %C, i1 true, i1 false
+ ret i1 %V
+; CHECK: @test4
+; CHECK: ret i1 %C
+}
+
+define i1 @test5(i1 %C) {
+ ; V = !C
+ %V = select i1 %C, i1 false, i1 true
+ ret i1 %V
+; CHECK: @test5
+; CHECK: xor i1 %C, true
+; CHECK: ret i1
+}
+
+define i32 @test6(i1 %C) {
+ ; V = cast C to int
+ %V = select i1 %C, i32 1, i32 0
+ ret i32 %V
+; CHECK: @test6
+; CHECK: %V = zext i1 %C to i32
+; CHECK: ret i32 %V
+}
+
+define i1 @test7(i1 %C, i1 %X) {
+ ; R = or C, X
+ %R = select i1 %C, i1 true, i1 %X
+ ret i1 %R
+; CHECK: @test7
+; CHECK: %R = or i1 %C, %X
+; CHECK: ret i1 %R
+}
+
+define i1 @test8(i1 %C, i1 %X) {
+ ; R = and C, X
+ %R = select i1 %C, i1 %X, i1 false
+ ret i1 %R
+; CHECK: @test8
+; CHECK: %R = and i1 %C, %X
+; CHECK: ret i1 %R
+}
+
+define i1 @test9(i1 %C, i1 %X) {
+ ; R = and !C, X
+ %R = select i1 %C, i1 false, i1 %X
+ ret i1 %R
+; CHECK: @test9
+; CHECK: xor i1 %C, true
+; CHECK: %R = and i1
+; CHECK: ret i1 %R
+}
+
+define i1 @test10(i1 %C, i1 %X) {
+ ; R = or !C, X
+ %R = select i1 %C, i1 %X, i1 true
+ ret i1 %R
+; CHECK: @test10
+; CHECK: xor i1 %C, true
+; CHECK: %R = or i1
+; CHECK: ret i1 %R
+}
+
+define i32 @test11(i32 %a) {
+ %C = icmp eq i32 %a, 0
+ %R = select i1 %C, i32 0, i32 1
+ ret i32 %R
+; CHECK: @test11
+; CHECK: icmp ne i32 %a, 0
+; CHECK: %R = zext i1
+; CHECK: ret i32 %R
+}
+
+define i32 @test12(i1 %cond, i32 %a) {
+ %b = or i32 %a, 1
+ %c = select i1 %cond, i32 %b, i32 %a
+ ret i32 %c
+; CHECK: @test12
+; CHECK: %b = zext i1 %cond to i32
+; CHECK: %c = or i32 %b, %a
+; CHECK: ret i32 %c
+}
+
+define i32 @test12a(i1 %cond, i32 %a) {
+ %b = ashr i32 %a, 1
+ %c = select i1 %cond, i32 %b, i32 %a
+ ret i32 %c
+; CHECK: @test12a
+; CHECK: %b = zext i1 %cond to i32
+; CHECK: %c = ashr i32 %a, %b
+; CHECK: ret i32 %c
+}
+
+define i32 @test12b(i1 %cond, i32 %a) {
+ %b = ashr i32 %a, 1
+ %c = select i1 %cond, i32 %a, i32 %b
+ ret i32 %c
+; CHECK: @test12b
+; CHECK: zext i1 %cond to i32
+; CHECK: %b = xor i32
+; CHECK: %c = ashr i32 %a, %b
+; CHECK: ret i32 %c
+}
+
+define i32 @test13(i32 %a, i32 %b) {
+ %C = icmp eq i32 %a, %b
+ %V = select i1 %C, i32 %a, i32 %b
+ ret i32 %V
+; CHECK: @test13
+; CHECK: ret i32 %b
+}
+
+define i32 @test13a(i32 %a, i32 %b) {
+ %C = icmp ne i32 %a, %b
+ %V = select i1 %C, i32 %a, i32 %b
+ ret i32 %V
+; CHECK: @test13a
+; CHECK: ret i32 %a
+}
+
+define i32 @test13b(i32 %a, i32 %b) {
+ %C = icmp eq i32 %a, %b
+ %V = select i1 %C, i32 %b, i32 %a
+ ret i32 %V
+; CHECK: @test13b
+; CHECK: ret i32 %a
+}
+
+define i1 @test14a(i1 %C, i32 %X) {
+ %V = select i1 %C, i32 %X, i32 0
+ ; (X < 1) | !C
+ %R = icmp slt i32 %V, 1
+ ret i1 %R
+; CHECK: @test14a
+; CHECK: icmp slt i32 %X, 1
+; CHECK: xor i1 %C, true
+; CHECK: or i1
+; CHECK: ret i1 %R
+}
+
+define i1 @test14b(i1 %C, i32 %X) {
+ %V = select i1 %C, i32 0, i32 %X
+ ; (X < 1) | C
+ %R = icmp slt i32 %V, 1
+ ret i1 %R
+; CHECK: @test14b
+; CHECK: icmp slt i32 %X, 1
+; CHECK: or i1
+; CHECK: ret i1 %R
+}
+
+;; Code sequence for (X & 16) ? 16 : 0
+define i32 @test15a(i32 %X) {
+ %t1 = and i32 %X, 16
+ %t2 = icmp eq i32 %t1, 0
+ %t3 = select i1 %t2, i32 0, i32 16
+ ret i32 %t3
+; CHECK: @test15a
+; CHECK: %t1 = and i32 %X, 16
+; CHECK: ret i32 %t1
+}
+
+;; Code sequence for (X & 32) ? 0 : 24
+define i32 @test15b(i32 %X) {
+ %t1 = and i32 %X, 32
+ %t2 = icmp eq i32 %t1, 0
+ %t3 = select i1 %t2, i32 32, i32 0
+ ret i32 %t3
+; CHECK: @test15b
+; CHECK: %t1 = and i32 %X, 32
+; CHECK: xor i32 %t1, 32
+; CHECK: ret i32
+}
+
+;; Alternate code sequence for (X & 16) ? 16 : 0
+define i32 @test15c(i32 %X) {
+ %t1 = and i32 %X, 16
+ %t2 = icmp eq i32 %t1, 16
+ %t3 = select i1 %t2, i32 16, i32 0
+ ret i32 %t3
+; CHECK: @test15c
+; CHECK: %t1 = and i32 %X, 16
+; CHECK: ret i32 %t1
+}
+
+;; Alternate code sequence for (X & 16) ? 16 : 0
+define i32 @test15d(i32 %X) {
+ %t1 = and i32 %X, 16
+ %t2 = icmp ne i32 %t1, 0
+ %t3 = select i1 %t2, i32 16, i32 0
+ ret i32 %t3
+; CHECK: @test15d
+; CHECK: %t1 = and i32 %X, 16
+; CHECK: ret i32 %t1
+}
+
+;; (a & 128) ? 256 : 0
+define i32 @test15e(i32 %X) {
+ %t1 = and i32 %X, 128
+ %t2 = icmp ne i32 %t1, 0
+ %t3 = select i1 %t2, i32 256, i32 0
+ ret i32 %t3
+; CHECK: @test15e
+; CHECK: %t1 = shl i32 %X, 1
+; CHECK: and i32 %t1, 256
+; CHECK: ret i32
+}
+
+;; (a & 128) ? 0 : 256
+define i32 @test15f(i32 %X) {
+ %t1 = and i32 %X, 128
+ %t2 = icmp ne i32 %t1, 0
+ %t3 = select i1 %t2, i32 0, i32 256
+ ret i32 %t3
+; CHECK: @test15f
+; CHECK: %t1 = shl i32 %X, 1
+; CHECK: and i32 %t1, 256
+; CHECK: xor i32 %{{.*}}, 256
+; CHECK: ret i32
+}
+
+;; (a & 8) ? -1 : -9
+define i32 @test15g(i32 %X) {
+ %t1 = and i32 %X, 8
+ %t2 = icmp ne i32 %t1, 0
+ %t3 = select i1 %t2, i32 -1, i32 -9
+ ret i32 %t3
+; CHECK: @test15g
+; CHECK-NEXT: %1 = or i32 %X, -9
+; CHECK-NEXT: ret i32 %1
+}
+
+;; (a & 8) ? -9 : -1
+define i32 @test15h(i32 %X) {
+ %t1 = and i32 %X, 8
+ %t2 = icmp ne i32 %t1, 0
+ %t3 = select i1 %t2, i32 -9, i32 -1
+ ret i32 %t3
+; CHECK: @test15h
+; CHECK-NEXT: %1 = or i32 %X, -9
+; CHECK-NEXT: %2 = xor i32 %1, 8
+; CHECK-NEXT: ret i32 %2
+}
+
+;; (a & 2) ? 577 : 1089
+define i32 @test15i(i32 %X) {
+ %t1 = and i32 %X, 2
+ %t2 = icmp ne i32 %t1, 0
+ %t3 = select i1 %t2, i32 577, i32 1089
+ ret i32 %t3
+; CHECK: @test15i
+; CHECK-NEXT: %t1 = shl i32 %X, 8
+; CHECK-NEXT: %1 = and i32 %t1, 512
+; CHECK-NEXT: %2 = xor i32 %1, 512
+; CHECK-NEXT: %3 = add i32 %2, 577
+; CHECK-NEXT: ret i32 %3
+}
+
+;; (a & 2) ? 1089 : 577
+define i32 @test15j(i32 %X) {
+ %t1 = and i32 %X, 2
+ %t2 = icmp ne i32 %t1, 0
+ %t3 = select i1 %t2, i32 1089, i32 577
+ ret i32 %t3
+; CHECK: @test15j
+; CHECK-NEXT: %t1 = shl i32 %X, 8
+; CHECK-NEXT: %1 = and i32 %t1, 512
+; CHECK-NEXT: %2 = add i32 %1, 577
+; CHECK-NEXT: ret i32 %2
+}
+
+define i32 @test16(i1 %C, i32* %P) {
+ %P2 = select i1 %C, i32* %P, i32* null
+ %V = load i32* %P2
+ ret i32 %V
+; CHECK: @test16
+; CHECK-NEXT: %V = load i32* %P
+; CHECK: ret i32 %V
+}
+
+define i1 @test17(i32* %X, i1 %C) {
+ %R = select i1 %C, i32* %X, i32* null
+ %RV = icmp eq i32* %R, null
+ ret i1 %RV
+; CHECK: @test17
+; CHECK: icmp eq i32* %X, null
+; CHECK: xor i1 %C, true
+; CHECK: %RV = or i1
+; CHECK: ret i1 %RV
+}
+
+define i32 @test18(i32 %X, i32 %Y, i1 %C) {
+ %R = select i1 %C, i32 %X, i32 0
+ %V = sdiv i32 %Y, %R
+ ret i32 %V
+; CHECK: @test18
+; CHECK: %V = sdiv i32 %Y, %X
+; CHECK: ret i32 %V
+}
+
+define i32 @test19(i32 %x) {
+ %tmp = icmp ugt i32 %x, 2147483647
+ %retval = select i1 %tmp, i32 -1, i32 0
+ ret i32 %retval
+; CHECK: @test19
+; CHECK-NEXT: ashr i32 %x, 31
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test20(i32 %x) {
+ %tmp = icmp slt i32 %x, 0
+ %retval = select i1 %tmp, i32 -1, i32 0
+ ret i32 %retval
+; CHECK: @test20
+; CHECK-NEXT: ashr i32 %x, 31
+; CHECK-NEXT: ret i32
+}
+
+define i64 @test21(i32 %x) {
+ %tmp = icmp slt i32 %x, 0
+ %retval = select i1 %tmp, i64 -1, i64 0
+ ret i64 %retval
+; CHECK: @test21
+; CHECK-NEXT: ashr i32 %x, 31
+; CHECK-NEXT: sext i32
+; CHECK-NEXT: ret i64
+}
+
+define i16 @test22(i32 %x) {
+ %tmp = icmp slt i32 %x, 0
+ %retval = select i1 %tmp, i16 -1, i16 0
+ ret i16 %retval
+; CHECK: @test22
+; CHECK-NEXT: ashr i32 %x, 31
+; CHECK-NEXT: trunc i32
+; CHECK-NEXT: ret i16
+}
+
+define i1 @test23(i1 %a, i1 %b) {
+ %c = select i1 %a, i1 %b, i1 %a
+ ret i1 %c
+; CHECK: @test23
+; CHECK-NEXT: %c = and i1 %a, %b
+; CHECK-NEXT: ret i1 %c
+}
+
+define i1 @test24(i1 %a, i1 %b) {
+ %c = select i1 %a, i1 %a, i1 %b
+ ret i1 %c
+; CHECK: @test24
+; CHECK-NEXT: %c = or i1 %a, %b
+; CHECK-NEXT: ret i1 %c
+}
+
+define i32 @test25(i1 %c) {
+entry:
+ br i1 %c, label %jump, label %ret
+jump:
+ br label %ret
+ret:
+ %a = phi i1 [true, %jump], [false, %entry]
+ %b = select i1 %a, i32 10, i32 20
+ ret i32 %b
+; CHECK: @test25
+; CHECK: %a = phi i32 [ 10, %jump ], [ 20, %entry ]
+; CHECK-NEXT: ret i32 %a
+}
+
+define i32 @test26(i1 %cond) {
+entry:
+ br i1 %cond, label %jump, label %ret
+jump:
+ %c = or i1 false, false
+ br label %ret
+ret:
+ %a = phi i1 [true, %jump], [%c, %entry]
+ %b = select i1 %a, i32 10, i32 20
+ ret i32 %b
+; CHECK: @test26
+; CHECK: %a = phi i32 [ 10, %jump ], [ 20, %entry ]
+; CHECK-NEXT: ret i32 %a
+}
+
+define i32 @test27(i1 %c, i32 %A, i32 %B) {
+entry:
+ br i1 %c, label %jump, label %ret
+jump:
+ br label %ret
+ret:
+ %a = phi i1 [true, %jump], [false, %entry]
+ %b = select i1 %a, i32 %A, i32 %B
+ ret i32 %b
+; CHECK: @test27
+; CHECK: %a = phi i32 [ %A, %jump ], [ %B, %entry ]
+; CHECK-NEXT: ret i32 %a
+}
+
+define i32 @test28(i1 %cond, i32 %A, i32 %B) {
+entry:
+ br i1 %cond, label %jump, label %ret
+jump:
+ br label %ret
+ret:
+ %c = phi i32 [%A, %jump], [%B, %entry]
+ %a = phi i1 [true, %jump], [false, %entry]
+ %b = select i1 %a, i32 %A, i32 %c
+ ret i32 %b
+; CHECK: @test28
+; CHECK: %a = phi i32 [ %A, %jump ], [ %B, %entry ]
+; CHECK-NEXT: ret i32 %a
+}
+
+define i32 @test29(i1 %cond, i32 %A, i32 %B) {
+entry:
+ br i1 %cond, label %jump, label %ret
+jump:
+ br label %ret
+ret:
+ %c = phi i32 [%A, %jump], [%B, %entry]
+ %a = phi i1 [true, %jump], [false, %entry]
+ br label %next
+
+next:
+ %b = select i1 %a, i32 %A, i32 %c
+ ret i32 %b
+; CHECK: @test29
+; CHECK: %a = phi i32 [ %A, %jump ], [ %B, %entry ]
+; CHECK: ret i32 %a
+}
+
+
+; SMAX(SMAX(x, y), x) -> SMAX(x, y)
+define i32 @test30(i32 %x, i32 %y) {
+ %cmp = icmp sgt i32 %x, %y
+ %cond = select i1 %cmp, i32 %x, i32 %y
+
+ %cmp5 = icmp sgt i32 %cond, %x
+ %retval = select i1 %cmp5, i32 %cond, i32 %x
+ ret i32 %retval
+; CHECK: @test30
+; CHECK: ret i32 %cond
+}
+
+; UMAX(UMAX(x, y), x) -> UMAX(x, y)
+define i32 @test31(i32 %x, i32 %y) {
+ %cmp = icmp ugt i32 %x, %y
+ %cond = select i1 %cmp, i32 %x, i32 %y
+ %cmp5 = icmp ugt i32 %cond, %x
+ %retval = select i1 %cmp5, i32 %cond, i32 %x
+ ret i32 %retval
+; CHECK: @test31
+; CHECK: ret i32 %cond
+}
+
+; SMIN(SMIN(x, y), x) -> SMIN(x, y)
+define i32 @test32(i32 %x, i32 %y) {
+ %cmp = icmp sgt i32 %x, %y
+ %cond = select i1 %cmp, i32 %y, i32 %x
+ %cmp5 = icmp sgt i32 %cond, %x
+ %retval = select i1 %cmp5, i32 %x, i32 %cond
+ ret i32 %retval
+; CHECK: @test32
+; CHECK: ret i32 %cond
+}
+
+; MAX(MIN(x, y), x) -> x
+define i32 @test33(i32 %x, i32 %y) {
+ %cmp = icmp sgt i32 %x, %y
+ %cond = select i1 %cmp, i32 %y, i32 %x
+ %cmp5 = icmp sgt i32 %cond, %x
+ %retval = select i1 %cmp5, i32 %cond, i32 %x
+ ret i32 %retval
+; CHECK: @test33
+; CHECK: ret i32 %x
+}
+
+; MIN(MAX(x, y), x) -> x
+define i32 @test34(i32 %x, i32 %y) {
+ %cmp = icmp sgt i32 %x, %y
+ %cond = select i1 %cmp, i32 %x, i32 %y
+ %cmp5 = icmp sgt i32 %cond, %x
+ %retval = select i1 %cmp5, i32 %x, i32 %cond
+ ret i32 %retval
+; CHECK: @test34
+; CHECK: ret i32 %x
+}
+
+define i32 @test35(i32 %x) {
+ %cmp = icmp sge i32 %x, 0
+ %cond = select i1 %cmp, i32 60, i32 100
+ ret i32 %cond
+; CHECK: @test35
+; CHECK: ashr i32 %x, 31
+; CHECK: and i32 {{.*}}, 40
+; CHECK: add i32 {{.*}}, 60
+; CHECK: ret
+}
+
+define i32 @test36(i32 %x) {
+ %cmp = icmp slt i32 %x, 0
+ %cond = select i1 %cmp, i32 60, i32 100
+ ret i32 %cond
+; CHECK: @test36
+; CHECK: ashr i32 %x, 31
+; CHECK: and i32 {{.*}}, -40
+; CHECK: add i32 {{.*}}, 100
+; CHECK: ret
+}
+
+define i32 @test37(i32 %x) {
+ %cmp = icmp sgt i32 %x, -1
+ %cond = select i1 %cmp, i32 1, i32 -1
+ ret i32 %cond
+; CHECK: @test37
+; CHECK: ashr i32 %x, 31
+; CHECK: or i32 {{.*}}, 1
+; CHECK: ret
+}
+
+define i1 @test38(i1 %cond) {
+ %zero = alloca i32
+ %one = alloca i32
+ %ptr = select i1 %cond, i32* %zero, i32* %one
+ %isnull = icmp eq i32* %ptr, null
+ ret i1 %isnull
+; CHECK: @test38
+; CHECK: ret i1 false
+}
+
+define i1 @test39(i1 %cond, double %x) {
+ %s = select i1 %cond, double %x, double 0x7FF0000000000000 ; RHS = +infty
+ %cmp = fcmp ule double %x, %s
+ ret i1 %cmp
+; CHECK: @test39
+; CHECK: ret i1 true
+}
+
+define i1 @test40(i1 %cond) {
+ %a = alloca i32
+ %b = alloca i32
+ %c = alloca i32
+ %s = select i1 %cond, i32* %a, i32* %b
+ %r = icmp eq i32* %s, %c
+ ret i1 %r
+; CHECK: @test40
+; CHECK: ret i1 false
+}
+
+define i32 @test41(i1 %cond, i32 %x, i32 %y) {
+ %z = and i32 %x, %y
+ %s = select i1 %cond, i32 %y, i32 %z
+ %r = and i32 %x, %s
+ ret i32 %r
+; CHECK: @test41
+; CHECK-NEXT: and i32 %x, %y
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test42(i32 %x, i32 %y) {
+ %b = add i32 %y, -1
+ %cond = icmp eq i32 %x, 0
+ %c = select i1 %cond, i32 %b, i32 %y
+ ret i32 %c
+; CHECK: @test42
+; CHECK-NEXT: %cond = icmp eq i32 %x, 0
+; CHECK-NEXT: %b = sext i1 %cond to i32
+; CHECK-NEXT: %c = add i32 %b, %y
+; CHECK-NEXT: ret i32 %c
+}
+
+define i64 @test43(i32 %a) nounwind {
+ %a_ext = sext i32 %a to i64
+ %is_a_nonnegative = icmp sgt i32 %a, -1
+ %max = select i1 %is_a_nonnegative, i64 %a_ext, i64 0
+ ret i64 %max
+; CHECK: @test43
+; CHECK-NEXT: %a_ext = sext i32 %a to i64
+; CHECK-NEXT: %is_a_nonnegative = icmp slt i64 %a_ext, 0
+; CHECK-NEXT: %max = select i1 %is_a_nonnegative, i64 0, i64 %a_ext
+; CHECK-NEXT: ret i64 %max
+}
+
+define i64 @test44(i32 %a) nounwind {
+ %a_ext = sext i32 %a to i64
+ %is_a_nonpositive = icmp slt i32 %a, 1
+ %min = select i1 %is_a_nonpositive, i64 %a_ext, i64 0
+ ret i64 %min
+; CHECK: @test44
+; CHECK-NEXT: %a_ext = sext i32 %a to i64
+; CHECK-NEXT: %is_a_nonpositive = icmp sgt i64 %a_ext, 0
+; CHECK-NEXT: %min = select i1 %is_a_nonpositive, i64 0, i64 %a_ext
+; CHECK-NEXT: ret i64 %min
+}
+define i64 @test45(i32 %a) nounwind {
+ %a_ext = zext i32 %a to i64
+ %is_a_nonnegative = icmp ugt i32 %a, 2
+ %max = select i1 %is_a_nonnegative, i64 %a_ext, i64 3
+ ret i64 %max
+; CHECK: @test45
+; CHECK-NEXT: %a_ext = zext i32 %a to i64
+; CHECK-NEXT: %is_a_nonnegative = icmp ult i64 %a_ext, 3
+; CHECK-NEXT: %max = select i1 %is_a_nonnegative, i64 3, i64 %a_ext
+; CHECK-NEXT: ret i64 %max
+}
+
+define i64 @test46(i32 %a) nounwind {
+ %a_ext = zext i32 %a to i64
+ %is_a_nonpositive = icmp ult i32 %a, 3
+ %min = select i1 %is_a_nonpositive, i64 %a_ext, i64 2
+ ret i64 %min
+; CHECK: @test46
+; CHECK-NEXT: %a_ext = zext i32 %a to i64
+; CHECK-NEXT: %is_a_nonpositive = icmp ugt i64 %a_ext, 2
+; CHECK-NEXT: %min = select i1 %is_a_nonpositive, i64 2, i64 %a_ext
+; CHECK-NEXT: ret i64 %min
+}
+define i64 @test47(i32 %a) nounwind {
+ %a_ext = sext i32 %a to i64
+ %is_a_nonnegative = icmp ugt i32 %a, 2
+ %max = select i1 %is_a_nonnegative, i64 %a_ext, i64 3
+ ret i64 %max
+; CHECK: @test47
+; CHECK-NEXT: %a_ext = sext i32 %a to i64
+; CHECK-NEXT: %is_a_nonnegative = icmp ult i64 %a_ext, 3
+; CHECK-NEXT: %max = select i1 %is_a_nonnegative, i64 3, i64 %a_ext
+; CHECK-NEXT: ret i64 %max
+}
+
+define i64 @test48(i32 %a) nounwind {
+ %a_ext = sext i32 %a to i64
+ %is_a_nonpositive = icmp ult i32 %a, 3
+ %min = select i1 %is_a_nonpositive, i64 %a_ext, i64 2
+ ret i64 %min
+; CHECK: @test48
+; CHECK-NEXT: %a_ext = sext i32 %a to i64
+; CHECK-NEXT: %is_a_nonpositive = icmp ugt i64 %a_ext, 2
+; CHECK-NEXT: %min = select i1 %is_a_nonpositive, i64 2, i64 %a_ext
+; CHECK-NEXT: ret i64 %min
+}
+
+define i64 @test49(i32 %a) nounwind {
+ %a_ext = sext i32 %a to i64
+ %is_a_nonpositive = icmp ult i32 %a, 3
+ %min = select i1 %is_a_nonpositive, i64 2, i64 %a_ext
+ ret i64 %min
+; CHECK: @test49
+; CHECK-NEXT: %a_ext = sext i32 %a to i64
+; CHECK-NEXT: %is_a_nonpositive = icmp ugt i64 %a_ext, 2
+; CHECK-NEXT: %min = select i1 %is_a_nonpositive, i64 %a_ext, i64 2
+; CHECK-NEXT: ret i64 %min
+}
+define i64 @test50(i32 %a) nounwind {
+ %is_a_nonpositive = icmp ult i32 %a, 3
+ %a_ext = sext i32 %a to i64
+ %min = select i1 %is_a_nonpositive, i64 2, i64 %a_ext
+ ret i64 %min
+; CHECK: @test50
+; CHECK-NEXT: %a_ext = sext i32 %a to i64
+; CHECK-NEXT: %is_a_nonpositive = icmp ugt i64 %a_ext, 2
+; CHECK-NEXT: %min = select i1 %is_a_nonpositive, i64 %a_ext, i64 2
+; CHECK-NEXT: ret i64 %min
+}
+
+; PR8994
+
+; This select instruction can't be eliminated because trying to do so would
+; change the number of vector elements. This used to assert.
+define i48 @test51(<3 x i1> %icmp, <3 x i16> %tmp) {
+; CHECK: @test51
+ %select = select <3 x i1> %icmp, <3 x i16> zeroinitializer, <3 x i16> %tmp
+; CHECK: select <3 x i1>
+ %tmp2 = bitcast <3 x i16> %select to i48
+ ret i48 %tmp2
+}
+
+; PR8575
+
+define i32 @test52(i32 %n, i32 %m) nounwind {
+; CHECK: @test52
+ %cmp = icmp sgt i32 %n, %m
+ %. = select i1 %cmp, i32 1, i32 3
+ %add = add nsw i32 %., 3
+ %storemerge = select i1 %cmp, i32 %., i32 %add
+; CHECK: select i1 %cmp, i32 1, i32 6
+ ret i32 %storemerge
+}
+
+; PR9454
+define i32 @test53(i32 %x) nounwind {
+ %and = and i32 %x, 2
+ %cmp = icmp eq i32 %and, %x
+ %sel = select i1 %cmp, i32 2, i32 1
+ ret i32 %sel
+; CHECK: @test53
+; CHECK: select i1 %cmp
+; CHECK: ret
+}
+
+define i32 @test54(i32 %X, i32 %Y) {
+ %A = ashr exact i32 %X, %Y
+ %B = icmp eq i32 %A, 0
+ %C = select i1 %B, i32 %A, i32 1
+ ret i32 %C
+; CHECK: @test54
+; CHECK-NOT: ashr
+; CHECK-NOT: select
+; CHECK: icmp ne i32 %X, 0
+; CHECK: zext
+; CHECK: ret
+}
+
+define i1 @test55(i1 %X, i32 %Y, i32 %Z) {
+ %A = ashr exact i32 %Y, %Z
+ %B = select i1 %X, i32 %Y, i32 %A
+ %C = icmp eq i32 %B, 0
+ ret i1 %C
+; CHECK: @test55
+; CHECK-NOT: ashr
+; CHECK-NOT: select
+; CHECK: icmp eq
+; CHECK: ret i1
+}
+
+define i32 @test56(i16 %x) nounwind {
+ %tobool = icmp eq i16 %x, 0
+ %conv = zext i16 %x to i32
+ %cond = select i1 %tobool, i32 0, i32 %conv
+ ret i32 %cond
+; CHECK: @test56
+; CHECK-NEXT: zext
+; CHECK-NEXT: ret
+}
+
+define i32 @test57(i32 %x, i32 %y) nounwind {
+ %and = and i32 %x, %y
+ %tobool = icmp eq i32 %x, 0
+ %.and = select i1 %tobool, i32 0, i32 %and
+ ret i32 %.and
+; CHECK: @test57
+; CHECK-NEXT: and i32 %x, %y
+; CHECK-NEXT: ret
+}
+
+define i32 @test58(i16 %x) nounwind {
+ %tobool = icmp ne i16 %x, 1
+ %conv = zext i16 %x to i32
+ %cond = select i1 %tobool, i32 %conv, i32 1
+ ret i32 %cond
+; CHECK: @test58
+; CHECK-NEXT: zext
+; CHECK-NEXT: ret
+}
+
+define i32 @test59(i32 %x, i32 %y) nounwind {
+ %and = and i32 %x, %y
+ %tobool = icmp ne i32 %x, %y
+ %.and = select i1 %tobool, i32 %and, i32 %y
+ ret i32 %.and
+; CHECK: @test59
+; CHECK-NEXT: and i32 %x, %y
+; CHECK-NEXT: ret
+}
+
+define i1 @test60(i32 %x, i1* %y) nounwind {
+ %cmp = icmp eq i32 %x, 0
+ %load = load i1* %y, align 1
+ %cmp1 = icmp slt i32 %x, 1
+ %sel = select i1 %cmp, i1 %load, i1 %cmp1
+ ret i1 %sel
+; CHECK: @test60
+; CHECK: select
+}
+
+@glbl = constant i32 10
+define i32 @test61(i32* %ptr) {
+ %A = load i32* %ptr
+ %B = icmp eq i32* %ptr, @glbl
+ %C = select i1 %B, i32 %A, i32 10
+ ret i32 %C
+; CHECK: @test61
+; CHECK: ret i32 10
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/set.ll b/src/LLVM/test/Transforms/InstCombine/set.ll
new file mode 100644
index 0000000..4cad0769f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/set.ll
@@ -0,0 +1,171 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | not grep icmp
+; END.
+
+@X = external global i32 ; <i32*> [#uses=2]
+
+define i1 @test1(i32 %A) {
+ %B = icmp eq i32 %A, %A ; <i1> [#uses=1]
+ ; Never true
+ %C = icmp eq i32* @X, null ; <i1> [#uses=1]
+ %D = and i1 %B, %C ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test2(i32 %A) {
+ %B = icmp ne i32 %A, %A ; <i1> [#uses=1]
+ ; Never false
+ %C = icmp ne i32* @X, null ; <i1> [#uses=1]
+ %D = or i1 %B, %C ; <i1> [#uses=1]
+ ret i1 %D
+}
+
+define i1 @test3(i32 %A) {
+ %B = icmp slt i32 %A, %A ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+
+define i1 @test4(i32 %A) {
+ %B = icmp sgt i32 %A, %A ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test5(i32 %A) {
+ %B = icmp sle i32 %A, %A ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test6(i32 %A) {
+ %B = icmp sge i32 %A, %A ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test7(i32 %A) {
+ ; true
+ %B = icmp uge i32 %A, 0 ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test8(i32 %A) {
+ ; false
+ %B = icmp ult i32 %A, 0 ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+;; Test operations on boolean values; these should all be eliminated.
+define i1 @test9(i1 %A) {
+ ; false
+ %B = icmp ult i1 %A, false ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test10(i1 %A) {
+ ; false
+ %B = icmp ugt i1 %A, true ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test11(i1 %A) {
+ ; true
+ %B = icmp ule i1 %A, true ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test12(i1 %A) {
+ ; true
+ %B = icmp uge i1 %A, false ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test13(i1 %A, i1 %B) {
+ ; A | ~B
+ %C = icmp uge i1 %A, %B ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test14(i1 %A, i1 %B) {
+ ; ~(A ^ B)
+ %C = icmp eq i1 %A, %B ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test16(i32 %A) {
+ %B = and i32 %A, 5 ; <i32> [#uses=1]
+ ; Is never true
+ %C = icmp eq i32 %B, 8 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test17(i8 %A) {
+ %B = or i8 %A, 1 ; <i8> [#uses=1]
+ ; Always false
+ %C = icmp eq i8 %B, 2 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test18(i1 %C, i32 %a) {
+entry:
+ br i1 %C, label %endif, label %else
+
+else: ; preds = %entry
+ br label %endif
+
+endif: ; preds = %else, %entry
+ %b.0 = phi i32 [ 0, %entry ], [ 1, %else ] ; <i32> [#uses=1]
+ %tmp.4 = icmp slt i32 %b.0, 123 ; <i1> [#uses=1]
+ ret i1 %tmp.4
+}
+
+define i1 @test19(i1 %A, i1 %B) {
+ %a = zext i1 %A to i32 ; <i32> [#uses=1]
+ %b = zext i1 %B to i32 ; <i32> [#uses=1]
+ %C = icmp eq i32 %a, %b ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i32 @test20(i32 %A) {
+ %B = and i32 %A, 1 ; <i32> [#uses=1]
+ %C = icmp ne i32 %B, 0 ; <i1> [#uses=1]
+ %D = zext i1 %C to i32 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test21(i32 %a) {
+ %tmp.6 = and i32 %a, 4 ; <i32> [#uses=1]
+ %not.tmp.7 = icmp ne i32 %tmp.6, 0 ; <i1> [#uses=1]
+ %retval = zext i1 %not.tmp.7 to i32 ; <i32> [#uses=1]
+ ret i32 %retval
+}
+
+define i1 @test22(i32 %A, i32 %X) {
+ %B = and i32 %A, 100663295 ; <i32> [#uses=1]
+ %C = icmp ult i32 %B, 268435456 ; <i1> [#uses=1]
+ %Y = and i32 %X, 7 ; <i32> [#uses=1]
+ %Z = icmp sgt i32 %Y, -1 ; <i1> [#uses=1]
+ %R = or i1 %C, %Z ; <i1> [#uses=1]
+ ret i1 %R
+}
+
+define i32 @test23(i32 %a) {
+ %tmp.1 = and i32 %a, 1 ; <i32> [#uses=1]
+ %tmp.2 = icmp eq i32 %tmp.1, 0 ; <i1> [#uses=1]
+ %tmp.3 = zext i1 %tmp.2 to i32 ; <i32> [#uses=1]
+ ret i32 %tmp.3
+}
+
+define i32 @test24(i32 %a) {
+ %tmp1 = and i32 %a, 4 ; <i32> [#uses=1]
+ %tmp.1 = lshr i32 %tmp1, 2 ; <i32> [#uses=1]
+ %tmp.2 = icmp eq i32 %tmp.1, 0 ; <i1> [#uses=1]
+ %tmp.3 = zext i1 %tmp.2 to i32 ; <i32> [#uses=1]
+ ret i32 %tmp.3
+}
+
+define i1 @test25(i32 %A) {
+ %B = and i32 %A, 2 ; <i32> [#uses=1]
+ %C = icmp ugt i32 %B, 2 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/setcc-strength-reduce.ll b/src/LLVM/test/Transforms/InstCombine/setcc-strength-reduce.ll
new file mode 100644
index 0000000..3acc708
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/setcc-strength-reduce.ll
@@ -0,0 +1,37 @@
+; This test ensures that "strength reduction" of conditional expressions is
+; working. Basically this boils down to converting setlt,gt,le,ge instructions
+; into equivalent setne,eq instructions.
+;
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep -v {icmp eq} | grep -v {icmp ne} | not grep icmp
+; END.
+
+define i1 @test1(i32 %A) {
+ ; setne %A, 0
+ %B = icmp uge i32 %A, 1 ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test2(i32 %A) {
+ ; setne %A, 0
+ %B = icmp ugt i32 %A, 0 ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test3(i8 %A) {
+ ; setne %A, -128
+ %B = icmp sge i8 %A, -127 ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test4(i8 %A) {
+ ; setne %A, 127
+ %B = icmp sle i8 %A, 126 ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i1 @test5(i8 %A) {
+ ; setne %A, 127
+ %B = icmp slt i8 %A, 127 ; <i1> [#uses=1]
+ ret i1 %B
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/sext.ll b/src/LLVM/test/Transforms/InstCombine/sext.ll
new file mode 100644
index 0000000..f49a2ef
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/sext.ll
@@ -0,0 +1,186 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
+declare i32 @llvm.ctpop.i32(i32)
+declare i32 @llvm.ctlz.i32(i32)
+declare i32 @llvm.cttz.i32(i32)
+
+define i64 @test1(i32 %x) {
+ %t = call i32 @llvm.ctpop.i32(i32 %x)
+ %s = sext i32 %t to i64
+ ret i64 %s
+
+; CHECK: @test1
+; CHECK: zext i32 %t
+}
+
+define i64 @test2(i32 %x) {
+ %t = call i32 @llvm.ctlz.i32(i32 %x)
+ %s = sext i32 %t to i64
+ ret i64 %s
+
+; CHECK: @test2
+; CHECK: zext i32 %t
+}
+
+define i64 @test3(i32 %x) {
+ %t = call i32 @llvm.cttz.i32(i32 %x)
+ %s = sext i32 %t to i64
+ ret i64 %s
+
+; CHECK: @test3
+; CHECK: zext i32 %t
+}
+
+define i64 @test4(i32 %x) {
+ %t = udiv i32 %x, 3
+ %s = sext i32 %t to i64
+ ret i64 %s
+
+; CHECK: @test4
+; CHECK: zext i32 %t
+}
+
+define i64 @test5(i32 %x) {
+ %t = urem i32 %x, 30000
+ %s = sext i32 %t to i64
+ ret i64 %s
+; CHECK: @test5
+; CHECK: zext i32 %t
+}
+
+define i64 @test6(i32 %x) {
+ %u = lshr i32 %x, 3
+ %t = mul i32 %u, 3
+ %s = sext i32 %t to i64
+ ret i64 %s
+; CHECK: @test6
+; CHECK: zext i32 %t
+}
+
+define i64 @test7(i32 %x) {
+ %t = and i32 %x, 511
+ %u = sub i32 20000, %t
+ %s = sext i32 %u to i64
+ ret i64 %s
+; CHECK: @test7
+; CHECK: zext i32 %u to i64
+}
+
+define i32 @test8(i8 %a, i32 %f, i1 %p, i32* %z) {
+ %d = lshr i32 %f, 24
+ %e = select i1 %p, i32 %d, i32 0
+ %s = trunc i32 %e to i16
+ %n = sext i16 %s to i32
+ ret i32 %n
+; CHECK: @test8
+; CHECK: %d = lshr i32 %f, 24
+; CHECK: %n = select i1 %p, i32 %d, i32 0
+; CHECK: ret i32 %n
+}
+
+; rdar://6013816
+define i16 @test9(i16 %t, i1 %cond) nounwind {
+entry:
+ br i1 %cond, label %T, label %F
+T:
+ %t2 = sext i16 %t to i32
+ br label %F
+
+F:
+ %V = phi i32 [%t2, %T], [42, %entry]
+ %W = trunc i32 %V to i16
+ ret i16 %W
+; CHECK: @test9
+; CHECK: T:
+; CHECK-NEXT: br label %F
+; CHECK: F:
+; CHECK-NEXT: phi i16
+; CHECK-NEXT: ret i16
+}
+
+; PR2638
+define i32 @test10(i32 %i) nounwind {
+entry:
+ %tmp12 = trunc i32 %i to i8
+ %tmp16 = shl i8 %tmp12, 6
+ %a = ashr i8 %tmp16, 6
+ %b = sext i8 %a to i32
+ ret i32 %b
+; CHECK: @test10
+; CHECK: shl i32 %i, 30
+; CHECK-NEXT: ashr exact i32
+; CHECK-NEXT: ret i32
+}
+
+define void @test11(<2 x i16> %srcA, <2 x i16> %srcB, <2 x i16>* %dst) {
+ %cmp = icmp eq <2 x i16> %srcB, %srcA
+ %sext = sext <2 x i1> %cmp to <2 x i16>
+ %tmask = ashr <2 x i16> %sext, <i16 15, i16 15>
+ store <2 x i16> %tmask, <2 x i16>* %dst
+ ret void
+; CHECK: @test11
+; CHECK-NEXT: icmp eq
+; CHECK-NEXT: sext <2 x i1>
+; CHECK-NEXT: store <2 x i16>
+; CHECK-NEXT: ret
+}
+
+define i64 @test12(i32 %x) nounwind {
+ %shr = lshr i32 %x, 1
+ %sub = sub nsw i32 0, %shr
+ %conv = sext i32 %sub to i64
+ ret i64 %conv
+; CHECK: @test12
+; CHECK: sext
+; CHECK: ret
+}
+
+define i32 @test13(i32 %x) nounwind {
+ %and = and i32 %x, 8
+ %cmp = icmp eq i32 %and, 0
+ %ext = sext i1 %cmp to i32
+ ret i32 %ext
+; CHECK: @test13
+; CHECK-NEXT: %and = lshr i32 %x, 3
+; CHECK-NEXT: %1 = and i32 %and, 1
+; CHECK-NEXT: %sext = add i32 %1, -1
+; CHECK-NEXT: ret i32 %sext
+}
+
+define i32 @test14(i16 %x) nounwind {
+ %and = and i16 %x, 16
+ %cmp = icmp ne i16 %and, 16
+ %ext = sext i1 %cmp to i32
+ ret i32 %ext
+; CHECK: @test14
+; CHECK-NEXT: %and = lshr i16 %x, 4
+; CHECK-NEXT: %1 = and i16 %and, 1
+; CHECK-NEXT: %sext = add i16 %1, -1
+; CHECK-NEXT: %ext = sext i16 %sext to i32
+; CHECK-NEXT: ret i32 %ext
+}
+
+define i32 @test15(i32 %x) nounwind {
+ %and = and i32 %x, 16
+ %cmp = icmp ne i32 %and, 0
+ %ext = sext i1 %cmp to i32
+ ret i32 %ext
+; CHECK: @test15
+; CHECK-NEXT: %1 = shl i32 %x, 27
+; CHECK-NEXT: %sext = ashr i32 %1, 31
+; CHECK-NEXT: ret i32 %sext
+}
+
+define i32 @test16(i16 %x) nounwind {
+ %and = and i16 %x, 8
+ %cmp = icmp eq i16 %and, 8
+ %ext = sext i1 %cmp to i32
+ ret i32 %ext
+; CHECK: @test16
+; CHECK-NEXT: %1 = shl i16 %x, 12
+; CHECK-NEXT: %sext = ashr i16 %1, 15
+; CHECK-NEXT: %ext = sext i16 %sext to i32
+; CHECK-NEXT: ret i32 %ext
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/shift-sra.ll b/src/LLVM/test/Transforms/InstCombine/shift-sra.ll
new file mode 100644
index 0000000..f17b83f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/shift-sra.ll
@@ -0,0 +1,78 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+
+define i32 @test1(i32 %X, i8 %A) {
+ %shift.upgrd.1 = zext i8 %A to i32 ; <i32> [#uses=1]
+ ; can be logical shift.
+ %Y = ashr i32 %X, %shift.upgrd.1 ; <i32> [#uses=1]
+ %Z = and i32 %Y, 1 ; <i32> [#uses=1]
+ ret i32 %Z
+; CHECK: @test1
+; CHECK: lshr i32 %X, %shift.upgrd.1
+}
+
+define i32 @test2(i8 %tmp) {
+ %tmp3 = zext i8 %tmp to i32 ; <i32> [#uses=1]
+ %tmp4 = add i32 %tmp3, 7 ; <i32> [#uses=1]
+ %tmp5 = ashr i32 %tmp4, 3 ; <i32> [#uses=1]
+ ret i32 %tmp5
+; CHECK: @test2
+; CHECK: lshr i32 %tmp4, 3
+}
+
+define i64 @test3(i1 %X, i64 %Y, i1 %Cond) {
+ br i1 %Cond, label %T, label %F
+T:
+ %X2 = sext i1 %X to i64
+ br label %C
+F:
+ %Y2 = ashr i64 %Y, 63
+ br label %C
+C:
+ %P = phi i64 [%X2, %T], [%Y2, %F]
+ %S = ashr i64 %P, 12
+ ret i64 %S
+
+; CHECK: @test3
+; CHECK: %P = phi i64
+; CHECK-NEXT: ret i64 %P
+}
+
+define i64 @test4(i1 %X, i64 %Y, i1 %Cond) {
+ br i1 %Cond, label %T, label %F
+T:
+ %X2 = sext i1 %X to i64
+ br label %C
+F:
+ %Y2 = ashr i64 %Y, 63
+ br label %C
+C:
+ %P = phi i64 [%X2, %T], [%Y2, %F]
+ %R = shl i64 %P, 12
+ %S = ashr i64 %R, 12
+ ret i64 %S
+
+; CHECK: @test4
+; CHECK: %P = phi i64
+; CHECK-NEXT: ret i64 %P
+}
+
+; rdar://7732987
+define i32 @test5(i32 %Y) {
+ br i1 undef, label %A, label %C
+A:
+ br i1 undef, label %B, label %D
+B:
+ br label %D
+C:
+ br i1 undef, label %D, label %E
+D:
+ %P = phi i32 [0, %A], [0, %B], [%Y, %C]
+ %S = ashr i32 %P, 16
+ ret i32 %S
+; CHECK: @test5
+; CHECK: %P = phi i32
+; CHECK-NEXT: ashr i32 %P, 16
+E:
+ ret i32 0
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/shift.ll b/src/LLVM/test/Transforms/InstCombine/shift.ll
new file mode 100644
index 0000000..70fb4c8
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/shift.ll
@@ -0,0 +1,544 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @test1(i32 %A) {
+; CHECK: @test1
+; CHECK: ret i32 %A
+ %B = shl i32 %A, 0 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test2(i8 %A) {
+; CHECK: @test2
+; CHECK: ret i32 0
+ %shift.upgrd.1 = zext i8 %A to i32 ; <i32> [#uses=1]
+ %B = shl i32 0, %shift.upgrd.1 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test3(i32 %A) {
+; CHECK: @test3
+; CHECK: ret i32 %A
+ %B = ashr i32 %A, 0 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test4(i8 %A) {
+; CHECK: @test4
+; CHECK: ret i32 0
+ %shift.upgrd.2 = zext i8 %A to i32 ; <i32> [#uses=1]
+ %B = ashr i32 0, %shift.upgrd.2 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+
+define i32 @test5(i32 %A) {
+; CHECK: @test5
+; CHECK: ret i32 undef
+ %B = lshr i32 %A, 32 ;; shift all bits out
+ ret i32 %B
+}
+
+define i32 @test5a(i32 %A) {
+; CHECK: @test5a
+; CHECK: ret i32 undef
+ %B = shl i32 %A, 32 ;; shift all bits out
+ ret i32 %B
+}
+
+define i32 @test5b() {
+; CHECK: @test5b
+; CHECK: ret i32 -1
+ %B = ashr i32 undef, 2 ;; top two bits must be equal, so not undef
+ ret i32 %B
+}
+
+define i32 @test5b2(i32 %A) {
+; CHECK: @test5b2
+; CHECK: ret i32 -1
+ %B = ashr i32 undef, %A ;; top %A bits must be equal, so not undef
+ ret i32 %B
+}
+
+define i32 @test6(i32 %A) {
+; CHECK: @test6
+; CHECK-NEXT: mul i32 %A, 6
+; CHECK-NEXT: ret i32
+ %B = shl i32 %A, 1 ;; convert to an mul instruction
+ %C = mul i32 %B, 3
+ ret i32 %C
+}
+
+define i32 @test7(i8 %A) {
+; CHECK: @test7
+; CHECK-NEXT: ret i32 -1
+ %shift.upgrd.3 = zext i8 %A to i32
+ %B = ashr i32 -1, %shift.upgrd.3 ;; Always equal to -1
+ ret i32 %B
+}
+
+;; (A << 5) << 3 === A << 8 == 0
+define i8 @test8(i8 %A) {
+; CHECK: @test8
+; CHECK: ret i8 0
+ %B = shl i8 %A, 5 ; <i8> [#uses=1]
+ %C = shl i8 %B, 3 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+;; (A << 7) >> 7 === A & 1
+define i8 @test9(i8 %A) {
+; CHECK: @test9
+; CHECK-NEXT: and i8 %A, 1
+; CHECK-NEXT: ret i8
+ %B = shl i8 %A, 7 ; <i8> [#uses=1]
+ %C = lshr i8 %B, 7 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+;; (A >> 7) << 7 === A & 128
+define i8 @test10(i8 %A) {
+; CHECK: @test10
+; CHECK-NEXT: and i8 %A, -128
+; CHECK-NEXT: ret i8
+ %B = lshr i8 %A, 7 ; <i8> [#uses=1]
+ %C = shl i8 %B, 7 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+;; (A >> 3) << 4 === (A & 0xF8) << 1
+define i8 @test11(i8 %A) {
+; CHECK: @test11
+; CHECK-NEXT: mul i8 %A, 6
+; CHECK-NEXT: and i8
+; CHECK-NEXT: ret i8
+ %a = mul i8 %A, 3 ; <i8> [#uses=1]
+ %B = lshr i8 %a, 3 ; <i8> [#uses=1]
+ %C = shl i8 %B, 4 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+;; (A >> 8) << 8 === A & -256
+define i32 @test12(i32 %A) {
+; CHECK: @test12
+; CHECK-NEXT: and i32 %A, -256
+; CHECK-NEXT: ret i32
+ %B = ashr i32 %A, 8 ; <i32> [#uses=1]
+ %C = shl i32 %B, 8 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+;; (A >> 3) << 4 === (A & -8) * 2
+define i8 @test13(i8 %A) {
+; CHECK: @test13
+; CHECK-NEXT: mul i8 %A, 6
+; CHECK-NEXT: and i8
+; CHECK-NEXT: ret i8
+ %a = mul i8 %A, 3 ; <i8> [#uses=1]
+ %B = ashr i8 %a, 3 ; <i8> [#uses=1]
+ %C = shl i8 %B, 4 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+;; D = ((B | 1234) << 4) === ((B << 4) | (1234 << 4))
+define i32 @test14(i32 %A) {
+; CHECK: @test14
+; CHECK-NEXT: %B = and i32 %A, -19760
+; CHECK-NEXT: or i32 %B, 19744
+; CHECK-NEXT: ret i32
+ %B = lshr i32 %A, 4 ; <i32> [#uses=1]
+ %C = or i32 %B, 1234 ; <i32> [#uses=1]
+ %D = shl i32 %C, 4 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+;; D = ((A << 4) & 1234) >> 4 === A & 77
+define i32 @test14a(i32 %A) {
+; CHECK: @test14a
+; CHECK-NEXT: and i32 %A, 77
+; CHECK-NEXT: ret i32
+ %B = shl i32 %A, 4 ; <i32> [#uses=1]
+ %C = and i32 %B, 1234 ; <i32> [#uses=1]
+ %D = lshr i32 %C, 4 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test15(i1 %C) {
+; CHECK: @test15
+; CHECK-NEXT: select i1 %C, i32 12, i32 4
+; CHECK-NEXT: ret i32
+ %A = select i1 %C, i32 3, i32 1 ; <i32> [#uses=1]
+ %V = shl i32 %A, 2 ; <i32> [#uses=1]
+ ret i32 %V
+}
+
+define i32 @test15a(i1 %C) {
+; CHECK: @test15a
+; CHECK-NEXT: select i1 %C, i32 512, i32 128
+; CHECK-NEXT: ret i32
+ %A = select i1 %C, i8 3, i8 1 ; <i8> [#uses=1]
+ %shift.upgrd.4 = zext i8 %A to i32 ; <i32> [#uses=1]
+ %V = shl i32 64, %shift.upgrd.4 ; <i32> [#uses=1]
+ ret i32 %V
+}
+
+define i1 @test16(i32 %X) {
+; CHECK: @test16
+; CHECK-NEXT: and i32 %X, 16
+; CHECK-NEXT: icmp ne i32
+; CHECK-NEXT: ret i1
+ %tmp.3 = ashr i32 %X, 4
+ %tmp.6 = and i32 %tmp.3, 1
+ %tmp.7 = icmp ne i32 %tmp.6, 0
+ ret i1 %tmp.7
+}
+
+define i1 @test17(i32 %A) {
+; CHECK: @test17
+; CHECK-NEXT: and i32 %A, -8
+; CHECK-NEXT: icmp eq i32
+; CHECK-NEXT: ret i1
+ %B = lshr i32 %A, 3 ; <i32> [#uses=1]
+ %C = icmp eq i32 %B, 1234 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+
+define i1 @test18(i8 %A) {
+; CHECK: @test18
+; CHECK: ret i1 false
+
+ %B = lshr i8 %A, 7 ; <i8> [#uses=1]
+ ;; false
+ %C = icmp eq i8 %B, 123 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test19(i32 %A) {
+; CHECK: @test19
+; CHECK-NEXT: icmp ult i32 %A, 4
+; CHECK-NEXT: ret i1
+ %B = ashr i32 %A, 2 ; <i32> [#uses=1]
+ ;; (X & -4) == 0
+ %C = icmp eq i32 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+
+define i1 @test19a(i32 %A) {
+; CHECK: @test19a
+; CHECK-NEXT: and i32 %A, -4
+; CHECK-NEXT: icmp eq i32
+; CHECK-NEXT: ret i1
+ %B = ashr i32 %A, 2 ; <i32> [#uses=1]
+ ;; (X & -4) == -4
+ %C = icmp eq i32 %B, -1 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test20(i8 %A) {
+; CHECK: @test20
+; CHECK: ret i1 false
+ %B = ashr i8 %A, 7 ; <i8> [#uses=1]
+ ;; false
+ %C = icmp eq i8 %B, 123 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test21(i8 %A) {
+; CHECK: @test21
+; CHECK-NEXT: and i8 %A, 15
+; CHECK-NEXT: icmp eq i8
+; CHECK-NEXT: ret i1
+ %B = shl i8 %A, 4 ; <i8> [#uses=1]
+ %C = icmp eq i8 %B, -128 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i1 @test22(i8 %A) {
+; CHECK: @test22
+; CHECK-NEXT: and i8 %A, 15
+; CHECK-NEXT: icmp eq i8
+; CHECK-NEXT: ret i1
+ %B = shl i8 %A, 4 ; <i8> [#uses=1]
+ %C = icmp eq i8 %B, 0 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i8 @test23(i32 %A) {
+; CHECK: @test23
+; CHECK-NEXT: trunc i32 %A to i8
+; CHECK-NEXT: ret i8
+
+ ;; casts not needed
+ %B = shl i32 %A, 24 ; <i32> [#uses=1]
+ %C = ashr i32 %B, 24 ; <i32> [#uses=1]
+ %D = trunc i32 %C to i8 ; <i8> [#uses=1]
+ ret i8 %D
+}
+
+define i8 @test24(i8 %X) {
+; CHECK: @test24
+; CHECK-NEXT: and i8 %X, 3
+; CHECK-NEXT: ret i8
+ %Y = and i8 %X, -5 ; <i8> [#uses=1]
+ %Z = shl i8 %Y, 5 ; <i8> [#uses=1]
+ %Q = ashr i8 %Z, 5 ; <i8> [#uses=1]
+ ret i8 %Q
+}
+
+define i32 @test25(i32 %tmp.2, i32 %AA) {
+; CHECK: @test25
+; CHECK-NEXT: and i32 %tmp.2, -131072
+; CHECK-NEXT: add i32 %{{[^,]*}}, %AA
+; CHECK-NEXT: and i32 %{{[^,]*}}, -131072
+; CHECK-NEXT: ret i32
+ %x = lshr i32 %AA, 17 ; <i32> [#uses=1]
+ %tmp.3 = lshr i32 %tmp.2, 17 ; <i32> [#uses=1]
+ %tmp.5 = add i32 %tmp.3, %x ; <i32> [#uses=1]
+ %tmp.6 = shl i32 %tmp.5, 17 ; <i32> [#uses=1]
+ ret i32 %tmp.6
+}
+
+;; handle casts between shifts.
+define i32 @test26(i32 %A) {
+; CHECK: @test26
+; CHECK-NEXT: and i32 %A, -2
+; CHECK-NEXT: ret i32
+ %B = lshr i32 %A, 1 ; <i32> [#uses=1]
+ %C = bitcast i32 %B to i32 ; <i32> [#uses=1]
+ %D = shl i32 %C, 1 ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+
+define i1 @test27(i32 %x) nounwind {
+; CHECK: @test27
+; CHECK-NEXT: and i32 %x, 8
+; CHECK-NEXT: icmp ne i32
+; CHECK-NEXT: ret i1
+ %y = lshr i32 %x, 3
+ %z = trunc i32 %y to i1
+ ret i1 %z
+}
+
+define i8 @test28(i8 %x) {
+entry:
+; CHECK: @test28
+; CHECK: icmp slt i8 %x, 0
+; CHECK-NEXT: br i1
+ %tmp1 = lshr i8 %x, 7
+ %cond1 = icmp ne i8 %tmp1, 0
+ br i1 %cond1, label %bb1, label %bb2
+
+bb1:
+ ret i8 0
+
+bb2:
+ ret i8 1
+}
+
+define i8 @test28a(i8 %x, i8 %y) {
+entry:
+; This shouldn't be transformed.
+; CHECK: @test28a
+; CHECK: %tmp1 = lshr i8 %x, 7
+; CHECK: %cond1 = icmp eq i8 %tmp1, 0
+; CHECK: br i1 %cond1, label %bb2, label %bb1
+ %tmp1 = lshr i8 %x, 7
+ %cond1 = icmp ne i8 %tmp1, 0
+ br i1 %cond1, label %bb1, label %bb2
+bb1:
+ ret i8 %tmp1
+bb2:
+ %tmp2 = add i8 %tmp1, %y
+ ret i8 %tmp2
+}
+
+
+define i32 @test29(i64 %d18) {
+entry:
+ %tmp916 = lshr i64 %d18, 32
+ %tmp917 = trunc i64 %tmp916 to i32
+ %tmp10 = lshr i32 %tmp917, 31
+ ret i32 %tmp10
+; CHECK: @test29
+; CHECK: %tmp916 = lshr i64 %d18, 63
+; CHECK: %tmp10 = trunc i64 %tmp916 to i32
+}
+
+
+define i32 @test30(i32 %A, i32 %B, i32 %C) {
+ %X = shl i32 %A, %C
+ %Y = shl i32 %B, %C
+ %Z = and i32 %X, %Y
+ ret i32 %Z
+; CHECK: @test30
+; CHECK: %X1 = and i32 %A, %B
+; CHECK: %Z = shl i32 %X1, %C
+}
+
+define i32 @test31(i32 %A, i32 %B, i32 %C) {
+ %X = lshr i32 %A, %C
+ %Y = lshr i32 %B, %C
+ %Z = or i32 %X, %Y
+ ret i32 %Z
+; CHECK: @test31
+; CHECK: %X1 = or i32 %A, %B
+; CHECK: %Z = lshr i32 %X1, %C
+}
+
+define i32 @test32(i32 %A, i32 %B, i32 %C) {
+ %X = ashr i32 %A, %C
+ %Y = ashr i32 %B, %C
+ %Z = xor i32 %X, %Y
+ ret i32 %Z
+; CHECK: @test32
+; CHECK: %X1 = xor i32 %A, %B
+; CHECK: %Z = ashr i32 %X1, %C
+; CHECK: ret i32 %Z
+}
+
+define i1 @test33(i32 %X) {
+ %tmp1 = shl i32 %X, 7
+ %tmp2 = icmp slt i32 %tmp1, 0
+ ret i1 %tmp2
+; CHECK: @test33
+; CHECK: %tmp1.mask = and i32 %X, 16777216
+; CHECK: %tmp2 = icmp ne i32 %tmp1.mask, 0
+}
+
+define i1 @test34(i32 %X) {
+ %tmp1 = lshr i32 %X, 7
+ %tmp2 = icmp slt i32 %tmp1, 0
+ ret i1 %tmp2
+; CHECK: @test34
+; CHECK: ret i1 false
+}
+
+define i1 @test35(i32 %X) {
+ %tmp1 = ashr i32 %X, 7
+ %tmp2 = icmp slt i32 %tmp1, 0
+ ret i1 %tmp2
+; CHECK: @test35
+; CHECK: %tmp2 = icmp slt i32 %X, 0
+; CHECK: ret i1 %tmp2
+}
+
+define i128 @test36(i128 %A, i128 %B) {
+entry:
+ %tmp27 = shl i128 %A, 64
+ %tmp23 = shl i128 %B, 64
+ %ins = or i128 %tmp23, %tmp27
+ %tmp45 = lshr i128 %ins, 64
+ ret i128 %tmp45
+
+; CHECK: @test36
+; CHECK: %tmp231 = or i128 %B, %A
+; CHECK: %ins = and i128 %tmp231, 18446744073709551615
+; CHECK: ret i128 %ins
+}
+
+define i64 @test37(i128 %A, i32 %B) {
+entry:
+ %tmp27 = shl i128 %A, 64
+ %tmp22 = zext i32 %B to i128
+ %tmp23 = shl i128 %tmp22, 96
+ %ins = or i128 %tmp23, %tmp27
+ %tmp45 = lshr i128 %ins, 64
+ %tmp46 = trunc i128 %tmp45 to i64
+ ret i64 %tmp46
+
+; CHECK: @test37
+; CHECK: %tmp23 = shl nuw nsw i128 %tmp22, 32
+; CHECK: %ins = or i128 %tmp23, %A
+; CHECK: %tmp46 = trunc i128 %ins to i64
+}
+
+define i32 @test38(i32 %x) nounwind readnone {
+ %rem = srem i32 %x, 32
+ %shl = shl i32 1, %rem
+ ret i32 %shl
+; CHECK: @test38
+; CHECK-NEXT: and i32 %x, 31
+; CHECK-NEXT: shl i32 1
+; CHECK-NEXT: ret i32
+}
+
+; <rdar://problem/8756731>
+; CHECK: @test39
+define i8 @test39(i32 %a0) {
+entry:
+ %tmp4 = trunc i32 %a0 to i8
+; CHECK: and i8 %tmp49, 64
+ %tmp5 = shl i8 %tmp4, 5
+ %tmp48 = and i8 %tmp5, 32
+ %tmp49 = lshr i8 %tmp48, 5
+ %tmp50 = mul i8 %tmp49, 64
+ %tmp51 = xor i8 %tmp50, %tmp5
+; CHECK: and i8 %0, 16
+ %tmp52 = and i8 %tmp51, -128
+ %tmp53 = lshr i8 %tmp52, 7
+ %tmp54 = mul i8 %tmp53, 16
+ %tmp55 = xor i8 %tmp54, %tmp51
+; CHECK: ret i8 %tmp551
+ ret i8 %tmp55
+}
+
+; PR9809
+define i32 @test40(i32 %a, i32 %b) nounwind {
+ %shl1 = shl i32 1, %b
+ %shl2 = shl i32 %shl1, 2
+ %div = udiv i32 %a, %shl2
+ ret i32 %div
+; CHECK: @test40
+; CHECK-NEXT: add i32 %b, 2
+; CHECK-NEXT: lshr i32 %a
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test41(i32 %a, i32 %b) nounwind {
+ %1 = shl i32 1, %b
+ %2 = shl i32 %1, 3
+ ret i32 %2
+; CHECK: @test41
+; CHECK-NEXT: shl i32 8, %b
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test42(i32 %a, i32 %b) nounwind {
+ %div = lshr i32 4096, %b ; must be exact otherwise we'd divide by zero
+ %div2 = udiv i32 %a, %div
+ ret i32 %div2
+; CHECK: @test42
+; CHECK-NEXT: lshr exact i32 4096, %b
+}
+
+define i32 @test43(i32 %a, i32 %b) nounwind {
+ %div = shl i32 4096, %b ; must be exact otherwise we'd divide by zero
+ %div2 = udiv i32 %a, %div
+ ret i32 %div2
+; CHECK: @test43
+; CHECK-NEXT: add i32 %b, 12
+; CHECK-NEXT: lshr
+; CHECK-NEXT: ret
+}
+
+define i32 @test44(i32 %a) nounwind {
+ %y = shl nuw i32 %a, 1
+ %z = shl i32 %y, 4
+ ret i32 %z
+; CHECK: @test44
+; CHECK-NEXT: %y = shl i32 %a, 5
+; CHECK-NEXT: ret i32 %y
+}
+
+define i32 @test45(i32 %a) nounwind {
+ %y = lshr exact i32 %a, 1
+ %z = lshr i32 %y, 4
+ ret i32 %z
+; CHECK: @test45
+; CHECK-NEXT: %y = lshr i32 %a, 5
+; CHECK-NEXT: ret i32 %y
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/shufflemask-undef.ll b/src/LLVM/test/Transforms/InstCombine/shufflemask-undef.ll
new file mode 100644
index 0000000..cf87aef
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/shufflemask-undef.ll
@@ -0,0 +1,109 @@
+; RUN: opt < %s -instcombine -S | not grep {shufflevector.\*i32 8}
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9"
+ %struct.ActiveTextureTargets = type { i64, i64, i64, i64, i64, i64 }
+ %struct.AlphaTest = type { float, i16, i8, i8 }
+ %struct.ArrayRange = type { i8, i8, i8, i8 }
+ %struct.BlendMode = type { i16, i16, i16, i16, %struct.IColor4, i16, i16, i8, i8, i8, i8 }
+ %struct.ClearColor = type { double, %struct.IColor4, %struct.IColor4, float, i32 }
+ %struct.ClipPlane = type { i32, [6 x %struct.IColor4] }
+ %struct.ColorBuffer = type { i16, i8, i8, [8 x i16], [0 x i32] }
+ %struct.ColorMatrix = type { [16 x float]*, %struct.ImagingColorScale }
+ %struct.Convolution = type { %struct.IColor4, %struct.ImagingColorScale, i16, i16, [0 x i32], float*, i32, i32 }
+ %struct.DepthTest = type { i16, i16, i8, i8, i8, i8, double, double }
+ %struct.FixedFunction = type { %struct.PPStreamToken* }
+ %struct.FogMode = type { %struct.IColor4, float, float, float, float, float, i16, i16, i16, i8, i8 }
+ %struct.HintMode = type { i16, i16, i16, i16, i16, i16, i16, i16, i16, i16 }
+ %struct.Histogram = type { %struct.ProgramLimits*, i32, i16, i8, i8 }
+ %struct.ImagingColorScale = type { %struct.TCoord2, %struct.TCoord2, %struct.TCoord2, %struct.TCoord2 }
+ %struct.ImagingSubset = type { %struct.Convolution, %struct.Convolution, %struct.Convolution, %struct.ColorMatrix, %struct.Minmax, %struct.Histogram, %struct.ImagingColorScale, %struct.ImagingColorScale, %struct.ImagingColorScale, %struct.ImagingColorScale, i32, [0 x i32] }
+ %struct.Light = type { %struct.IColor4, %struct.IColor4, %struct.IColor4, %struct.IColor4, %struct.PointLineLimits, float, float, float, float, float, %struct.PointLineLimits, float, %struct.PointLineLimits, float, %struct.PointLineLimits, float, float, float, float, float }
+ %struct.LightModel = type { %struct.IColor4, [8 x %struct.Light], [2 x %struct.Material], i32, i16, i16, i16, i8, i8, i8, i8, i8, i8 }
+ %struct.LightProduct = type { %struct.IColor4, %struct.IColor4, %struct.IColor4 }
+ %struct.LineMode = type { float, i32, i16, i16, i8, i8, i8, i8 }
+ %struct.LogicOp = type { i16, i8, i8 }
+ %struct.MaskMode = type { i32, [3 x i32], i8, i8, i8, i8, i8, i8, i8, i8 }
+ %struct.Material = type { %struct.IColor4, %struct.IColor4, %struct.IColor4, %struct.IColor4, float, float, float, float, [8 x %struct.LightProduct], %struct.IColor4, [8 x i32] }
+ %struct.Minmax = type { %struct.MinmaxTable*, i16, i8, i8, [0 x i32] }
+ %struct.MinmaxTable = type { %struct.IColor4, %struct.IColor4 }
+ %struct.Mipmaplevel = type { [4 x i32], [4 x i32], [4 x float], [4 x i32], i32, i32, float*, i8*, i16, i16, i16, i16, [2 x float] }
+ %struct.Multisample = type { float, i8, i8, i8, i8, i8, i8, i8, i8 }
+ %struct.PipelineProgramState = type { i8, i8, i8, i8, [0 x i32], %struct.IColor4* }
+ %struct.PixelMap = type { i32*, float*, float*, float*, float*, float*, float*, float*, float*, i32*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+ %struct.PixelMode = type { float, float, %struct.PixelStore, %struct.PixelTransfer, %struct.PixelMap, %struct.ImagingSubset, i32, i32 }
+ %struct.PixelPack = type { i32, i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, i8 }
+ %struct.PixelStore = type { %struct.PixelPack, %struct.PixelPack }
+ %struct.PixelTransfer = type { float, float, float, float, float, float, float, float, float, float, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float }
+ %struct.PluginBufferData = type { i32 }
+ %struct.PointLineLimits = type { float, float, float }
+ %struct.PointMode = type { float, float, float, float, %struct.PointLineLimits, float, i8, i8, i8, i8, i16, i16, i32, i16, i16 }
+ %struct.PolygonMode = type { [128 x i8], float, float, i16, i16, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8 }
+ %struct.ProgramLimits = type { i32, i32, i32, i32 }
+ %struct.RegisterCombiners = type { i8, i8, i8, i8, i32, [2 x %struct.IColor4], [8 x %struct.RegisterCombinersPerStageState], %struct.RegisterCombinersFinalStageState }
+ %struct.RegisterCombinersFinalStageState = type { i8, i8, i8, i8, [7 x %struct.RegisterCombinersPerVariableState] }
+ %struct.RegisterCombinersPerPortionState = type { [4 x %struct.RegisterCombinersPerVariableState], i8, i8, i8, i8, i16, i16, i16, i16, i16, i16 }
+ %struct.RegisterCombinersPerStageState = type { [2 x %struct.RegisterCombinersPerPortionState], [2 x %struct.IColor4] }
+ %struct.RegisterCombinersPerVariableState = type { i16, i16, i16, i16 }
+ %struct.SWRSurfaceRec = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i8*, [4 x i8*], i32 }
+ %struct.ScissorTest = type { %struct.ProgramLimits, i8, i8, i8, i8 }
+ %struct.State = type <{ i16, i16, i16, i16, i32, i32, [256 x %struct.IColor4], [128 x %struct.IColor4], %struct.Viewport, %struct.Transform, %struct.LightModel, %struct.ActiveTextureTargets, %struct.AlphaTest, %struct.BlendMode, %struct.ClearColor, %struct.ColorBuffer, %struct.DepthTest, %struct.ArrayRange, %struct.FogMode, %struct.HintMode, %struct.LineMode, %struct.LogicOp, %struct.MaskMode, %struct.PixelMode, %struct.PointMode, %struct.PolygonMode, %struct.ScissorTest, i32, %struct.StencilTest, [8 x %struct.TextureMode], [16 x %struct.TextureImageMode], %struct.ArrayRange, [8 x %struct.TextureCoordGen], %struct.ClipPlane, %struct.Multisample, %struct.RegisterCombiners, %struct.ArrayRange, %struct.ArrayRange, [3 x %struct.PipelineProgramState], %struct.ArrayRange, %struct.TransformFeedback, i32*, %struct.FixedFunction, [3 x i32], [3 x i32] }>
+ %struct.StencilTest = type { [3 x { i32, i32, i16, i16, i16, i16 }], i32, [4 x i8] }
+ %struct.TextureCoordGen = type { { i16, i16, %struct.IColor4, %struct.IColor4 }, { i16, i16, %struct.IColor4, %struct.IColor4 }, { i16, i16, %struct.IColor4, %struct.IColor4 }, { i16, i16, %struct.IColor4, %struct.IColor4 }, i8, i8, i8, i8 }
+ %struct.TextureGeomState = type { i16, i16, i16, i16, i16, i8, i8, i8, i8, i16, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, [6 x i16], [6 x i16] }
+ %struct.TextureImageMode = type { float }
+ %struct.TextureLevel = type { i32, i32, i16, i16, i16, i8, i8, i16, i16, i16, i16, i8* }
+ %struct.TextureMode = type { %struct.IColor4, i32, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, float, float, i16, i16, i16, i16, i16, i16, [4 x i16], i8, i8, i8, i8, [3 x float], [4 x float], float, float }
+ %struct.TextureParamState = type { i16, i16, i16, i16, i16, i16, %struct.IColor4, float, float, float, float, i16, i16, i16, i16, float, i16, i8, i8, i32, i8* }
+ %struct.TextureRec = type { [4 x float], %struct.TextureState*, %struct.Mipmaplevel*, %struct.Mipmaplevel*, float, float, float, float, i8, i8, i8, i8, i16, i16, i16, i16, i32, float, [2 x %struct.PPStreamToken] }
+ %struct.TextureState = type { i16, i8, i8, i16, i16, float, i32, %struct.SWRSurfaceRec*, %struct.TextureParamState, %struct.TextureGeomState, [0 x i32], i8*, i32, %struct.TextureLevel, [1 x [15 x %struct.TextureLevel]] }
+ %struct.Transform = type <{ [24 x [16 x float]], [24 x [16 x float]], [16 x float], float, float, float, float, float, i8, i8, i8, i8, i32, i32, i32, i16, i16, i8, i8, i8, i8, i32 }>
+ %struct.TransformFeedback = type { i8, i8, i8, i8, [0 x i32], [16 x i32], [16 x i32] }
+ %struct.Viewport = type { float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, double, double, i32, i32, i32, i32, float, float, float, float }
+ %struct.IColor4 = type { float, float, float, float }
+ %struct.TCoord2 = type { float, float }
+ %struct.VMGPStack = type { [6 x <4 x float>*], <4 x float>*, i32, i32, <4 x float>*, <4 x float>**, i32, i32, i32, i32, i32, i32 }
+ %struct.VMTextures = type { [16 x %struct.TextureRec*] }
+ %struct.PPStreamToken = type { { i16, i16, i32 } }
+ %struct._VMConstants = type { <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, float, float, float, float, float, float, float, float, float, float, float, float, [256 x float], [528 x i8], { void (i8*, i8*, i32, i8*)*, float (float)*, float (float)*, float (float)*, i32 (float)* } }
+
+define i32 @foo(%struct.State* %dst, <4 x float>* %prgrm, <4 x float>** %buffs, %struct._VMConstants* %cnstn, %struct.PPStreamToken* %pstrm, %struct.PluginBufferData* %gpctx, %struct.VMTextures* %txtrs, %struct.VMGPStack* %gpstk) nounwind {
+bb266.i:
+ getelementptr <4 x float>* null, i32 11 ; <<4 x float>*>:0 [#uses=1]
+ load <4 x float>* %0, align 16 ; <<4 x float>>:1 [#uses=1]
+ shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> < i32 0, i32 1, i32 1, i32 1 > ; <<4 x float>>:2 [#uses=1]
+ shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>>:3 [#uses=1]
+ shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>>:4 [#uses=1]
+ shufflevector <4 x float> %4, <4 x float> %3, <4 x i32> < i32 6, i32 7, i32 2, i32 3 > ; <<4 x float>>:5 [#uses=1]
+ fmul <4 x float> %5, zeroinitializer ; <<4 x float>>:6 [#uses=2]
+ fmul <4 x float> %6, %6 ; <<4 x float>>:7 [#uses=1]
+ fadd <4 x float> zeroinitializer, %7 ; <<4 x float>>:8 [#uses=1]
+ call <4 x float> @llvm.x86.sse.max.ps( <4 x float> zeroinitializer, <4 x float> %8 ) nounwind readnone ; <<4 x float>>:9 [#uses=1]
+ %phitmp40 = bitcast <4 x float> %9 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp4109.i = and <4 x i32> %phitmp40, < i32 8388607, i32 8388607, i32 8388607, i32 8388607 > ; <<4 x i32>> [#uses=1]
+ %tmp4116.i = or <4 x i32> %tmp4109.i, < i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216 > ; <<4 x i32>> [#uses=1]
+ %tmp4117.i = bitcast <4 x i32> %tmp4116.i to <4 x float> ; <<4 x float>> [#uses=1]
+ fadd <4 x float> %tmp4117.i, zeroinitializer ; <<4 x float>>:10 [#uses=1]
+ fmul <4 x float> %10, < float 5.000000e-01, float 5.000000e-01, float 5.000000e-01, float 5.000000e-01 > ; <<4 x float>>:11 [#uses=1]
+ call <4 x float> @llvm.x86.sse.max.ps( <4 x float> %11, <4 x float> zeroinitializer ) nounwind readnone ; <<4 x float>>:12 [#uses=1]
+ call <4 x float> @llvm.x86.sse.min.ps( <4 x float> %12, <4 x float> zeroinitializer ) nounwind readnone ; <<4 x float>>:13 [#uses=1]
+ %tmp4170.i = call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %13, <4 x float> zeroinitializer, i8 2 ) nounwind ; <<4 x float>> [#uses=1]
+ bitcast <4 x float> %tmp4170.i to <16 x i8> ; <<16 x i8>>:14 [#uses=1]
+ call i32 @llvm.x86.sse2.pmovmskb.128( <16 x i8> %14 ) nounwind readnone ; <i32>:15 [#uses=1]
+ icmp eq i32 %15, 0 ; <i1>:16 [#uses=1]
+ br i1 %16, label %bb5574.i, label %bb4521.i
+
+bb4521.i: ; preds = %bb266.i
+ unreachable
+
+bb5574.i: ; preds = %bb266.i
+ unreachable
+}
+
+declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
+
+declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
+
+declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone
+
+declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
diff --git a/src/LLVM/test/Transforms/InstCombine/shufflevec-constant.ll b/src/LLVM/test/Transforms/InstCombine/shufflevec-constant.ll
new file mode 100644
index 0000000..29ae5a7
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/shufflevec-constant.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep {ret <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0x7FF0000000000000, float 0x7FF0000000000000>}
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9"
+
+define <4 x float> @__inff4() nounwind readnone {
+entry:
+ %tmp14 = extractelement <1 x double> bitcast (<2 x float> <float 0x7FF0000000000000, float 0x7FF0000000000000> to <1 x double>), i32 0 ; <double> [#uses=1]
+ %tmp4 = bitcast double %tmp14 to i64 ; <i64> [#uses=1]
+ %tmp3 = bitcast i64 %tmp4 to <2 x float> ; <<2 x float>> [#uses=1]
+ %tmp8 = shufflevector <2 x float> %tmp3, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef> ; <<4 x float>> [#uses=1]
+ %tmp9 = shufflevector <4 x float> zeroinitializer, <4 x float> %tmp8, <4 x i32> <i32 0, i32 1, i32 4, i32 5> ; <<4 x float>> [#uses=0]
+ ret <4 x float> %tmp9
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/sign-test-and-or.ll b/src/LLVM/test/Transforms/InstCombine/sign-test-and-or.ll
new file mode 100644
index 0000000..47f5f30
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/sign-test-and-or.ll
@@ -0,0 +1,79 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+declare void @foo()
+
+define void @test1(i32 %a, i32 %b) nounwind {
+ %1 = icmp slt i32 %a, 0
+ %2 = icmp slt i32 %b, 0
+ %or.cond = or i1 %1, %2
+ br i1 %or.cond, label %if.then, label %if.end
+
+; CHECK: @test1
+; CHECK-NEXT: %1 = or i32 %a, %b
+; CHECK-NEXT: %2 = icmp slt i32 %1, 0
+; CHECK-NEXT: br
+
+if.then:
+ tail call void @foo() nounwind
+ ret void
+
+if.end:
+ ret void
+}
+
+define void @test2(i32 %a, i32 %b) nounwind {
+ %1 = icmp sgt i32 %a, -1
+ %2 = icmp sgt i32 %b, -1
+ %or.cond = or i1 %1, %2
+ br i1 %or.cond, label %if.then, label %if.end
+
+; CHECK: @test2
+; CHECK-NEXT: %1 = and i32 %a, %b
+; CHECK-NEXT: %2 = icmp sgt i32 %1, -1
+; CHECK-NEXT: br
+
+if.then:
+ tail call void @foo() nounwind
+ ret void
+
+if.end:
+ ret void
+}
+
+define void @test3(i32 %a, i32 %b) nounwind {
+ %1 = icmp slt i32 %a, 0
+ %2 = icmp slt i32 %b, 0
+ %or.cond = and i1 %1, %2
+ br i1 %or.cond, label %if.then, label %if.end
+
+; CHECK: @test3
+; CHECK-NEXT: %1 = and i32 %a, %b
+; CHECK-NEXT: %2 = icmp slt i32 %1, 0
+; CHECK-NEXT: br
+
+if.then:
+ tail call void @foo() nounwind
+ ret void
+
+if.end:
+ ret void
+}
+
+define void @test4(i32 %a, i32 %b) nounwind {
+ %1 = icmp sgt i32 %a, -1
+ %2 = icmp sgt i32 %b, -1
+ %or.cond = and i1 %1, %2
+ br i1 %or.cond, label %if.then, label %if.end
+
+; CHECK: @test4
+; CHECK-NEXT: %1 = or i32 %a, %b
+; CHECK-NEXT: %2 = icmp sgt i32 %1, -1
+; CHECK-NEXT: br
+
+if.then:
+ tail call void @foo() nounwind
+ ret void
+
+if.end:
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/signed-comparison.ll b/src/LLVM/test/Transforms/InstCombine/signed-comparison.ll
new file mode 100644
index 0000000..9a08c64
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/signed-comparison.ll
@@ -0,0 +1,28 @@
+; RUN: opt < %s -instcombine -S > %t
+; RUN: not grep zext %t
+; RUN: not grep slt %t
+; RUN: grep {icmp ult} %t
+
+; Instcombine should convert the zext+slt into a simple ult.
+
+define void @foo(double* %p) nounwind {
+entry:
+ br label %bb
+
+bb:
+ %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %bb ]
+ %t0 = and i64 %indvar, 65535
+ %t1 = getelementptr double* %p, i64 %t0
+ %t2 = load double* %t1, align 8
+ %t3 = fmul double %t2, 2.2
+ store double %t3, double* %t1, align 8
+ %i.04 = trunc i64 %indvar to i16
+ %t4 = add i16 %i.04, 1
+ %t5 = zext i16 %t4 to i32
+ %t6 = icmp slt i32 %t5, 500
+ %indvar.next = add i64 %indvar, 1
+ br i1 %t6, label %bb, label %return
+
+return:
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/signext.ll b/src/LLVM/test/Transforms/InstCombine/signext.ll
new file mode 100644
index 0000000..b841533
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/signext.ll
@@ -0,0 +1,87 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128:n8:16:32:64"
+
+define i32 @test1(i32 %x) {
+ %tmp.1 = and i32 %x, 65535 ; <i32> [#uses=1]
+ %tmp.2 = xor i32 %tmp.1, -32768 ; <i32> [#uses=1]
+ %tmp.3 = add i32 %tmp.2, 32768 ; <i32> [#uses=1]
+ ret i32 %tmp.3
+; CHECK: @test1
+; CHECK: %sext = shl i32 %x, 16
+; CHECK: %tmp.3 = ashr exact i32 %sext, 16
+; CHECK: ret i32 %tmp.3
+}
+
+define i32 @test2(i32 %x) {
+ %tmp.1 = and i32 %x, 65535 ; <i32> [#uses=1]
+ %tmp.2 = xor i32 %tmp.1, 32768 ; <i32> [#uses=1]
+ %tmp.3 = add i32 %tmp.2, -32768 ; <i32> [#uses=1]
+ ret i32 %tmp.3
+; CHECK: @test2
+; CHECK: %sext = shl i32 %x, 16
+; CHECK: %tmp.3 = ashr exact i32 %sext, 16
+; CHECK: ret i32 %tmp.3
+}
+
+define i32 @test3(i16 %P) {
+ %tmp.1 = zext i16 %P to i32 ; <i32> [#uses=1]
+ %tmp.4 = xor i32 %tmp.1, 32768 ; <i32> [#uses=1]
+ %tmp.5 = add i32 %tmp.4, -32768 ; <i32> [#uses=1]
+ ret i32 %tmp.5
+; CHECK: @test3
+; CHECK: %tmp.5 = sext i16 %P to i32
+; CHECK: ret i32 %tmp.5
+}
+
+define i32 @test4(i16 %P) {
+ %tmp.1 = zext i16 %P to i32 ; <i32> [#uses=1]
+ %tmp.4 = xor i32 %tmp.1, 32768 ; <i32> [#uses=1]
+ %tmp.5 = add i32 %tmp.4, -32768 ; <i32> [#uses=1]
+ ret i32 %tmp.5
+; CHECK: @test4
+; CHECK: %tmp.5 = sext i16 %P to i32
+; CHECK: ret i32 %tmp.5
+}
+
+define i32 @test5(i32 %x) {
+ %tmp.1 = and i32 %x, 255 ; <i32> [#uses=1]
+ %tmp.2 = xor i32 %tmp.1, 128 ; <i32> [#uses=1]
+ %tmp.3 = add i32 %tmp.2, -128 ; <i32> [#uses=1]
+ ret i32 %tmp.3
+; CHECK: @test5
+; CHECK: %sext = shl i32 %x, 24
+; CHECK: %tmp.3 = ashr exact i32 %sext, 24
+; CHECK: ret i32 %tmp.3
+}
+
+define i32 @test6(i32 %x) {
+ %tmp.2 = shl i32 %x, 16 ; <i32> [#uses=1]
+ %tmp.4 = ashr i32 %tmp.2, 16 ; <i32> [#uses=1]
+ ret i32 %tmp.4
+; CHECK: @test6
+; CHECK: %tmp.2 = shl i32 %x, 16
+; CHECK: %tmp.4 = ashr exact i32 %tmp.2, 16
+; CHECK: ret i32 %tmp.4
+}
+
+define i32 @test7(i16 %P) {
+ %tmp.1 = zext i16 %P to i32 ; <i32> [#uses=1]
+ %sext1 = shl i32 %tmp.1, 16 ; <i32> [#uses=1]
+ %tmp.5 = ashr i32 %sext1, 16 ; <i32> [#uses=1]
+ ret i32 %tmp.5
+; CHECK: @test7
+; CHECK: %tmp.5 = sext i16 %P to i32
+; CHECK: ret i32 %tmp.5
+}
+
+define i32 @test8(i32 %x) nounwind readnone {
+entry:
+ %shr = lshr i32 %x, 5 ; <i32> [#uses=1]
+ %xor = xor i32 %shr, 67108864 ; <i32> [#uses=1]
+ %sub = add i32 %xor, -67108864 ; <i32> [#uses=1]
+ ret i32 %sub
+; CHECK: @test8
+; CHECK: %shr = ashr i32 %x, 5
+; CHECK: ret i32 %shr
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/simplify-demanded-bits-pointer.ll b/src/LLVM/test/Transforms/InstCombine/simplify-demanded-bits-pointer.ll
new file mode 100644
index 0000000..6d2193f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/simplify-demanded-bits-pointer.ll
@@ -0,0 +1,84 @@
+; RUN: opt < %s -instcombine -disable-output
+
+; SimplifyDemandedBits should cope with pointer types.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+target triple = "x86_64-unknown-linux-gnu"
+ %struct.VEC_rtx_base = type { i32, i32, [1 x %struct.rtx_def*] }
+ %struct.VEC_rtx_gc = type { %struct.VEC_rtx_base }
+ %struct.block_symbol = type { [3 x %struct.rtunion], %struct.object_block*, i64 }
+ %struct.object_block = type { %struct.section*, i32, i64, %struct.VEC_rtx_gc*, %struct.VEC_rtx_gc* }
+ %struct.omp_clause_subcode = type { i32 }
+ %struct.rtunion = type { i8* }
+ %struct.rtx_def = type { i16, i8, i8, %struct.u }
+ %struct.section = type { %struct.unnamed_section }
+ %struct.u = type { %struct.block_symbol }
+ %struct.unnamed_section = type { %struct.omp_clause_subcode, void (i8*)*, i8*, %struct.section* }
+
+define fastcc void @cse_insn(%struct.rtx_def* %insn, %struct.rtx_def* %libcall_insn) nounwind {
+entry:
+ br i1 undef, label %bb43, label %bb88
+
+bb43: ; preds = %entry
+ br label %bb88
+
+bb88: ; preds = %bb43, %entry
+ br i1 undef, label %bb95, label %bb107
+
+bb95: ; preds = %bb88
+ unreachable
+
+bb107: ; preds = %bb88
+ %0 = load i16* undef, align 8 ; <i16> [#uses=1]
+ %1 = icmp eq i16 %0, 38 ; <i1> [#uses=1]
+ %src_eqv_here.0 = select i1 %1, %struct.rtx_def* null, %struct.rtx_def* null ; <%struct.rtx_def*> [#uses=1]
+ br i1 undef, label %bb127, label %bb125
+
+bb125: ; preds = %bb107
+ br i1 undef, label %bb127, label %bb126
+
+bb126: ; preds = %bb125
+ br i1 undef, label %bb129, label %bb133
+
+bb127: ; preds = %bb125, %bb107
+ unreachable
+
+bb129: ; preds = %bb126
+ br label %bb133
+
+bb133: ; preds = %bb129, %bb126
+ br i1 undef, label %bb134, label %bb146
+
+bb134: ; preds = %bb133
+ unreachable
+
+bb146: ; preds = %bb133
+ br i1 undef, label %bb180, label %bb186
+
+bb180: ; preds = %bb146
+ %2 = icmp eq %struct.rtx_def* null, null ; <i1> [#uses=1]
+ %3 = zext i1 %2 to i8 ; <i8> [#uses=1]
+ %4 = icmp ne %struct.rtx_def* %src_eqv_here.0, null ; <i1> [#uses=1]
+ %5 = zext i1 %4 to i8 ; <i8> [#uses=1]
+ %toBool181 = icmp ne i8 %3, 0 ; <i1> [#uses=1]
+ %toBool182 = icmp ne i8 %5, 0 ; <i1> [#uses=1]
+ %6 = and i1 %toBool181, %toBool182 ; <i1> [#uses=1]
+ %7 = zext i1 %6 to i8 ; <i8> [#uses=1]
+ %toBool183 = icmp ne i8 %7, 0 ; <i1> [#uses=1]
+ br i1 %toBool183, label %bb184, label %bb186
+
+bb184: ; preds = %bb180
+ br i1 undef, label %bb185, label %bb186
+
+bb185: ; preds = %bb184
+ br label %bb186
+
+bb186: ; preds = %bb185, %bb184, %bb180, %bb146
+ br i1 undef, label %bb190, label %bb195
+
+bb190: ; preds = %bb186
+ unreachable
+
+bb195: ; preds = %bb186
+ unreachable
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/sink_instruction.ll b/src/LLVM/test/Transforms/InstCombine/sink_instruction.ll
new file mode 100644
index 0000000..29d202a
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/sink_instruction.ll
@@ -0,0 +1,56 @@
+; RUN: opt -instcombine %s -S | FileCheck %s
+
+;; This tests that the instructions in the entry blocks are sunk into each
+;; arm of the 'if'.
+
+define i32 @test1(i1 %C, i32 %A, i32 %B) {
+; CHECK: @test1
+entry:
+ %tmp.2 = sdiv i32 %A, %B ; <i32> [#uses=1]
+ %tmp.9 = add i32 %B, %A ; <i32> [#uses=1]
+ br i1 %C, label %then, label %endif
+
+then: ; preds = %entry
+ ret i32 %tmp.9
+
+endif: ; preds = %entry
+; CHECK: sdiv i32
+; CHECK-NEXT: ret i32
+ ret i32 %tmp.2
+}
+
+
+;; PHI use, sink divide before call.
+define i32 @test2(i32 %x) nounwind ssp {
+; CHECK: @test2
+; CHECK-NOT: sdiv i32
+entry:
+ br label %bb
+
+bb: ; preds = %bb2, %entry
+ %x_addr.17 = phi i32 [ %x, %entry ], [ %x_addr.0, %bb2 ] ; <i32> [#uses=4]
+ %i.06 = phi i32 [ 0, %entry ], [ %4, %bb2 ] ; <i32> [#uses=1]
+ %0 = add nsw i32 %x_addr.17, 1 ; <i32> [#uses=1]
+ %1 = sdiv i32 %0, %x_addr.17 ; <i32> [#uses=1]
+ %2 = icmp eq i32 %x_addr.17, 0 ; <i1> [#uses=1]
+ br i1 %2, label %bb1, label %bb2
+
+bb1: ; preds = %bb
+; CHECK: bb1:
+; CHECK-NEXT: add nsw i32 %x_addr.17, 1
+; CHECK-NEXT: sdiv i32
+; CHECK-NEXT: tail call i32 @bar()
+ %3 = tail call i32 @bar() nounwind ; <i32> [#uses=0]
+ br label %bb2
+
+bb2: ; preds = %bb, %bb1
+ %x_addr.0 = phi i32 [ %1, %bb1 ], [ %x_addr.17, %bb ] ; <i32> [#uses=2]
+ %4 = add nsw i32 %i.06, 1 ; <i32> [#uses=2]
+ %exitcond = icmp eq i32 %4, 1000000 ; <i1> [#uses=1]
+ br i1 %exitcond, label %bb4, label %bb
+
+bb4: ; preds = %bb2
+ ret i32 %x_addr.0
+}
+
+declare i32 @bar()
diff --git a/src/LLVM/test/Transforms/InstCombine/sitofp.ll b/src/LLVM/test/Transforms/InstCombine/sitofp.ll
new file mode 100644
index 0000000..bd31b89
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/sitofp.ll
@@ -0,0 +1,55 @@
+; RUN: opt < %s -instcombine -S | not grep itofp
+
+define i1 @test1(i8 %A) {
+ %B = sitofp i8 %A to double
+ %C = fcmp ult double %B, 128.0
+ ret i1 %C ; True!
+}
+define i1 @test2(i8 %A) {
+ %B = sitofp i8 %A to double
+ %C = fcmp ugt double %B, -128.1
+ ret i1 %C ; True!
+}
+
+define i1 @test3(i8 %A) {
+ %B = sitofp i8 %A to double
+ %C = fcmp ule double %B, 127.0
+ ret i1 %C ; true!
+}
+
+define i1 @test4(i8 %A) {
+ %B = sitofp i8 %A to double
+ %C = fcmp ult double %B, 127.0
+ ret i1 %C ; A != 127
+}
+
+define i32 @test5(i32 %A) {
+ %B = sitofp i32 %A to double
+ %C = fptosi double %B to i32
+ %D = uitofp i32 %C to double
+ %E = fptoui double %D to i32
+ ret i32 %E
+}
+
+define i32 @test6(i32 %A) {
+ %B = and i32 %A, 7 ; <i32> [#uses=1]
+ %C = and i32 %A, 32 ; <i32> [#uses=1]
+ %D = sitofp i32 %B to double ; <double> [#uses=1]
+ %E = sitofp i32 %C to double ; <double> [#uses=1]
+ %F = fadd double %D, %E ; <double> [#uses=1]
+ %G = fptosi double %F to i32 ; <i32> [#uses=1]
+ ret i32 %G
+}
+
+define i32 @test7(i32 %a) nounwind {
+ %b = sitofp i32 %a to double ; <double> [#uses=1]
+ %c = fptoui double %b to i32 ; <i32> [#uses=1]
+ ret i32 %c
+}
+
+define i32 @test8(i32 %a) nounwind {
+ %b = uitofp i32 %a to double ; <double> [#uses=1]
+ %c = fptosi double %b to i32 ; <i32> [#uses=1]
+ ret i32 %c
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/sqrt.ll b/src/LLVM/test/Transforms/InstCombine/sqrt.ll
new file mode 100644
index 0000000..cc78417
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/sqrt.ll
@@ -0,0 +1,54 @@
+; RUN: opt -S -instcombine %s | FileCheck %s
+
+define float @test1(float %x) nounwind readnone ssp {
+entry:
+; CHECK: @test1
+; CHECK-NOT: fpext
+; CHECK-NOT: sqrt(
+; CHECK: sqrtf(
+; CHECK-NOT: fptrunc
+ %conv = fpext float %x to double ; <double> [#uses=1]
+ %call = tail call double @sqrt(double %conv) readnone nounwind ; <double> [#uses=1]
+ %conv1 = fptrunc double %call to float ; <float> [#uses=1]
+; CHECK: ret float
+ ret float %conv1
+}
+
+; PR8096
+define float @test2(float %x) nounwind readnone ssp {
+entry:
+; CHECK: @test2
+; CHECK-NOT: fpext
+; CHECK-NOT: sqrt(
+; CHECK: sqrtf(
+; CHECK-NOT: fptrunc
+ %conv = fpext float %x to double ; <double> [#uses=1]
+ %call = tail call double @sqrt(double %conv) nounwind ; <double> [#uses=1]
+ %conv1 = fptrunc double %call to float ; <float> [#uses=1]
+; CHECK: ret float
+ ret float %conv1
+}
+
+; rdar://9763193
+; Can't fold (fptrunc (sqrt (fpext x))) -> (sqrtf x) since there is another
+; use of sqrt result.
+define float @test3(float* %v) nounwind uwtable ssp {
+entry:
+; CHECK: @test3
+; CHECK: sqrt(
+; CHECK-NOT: sqrtf(
+; CHECK: fptrunc
+ %arrayidx13 = getelementptr inbounds float* %v, i64 2
+ %tmp14 = load float* %arrayidx13
+ %mul18 = fmul float %tmp14, %tmp14
+ %add19 = fadd float undef, %mul18
+ %conv = fpext float %add19 to double
+ %call34 = call double @sqrt(double %conv) readnone
+ %call36 = call i32 (double)* @foo(double %call34) nounwind
+ %conv38 = fptrunc double %call34 to float
+ ret float %conv38
+}
+
+declare i32 @foo(double)
+
+declare double @sqrt(double) readnone
diff --git a/src/LLVM/test/Transforms/InstCombine/srem-simplify-bug.ll b/src/LLVM/test/Transforms/InstCombine/srem-simplify-bug.ll
new file mode 100644
index 0000000..af824a4
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/srem-simplify-bug.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {ret i1 false}
+; PR2276
+
+define i1 @f(i32 %x) {
+ %A = or i32 %x, 1
+ %B = srem i32 %A, 1
+ %C = icmp ne i32 %B, 0
+ ret i1 %C
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/srem.ll b/src/LLVM/test/Transforms/InstCombine/srem.ll
new file mode 100644
index 0000000..beefe4f
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/srem.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep srem
+
+define i64 @foo(i64 %x1, i64 %y2) {
+ %r = sdiv i64 %x1, %y2
+ %r7 = mul i64 %r, %y2
+ %r8 = sub i64 %x1, %r7
+ ret i64 %r8
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/srem1.ll b/src/LLVM/test/Transforms/InstCombine/srem1.ll
new file mode 100644
index 0000000..f18690c
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/srem1.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine
+; PR2670
+
+@g_127 = external global i32 ; <i32*> [#uses=1]
+
+define i32 @func_56(i32 %p_58, i32 %p_59, i32 %p_61, i16 signext %p_62) nounwind {
+entry:
+ %call = call i32 (...)* @rshift_s_s( i32 %p_61, i32 1 ) ; <i32> [#uses=1]
+ %conv = sext i32 %call to i64 ; <i64> [#uses=1]
+ %or = or i64 -1734012817166602727, %conv ; <i64> [#uses=1]
+ %rem = srem i64 %or, 1 ; <i64> [#uses=1]
+ %cmp = icmp eq i64 %rem, 1 ; <i1> [#uses=1]
+ %cmp.ext = zext i1 %cmp to i32 ; <i32> [#uses=1]
+ store i32 %cmp.ext, i32* @g_127
+ ret i32 undef
+}
+
+declare i32 @rshift_s_s(...)
diff --git a/src/LLVM/test/Transforms/InstCombine/stack-overalign.ll b/src/LLVM/test/Transforms/InstCombine/stack-overalign.ll
new file mode 100644
index 0000000..2fc8414
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/stack-overalign.ll
@@ -0,0 +1,29 @@
+; RUN: opt < %s -instcombine -S | grep {align 32} | count 1
+
+; It's tempting to have an instcombine in which the src pointer of a
+; memcpy is aligned up to the alignment of the destination, however
+; there are pitfalls. If the src is an alloca, aligning it beyond what
+; the target's stack pointer is aligned at will require dynamic
+; stack realignment, which can require functions that don't otherwise
+; need a frame pointer to need one.
+;
+; Abstaining from this transform is not the only way to approach this
+; issue. Some late phase could be smart enough to reduce alloca
+; alignments when they are greater than they need to be. Or, codegen
+; could do dynamic alignment for just the one alloca, and leave the
+; main stack pointer at its standard alignment.
+
+@dst = global [1024 x i8] zeroinitializer, align 32
+
+define void @foo() nounwind {
+entry:
+ %src = alloca [1024 x i8], align 1
+ %src1 = getelementptr [1024 x i8]* %src, i32 0, i32 0
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([1024 x i8]* @dst, i32 0, i32 0), i8* %src1, i32 1024, i32 1, i1 false)
+ call void @frob(i8* %src1) nounwind
+ ret void
+}
+
+declare void @frob(i8*)
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
diff --git a/src/LLVM/test/Transforms/InstCombine/stacksaverestore.ll b/src/LLVM/test/Transforms/InstCombine/stacksaverestore.ll
new file mode 100644
index 0000000..4396f8e
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/stacksaverestore.ll
@@ -0,0 +1,56 @@
+; RUN: opt < %s -instcombine -S | grep {call.*stackrestore} | count 1
+
+declare i8* @llvm.stacksave()
+declare void @llvm.stackrestore(i8*)
+
+;; Test that llvm.stackrestore is removed when possible.
+define i32* @test1(i32 %P) {
+ %tmp = call i8* @llvm.stacksave( )
+ call void @llvm.stackrestore( i8* %tmp ) ;; not restoring anything
+ %A = alloca i32, i32 %P
+ ret i32* %A
+}
+
+define void @test2(i8* %X) {
+ call void @llvm.stackrestore( i8* %X ) ;; no allocas before return.
+ ret void
+}
+
+define void @foo(i32 %size) nounwind {
+entry:
+ %tmp118124 = icmp sgt i32 %size, 0 ; <i1> [#uses=1]
+ br i1 %tmp118124, label %bb.preheader, label %return
+
+bb.preheader: ; preds = %entry
+ %tmp25 = add i32 %size, -1 ; <i32> [#uses=1]
+ %tmp125 = icmp slt i32 %size, 1 ; <i1> [#uses=1]
+ %smax = select i1 %tmp125, i32 1, i32 %size ; <i32> [#uses=1]
+ br label %bb
+
+bb: ; preds = %bb, %bb.preheader
+ %i.0.reg2mem.0 = phi i32 [ 0, %bb.preheader ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
+ %tmp = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
+ %tmp23 = alloca i8, i32 %size ; <i8*> [#uses=2]
+ %tmp27 = getelementptr i8* %tmp23, i32 %tmp25 ; <i8*> [#uses=1]
+ store i8 0, i8* %tmp27, align 1
+ %tmp28 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
+ %tmp52 = alloca i8, i32 %size ; <i8*> [#uses=1]
+ %tmp53 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
+ %tmp77 = alloca i8, i32 %size ; <i8*> [#uses=1]
+ %tmp78 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
+ %tmp102 = alloca i8, i32 %size ; <i8*> [#uses=1]
+ call void @bar( i32 %i.0.reg2mem.0, i8* %tmp23, i8* %tmp52, i8* %tmp77, i8* %tmp102, i32 %size ) nounwind
+ call void @llvm.stackrestore( i8* %tmp78 )
+ call void @llvm.stackrestore( i8* %tmp53 )
+ call void @llvm.stackrestore( i8* %tmp28 )
+ call void @llvm.stackrestore( i8* %tmp )
+ %indvar.next = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2]
+ %exitcond = icmp eq i32 %indvar.next, %smax ; <i1> [#uses=1]
+ br i1 %exitcond, label %return, label %bb
+
+return: ; preds = %bb, %entry
+ ret void
+}
+
+declare void @bar(i32, i8*, i8*, i8*, i8*, i32)
+
diff --git a/src/LLVM/test/Transforms/InstCombine/store.ll b/src/LLVM/test/Transforms/InstCombine/store.ll
new file mode 100644
index 0000000..bff57c9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/store.ll
@@ -0,0 +1,85 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define void @test1(i32* %P) {
+ store i32 undef, i32* %P
+ store i32 123, i32* undef
+ store i32 124, i32* null
+ ret void
+; CHECK: @test1(
+; CHECK-NEXT: store i32 123, i32* undef
+; CHECK-NEXT: store i32 undef, i32* null
+; CHECK-NEXT: ret void
+}
+
+define void @test2(i32* %P) {
+ %X = load i32* %P ; <i32> [#uses=1]
+ %Y = add i32 %X, 0 ; <i32> [#uses=1]
+ store i32 %Y, i32* %P
+ ret void
+; CHECK: @test2
+; CHECK-NEXT: ret void
+}
+
+;; Simple sinking tests
+
+; "if then else"
+define i32 @test3(i1 %C) {
+ %A = alloca i32
+ br i1 %C, label %Cond, label %Cond2
+
+Cond:
+ store i32 -987654321, i32* %A
+ br label %Cont
+
+Cond2:
+ store i32 47, i32* %A
+ br label %Cont
+
+Cont:
+ %V = load i32* %A
+ ret i32 %V
+; CHECK: @test3
+; CHECK-NOT: alloca
+; CHECK: Cont:
+; CHECK-NEXT: %storemerge = phi i32 [ 47, %Cond2 ], [ -987654321, %Cond ]
+; CHECK-NEXT: ret i32 %storemerge
+}
+
+; "if then"
+define i32 @test4(i1 %C) {
+ %A = alloca i32
+ store i32 47, i32* %A
+ br i1 %C, label %Cond, label %Cont
+
+Cond:
+ store i32 -987654321, i32* %A
+ br label %Cont
+
+Cont:
+ %V = load i32* %A
+ ret i32 %V
+; CHECK: @test4
+; CHECK-NOT: alloca
+; CHECK: Cont:
+; CHECK-NEXT: %storemerge = phi i32 [ -987654321, %Cond ], [ 47, %0 ]
+; CHECK-NEXT: ret i32 %storemerge
+}
+
+; "if then"
+define void @test5(i1 %C, i32* %P) {
+ store i32 47, i32* %P, align 1
+ br i1 %C, label %Cond, label %Cont
+
+Cond:
+ store i32 -987654321, i32* %P, align 1
+ br label %Cont
+
+Cont:
+ ret void
+; CHECK: @test5
+; CHECK: Cont:
+; CHECK-NEXT: %storemerge = phi i32
+; CHECK-NEXT: store i32 %storemerge, i32* %P, align 1
+; CHECK-NEXT: ret void
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/strcpy_chk-64.ll b/src/LLVM/test/Transforms/InstCombine/strcpy_chk-64.ll
new file mode 100644
index 0000000..036fcbe
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/strcpy_chk-64.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-darwin10.0.0"
+
+define void @func(i8* %i) nounwind ssp {
+; CHECK: @func
+; CHECK: @__strcpy_chk(i8* %arraydecay, i8* %i, i64 32)
+entry:
+ %s = alloca [32 x i8], align 16
+ %arraydecay = getelementptr inbounds [32 x i8]* %s, i32 0, i32 0
+ %call = call i8* @__strcpy_chk(i8* %arraydecay, i8* %i, i64 32)
+ call void @func2(i8* %arraydecay)
+ ret void
+}
+
+declare i8* @__strcpy_chk(i8*, i8*, i64) nounwind
+
+declare void @func2(i8*)
diff --git a/src/LLVM/test/Transforms/InstCombine/strcpy_chk.ll b/src/LLVM/test/Transforms/InstCombine/strcpy_chk.ll
new file mode 100644
index 0000000..8835a0b
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/strcpy_chk.ll
@@ -0,0 +1,13 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+@a = common global [60 x i8] zeroinitializer, align 1 ; <[60 x i8]*> [#uses=1]
+@.str = private constant [8 x i8] c"abcdefg\00" ; <[8 x i8]*> [#uses=1]
+
+define i8* @foo() nounwind {
+; CHECK: @foo
+; CHECK-NEXT: call i8* @strcpy
+ %call = call i8* @__strcpy_chk(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 60) ; <i8*> [#uses=1]
+ ret i8* %call
+}
+
+declare i8* @__strcpy_chk(i8*, i8*, i32) nounwind
diff --git a/src/LLVM/test/Transforms/InstCombine/sub.ll b/src/LLVM/test/Transforms/InstCombine/sub.ll
new file mode 100644
index 0000000..6bc0d83
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/sub.ll
@@ -0,0 +1,303 @@
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
+; Optimize subtracts.
+;
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @test1(i32 %A) {
+ %B = sub i32 %A, %A
+ ret i32 %B
+; CHECK: @test1
+; CHECK: ret i32 0
+}
+
+define i32 @test2(i32 %A) {
+ %B = sub i32 %A, 0
+ ret i32 %B
+; CHECK: @test2
+; CHECK: ret i32 %A
+}
+
+define i32 @test3(i32 %A) {
+ %B = sub i32 0, %A
+ %C = sub i32 0, %B
+ ret i32 %C
+; CHECK: @test3
+; CHECK: ret i32 %A
+}
+
+define i32 @test4(i32 %A, i32 %x) {
+ %B = sub i32 0, %A
+ %C = sub i32 %x, %B
+ ret i32 %C
+; CHECK: @test4
+; CHECK: %C = add i32 %x, %A
+; CHECK: ret i32 %C
+}
+
+define i32 @test5(i32 %A, i32 %B, i32 %C) {
+ %D = sub i32 %B, %C
+ %E = sub i32 %A, %D
+ ret i32 %E
+; CHECK: @test5
+; CHECK: %D1 = sub i32 %C, %B
+; CHECK: %E = add
+; CHECK: ret i32 %E
+}
+
+define i32 @test6(i32 %A, i32 %B) {
+ %C = and i32 %A, %B
+ %D = sub i32 %A, %C
+ ret i32 %D
+; CHECK: @test6
+; CHECK-NEXT: xor i32 %B, -1
+; CHECK-NEXT: %D = and i32
+; CHECK-NEXT: ret i32 %D
+}
+
+define i32 @test7(i32 %A) {
+ %B = sub i32 -1, %A
+ ret i32 %B
+; CHECK: @test7
+; CHECK: %B = xor i32 %A, -1
+; CHECK: ret i32 %B
+}
+
+define i32 @test8(i32 %A) {
+ %B = mul i32 9, %A
+ %C = sub i32 %B, %A
+ ret i32 %C
+; CHECK: @test8
+; CHECK: %C = shl i32 %A, 3
+; CHECK: ret i32 %C
+}
+
+define i32 @test9(i32 %A) {
+ %B = mul i32 3, %A
+ %C = sub i32 %A, %B
+ ret i32 %C
+; CHECK: @test9
+; CHECK: %C = mul i32 %A, -2
+; CHECK: ret i32 %C
+}
+
+define i32 @test10(i32 %A, i32 %B) {
+ %C = sub i32 0, %A
+ %D = sub i32 0, %B
+ %E = mul i32 %C, %D
+ ret i32 %E
+; CHECK: @test10
+; CHECK: %E = mul i32 %A, %B
+; CHECK: ret i32 %E
+}
+
+define i32 @test10a(i32 %A) {
+ %C = sub i32 0, %A
+ %E = mul i32 %C, 7
+ ret i32 %E
+; CHECK: @test10a
+; CHECK: %E = mul i32 %A, -7
+; CHECK: ret i32 %E
+}
+
+define i1 @test11(i8 %A, i8 %B) {
+ %C = sub i8 %A, %B
+ %cD = icmp ne i8 %C, 0
+ ret i1 %cD
+; CHECK: @test11
+; CHECK: %cD = icmp ne i8 %A, %B
+; CHECK: ret i1 %cD
+}
+
+define i32 @test12(i32 %A) {
+ %B = ashr i32 %A, 31
+ %C = sub i32 0, %B
+ ret i32 %C
+; CHECK: @test12
+; CHECK: %C = lshr i32 %A, 31
+; CHECK: ret i32 %C
+}
+
+define i32 @test13(i32 %A) {
+ %B = lshr i32 %A, 31
+ %C = sub i32 0, %B
+ ret i32 %C
+; CHECK: @test13
+; CHECK: %C = ashr i32 %A, 31
+; CHECK: ret i32 %C
+}
+
+define i32 @test14(i32 %A) {
+ %B = lshr i32 %A, 31
+ %C = bitcast i32 %B to i32
+ %D = sub i32 0, %C
+ ret i32 %D
+; CHECK: @test14
+; CHECK: %D = ashr i32 %A, 31
+; CHECK: ret i32 %D
+}
+
+define i32 @test15(i32 %A, i32 %B) {
+ %C = sub i32 0, %A
+ %D = srem i32 %B, %C
+ ret i32 %D
+; CHECK: @test15
+; CHECK: %D = srem i32 %B, %A
+; CHECK: ret i32 %D
+}
+
+define i32 @test16(i32 %A) {
+ %X = sdiv i32 %A, 1123
+ %Y = sub i32 0, %X
+ ret i32 %Y
+; CHECK: @test16
+; CHECK: %Y = sdiv i32 %A, -1123
+; CHECK: ret i32 %Y
+}
+
+; Can't fold the subtract here because negating it might overflow.
+; PR3142
+define i32 @test17(i32 %A) {
+ %B = sub i32 0, %A
+ %C = sdiv i32 %B, 1234
+ ret i32 %C
+; CHECK: @test17
+; CHECK: %B = sub i32 0, %A
+; CHECK: %C = sdiv i32 %B, 1234
+; CHECK: ret i32 %C
+}
+
+define i64 @test18(i64 %Y) {
+ %tmp.4 = shl i64 %Y, 2
+ %tmp.12 = shl i64 %Y, 2
+ %tmp.8 = sub i64 %tmp.4, %tmp.12
+ ret i64 %tmp.8
+; CHECK: @test18
+; CHECK: ret i64 0
+}
+
+define i32 @test19(i32 %X, i32 %Y) {
+ %Z = sub i32 %X, %Y
+ %Q = add i32 %Z, %Y
+ ret i32 %Q
+; CHECK: @test19
+; CHECK: ret i32 %X
+}
+
+define i1 @test20(i32 %g, i32 %h) {
+ %tmp.2 = sub i32 %g, %h
+ %tmp.4 = icmp ne i32 %tmp.2, %g
+ ret i1 %tmp.4
+; CHECK: @test20
+; CHECK: %tmp.4 = icmp ne i32 %h, 0
+; CHECK: ret i1 %tmp.4
+}
+
+define i1 @test21(i32 %g, i32 %h) {
+ %tmp.2 = sub i32 %g, %h
+ %tmp.4 = icmp ne i32 %tmp.2, %g
+ ret i1 %tmp.4
+; CHECK: @test21
+; CHECK: %tmp.4 = icmp ne i32 %h, 0
+; CHECK: ret i1 %tmp.4
+}
+
+; PR2298
+define zeroext i1 @test22(i32 %a, i32 %b) nounwind {
+ %tmp2 = sub i32 0, %a
+ %tmp4 = sub i32 0, %b
+ %tmp5 = icmp eq i32 %tmp2, %tmp4
+ ret i1 %tmp5
+; CHECK: @test22
+; CHECK: %tmp5 = icmp eq i32 %b, %a
+; CHECK: ret i1 %tmp5
+}
+
+; rdar://7362831
+define i32 @test23(i8* %P, i64 %A){
+ %B = getelementptr inbounds i8* %P, i64 %A
+ %C = ptrtoint i8* %B to i64
+ %D = trunc i64 %C to i32
+ %E = ptrtoint i8* %P to i64
+ %F = trunc i64 %E to i32
+ %G = sub i32 %D, %F
+ ret i32 %G
+; CHECK: @test23
+; CHECK-NEXT: = trunc i64 %A to i32
+; CHECK-NEXT: ret i32
+}
+
+define i64 @test24(i8* %P, i64 %A){
+ %B = getelementptr inbounds i8* %P, i64 %A
+ %C = ptrtoint i8* %B to i64
+ %E = ptrtoint i8* %P to i64
+ %G = sub i64 %C, %E
+ ret i64 %G
+; CHECK: @test24
+; CHECK-NEXT: ret i64 %A
+}
+
+define i64 @test24a(i8* %P, i64 %A){
+ %B = getelementptr inbounds i8* %P, i64 %A
+ %C = ptrtoint i8* %B to i64
+ %E = ptrtoint i8* %P to i64
+ %G = sub i64 %E, %C
+ ret i64 %G
+; CHECK: @test24a
+; CHECK-NEXT: sub i64 0, %A
+; CHECK-NEXT: ret i64
+}
+
+@Arr = external global [42 x i16]
+
+define i64 @test24b(i8* %P, i64 %A){
+ %B = getelementptr inbounds [42 x i16]* @Arr, i64 0, i64 %A
+ %C = ptrtoint i16* %B to i64
+ %G = sub i64 %C, ptrtoint ([42 x i16]* @Arr to i64)
+ ret i64 %G
+; CHECK: @test24b
+; CHECK-NEXT: shl nuw i64 %A, 1
+; CHECK-NEXT: ret i64
+}
+
+
+define i64 @test25(i8* %P, i64 %A){
+ %B = getelementptr inbounds [42 x i16]* @Arr, i64 0, i64 %A
+ %C = ptrtoint i16* %B to i64
+ %G = sub i64 %C, ptrtoint (i16* getelementptr ([42 x i16]* @Arr, i64 1, i64 0) to i64)
+ ret i64 %G
+; CHECK: @test25
+; CHECK-NEXT: shl nuw i64 %A, 1
+; CHECK-NEXT: add i64 {{.*}}, -84
+; CHECK-NEXT: ret i64
+}
+
+define i32 @test26(i32 %x) {
+ %shl = shl i32 3, %x
+ %neg = sub i32 0, %shl
+ ret i32 %neg
+; CHECK: @test26
+; CHECK-NEXT: shl i32 -3
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test27(i32 %x, i32 %y) {
+ %mul = mul i32 %y, -8
+ %sub = sub i32 %x, %mul
+ ret i32 %sub
+; CHECK: @test27
+; CHECK-NEXT: shl i32 %y, 3
+; CHECK-NEXT: add i32
+; CHECK-NEXT: ret i32
+}
+
+define i32 @test28(i32 %x, i32 %y, i32 %z) {
+ %neg = sub i32 0, %z
+ %mul = mul i32 %neg, %y
+ %sub = sub i32 %x, %mul
+ ret i32 %sub
+; CHECK: @test28
+; CHECK-NEXT: mul i32 %z, %y
+; CHECK-NEXT: add i32
+; CHECK-NEXT: ret i32
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/trunc.ll b/src/LLVM/test/Transforms/InstCombine/trunc.ll
new file mode 100644
index 0000000..6ec342a
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/trunc.ll
@@ -0,0 +1,119 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+
+; Instcombine should be able to eliminate all of these ext casts.
+
+declare void @use(i32)
+
+define i64 @test1(i64 %a) {
+ %b = trunc i64 %a to i32
+ %c = and i32 %b, 15
+ %d = zext i32 %c to i64
+ call void @use(i32 %b)
+ ret i64 %d
+; CHECK: @test1
+; CHECK: %d = and i64 %a, 15
+; CHECK: ret i64 %d
+}
+define i64 @test2(i64 %a) {
+ %b = trunc i64 %a to i32
+ %c = shl i32 %b, 4
+ %q = ashr i32 %c, 4
+ %d = sext i32 %q to i64
+ call void @use(i32 %b)
+ ret i64 %d
+; CHECK: @test2
+; CHECK: shl i64 %a, 36
+; CHECK: %d = ashr exact i64 {{.*}}, 36
+; CHECK: ret i64 %d
+}
+define i64 @test3(i64 %a) {
+ %b = trunc i64 %a to i32
+ %c = and i32 %b, 8
+ %d = zext i32 %c to i64
+ call void @use(i32 %b)
+ ret i64 %d
+; CHECK: @test3
+; CHECK: %d = and i64 %a, 8
+; CHECK: ret i64 %d
+}
+define i64 @test4(i64 %a) {
+ %b = trunc i64 %a to i32
+ %c = and i32 %b, 8
+ %x = xor i32 %c, 8
+ %d = zext i32 %x to i64
+ call void @use(i32 %b)
+ ret i64 %d
+; CHECK: @test4
+; CHECK: = and i64 %a, 8
+; CHECK: %d = xor i64 {{.*}}, 8
+; CHECK: ret i64 %d
+}
+
+define i32 @test5(i32 %A) {
+ %B = zext i32 %A to i128
+ %C = lshr i128 %B, 16
+ %D = trunc i128 %C to i32
+ ret i32 %D
+; CHECK: @test5
+; CHECK: %C = lshr i32 %A, 16
+; CHECK: ret i32 %C
+}
+
+define i32 @test6(i64 %A) {
+ %B = zext i64 %A to i128
+ %C = lshr i128 %B, 32
+ %D = trunc i128 %C to i32
+ ret i32 %D
+; CHECK: @test6
+; CHECK: %C = lshr i64 %A, 32
+; CHECK: %D = trunc i64 %C to i32
+; CHECK: ret i32 %D
+}
+
+define i92 @test7(i64 %A) {
+ %B = zext i64 %A to i128
+ %C = lshr i128 %B, 32
+ %D = trunc i128 %C to i92
+ ret i92 %D
+; CHECK: @test7
+; CHECK: %B = zext i64 %A to i92
+; CHECK: %C = lshr i92 %B, 32
+; CHECK: ret i92 %C
+}
+
+define i64 @test8(i32 %A, i32 %B) {
+ %tmp38 = zext i32 %A to i128
+ %tmp32 = zext i32 %B to i128
+ %tmp33 = shl i128 %tmp32, 32
+ %ins35 = or i128 %tmp33, %tmp38
+ %tmp42 = trunc i128 %ins35 to i64
+ ret i64 %tmp42
+; CHECK: @test8
+; CHECK: %tmp38 = zext i32 %A to i64
+; CHECK: %tmp32 = zext i32 %B to i64
+; CHECK: %tmp33 = shl nuw i64 %tmp32, 32
+; CHECK: %ins35 = or i64 %tmp33, %tmp38
+; CHECK: ret i64 %ins35
+}
+
+define i8 @test9(i32 %X) {
+ %Y = and i32 %X, 42
+ %Z = trunc i32 %Y to i8
+ ret i8 %Z
+; CHECK: @test9
+; CHECK: trunc
+; CHECK: and
+; CHECK: ret
+}
+
+; rdar://8808586
+define i8 @test10(i32 %X) {
+ %Y = trunc i32 %X to i8
+ %Z = and i8 %Y, 42
+ ret i8 %Z
+; CHECK: @test10
+; CHECK: trunc
+; CHECK: and
+; CHECK: ret
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/udiv-simplify-bug-0.ll b/src/LLVM/test/Transforms/InstCombine/udiv-simplify-bug-0.ll
new file mode 100644
index 0000000..bfdd98c
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/udiv-simplify-bug-0.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep {ret i64 0} | count 2
+
+define i64 @foo(i32 %x) nounwind {
+ %y = lshr i32 %x, 1
+ %r = udiv i32 %y, -1
+ %z = sext i32 %r to i64
+ ret i64 %z
+}
+define i64 @bar(i32 %x) nounwind {
+ %y = lshr i32 %x, 31
+ %r = udiv i32 %y, 3
+ %z = sext i32 %r to i64
+ ret i64 %z
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/udiv-simplify-bug-1.ll b/src/LLVM/test/Transforms/InstCombine/udiv-simplify-bug-1.ll
new file mode 100644
index 0000000..d95e8f8
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/udiv-simplify-bug-1.ll
@@ -0,0 +1,20 @@
+; RUN: opt < %s -instcombine -S > %t1.ll
+; RUN: grep udiv %t1.ll | count 2
+; RUN: grep zext %t1.ll | count 2
+; PR2274
+
+; The udiv instructions shouldn't be optimized away, and the
+; sext instructions should be optimized to zext.
+
+define i64 @bar(i32 %x) nounwind {
+ %y = lshr i32 %x, 30
+ %r = udiv i32 %y, 3
+ %z = sext i32 %r to i64
+ ret i64 %z
+}
+define i64 @qux(i32 %x, i32 %v) nounwind {
+ %y = lshr i32 %x, 31
+ %r = udiv i32 %y, %v
+ %z = sext i32 %r to i64
+ ret i64 %z
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/udiv_select_to_select_shift.ll b/src/LLVM/test/Transforms/InstCombine/udiv_select_to_select_shift.ll
new file mode 100644
index 0000000..34ff8bd
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/udiv_select_to_select_shift.ll
@@ -0,0 +1,17 @@
+; Test that this transform works:
+; udiv X, (Select Cond, C1, C2) --> Select Cond, (shr X, C1), (shr X, C2)
+;
+; RUN: opt < %s -instcombine -S -o %t
+; RUN: not grep select %t
+; RUN: grep lshr %t | count 2
+; RUN: not grep udiv %t
+
+define i64 @test(i64 %X, i1 %Cond ) {
+entry:
+ %divisor1 = select i1 %Cond, i64 16, i64 8
+ %quotient1 = udiv i64 %X, %divisor1
+ %divisor2 = select i1 %Cond, i64 8, i64 0
+ %quotient2 = udiv i64 %X, %divisor2
+ %sum = add i64 %quotient1, %quotient2
+ ret i64 %sum
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/udivrem-change-width.ll b/src/LLVM/test/Transforms/InstCombine/udivrem-change-width.ll
new file mode 100644
index 0000000..b388a3b
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/udivrem-change-width.ll
@@ -0,0 +1,62 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
+; PR4548
+define i8 @udiv_i8(i8 %a, i8 %b) nounwind {
+ %conv = zext i8 %a to i32
+ %conv2 = zext i8 %b to i32
+ %div = udiv i32 %conv, %conv2
+ %conv3 = trunc i32 %div to i8
+ ret i8 %conv3
+; CHECK: @udiv_i8
+; CHECK: udiv i8 %a, %b
+}
+
+define i8 @urem_i8(i8 %a, i8 %b) nounwind {
+ %conv = zext i8 %a to i32
+ %conv2 = zext i8 %b to i32
+ %div = urem i32 %conv, %conv2
+ %conv3 = trunc i32 %div to i8
+ ret i8 %conv3
+; CHECK: @urem_i8
+; CHECK: urem i8 %a, %b
+}
+
+define i32 @udiv_i32(i8 %a, i8 %b) nounwind {
+ %conv = zext i8 %a to i32
+ %conv2 = zext i8 %b to i32
+ %div = udiv i32 %conv, %conv2
+ ret i32 %div
+; CHECK: @udiv_i32
+; CHECK: udiv i8 %a, %b
+; CHECK: zext
+}
+
+define i32 @urem_i32(i8 %a, i8 %b) nounwind {
+ %conv = zext i8 %a to i32
+ %conv2 = zext i8 %b to i32
+ %div = urem i32 %conv, %conv2
+ ret i32 %div
+; CHECK: @urem_i32
+; CHECK: urem i8 %a, %b
+; CHECK: zext
+}
+
+define i32 @udiv_i32_c(i8 %a) nounwind {
+ %conv = zext i8 %a to i32
+ %div = udiv i32 %conv, 10
+ ret i32 %div
+; CHECK: @udiv_i32_c
+; CHECK: udiv i8 %a, 10
+; CHECK: zext
+}
+
+define i32 @urem_i32_c(i8 %a) nounwind {
+ %conv = zext i8 %a to i32
+ %div = urem i32 %conv, 10
+ ret i32 %div
+; CHECK: @urem_i32_c
+; CHECK: urem i8 %a, 10
+; CHECK: zext
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/urem-simplify-bug.ll b/src/LLVM/test/Transforms/InstCombine/urem-simplify-bug.ll
new file mode 100644
index 0000000..229f1a8
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/urem-simplify-bug.ll
@@ -0,0 +1,32 @@
+; RUN: opt < %s -instcombine -S | grep {= or i32 %x, -5}
+
+@.str = internal constant [5 x i8] c"foo\0A\00" ; <[5 x i8]*> [#uses=1]
+@.str1 = internal constant [5 x i8] c"bar\0A\00" ; <[5 x i8]*> [#uses=1]
+
+define i32 @main() nounwind {
+entry:
+ %x = call i32 @func_11( ) nounwind ; <i32> [#uses=1]
+ %tmp3 = or i32 %x, -5 ; <i32> [#uses=1]
+ %tmp5 = urem i32 251, %tmp3 ; <i32> [#uses=1]
+ %tmp6 = icmp ne i32 %tmp5, 0 ; <i1> [#uses=1]
+ %tmp67 = zext i1 %tmp6 to i32 ; <i32> [#uses=1]
+ %tmp9 = urem i32 %tmp67, 95 ; <i32> [#uses=1]
+ %tmp10 = and i32 %tmp9, 1 ; <i32> [#uses=1]
+ %tmp12 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
+ br i1 %tmp12, label %bb14, label %bb
+
+bb: ; preds = %entry
+ br label %bb15
+
+bb14: ; preds = %entry
+ br label %bb15
+
+bb15: ; preds = %bb14, %bb
+ %iftmp.0.0 = phi i8* [ getelementptr ([5 x i8]* @.str1, i32 0, i32 0), %bb14 ], [ getelementptr ([5 x i8]* @.str, i32 0, i32 0), %bb ] ; <i8*> [#uses=1]
+ %tmp17 = call i32 (i8*, ...)* @printf( i8* %iftmp.0.0 ) nounwind ; <i32> [#uses=0]
+ ret i32 0
+}
+
+declare i32 @func_11()
+
+declare i32 @printf(i8*, ...) nounwind
diff --git a/src/LLVM/test/Transforms/InstCombine/urem.ll b/src/LLVM/test/Transforms/InstCombine/urem.ll
new file mode 100644
index 0000000..5108422
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/urem.ll
@@ -0,0 +1,8 @@
+; RUN: opt < %s -instcombine -S | grep urem
+
+define i64 @rem_unsigned(i64 %x1, i64 %y2) {
+ %r = udiv i64 %x1, %y2
+ %r7 = mul i64 %r, %y2
+ %r8 = sub i64 %x1, %r7
+ ret i64 %r8
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/vec_demanded_elts.ll b/src/LLVM/test/Transforms/InstCombine/vec_demanded_elts.ll
new file mode 100644
index 0000000..87c731a
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -0,0 +1,165 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i16 @test1(float %f) {
+entry:
+; CHECK: @test1
+; CHECK: fmul float
+; CHECK-NOT: insertelement {{.*}} 0.00
+; CHECK-NOT: call {{.*}} @llvm.x86.sse.mul
+; CHECK-NOT: call {{.*}} @llvm.x86.sse.sub
+; CHECK: ret
+ %tmp = insertelement <4 x float> undef, float %f, i32 0 ; <<4 x float>> [#uses=1]
+ %tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
+ %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
+ %tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
+ %tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %tmp12, <4 x float> < float 1.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
+ %tmp37 = tail call <4 x float> @llvm.x86.sse.mul.ss( <4 x float> %tmp28, <4 x float> < float 5.000000e-01, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
+ %tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp37, <4 x float> < float 6.553500e+04, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
+ %tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> zeroinitializer ) ; <<4 x float>> [#uses=1]
+ %tmp.upgrd.1 = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 ) ; <i32> [#uses=1]
+ %tmp69 = trunc i32 %tmp.upgrd.1 to i16 ; <i16> [#uses=1]
+ ret i16 %tmp69
+}
+
+define i32 @test2(float %f) {
+; CHECK: @test2
+; CHECK-NOT: insertelement
+; CHECK-NOT: extractelement
+; CHECK: ret
+ %tmp5 = fmul float %f, %f
+ %tmp9 = insertelement <4 x float> undef, float %tmp5, i32 0
+ %tmp10 = insertelement <4 x float> %tmp9, float 0.000000e+00, i32 1
+ %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2
+ %tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, i32 3
+ %tmp19 = bitcast <4 x float> %tmp12 to <4 x i32>
+ %tmp21 = extractelement <4 x i32> %tmp19, i32 0
+ ret i32 %tmp21
+}
+
+define i64 @test3(float %f, double %d) {
+; CHECK: @test3
+; CHECK-NOT: insertelement {{.*}} 0.00
+; CHECK: ret
+entry:
+ %v00 = insertelement <4 x float> undef, float %f, i32 0
+ %v01 = insertelement <4 x float> %v00, float 0.000000e+00, i32 1
+ %v02 = insertelement <4 x float> %v01, float 0.000000e+00, i32 2
+ %v03 = insertelement <4 x float> %v02, float 0.000000e+00, i32 3
+ %tmp0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> %v03)
+ %v10 = insertelement <4 x float> undef, float %f, i32 0
+ %v11 = insertelement <4 x float> %v10, float 0.000000e+00, i32 1
+ %v12 = insertelement <4 x float> %v11, float 0.000000e+00, i32 2
+ %v13 = insertelement <4 x float> %v12, float 0.000000e+00, i32 3
+ %tmp1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %v13)
+ %v20 = insertelement <4 x float> undef, float %f, i32 0
+ %v21 = insertelement <4 x float> %v20, float 0.000000e+00, i32 1
+ %v22 = insertelement <4 x float> %v21, float 0.000000e+00, i32 2
+ %v23 = insertelement <4 x float> %v22, float 0.000000e+00, i32 3
+ %tmp2 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %v23)
+ %v30 = insertelement <4 x float> undef, float %f, i32 0
+ %v31 = insertelement <4 x float> %v30, float 0.000000e+00, i32 1
+ %v32 = insertelement <4 x float> %v31, float 0.000000e+00, i32 2
+ %v33 = insertelement <4 x float> %v32, float 0.000000e+00, i32 3
+ %tmp3 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %v33)
+ %v40 = insertelement <2 x double> undef, double %d, i32 0
+ %v41 = insertelement <2 x double> %v40, double 0.000000e+00, i32 1
+ %tmp4 = tail call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %v41)
+ %v50 = insertelement <2 x double> undef, double %d, i32 0
+ %v51 = insertelement <2 x double> %v50, double 0.000000e+00, i32 1
+ %tmp5 = tail call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %v51)
+ %v60 = insertelement <2 x double> undef, double %d, i32 0
+ %v61 = insertelement <2 x double> %v60, double 0.000000e+00, i32 1
+ %tmp6 = tail call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %v61)
+ %v70 = insertelement <2 x double> undef, double %d, i32 0
+ %v71 = insertelement <2 x double> %v70, double 0.000000e+00, i32 1
+ %tmp7 = tail call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %v71)
+ %tmp8 = add i32 %tmp0, %tmp2
+ %tmp9 = add i32 %tmp4, %tmp6
+ %tmp10 = add i32 %tmp8, %tmp9
+ %tmp11 = sext i32 %tmp10 to i64
+ %tmp12 = add i64 %tmp1, %tmp3
+ %tmp13 = add i64 %tmp5, %tmp7
+ %tmp14 = add i64 %tmp12, %tmp13
+ %tmp15 = add i64 %tmp11, %tmp14
+ ret i64 %tmp15
+}
+
+define void @get_image() nounwind {
+; CHECK: @get_image
+; CHECK-NOT: extractelement
+; CHECK: unreachable
+entry:
+ %0 = call i32 @fgetc(i8* null) nounwind ; <i32> [#uses=1]
+ %1 = trunc i32 %0 to i8 ; <i8> [#uses=1]
+ %tmp2 = insertelement <100 x i8> zeroinitializer, i8 %1, i32 1 ; <<100 x i8>> [#uses=1]
+ %tmp1 = extractelement <100 x i8> %tmp2, i32 0 ; <i8> [#uses=1]
+ %2 = icmp eq i8 %tmp1, 80 ; <i1> [#uses=1]
+ br i1 %2, label %bb2, label %bb3
+
+bb2: ; preds = %entry
+ br label %bb3
+
+bb3: ; preds = %bb2, %entry
+ unreachable
+}
+
+; PR4340
+define void @vac(<4 x float>* nocapture %a) nounwind {
+; CHECK: @vac
+; CHECK-NOT: load
+; CHECK: ret
+entry:
+ %tmp1 = load <4 x float>* %a ; <<4 x float>> [#uses=1]
+ %vecins = insertelement <4 x float> %tmp1, float 0.000000e+00, i32 0 ; <<4 x float>> [#uses=1]
+ %vecins4 = insertelement <4 x float> %vecins, float 0.000000e+00, i32 1; <<4 x float>> [#uses=1]
+ %vecins6 = insertelement <4 x float> %vecins4, float 0.000000e+00, i32 2; <<4 x float>> [#uses=1]
+ %vecins8 = insertelement <4 x float> %vecins6, float 0.000000e+00, i32 3; <<4 x float>> [#uses=1]
+ store <4 x float> %vecins8, <4 x float>* %a
+ ret void
+}
+
+declare i32 @fgetc(i8*)
+
+declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
+
+declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>)
+
+declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
+
+declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>)
+
+declare i32 @llvm.x86.sse.cvtss2si(<4 x float>)
+declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>)
+declare i32 @llvm.x86.sse.cvttss2si(<4 x float>)
+declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>)
+declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>)
+declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>)
+declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>)
+declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>)
+
+; <rdar://problem/6945110>
+define <4 x i32> @kernel3_vertical(<4 x i16> * %src, <8 x i16> * %foo) nounwind {
+entry:
+ %tmp = load <4 x i16>* %src
+ %tmp1 = load <8 x i16>* %foo
+; CHECK: %tmp2 = shufflevector
+ %tmp2 = shufflevector <4 x i16> %tmp, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+; pmovzxwd ignores the upper 64-bits of its input; -instcombine should remove this shuffle:
+; CHECK-NOT: shufflevector
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: pmovzxwd
+ %0 = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %tmp3)
+ ret <4 x i32> %0
+}
+declare <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16>) nounwind readnone
+
+define <4 x float> @dead_shuffle_elt(<4 x float> %x, <2 x float> %y) nounwind {
+entry:
+; CHECK: define <4 x float> @dead_shuffle_elt
+; CHECK: shufflevector <2 x float> %y, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %shuffle.i = shufflevector <2 x float> %y, <2 x float> %y, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+ %shuffle9.i = shufflevector <4 x float> %x, <4 x float> %shuffle.i, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
+ ret <4 x float> %shuffle9.i
+}
+
+
diff --git a/src/LLVM/test/Transforms/InstCombine/vec_extract_elt.ll b/src/LLVM/test/Transforms/InstCombine/vec_extract_elt.ll
new file mode 100644
index 0000000..5599fc9
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/vec_extract_elt.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | not grep extractelement
+
+define i32 @test(float %f) {
+ %tmp7 = insertelement <4 x float> undef, float %f, i32 0 ; <<4 x float>> [#uses=1]
+ %tmp17 = bitcast <4 x float> %tmp7 to <4 x i32> ; <<4 x i32>> [#uses=1]
+ %tmp19 = extractelement <4 x i32> %tmp17, i32 0 ; <i32> [#uses=1]
+ ret i32 %tmp19
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/vec_insertelt.ll b/src/LLVM/test/Transforms/InstCombine/vec_insertelt.ll
new file mode 100644
index 0000000..35882f8
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/vec_insertelt.ll
@@ -0,0 +1,7 @@
+; RUN: opt < %s -instcombine -S | grep {ret <4 x i32> %A}
+
+; PR1286
+define <4 x i32> @test1(<4 x i32> %A) {
+ %B = insertelement <4 x i32> %A, i32 undef, i32 1
+ ret <4 x i32> %B
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/vec_narrow.ll b/src/LLVM/test/Transforms/InstCombine/vec_narrow.ll
new file mode 100644
index 0000000..4ccbedc
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/vec_narrow.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | grep {fadd float}
+
+
+define float @test(<4 x float> %A, <4 x float> %B, float %f) {
+ %C = insertelement <4 x float> %A, float %f, i32 0 ; <%V> [#uses=1]
+ %D = fadd <4 x float> %C, %B ; <%V> [#uses=1]
+ %E = extractelement <4 x float> %D, i32 0 ; <float> [#uses=1]
+ ret float %E
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/vec_sext.ll b/src/LLVM/test/Transforms/InstCombine/vec_sext.ll
new file mode 100644
index 0000000..d7ab96b
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/vec_sext.ll
@@ -0,0 +1,22 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define <4 x i32> @psignd_3(<4 x i32> %a, <4 x i32> %b) nounwind ssp {
+entry:
+ %cmp = icmp slt <4 x i32> %b, zeroinitializer
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %sub = sub nsw <4 x i32> zeroinitializer, %a
+ %0 = icmp slt <4 x i32> %sext, zeroinitializer
+ %sext3 = sext <4 x i1> %0 to <4 x i32>
+ %1 = xor <4 x i32> %sext3, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %2 = and <4 x i32> %a, %1
+ %3 = and <4 x i32> %sext3, %sub
+ %cond = or <4 x i32> %2, %3
+ ret <4 x i32> %cond
+
+; CHECK: ashr <4 x i32> %b, <i32 31, i32 31, i32 31, i32 31>
+; CHECK: sub nsw <4 x i32> zeroinitializer, %a
+; CHECK: xor <4 x i32> %b.lobit, <i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK: and <4 x i32> %a, %0
+; CHECK: and <4 x i32> %b.lobit, %sub
+; CHECK: or <4 x i32> %1, %2
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/vec_shuffle.ll b/src/LLVM/test/Transforms/InstCombine/vec_shuffle.ll
new file mode 100644
index 0000000..f2822c3
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/vec_shuffle.ll
@@ -0,0 +1,109 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define <4 x float> @test1(<4 x float> %v1) {
+; CHECK: @test1
+; CHECK: ret <4 x float> %v1
+ %v2 = shufflevector <4 x float> %v1, <4 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x float> %v2
+}
+
+define <4 x float> @test2(<4 x float> %v1) {
+; CHECK: @test2
+; CHECK: ret <4 x float> %v1
+ %v2 = shufflevector <4 x float> %v1, <4 x float> %v1, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %v2
+}
+
+define float @test3(<4 x float> %A, <4 x float> %B, float %f) {
+; CHECK: @test3
+; CHECK: ret float %f
+ %C = insertelement <4 x float> %A, float %f, i32 0
+ %D = shufflevector <4 x float> %C, <4 x float> %B, <4 x i32> <i32 5, i32 0, i32 2, i32 7>
+ %E = extractelement <4 x float> %D, i32 1
+ ret float %E
+}
+
+define i32 @test4(<4 x i32> %X) {
+; CHECK: @test4
+; CHECK-NEXT: extractelement
+; CHECK-NEXT: ret
+ %tmp152.i53899.i = shufflevector <4 x i32> %X, <4 x i32> undef, <4 x i32> zeroinitializer
+ %tmp34 = extractelement <4 x i32> %tmp152.i53899.i, i32 0
+ ret i32 %tmp34
+}
+
+define i32 @test5(<4 x i32> %X) {
+; CHECK: @test5
+; CHECK-NEXT: extractelement
+; CHECK-NEXT: ret
+ %tmp152.i53899.i = shufflevector <4 x i32> %X, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 undef, i32 undef>
+ %tmp34 = extractelement <4 x i32> %tmp152.i53899.i, i32 0
+ ret i32 %tmp34
+}
+
+define float @test6(<4 x float> %X) {
+; CHECK: @test6
+; CHECK-NEXT: extractelement
+; CHECK-NEXT: ret
+ %X1 = bitcast <4 x float> %X to <4 x i32>
+ %tmp152.i53899.i = shufflevector <4 x i32> %X1, <4 x i32> undef, <4 x i32> zeroinitializer
+ %tmp152.i53900.i = bitcast <4 x i32> %tmp152.i53899.i to <4 x float>
+ %tmp34 = extractelement <4 x float> %tmp152.i53900.i, i32 0
+ ret float %tmp34
+}
+
+define <4 x float> @test7(<4 x float> %tmp45.i) {
+; CHECK: @test7
+; CHECK-NEXT: ret <4 x float> %tmp45.i
+ %tmp1642.i = shufflevector <4 x float> %tmp45.i, <4 x float> undef, <4 x i32> < i32 0, i32 1, i32 6, i32 7 >
+ ret <4 x float> %tmp1642.i
+}
+
+; This should turn into a single shuffle.
+define <4 x float> @test8(<4 x float> %tmp, <4 x float> %tmp1) {
+; CHECK: @test8
+; CHECK-NEXT: shufflevector
+; CHECK-NEXT: ret
+ %tmp4 = extractelement <4 x float> %tmp, i32 1
+ %tmp2 = extractelement <4 x float> %tmp, i32 3
+ %tmp1.upgrd.1 = extractelement <4 x float> %tmp1, i32 0
+ %tmp128 = insertelement <4 x float> undef, float %tmp4, i32 0
+ %tmp130 = insertelement <4 x float> %tmp128, float undef, i32 1
+ %tmp132 = insertelement <4 x float> %tmp130, float %tmp2, i32 2
+ %tmp134 = insertelement <4 x float> %tmp132, float %tmp1.upgrd.1, i32 3
+ ret <4 x float> %tmp134
+}
+
+; Test fold of two shuffles where the first shuffle's vector inputs are a
+; different length than the second's.
+define <4 x i8> @test9(<16 x i8> %tmp6) nounwind {
+; CHECK: @test9
+; CHECK-NEXT: shufflevector
+; CHECK-NEXT: ret
+ %tmp7 = shufflevector <16 x i8> %tmp6, <16 x i8> undef, <4 x i32> < i32 13, i32 9, i32 4, i32 13 > ; <<4 x i8>> [#uses=1]
+ %tmp9 = shufflevector <4 x i8> %tmp7, <4 x i8> undef, <4 x i32> < i32 3, i32 1, i32 2, i32 0 > ; <<4 x i8>> [#uses=1]
+ ret <4 x i8> %tmp9
+}
+
+; Same as test9, but make sure that "undef" mask values are not confused with
+; mask values of 2*N, where N is the mask length. These shuffles should not
+; be folded (because [8,9,4,8] may not be a mask supported by the target).
+define <4 x i8> @test9a(<16 x i8> %tmp6) nounwind {
+; CHECK: @test9a
+; CHECK-NEXT: shufflevector
+; CHECK-NEXT: shufflevector
+; CHECK-NEXT: ret
+ %tmp7 = shufflevector <16 x i8> %tmp6, <16 x i8> undef, <4 x i32> < i32 undef, i32 9, i32 4, i32 8 > ; <<4 x i8>> [#uses=1]
+ %tmp9 = shufflevector <4 x i8> %tmp7, <4 x i8> undef, <4 x i32> < i32 3, i32 1, i32 2, i32 0 > ; <<4 x i8>> [#uses=1]
+ ret <4 x i8> %tmp9
+}
+
+; Redundant vector splats should be removed. Radar 8597790.
+define <4 x i32> @test10(<4 x i32> %tmp5) nounwind {
+; CHECK: @test10
+; CHECK-NEXT: shufflevector
+; CHECK-NEXT: ret
+ %tmp6 = shufflevector <4 x i32> %tmp5, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+ %tmp7 = shufflevector <4 x i32> %tmp6, <4 x i32> undef, <4 x i32> zeroinitializer
+ ret <4 x i32> %tmp7
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/vector-casts.ll b/src/LLVM/test/Transforms/InstCombine/vector-casts.ll
new file mode 100644
index 0000000..7bbf53c
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/vector-casts.ll
@@ -0,0 +1,151 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; This turns into a&1 != 0
+define <2 x i1> @test1(<2 x i64> %a) {
+ %t = trunc <2 x i64> %a to <2 x i1>
+ ret <2 x i1> %t
+
+; CHECK: @test1
+; CHECK: and <2 x i64> %a, <i64 1, i64 1>
+; CHECK: icmp ne <2 x i64> %1, zeroinitializer
+}
+
+; The ashr turns into an lshr.
+define <2 x i64> @test2(<2 x i64> %a) {
+ %b = and <2 x i64> %a, <i64 65535, i64 65535>
+ %t = ashr <2 x i64> %b, <i64 1, i64 1>
+ ret <2 x i64> %t
+
+; CHECK: @test2
+; CHECK: and <2 x i64> %a, <i64 65535, i64 65535>
+; CHECK: lshr <2 x i64> %b, <i64 1, i64 1>
+}
+
+
+
+define <2 x i64> @test3(<4 x float> %a, <4 x float> %b) nounwind readnone {
+entry:
+ %cmp = fcmp ord <4 x float> %a, zeroinitializer
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %cmp4 = fcmp ord <4 x float> %b, zeroinitializer
+ %sext5 = sext <4 x i1> %cmp4 to <4 x i32>
+ %and = and <4 x i32> %sext, %sext5
+ %conv = bitcast <4 x i32> %and to <2 x i64>
+ ret <2 x i64> %conv
+
+; CHECK: @test3
+; CHECK: fcmp ord <4 x float> %a, %b
+}
+
+define <2 x i64> @test4(<4 x float> %a, <4 x float> %b) nounwind readnone {
+entry:
+ %cmp = fcmp uno <4 x float> %a, zeroinitializer
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %cmp4 = fcmp uno <4 x float> %b, zeroinitializer
+ %sext5 = sext <4 x i1> %cmp4 to <4 x i32>
+ %or = or <4 x i32> %sext, %sext5
+ %conv = bitcast <4 x i32> %or to <2 x i64>
+ ret <2 x i64> %conv
+; CHECK: @test4
+; CHECK: fcmp uno <4 x float> %a, %b
+}
+
+
+; rdar://7434900
+define <2 x i64> @test5(<4 x float> %a, <4 x float> %b) nounwind readnone {
+entry:
+ %cmp = fcmp ult <4 x float> %a, zeroinitializer
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %cmp4 = fcmp ult <4 x float> %b, zeroinitializer
+ %sext5 = sext <4 x i1> %cmp4 to <4 x i32>
+ %and = and <4 x i32> %sext, %sext5
+ %conv = bitcast <4 x i32> %and to <2 x i64>
+ ret <2 x i64> %conv
+
+; CHECK: @test5
+; CHECK: sext <4 x i1> %cmp to <4 x i32>
+; CHECK: sext <4 x i1> %cmp4 to <4 x i32>
+}
+
+
+define void @convert(<2 x i32>* %dst.addr, <2 x i64> %src) nounwind {
+entry:
+ %val = trunc <2 x i64> %src to <2 x i32>
+ %add = add <2 x i32> %val, <i32 1, i32 1>
+ store <2 x i32> %add, <2 x i32>* %dst.addr
+ ret void
+}
+
+define <2 x i65> @foo(<2 x i64> %t) {
+ %a = trunc <2 x i64> %t to <2 x i32>
+ %b = zext <2 x i32> %a to <2 x i65>
+ ret <2 x i65> %b
+}
+define <2 x i64> @bar(<2 x i65> %t) {
+ %a = trunc <2 x i65> %t to <2 x i32>
+ %b = zext <2 x i32> %a to <2 x i64>
+ ret <2 x i64> %b
+}
+define <2 x i65> @foos(<2 x i64> %t) {
+ %a = trunc <2 x i64> %t to <2 x i32>
+ %b = sext <2 x i32> %a to <2 x i65>
+ ret <2 x i65> %b
+}
+define <2 x i64> @bars(<2 x i65> %t) {
+ %a = trunc <2 x i65> %t to <2 x i32>
+ %b = sext <2 x i32> %a to <2 x i64>
+ ret <2 x i64> %b
+}
+define <2 x i64> @quxs(<2 x i64> %t) {
+ %a = trunc <2 x i64> %t to <2 x i32>
+ %b = sext <2 x i32> %a to <2 x i64>
+ ret <2 x i64> %b
+}
+define <2 x i64> @quxt(<2 x i64> %t) {
+ %a = shl <2 x i64> %t, <i64 32, i64 32>
+ %b = ashr <2 x i64> %a, <i64 32, i64 32>
+ ret <2 x i64> %b
+}
+define <2 x double> @fa(<2 x double> %t) {
+ %a = fptrunc <2 x double> %t to <2 x float>
+ %b = fpext <2 x float> %a to <2 x double>
+ ret <2 x double> %b
+}
+define <2 x double> @fb(<2 x double> %t) {
+ %a = fptoui <2 x double> %t to <2 x i64>
+ %b = uitofp <2 x i64> %a to <2 x double>
+ ret <2 x double> %b
+}
+define <2 x double> @fc(<2 x double> %t) {
+ %a = fptosi <2 x double> %t to <2 x i64>
+ %b = sitofp <2 x i64> %a to <2 x double>
+ ret <2 x double> %b
+}
+
+; PR9228
+; This was a crasher; the only check is that the function is still emitted.
+define <4 x float> @f(i32 %a) nounwind alwaysinline {
+; CHECK: @f
+entry:
+ %dim = insertelement <4 x i32> undef, i32 %a, i32 0
+ %dim30 = insertelement <4 x i32> %dim, i32 %a, i32 1
+ %dim31 = insertelement <4 x i32> %dim30, i32 %a, i32 2
+ %dim32 = insertelement <4 x i32> %dim31, i32 %a, i32 3
+
+ %offset_ptr = getelementptr <4 x float>* null, i32 1
+ %offset_int = ptrtoint <4 x float>* %offset_ptr to i64
+ %sizeof32 = trunc i64 %offset_int to i32
+
+ %smearinsert33 = insertelement <4 x i32> undef, i32 %sizeof32, i32 0
+ %smearinsert34 = insertelement <4 x i32> %smearinsert33, i32 %sizeof32, i32 1
+ %smearinsert35 = insertelement <4 x i32> %smearinsert34, i32 %sizeof32, i32 2
+ %smearinsert36 = insertelement <4 x i32> %smearinsert35, i32 %sizeof32, i32 3
+
+ %delta_scale = mul <4 x i32> %dim32, %smearinsert36
+ %offset_delta = add <4 x i32> zeroinitializer, %delta_scale
+
+ %offset_varying_delta = add <4 x i32> %offset_delta, undef
+
+ ret <4 x float> undef
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/vector-srem.ll b/src/LLVM/test/Transforms/InstCombine/vector-srem.ll
new file mode 100644
index 0000000..acb11c5
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/vector-srem.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instcombine -S | grep {srem <4 x i32>}
+
+define <4 x i32> @foo(<4 x i32> %t, <4 x i32> %u)
+{
+ %k = sdiv <4 x i32> %t, %u
+ %l = mul <4 x i32> %k, %u
+ %m = sub <4 x i32> %t, %l
+ ret <4 x i32> %m
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/volatile_store.ll b/src/LLVM/test/Transforms/InstCombine/volatile_store.ll
new file mode 100644
index 0000000..0518e5a
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/volatile_store.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -instcombine -S | grep {store volatile}
+; RUN: opt < %s -instcombine -S | grep {load volatile}
+
+@x = weak global i32 0 ; <i32*> [#uses=2]
+
+define void @self_assign_1() {
+entry:
+ %tmp = volatile load i32* @x ; <i32> [#uses=1]
+ volatile store i32 %tmp, i32* @x
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/x86-crc32-demanded.ll b/src/LLVM/test/Transforms/InstCombine/x86-crc32-demanded.ll
new file mode 100644
index 0000000..878b97d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/x86-crc32-demanded.ll
@@ -0,0 +1,17 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; crc32 with a 64-bit destination zeros the high 32 bits.
+; rdar://9467055
+
+define i64 @test() nounwind {
+entry:
+; CHECK: test
+; CHECK: tail call i64 @llvm.x86.sse42.crc32.64.64
+; CHECK-NOT: and
+; CHECK: ret
+ %0 = tail call i64 @llvm.x86.sse42.crc32.64.64(i64 0, i64 4) nounwind
+ %1 = and i64 %0, 4294967295
+ ret i64 %1
+}
+
+declare i64 @llvm.x86.sse42.crc32.64.64(i64, i64) nounwind readnone
diff --git a/src/LLVM/test/Transforms/InstCombine/xor-undef.ll b/src/LLVM/test/Transforms/InstCombine/xor-undef.ll
new file mode 100644
index 0000000..cf72955
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/xor-undef.ll
@@ -0,0 +1,6 @@
+; RUN: opt < %s -instcombine -S | grep zeroinitializer
+
+define <2 x i64> @f() {
+ %tmp = xor <2 x i64> undef, undef
+ ret <2 x i64> %tmp
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/xor.ll b/src/LLVM/test/Transforms/InstCombine/xor.ll
new file mode 100644
index 0000000..0808716
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/xor.ll
@@ -0,0 +1,193 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | \
+; RUN: not grep {xor }
+; END.
+@G1 = global i32 0 ; <i32*> [#uses=1]
+@G2 = global i32 0 ; <i32*> [#uses=1]
+
+define i1 @test0(i1 %A) {
+ %B = xor i1 %A, false ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i32 @test1(i32 %A) {
+ %B = xor i32 %A, 0 ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i1 @test2(i1 %A) {
+ %B = xor i1 %A, %A ; <i1> [#uses=1]
+ ret i1 %B
+}
+
+define i32 @test3(i32 %A) {
+ %B = xor i32 %A, %A ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test4(i32 %A) {
+ %NotA = xor i32 -1, %A ; <i32> [#uses=1]
+ %B = xor i32 %A, %NotA ; <i32> [#uses=1]
+ ret i32 %B
+}
+
+define i32 @test5(i32 %A) {
+ %t1 = or i32 %A, 123 ; <i32> [#uses=1]
+ %r = xor i32 %t1, 123 ; <i32> [#uses=1]
+ ret i32 %r
+}
+
+define i8 @test6(i8 %A) {
+ %B = xor i8 %A, 17 ; <i8> [#uses=1]
+ %C = xor i8 %B, 17 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i32 @test7(i32 %A, i32 %B) {
+ %A1 = and i32 %A, 7 ; <i32> [#uses=1]
+ %B1 = and i32 %B, 128 ; <i32> [#uses=1]
+ %C1 = xor i32 %A1, %B1 ; <i32> [#uses=1]
+ ret i32 %C1
+}
+
+define i8 @test8(i1 %c) {
+ %d = xor i1 %c, true ; <i1> [#uses=1]
+ br i1 %d, label %True, label %False
+
+True: ; preds = %0
+ ret i8 1
+
+False: ; preds = %0
+ ret i8 3
+}
+
+define i1 @test9(i8 %A) {
+ %B = xor i8 %A, 123 ; <i8> [#uses=1]
+ %C = icmp eq i8 %B, 34 ; <i1> [#uses=1]
+ ret i1 %C
+}
+
+define i8 @test10(i8 %A) {
+ %B = and i8 %A, 3 ; <i8> [#uses=1]
+ %C = xor i8 %B, 4 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i8 @test11(i8 %A) {
+ %B = or i8 %A, 12 ; <i8> [#uses=1]
+ %C = xor i8 %B, 4 ; <i8> [#uses=1]
+ ret i8 %C
+}
+
+define i1 @test12(i8 %A) {
+ %B = xor i8 %A, 4 ; <i8> [#uses=1]
+ %c = icmp ne i8 %B, 0 ; <i1> [#uses=1]
+ ret i1 %c
+}
+
+define i1 @test13(i8 %A, i8 %B) {
+ %C = icmp ult i8 %A, %B ; <i1> [#uses=1]
+ %D = icmp ugt i8 %A, %B ; <i1> [#uses=1]
+ %E = xor i1 %C, %D ; <i1> [#uses=1]
+ ret i1 %E
+}
+
+define i1 @test14(i8 %A, i8 %B) {
+ %C = icmp eq i8 %A, %B ; <i1> [#uses=1]
+ %D = icmp ne i8 %B, %A ; <i1> [#uses=1]
+ %E = xor i1 %C, %D ; <i1> [#uses=1]
+ ret i1 %E
+}
+
+define i32 @test15(i32 %A) {
+ %B = add i32 %A, -1 ; <i32> [#uses=1]
+ %C = xor i32 %B, -1 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test16(i32 %A) {
+ %B = add i32 %A, 123 ; <i32> [#uses=1]
+ %C = xor i32 %B, -1 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test17(i32 %A) {
+ %B = sub i32 123, %A ; <i32> [#uses=1]
+ %C = xor i32 %B, -1 ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test18(i32 %A) {
+ %B = xor i32 %A, -1 ; <i32> [#uses=1]
+ %C = sub i32 123, %B ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+define i32 @test19(i32 %A, i32 %B) {
+ %C = xor i32 %A, %B ; <i32> [#uses=1]
+ %D = xor i32 %C, %A ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define void @test20(i32 %A, i32 %B) {
+ %tmp.2 = xor i32 %B, %A ; <i32> [#uses=2]
+ %tmp.5 = xor i32 %tmp.2, %B ; <i32> [#uses=2]
+ %tmp.8 = xor i32 %tmp.5, %tmp.2 ; <i32> [#uses=1]
+ store i32 %tmp.8, i32* @G1
+ store i32 %tmp.5, i32* @G2
+ ret void
+}
+
+define i32 @test21(i1 %C, i32 %A, i32 %B) {
+ %C2 = xor i1 %C, true ; <i1> [#uses=1]
+ %D = select i1 %C2, i32 %A, i32 %B ; <i32> [#uses=1]
+ ret i32 %D
+}
+
+define i32 @test22(i1 %X) {
+ %Y = xor i1 %X, true ; <i1> [#uses=1]
+ %Z = zext i1 %Y to i32 ; <i32> [#uses=1]
+ %Q = xor i32 %Z, 1 ; <i32> [#uses=1]
+ ret i32 %Q
+}
+
+define i1 @test23(i32 %a, i32 %b) {
+ %tmp.2 = xor i32 %b, %a ; <i32> [#uses=1]
+ %tmp.4 = icmp eq i32 %tmp.2, %a ; <i1> [#uses=1]
+ ret i1 %tmp.4
+}
+
+define i1 @test24(i32 %c, i32 %d) {
+ %tmp.2 = xor i32 %d, %c ; <i32> [#uses=1]
+ %tmp.4 = icmp ne i32 %tmp.2, %c ; <i1> [#uses=1]
+ ret i1 %tmp.4
+}
+
+define i32 @test25(i32 %g, i32 %h) {
+ %h2 = xor i32 %h, -1 ; <i32> [#uses=1]
+ %tmp2 = and i32 %h2, %g ; <i32> [#uses=1]
+ %tmp4 = xor i32 %tmp2, %g ; <i32> [#uses=1]
+ ret i32 %tmp4
+}
+
+define i32 @test26(i32 %a, i32 %b) {
+ %b2 = xor i32 %b, -1 ; <i32> [#uses=1]
+ %tmp2 = xor i32 %a, %b2 ; <i32> [#uses=1]
+ %tmp4 = and i32 %tmp2, %a ; <i32> [#uses=1]
+ ret i32 %tmp4
+}
+
+define i32 @test27(i32 %b, i32 %c, i32 %d) {
+ %tmp2 = xor i32 %d, %b ; <i32> [#uses=1]
+ %tmp5 = xor i32 %d, %c ; <i32> [#uses=1]
+ %tmp = icmp eq i32 %tmp2, %tmp5 ; <i1> [#uses=1]
+ %tmp6 = zext i1 %tmp to i32 ; <i32> [#uses=1]
+ ret i32 %tmp6
+}
+
+define i32 @test28(i32 %indvar) {
+ %tmp7 = add i32 %indvar, -2147483647 ; <i32> [#uses=1]
+ %tmp214 = xor i32 %tmp7, -2147483648 ; <i32> [#uses=1]
+ ret i32 %tmp214
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/xor2.ll b/src/LLVM/test/Transforms/InstCombine/xor2.ll
new file mode 100644
index 0000000..47d1da7
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/xor2.ll
@@ -0,0 +1,53 @@
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; PR1253
+define i1 @test0(i32 %A) {
+; CHECK: @test0
+; CHECK: %C = icmp slt i32 %A, 0
+ %B = xor i32 %A, -2147483648
+ %C = icmp sgt i32 %B, -1
+ ret i1 %C
+}
+
+define i1 @test1(i32 %A) {
+; CHECK: @test1
+; CHECK: %C = icmp slt i32 %A, 0
+ %B = xor i32 %A, 12345
+ %C = icmp slt i32 %B, 0
+ ret i1 %C
+}
+
+; PR1014
+define i32 @test2(i32 %tmp1) {
+; CHECK: @test2
+; CHECK-NEXT: and i32 %tmp1, 32
+; CHECK-NEXT: or i32 %ovm, 8
+; CHECK-NEXT: ret i32
+ %ovm = and i32 %tmp1, 32
+ %ov3 = add i32 %ovm, 145
+ %ov110 = xor i32 %ov3, 153
+ ret i32 %ov110
+}
+
+define i32 @test3(i32 %tmp1) {
+; CHECK: @test3
+; CHECK-NEXT: and i32 %tmp1, 32
+; CHECK-NEXT: or i32 %ovm, 8
+; CHECK-NEXT: ret i32
+ %ovm = or i32 %tmp1, 145
+ %ov31 = and i32 %ovm, 177
+ %ov110 = xor i32 %ov31, 153
+ ret i32 %ov110
+}
+
+define i32 @test4(i32 %A, i32 %B) {
+ %1 = xor i32 %A, -1
+ %2 = ashr i32 %1, %B
+ %3 = xor i32 %2, -1
+ ret i32 %3
+; CHECK: @test4
+; CHECK: %1 = ashr i32 %A, %B
+; CHECK: ret i32 %1
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/zero-point-zero-add.ll b/src/LLVM/test/Transforms/InstCombine/zero-point-zero-add.ll
new file mode 100644
index 0000000..d07a9f4
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/zero-point-zero-add.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instcombine -S | grep 0.0 | count 1
+
+declare double @abs(double)
+
+define double @test(double %X) {
+ %Y = fadd double %X, 0.0 ;; Should be a single add x, 0.0
+ %Z = fadd double %Y, 0.0
+ ret double %Z
+}
+
+define double @test1(double %X) {
+ %Y = call double @abs(double %X)
+ %Z = fadd double %Y, 0.0
+ ret double %Z
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/zeroext-and-reduce.ll b/src/LLVM/test/Transforms/InstCombine/zeroext-and-reduce.ll
new file mode 100644
index 0000000..6093533
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/zeroext-and-reduce.ll
@@ -0,0 +1,10 @@
+; RUN: opt < %s -instcombine -S | \
+; RUN: grep {and i32 %Y, 8}
+
+define i32 @test1(i8 %X) {
+ %Y = zext i8 %X to i32 ; <i32> [#uses=1]
+ %Z = and i32 %Y, 65544 ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+
diff --git a/src/LLVM/test/Transforms/InstCombine/zext-bool-add-sub.ll b/src/LLVM/test/Transforms/InstCombine/zext-bool-add-sub.ll
new file mode 100644
index 0000000..1164273
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/zext-bool-add-sub.ll
@@ -0,0 +1,29 @@
+; RUN: opt < %s -instcombine -S | not grep zext
+
+define i32 @a(i1 %x) {
+entry:
+ %y = zext i1 %x to i32
+ %res = add i32 %y, 1
+ ret i32 %res
+}
+
+define i32 @b(i1 %x) {
+entry:
+ %y = zext i1 %x to i32
+ %res = add i32 %y, -1
+ ret i32 %res
+}
+
+define i32 @c(i1 %x) {
+entry:
+ %y = zext i1 %x to i32
+ %res = sub i32 0, %y
+ ret i32 %res
+}
+
+define i32 @d(i1 %x) {
+entry:
+ %y = zext i1 %x to i32
+ %res = sub i32 3, %y
+ ret i32 %res
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/zext-fold.ll b/src/LLVM/test/Transforms/InstCombine/zext-fold.ll
new file mode 100644
index 0000000..9521101
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/zext-fold.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | grep {zext } | count 1
+; PR1570
+
+define i32 @test2(float %X, float %Y) {
+entry:
+ %tmp3 = fcmp uno float %X, %Y ; <i1> [#uses=1]
+ %tmp34 = zext i1 %tmp3 to i8 ; <i8> [#uses=1]
+ %tmp = xor i8 %tmp34, 1 ; <i8> [#uses=1]
+ %toBoolnot5 = zext i8 %tmp to i32 ; <i32> [#uses=1]
+ ret i32 %toBoolnot5
+}
+
diff --git a/src/LLVM/test/Transforms/InstCombine/zext-or-icmp.ll b/src/LLVM/test/Transforms/InstCombine/zext-or-icmp.ll
new file mode 100644
index 0000000..ddc6083
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/zext-or-icmp.ll
@@ -0,0 +1,35 @@
+; RUN: opt < %s -instcombine -S | grep icmp | count 1
+
+ %struct.FooBar = type <{ i8, i8, [2 x i8], i8, i8, i8, i8, i16, i16, [4 x i8], [8 x %struct.Rock] }>
+ %struct.Rock = type { i16, i16 }
+@some_idx = internal constant [4 x i8] c"\0A\0B\0E\0F" ; <[4 x i8]*> [#uses=1]
+
+define zeroext i8 @t(%struct.FooBar* %up, i8 zeroext %intra_flag, i32 %blk_i) nounwind {
+entry:
+ %tmp2 = lshr i32 %blk_i, 1 ; <i32> [#uses=1]
+ %tmp3 = and i32 %tmp2, 2 ; <i32> [#uses=1]
+ %tmp5 = and i32 %blk_i, 1 ; <i32> [#uses=1]
+ %tmp6 = or i32 %tmp3, %tmp5 ; <i32> [#uses=1]
+ %tmp8 = getelementptr %struct.FooBar* %up, i32 0, i32 7 ; <i16*> [#uses=1]
+ %tmp9 = load i16* %tmp8, align 1 ; <i16> [#uses=1]
+ %tmp910 = zext i16 %tmp9 to i32 ; <i32> [#uses=1]
+ %tmp12 = getelementptr [4 x i8]* @some_idx, i32 0, i32 %tmp6 ; <i8*> [#uses=1]
+ %tmp13 = load i8* %tmp12, align 1 ; <i8> [#uses=1]
+ %tmp1314 = zext i8 %tmp13 to i32 ; <i32> [#uses=1]
+ %tmp151 = lshr i32 %tmp910, %tmp1314 ; <i32> [#uses=1]
+ %tmp1516 = trunc i32 %tmp151 to i8 ; <i8> [#uses=1]
+ %tmp18 = getelementptr %struct.FooBar* %up, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp19 = load i8* %tmp18, align 1 ; <i8> [#uses=1]
+ %tmp22 = and i8 %tmp1516, %tmp19 ; <i8> [#uses=1]
+ %tmp24 = getelementptr %struct.FooBar* %up, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp25 = load i8* %tmp24, align 1 ; <i8> [#uses=1]
+ %tmp26.mask = and i8 %tmp25, 1 ; <i8> [#uses=1]
+ %toBool = icmp eq i8 %tmp26.mask, 0 ; <i1> [#uses=1]
+ %toBool.not = xor i1 %toBool, true ; <i1> [#uses=1]
+ %toBool33 = icmp eq i8 %intra_flag, 0 ; <i1> [#uses=1]
+ %bothcond = or i1 %toBool.not, %toBool33 ; <i1> [#uses=1]
+ %iftmp.1.0 = select i1 %bothcond, i8 0, i8 1 ; <i8> [#uses=1]
+ %tmp40 = or i8 %tmp22, %iftmp.1.0 ; <i8> [#uses=1]
+ %tmp432 = and i8 %tmp40, 1 ; <i8> [#uses=1]
+ ret i8 %tmp432
+}
diff --git a/src/LLVM/test/Transforms/InstCombine/zext.ll b/src/LLVM/test/Transforms/InstCombine/zext.ll
new file mode 100644
index 0000000..295e44d
--- /dev/null
+++ b/src/LLVM/test/Transforms/InstCombine/zext.ll
@@ -0,0 +1,11 @@
+; Tests to make sure elimination of casts is working correctly
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i64 @test_sext_zext(i16 %A) {
+ %c1 = zext i16 %A to i32 ; <i32> [#uses=1]
+ %c2 = sext i32 %c1 to i64 ; <i64> [#uses=1]
+ ret i64 %c2
+; CHECK-NOT: %c1
+; CHECK: %c2 = zext i16 %A to i64
+; CHECK: ret i64 %c2
+}