Tighten an ARM DAG combine condition to avoid an identity transformation, which
ends up introducing a cycle in the DAG.

rdar://10196296


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@140733 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 9f31385..54f8aaa 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -7344,7 +7344,7 @@
   //   movne   r0, y
   /// FIXME: Turn this into a target neutral optimization?
   SDValue Res;
-  if (CC == ARMCC::NE && FalseVal == RHS) {
+  if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
     Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
                       N->getOperand(3), Cmp);
   } else if (CC == ARMCC::EQ && TrueVal == RHS) {
diff --git a/test/CodeGen/ARM/2011-09-28-CMovCombineBug.ll b/test/CodeGen/ARM/2011-09-28-CMovCombineBug.ll
new file mode 100644
index 0000000..c6f4a93
--- /dev/null
+++ b/test/CodeGen/ARM/2011-09-28-CMovCombineBug.ll
@@ -0,0 +1,30 @@
+; RUN: llc -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 < %s
+
+; rdar://10196296
+; ARM target specific dag combine created a cycle in DAG.
+
+define void @t() nounwind ssp {
+  %1 = load i64* undef, align 4
+  %2 = shl i32 5, 0
+  %3 = zext i32 %2 to i64
+  %4 = and i64 %1, %3
+  %5 = lshr i64 %4, undef
+  switch i64 %5, label %8 [
+    i64 0, label %9
+    i64 1, label %6
+    i64 4, label %9
+    i64 5, label %7
+  ]
+
+; <label>:6                                       ; preds = %0
+  unreachable
+
+; <label>:7                                       ; preds = %0
+  unreachable
+
+; <label>:8                                       ; preds = %0
+  unreachable
+
+; <label>:9                                       ; preds = %0, %0
+  ret void
+}