Be sure to destroy the normal entry block of a cleanup that we
aren't actually going to make a normal cleanup for.  Sometimes
we optimistically create branches to such blocks for fixups,
then resolve the fixup to somewhere within the cleanup's scope,
and then the cleanup itself turns out to be unreachable for
some reason.  Resolving the fixup leaves us with switches whose
default edge leads to the cleanup; we can replace that edge
with unreachable and then (in many cases) turn the switch into
an unconditional branch.
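
To illustrate (block and value names below are invented; only
the shape matches what fixup resolution produces):

    ; illustrative only
    %dest = load i32* %cleanup.dest.slot
    switch i32 %dest, label %cleanup.entry [
      i32 1, label %l
    ]

Once the default edge points at an unreachable block, the
switch has a single real case left and collapses to:

    br label %l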

Fixes PR10467.



git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@137011 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index 10f716a..5e86d54 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -483,6 +483,53 @@
   }
 }
 
+/// We don't need a normal entry block for the given cleanup.
+/// Optimistic fixup branches can cause one to come into existence
+/// anyway; if so, destroy it.
+///
+/// The validity of this transformation is very much specific to the
+/// exact ways in which we form branches to cleanup entries.
+static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
+                                         EHCleanupScope &scope) {
+  llvm::BasicBlock *entry = scope.getNormalBlock();
+  if (!entry) return;
+
+  // Replace all the uses with unreachable.
+  llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
+  for (llvm::BasicBlock::use_iterator
+         i = entry->use_begin(), e = entry->use_end(); i != e; ) {
+    llvm::Use &use = i.getUse();
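+    // Advance the iterator first; use.set below moves this use onto
+    // unreachableBB's use list, which would invalidate the iterator.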
+    ++i;
+
+    use.set(unreachableBB);
+
+    // The only uses should be fixup switches.
+    llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
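+    // Note that getNumCases() counts the default destination here,
+    // so 2 means the default plus exactly one real case.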
+    if (si->getNumCases() == 2 && si->getDefaultDest() == unreachableBB) {
+      // Replace the switch with a branch.
+      llvm::BranchInst::Create(si->getSuccessor(1), si);
+
+      // The switch operand is a load from the cleanup-dest alloca.
+      llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());
+
+      // Destroy the switch.
+      si->eraseFromParent();
+
+      // Destroy the load.
+      assert(condition->getOperand(0) == CGF.NormalCleanupDest);
+      assert(condition->use_empty());
+      condition->eraseFromParent();
+    }
+  }
+
+  assert(entry->use_empty());
+  delete entry;
+}
+
 /// Pops a cleanup block.  If the block includes a normal cleanup, the
 /// current insertion point is threaded through the cleanup, as are
 /// any branch fixups on the cleanup.
@@ -574,6 +617,7 @@
 
   // If we don't need the cleanup at all, we're done.
   if (!RequiresNormalCleanup && !RequiresEHCleanup) {
+    destroyOptimisticNormalEntry(*this, Scope);
     EHStack.popCleanup(); // safe because there are no fixups
     assert(EHStack.getNumBranchFixups() == 0 ||
            EHStack.hasNormalCleanups());
@@ -648,6 +692,7 @@
   }
 
   if (!RequiresNormalCleanup) {
+    destroyOptimisticNormalEntry(*this, Scope);
     EHStack.popCleanup();
   } else {
     // If we have a fallthrough and no other need for the cleanup,
@@ -655,15 +700,7 @@
     if (HasFallthrough && !HasPrebranchedFallthrough &&
         !HasFixups && !HasExistingBranches) {
 
-      // Fixups can cause us to optimistically create a normal block,
-      // only to later have no real uses for it.  Just delete it in
-      // this case.
-      // TODO: we can potentially simplify all the uses after this.
-      if (Scope.getNormalBlock()) {
-        Scope.getNormalBlock()->replaceAllUsesWith(getUnreachableBlock());
-        delete Scope.getNormalBlock();
-      }
-
+      destroyOptimisticNormalEntry(*this, Scope);
       EHStack.popCleanup();
 
       EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
diff --git a/test/CodeGenCXX/destructors.cpp b/test/CodeGenCXX/destructors.cpp
index 3381985..bfdf93c 100644
--- a/test/CodeGenCXX/destructors.cpp
+++ b/test/CodeGenCXX/destructors.cpp
@@ -323,7 +323,32 @@
   // CHECK:   invoke void @_ZN5test71DD1Ev(
   // CHECK:   call void @_ZN5test71AD2Ev(
   B::~B() {}
+}
 
+// PR10467
+namespace test8 {
+  struct A { A(); ~A(); };
+
+  void die() __attribute__((noreturn));
+  void test() {
+    A x;
+    while (1) {
+      A y;
+      goto l;
+    }
+  l: die();
+  }
+
+  // CHECK:    define void @_ZN5test84testEv()
+  // CHECK:      [[X:%.*]] = alloca [[A:%.*]], align 1
+  // CHECK-NEXT: [[Y:%.*]] = alloca [[A]], align 1
+  // CHECK:      call void @_ZN5test81AC1Ev([[A]]* [[X]])
+  // CHECK-NEXT: br label
+  // CHECK:      invoke void @_ZN5test81AC1Ev([[A]]* [[Y]])
+  // CHECK:      invoke void @_ZN5test81AD1Ev([[A]]* [[Y]])
+  // CHECK-NOT:  switch
+  // CHECK:      invoke void @_ZN5test83dieEv()
+  // CHECK:      unreachable
 }
 
 // Checks from test3:
diff --git a/test/CodeGenCXX/eh.cpp b/test/CodeGenCXX/eh.cpp
index 58cb445..736b124 100644
--- a/test/CodeGenCXX/eh.cpp
+++ b/test/CodeGenCXX/eh.cpp
@@ -296,10 +296,7 @@
 
       // CHECK: invoke void @_ZN6test121AD1Ev([[A]]* [[Z]])
       // CHECK: invoke void @_ZN6test121AD1Ev([[A]]* [[Y]])
-
-      // It'd be great if something eliminated this switch.
-      // CHECK:      load i32* [[CLEANUPDEST]]
-      // CHECK-NEXT: switch i32
+      // CHECK-NOT: switch
       goto success;
     }