Add "select" detection to common frontend

dx produces a somewhat ugly code pattern for selects:

    foo = (condition) ? true : false;

There is no select Dex opcode, so this turns into:

    IF_NEZ v0, L1
    CONST_4 v2, #0
L2:
    <rejoin>
.
.
L1:
    CONST_4 v2, #1
    GOTO L2

...  or ...

    foo = (condition) ? bar1 : bar2;

    IF_EQZ v0, L1
    MOVE v2, v3
L2:
    <rejoin>
.
.
L1:
    MOVE v2, v4
    GOTO L2

Not only do we end up with excessive branching (and, unless we do
something special, really poor code layout), but the compilers
generally insert a suspend check on backwards branches - which is
completely unnecessary for the "GOTO L2" case above.  There are ~2100
instances of the simplest variants of this pattern in the framework.
With this new optimization, boot.oat size is reduced by 90KB,
and one of our standard benchmarks improved by 8%.

This CL adds select detection to the common frontend's BasicBlock
optimization pass and introduces a new extended MIR opcode:
kMirOpSelect.
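
On Thumb2, kMirOpSelect is lowered to a short IT block rather than
branches.  For the MOVE flavor above, the emitted code looks roughly
like this (a sketch of GenSelect's output below; register assignments
are illustrative - the allocator picks the real ones):

    CMP   r0, #0        @ r0 holds the select condition
    ITE   EQ
    MOVEQ r2, r1        @ "true" value (condition == 0 after normalization)
    MOVNE r2, r3        @ "false" value

A scheduling barrier follows each IT shadow so that later passes
cannot reorder or remove the conditional instructions.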

Change-Id: I06249956ba21afb0ed5cdd35019ac87cd063a17b
diff --git a/src/compiler/codegen/arm/codegen_arm.h b/src/compiler/codegen/arm/codegen_arm.h
index 9342620..17b8357 100644
--- a/src/compiler/codegen/arm/codegen_arm.h
+++ b/src/compiler/codegen/arm/codegen_arm.h
@@ -140,6 +140,7 @@
     virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
                                      bool is_double);
     virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+    virtual void GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
     virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind);
     virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
     virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
diff --git a/src/compiler/codegen/arm/int_arm.cc b/src/compiler/codegen/arm/int_arm.cc
index fbc48d4..1447aec 100644
--- a/src/compiler/codegen/arm/int_arm.cc
+++ b/src/compiler/codegen/arm/int_arm.cc
@@ -182,6 +182,57 @@
   OpCmpImmBranch(cu, ccode, low_reg, val_lo, taken);
 }
 
+void ArmCodegen::GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+{
+  RegLocation rl_result;
+  RegLocation rl_src = GetSrc(cu, mir, 0);
+  RegLocation rl_dest = GetDest(cu, mir);
+  rl_src = LoadValue(cu, rl_src, kCoreReg);
+  if (mir->ssa_rep->num_uses == 1) {
+    // CONST case
+    int true_val = mir->dalvikInsn.vB;
+    int false_val = mir->dalvikInsn.vC;
+    rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+    if ((true_val == 1) && (false_val == 0)) {
+      OpRegRegImm(cu, kOpRsub, rl_result.low_reg, rl_src.low_reg, 1);  // result = 1 - src; carry clear unless src is 0 or 1
+      OpIT(cu, kCondCc, "");  // if carry is clear (src was neither 0 nor 1)...
+      LoadConstant(cu, rl_result.low_reg, 0);  // ...force the result to 0
+      GenBarrier(cu); // Add a scheduling barrier to keep the IT shadow intact
+    } else if (InexpensiveConstantInt(true_val) && InexpensiveConstantInt(false_val)) {
+      OpRegImm(cu, kOpCmp, rl_src.low_reg, 0);
+      OpIT(cu, kCondEq, "E");
+      LoadConstant(cu, rl_result.low_reg, true_val);
+      LoadConstant(cu, rl_result.low_reg, false_val);
+      GenBarrier(cu); // Add a scheduling barrier to keep the IT shadow intact
+    } else {
+      // Unlikely case - could be tuned.
+      int t_reg1 = AllocTemp(cu);
+      int t_reg2 = AllocTemp(cu);
+      LoadConstant(cu, t_reg1, true_val);
+      LoadConstant(cu, t_reg2, false_val);
+      OpRegImm(cu, kOpCmp, rl_src.low_reg, 0);
+      OpIT(cu, kCondEq, "E");
+      OpRegCopy(cu, rl_result.low_reg, t_reg1);
+      OpRegCopy(cu, rl_result.low_reg, t_reg2);
+      GenBarrier(cu); // Add a scheduling barrier to keep the IT shadow intact
+    }
+  } else {
+    // MOVE case
+    RegLocation rl_true = cu->reg_location[mir->ssa_rep->uses[1]];
+    RegLocation rl_false = cu->reg_location[mir->ssa_rep->uses[2]];
+    rl_true = LoadValue(cu, rl_true, kCoreReg);
+    rl_false = LoadValue(cu, rl_false, kCoreReg);
+    rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+    OpRegImm(cu, kOpCmp, rl_src.low_reg, 0);
+    OpIT(cu, kCondEq, "E");
+    LIR* l1 = OpRegCopy(cu, rl_result.low_reg, rl_true.low_reg);
+    l1->flags.is_nop = false;  // OpRegCopy nops a same-register copy; force it live so the IT shadow keeps two instructions
+    LIR* l2 = OpRegCopy(cu, rl_result.low_reg, rl_false.low_reg);
+    l2->flags.is_nop = false;  // Likewise - both conditional slots of the ITE must be filled
+    GenBarrier(cu); // Add a scheduling barrier to keep the IT shadow intact
+  }
+  StoreValue(cu, rl_dest, rl_result);
+}
 
 void ArmCodegen::GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
 {
diff --git a/src/compiler/codegen/codegen.h b/src/compiler/codegen/codegen.h
index 372e842..4085a41 100644
--- a/src/compiler/codegen/codegen.h
+++ b/src/compiler/codegen/codegen.h
@@ -335,6 +335,7 @@
     virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
                                      bool is_double) = 0;
     virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir) = 0;
+    virtual void GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir) = 0;
     virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind) = 0;
     virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src) = 0;
     virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src) = 0;
diff --git a/src/compiler/codegen/mips/codegen_mips.h b/src/compiler/codegen/mips/codegen_mips.h
index eec7b08..10a3f77 100644
--- a/src/compiler/codegen/mips/codegen_mips.h
+++ b/src/compiler/codegen/mips/codegen_mips.h
@@ -141,6 +141,7 @@
     virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
                                      bool is_double);
     virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+    virtual void GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
     virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind);
     virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
     virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
diff --git a/src/compiler/codegen/mips/int_mips.cc b/src/compiler/codegen/mips/int_mips.cc
index 113183c..8e71ca6 100644
--- a/src/compiler/codegen/mips/int_mips.cc
+++ b/src/compiler/codegen/mips/int_mips.cc
@@ -215,6 +215,11 @@
   }
 }
 
+void MipsCodegen::GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+{
+  UNIMPLEMENTED(FATAL) << "Need codegen for select";
+}
+
 void MipsCodegen::GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
 {
   UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
diff --git a/src/compiler/codegen/mir_to_lir.cc b/src/compiler/codegen/mir_to_lir.cc
index 23c3fe7..1e777d9 100644
--- a/src/compiler/codegen/mir_to_lir.cc
+++ b/src/compiler/codegen/mir_to_lir.cc
@@ -689,6 +689,9 @@
     case kMirOpFusedCmpLong:
       cg->GenFusedLongCmpBranch(cu, bb, mir);
       break;
+    case kMirOpSelect:
+      cg->GenSelect(cu, bb, mir);
+      break;
     default:
       break;
   }
diff --git a/src/compiler/codegen/x86/codegen_x86.h b/src/compiler/codegen/x86/codegen_x86.h
index 15a4662..a004232 100644
--- a/src/compiler/codegen/x86/codegen_x86.h
+++ b/src/compiler/codegen/x86/codegen_x86.h
@@ -141,6 +141,7 @@
     virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
                                      bool is_double);
     virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+    virtual void GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
     virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind);
     virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
     virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
diff --git a/src/compiler/codegen/x86/int_x86.cc b/src/compiler/codegen/x86/int_x86.cc
index b2292fb..09e5eb3 100644
--- a/src/compiler/codegen/x86/int_x86.cc
+++ b/src/compiler/codegen/x86/int_x86.cc
@@ -168,6 +168,11 @@
   }
 }
 
+void X86Codegen::GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+{
+  UNIMPLEMENTED(FATAL) << "Need codegen for select";
+}
+
 void X86Codegen::GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir) {
   LIR* label_list = cu->block_label_list;
   LIR* taken = &label_list[bb->taken->id];
diff --git a/src/compiler/compiler_enums.h b/src/compiler/compiler_enums.h
index ae305c0..24930ed 100644
--- a/src/compiler/compiler_enums.h
+++ b/src/compiler/compiler_enums.h
@@ -113,6 +113,7 @@
   kMirOpDivZeroCheck,
   kMirOpCheck,
   kMirOpCheckPart2,
+  kMirOpSelect,
   kMirOpLast,
 };
 
@@ -391,6 +392,13 @@
   kUsesCCodes
 };
 
+enum SelectInstructionKind {
+  kSelectNone,
+  kSelectConst,
+  kSelectMove,
+  kSelectGoto
+};
+
 std::ostream& operator<<(std::ostream& os, const OpFeatureFlags& flag);
 
 }  // namespace art
diff --git a/src/compiler/compiler_ir.h b/src/compiler/compiler_ir.h
index a3a4ff9..50cdc02 100644
--- a/src/compiler/compiler_ir.h
+++ b/src/compiler/compiler_ir.h
@@ -598,6 +598,12 @@
   return cu->constant_values[loc.orig_sreg];
 }
 
+static inline int32_t ConstantValue(const CompilationUnit* cu, int32_t s_reg)
+{
+  DCHECK(IsConst(cu, s_reg));
+  return cu->constant_values[s_reg];
+}
+
 static inline int64_t ConstantValueWide(const CompilationUnit* cu, RegLocation loc)
 {
   DCHECK(IsConst(cu, loc));
diff --git a/src/compiler/compiler_utility.cc b/src/compiler/compiler_utility.cc
index 0bce17c..b5185b0 100644
--- a/src/compiler/compiler_utility.cc
+++ b/src/compiler/compiler_utility.cc
@@ -32,6 +32,7 @@
   "OpDivZeroCheck",
   "Check1",
   "Check2",
+  "Select",
 };
 
 #ifdef WITH_MEMSTATS
diff --git a/src/compiler/dataflow.cc b/src/compiler/dataflow.cc
index 4d7e9d7..7505f6c 100644
--- a/src/compiler/dataflow.cc
+++ b/src/compiler/dataflow.cc
@@ -834,6 +834,12 @@
 
   // 111 MIR_CHECK
   0,
+
+  // 112 MIR_CHECKPART2
+  0,
+
+  // 113 MIR_SELECT
+  DF_DA | DF_UB,
 };
 
 /* Return the base virtual register for a SSA name */
@@ -1588,7 +1594,7 @@
     mir = mir->next;
     if (mir == NULL) {
       bb = bb->fall_through;
-      if ((bb == NULL) || bb->predecessors->num_used != 1) {
+      if ((bb == NULL) || Predecessors(bb) != 1) {
         mir = NULL;
       } else {
       *p_bb = bb;
@@ -1629,19 +1635,62 @@
 
 static BasicBlock* NextDominatedBlock(CompilationUnit* cu, BasicBlock* bb)
 {
+  if (bb->block_type == kDead) {
+    return NULL;
+  }
   DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
       || (bb->block_type == kExitBlock));
   bb = bb->fall_through;
-  if (bb == NULL || (bb->predecessors->num_used != 1)) {
+  if (bb == NULL || (Predecessors(bb) != 1)) {
     return NULL;
   }
   DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
   return bb;
 }
 
+static MIR* FindPhi(CompilationUnit* cu, BasicBlock* bb, int ssa_name)
+{
+  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
+      for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+        if (mir->ssa_rep->uses[i] == ssa_name) {
+          return mir;
+        }
+      }
+    }
+  }
+  return NULL;
+}
+
+static SelectInstructionKind SelectKind(MIR* mir)
+{
+  switch (mir->dalvikInsn.opcode) {
+    case Instruction::MOVE:
+    case Instruction::MOVE_OBJECT:
+    case Instruction::MOVE_16:
+    case Instruction::MOVE_OBJECT_16:
+    case Instruction::MOVE_FROM16:
+    case Instruction::MOVE_OBJECT_FROM16:
+      return kSelectMove;
+    case Instruction::CONST:
+    case Instruction::CONST_4:
+    case Instruction::CONST_16:
+      return kSelectConst;
+    case Instruction::GOTO:
+    case Instruction::GOTO_16:
+    case Instruction::GOTO_32:
+      return kSelectGoto;
+    default: break;
+  }
+  return kSelectNone;
+}
+
 /* Do some MIR-level extended basic block optimizations */
 static bool BasicBlockOpt(CompilationUnit* cu, BasicBlock* bb)
 {
+  if (bb->block_type == kDead) {
+    return true;
+  }
   int num_temps = 0;
   BBOpt bb_opt(cu);
   while (bb != NULL) {
@@ -1749,6 +1798,133 @@
         default:
           break;
       }
+      // Is this the select pattern?
+      // TODO: flesh out support for Mips and X86.  NOTE: LLVM's select op doesn't quite work here.
+      // TUNING: expand to support IF_xx compare & branches
+      if (!cu->gen_bitcode && (cu->instruction_set == kThumb2) &&
+          ((mir->dalvikInsn.opcode == Instruction::IF_EQZ) ||
+          (mir->dalvikInsn.opcode == Instruction::IF_NEZ))) {
+        BasicBlock* ft = bb->fall_through;
+        DCHECK(ft != NULL);
+        BasicBlock* ft_ft = ft->fall_through;
+        BasicBlock* ft_tk = ft->taken;
+
+        BasicBlock* tk = bb->taken;
+        DCHECK(tk != NULL);
+        BasicBlock* tk_ft = tk->fall_through;
+        BasicBlock* tk_tk = tk->taken;
+
+        /*
+         * In the select pattern, the taken edge goes to a block that unconditionally
+         * transfers to the rejoin block and the fall_through edge goes to a block that
+         * unconditionally falls through to the rejoin block.
+         */
+
+        if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
+            (Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
+          // Okay - we have the basic diamond shape.  Are the block bodies something we can handle?
+          if ((ft->first_mir_insn == ft->last_mir_insn) &&
+              (tk->first_mir_insn != tk->last_mir_insn) &&
+              (tk->first_mir_insn->next == tk->last_mir_insn) &&
+              ((SelectKind(ft->first_mir_insn) == kSelectMove) ||
+              (SelectKind(ft->first_mir_insn) == kSelectConst)) &&
+              (SelectKind(ft->first_mir_insn) == SelectKind(tk->first_mir_insn)) &&
+              (SelectKind(tk->last_mir_insn) == kSelectGoto)) {
+            // Almost there.  Are the instructions targeting the same vreg?
+            MIR* if_true = tk->first_mir_insn;
+            MIR* if_false = ft->first_mir_insn;
+            if (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA) {
+              /*
+               * We'll convert the IF_EQZ/IF_NEZ to a SELECT.  We need to find the
+               * Phi node in the merge block and delete it (while using its SSA name
+               * as the target of the SELECT).  Delete both the taken and the
+               * fallthrough blocks, and set the fallthrough to the merge block.
+               * NOTE: not updating other dataflow info (no longer used at this point).
+               * If this changes, need to update i_dom, etc. here (and in CombineBlocks).
+               */
+              if (opcode == Instruction::IF_NEZ) {
+                // Normalize.
+                MIR* tmp_mir = if_true;
+                if_true = if_false;
+                if_false = tmp_mir;
+              }
+              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpSelect);
+              bool const_form = (SelectKind(if_true) == kSelectConst);
+              if (SelectKind(if_true) == kSelectMove) {
+                if (IsConst(cu, if_true->ssa_rep->uses[0]) &&
+                    IsConst(cu, if_false->ssa_rep->uses[0])) {
+                  const_form = true;
+                  if_true->dalvikInsn.vB = ConstantValue(cu, if_true->ssa_rep->uses[0]);
+                  if_false->dalvikInsn.vB = ConstantValue(cu, if_false->ssa_rep->uses[0]);
+                }
+              }
+              if (const_form) {
+                // "true" set val in vB
+                mir->dalvikInsn.vB = if_true->dalvikInsn.vB;
+                // "false" set val in vC
+                mir->dalvikInsn.vC = if_false->dalvikInsn.vB;
+              } else {
+                DCHECK_EQ(SelectKind(if_true), kSelectMove);
+                DCHECK_EQ(SelectKind(if_false), kSelectMove);
+                int* src_ssa = static_cast<int*>(NewMem(cu, sizeof(int) * 3, false,
+                                                 kAllocDFInfo));
+                src_ssa[0] = mir->ssa_rep->uses[0];
+                src_ssa[1] = if_true->ssa_rep->uses[0];
+                src_ssa[2] = if_false->ssa_rep->uses[0];
+                mir->ssa_rep->uses = src_ssa;
+                mir->ssa_rep->num_uses = 3;
+              }
+              mir->ssa_rep->num_defs = 1;
+              mir->ssa_rep->defs = static_cast<int*>(NewMem(cu, sizeof(int) * 1, false,
+                                                     kAllocDFInfo));
+              mir->ssa_rep->fp_def = static_cast<bool*>(NewMem(cu, sizeof(bool) * 1, false,
+                                                     kAllocDFInfo));
+              mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
+              /*
+               * There is usually a Phi node in the join block for our two cases.  If the
+               * Phi node only contains our two cases as input, we will use the result
+               * SSA name of the Phi node as our select result and delete the Phi.  If
+               * the Phi node has more than two operands, we will arbitrarily use the SSA
+               * name of the "true" path, delete the SSA name of the "false" path from the
+               * Phi node (and fix up the incoming arc list).
+               */
+              MIR* phi = FindPhi(cu, tk_tk, if_true->ssa_rep->defs[0]);
+              if (phi != NULL) {
+                if (phi->ssa_rep->num_uses == 2) {
+                  mir->ssa_rep->defs[0] = phi->ssa_rep->defs[0];
+                  phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+                } else {
+                  int dead_def = if_false->ssa_rep->defs[0];
+                  int live_def = if_true->ssa_rep->defs[0];
+                  mir->ssa_rep->defs[0] = live_def;
+                  int* incoming = reinterpret_cast<int*>(phi->dalvikInsn.vB);
+                  for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
+                    if (phi->ssa_rep->uses[i] == live_def) {
+                      incoming[i] = bb->id;
+                    }
+                  }
+                  for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
+                    if (phi->ssa_rep->uses[i] == dead_def) {
+                      int last_slot = phi->ssa_rep->num_uses - 1;
+                      phi->ssa_rep->uses[i] = phi->ssa_rep->uses[last_slot];
+                      incoming[i] = incoming[last_slot];
+                    }
+                  }
+                }
+                phi->ssa_rep->num_uses--;
+              }
+              bb->taken = NULL;
+              tk->block_type = kDead;
+              for (MIR* tmir = ft->first_mir_insn; tmir != NULL; tmir = tmir->next) {
+                tmir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+              }
+            } else {
+              // At least we can eliminate the suspend check on the backwards branch.
+              tk->last_mir_insn->optimization_flags |= (MIR_IGNORE_SUSPEND_CHECK);
+            }
+          }
+        }
+      }
     }
     bb = NextDominatedBlock(cu, bb);
   }
@@ -1803,7 +1979,7 @@
   BasicBlock* walker = bb;
   while (true) {
     // Check termination conditions
-    if ((walker->block_type == kEntryBlock) || (walker->predecessors->num_used != 1)) {
+    if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
       break;
     }
     BasicBlock* prev = GET_ELEM_N(walker->predecessors, BasicBlock*, 0);
@@ -1876,7 +2052,7 @@
     // OK - got one.  Combine
     BasicBlock* bb_next = bb->fall_through;
     DCHECK(!bb_next->catch_entry);
-    DCHECK_EQ(bb_next->predecessors->num_used, 1U);
+    DCHECK_EQ(Predecessors(bb_next), 1U);
     MIR* t_mir = bb->last_mir_insn->prev;
     // Overwrite the kOpCheck insn with the paired opcode
     DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
@@ -2096,9 +2272,6 @@
     return false;
   }
   // Must be head of extended basic block.
-  if (cu->verbose) {
-    LOG(INFO) << "Extended bb head " << bb->id;
-  }
   BasicBlock* start_bb = bb;
   cu->extended_basic_blocks.push_back(bb);
   bool terminated_by_return = false;
@@ -2107,9 +2280,6 @@
     bb->visited = true;
     terminated_by_return |= bb->terminated_by_return;
     bb = NextDominatedBlock(cu, bb);
-    if (cu->verbose && (bb != NULL)) {
-      LOG(INFO) << "...added bb " << bb->id;
-    }
   }
   if (terminated_by_return) {
     // This extended basic block contains a return, so mark all members.
diff --git a/src/compiler/dataflow.h b/src/compiler/dataflow.h
index 38edd36..22ea33e 100644
--- a/src/compiler/dataflow.h
+++ b/src/compiler/dataflow.h
@@ -157,6 +157,8 @@
   ArenaBitVector* blocks;
 };
 
+static inline unsigned int Predecessors(BasicBlock* bb) { return bb->predecessors->num_used; }
+
 int SRegToVReg(const CompilationUnit* cu, int ssa_reg);
 char* GetDalvikDisassembly(CompilationUnit* cu, const MIR* mir);
 bool FindLocalLiveIn(CompilationUnit* cu, BasicBlock* bb);