am 4922e9d4: Use /system/framework/framework.jar:preloaded-classes for on device dex2oat

* commit '4922e9d4e5f86e40ca89fb097cec40e191dae0a1':
  Use /system/framework/framework.jar:preloaded-classes for on device dex2oat
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 33c5ac6..f6fd5c8 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -317,7 +317,8 @@
 
 ifeq ($(ART_SEA_IR_MODE),true)
 LIBART_COMMON_SRC_FILES += \
-	src/compiler/sea_ir/sea.cc
+	src/compiler/sea_ir/sea.cc \
+	src/compiler/sea_ir/instruction_tools.cc
 endif
 
 LIBART_TARGET_SRC_FILES := \
diff --git a/build/Android.libart-compiler.mk b/build/Android.libart-compiler.mk
index 25e6997..c226bbc 100644
--- a/build/Android.libart-compiler.mk
+++ b/build/Android.libart-compiler.mk
@@ -78,7 +78,8 @@
 
 ifeq ($(ART_SEA_IR_MODE),true)
 LIBART_COMPILER_SRC_FILES += \
-	src/compiler/sea_ir/frontend.cc
+	src/compiler/sea_ir/frontend.cc \
+	src/compiler/sea_ir/instruction_tools.cc
 endif
 
 LIBART_COMPILER_CFLAGS :=
diff --git a/src/class_linker.cc b/src/class_linker.cc
index 68d0fbb..159fbf7 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -1560,6 +1560,18 @@
   return oat_class->GetOatMethod(oat_method_idx).GetCode();
 }
 
+// Returns true if the method must run with interpreter, false otherwise.
+static bool NeedsInterpreter(const mirror::AbstractMethod* method, const void* code) {
+  if (code == NULL) {
+    // No code: need interpreter.
+    return true;
+  }
+  // If interpreter mode is enabled, every method (except native and proxy) must
+  // be run with interpreter.
+  return Runtime::Current()->GetInstrumentation()->InterpretOnly() &&
+         !method->IsNative() && !method->IsProxyMethod();
+}
+
 void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
   ClassHelper kh(klass);
   const DexFile::ClassDef* dex_class_def = kh.GetClassDef();
@@ -1584,19 +1596,20 @@
   while (it.HasNextInstanceField()) {
     it.Next();
   }
-  size_t method_index = 0;
   // Link the code of methods skipped by LinkCode
-  for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) {
-    mirror::AbstractMethod* method = klass->GetDirectMethod(i);
-    if (method->IsStatic()) {
-      const void* code = oat_class->GetOatMethod(method_index).GetCode();
-      if (code == NULL) {
-        // No code? You must mean to go into the interpreter.
-        code = GetInterpreterEntryPoint();
-      }
-      runtime->GetInstrumentation()->UpdateMethodsCode(method, code);
+  for (size_t method_index = 0; it.HasNextDirectMethod(); ++method_index, it.Next()) {
+    mirror::AbstractMethod* method = klass->GetDirectMethod(method_index);
+    if (!method->IsStatic()) {
+      // Only update static methods.
+      continue;
     }
-    method_index++;
+    const void* code = oat_class->GetOatMethod(method_index).GetCode();
+    const bool enter_interpreter = NeedsInterpreter(method, code);
+    if (enter_interpreter) {
+      // Use interpreter entry point.
+      code = GetInterpreterEntryPoint();
+    }
+    runtime->GetInstrumentation()->UpdateMethodsCode(method, code);
   }
   // Ignore virtual methods on the iterator.
 }
@@ -1613,9 +1626,7 @@
 
   // Install entry point from interpreter.
   Runtime* runtime = Runtime::Current();
-  bool enter_interpreter = method->GetEntryPointFromCompiledCode() == NULL ||
-                           (runtime->GetInstrumentation()->InterpretOnly() &&
-                           !method->IsNative() && !method->IsProxyMethod());
+  bool enter_interpreter = NeedsInterpreter(method.get(), method->GetEntryPointFromCompiledCode());
   if (enter_interpreter) {
     method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterEntry);
   } else {
@@ -1629,7 +1640,12 @@
 
   if (method->IsStatic() && !method->IsConstructor()) {
     // For static methods excluding the class initializer, install the trampoline.
+    // It will be replaced by the proper entry point by ClassLinker::FixupStaticTrampolines
+    // after initializing class (see ClassLinker::InitializeClass method).
     method->SetEntryPointFromCompiledCode(GetResolutionTrampoline(runtime->GetClassLinker()));
+  } else if (enter_interpreter) {
+    // Set entry point from compiled code if there's no code or in interpreter only mode.
+    method->SetEntryPointFromCompiledCode(GetInterpreterEntryPoint());
   }
 
   if (method->IsNative()) {
@@ -1637,11 +1653,6 @@
     method->UnregisterNative(Thread::Current());
   }
 
-  if (enter_interpreter) {
-    // Set entry point from compiled code if there's no code or in interpreter only mode.
-    method->SetEntryPointFromCompiledCode(GetInterpreterEntryPoint());
-  }
-
   // Allow instrumentation its chance to hijack code.
   runtime->GetInstrumentation()->UpdateMethodsCode(method.get(),
                                                    method->GetEntryPointFromCompiledCode());
diff --git a/src/common_throws.cc b/src/common_throws.cc
index 66e512e..0497901 100644
--- a/src/common_throws.cc
+++ b/src/common_throws.cc
@@ -66,6 +66,14 @@
   }
 }
 
+// AbstractMethodError
+
+void ThrowAbstractMethodError(const mirror::AbstractMethod* method) {
+  ThrowException(NULL, "Ljava/lang/AbstractMethodError;", NULL,
+                 StringPrintf("abstract method \"%s\"",
+                              PrettyMethod(method).c_str()).c_str());
+}
+
 // ArithmeticException
 
 void ThrowArithmeticExceptionDivideByZero() {
diff --git a/src/common_throws.h b/src/common_throws.h
index fbaf4c1..4bf12c0 100644
--- a/src/common_throws.h
+++ b/src/common_throws.h
@@ -30,6 +30,11 @@
 class StringPiece;
 class ThrowLocation;
 
+// AbstractMethodError
+
+void ThrowAbstractMethodError(const mirror::AbstractMethod* method)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
 // ArithmeticException
 
 void ThrowArithmeticExceptionDivideByZero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/src/compiler/dex/dex_to_dex_compiler.cc b/src/compiler/dex/dex_to_dex_compiler.cc
index afb29f4..938de7a 100644
--- a/src/compiler/dex/dex_to_dex_compiler.cc
+++ b/src/compiler/dex/dex_to_dex_compiler.cc
@@ -55,8 +55,25 @@
     return *const_cast<DexFile*>(unit_.GetDexFile());
   }
 
+  // Compiles a RETURN-VOID into a RETURN-VOID-BARRIER within a constructor where
+  // a barrier is required.
+  void CompileReturnVoid(Instruction* inst, uint32_t dex_pc);
+
+  // Compiles a field access into a quick field access.
+  // The field index is replaced by an offset within an Object where we can read
+  // from / write to this field. Therefore, this does not involve any resolution
+  // at runtime.
+  // Since the field index is encoded with 16 bits, we can replace it only if the
+  // field offset can be encoded with 16 bits too.
   void CompileInstanceFieldAccess(Instruction* inst, uint32_t dex_pc,
                                   Instruction::Code new_opcode, bool is_put);
+
+  // Compiles a virtual method invocation into a quick virtual method invocation.
+  // The method index is replaced by the vtable index where the corresponding
+  // AbstractMethod can be found. Therefore, this does not involve any resolution
+  // at runtime.
+  // Since the method index is encoded with 16 bits, we can replace it only if the
+  // vtable index can be encoded with 16 bits too.
   void CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
                             Instruction::Code new_opcode, bool is_range);
 
@@ -124,9 +141,14 @@
   for (uint32_t dex_pc = 0; dex_pc < insns_size;
        inst = const_cast<Instruction*>(inst->Next()), dex_pc = inst->GetDexPc(insns)) {
     switch (inst->Opcode()) {
+      case Instruction::RETURN_VOID:
+        CompileReturnVoid(inst, dex_pc);
+        break;
+
       case Instruction::IGET:
         CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_QUICK, false);
         break;
+
       case Instruction::IGET_WIDE:
         CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_WIDE_QUICK, false);
         break;
@@ -162,12 +184,34 @@
         break;
 
       default:
-        // No optimization.
+        // Nothing to do.
         break;
     }
   }
 }
 
+void DexCompiler::CompileReturnVoid(Instruction* inst, uint32_t dex_pc) {
+  DCHECK(inst->Opcode() == Instruction::RETURN_VOID);
+  // Are we compiling a constructor ?
+  if ((unit_.GetAccessFlags() & kAccConstructor) == 0) {
+    return;
+  }
+  // Do we need a constructor barrier ?
+  if (!driver_.RequiresConstructorBarrier(Thread::Current(), unit_.GetDexFile(),
+                                         unit_.GetClassDefIndex())) {
+    return;
+  }
+  // Replace RETURN_VOID by RETURN_VOID_BARRIER.
+  if (kEnableLogging) {
+    LOG(INFO) << "Replacing " << Instruction::Name(inst->Opcode())
+    << " by " << Instruction::Name(Instruction::RETURN_VOID_BARRIER)
+    << " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
+    << PrettyMethod(unit_.GetDexMethodIndex(), GetDexFile(), true);
+  }
+  ScopedDexWriteAccess sdwa(GetModifiableDexFile(), inst, 2u);
+  inst->SetOpcode(Instruction::RETURN_VOID_BARRIER);
+}
+
 void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
                                              uint32_t dex_pc,
                                              Instruction::Code new_opcode,
diff --git a/src/compiler/dex/frontend.cc b/src/compiler/dex/frontend.cc
index c528d86..033657b 100644
--- a/src/compiler/dex/frontend.cc
+++ b/src/compiler/dex/frontend.cc
@@ -29,8 +29,6 @@
 #include "backend.h"
 #include "base/logging.h"
 
-
-
 namespace {
 #if !defined(ART_USE_PORTABLE_COMPILER)
   pthread_once_t llvm_multi_init = PTHREAD_ONCE_INIT;
@@ -106,7 +104,6 @@
   //(1 << kDebugShowSummaryMemoryUsage) |
   0;
 
-
 static CompiledMethod* CompileMethod(CompilerDriver& compiler,
                                      const CompilerBackend compiler_backend,
                                      const DexFile::CodeItem* code_item,
@@ -116,8 +113,7 @@
 #if defined(ART_USE_PORTABLE_COMPILER)
                                      , llvm::LlvmCompilationUnit* llvm_compilation_unit
 #endif
-)
-{
+) {
   VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
 
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -270,8 +266,7 @@
                                  uint32_t method_idx,
                                  jobject class_loader,
                                  const DexFile& dex_file,
-                                 llvm::LlvmCompilationUnit* llvm_compilation_unit)
-{
+                                 llvm::LlvmCompilationUnit* llvm_compilation_unit) {
   return CompileMethod(compiler, backend, code_item, access_flags, invoke_type, class_def_idx,
                        method_idx, class_loader, dex_file
 #if defined(ART_USE_PORTABLE_COMPILER)
@@ -280,8 +275,6 @@
                        );
 }
 
-
-
 }  // namespace art
 
 extern "C" art::CompiledMethod*
@@ -289,8 +282,7 @@
                           const art::DexFile::CodeItem* code_item,
                           uint32_t access_flags, art::InvokeType invoke_type,
                           uint32_t class_def_idx, uint32_t method_idx, jobject class_loader,
-                          const art::DexFile& dex_file)
-{
+                          const art::DexFile& dex_file) {
   // TODO: check method fingerprint here to determine appropriate backend type.  Until then, use build default
   art::CompilerBackend backend = compiler.GetCompilerBackend();
   return art::CompileOneMethod(compiler, backend, code_item, access_flags, invoke_type,
diff --git a/src/compiler/sea_ir/frontend.cc b/src/compiler/sea_ir/frontend.cc
index d4e1c7e..6bfa459 100644
--- a/src/compiler/sea_ir/frontend.cc
+++ b/src/compiler/sea_ir/frontend.cc
@@ -1,28 +1,16 @@
-
+#ifdef ART_SEA_IR_MODE
 #include <llvm/Support/Threading.h>
-
+#include "compiler/sea_ir/sea.h"
 #include "compiler/driver/compiler_driver.h"
-
-
 #include "compiler/llvm/llvm_compilation_unit.h"
 #include "compiler/dex/portable/mir_to_gbc.h"
-
 #include "leb128.h"
 #include "mirror/object.h"
 #include "runtime.h"
 #include "base/logging.h"
 
-#ifdef ART_SEA_IR_MODE
-#include "compiler/sea_ir/sea.h"
-#endif
-
-
-
-
-#ifdef ART_SEA_IR_MODE
-#include "compiler/sea_ir/sea.h"
 namespace art {
-  
+
 static CompiledMethod* CompileMethodWithSeaIr(CompilerDriver& compiler,
                                      const CompilerBackend compiler_backend,
                                      const DexFile::CodeItem* code_item,
@@ -32,8 +20,9 @@
 #if defined(ART_USE_PORTABLE_COMPILER)
                                      , llvm::LlvmCompilationUnit* llvm_compilation_unit
 #endif
-)
-{
+) {
+  // NOTE: Instead of keeping the convention from the Dalvik frontend.cc
+  //       and silencing the cpplint.py warning, I just corrected the formatting.
   VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
   sea_ir::SeaGraph* sg = sea_ir::SeaGraph::GetCurrentGraph();
   sg->CompileMethod(code_item, class_def_idx, method_idx, dex_file);
@@ -52,15 +41,13 @@
                                  uint32_t method_idx,
                                  jobject class_loader,
                                  const DexFile& dex_file,
-                                 llvm::LlvmCompilationUnit* llvm_compilation_unit)
-{
+                                 llvm::LlvmCompilationUnit* llvm_compilation_unit) {
   return CompileMethodWithSeaIr(compiler, backend, code_item, access_flags, invoke_type, class_def_idx,
                        method_idx, class_loader, dex_file
 #if defined(ART_USE_PORTABLE_COMPILER)
                        , llvm_compilation_unit
 #endif
-
-                       );
+                       ); // NOLINT
 }
 
 extern "C" art::CompiledMethod*
@@ -68,8 +55,7 @@
                           const art::DexFile::CodeItem* code_item,
                           uint32_t access_flags, art::InvokeType invoke_type,
                           uint32_t class_def_idx, uint32_t method_idx, jobject class_loader,
-                          const art::DexFile& dex_file)
-{
+                          const art::DexFile& dex_file) {
   // TODO: check method fingerprint here to determine appropriate backend type.  Until then, use build default
   art::CompilerBackend backend = compiler.GetCompilerBackend();
   return art::SeaIrCompileOneMethod(compiler, backend, code_item, access_flags, invoke_type,
diff --git a/src/compiler/sea_ir/instruction_tools.cc b/src/compiler/sea_ir/instruction_tools.cc
new file mode 100644
index 0000000..68be589
--- /dev/null
+++ b/src/compiler/sea_ir/instruction_tools.cc
@@ -0,0 +1,797 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_tools.h"
+
+namespace sea_ir {
+
+bool InstructionTools::IsDefinition(const art::Instruction* const instruction) {
+  if (0 != (InstructionTools::instruction_attributes_[instruction->Opcode()] & (1 << kDA))) {
+    return true;
+  }
+  return false;
+}
+
+const int InstructionTools::instruction_attributes_[] = {
+  // 00 NOP
+  DF_NOP,
+
+  // 01 MOVE vA, vB
+  DF_DA | DF_UB | DF_IS_MOVE,
+
+  // 02 MOVE_FROM16 vAA, vBBBB
+  DF_DA | DF_UB | DF_IS_MOVE,
+
+  // 03 MOVE_16 vAAAA, vBBBB
+  DF_DA | DF_UB | DF_IS_MOVE,
+
+  // 04 MOVE_WIDE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+  // 05 MOVE_WIDE_FROM16 vAA, vBBBB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+  // 06 MOVE_WIDE_16 vAAAA, vBBBB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+  // 07 MOVE_OBJECT vA, vB
+  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+  // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
+  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+  // 09 MOVE_OBJECT_16 vAAAA, vBBBB
+  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+  // 0A MOVE_RESULT vAA
+  DF_DA,
+
+  // 0B MOVE_RESULT_WIDE vAA
+  DF_DA | DF_A_WIDE,
+
+  // 0C MOVE_RESULT_OBJECT vAA
+  DF_DA | DF_REF_A,
+
+  // 0D MOVE_EXCEPTION vAA
+  DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+  // 0E RETURN_VOID
+  DF_NOP,
+
+  // 0F RETURN vAA
+  DF_UA,
+
+  // 10 RETURN_WIDE vAA
+  DF_UA | DF_A_WIDE,
+
+  // 11 RETURN_OBJECT vAA
+  DF_UA | DF_REF_A,
+
+  // 12 CONST_4 vA, #+B
+  DF_DA | DF_SETS_CONST,
+
+  // 13 CONST_16 vAA, #+BBBB
+  DF_DA | DF_SETS_CONST,
+
+  // 14 CONST vAA, #+BBBBBBBB
+  DF_DA | DF_SETS_CONST,
+
+  // 15 CONST_HIGH16 VAA, #+BBBB0000
+  DF_DA | DF_SETS_CONST,
+
+  // 16 CONST_WIDE_16 vAA, #+BBBB
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
+  DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+  // 1A CONST_STRING vAA, string@BBBB
+  DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+  // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
+  DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+  // 1C CONST_CLASS vAA, type@BBBB
+  DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+  // 1D MONITOR_ENTER vAA
+  DF_UA | DF_NULL_CHK_0 | DF_REF_A,
+
+  // 1E MONITOR_EXIT vAA
+  DF_UA | DF_NULL_CHK_0 | DF_REF_A,
+
+  // 1F CHK_CAST vAA, type@BBBB
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 20 INSTANCE_OF vA, vB, type@CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
+
+  // 21 ARRAY_LENGTH vA, vB
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_A | DF_REF_B,
+
+  // 22 NEW_INSTANCE vAA, type@BBBB
+  DF_DA | DF_NON_NULL_DST | DF_REF_A | DF_UMS,
+
+  // 23 NEW_ARRAY vA, vB, type@CCCC
+  DF_DA | DF_UB | DF_NON_NULL_DST | DF_REF_A | DF_CORE_B | DF_UMS,
+
+  // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NON_NULL_RET | DF_UMS,
+
+  // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
+  DF_FORMAT_3RC | DF_NON_NULL_RET | DF_UMS,
+
+  // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 27 THROW vAA
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 28 GOTO
+  DF_NOP,
+
+  // 29 GOTO_16
+  DF_NOP,
+
+  // 2A GOTO_32
+  DF_NOP,
+
+  // 2B PACKED_SWITCH vAA, +BBBBBBBB
+  DF_UA,
+
+  // 2C SPARSE_SWITCH vAA, +BBBBBBBB
+  DF_UA,
+
+  // 2D CMPL_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 2E CMPG_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 2F CMPL_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 30 CMPG_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+  // 31 CMP_LONG vAA, vBB, vCC
+  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 32 IF_EQ vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 33 IF_NE vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 34 IF_LT vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 35 IF_GE vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 36 IF_GT vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 37 IF_LE vA, vB, +CCCC
+  DF_UA | DF_UB,
+
+  // 38 IF_EQZ vAA, +BBBB
+  DF_UA,
+
+  // 39 IF_NEZ vAA, +BBBB
+  DF_UA,
+
+  // 3A IF_LTZ vAA, +BBBB
+  DF_UA,
+
+  // 3B IF_GEZ vAA, +BBBB
+  DF_UA,
+
+  // 3C IF_GTZ vAA, +BBBB
+  DF_UA,
+
+  // 3D IF_LEZ vAA, +BBBB
+  DF_UA,
+
+  // 3E UNUSED_3E
+  DF_NOP,
+
+  // 3F UNUSED_3F
+  DF_NOP,
+
+  // 40 UNUSED_40
+  DF_NOP,
+
+  // 41 UNUSED_41
+  DF_NOP,
+
+  // 42 UNUSED_42
+  DF_NOP,
+
+  // 43 UNUSED_43
+  DF_NOP,
+
+  // 44 AGET vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 45 AGET_WIDE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 46 AGET_OBJECT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_A | DF_REF_B | DF_CORE_C,
+
+  // 47 AGET_BOOLEAN vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 48 AGET_BYTE vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 49 AGET_CHAR vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 4A AGET_SHORT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+  // 4B APUT vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 4C APUT_WIDE vAA, vBB, vCC
+  DF_UA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_2 | DF_RANGE_CHK_3 | DF_REF_B | DF_CORE_C,
+
+  // 4D APUT_OBJECT vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_A | DF_REF_B | DF_CORE_C,
+
+  // 4E APUT_BOOLEAN vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 4F APUT_BYTE vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 50 APUT_CHAR vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 51 APUT_SHORT vAA, vBB, vCC
+  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+  // 52 IGET vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 53 IGET_WIDE vA, vB, field@CCCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 54 IGET_OBJECT vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
+
+  // 55 IGET_BOOLEAN vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 56 IGET_BYTE vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 57 IGET_CHAR vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 58 IGET_SHORT vA, vB, field@CCCC
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // 59 IPUT vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5A IPUT_WIDE vA, vB, field@CCCC
+  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
+
+  // 5B IPUT_OBJECT vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
+
+  // 5C IPUT_BOOLEAN vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5D IPUT_BYTE vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5E IPUT_CHAR vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 5F IPUT_SHORT vA, vB, field@CCCC
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // 60 SGET vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 61 SGET_WIDE vAA, field@BBBB
+  DF_DA | DF_A_WIDE | DF_UMS,
+
+  // 62 SGET_OBJECT vAA, field@BBBB
+  DF_DA | DF_REF_A | DF_UMS,
+
+  // 63 SGET_BOOLEAN vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 64 SGET_BYTE vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 65 SGET_CHAR vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 66 SGET_SHORT vAA, field@BBBB
+  DF_DA | DF_UMS,
+
+  // 67 SPUT vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 68 SPUT_WIDE vAA, field@BBBB
+  DF_UA | DF_A_WIDE | DF_UMS,
+
+  // 69 SPUT_OBJECT vAA, field@BBBB
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // 6A SPUT_BOOLEAN vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6B SPUT_BYTE vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6C SPUT_CHAR vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6D SPUT_SHORT vAA, field@BBBB
+  DF_UA | DF_UMS,
+
+  // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_UMS,
+
+  // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
+  DF_FORMAT_35C | DF_UMS,
+
+  // 73 UNUSED_73
+  DF_NOP,
+
+  // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_UMS,
+
+  // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
+  DF_FORMAT_3RC | DF_UMS,
+
+  // 79 UNUSED_79
+  DF_NOP,
+
+  // 7A UNUSED_7A
+  DF_NOP,
+
+  // 7B NEG_INT vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 7C NOT_INT vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 7D NEG_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // 7E NOT_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // 7F NEG_FLOAT vA, vB
+  DF_DA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // 80 NEG_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // 81 INT_TO_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 82 INT_TO_FLOAT vA, vB
+  DF_DA | DF_UB | DF_FP_A | DF_CORE_B,
+
+  // 83 INT_TO_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_CORE_B,
+
+  // 84 LONG_TO_INT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // 85 LONG_TO_FLOAT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
+
+  // 86 LONG_TO_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
+
+  // 87 FLOAT_TO_INT vA, vB
+  DF_DA | DF_UB | DF_FP_B | DF_CORE_A,
+
+  // 88 FLOAT_TO_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_FP_B | DF_CORE_A,
+
+  // 89 FLOAT_TO_DOUBLE vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_FP_B,
+
+  // 8A DOUBLE_TO_INT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
+
+  // 8B DOUBLE_TO_LONG vA, vB
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
+
+  // 8C DOUBLE_TO_FLOAT vA, vB
+  DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // 8D INT_TO_BYTE vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 8E INT_TO_CHAR vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 8F INT_TO_SHORT vA, vB
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // 90 ADD_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 91 SUB_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 92 MUL_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 93 DIV_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 94 REM_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 95 AND_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 96 OR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 97 XOR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 98 SHL_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 99 SHR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9A USHR_INT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9B ADD_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9C SUB_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9D MUL_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9E DIV_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // 9F REM_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A0 AND_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A1 OR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A2 XOR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A3 SHL_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A4 SHR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A5 USHR_LONG vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+  // A6 ADD_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // A7 SUB_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // A8 MUL_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // A9 DIV_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AA REM_FLOAT vAA, vBB, vCC
+  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AB ADD_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AC SUB_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AD MUL_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AE DIV_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // AF REM_DOUBLE vAA, vBB, vCC
+  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+  // B0 ADD_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B1 SUB_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B2 MUL_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B3 DIV_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B4 REM_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B5 AND_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B6 OR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B7 XOR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B8 SHL_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // B9 SHR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // BA USHR_INT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // BB ADD_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BC SUB_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BD MUL_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BE DIV_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // BF REM_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C0 AND_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C1 OR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C2 XOR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+  // C3 SHL_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // C4 SHR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // C5 USHR_LONG_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // C6 ADD_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // C7 SUB_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // C8 MUL_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // C9 DIV_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // CA REM_FLOAT_2ADDR vA, vB
+  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+  // CB ADD_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CC SUB_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CD MUL_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CE DIV_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // CF REM_DOUBLE_2ADDR vA, vB
+  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+  // D0 ADD_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D1 RSUB_INT vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D2 MUL_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D3 DIV_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D4 REM_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D5 AND_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D6 OR_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D7 XOR_INT_LIT16 vA, vB, #+CCCC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D8 ADD_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DA MUL_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DB DIV_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DC REM_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DD AND_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DE OR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // DF XOR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E0 SHL_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E1 SHR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E2 USHR_INT_LIT8 vAA, vBB, #+CC
+  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+  // E3 IGET_VOLATILE
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // E4 IPUT_VOLATILE
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+  // E5 SGET_VOLATILE
+  DF_DA | DF_UMS,
+
+  // E6 SPUT_VOLATILE
+  DF_UA | DF_UMS,
+
+  // E7 IGET_OBJECT_VOLATILE
+  DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
+
+  // E8 IGET_WIDE_VOLATILE
+  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+  // E9 IPUT_WIDE_VOLATILE
+  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
+
+  // EA SGET_WIDE_VOLATILE
+  DF_DA | DF_A_WIDE | DF_UMS,
+
+  // EB SPUT_WIDE_VOLATILE
+  DF_UA | DF_A_WIDE | DF_UMS,
+
+  // EC BREAKPOINT
+  DF_NOP,
+
+  // ED THROW_VERIFICATION_ERROR
+  DF_NOP | DF_UMS,
+
+  // EE EXECUTE_INLINE
+  DF_FORMAT_35C,
+
+  // EF EXECUTE_INLINE_RANGE
+  DF_FORMAT_3RC,
+
+  // F0 INVOKE_OBJECT_INIT_RANGE
+  DF_NOP | DF_NULL_CHK_0,
+
+  // F1 RETURN_VOID_BARRIER
+  DF_NOP,
+
+  // F2 IGET_QUICK
+  DF_DA | DF_UB | DF_NULL_CHK_0,
+
+  // F3 IGET_WIDE_QUICK
+  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0,
+
+  // F4 IGET_OBJECT_QUICK
+  DF_DA | DF_UB | DF_NULL_CHK_0,
+
+  // F5 IPUT_QUICK
+  DF_UA | DF_UB | DF_NULL_CHK_1,
+
+  // F6 IPUT_WIDE_QUICK
+  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2,
+
+  // F7 IPUT_OBJECT_QUICK
+  DF_UA | DF_UB | DF_NULL_CHK_1,
+
+  // F8 INVOKE_VIRTUAL_QUICK
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // F9 INVOKE_VIRTUAL_QUICK_RANGE
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // FA INVOKE_SUPER_QUICK
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // FB INVOKE_SUPER_QUICK_RANGE
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // FC IPUT_OBJECT_VOLATILE
+  DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
+
+  // FD SGET_OBJECT_VOLATILE
+  DF_DA | DF_REF_A | DF_UMS,
+
+  // FE SPUT_OBJECT_VOLATILE
+  DF_UA | DF_REF_A | DF_UMS,
+
+  // FF UNUSED_FF
+  DF_NOP
+};
+} // end namespace sea_ir
diff --git a/src/compiler/sea_ir/instruction_tools.h b/src/compiler/sea_ir/instruction_tools.h
new file mode 100644
index 0000000..f68cdd0
--- /dev/null
+++ b/src/compiler/sea_ir/instruction_tools.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "dex_instruction.h"
+
+#ifndef INSTRUCTION_TOOLS_H_
+#define INSTRUCTION_TOOLS_H_
+
+// Note: This file has content cannibalized for SEA_IR from the MIR implementation,
+//       to avoid having a dependence on MIR.
+namespace sea_ir {
+
+#define DF_NOP                  0
+#define DF_UA                   (1 << kUA)
+#define DF_UB                   (1 << kUB)
+#define DF_UC                   (1 << kUC)
+#define DF_A_WIDE               (1 << kAWide)
+#define DF_B_WIDE               (1 << kBWide)
+#define DF_C_WIDE               (1 << kCWide)
+#define DF_DA                   (1 << kDA)
+#define DF_IS_MOVE              (1 << kIsMove)
+#define DF_SETS_CONST           (1 << kSetsConst)
+#define DF_FORMAT_35C           (1 << kFormat35c)
+#define DF_FORMAT_3RC           (1 << kFormat3rc)
+#define DF_NULL_CHK_0           (1 << kNullCheckSrc0)
+#define DF_NULL_CHK_1           (1 << kNullCheckSrc1)
+#define DF_NULL_CHK_2           (1 << kNullCheckSrc2)
+#define DF_NULL_CHK_OUT0        (1 << kNullCheckOut0)
+#define DF_NON_NULL_DST         (1 << kDstNonNull)
+#define DF_NON_NULL_RET         (1 << kRetNonNull)
+#define DF_NULL_TRANSFER_0      (1 << kNullTransferSrc0)
+#define DF_NULL_TRANSFER_N      (1 << kNullTransferSrcN)
+#define DF_RANGE_CHK_1          (1 << kRangeCheckSrc1)
+#define DF_RANGE_CHK_2          (1 << kRangeCheckSrc2)
+#define DF_RANGE_CHK_3          (1 << kRangeCheckSrc3)
+#define DF_FP_A                 (1 << kFPA)
+#define DF_FP_B                 (1 << kFPB)
+#define DF_FP_C                 (1 << kFPC)
+#define DF_CORE_A               (1 << kCoreA)
+#define DF_CORE_B               (1 << kCoreB)
+#define DF_CORE_C               (1 << kCoreC)
+#define DF_REF_A                (1 << kRefA)
+#define DF_REF_B                (1 << kRefB)
+#define DF_REF_C                (1 << kRefC)
+#define DF_UMS                  (1 << kUsesMethodStar)
+
+#define DF_HAS_USES             (DF_UA | DF_UB | DF_UC)
+
+#define DF_HAS_DEFS             (DF_DA)
+
+#define DF_HAS_NULL_CHKS        (DF_NULL_CHK_0 | \
+                                 DF_NULL_CHK_1 | \
+                                 DF_NULL_CHK_2 | \
+                                 DF_NULL_CHK_OUT0)
+
+#define DF_HAS_RANGE_CHKS       (DF_RANGE_CHK_1 | \
+                                 DF_RANGE_CHK_2 | \
+                                 DF_RANGE_CHK_3)
+
+#define DF_HAS_NR_CHKS          (DF_HAS_NULL_CHKS | \
+                                 DF_HAS_RANGE_CHKS)
+
+#define DF_A_IS_REG             (DF_UA | DF_DA)
+#define DF_B_IS_REG             (DF_UB)
+#define DF_C_IS_REG             (DF_UC)
+#define DF_IS_GETTER_OR_SETTER  (DF_IS_GETTER | DF_IS_SETTER)  // NOTE(review): DF_IS_GETTER/DF_IS_SETTER are not defined in this header; expansion fails if used.
+#define DF_USES_FP              (DF_FP_A | DF_FP_B | DF_FP_C)
+
+enum DataFlowAttributePos {
+  kUA = 0,
+  kUB,
+  kUC,
+  kAWide,
+  kBWide,
+  kCWide,
+  kDA,
+  kIsMove,
+  kSetsConst,
+  kFormat35c,
+  kFormat3rc,
+  kNullCheckSrc0,        // Null check of uses[0].
+  kNullCheckSrc1,        // Null check of uses[1].
+  kNullCheckSrc2,        // Null check of uses[2].
+  kNullCheckOut0,        // Null check out outgoing arg0.
+  kDstNonNull,           // May assume dst is non-null.
+  kRetNonNull,           // May assume retval is non-null.
+  kNullTransferSrc0,     // Object copy src[0] -> dst.
+  kNullTransferSrcN,     // Phi null check state transfer.
+  kRangeCheckSrc1,       // Range check of uses[1].
+  kRangeCheckSrc2,       // Range check of uses[2].
+  kRangeCheckSrc3,       // Range check of uses[3].
+  kFPA,
+  kFPB,
+  kFPC,
+  kCoreA,
+  kCoreB,
+  kCoreC,
+  kRefA,
+  kRefB,
+  kRefC,
+  kUsesMethodStar,       // Implicit use of Method*.
+};
+
+class InstructionTools {
+ public:
+  static bool IsDefinition(const art::Instruction* instruction);
+  static const int instruction_attributes_[];
+};
+} // end namespace sea_ir
+#endif // INSTRUCTION_TOOLS_H_
diff --git a/src/compiler/sea_ir/sea.cc b/src/compiler/sea_ir/sea.cc
index e08558f..4a9bc72 100644
--- a/src/compiler/sea_ir/sea.cc
+++ b/src/compiler/sea_ir/sea.cc
@@ -17,14 +17,14 @@
 #include "compiler/sea_ir/sea.h"
 #include "file_output_stream.h"
 
-
+#define MAX_REACHING_DEF_ITERERATIONS (10)
 
 namespace sea_ir {
 
-
 SeaGraph SeaGraph::graph_;
 int SeaNode::current_max_node_id_ = 0;
 
+
 SeaGraph* SeaGraph::GetCurrentGraph() {
   return &sea_ir::SeaGraph::graph_;
 }
@@ -32,16 +32,47 @@
 void SeaGraph::DumpSea(std::string filename) const {
   std::string result;
   result += "digraph seaOfNodes {\n";
-  for(std::vector<Region*>::const_iterator cit = regions_.begin(); cit != regions_.end(); cit++) {
-    result += (*cit)->ToDot();
+  for (std::vector<Region*>::const_iterator cit = regions_.begin(); cit != regions_.end(); cit++) {
+    (*cit)->ToDot(result);
   }
   result += "}\n";
   art::File* file = art::OS::OpenFile(filename.c_str(), true, true);
   art::FileOutputStream fos(file);
   fos.WriteFully(result.c_str(), result.size());
-  LOG(INFO) << "Written SEA string to file...";
+  LOG(INFO) << "Written SEA string to file.";
 }
 
+void SeaGraph::AddEdge(Region* src, Region* dst) const {
+  src->AddSuccessor(dst);
+  dst->AddPredecessor(src);
+}
+
+void SeaGraph::ComputeDownExposedDefs() {
+  for (std::vector<Region*>::iterator region_it = regions_.begin();
+        region_it != regions_.end(); region_it++) {
+      (*region_it)->ComputeDownExposedDefs();
+    }
+}
+
+void SeaGraph::ComputeReachingDefs() {
+  // Iterate until the reaching definitions set doesn't change anymore.
+  // (See Cooper & Torczon, "Engineering a Compiler", second edition, page 487)
+  bool changed = true;
+  int iteration = 0;
+  while (changed && (iteration < MAX_REACHING_DEF_ITERERATIONS)) {
+    iteration++;
+    changed = false;
+    // TODO: optimize the ordering if this becomes performance bottleneck.
+    for (std::vector<Region*>::iterator regions_it = regions_.begin();
+        regions_it != regions_.end();
+        regions_it++) {
+      changed |= (*regions_it)->UpdateReachingDefs();
+    }
+  }
+  DCHECK(!changed) << "Reaching definitions computation did not reach a fixed point.";
+}
+
+
 void SeaGraph::CompileMethod(const art::DexFile::CodeItem* code_item,
   uint32_t class_def_idx, uint32_t method_idx, const art::DexFile& dex_file) {
   const uint16_t* code = code_item->insns_;
@@ -52,7 +83,8 @@
   std::map<const uint16_t*, Region*> target_regions;
   size_t i = 0;
 
-  // Pass 1: Find the start instruction of basic blocks, as targets and flow-though of branches.
+  // Pass: Find the start instruction of basic blocks
+  //         by locating targets and flow-though instructions of branches.
   while (i < size_in_code_units) {
     const art::Instruction* inst = art::Instruction::At(&code[i]);
     if (inst->IsBranch()||inst->IsUnconditional()) {
@@ -61,7 +93,7 @@
         Region* region = GetNewRegion();
         target_regions.insert(std::pair<const uint16_t*, Region*>(&code[i+offset], region));
       }
-      if (inst->IsFlowthrough() &&
+      if (inst->CanFlowThrough() &&
           (target_regions.end() == target_regions.find(&code[i+inst->SizeInCodeUnits()]))) {
         Region* region = GetNewRegion();
         target_regions.insert(std::pair<const uint16_t*, Region*>(&code[i+inst->SizeInCodeUnits()], region));
@@ -70,44 +102,47 @@
     i += inst->SizeInCodeUnits();
   }
 
-
-  // Pass 2: Assign instructions to region nodes and
+  // Pass: Assign instructions to region nodes and
   //         assign branches their control flow successors.
   i = 0;
   r = GetNewRegion();
-  sea_ir::SeaNode* last_node = NULL;
-  sea_ir::SeaNode* node = NULL;
+  sea_ir::InstructionNode* last_node = NULL;
+  sea_ir::InstructionNode* node = NULL;
   while (i < size_in_code_units) {
     const art::Instruction* inst = art::Instruction::At(&code[i]); //TODO: find workaround for this
     last_node = node;
-    node = new sea_ir::SeaNode(inst);
+    node = new sea_ir::InstructionNode(inst);
 
     if (inst->IsBranch() || inst->IsUnconditional()) {
       int32_t offset = inst->GetTargetOffset();
       std::map<const uint16_t*, Region*>::iterator it = target_regions.find(&code[i+offset]);
       DCHECK(it != target_regions.end());
-      node->AddSuccessor(it->second);
+      AddEdge(r, it->second); // Add edge to branch target.
     }
 
     std::map<const uint16_t*, Region*>::iterator it = target_regions.find(&code[i]);
     if (target_regions.end() != it) {
       // Get the already created region because this is a branch target.
       Region* nextRegion = it->second;
-      if (last_node->GetInstruction()->IsBranch() && last_node->GetInstruction()->IsFlowthrough()) {
-        last_node->AddSuccessor(nextRegion);
-
+      if (last_node->GetInstruction()->IsBranch() && last_node->GetInstruction()->CanFlowThrough()) {
+        AddEdge(r, it->second); // Add flow-through edge.
       }
       r = nextRegion;
     }
-
+    bool definesRegister = (0 !=
+            (InstructionTools::instruction_attributes_[inst->Opcode()] & (1 << kDA)));
     LOG(INFO) << inst->GetDexPc(code) << "*** " << inst->DumpString(&dex_file)
-            << " region:" <<r->StringId() << std::endl;
+            << " region:" <<r->StringId() << "Definition?" << definesRegister << std::endl;
     r->AddChild(node);
     i += inst->SizeInCodeUnits();
   }
 
-}
+  // Pass: compute downward-exposed definitions.
+  ComputeDownExposedDefs();
 
+  // Multiple Passes: Compute reaching definitions (iterative fixed-point algorithm)
+  ComputeReachingDefs();
+}
 
 Region* SeaGraph::GetNewRegion() {
   Region* new_region = new Region();
@@ -119,54 +154,175 @@
   DCHECK(r) << "Tried to add NULL region to SEA graph.";
   regions_.push_back(r);
 }
-void Region::AddChild(sea_ir::SeaNode* instruction) {
-  DCHECK(inst) << "Tried to add NULL instruction to region node.";
+
+void Region::AddChild(sea_ir::InstructionNode* instruction) {
+  DCHECK(instruction) << "Tried to add NULL instruction to region node.";
   instructions_.push_back(instruction);
 }
 
 SeaNode* Region::GetLastChild() const {
-  if (instructions_.size()>0) {
+  if (instructions_.size() > 0) {
     return instructions_.back();
   }
   return NULL;
 }
 
-std::string SeaNode::ToDot() const {
-  std::string node = "// Instruction: \n" + StringId() +
-      " [label=\"" + instruction_->DumpString(NULL) + "\"];\n";
-
-  for(std::vector<SeaNode*>::const_iterator cit = successors_.begin();
-      cit != successors_.end(); cit++) {
-    DCHECK(NULL != *cit) << "Null successor found for SeaNode" << StringId() << ".";
-    node += StringId() + " -> " + (*cit)->StringId() + ";\n\n";
+void InstructionNode::ToDot(std::string& result) const {
+  result += "// Instruction: \n" + StringId() +
+      " [label=\"" + instruction_->DumpString(NULL) + "\"";
+  if (de_def_) {
+    result += "style=bold";
   }
-  return node;
+  result += "];\n";
 }
 
-std::string SeaNode::StringId() const {
-  std::stringstream ss;
-  ss << id_;
-  return ss.str();
+int InstructionNode::GetResultRegister() const {
+  if (!InstructionTools::IsDefinition(instruction_)) {
+    return NO_REGISTER;
+  }
+  return instruction_->VRegA();
 }
 
-std::string Region::ToDot() const {
-  std::string result = "// Region: \n" +
-      StringId() + " [label=\"region " + StringId() + "\"];";
+void InstructionNode::MarkAsDEDef() {
+  de_def_ = true;
+}
 
-  for(std::vector<SeaNode*>::const_iterator cit = instructions_.begin();
+void Region::ToDot(std::string& result) const {
+  result += "\n// Region: \n" + StringId() + " [label=\"region " + StringId() + "\"];";
+  // Save instruction nodes that belong to this region.
+  for (std::vector<InstructionNode*>::const_iterator cit = instructions_.begin();
       cit != instructions_.end(); cit++) {
-    result += (*cit)->ToDot();
+    (*cit)->ToDot(result);
     result += StringId() + " -> " + (*cit)->StringId() + ";\n";
   }
 
+  SeaNode* last_child = GetLastChild();  // NULL when the region holds no instructions.
+  for (std::vector<Region*>::const_iterator cit = successors_.begin(); (last_child != NULL) && (cit != successors_.end()); cit++) {
+    DCHECK(NULL != *cit) << "Null successor found for SeaNode " << last_child->StringId() << ".";
+    result += last_child->StringId() + " -> " + (*cit)->StringId() + ";\n\n";
+  }
+
+  // Save reaching definitions.
+  for (std::map<int, std::set<sea_ir::InstructionNode*>* >::const_iterator cit =
+      reaching_defs_.begin();
+      cit != reaching_defs_.end(); cit++) {
+    for (std::set<sea_ir::InstructionNode*>::const_iterator
+        reaching_set_it = (*cit).second->begin();
+        reaching_set_it != (*cit).second->end();
+        reaching_set_it++) {
+      result += (*reaching_set_it)->StringId() +
+         " -> " + StringId() +
+         " [style=dotted]; // Reaching def.\n";
+    }
+  }
+
   result += "// End Region.\n";
-  return result;
 }
 
-void SeaNode::AddSuccessor(SeaNode* successor) {
+
+void Region::ComputeDownExposedDefs() {
+  for (std::vector<InstructionNode*>::const_iterator inst_it = instructions_.begin();
+      inst_it != instructions_.end(); inst_it++) {
+    int reg_no = (*inst_it)->GetResultRegister();
+    std::map<int, InstructionNode*>::iterator res = de_defs_.find(reg_no);
+    if (reg_no == NO_REGISTER) { /* Instruction defines no register: skip. */ } else if (res == de_defs_.end()) {
+      de_defs_.insert(std::pair<int, InstructionNode*>(reg_no, *inst_it));
+    } else {
+      res->second = *inst_it;
+    }
+  }
+
+  for (std::map<int, sea_ir::InstructionNode*>::const_iterator cit = de_defs_.begin();
+      cit != de_defs_.end(); cit++) {
+    (*cit).second->MarkAsDEDef();
+  }
+}
+
+
+const std::map<int, sea_ir::InstructionNode*>* Region::GetDownExposedDefs() const {
+  return &de_defs_;
+}
+
+std::map<int, std::set<sea_ir::InstructionNode*>* >* Region::GetReachingDefs() {
+  return &reaching_defs_;
+}
+
+bool Region::UpdateReachingDefs() {
+  std::map<int, std::set<sea_ir::InstructionNode*>* > new_reaching;
+  for (std::vector<Region*>::const_iterator pred_it = predecessors_.begin();
+      pred_it != predecessors_.end(); pred_it++) {
+    // The reaching_defs variable will contain reaching defs __for current predecessor only__
+    std::map<int, std::set<sea_ir::InstructionNode*>* > reaching_defs;
+    std::map<int, std::set<sea_ir::InstructionNode*>* >* pred_reaching = (*pred_it)->GetReachingDefs();
+    const std::map<int, InstructionNode*>* de_defs = (*pred_it)->GetDownExposedDefs();
+
+    // The definitions from the reaching set of the predecessor
+    // may be shadowed by downward exposed definitions from the predecessor,
+    // otherwise the defs from the reaching set are still good.
+    for (std::map<int, InstructionNode*>::const_iterator de_def = de_defs->begin();
+        de_def != de_defs->end(); de_def++) {
+      std::set<InstructionNode*>* solo_def;
+      solo_def = new std::set<InstructionNode*>();
+      solo_def->insert(de_def->second);
+      reaching_defs.insert(
+          std::pair<int const, std::set<InstructionNode*>*>(de_def->first, solo_def));
+    }
+    LOG(INFO) << "Adding to " << StringId() << " the reaching set of " << (*pred_it)->StringId();
+    reaching_defs.insert(pred_reaching->begin(), pred_reaching->end());
+
+    // Now we combine the reaching map coming from the current predecessor (reaching_defs)
+    // with the accumulated set from all predecessors so far (from new_reaching).
+    std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it = reaching_defs.begin();
+    for (; reaching_it != reaching_defs.end(); reaching_it++) {
+      std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator crt_entry =
+          new_reaching.find(reaching_it->first);
+      if (new_reaching.end() != crt_entry) {
+        crt_entry->second->insert(reaching_it->second->begin(), reaching_it->second->end());
+      } else {
+        new_reaching.insert(
+            std::pair<int, std::set<sea_ir::InstructionNode*>*>(
+                reaching_it->first,
+                reaching_it->second) );
+      }
+    }
+  }
+  bool changed = false;
+  // Because the sets are monotonically increasing,
+  // we can compare sizes instead of using set comparison.
+  // TODO: Find formal proof.
+  int old_size = 0;
+  if (-1 == reaching_defs_size_) {
+    std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it = reaching_defs_.begin();
+    for (; reaching_it != reaching_defs_.end(); reaching_it++) {
+      old_size += (*reaching_it).second->size();
+    }
+  } else {
+    old_size = reaching_defs_size_;
+  }
+  int new_size = 0;
+  std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it = new_reaching.begin();
+  for (; reaching_it != new_reaching.end(); reaching_it++) {
+    new_size += (*reaching_it).second->size();
+  }
+  if (old_size != new_size) {
+    changed = true;
+  }
+  if (changed) {
+    reaching_defs_ = new_reaching;
+    reaching_defs_size_ = new_size;
+  }
+  return changed;
+}
+
+void SeaNode::AddSuccessor(Region* successor) {
   DCHECK(successor) << "Tried to add NULL successor to SEA node.";
   successors_.push_back(successor);
   return;
 }
 
-} // end namespace
+void SeaNode::AddPredecessor(Region* predecessor) {
+  DCHECK(predecessor) << "Tried to add NULL predecessor to SEA node.";
+  predecessors_.push_back(predecessor);
+}
+
+} // end namespace sea_ir
diff --git a/src/compiler/sea_ir/sea.h b/src/compiler/sea_ir/sea.h
index 0ebd4d0..041e299 100644
--- a/src/compiler/sea_ir/sea.h
+++ b/src/compiler/sea_ir/sea.h
@@ -14,8 +14,6 @@
  * limitations under the License.
  */
 
-#include "dex_file.h"
-#include "dex_instruction.h"
 
 #ifndef SEA_IR_H_
 #define SEA_IR_H_
@@ -23,55 +21,105 @@
 #include <set>
 #include <map>
 
-namespace sea_ir {
+#include "compiler/sea_ir/instruction_tools.h"
+#include "dex_file.h"
+#include "dex_instruction.h"
 
+#define NO_REGISTER       (-1)
+
+namespace sea_ir {
+class Region;
 
 class SeaNode {
  public:
-  explicit SeaNode(const art::Instruction* in):id_(GetNewId()), instruction_(in), successors_() {};
-  explicit SeaNode():id_(GetNewId()), instruction_(NULL) {};
-  void AddSuccessor(SeaNode* successor);
-  const art::Instruction* GetInstruction() {
-    DCHECK(NULL != instruction_);
-    return instruction_;
+  explicit SeaNode():id_(GetNewId()), string_id_(), successors_(), predecessors_() {
+    std::stringstream ss;
+    ss << id_;
+    string_id_.append(ss.str());
   }
-  std::string StringId() const;
-  // Returns a dot language formatted string representing the node and
+
+  // Adds CFG predecessors and successors to each block.
+  void AddSuccessor(Region* successor);
+  void AddPredecessor(Region* predecesor);
+
+  // Returns the id of the current block as string
+  const std::string& StringId() const {
+    return string_id_;
+  }
+
+  // Appends to @result a dot language formatted string representing the node and
   //    (by convention) outgoing edges, so that the composition of theToDot() of all nodes
-  //    builds a complete dot graph (without prolog and epilog though).
-  virtual std::string ToDot() const;
-  virtual ~SeaNode(){};
+  //    builds a complete dot graph, but without prolog ("digraph {") and epilog ("}").
+  virtual void ToDot(std::string& result) const = 0;
+
+  virtual ~SeaNode() {}
 
  protected:
-  // Returns the id of the current block as string
-
   static int GetNewId() {
     return current_max_node_id_++;
   }
 
+  const int id_;
+  std::string string_id_;
+  std::vector<sea_ir::Region*> successors_;    // CFG successor nodes (regions)
+  std::vector<sea_ir::Region*> predecessors_;  // CFG predecessor nodes (instructions/regions)
 
  private:
-  const int id_;
-  const art::Instruction* const instruction_;
-  std::vector<sea_ir::SeaNode*> successors_;
   static int current_max_node_id_;
 };
 
+class InstructionNode: public SeaNode {
+ public:
+  explicit InstructionNode(const art::Instruction* in):SeaNode(), instruction_(in), de_def_(false) {}
+
+  const art::Instruction* GetInstruction() const {
+    DCHECK(NULL != instruction_) << "Tried to access NULL instruction in an InstructionNode.";
+    return instruction_;
+  }
+  // Returns the register that is defined by the current instruction, or NO_REGISTER otherwise.
+  int GetResultRegister() const;
+  void ToDot(std::string& result) const;
+  void MarkAsDEDef();
+
+ private:
+  const art::Instruction* const instruction_;
+  bool de_def_;
+};
+
 
 
 class Region : public SeaNode {
  public:
-  explicit Region():SeaNode() {}
-  void AddChild(sea_ir::SeaNode* instruction);
+  explicit Region():SeaNode(), reaching_defs_size_(-1) {}
+
+  // Adds @inst as an instruction node child in the current region.
+  void AddChild(sea_ir::InstructionNode* inst);
+
+  // Returns the last instruction node child of the current region.
+  // This child has the CFG successors pointing to the new regions.
   SeaNode* GetLastChild() const;
 
-  // Returns a dot language formatted string representing the node and
+  // Appends to @result a dot language formatted string representing the node and
   //    (by convention) outgoing edges, so that the composition of theToDot() of all nodes
   //    builds a complete dot graph (without prolog and epilog though).
-  virtual std::string ToDot() const;
+  virtual void ToDot(std::string& result) const;
+
+  // Computes Downward Exposed Definitions for the current node.
+  void ComputeDownExposedDefs();
+  const std::map<int, sea_ir::InstructionNode*>* GetDownExposedDefs() const;
+
+  // Performs one iteration of the reaching definitions algorithm
+  // and returns true if the reaching definitions set changed.
+  bool UpdateReachingDefs();
+
+  // Returns the set of reaching definitions for the current region.
+  std::map<int, std::set<sea_ir::InstructionNode*>* >* GetReachingDefs();
 
  private:
-  std::vector<sea_ir::SeaNode*> instructions_;
+  std::vector<sea_ir::InstructionNode*> instructions_;
+  std::map<int, sea_ir::InstructionNode*> de_defs_;
+  std::map<int, std::set<sea_ir::InstructionNode*>* > reaching_defs_;
+  int reaching_defs_size_;
 };
 
 
@@ -81,8 +129,20 @@
   static SeaGraph* GetCurrentGraph();
   void CompileMethod(const art::DexFile::CodeItem* code_item,
       uint32_t class_def_idx, uint32_t method_idx, const art::DexFile& dex_file);
+
   // Returns a string representation of the region and its Instruction children
   void DumpSea(std::string filename) const;
+
+  // Adds a CFG edge from @src node to @dst node.
+  void AddEdge(Region* src, Region* dst) const;
+
+  // Computes Downward Exposed Definitions for all regions in the graph.
+  void ComputeDownExposedDefs();
+
+  // Computes the reaching definitions set following the equations from
+  // Cooper & Torczon, "Engineering a Compiler", second edition, page 491
+  void ComputeReachingDefs();
+
   /*** Static helper functions follow: ***/
   static int ParseInstruction(const uint16_t* code_ptr,
       art::DecodedInstruction* decoded_instruction);
diff --git a/src/dex_instruction.cc b/src/dex_instruction.cc
index c5901aa..6527f10 100644
--- a/src/dex_instruction.cc
+++ b/src/dex_instruction.cc
@@ -122,6 +122,38 @@
   return 0;
 }
 
+int32_t Instruction::VRegA() const {
+  switch (FormatOf(Opcode())) {
+    case k10t: return VRegA_10t();
+    case k10x: return VRegA_10x();
+    case k11n: return VRegA_11n();
+    case k11x: return VRegA_11x();
+    case k12x: return VRegA_12x();
+    case k20t: return VRegA_20t();
+    case k21c: return VRegA_21c();
+    case k21h: return VRegA_21h();
+    case k21s: return VRegA_21s();
+    case k21t: return VRegA_21t();
+    case k22b: return VRegA_22b();
+    case k22c: return VRegA_22c();
+    case k22s: return VRegA_22s();
+    case k22t: return VRegA_22t();
+    case k22x: return VRegA_22x();
+    case k23x: return VRegA_23x();
+    case k30t: return VRegA_30t();
+    case k31c: return VRegA_31c();
+    case k31i: return VRegA_31i();
+    case k31t: return VRegA_31t();
+    case k32x: return VRegA_32x();
+    case k35c: return VRegA_35c();
+    case k3rc: return VRegA_3rc();
+    case k51l: return VRegA_51l();
+    default: LOG(FATAL) << "Tried to access vA of instruction "<< Name() <<
+        " which has no A operand.";
+  }
+  return 0;
+}
+
 int32_t Instruction::GetTargetOffset() const {
   switch (FormatOf(Opcode())) {
     // Cases for conditional branches follow.
diff --git a/src/dex_instruction.h b/src/dex_instruction.h
index 602667a..0407c57 100644
--- a/src/dex_instruction.h
+++ b/src/dex_instruction.h
@@ -215,6 +215,7 @@
   }
 
   // VRegA
+  int32_t VRegA() const;
   int8_t VRegA_10t() const;
   uint8_t VRegA_10x() const;
   uint4_t VRegA_11n() const;
diff --git a/src/dex_instruction_list.h b/src/dex_instruction_list.h
index 9daec61..8257c78 100644
--- a/src/dex_instruction_list.h
+++ b/src/dex_instruction_list.h
@@ -130,7 +130,7 @@
   V(0x70, INVOKE_DIRECT, "invoke-direct", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArg) \
   V(0x71, INVOKE_STATIC, "invoke-static", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArg) \
   V(0x72, INVOKE_INTERFACE, "invoke-interface", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArg) \
-  V(0x73, UNUSED_73, "unused-73", k10x, false, kUnknown, 0, kVerifyError) \
+  V(0x73, RETURN_VOID_BARRIER, "return-void-barrier", k10x, false, kNone, kReturn, kVerifyNone) \
   V(0x74, INVOKE_VIRTUAL_RANGE, "invoke-virtual/range", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRange) \
   V(0x75, INVOKE_SUPER_RANGE, "invoke-super/range", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRange) \
   V(0x76, INVOKE_DIRECT_RANGE, "invoke-direct/range", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRange) \
diff --git a/src/gc/collector/mark_sweep.cc b/src/gc/collector/mark_sweep.cc
index d54fec6..279796f 100644
--- a/src/gc/collector/mark_sweep.cc
+++ b/src/gc/collector/mark_sweep.cc
@@ -58,7 +58,7 @@
 
 // Performance options.
 static const bool kParallelMarkStack = true;
-static const bool kDisableFinger = kParallelMarkStack;
+static const bool kDisableFinger = true; // TODO: Fix, bit rotten.
 static const bool kUseMarkStackPrefetch = true;
 
 // Profiling and information flags.
@@ -1477,7 +1477,7 @@
   heap->PostGcVerification(this);
 
   timings_.NewSplit("GrowForUtilization");
-  heap->GrowForUtilization(GetDurationNs());
+  heap->GrowForUtilization(GetGcType(), GetDurationNs());
 
   timings_.NewSplit("RequestHeapTrim");
   heap->RequestHeapTrim();
diff --git a/src/gc/heap.cc b/src/gc/heap.cc
index 7bd8687..a68cc02 100644
--- a/src/gc/heap.cc
+++ b/src/gc/heap.cc
@@ -166,13 +166,12 @@
       reference_queue_lock_(NULL),
       is_gc_running_(false),
       last_gc_type_(collector::kGcTypeNone),
+      next_gc_type_(collector::kGcTypePartial),
       capacity_(capacity),
       growth_limit_(growth_limit),
       max_allowed_footprint_(initial_size),
       concurrent_start_bytes_(concurrent_gc ? initial_size - (kMinConcurrentRemainingBytes)
                                             :  std::numeric_limits<size_t>::max()),
-      sticky_gc_count_(0),
-      sticky_to_partial_gc_ratio_(10),
       total_bytes_freed_ever_(0),
       total_objects_freed_ever_(0),
       large_object_threshold_(3 * kPageSize),
@@ -536,7 +535,7 @@
   size_t size = 0;
   uint64_t allocation_start = 0;
   if (measure_allocation_time_) {
-    allocation_start = NanoTime();
+    allocation_start = NanoTime() / kTimeAdjust;
   }
 
   // We need to have a zygote space or else our newly allocated large object can end up in the
@@ -580,7 +579,7 @@
     VerifyObject(obj);
 
     if (measure_allocation_time_) {
-      total_allocation_time_ += (NanoTime() - allocation_start) / kTimeAdjust;
+      total_allocation_time_ += NanoTime() / kTimeAdjust - allocation_start;
     }
 
     return obj;
@@ -1169,20 +1168,6 @@
     ++Thread::Current()->GetStats()->gc_for_alloc_count;
   }
 
-  // We need to do partial GCs every now and then to avoid the heap growing too much and
-  // fragmenting.
-  // TODO: if sticky GCs are failing to free memory then we should lower the
-  // sticky_to_partial_gc_ratio_, if they are successful we can increase it.
-  if (gc_type == collector::kGcTypeSticky) {
-    ++sticky_gc_count_;
-    if (sticky_gc_count_ >= sticky_to_partial_gc_ratio_) {
-      gc_type = have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
-      sticky_gc_count_ = 0;
-    }
-  } else {
-    sticky_gc_count_ = 0;
-  }
-
   uint64_t gc_start_time_ns = NanoTime();
   uint64_t gc_start_size = GetBytesAllocated();
   // Approximate allocation rate in bytes / second.
@@ -1195,6 +1180,11 @@
     VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
   }
 
+  if (gc_type == collector::kGcTypeSticky &&
+      alloc_space_->Size() < min_alloc_space_size_for_sticky_gc_) {
+    gc_type = collector::kGcTypePartial;
+  }
+
   DCHECK_LT(gc_type, collector::kGcTypeMax);
   DCHECK_NE(gc_type, collector::kGcTypeNone);
   collector::MarkSweep* collector = NULL;
@@ -1700,20 +1690,39 @@
   max_allowed_footprint_ = max_allowed_footprint;
 }
 
-void Heap::GrowForUtilization(uint64_t gc_duration) {
+void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
   // We know what our utilization is at this moment.
   // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
   const size_t bytes_allocated = GetBytesAllocated();
   last_gc_size_ = bytes_allocated;
   last_gc_time_ns_ = NanoTime();
 
-  size_t target_size = bytes_allocated / GetTargetHeapUtilization();
-  if (target_size > bytes_allocated + max_free_) {
-    target_size = bytes_allocated + max_free_;
-  } else if (target_size < bytes_allocated + min_free_) {
-    target_size = bytes_allocated + min_free_;
-  }
+  size_t target_size;
+  if (gc_type != collector::kGcTypeSticky) {
+    // Grow the heap for non sticky GC.
+    target_size = bytes_allocated / GetTargetHeapUtilization();
+    if (target_size > bytes_allocated + max_free_) {
+      target_size = bytes_allocated + max_free_;
+    } else if (target_size < bytes_allocated + min_free_) {
+      target_size = bytes_allocated + min_free_;
+    }
+    next_gc_type_ = collector::kGcTypeSticky;
+  } else {
+    // Based on how close the current heap size is to the target size, decide
+    // whether or not to do a partial or sticky GC next.
+    if (bytes_allocated + min_free_ <= max_allowed_footprint_) {
+      next_gc_type_ = collector::kGcTypeSticky;
+    } else {
+      next_gc_type_ = collector::kGcTypePartial;
+    }
 
+    // If we have freed enough memory, shrink the heap back down.
+    if (bytes_allocated + max_free_ < max_allowed_footprint_) {
+      target_size = bytes_allocated + max_free_;
+    } else {
+      target_size = std::max(bytes_allocated, max_allowed_footprint_);
+    }
+  }
   SetIdealFootprint(target_size);
 
   // Calculate when to perform the next ConcurrentGC.
@@ -1890,11 +1899,7 @@
 
   // Wait for any GCs currently running to finish.
   if (WaitForConcurrentGcToComplete(self) == collector::kGcTypeNone) {
-    if (alloc_space_->Size() > min_alloc_space_size_for_sticky_gc_) {
-      CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseBackground, false);
-    } else {
-      CollectGarbageInternal(collector::kGcTypePartial, kGcCauseBackground, false);
-    }
+    CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false);
   }
 }
 
diff --git a/src/gc/heap.h b/src/gc/heap.h
index d86c7dc..790ab02 100644
--- a/src/gc/heap.h
+++ b/src/gc/heap.h
@@ -424,7 +424,7 @@
   // Given the current contents of the alloc space, increase the allowed heap footprint to match
   // the target utilization ratio.  This should only be called immediately after a full garbage
   // collection.
-  void GrowForUtilization(uint64_t gc_duration);
+  void GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration);
 
   size_t GetPercentFree();
 
@@ -488,6 +488,7 @@
 
   // Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
   volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
+  collector::GcType next_gc_type_;
 
   // Maximum size that the heap can reach.
   const size_t capacity_;
@@ -502,12 +503,6 @@
   // it completes ahead of an allocation failing.
   size_t concurrent_start_bytes_;
 
-  // Number of back-to-back sticky mark sweep collections.
-  size_t sticky_gc_count_;
-
-  // After how many sticky GCs we force to do a partial GC instead of sticky mark bits GC.
-  const size_t sticky_to_partial_gc_ratio_;
-
   // Since the heap was created, how many bytes have been freed.
   size_t total_bytes_freed_ever_;
 
diff --git a/src/instrumentation.cc b/src/instrumentation.cc
index 8af0885..8598d6d 100644
--- a/src/instrumentation.cc
+++ b/src/instrumentation.cc
@@ -131,7 +131,8 @@
       uintptr_t return_pc = GetReturnPc();
       CHECK_NE(return_pc, instrumentation_exit_pc_);
       CHECK_NE(return_pc, 0U);
-      InstrumentationStackFrame instrumentation_frame(GetThisObject(), m, return_pc, GetFrameId());
+      InstrumentationStackFrame instrumentation_frame(GetThisObject(), m, return_pc, GetFrameId(),
+                                                      false);
       if (kVerboseInstrumentation) {
         LOG(INFO) << "Pushing frame " << instrumentation_frame.Dump();
       }
@@ -209,7 +210,11 @@
           if (kVerboseInstrumentation) {
             LOG(INFO) << "  Removing exit stub in " << DescribeLocation();
           }
-          CHECK(m == instrumentation_frame.method_) << PrettyMethod(m);
+          if (instrumentation_frame.interpreter_entry_) {
+            CHECK(m == Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
+          } else {
+            CHECK(m == instrumentation_frame.method_) << PrettyMethod(m);
+          }
           SetReturnPc(instrumentation_frame.return_pc_);
           // Create the method exit events. As the methods didn't really exit the result is 0.
           instrumentation_->MethodExitEvent(thread_, instrumentation_frame.this_object_, m,
@@ -222,7 +227,6 @@
       if (!removed_stub) {
         if (kVerboseInstrumentation) {
           LOG(INFO) << "  No exit stub in " << DescribeLocation();
-          DescribeStack(thread_);
         }
       }
       return true;  // Continue.
@@ -380,7 +384,8 @@
   if (LIKELY(!instrumentation_stubs_installed_)) {
     const void* code = method->GetEntryPointFromCompiledCode();
     DCHECK(code != NULL);
-    if (LIKELY(code != GetResolutionTrampoline(runtime->GetClassLinker()))) {
+    if (LIKELY(code != GetResolutionTrampoline(runtime->GetClassLinker()) &&
+               code != GetInterpreterEntryPoint())) {
       return code;
     }
   }
@@ -463,7 +468,7 @@
 
 void Instrumentation::PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object,
                                                     mirror::AbstractMethod* method,
-                                                    uintptr_t lr) {
+                                                    uintptr_t lr, bool interpreter_entry) {
   // We have a callee-save frame meaning this value is guaranteed to never be 0.
   size_t frame_id = StackVisitor::ComputeNumFrames(self);
   std::deque<instrumentation::InstrumentationStackFrame>* stack = self->GetInstrumentationStack();
@@ -471,7 +476,7 @@
     LOG(INFO) << "Entering " << PrettyMethod(method) << " from PC " << (void*)lr;
   }
   instrumentation::InstrumentationStackFrame instrumentation_frame(this_object, method, lr,
-                                                                   frame_id);
+                                                                   frame_id, interpreter_entry);
   stack->push_front(instrumentation_frame);
 
   MethodEnterEvent(self, this_object, method, 0);
diff --git a/src/instrumentation.h b/src/instrumentation.h
index e79c75e..5fea34f 100644
--- a/src/instrumentation.h
+++ b/src/instrumentation.h
@@ -192,7 +192,8 @@
   // Called when an instrumented method is entered. The intended link register (lr) is saved so
   // that returning causes a branch to the method exit stub. Generates method enter events.
   void PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object,
-                                     mirror::AbstractMethod* method, uintptr_t lr)
+                                     mirror::AbstractMethod* method, uintptr_t lr,
+                                     bool interpreter_entry)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Called when an instrumented method is exited. Removes the pushed instrumentation frame
@@ -272,8 +273,9 @@
 // An element in the instrumentation side stack maintained in art::Thread.
 struct InstrumentationStackFrame {
   InstrumentationStackFrame(mirror::Object* this_object, mirror::AbstractMethod* method,
-                            uintptr_t return_pc, size_t frame_id)
-      : this_object_(this_object), method_(method), return_pc_(return_pc), frame_id_(frame_id) {
+                            uintptr_t return_pc, size_t frame_id, bool interpreter_entry)
+      : this_object_(this_object), method_(method), return_pc_(return_pc), frame_id_(frame_id),
+        interpreter_entry_(interpreter_entry) {
   }
 
   std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -282,6 +284,7 @@
   mirror::AbstractMethod* method_;
   const uintptr_t return_pc_;
   const size_t frame_id_;
+  bool interpreter_entry_;
 };
 
 }  // namespace instrumentation
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index 1e8ee9c..16e04a5 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -348,6 +348,13 @@
         jresult = fn(soa.Env(), rcvr.get());
       }
       result->SetL(soa.Decode<Object*>(jresult));
+    } else if (shorty == "V") {
+      typedef void (fnptr)(JNIEnv*, jobject);
+      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      ScopedLocalRef<jobject> rcvr(soa.Env(),
+                                   soa.AddLocalReference<jobject>(receiver));
+      ScopedThreadStateChange tsc(self, kNative);
+      fn(soa.Env(), rcvr.get());
     } else if (shorty == "LL") {
       typedef jobject (fnptr)(JNIEnv*, jobject, jobject);
       const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
@@ -413,9 +420,7 @@
     num_regs = code_item->registers_size_;
     num_ins = code_item->ins_size_;
   } else if (method->IsAbstract()) {
-    ThrowLocation throw_location = self->GetCurrentLocationForThrow();
-    self->ThrowNewExceptionF(throw_location, "Ljava/lang/AbstractMethodError;",
-                             "abstract method \"%s\"", PrettyMethod(method).c_str());
+    ThrowAbstractMethodError(method);
     return;
   } else {
     DCHECK(method->IsNative() || method->IsProxyMethod());
@@ -504,9 +509,7 @@
     num_regs = code_item->registers_size_;
     num_ins = code_item->ins_size_;
   } else if (method->IsAbstract()) {
-    ThrowLocation throw_location = self->GetCurrentLocationForThrow();
-    self->ThrowNewExceptionF(throw_location, "Ljava/lang/AbstractMethodError;",
-                             "abstract method \"%s\"", PrettyMethod(method).c_str());
+    ThrowAbstractMethodError(method);
     return;
   } else {
     DCHECK(method->IsNative() || method->IsProxyMethod());
@@ -1136,6 +1139,17 @@
         }
         return result;
       }
+      case Instruction::RETURN_VOID_BARRIER: {
+        PREAMBLE();
+        ANDROID_MEMBAR_STORE();
+        JValue result;
+        if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+          instrumentation->MethodExitEvent(self, this_object_ref.get(),
+                                           shadow_frame.GetMethod(), inst->GetDexPc(insns),
+                                           result);
+        }
+        return result;
+      }
       case Instruction::RETURN: {
         PREAMBLE();
         JValue result;
@@ -2932,7 +2946,6 @@
         break;
       case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
       case Instruction::UNUSED_EB ... Instruction::UNUSED_FF:
-      case Instruction::UNUSED_73:
       case Instruction::UNUSED_79:
       case Instruction::UNUSED_7A:
         UnexpectedOpcode(inst, mh);
@@ -2971,9 +2984,7 @@
     num_regs =  code_item->registers_size_;
     num_ins = code_item->ins_size_;
   } else if (method->IsAbstract()) {
-    ThrowLocation throw_location = self->GetCurrentLocationForThrow();
-    self->ThrowNewExceptionF(throw_location, "Ljava/lang/AbstractMethodError;",
-                             "abstract method \"%s\"", PrettyMethod(method).c_str());
+    ThrowAbstractMethodError(method);
     return;
   } else {
     DCHECK(method->IsNative());
diff --git a/src/jdwp/jdwp_main.cc b/src/jdwp/jdwp_main.cc
index df74988..3b6dd81 100644
--- a/src/jdwp/jdwp_main.cc
+++ b/src/jdwp/jdwp_main.cc
@@ -36,7 +36,7 @@
  * JdwpNetStateBase class implementation
  */
 JdwpNetStateBase::JdwpNetStateBase(JdwpState* state)
-    : state_(state), socket_lock_("JdwpNetStateBase lock") {
+    : state_(state), socket_lock_("JdwpNetStateBase lock", kJdwpSerialSocketLock) {
   clientSock = -1;
   wake_pipe_[0] = -1;
   wake_pipe_[1] = -1;
@@ -211,7 +211,7 @@
       attach_lock_("JDWP attach lock", kJdwpAttachLock),
       attach_cond_("JDWP attach condition variable", attach_lock_),
       last_activity_time_ms_(0),
-      serial_lock_("JDWP serial lock", kJdwpSerialLock),
+      serial_lock_("JDWP serial lock", kJdwpSerialSocketLock),
       request_serial_(0x10000000),
       event_serial_(0x20000000),
       event_list_lock_("JDWP event list lock", kJdwpEventListLock),
diff --git a/src/jni_internal.cc b/src/jni_internal.cc
index e457edc..cbdf3cd 100644
--- a/src/jni_internal.cc
+++ b/src/jni_internal.cc
@@ -29,6 +29,7 @@
 #include "class_linker.h"
 #include "dex_file-inl.h"
 #include "gc/accounting/card_table-inl.h"
+#include "interpreter/interpreter.h"
 #include "invoke_arg_array_builder.h"
 #include "jni.h"
 #include "mirror/class-inl.h"
@@ -62,17 +63,10 @@
 static const size_t kPinTableMax = 1024; // Arbitrary sanity check.
 
 static size_t gGlobalsInitial = 512; // Arbitrary.
-static size_t gGlobalsMax = 51200; // Arbitrary sanity check.
+static size_t gGlobalsMax = 51200; // Arbitrary sanity check. (Must fit in 16 bits.)
 
 static const size_t kWeakGlobalsInitial = 16; // Arbitrary.
-static const size_t kWeakGlobalsMax = 51200; // Arbitrary sanity check.
-
-void SetJniGlobalsMax(size_t max) {
-  if (max != 0) {
-    gGlobalsMax = max;
-    gGlobalsInitial = std::min(gGlobalsInitial, gGlobalsMax);
-  }
-}
+static const size_t kWeakGlobalsMax = 51200; // Arbitrary sanity check. (Must fit in 16 bits.)
 
 static jweak AddWeakGlobalReference(ScopedObjectAccess& soa, Object* obj)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -137,10 +131,11 @@
 void InvokeWithArgArray(const ScopedObjectAccess& soa, AbstractMethod* method,
                         ArgArray* arg_array, JValue* result, char result_type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint32_t* args = arg_array->GetArray();
   if (UNLIKELY(soa.Env()->check_jni)) {
-    CheckMethodArguments(method, arg_array->GetArray());
+    CheckMethodArguments(method, args);
   }
-  method->Invoke(soa.Self(), arg_array->GetArray(), arg_array->GetNumBytes(), result, result_type);
+  method->Invoke(soa.Self(), args, arg_array->GetNumBytes(), result, result_type);
 }
 
 static JValue InvokeWithVarArgs(const ScopedObjectAccess& soa, jobject obj,
diff --git a/src/jni_internal.h b/src/jni_internal.h
index dc6ca4a..7b43f95 100644
--- a/src/jni_internal.h
+++ b/src/jni_internal.h
@@ -48,7 +48,6 @@
 class ScopedObjectAccess;
 class Thread;
 
-void SetJniGlobalsMax(size_t max);
 void JniAbortF(const char* jni_function_name, const char* fmt, ...)
     __attribute__((__format__(__printf__, 2, 3)));
 void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods,
diff --git a/src/locks.h b/src/locks.h
index 202fa02..91437e1 100644
--- a/src/locks.h
+++ b/src/locks.h
@@ -36,9 +36,9 @@
   kUnexpectedSignalLock,
   kThreadSuspendCountLock,
   kAbortLock,
+  kJdwpSerialSocketLock,
   kAllocSpaceLock,
   kDefaultMutexLevel,
-  kJdwpSerialLock,
   kMarkSweepLargeObjectLock,
   kPinTableLock,
   kLoadLibraryLock,
diff --git a/src/oat/runtime/support_instrumentation.cc b/src/oat/runtime/support_instrumentation.cc
index 8f56ce3..1f1b952 100644
--- a/src/oat/runtime/support_instrumentation.cc
+++ b/src/oat/runtime/support_instrumentation.cc
@@ -31,9 +31,10 @@
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-  instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? NULL : this_object,
-                                                 method, lr);
   const void* result = instrumentation->GetQuickCodeFor(method);
+  bool interpreter_entry = (result == GetInterpreterEntryPoint());
+  instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? NULL : this_object,
+                                                 method, lr, interpreter_entry);
   CHECK(result != NULL) << PrettyMethod(method);
   return result;
 }
diff --git a/src/oat/runtime/support_stubs.cc b/src/oat/runtime/support_stubs.cc
index 71b67d0..096cb9c 100644
--- a/src/oat/runtime/support_stubs.cc
+++ b/src/oat/runtime/support_stubs.cc
@@ -409,9 +409,7 @@
 #else
   UNUSED(sp);
 #endif
-  ThrowLocation throw_location = self->GetCurrentLocationForThrow();
-  self->ThrowNewExceptionF(throw_location, "Ljava/lang/AbstractMethodError;",
-                           "abstract method \"%s\"", PrettyMethod(method).c_str());
+  ThrowAbstractMethodError(method);
   self->QuickDeliverException();
 }
 
diff --git a/src/object_utils.h b/src/object_utils.h
index 6a07425..4af5d4c 100644
--- a/src/object_utils.h
+++ b/src/object_utils.h
@@ -439,8 +439,7 @@
     return GetClassLinker()->ResolveString(dex_file, method_id.name_idx_, GetDexCache());
   }
 
-  const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     const char* result = shorty_;
     if (result == NULL) {
       const DexFile& dex_file = GetDexFile();
diff --git a/src/runtime.cc b/src/runtime.cc
index 3a528a1..e5fb46f 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -345,7 +345,6 @@
   parsed->interpreter_only_ = false;
   parsed->is_concurrent_gc_enabled_ = true;
 
-  parsed->jni_globals_max_ = 0;
   parsed->lock_profiling_threshold_ = 0;
   parsed->hook_is_sensitive_thread_ = NULL;
 
@@ -535,7 +534,7 @@
         }
       }
     } else if (StartsWith(option, "-Xjnigreflimit:")) {
-      parsed->jni_globals_max_ = ParseIntegerOrDie(option);
+      // Silently ignored for backwards compatibility.
     } else if (StartsWith(option, "-Xlockprofthreshold:")) {
       parsed->lock_profiling_threshold_ = ParseIntegerOrDie(option);
     } else if (StartsWith(option, "-Xstacktracefile:")) {
@@ -791,7 +790,6 @@
 
   QuasiAtomic::Startup();
 
-  SetJniGlobalsMax(options->jni_globals_max_);
   Monitor::Init(options->lock_profiling_threshold_, options->hook_is_sensitive_thread_);
 
   host_prefix_ = options->host_prefix_;
diff --git a/src/runtime.h b/src/runtime.h
index 0b893a3..31b3a1b 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -63,7 +63,7 @@
  public:
   typedef std::vector<std::pair<std::string, const void*> > Options;
 
-  // In small mode, apps with fewer than this number of methods will be compiled 
+  // In small mode, apps with fewer than this number of methods will be compiled
   // anyways.
   // TODO: come up with a reasonable default.
   static const size_t kDefaultSmallModeMethodThreshold = 0;
@@ -96,7 +96,6 @@
     size_t heap_max_free_;
     double heap_target_utilization_;
     size_t stack_size_;
-    size_t jni_globals_max_;
     size_t lock_profiling_threshold_;
     std::string stack_trace_file_;
     bool method_trace_;
diff --git a/src/stack.cc b/src/stack.cc
index 8672975..fcd0f2d 100644
--- a/src/stack.cc
+++ b/src/stack.cc
@@ -310,7 +310,12 @@
             instrumentation::InstrumentationStackFrame instrumentation_frame =
                 GetInstrumentationStackFrame(instrumentation_stack_depth);
             instrumentation_stack_depth++;
-            if (instrumentation_frame.method_ != GetMethod()) {
+            if (instrumentation_frame.interpreter_entry_) {
+              mirror::AbstractMethod* callee = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+              if (GetMethod() != callee) {
+                LOG(FATAL) << "Expected: " << callee << " Found: " << PrettyMethod(GetMethod());
+              }
+            } else if (instrumentation_frame.method_ != GetMethod()) {
               LOG(FATAL)  << "Expected: " << PrettyMethod(instrumentation_frame.method_)
                 << " Found: " << PrettyMethod(GetMethod());
             }
diff --git a/src/thread_list.cc b/src/thread_list.cc
index eacd848..59c38b4 100644
--- a/src/thread_list.cc
+++ b/src/thread_list.cc
@@ -140,14 +140,6 @@
   ss << "Thread suspend timeout\n";
   runtime->DumpLockHolders(ss);
   ss << "\n";
-  Locks::mutator_lock_->SharedTryLock(self);
-  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
-    LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
-  }
-  Locks::thread_list_lock_->TryLock(self);
-  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
-    LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
-  }
   runtime->GetThreadList()->DumpLocked(ss);
   LOG(FATAL) << ss.str();
 }
diff --git a/src/verifier/method_verifier.cc b/src/verifier/method_verifier.cc
index 74a79e0..c0f1daa 100644
--- a/src/verifier/method_verifier.cc
+++ b/src/verifier/method_verifier.cc
@@ -2419,7 +2419,12 @@
       break;
 
     // Special instructions.
-    //
+    case Instruction::RETURN_VOID_BARRIER:
+      DCHECK(Runtime::Current()->IsStarted());
+      if (!IsConstructor()) {
+          Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void-barrier not expected";
+      }
+      break;
     // Note: the following instructions encode offsets derived from class linking.
     // As such they use Class*/Field*/AbstractMethod* as these offsets only have
     // meaning if the class linking and resolution were successful.
@@ -2465,7 +2470,6 @@
     case Instruction::UNUSED_41:
     case Instruction::UNUSED_42:
     case Instruction::UNUSED_43:
-    case Instruction::UNUSED_73:
     case Instruction::UNUSED_79:
     case Instruction::UNUSED_7A:
     case Instruction::UNUSED_EB: