Merge "Add support for inlining already sharpened interface calls."
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 1386439..6192be7 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -262,6 +262,13 @@
   EXPECT_SINGLE_PARSE_FAIL("-verbose:blablabla", CmdlineResult::kUsage);  // invalid verbose opt
 
   {
+    const char* log_args = "-verbose:deopt";
+    LogVerbosity log_verbosity = LogVerbosity();
+    log_verbosity.deopt = true;
+    EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose);
+  }
+
+  {
     const char* log_args = "-verbose:oat";
     LogVerbosity log_verbosity = LogVerbosity();
     log_verbosity.oat = true;
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 48090a3..e383ec6 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -646,8 +646,13 @@
   HBasicBlock* block = new (allocator) HBasicBlock(graph);
   graph->AddBlock(block);
   entry->AddSuccessor(block);
-  HInstruction* new_array = new (allocator)
-      HNewArray(constant_10, 0, Primitive::kPrimInt, graph->GetDexFile(), kQuickAllocArray);
+  HInstruction* new_array = new (allocator) HNewArray(
+      constant_10,
+      graph->GetCurrentMethod(),
+      0,
+      Primitive::kPrimInt,
+      graph->GetDexFile(),
+      kQuickAllocArray);
   block->AddInstruction(new_array);
   block->AddInstruction(new (allocator) HGoto());
 
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index dbda63b..cbd0429 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -1050,6 +1050,7 @@
       ? kQuickAllocArrayWithAccessCheck
       : kQuickAllocArray;
   HInstruction* object = new (arena_) HNewArray(length,
+                                                graph_->GetCurrentMethod(),
                                                 dex_pc,
                                                 type_index,
                                                 *dex_compilation_unit_->GetDexFile(),
@@ -2008,7 +2009,11 @@
             : kQuickAllocObject;
 
         current_block_->AddInstruction(new (arena_) HNewInstance(
-            dex_pc, type_index, *dex_compilation_unit_->GetDexFile(), entrypoint));
+            graph_->GetCurrentMethod(),
+            dex_pc,
+            type_index,
+            *dex_compilation_unit_->GetDexFile(),
+            entrypoint));
         UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
       }
       break;
@@ -2020,8 +2025,12 @@
       QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index)
           ? kQuickAllocArrayWithAccessCheck
           : kQuickAllocArray;
-      current_block_->AddInstruction(new (arena_) HNewArray(
-          length, dex_pc, type_index, *dex_compilation_unit_->GetDexFile(), entrypoint));
+      current_block_->AddInstruction(new (arena_) HNewArray(length,
+                                                            graph_->GetCurrentMethod(),
+                                                            dex_pc,
+                                                            type_index,
+                                                            *dex_compilation_unit_->GetDexFile(),
+                                                            entrypoint));
       UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
       break;
     }
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index f4544ea..3d3e35d 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1254,21 +1254,12 @@
   IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
                                          codegen_->GetInstructionSetFeatures());
   if (intrinsic.TryDispatch(invoke)) {
-    LocationSummary* locations = invoke->GetLocations();
-    if (locations->CanCall()) {
-      locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::RequiresRegister());
-    }
     return;
   }
 
   HandleInvoke(invoke);
 }
 
-void CodeGeneratorARM::LoadCurrentMethod(Register reg) {
-  DCHECK(RequiresCurrentMethod());
-  __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
-}
-
 static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
   if (invoke->GetLocations()->Intrinsified()) {
     IntrinsicCodeGeneratorARM intrinsic(codegen);
@@ -2710,13 +2701,12 @@
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
   InvokeRuntimeCallingConvention calling_convention;
   locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
   locations->SetOut(Location::RegisterLocation(R0));
 }
 
 void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
   InvokeRuntimeCallingConvention calling_convention;
-  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
   __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
   codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                           instruction,
@@ -2729,14 +2719,13 @@
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
   InvokeRuntimeCallingConvention calling_convention;
   locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
   locations->SetOut(Location::RegisterLocation(R0));
   locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
 }
 
 void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
   InvokeRuntimeCallingConvention calling_convention;
-  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(2));
   __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
   codegen_->InvokeRuntime(GetThreadOffset<kArmWordSize>(instruction->GetEntrypoint()).Int32Value(),
                           instruction,
@@ -4227,12 +4216,20 @@
   } else if (invoke->IsRecursive()) {
     __ bl(GetFrameEntryLabel());
   } else {
-    Register current_method =
-        invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex()).AsRegister<Register>();
+    Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
+    Register method_reg;
     Register reg = temp.AsRegister<Register>();
+    if (current_method.IsRegister()) {
+      method_reg = current_method.AsRegister<Register>();
+    } else {
+      DCHECK(invoke->GetLocations()->Intrinsified());
+      DCHECK(!current_method.IsValid());
+      method_reg = reg;
+      __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
+    }
     // reg = current_method->dex_cache_resolved_methods_;
     __ LoadFromOffset(
-        kLoadWord, reg, current_method, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+        kLoadWord, reg, method_reg, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
     // reg = reg[index_in_cache]
     __ LoadFromOffset(
         kLoadWord, reg, reg, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
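
All four backends now share this fallback in TryGenerateStaticOrDirectCall: use the register the allocator assigned to the HCurrentMethod input when there is one, and reload ArtMethod* from its fixed stack slot otherwise (only intrinsified invokes may omit the input). A standalone model of that control flow, with hypothetical stand-ins — this Location is a reduced sketch, not the real art::Location:

```cpp
#include <cassert>
#include <cstdio>
#include <optional>

// Hypothetical stand-in for art::Location, reduced to what the pattern needs.
struct Location {
  std::optional<int> reg;  // register number; empty models an invalid location
  bool IsRegister() const { return reg.has_value(); }
};

// Mirrors the control flow added to each backend: prefer the register the
// allocator assigned to the HCurrentMethod input, else reload ArtMethod*
// from its fixed stack slot into the temp register.
int EnsureCurrentMethodRegister(const Location& current_method,
                                int temp_reg,
                                bool intrinsified) {
  if (current_method.IsRegister()) {
    return *current_method.reg;
  }
  // Only intrinsified invokes are allowed to omit the input register.
  assert(intrinsified);
  std::printf("ldr r%d, [SP, #kCurrentMethodStackOffset]\n", temp_reg);
  return temp_reg;
}

int main() {
  assert(EnsureCurrentMethodRegister(Location{5}, 1, false) == 5);  // allocator-provided
  assert(EnsureCurrentMethodRegister(Location{}, 1, true) == 1);    // reload into temp
}
```
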
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index b871acd..af24816 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -271,9 +271,6 @@
   // Helper method to move a 64-bit value between two locations.
   void Move64(Location destination, Location source);
 
-  // Load current method into `reg`.
-  void LoadCurrentMethod(Register reg);
-
   // Generate code to invoke a runtime entry point.
   void InvokeRuntime(
       int32_t offset, HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index ac99d56..3c8f117 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1071,12 +1071,6 @@
   }
 }
 
-void CodeGeneratorARM64::LoadCurrentMethod(vixl::Register current_method) {
-  DCHECK(RequiresCurrentMethod());
-  CHECK(current_method.IsX());
-  __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
-}
-
 void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
                                        HInstruction* instruction,
                                        uint32_t dex_pc,
@@ -2227,10 +2221,6 @@
 
   IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
   if (intrinsic.TryDispatch(invoke)) {
-    LocationSummary* locations = invoke->GetLocations();
-    if (locations->CanCall()) {
-      locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::RequiresRegister());
-    }
     return;
   }
 
@@ -2269,11 +2259,20 @@
   } else if (invoke->IsRecursive()) {
     __ Bl(&frame_entry_label_);
   } else {
-    Register current_method =
-        XRegisterFrom(invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex()));
+    Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
     Register reg = XRegisterFrom(temp);
+    Register method_reg;
+    if (current_method.IsRegister()) {
+      method_reg = XRegisterFrom(current_method);
+    } else {
+      DCHECK(invoke->GetLocations()->Intrinsified());
+      DCHECK(!current_method.IsValid());
+      method_reg = reg;
+      __ Ldr(reg.X(), MemOperand(sp, kCurrentMethodStackOffset));
+    }
+
     // temp = current_method->dex_cache_resolved_methods_;
-    __ Ldr(reg.W(), MemOperand(current_method.X(),
+    __ Ldr(reg.W(), MemOperand(method_reg.X(),
                                ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
     // temp = temp[index_in_cache];
     __ Ldr(reg.X(), MemOperand(reg, index_in_cache));
@@ -2521,9 +2520,9 @@
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
   InvokeRuntimeCallingConvention calling_convention;
   locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
-  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2)));
   locations->SetOut(LocationFrom(x0));
   locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(2)));
   CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
                        void*, uint32_t, int32_t, ArtMethod*>();
 }
@@ -2533,9 +2532,6 @@
   InvokeRuntimeCallingConvention calling_convention;
   Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
   DCHECK(type_index.Is(w0));
-  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimLong);
-  DCHECK(current_method.Is(x2));
-  codegen_->LoadCurrentMethod(current_method.X());
   __ Mov(type_index, instruction->GetTypeIndex());
   codegen_->InvokeRuntime(
       GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
@@ -2550,7 +2546,7 @@
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
   InvokeRuntimeCallingConvention calling_convention;
   locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
-  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
   locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
   CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
 }
@@ -2559,9 +2555,6 @@
   LocationSummary* locations = instruction->GetLocations();
   Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
   DCHECK(type_index.Is(w0));
-  Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
-  DCHECK(current_method.Is(w1));
-  codegen_->LoadCurrentMethod(current_method.X());
   __ Mov(type_index, instruction->GetTypeIndex());
   codegen_->InvokeRuntime(
       GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 3246648..2d2419a 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -150,8 +150,6 @@
   FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
 #undef DECLARE_VISIT_INSTRUCTION
 
-  void LoadCurrentMethod(XRegister reg);
-
   Arm64Assembler* GetAssembler() const { return assembler_; }
   vixl::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
 
@@ -328,7 +326,6 @@
                     Primitive::Type type = Primitive::kPrimVoid);
   void Load(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
   void Store(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);
-  void LoadCurrentMethod(vixl::Register current_method);
   void LoadAcquire(HInstruction* instruction, vixl::CPURegister dst, const vixl::MemOperand& src);
   void StoreRelease(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);
 
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f6110a5..e39a1c2 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -527,11 +527,6 @@
   __ Bind(GetLabelOf(block));
 }
 
-void CodeGeneratorX86::LoadCurrentMethod(Register reg) {
-  DCHECK(RequiresCurrentMethod());
-  __ movl(reg, Address(ESP, kCurrentMethodStackOffset));
-}
-
 Location CodeGeneratorX86::GetStackLocation(HLoadLocal* load) const {
   switch (load->GetType()) {
     case Primitive::kPrimLong:
@@ -1231,10 +1226,6 @@
 
   IntrinsicLocationsBuilderX86 intrinsic(codegen_);
   if (intrinsic.TryDispatch(invoke)) {
-    LocationSummary* locations = invoke->GetLocations();
-    if (locations->CanCall()) {
-      locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::RequiresRegister());
-    }
     return;
   }
 
@@ -2972,14 +2963,12 @@
   locations->SetOut(Location::RegisterLocation(EAX));
   InvokeRuntimeCallingConvention calling_convention;
   locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
 }
 
 void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) {
   InvokeRuntimeCallingConvention calling_convention;
-  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
   __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex()));
-
   __ fs()->call(Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint())));
 
   codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
@@ -2992,13 +2981,12 @@
   locations->SetOut(Location::RegisterLocation(EAX));
   InvokeRuntimeCallingConvention calling_convention;
   locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
   locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
 }
 
 void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) {
   InvokeRuntimeCallingConvention calling_convention;
-  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(2));
   __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex()));
 
   __ fs()->call(Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint())));
@@ -3237,7 +3225,7 @@
     if (current_method.IsRegister()) {
       method_reg = current_method.AsRegister<Register>();
     } else {
-      DCHECK(IsBaseline());
+      DCHECK(IsBaseline() || invoke->GetLocations()->Intrinsified());
       DCHECK(!current_method.IsValid());
       method_reg = reg;
       __ movl(reg, Address(ESP, kCurrentMethodStackOffset));
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index b8553d2..faf3cf9 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -272,8 +272,6 @@
                   Register value,
                   bool value_can_be_null);
 
-  void LoadCurrentMethod(Register reg);
-
   Label* GetLabelOf(HBasicBlock* block) const {
     return CommonGetLabelOf<Label>(block_labels_.GetRawStorage(), block);
   }
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index c9fe813..bfc827d 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -380,13 +380,20 @@
   } else if (invoke->IsRecursive()) {
     __ call(&frame_entry_label_);
   } else {
-    LocationSummary* locations = invoke->GetLocations();
     CpuRegister reg = temp.AsRegister<CpuRegister>();
-    CpuRegister current_method =
-        locations->InAt(invoke->GetCurrentMethodInputIndex()).AsRegister<CpuRegister>();
+    Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
+    Register method_reg;
+    if (current_method.IsRegister()) {
+      method_reg = current_method.AsRegister<Register>();
+    } else {
+      DCHECK(invoke->GetLocations()->Intrinsified());
+      DCHECK(!current_method.IsValid());
+      method_reg = reg.AsRegister();
+      __ movq(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset));
+    }
     // temp = temp->dex_cache_resolved_methods_;
-    __ movl(reg, Address(
-        current_method, ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+    __ movl(reg, Address(CpuRegister(method_reg),
+                         ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
     // temp = temp[index_in_cache]
     __ movq(reg, Address(
         reg, CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex())));
@@ -587,11 +594,6 @@
   __ Bind(GetLabelOf(block));
 }
 
-void CodeGeneratorX86_64::LoadCurrentMethod(CpuRegister reg) {
-  DCHECK(RequiresCurrentMethod());
-  __ movq(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset));
-}
-
 Location CodeGeneratorX86_64::GetStackLocation(HLoadLocal* load) const {
   switch (load->GetType()) {
     case Primitive::kPrimLong:
@@ -1336,10 +1338,6 @@
 
   IntrinsicLocationsBuilderX86_64 intrinsic(codegen_);
   if (intrinsic.TryDispatch(invoke)) {
-    LocationSummary* locations = invoke->GetLocations();
-    if (locations->CanCall()) {
-      locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::RequiresRegister());
-    }
     return;
   }
 
@@ -3022,13 +3020,12 @@
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
   InvokeRuntimeCallingConvention calling_convention;
   locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
   locations->SetOut(Location::RegisterLocation(RAX));
 }
 
 void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction) {
   InvokeRuntimeCallingConvention calling_convention;
-  codegen_->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
   codegen_->Load64BitValue(CpuRegister(calling_convention.GetRegisterAt(0)),
                            instruction->GetTypeIndex());
   __ gs()->call(
@@ -3043,14 +3040,13 @@
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
   InvokeRuntimeCallingConvention calling_convention;
   locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
   locations->SetOut(Location::RegisterLocation(RAX));
   locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
 }
 
 void InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) {
   InvokeRuntimeCallingConvention calling_convention;
-  codegen_->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(2)));
   codegen_->Load64BitValue(CpuRegister(calling_convention.GetRegisterAt(0)),
                            instruction->GetTypeIndex());
 
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 61f863c..e46994c 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -263,8 +263,6 @@
   // Helper method to move a value between two locations.
   void Move(Location destination, Location source);
 
-  void LoadCurrentMethod(CpuRegister reg);
-
   Label* GetLabelOf(HBasicBlock* block) const {
     return CommonGetLabelOf<Label>(block_labels_.GetRawStorage(), block);
   }
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index d914363..f87775e 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2668,9 +2668,10 @@
   DISALLOW_COPY_AND_ASSIGN(HInvokeInterface);
 };
 
-class HNewInstance : public HExpression<0> {
+class HNewInstance : public HExpression<1> {
  public:
-  HNewInstance(uint32_t dex_pc,
+  HNewInstance(HCurrentMethod* current_method,
+               uint32_t dex_pc,
                uint16_t type_index,
                const DexFile& dex_file,
                QuickEntrypointEnum entrypoint)
@@ -2678,7 +2679,9 @@
         dex_pc_(dex_pc),
         type_index_(type_index),
         dex_file_(dex_file),
-        entrypoint_(entrypoint) {}
+        entrypoint_(entrypoint) {
+    SetRawInputAt(0, current_method);
+  }
 
   uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
   uint16_t GetTypeIndex() const { return type_index_; }
@@ -2721,9 +2724,10 @@
   DISALLOW_COPY_AND_ASSIGN(HNeg);
 };
 
-class HNewArray : public HExpression<1> {
+class HNewArray : public HExpression<2> {
  public:
   HNewArray(HInstruction* length,
+            HCurrentMethod* current_method,
             uint32_t dex_pc,
             uint16_t type_index,
             const DexFile& dex_file,
@@ -2734,6 +2738,7 @@
         dex_file_(dex_file),
         entrypoint_(entrypoint) {
     SetRawInputAt(0, length);
+    SetRawInputAt(1, current_method);
   }
 
   uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
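
The nodes.h change above is the heart of the patch: instead of each code generator calling the now-deleted LoadCurrentMethod, HNewInstance and HNewArray carry the current method as an explicit HIR input, so liveness analysis and the register allocator keep the ArtMethod* alive across the runtime call. A minimal sketch of the fixed-arity input pattern, using simplified stand-ins rather than the real HInstruction machinery:

```cpp
#include <array>
#include <cstddef>

// Reduced stand-ins for the HIR classes; the real HExpression also carries
// type, side-effect, and use-list machinery.
struct HInstruction {};
struct HCurrentMethod : HInstruction {};

template <std::size_t N>
struct HExpressionSketch : HInstruction {
  std::array<HInstruction*, N> inputs_{};
  void SetRawInputAt(std::size_t index, HInstruction* input) { inputs_[index] = input; }
};

// HNewArray grows from HExpression<1> to HExpression<2>: input 0 stays the
// length, input 1 is the current method, so the ArtMethod* dependency is
// visible to liveness analysis and the register allocator.
struct HNewArraySketch : HExpressionSketch<2> {
  HNewArraySketch(HInstruction* length, HCurrentMethod* current_method) {
    SetRawInputAt(0, length);
    SetRawInputAt(1, current_method);
  }
};

int main() {
  HInstruction length;
  HCurrentMethod current_method;
  HNewArraySketch new_array(&length, &current_method);
  return new_array.inputs_[1] == &current_method ? 0 : 1;
}
```
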
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 50d9860..c78a851 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -428,7 +428,8 @@
         // exception was thrown to force the activations to be removed from the
         // stack. Continue execution in the interpreter.
         self->ClearException();
-        ShadowFrame* shadow_frame = self->PopStackedShadowFrame(kDeoptimizationShadowFrame);
+        ShadowFrame* shadow_frame =
+            self->PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame);
         result->SetJ(self->PopDeoptimizationReturnValue().GetJ());
         self->SetTopOfStack(nullptr);
         self->SetTopOfShadowStack(shadow_frame);
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 3f2e6db..86a79ce 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -518,7 +518,7 @@
     // We might need to do class loading, which incurs a thread state change to kNative. So
     // register the shadow frame as under construction and allow suspension again.
     ScopedStackedShadowFramePusher pusher(
-        self, new_shadow_frame, kShadowFrameUnderConstruction);
+        self, new_shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
     self->EndAssertNoThreadSuspension(old_cause);
 
     // We need to do runtime check on reference assignment. We need to load the shorty
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 0bc834f..4b563b5 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -368,23 +368,28 @@
   return true;
 }
 
-bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognized,
-                          RuntimeArgumentMap* runtime_options) {
+// Intended for local changes only.
+static void MaybeOverrideVerbosity() {
   //  gLogVerbosity.class_linker = true;  // TODO: don't check this in!
   //  gLogVerbosity.compiler = true;  // TODO: don't check this in!
+  //  gLogVerbosity.deopt = true;  // TODO: don't check this in!
   //  gLogVerbosity.gc = true;  // TODO: don't check this in!
   //  gLogVerbosity.heap = true;  // TODO: don't check this in!
   //  gLogVerbosity.jdwp = true;  // TODO: don't check this in!
   //  gLogVerbosity.jit = true;  // TODO: don't check this in!
   //  gLogVerbosity.jni = true;  // TODO: don't check this in!
   //  gLogVerbosity.monitor = true;  // TODO: don't check this in!
+  //  gLogVerbosity.oat = true;  // TODO: don't check this in!
   //  gLogVerbosity.profiler = true;  // TODO: don't check this in!
   //  gLogVerbosity.signals = true;  // TODO: don't check this in!
   //  gLogVerbosity.startup = true;  // TODO: don't check this in!
   //  gLogVerbosity.third_party_jni = true;  // TODO: don't check this in!
   //  gLogVerbosity.threads = true;  // TODO: don't check this in!
   //  gLogVerbosity.verifier = true;  // TODO: don't check this in!
+}
 
+bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognized,
+                          RuntimeArgumentMap* runtime_options) {
   for (size_t i = 0; i < options.size(); ++i) {
     if (true && options[0].first == "-Xzygote") {
       LOG(INFO) << "option[" << i << "]=" << options[i].first;
@@ -453,6 +458,8 @@
     }
   }
 
+  MaybeOverrideVerbosity();
+
   // -Xprofile:
   Trace::SetDefaultClockSource(args.GetOrDefault(M::ProfileClock));
 
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index a10c5c8..02baad7 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -178,7 +178,7 @@
         // In case there is no deoptimized shadow frame for this upcall, we still
         // need to push a nullptr to the stack since there is always a matching pop after
         // the long jump.
-        self_->PushStackedShadowFrame(nullptr, kDeoptimizationShadowFrame);
+        self_->PushStackedShadowFrame(nullptr, StackedShadowFrameType::kDeoptimizationShadowFrame);
         stacked_shadow_frame_pushed_ = true;
       }
       return false;  // End stack walk.
@@ -212,7 +212,8 @@
     CHECK(verifier_success) << PrettyMethod(m);
     ShadowFrame* new_frame = ShadowFrame::CreateDeoptimizedFrame(num_regs, nullptr, m, dex_pc);
     {
-      ScopedStackedShadowFramePusher pusher(self_, new_frame, kShadowFrameUnderConstruction);
+      ScopedStackedShadowFramePusher pusher(self_, new_frame,
+                                            StackedShadowFrameType::kShadowFrameUnderConstruction);
       const std::vector<int32_t> kinds(verifier.DescribeVRegs(dex_pc));
 
       // Markers for dead values, used when the verifier knows a Dex register is undefined,
@@ -318,7 +319,7 @@
       // Will be popped after the long jump after DeoptimizeStack(),
       // right before interpreter::EnterInterpreterFromDeoptimize().
       stacked_shadow_frame_pushed_ = true;
-      self_->PushStackedShadowFrame(new_frame, kDeoptimizationShadowFrame);
+      self_->PushStackedShadowFrame(new_frame, StackedShadowFrameType::kDeoptimizationShadowFrame);
     }
     prev_shadow_frame_ = new_frame;
     return true;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 3f49ab9..fe98b0a 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -147,6 +147,50 @@
   ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
 }
 
+class DeoptimizationReturnValueRecord {
+ public:
+  DeoptimizationReturnValueRecord(const JValue& ret_val,
+                                  bool is_reference,
+                                  DeoptimizationReturnValueRecord* link)
+      : ret_val_(ret_val), is_reference_(is_reference), link_(link) {}
+
+  JValue GetReturnValue() const { return ret_val_; }
+  bool IsReference() const { return is_reference_; }
+  DeoptimizationReturnValueRecord* GetLink() const { return link_; }
+  mirror::Object** GetGCRoot() {
+    DCHECK(is_reference_);
+    return ret_val_.GetGCRoot();
+  }
+
+ private:
+  JValue ret_val_;
+  const bool is_reference_;
+  DeoptimizationReturnValueRecord* const link_;
+
+  DISALLOW_COPY_AND_ASSIGN(DeoptimizationReturnValueRecord);
+};
+
+class StackedShadowFrameRecord {
+ public:
+  StackedShadowFrameRecord(ShadowFrame* shadow_frame,
+                           StackedShadowFrameType type,
+                           StackedShadowFrameRecord* link)
+      : shadow_frame_(shadow_frame),
+        type_(type),
+        link_(link) {}
+
+  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
+  StackedShadowFrameType GetType() const { return type_; }
+  StackedShadowFrameRecord* GetLink() const { return link_; }
+
+ private:
+  ShadowFrame* const shadow_frame_;
+  const StackedShadowFrameType type_;
+  StackedShadowFrameRecord* const link_;
+
+  DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord);
+};
+
 void Thread::PushAndClearDeoptimizationReturnValue() {
   DeoptimizationReturnValueRecord* record = new DeoptimizationReturnValueRecord(
       tls64_.deoptimization_return_value,
@@ -174,7 +218,7 @@
 ShadowFrame* Thread::PopStackedShadowFrame(StackedShadowFrameType type) {
   StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
   DCHECK(record != nullptr);
-  DCHECK(record->GetType() == type);
+  DCHECK_EQ(record->GetType(), type);
   tlsPtr_.stacked_shadow_frame_record = record->GetLink();
   ShadowFrame* shadow_frame = record->GetShadowFrame();
   delete record;
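
The two record classes moved here from thread.h are private implementation details of Thread: each push links a new record onto a singly linked list hanging off thread-local state, and each pop checks that the frame type matches before unlinking. A standalone model of that push/pop discipline, where g_top is a hypothetical stand-in for tlsPtr_.stacked_shadow_frame_record:

```cpp
#include <cassert>

enum class StackedShadowFrameType {
  kShadowFrameUnderConstruction,
  kDeoptimizationShadowFrame
};

struct ShadowFrame {};

struct StackedShadowFrameRecord {
  ShadowFrame* frame;
  StackedShadowFrameType type;
  StackedShadowFrameRecord* link;
};

StackedShadowFrameRecord* g_top = nullptr;  // models tlsPtr_.stacked_shadow_frame_record

void PushStackedShadowFrame(ShadowFrame* frame, StackedShadowFrameType type) {
  g_top = new StackedShadowFrameRecord{frame, type, g_top};
}

ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type) {
  StackedShadowFrameRecord* record = g_top;
  assert(record != nullptr);
  assert(record->type == type);  // the check strengthened to DCHECK_EQ above
  g_top = record->link;
  ShadowFrame* frame = record->frame;
  delete record;
  return frame;
}

int main() {
  ShadowFrame frame;
  PushStackedShadowFrame(&frame, StackedShadowFrameType::kDeoptimizationShadowFrame);
  assert(PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame) == &frame);
}
```
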
diff --git a/runtime/thread.h b/runtime/thread.h
index e996fc9..9311bef 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -74,6 +74,7 @@
 class Closure;
 class Context;
 struct DebugInvokeReq;
+class DeoptimizationReturnValueRecord;
 class DexFile;
 class JavaVMExt;
 struct JNIEnvExt;
@@ -82,6 +83,7 @@
 class ScopedObjectAccessAlreadyRunnable;
 class ShadowFrame;
 class SingleStepControl;
+class StackedShadowFrameRecord;
 class Thread;
 class ThreadList;
 
@@ -99,55 +101,11 @@
   kCheckpointRequest = 2  // Request that the thread do some checkpoint work and then continue.
 };
 
-enum StackedShadowFrameType {
+enum class StackedShadowFrameType {
   kShadowFrameUnderConstruction,
   kDeoptimizationShadowFrame
 };
 
-class StackedShadowFrameRecord {
- public:
-  StackedShadowFrameRecord(ShadowFrame* shadow_frame,
-                           StackedShadowFrameType type,
-                           StackedShadowFrameRecord* link)
-      : shadow_frame_(shadow_frame),
-        type_(type),
-        link_(link) {}
-
-  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
-  bool GetType() const { return type_; }
-  StackedShadowFrameRecord* GetLink() const { return link_; }
-
- private:
-  ShadowFrame* const shadow_frame_;
-  const StackedShadowFrameType type_;
-  StackedShadowFrameRecord* const link_;
-
-  DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord);
-};
-
-class DeoptimizationReturnValueRecord {
- public:
-  DeoptimizationReturnValueRecord(const JValue& ret_val,
-                                  bool is_reference,
-                                  DeoptimizationReturnValueRecord* link)
-      : ret_val_(ret_val), is_reference_(is_reference), link_(link) {}
-
-  JValue GetReturnValue() const { return ret_val_; }
-  bool IsReference() const { return is_reference_; }
-  DeoptimizationReturnValueRecord* GetLink() const { return link_; }
-  mirror::Object** GetGCRoot() {
-    DCHECK(is_reference_);
-    return ret_val_.GetGCRoot();
-  }
-
- private:
-  JValue ret_val_;
-  const bool is_reference_;
-  DeoptimizationReturnValueRecord* const link_;
-
-  DISALLOW_COPY_AND_ASSIGN(DeoptimizationReturnValueRecord);
-};
-
 static constexpr size_t kNumRosAllocThreadLocalSizeBrackets = 34;
 
 // Thread's stack layout for implicit stack overflow checks:
@@ -1372,6 +1330,7 @@
 };
 
 std::ostream& operator<<(std::ostream& os, const Thread& thread);
+std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& type);
 
 }  // namespace art
 
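Turning StackedShadowFrameType into an enum class is what forces the qualified names at every use site, and it also rules out the accessor slip visible in the removed code, where GetType() was declared to return bool; with only two enumerators (0 and 1) the truncation happened to compare correctly, but the scoped enum makes the same declaration a compile error. A hand-written illustration, not taken from the patch:

```cpp
#include <iostream>

enum StackedShadowFrameTypeUnscoped {
  kShadowFrameUnderConstruction,
  kDeoptimizationShadowFrame
};

enum class StackedShadowFrameType {
  kShadowFrameUnderConstructionScoped,
  kDeoptimizationShadowFrameScoped
};

// The removed accessor compiled under the unscoped enum: the enumerator
// silently converts through int to bool.
bool OldGetType(StackedShadowFrameTypeUnscoped type) { return type; }

// Under the scoped enum the same body would not compile; the accessor has
// to return the enum type itself, as the new StackedShadowFrameRecord does.
StackedShadowFrameType NewGetType(StackedShadowFrameType type) { return type; }

int main() {
  bool collapsed = OldGetType(kDeoptimizationShadowFrame);  // becomes plain `true`
  StackedShadowFrameType type =
      NewGetType(StackedShadowFrameType::kDeoptimizationShadowFrameScoped);
  std::cout << std::boolalpha << collapsed << " "
            << (type == StackedShadowFrameType::kDeoptimizationShadowFrameScoped)
            << "\n";
}
```
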
diff --git a/test/491-current-method/expected.txt b/test/491-current-method/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/491-current-method/expected.txt
diff --git a/test/491-current-method/info.txt b/test/491-current-method/info.txt
new file mode 100644
index 0000000..e9678da
--- /dev/null
+++ b/test/491-current-method/info.txt
@@ -0,0 +1,2 @@
+Regression test for the optimizing compiler, which used to
+crash in the presence of intrinsics with slow paths.
diff --git a/test/491-current-method/src/Main.java b/test/491-current-method/src/Main.java
new file mode 100644
index 0000000..87ef052
--- /dev/null
+++ b/test/491-current-method/src/Main.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+
+  // The code below is written in a way that made the generated
+  // code crash at the time this test was submitted.
+  // Therefore, changes to the register allocator may
+  // affect whether the crash reproduces.
+  public static void $noinline$foo(int a, int b, int c) {
+    // The division on x86 will take EAX and EDX, leaving ECX
+    // to hold the ART current method.
+    c = c / 42;
+    // We use the empty string to force the slow path.
+    // The slow path for charAt, when it is intrinsified, will
+    // move the parameter to ECX and therefore overwrite the ART
+    // current method.
+    "".charAt(c);
+
+    // Do more things in the method to prevent inlining.
+    c = c / 42;
+    "".charAt(c);
+    c = c / 42;
+    "".charAt(c);
+    c = c / 42;
+    "".charAt(c);
+    c = c / 42;
+    "".charAt(c);
+    c = c / 42;
+    "".charAt(c);
+    c = c / 42;
+    "".charAt(c);
+    c = c / 42;
+    "".charAt(c);
+    c = c / 42;
+    "".charAt(c);
+    c = c / 42;
+    "".charAt(c);
+    c = c / 42;
+    "".charAt(c);
+    c = c / 42;
+    "".charAt(c);
+  }
+
+  public static void main(String[] args) {
+    boolean didThrow = false;
+    try {
+      $noinline$foo(1, 2, 3);
+    } catch (Throwable e) {
+      didThrow = true;
+    }
+
+    if (!didThrow) {
+      throw new Error("Expected an exception from charAt");
+    }
+  }
+}
diff --git a/tools/generate-operator-out.py b/tools/generate-operator-out.py
index 2b57222..c74508d 100755
--- a/tools/generate-operator-out.py
+++ b/tools/generate-operator-out.py
@@ -154,10 +154,12 @@
       sys.stderr.write('%s\n' % (rest))
       Confused(filename, line_number, raw_line)
 
-    if len(enclosing_classes) > 0:
-      if is_enum_class:
-        enum_value = enum_name + '::' + enum_value
-      else:
+    # If the enum is scoped, we must prefix the enum value with the enum name (which is
+    # already prefixed by enclosing classes).
+    if is_enum_class:
+      enum_value = enum_name + '::' + enum_value
+    else:
+      if len(enclosing_classes) > 0:
         enum_value = '::'.join(enclosing_classes) + '::' + enum_value
 
     _ENUMS[enum_name].append((enum_value, enum_text))
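
With this fix, a scoped enum always gets its EnumName:: prefix even when no enclosing classes are present; previously a namespace-scope enum class fell through to the enclosing-classes branch and the script emitted unqualified enumerators, which do not compile for an enum class. A hand-written sketch of the output shape expected for StackedShadowFrameType — the real file is generated by the script at build time, and the default arm and exact layout here are assumptions:

```cpp
#include <iostream>
#include <ostream>

enum class StackedShadowFrameType {
  kShadowFrameUnderConstruction,
  kDeoptimizationShadowFrame
};

// Sketch of the generated operator<<: every case label now carries the
// StackedShadowFrameType:: prefix that the fixed script emits.
std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& rhs) {
  switch (rhs) {
    case StackedShadowFrameType::kShadowFrameUnderConstruction:
      os << "kShadowFrameUnderConstruction";
      break;
    case StackedShadowFrameType::kDeoptimizationShadowFrame:
      os << "kDeoptimizationShadowFrame";
      break;
    default:
      os << "StackedShadowFrameType[" << static_cast<int>(rhs) << "]";
      break;
  }
  return os;
}

int main() {
  std::cout << StackedShadowFrameType::kDeoptimizationShadowFrame << "\n";
}
```
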