Use HCurrentMethod in HInvokeStaticOrDirect.

The current method becomes an explicit input of HInvokeStaticOrDirect
(produced by HCurrentMethod) instead of being reloaded by each code
generator. GenerateStaticOrDirectCall now takes a Location rather than
a fixed register, and the invoke's locations are set up per dispatch
kind: string init only needs a temp, a recursive call expects the
method in the calling convention's method location, and the dex-cache
path needs the current method in a register plus a temp.

Change-Id: I0d15244b6b44c8b10079398c55da5071a3e3af66
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index f98029d..dbda63b 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -763,6 +763,11 @@
   }
   DCHECK_EQ(argument_index, number_of_arguments);
 
+  if (invoke->IsInvokeStaticOrDirect()) {
+    invoke->SetArgumentAt(argument_index, graph_->GetCurrentMethod());
+    argument_index++;
+  }
+
   if (clinit_check_requirement == HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit) {
     // Add the class initialization check as last input of `invoke`.
     DCHECK(clinit_check != nullptr);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 08c0351..049b3e3 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -292,7 +292,6 @@
     HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
   ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
   LocationSummary* locations = new (allocator) LocationSummary(invoke, LocationSummary::kCall);
-  locations->AddTemp(visitor->GetMethodLocation());
 
   for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
     HInstruction* input = invoke->InputAt(i);
@@ -300,6 +299,20 @@
   }
 
   locations->SetOut(visitor->GetReturnLocation(invoke->GetType()));
+
+  if (invoke->IsInvokeStaticOrDirect()) {
+    HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
+    if (call->IsStringInit()) {
+      locations->AddTemp(visitor->GetMethodLocation());
+    } else if (call->IsRecursive()) {
+      locations->SetInAt(call->GetCurrentMethodInputIndex(), visitor->GetMethodLocation());
+    } else {
+      locations->AddTemp(visitor->GetMethodLocation());
+      locations->SetInAt(call->GetCurrentMethodInputIndex(), Location::RequiresRegister());
+    }
+  } else {
+    locations->AddTemp(visitor->GetMethodLocation());
+  }
 }
 
 void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 04952be..da5a731 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1254,6 +1254,10 @@
   IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
                                          codegen_->GetInstructionSetFeatures());
   if (intrinsic.TryDispatch(invoke)) {
+    LocationSummary* locations = invoke->GetLocations();
+    if (locations->CanCall()) {
+      locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::RequiresRegister());
+    }
     return;
   }
 
@@ -1283,9 +1287,9 @@
     return;
   }
 
-  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
-
-  codegen_->GenerateStaticOrDirectCall(invoke, temp);
+  LocationSummary* locations = invoke->GetLocations();
+  codegen_->GenerateStaticOrDirectCall(
+      invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
@@ -1316,12 +1320,8 @@
   Location receiver = locations->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   // temp = object->GetClass();
-  if (receiver.IsStackSlot()) {
-    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
-    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
-  } else {
-    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
-  }
+  DCHECK(receiver.IsRegister());
+  __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
   codegen_->MaybeRecordImplicitNullCheck(invoke);
   // temp = temp->GetMethodAt(method_offset);
   uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
@@ -4215,9 +4215,7 @@
   }
 }
 
-void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) {
-  DCHECK_EQ(temp, kArtMethodRegister);
-
+void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
   // TODO: Implement all kinds of calls:
   // 1) boot -> boot
   // 2) app -> boot
@@ -4226,32 +4224,32 @@
   // Currently we implement the app -> app logic, which looks up in the resolve cache.
 
   if (invoke->IsStringInit()) {
+    Register reg = temp.AsRegister<Register>();
     // temp = thread->string_init_entrypoint
-    __ LoadFromOffset(kLoadWord, temp, TR, invoke->GetStringInitOffset());
+    __ LoadFromOffset(kLoadWord, reg, TR, invoke->GetStringInitOffset());
     // LR = temp[offset_of_quick_compiled_code]
-    __ LoadFromOffset(kLoadWord, LR, temp,
+    __ LoadFromOffset(kLoadWord, LR, reg,
                       ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                           kArmWordSize).Int32Value());
     // LR()
     __ blx(LR);
+  } else if (invoke->IsRecursive()) {
+    __ bl(GetFrameEntryLabel());
   } else {
-    // temp = method;
-    LoadCurrentMethod(temp);
-    if (!invoke->IsRecursive()) {
-      // temp = temp->dex_cache_resolved_methods_;
-      __ LoadFromOffset(
-          kLoadWord, temp, temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
-      // temp = temp[index_in_cache]
-      __ LoadFromOffset(
-          kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
-      // LR = temp[offset_of_quick_compiled_code]
-      __ LoadFromOffset(kLoadWord, LR, temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-          kArmWordSize).Int32Value());
-      // LR()
-      __ blx(LR);
-    } else {
-      __ bl(GetFrameEntryLabel());
-    }
+    Register current_method =
+        invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex()).AsRegister<Register>();
+    Register reg = temp.AsRegister<Register>();
+    // reg = current_method->dex_cache_resolved_methods_;
+    __ LoadFromOffset(
+        kLoadWord, reg, current_method, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+    // reg = reg[index_in_cache]
+    __ LoadFromOffset(
+        kLoadWord, reg, reg, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
+    // LR = reg[offset_of_quick_compiled_code]
+    __ LoadFromOffset(kLoadWord, LR, reg, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+        kArmWordSize).Int32Value());
+    // LR()
+    __ blx(LR);
   }
 
   DCHECK(!IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index d84f2d3..b871acd 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -301,7 +301,7 @@
 
   Label* GetFrameEntryLabel() { return &frame_entry_label_; }
 
-  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp);
+  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
 
  private:
   // Labels for each block that will be compiled.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 2f607f7..ac99d56 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -484,7 +484,7 @@
 }
 
 Location InvokeDexCallingConventionVisitorARM64::GetMethodLocation() const {
-  return LocationFrom(x0);
+  return LocationFrom(kArtMethodRegister);
 }
 
 CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
@@ -2227,6 +2227,10 @@
 
   IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
   if (intrinsic.TryDispatch(invoke)) {
+    LocationSummary* locations = invoke->GetLocations();
+    if (locations->CanCall()) {
+      locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::RequiresRegister());
+    }
     return;
   }
 
@@ -2242,9 +2246,8 @@
   return false;
 }
 
-void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) {
+void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
   // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
-  DCHECK(temp.Is(kArtMethodRegister));
   size_t index_in_cache = GetCachePointerOffset(invoke->GetDexMethodIndex());
 
   // TODO: Implement all kinds of calls:
@@ -2255,30 +2258,30 @@
   // Currently we implement the app -> app logic, which looks up in the resolve cache.
 
   if (invoke->IsStringInit()) {
+    Register reg = XRegisterFrom(temp);
     // temp = thread->string_init_entrypoint
-    __ Ldr(temp.X(), MemOperand(tr, invoke->GetStringInitOffset()));
+    __ Ldr(reg.X(), MemOperand(tr, invoke->GetStringInitOffset()));
     // LR = temp->entry_point_from_quick_compiled_code_;
     __ Ldr(lr, MemOperand(
-        temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value()));
+        reg, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value()));
     // lr()
     __ Blr(lr);
+  } else if (invoke->IsRecursive()) {
+    __ Bl(&frame_entry_label_);
   } else {
-    // temp = method;
-    LoadCurrentMethod(temp.X());
-    if (!invoke->IsRecursive()) {
-      // temp = temp->dex_cache_resolved_methods_;
-      __ Ldr(temp.W(), MemOperand(temp.X(),
-                                  ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
-      // temp = temp[index_in_cache];
-      __ Ldr(temp.X(), MemOperand(temp, index_in_cache));
-      // lr = temp->entry_point_from_quick_compiled_code_;
-      __ Ldr(lr, MemOperand(temp.X(), ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-          kArm64WordSize).Int32Value()));
-      // lr();
-      __ Blr(lr);
-    } else {
-      __ Bl(&frame_entry_label_);
-    }
+    Register current_method =
+        XRegisterFrom(invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex()));
+    Register reg = XRegisterFrom(temp);
+    // reg = current_method->dex_cache_resolved_methods_;
+    __ Ldr(reg.W(), MemOperand(current_method.X(),
+                               ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+    // reg = reg[index_in_cache];
+    __ Ldr(reg.X(), MemOperand(reg, index_in_cache));
+    // lr = reg->entry_point_from_quick_compiled_code_;
+    __ Ldr(lr, MemOperand(reg.X(), ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+        kArm64WordSize).Int32Value()));
+    // lr();
+    __ Blr(lr);
   }
 
   DCHECK(!IsLeafMethod());
@@ -2294,8 +2297,9 @@
   }
 
   BlockPoolsScope block_pools(GetVIXLAssembler());
-  Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
-  codegen_->GenerateStaticOrDirectCall(invoke, temp);
+  LocationSummary* locations = invoke->GetLocations();
+  codegen_->GenerateStaticOrDirectCall(
+      invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
@@ -2314,14 +2318,8 @@
 
   BlockPoolsScope block_pools(GetVIXLAssembler());
 
-  // temp = object->GetClass();
-  if (receiver.IsStackSlot()) {
-    __ Ldr(temp.W(), MemOperand(sp, receiver.GetStackIndex()));
-    __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
-  } else {
-    DCHECK(receiver.IsRegister());
-    __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
-  }
+  DCHECK(receiver.IsRegister());
+  __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
   codegen_->MaybeRecordImplicitNullCheck(invoke);
   // temp = temp->GetMethodAt(method_offset);
   __ Ldr(temp, MemOperand(temp, method_offset));
@@ -2674,7 +2672,7 @@
 void LocationsBuilderARM64::VisitCurrentMethod(HCurrentMethod* instruction) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-  locations->SetOut(LocationFrom(x0));
+  locations->SetOut(LocationFrom(kArtMethodRegister));
 }
 
 void InstructionCodeGeneratorARM64::VisitCurrentMethod(
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index c62ba95..3246648 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -344,7 +344,7 @@
     return false;
   }
 
-  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, vixl::Register temp);
+  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
 
  private:
   // Labels for each block that will be compiled.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8a7b52e..4065c44 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1231,6 +1231,10 @@
 
   IntrinsicLocationsBuilderX86 intrinsic(codegen_);
   if (intrinsic.TryDispatch(invoke)) {
+    LocationSummary* locations = invoke->GetLocations();
+    if (locations->CanCall()) {
+      locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::RequiresRegister());
+    }
     return;
   }
 
@@ -1255,8 +1259,9 @@
     return;
   }
 
+  LocationSummary* locations = invoke->GetLocations();
   codegen_->GenerateStaticOrDirectCall(
-      invoke, invoke->GetLocations()->GetTemp(0).AsRegister<Register>());
+      invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
@@ -1276,13 +1281,8 @@
   LocationSummary* locations = invoke->GetLocations();
   Location receiver = locations->InAt(0);
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  // temp = object->GetClass();
-  if (receiver.IsStackSlot()) {
-    __ movl(temp, Address(ESP, receiver.GetStackIndex()));
-    __ movl(temp, Address(temp, class_offset));
-  } else {
-    __ movl(temp, Address(receiver.AsRegister<Register>(), class_offset));
-  }
+  DCHECK(receiver.IsRegister());
+  __ movl(temp, Address(receiver.AsRegister<Register>(), class_offset));
   codegen_->MaybeRecordImplicitNullCheck(invoke);
   // temp = temp->GetMethodAt(method_offset);
   __ movl(temp, Address(temp, method_offset));
@@ -3201,7 +3201,7 @@
 
 
 void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
-                                                  Register temp) {
+                                                  Location temp) {
   // TODO: Implement all kinds of calls:
   // 1) boot -> boot
   // 2) app -> boot
@@ -3211,25 +3211,26 @@
 
   if (invoke->IsStringInit()) {
     // temp = thread->string_init_entrypoint
-    __ fs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
+    Register reg = temp.AsRegister<Register>();
+    __ fs()->movl(reg, Address::Absolute(invoke->GetStringInitOffset()));
     // (temp + offset_of_quick_compiled_code)()
     __ call(Address(
-        temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+        reg, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+  } else if (invoke->IsRecursive()) {
+    __ call(GetFrameEntryLabel());
   } else {
-    // temp = method;
-    LoadCurrentMethod(temp);
-    if (!invoke->IsRecursive()) {
-      // temp = temp->dex_cache_resolved_methods_;
-      __ movl(temp, Address(temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
-      // temp = temp[index_in_cache]
-      __ movl(temp, Address(temp,
-                            CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex())));
-      // (temp + offset_of_quick_compiled_code)()
-      __ call(Address(temp,
-          ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
-    } else {
-      __ call(GetFrameEntryLabel());
-    }
+    Register current_method =
+        invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex()).AsRegister<Register>();
+    Register reg = temp.AsRegister<Register>();
+    // reg = current_method->dex_cache_resolved_methods_;
+    __ movl(reg, Address(
+        current_method, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+    // reg = reg[index_in_cache]
+    __ movl(reg, Address(reg,
+                         CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex())));
+    // (reg + offset_of_quick_compiled_code)()
+    __ call(Address(reg,
+        ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
   }
 
   DCHECK(!IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 61827a4..b8553d2 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -263,7 +263,7 @@
   void Move64(Location destination, Location source);
 
   // Generate a call to a static or direct method.
-  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp);
+  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
 
   // Emit a write barrier.
   void MarkGCCard(Register temp,
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index a2a3cf5..c9fe813 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -360,7 +360,7 @@
 }
 
 void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
-                                                     CpuRegister temp) {
+                                                     Location temp) {
   // All registers are assumed to be correctly set up.
 
   // TODO: Implement all kinds of calls:
@@ -371,26 +371,28 @@
   // Currently we implement the app -> app logic, which looks up in the resolve cache.
 
   if (invoke->IsStringInit()) {
+    CpuRegister reg = temp.AsRegister<CpuRegister>();
     // temp = thread->string_init_entrypoint
-    __ gs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
+    __ gs()->movl(reg, Address::Absolute(invoke->GetStringInitOffset()));
     // (temp + offset_of_quick_compiled_code)()
-    __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+    __ call(Address(reg, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
         kX86_64WordSize).SizeValue()));
+  } else if (invoke->IsRecursive()) {
+    __ call(&frame_entry_label_);
   } else {
-    // temp = method;
-    LoadCurrentMethod(temp);
-    if (!invoke->IsRecursive()) {
-      // temp = temp->dex_cache_resolved_methods_;
-      __ movl(temp, Address(temp, ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
-      // temp = temp[index_in_cache]
-      __ movq(temp, Address(
-          temp, CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex())));
-      // (temp + offset_of_quick_compiled_code)()
-      __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-          kX86_64WordSize).SizeValue()));
-    } else {
-      __ call(&frame_entry_label_);
-    }
+    LocationSummary* locations = invoke->GetLocations();
+    CpuRegister reg = temp.AsRegister<CpuRegister>();
+    CpuRegister current_method =
+        locations->InAt(invoke->GetCurrentMethodInputIndex()).AsRegister<CpuRegister>();
+    // reg = current_method->dex_cache_resolved_methods_;
+    __ movl(reg, Address(
+        current_method, ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+    // reg = reg[index_in_cache]
+    __ movq(reg, Address(
+        reg, CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex())));
+    // (reg + offset_of_quick_compiled_code)()
+    __ call(Address(reg, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+        kX86_64WordSize).SizeValue()));
   }
 
   DCHECK(!IsLeafMethod());
@@ -1334,6 +1336,10 @@
 
   IntrinsicLocationsBuilderX86_64 intrinsic(codegen_);
   if (intrinsic.TryDispatch(invoke)) {
+    LocationSummary* locations = invoke->GetLocations();
+    if (locations->CanCall()) {
+      locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::RequiresRegister());
+    }
     return;
   }
 
@@ -1358,9 +1364,9 @@
     return;
   }
 
+  LocationSummary* locations = invoke->GetLocations();
   codegen_->GenerateStaticOrDirectCall(
-      invoke,
-      invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>());
+      invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
@@ -1390,12 +1396,8 @@
   Location receiver = locations->InAt(0);
   size_t class_offset = mirror::Object::ClassOffset().SizeValue();
   // temp = object->GetClass();
-  if (receiver.IsStackSlot()) {
-    __ movl(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
-    __ movl(temp, Address(temp, class_offset));
-  } else {
-    __ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset));
-  }
+  DCHECK(receiver.IsRegister());
+  __ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset));
   codegen_->MaybeRecordImplicitNullCheck(invoke);
   // temp = temp->GetMethodAt(method_offset);
   __ movq(temp, Address(temp, method_offset));
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index c19e686..61f863c 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -277,7 +277,7 @@
     return false;
   }
 
-  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, CpuRegister temp);
+  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
 
   const X86_64InstructionSetFeatures& GetInstructionSetFeatures() const {
     return isa_features_;
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 5436ec2..749bedf 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -101,7 +101,8 @@
     MoveArguments(invoke_, codegen);
 
     if (invoke_->IsInvokeStaticOrDirect()) {
-      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
+      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
+                                          Location::RegisterLocation(kArtMethodRegister));
       RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index d1dc5b3..c108ad5 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -110,7 +110,8 @@
     MoveArguments(invoke_, codegen);
 
     if (invoke_->IsInvokeStaticOrDirect()) {
-      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
+      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
+                                          LocationFrom(kArtMethodRegister));
       RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 5bbbc72..424ac7c 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -138,7 +138,8 @@
     MoveArguments(invoke_, codegen);
 
     if (invoke_->IsInvokeStaticOrDirect()) {
-      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), EAX);
+      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
+                                          Location::RegisterLocation(EAX));
       RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
@@ -732,7 +733,8 @@
   MoveArguments(invoke, codegen);
 
   DCHECK(invoke->IsInvokeStaticOrDirect());
-  codegen->GenerateStaticOrDirectCall(invoke->AsInvokeStaticOrDirect(), EAX);
+  codegen->GenerateStaticOrDirectCall(invoke->AsInvokeStaticOrDirect(),
+                                      Location::RegisterLocation(EAX));
   codegen->RecordPcInfo(invoke, invoke->GetDexPc());
 
   // Copy the result back to the expected output.
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index d6c90ff..8915314 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -129,7 +129,8 @@
     MoveArguments(invoke_, codegen);
 
     if (invoke_->IsInvokeStaticOrDirect()) {
-      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), CpuRegister(RDI));
+      codegen->GenerateStaticOrDirectCall(
+          invoke_->AsInvokeStaticOrDirect(), Location::RegisterLocation(RDI));
       RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
@@ -609,7 +610,8 @@
   MoveArguments(invoke, codegen);
 
   DCHECK(invoke->IsInvokeStaticOrDirect());
-  codegen->GenerateStaticOrDirectCall(invoke->AsInvokeStaticOrDirect(), CpuRegister(RDI));
+  codegen->GenerateStaticOrDirectCall(
+      invoke->AsInvokeStaticOrDirect(), Location::RegisterLocation(RDI));
   codegen->RecordPcInfo(invoke, invoke->GetDexPc());
 
   // Copy the result back to the expected output.
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 09bbb33..66c5fb1 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -525,6 +525,8 @@
     return temps_.Size();
   }
 
+  bool HasTemps() const { return !temps_.IsEmpty(); }
+
   Location Out() const { return output_; }
 
   bool CanCall() const { return call_kind_ != kNoCall; }
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 4792734..d914363 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2528,7 +2528,9 @@
                         ClinitCheckRequirement clinit_check_requirement)
       : HInvoke(arena,
                 number_of_arguments,
-                clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u,
+                // There is one extra input for the HCurrentMethod node, and
+                // potentially one other if the clinit check is explicit.
+                clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 2u : 1u,
                 return_type,
                 dex_pc,
                 dex_method_index,
@@ -2550,6 +2552,7 @@
   bool NeedsDexCache() const OVERRIDE { return !IsRecursive(); }
   bool IsStringInit() const { return string_init_offset_ != 0; }
   int32_t GetStringInitOffset() const { return string_init_offset_; }
+  uint32_t GetCurrentMethodInputIndex() const { return GetNumberOfArguments(); }
 
   // Is this instruction a call to a static method?
   bool IsStatic() const {
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index a381315..e38e49c 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -714,13 +714,15 @@
   if (defined_by != nullptr && !current->IsSplit()) {
     LocationSummary* locations = defined_by->GetLocations();
     if (!locations->OutputCanOverlapWithInputs() && locations->Out().IsUnallocated()) {
-      for (HInputIterator it(defined_by); !it.Done(); it.Advance()) {
+      for (size_t i = 0, e = defined_by->InputCount(); i < e; ++i) {
         // Take the last interval of the input. It is the location of that interval
         // that will be used at `defined_by`.
-        LiveInterval* interval = it.Current()->GetLiveInterval()->GetLastSibling();
+        LiveInterval* interval = defined_by->InputAt(i)->GetLiveInterval()->GetLastSibling();
         // Note that interval may have not been processed yet.
         // TODO: Handle non-split intervals last in the work list.
-        if (interval->HasRegister() && interval->SameRegisterKind(*current)) {
+        if (locations->InAt(i).IsValid()
+            && interval->HasRegister()
+            && interval->SameRegisterKind(*current)) {
           // The input must be live until the end of `defined_by`, to comply to
           // the linear scan algorithm. So we use `defined_by`'s end lifetime
           // position to check whether the input is dead or is inactive after
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index d5f977f..701dbb0 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -242,7 +242,7 @@
         HInstruction* input = current->InputAt(i);
         // Some instructions 'inline' their inputs, that is they do not need
         // to be materialized.
-        if (input->HasSsaIndex()) {
+        if (input->HasSsaIndex() && current->GetLocations()->InAt(i).IsValid()) {
           live_in->SetBit(input->GetSsaIndex());
           input->GetLiveInterval()->AddUse(current, /* environment */ nullptr, i);
         }
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 4667825..220ee6a 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -394,7 +394,7 @@
       first_range_->start_ = from;
     } else {
       // Instruction without uses.
-      DCHECK(!defined_by_->HasNonEnvironmentUses());
+      DCHECK(first_use_ == nullptr);
       DCHECK(from == defined_by_->GetLifetimePosition());
       first_range_ = last_range_ = range_search_start_ =
           new (allocator_) LiveRange(from, from + 2, nullptr);
diff --git a/test/441-checker-inliner/src/Main.java b/test/441-checker-inliner/src/Main.java
index df969a4..3899d7f 100644
--- a/test/441-checker-inliner/src/Main.java
+++ b/test/441-checker-inliner/src/Main.java
@@ -19,7 +19,7 @@
   /// CHECK-START: void Main.InlineVoid() inliner (before)
   /// CHECK-DAG:     <<Const42:i\d+>> IntConstant 42
   /// CHECK-DAG:                      InvokeStaticOrDirect
-  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Const42>>]
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Const42>>,{{[ij]\d+}}]
 
   /// CHECK-START: void Main.InlineVoid() inliner (after)
   /// CHECK-NOT:                      InvokeStaticOrDirect
@@ -31,7 +31,7 @@
 
   /// CHECK-START: int Main.InlineParameter(int) inliner (before)
   /// CHECK-DAG:     <<Param:i\d+>>  ParameterValue
-  /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect [<<Param>>]
+  /// CHECK-DAG:     <<Result:i\d+>> InvokeStaticOrDirect [<<Param>>,{{[ij]\d+}}]
   /// CHECK-DAG:                     Return [<<Result>>]
 
   /// CHECK-START: int Main.InlineParameter(int) inliner (after)
@@ -44,7 +44,7 @@
 
   /// CHECK-START: long Main.InlineWideParameter(long) inliner (before)
   /// CHECK-DAG:     <<Param:j\d+>>  ParameterValue
-  /// CHECK-DAG:     <<Result:j\d+>> InvokeStaticOrDirect [<<Param>>]
+  /// CHECK-DAG:     <<Result:j\d+>> InvokeStaticOrDirect [<<Param>>,{{[ij]\d+}}]
   /// CHECK-DAG:                     Return [<<Result>>]
 
   /// CHECK-START: long Main.InlineWideParameter(long) inliner (after)
@@ -57,7 +57,7 @@
 
   /// CHECK-START: java.lang.Object Main.InlineReferenceParameter(java.lang.Object) inliner (before)
   /// CHECK-DAG:     <<Param:l\d+>>  ParameterValue
-  /// CHECK-DAG:     <<Result:l\d+>> InvokeStaticOrDirect [<<Param>>]
+  /// CHECK-DAG:     <<Result:l\d+>> InvokeStaticOrDirect [<<Param>>,{{[ij]\d+}}]
   /// CHECK-DAG:                     Return [<<Result>>]
 
   /// CHECK-START: java.lang.Object Main.InlineReferenceParameter(java.lang.Object) inliner (after)
@@ -130,8 +130,8 @@
   /// CHECK-DAG:     <<Const1:i\d+>> IntConstant 1
   /// CHECK-DAG:     <<Const3:i\d+>> IntConstant 3
   /// CHECK-DAG:     <<Const5:i\d+>> IntConstant 5
-  /// CHECK-DAG:     <<Add:i\d+>>    InvokeStaticOrDirect [<<Const1>>,<<Const3>>]
-  /// CHECK-DAG:     <<Sub:i\d+>>    InvokeStaticOrDirect [<<Const5>>,<<Const3>>]
+  /// CHECK-DAG:     <<Add:i\d+>>    InvokeStaticOrDirect [<<Const1>>,<<Const3>>,{{[ij]\d+}}]
+  /// CHECK-DAG:     <<Sub:i\d+>>    InvokeStaticOrDirect [<<Const5>>,<<Const3>>,{{[ij]\d+}}]
   /// CHECK-DAG:     <<Phi:i\d+>>    Phi [<<Add>>,<<Sub>>]
   /// CHECK-DAG:                     Return [<<Phi>>]
 
diff --git a/test/478-checker-clinit-check-pruning/src/Main.java b/test/478-checker-clinit-check-pruning/src/Main.java
index 51be912..c23d9d3 100644
--- a/test/478-checker-clinit-check-pruning/src/Main.java
+++ b/test/478-checker-clinit-check-pruning/src/Main.java
@@ -26,7 +26,7 @@
   /// CHECK-START: void Main.invokeStaticInlined() builder (after)
   /// CHECK-DAG:     <<LoadClass:l\d+>>    LoadClass gen_clinit_check:false
   /// CHECK-DAG:     <<ClinitCheck:l\d+>>  ClinitCheck [<<LoadClass>>]
-  /// CHECK-DAG:                           InvokeStaticOrDirect [<<ClinitCheck>>]
+  /// CHECK-DAG:                           InvokeStaticOrDirect [{{[ij]\d+}},<<ClinitCheck>>]
 
   /// CHECK-START: void Main.invokeStaticInlined() inliner (after)
   /// CHECK-DAG:     <<LoadClass:l\d+>>    LoadClass gen_clinit_check:false
@@ -69,12 +69,12 @@
   /// CHECK-START: void Main.invokeStaticNotInlined() builder (after)
   /// CHECK-DAG:     <<LoadClass:l\d+>>    LoadClass gen_clinit_check:false
   /// CHECK-DAG:     <<ClinitCheck:l\d+>>  ClinitCheck [<<LoadClass>>]
-  /// CHECK-DAG:                           InvokeStaticOrDirect [<<ClinitCheck>>]
+  /// CHECK-DAG:                           InvokeStaticOrDirect [{{[ij]\d+}},<<ClinitCheck>>]
 
   /// CHECK-START: void Main.invokeStaticNotInlined() inliner (after)
   /// CHECK-DAG:     <<LoadClass:l\d+>>    LoadClass gen_clinit_check:false
   /// CHECK-DAG:     <<ClinitCheck:l\d+>>  ClinitCheck [<<LoadClass>>]
-  /// CHECK-DAG:                           InvokeStaticOrDirect [<<ClinitCheck>>]
+  /// CHECK-DAG:                           InvokeStaticOrDirect [{{[ij]\d+}},<<ClinitCheck>>]
 
   // The following checks ensure the clinit check and load class
   // instructions added by the builder are pruned by the