Put the resolved class in HLoadClass.

This avoids repeated class lookups in sharpening, reference type propagation (RTP), and inlining.
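
As a sketch of the resulting usage (the pass shown here is hypothetical; the
GetClass() accessor is the one added to HLoadClass below), consumers can now
read the resolved class straight off the instruction instead of re-resolving
the type through the dex cache:

    // Illustrative only; requires the mutator lock to dereference the handle.
    void ExamplePass::VisitLoadClass(HLoadClass* load_class)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      Handle<mirror::Class> klass = load_class->GetClass();
      if (klass.Get() != nullptr) {
        // Use `klass` directly; no ClassLinker/DexCache lookup is needed here.
      }
    }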

Test: test-art-host test-art-target
Change-Id: I08d0da36a4bb061cdaa490ea2af3a3217a875bbe
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index f896f11..8cf4089 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -63,7 +63,8 @@
                              driver,
                              interpreter_metadata,
                              compiler_stats,
-                             dex_cache) {}
+                             dex_cache,
+                             handles) {}
 
   // Only for unit testing.
   HGraphBuilder(HGraph* graph,
@@ -90,7 +91,8 @@
                              /* compiler_driver */ nullptr,
                              /* interpreter_metadata */ nullptr,
                              /* compiler_stats */ nullptr,
-                             null_dex_cache_) {}
+                             null_dex_cache_,
+                             handles) {}
 
   GraphAnalysisResult BuildGraph();
 
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index ef4bd1e..07b1746 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -5770,7 +5770,9 @@
   locations->SetOut(Location::RequiresRegister());
 }
 
-void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
   if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
     codegen_->GenerateLoadClassRuntimeCall(cls);
@@ -5821,8 +5823,9 @@
     }
     case HLoadClass::LoadKind::kBootImageAddress: {
       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
-      DCHECK_NE(cls->GetAddress(), 0u);
-      uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+      uint32_t address = dchecked_integral_cast<uint32_t>(
+          reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+      DCHECK_NE(address, 0u);
       __ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
       break;
     }
@@ -5842,7 +5845,7 @@
     case HLoadClass::LoadKind::kJitTableAddress: {
       __ LoadLiteral(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
                                                                cls->GetTypeIndex(),
-                                                               cls->GetAddress()));
+                                                               cls->GetClass()));
       // /* GcRoot<mirror::Class> */ out = *out
       GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
       break;
@@ -7348,8 +7351,9 @@
 
 Literal* CodeGeneratorARM::DeduplicateJitClassLiteral(const DexFile& dex_file,
                                                       dex::TypeIndex type_index,
-                                                      uint64_t address) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
+                                                      Handle<mirror::Class> handle) {
+  jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
+                             reinterpret_cast64<uint64_t>(handle.GetReference()));
   return jit_class_patches_.GetOrCreate(
       TypeReference(&dex_file, type_index),
       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index bd237e9..52d1857 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -496,7 +496,7 @@
                                        Handle<mirror::String> handle);
   Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
                                       dex::TypeIndex type_index,
-                                      uint64_t address);
+                                      Handle<mirror::Class> handle);
 
   void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
 
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a9617e1..870fb26 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4181,8 +4181,9 @@
 }
 
 vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral(
-    const DexFile& dex_file, dex::TypeIndex type_index, uint64_t address) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
+    const DexFile& dex_file, dex::TypeIndex type_index, Handle<mirror::Class> handle) {
+  jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
+                             reinterpret_cast64<uint64_t>(handle.GetReference()));
   return jit_class_patches_.GetOrCreate(
       TypeReference(&dex_file, type_index),
       [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
@@ -4377,7 +4378,9 @@
   locations->SetOut(Location::RequiresRegister());
 }
 
-void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
   if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
     codegen_->GenerateLoadClassRuntimeCall(cls);
@@ -4426,8 +4429,10 @@
     }
     case HLoadClass::LoadKind::kBootImageAddress: {
       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
-      DCHECK(cls->GetAddress() != 0u && IsUint<32>(cls->GetAddress()));
-      __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(cls->GetAddress()));
+      uint32_t address = dchecked_integral_cast<uint32_t>(
+          reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+      DCHECK_NE(address, 0u);
+      __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
@@ -4452,7 +4457,7 @@
     case HLoadClass::LoadKind::kJitTableAddress: {
       __ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
                                                        cls->GetTypeIndex(),
-                                                       cls->GetAddress()));
+                                                       cls->GetClass()));
       GenerateGcRootFieldLoad(cls,
                               out_loc,
                               out.X(),
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index c7a0614..a9dca92 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -579,7 +579,7 @@
                                                                 Handle<mirror::String> handle);
   vixl::aarch64::Literal<uint32_t>* DeduplicateJitClassLiteral(const DexFile& dex_file,
                                                                dex::TypeIndex string_index,
-                                                               uint64_t address);
+                                                               Handle<mirror::Class> handle);
 
   void EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, vixl::aarch64::Register reg);
   void EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 83c289e..05a76e1 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -5846,7 +5846,9 @@
   locations->SetOut(Location::RequiresRegister());
 }
 
-void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
   if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
     codegen_->GenerateLoadClassRuntimeCall(cls);
@@ -5892,8 +5894,9 @@
     }
     case HLoadClass::LoadKind::kBootImageAddress: {
       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
-      DCHECK_NE(cls->GetAddress(), 0u);
-      uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+      uint32_t address = dchecked_integral_cast<uint32_t>(
+          reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+      DCHECK_NE(address, 0u);
       __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
       break;
     }
@@ -5908,7 +5911,7 @@
     case HLoadClass::LoadKind::kJitTableAddress: {
       __ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
                                                        cls->GetTypeIndex(),
-                                                       cls->GetAddress()));
+                                                       cls->GetClass()));
       // /* GcRoot<mirror::Class> */ out = *out
       GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
       break;
@@ -7466,8 +7469,9 @@
 
 VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitClassLiteral(const DexFile& dex_file,
                                                       dex::TypeIndex type_index,
-                                                      uint64_t address) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
+                                                      Handle<mirror::Class> handle) {
+  jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
+                             reinterpret_cast64<uint64_t>(handle.GetReference()));
   return jit_class_patches_.GetOrCreate(
       TypeReference(&dex_file, type_index),
       [this]() {
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 0f0a954..be65353 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -579,7 +579,7 @@
                                                  Handle<mirror::String> handle);
   VIXLUInt32Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
                                                 dex::TypeIndex type_index,
-                                                uint64_t address);
+                                                Handle<mirror::Class> handle);
 
   void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
 
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 1f4ff27..24234e1 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -5490,7 +5490,9 @@
   locations->SetOut(Location::RequiresRegister());
 }
 
-void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
   if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
     codegen_->GenerateLoadClassRuntimeCall(cls);
@@ -5548,8 +5550,9 @@
     }
     case HLoadClass::LoadKind::kBootImageAddress: {
       DCHECK(!kEmitCompilerReadBarrier);
-      DCHECK_NE(cls->GetAddress(), 0u);
-      uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+      uint32_t address = dchecked_integral_cast<uint32_t>(
+          reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+      DCHECK_NE(address, 0u);
       __ LoadLiteral(out,
                      base_or_current_method_reg,
                      codegen_->DeduplicateBootImageAddressLiteral(address));
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index a350de7..5db0c8e 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3524,7 +3524,9 @@
   locations->SetOut(Location::RequiresRegister());
 }
 
-void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
   if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
     codegen_->GenerateLoadClassRuntimeCall(cls);
@@ -3569,8 +3571,9 @@
     }
     case HLoadClass::LoadKind::kBootImageAddress: {
       DCHECK(!kEmitCompilerReadBarrier);
-      DCHECK_NE(cls->GetAddress(), 0u);
-      uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+      uint32_t address = dchecked_integral_cast<uint32_t>(
+          reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+      DCHECK_NE(address, 0u);
       __ LoadLiteral(out,
                      kLoadUnsignedWord,
                      codegen_->DeduplicateBootImageAddressLiteral(address));
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a850d38..4debe29 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -6055,15 +6055,18 @@
 
 Label* CodeGeneratorX86::NewJitRootClassPatch(const DexFile& dex_file,
                                               dex::TypeIndex dex_index,
-                                              uint64_t address) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index), address);
+                                              Handle<mirror::Class> handle) {
+  jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
+                             reinterpret_cast64<uint64_t>(handle.GetReference()));
   // Add a patch entry and return the label.
   jit_class_patches_.emplace_back(dex_file, dex_index.index_);
   PatchInfo<Label>* info = &jit_class_patches_.back();
   return &info->label;
 }
 
-void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
   if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
     codegen_->GenerateLoadClassRuntimeCall(cls);
@@ -6110,8 +6113,9 @@
     }
     case HLoadClass::LoadKind::kBootImageAddress: {
       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
-      DCHECK_NE(cls->GetAddress(), 0u);
-      uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+      uint32_t address = dchecked_integral_cast<uint32_t>(
+          reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+      DCHECK_NE(address, 0u);
       __ movl(out, Immediate(address));
       codegen_->RecordSimplePatch();
       break;
@@ -6127,7 +6131,7 @@
     case HLoadClass::LoadKind::kJitTableAddress: {
       Address address = Address::Absolute(CodeGeneratorX86::kDummy32BitOffset);
       Label* fixup_label = codegen_->NewJitRootClassPatch(
-          cls->GetDexFile(), cls->GetTypeIndex(), cls->GetAddress());
+          cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass());
       // /* GcRoot<mirror::Class> */ out = *address
       GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
       break;
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index b86d080..9eb9765 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -419,7 +419,9 @@
   Label* NewJitRootStringPatch(const DexFile& dex_file,
                                dex::StringIndex dex_index,
                                Handle<mirror::String> handle);
-  Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
+  Label* NewJitRootClassPatch(const DexFile& dex_file,
+                              dex::TypeIndex dex_index,
+                              Handle<mirror::Class> handle);
 
   void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
 
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 2691af8..6423901 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -5482,15 +5482,18 @@
 
 Label* CodeGeneratorX86_64::NewJitRootClassPatch(const DexFile& dex_file,
                                                  dex::TypeIndex dex_index,
-                                                 uint64_t address) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index), address);
+                                                 Handle<mirror::Class> handle) {
+  jit_class_roots_.Overwrite(
+      TypeReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
   // Add a patch entry and return the label.
   jit_class_patches_.emplace_back(dex_file, dex_index.index_);
   PatchInfo<Label>* info = &jit_class_patches_.back();
   return &info->label;
 }
 
-void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
   if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
     codegen_->GenerateLoadClassRuntimeCall(cls);
@@ -5528,8 +5531,9 @@
       break;
     case HLoadClass::LoadKind::kBootImageAddress: {
       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
-      DCHECK_NE(cls->GetAddress(), 0u);
-      uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+      uint32_t address = dchecked_integral_cast<uint32_t>(
+          reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+      DCHECK_NE(address, 0u);
       __ movl(out, Immediate(address));  // Zero-extended.
       codegen_->RecordSimplePatch();
       break;
@@ -5547,7 +5551,7 @@
       Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
                                           /* no_rip */ true);
       Label* fixup_label =
-          codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetAddress());
+          codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass());
       // /* GcRoot<mirror::Class> */ out = *address
       GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
       break;
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 8b3ab4c..3438b81 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -416,7 +416,9 @@
   Label* NewJitRootStringPatch(const DexFile& dex_file,
                                dex::StringIndex dex_index,
                                Handle<mirror::String> handle);
-  Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
+  Label* NewJitRootClassPatch(const DexFile& dex_file,
+                              dex::TypeIndex dex_index,
+                              Handle<mirror::Class> handle);
 
   void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
 
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index d7da46b..50aa442 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -474,10 +474,10 @@
   HInstruction* receiver = invoke_instruction->InputAt(0);
   HInstruction* cursor = invoke_instruction->GetPrevious();
   HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
-  Handle<mirror::Class> handle = handles_->NewHandle(GetMonomorphicType(classes));
+  Handle<mirror::Class> monomorphic_type = handles_->NewHandle(GetMonomorphicType(classes));
   if (!TryInlineAndReplace(invoke_instruction,
                            resolved_method,
-                           ReferenceTypeInfo::Create(handle, /* is_exact */ true),
+                           ReferenceTypeInfo::Create(monomorphic_type, /* is_exact */ true),
                            /* do_rtp */ false,
                            /* cha_devirtualize */ false)) {
     return false;
@@ -488,7 +488,7 @@
                cursor,
                bb_cursor,
                class_index,
-               GetMonomorphicType(classes),
+               monomorphic_type,
                invoke_instruction,
                /* with_deoptimization */ true);
 
@@ -533,11 +533,9 @@
                                      HInstruction* cursor,
                                      HBasicBlock* bb_cursor,
                                      dex::TypeIndex class_index,
-                                     mirror::Class* klass,
+                                     Handle<mirror::Class> klass,
                                      HInstruction* invoke_instruction,
                                      bool with_deoptimization) {
-  ScopedAssertNoThreadSuspension sants("Adding compiler type guard");
-
   ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
   HInstanceFieldGet* receiver_class = BuildGetReceiverClass(
       class_linker, receiver, invoke_instruction->GetDexPc());
@@ -548,19 +546,20 @@
   }
 
   const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
-  bool is_referrer = (klass == outermost_graph_->GetArtMethod()->GetDeclaringClass());
+  bool is_referrer = (klass.Get() == outermost_graph_->GetArtMethod()->GetDeclaringClass());
   // Note that we will just compare the classes, so we don't need Java semantics access checks.
   // Note that the type index and the dex file are relative to the method this type guard is
   // inlined into.
   HLoadClass* load_class = new (graph_->GetArena()) HLoadClass(graph_->GetCurrentMethod(),
                                                                class_index,
                                                                caller_dex_file,
+                                                               klass,
                                                                is_referrer,
                                                                invoke_instruction->GetDexPc(),
                                                                /* needs_access_check */ false);
   bb_cursor->InsertInstructionAfter(load_class, receiver_class);
   // Sharpen after adding the instruction, as the sharpening may remove inputs.
-  HSharpening::SharpenClass(load_class, klass, handles_, codegen_, compiler_driver_);
+  HSharpening::SharpenClass(load_class, codegen_, compiler_driver_);
 
   // TODO: Extend reference type propagation to understand the guard.
   HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class);
@@ -637,7 +636,7 @@
                                            cursor,
                                            bb_cursor,
                                            class_index,
-                                           handle.Get(),
+                                           handle,
                                            invoke_instruction,
                                            deoptimize);
       if (deoptimize) {
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 4c0b990..11aacab 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -170,7 +170,7 @@
                              HInstruction* cursor,
                              HBasicBlock* bb_cursor,
                              dex::TypeIndex class_index,
-                             mirror::Class* klass,
+                             Handle<mirror::Class> klass,
                              HInstruction* invoke_instruction,
                              bool with_deoptimization)
     REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 3cfabdd..6aa5d45 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -936,48 +936,40 @@
 
 bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t dex_pc) {
   ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<1> hs(soa.Self());
   Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
-  Handle<mirror::Class> resolved_class(hs.NewHandle(dex_cache->GetResolvedType(type_index)));
-  const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
   Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
 
-  bool finalizable;
-  bool needs_access_check = NeedsAccessCheck(type_index, dex_cache, &finalizable);
-
-  // Only the access check entrypoint handles the finalizable class case. If we
-  // need access checks, then we haven't resolved the method and the class may
-  // again be finalizable.
-  QuickEntrypointEnum entrypoint = (finalizable || needs_access_check)
-      ? kQuickAllocObjectWithChecks
-      : kQuickAllocObjectInitialized;
-
   if (outer_dex_cache.Get() != dex_cache.Get()) {
     // We currently do not support inlining allocations across dex files.
     return false;
   }
 
-  HLoadClass* load_class = new (arena_) HLoadClass(
-      graph_->GetCurrentMethod(),
-      type_index,
-      outer_dex_file,
-      IsOutermostCompilingClass(type_index),
-      dex_pc,
-      needs_access_check);
+  HLoadClass* load_class = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
 
-  AppendInstruction(load_class);
   HInstruction* cls = load_class;
-  if (!IsInitialized(resolved_class)) {
+  Handle<mirror::Class> klass = load_class->GetClass();
+
+  if (!IsInitialized(klass)) {
     cls = new (arena_) HClinitCheck(load_class, dex_pc);
     AppendInstruction(cls);
   }
 
+  // Only the access check entrypoint handles the finalizable class case. If we
+  // need access checks, then we haven't resolved the method and the class may
+  // again be finalizable.
+  QuickEntrypointEnum entrypoint = kQuickAllocObjectInitialized;
+  if (load_class->NeedsAccessCheck() || klass->IsFinalizable() || !klass->IsInstantiable()) {
+    entrypoint = kQuickAllocObjectWithChecks;
+  }
+
+  // Consider classes we haven't resolved as potentially finalizable.
+  bool finalizable = (klass.Get() == nullptr) || klass->IsFinalizable();
+
   AppendInstruction(new (arena_) HNewInstance(
       cls,
       dex_pc,
       type_index,
       *dex_compilation_unit_->GetDexFile(),
-      needs_access_check,
       finalizable,
       entrypoint));
   return true;
@@ -1018,7 +1010,6 @@
       ArtMethod* resolved_method,
       uint32_t method_idx,
       HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) {
-  const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
   Thread* self = Thread::Current();
   StackHandleScope<2> hs(self);
   Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
@@ -1046,15 +1037,9 @@
     *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
   } else if (storage_index.IsValid()) {
     *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
-    HLoadClass* load_class = new (arena_) HLoadClass(
-        graph_->GetCurrentMethod(),
-        storage_index,
-        outer_dex_file,
-        is_outer_class,
-        dex_pc,
-        /*needs_access_check*/ false);
-    AppendInstruction(load_class);
-    clinit_check = new (arena_) HClinitCheck(load_class, dex_pc);
+    HLoadClass* cls = BuildLoadClass(
+        storage_index, dex_pc, /* check_access */ false, /* outer */ true);
+    clinit_check = new (arena_) HClinitCheck(cls, dex_pc);
     AppendInstruction(clinit_check);
   }
   return clinit_check;
@@ -1376,7 +1361,6 @@
   }
 
   Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
-  const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
   Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
   Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
 
@@ -1404,16 +1388,10 @@
     }
   }
 
-  HLoadClass* constant = new (arena_) HLoadClass(graph_->GetCurrentMethod(),
-                                                 storage_index,
-                                                 outer_dex_file,
-                                                 is_outer_class,
-                                                 dex_pc,
-                                                 /*needs_access_check*/ false);
-  AppendInstruction(constant);
+  HLoadClass* constant = BuildLoadClass(
+      storage_index, dex_pc, /* check_access */ false, /* outer */ true);
 
   HInstruction* cls = constant;
-
   Handle<mirror::Class> klass(hs.NewHandle(resolved_field->GetDeclaringClass()));
   if (!IsInitialized(klass)) {
     cls = new (arena_) HClinitCheck(constant, dex_pc);
@@ -1660,33 +1638,53 @@
   }
 }
 
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
+                                                uint32_t dex_pc,
+                                                bool check_access,
+                                                bool outer) {
+  ScopedObjectAccess soa(Thread::Current());
+  const DexCompilationUnit* compilation_unit =
+      outer ? outer_compilation_unit_ : dex_compilation_unit_;
+  const DexFile& dex_file = *compilation_unit->GetDexFile();
+  Handle<mirror::DexCache> dex_cache = compilation_unit->GetDexCache();
+  bool is_accessible = false;
+  Handle<mirror::Class> klass = handles_->NewHandle(dex_cache->GetResolvedType(type_index));
+  if (!check_access) {
+    is_accessible = true;
+  } else if (klass.Get() != nullptr) {
+    if (klass->IsPublic()) {
+      is_accessible = true;
+    } else {
+      mirror::Class* compiling_class = GetCompilingClass();
+      if (compiling_class != nullptr && compiling_class->CanAccess(klass.Get())) {
+        is_accessible = true;
+      }
+    }
+  }
+
+  HLoadClass* load_class = new (arena_) HLoadClass(
+      graph_->GetCurrentMethod(),
+      type_index,
+      dex_file,
+      klass,
+      klass.Get() != nullptr && (klass.Get() == GetOutermostCompilingClass()),
+      dex_pc,
+      !is_accessible);
+
+  AppendInstruction(load_class);
+  return load_class;
+}
+
 void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
                                          uint8_t destination,
                                          uint8_t reference,
                                          dex::TypeIndex type_index,
                                          uint32_t dex_pc) {
-  ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<1> hs(soa.Self());
-  const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
-  Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
-  Handle<mirror::Class> resolved_class(hs.NewHandle(dex_cache->GetResolvedType(type_index)));
-
-  bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
-      dex_compilation_unit_->GetDexMethodIndex(),
-      dex_cache,
-      type_index);
-
   HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
-  HLoadClass* cls = new (arena_) HLoadClass(
-      graph_->GetCurrentMethod(),
-      type_index,
-      dex_file,
-      IsOutermostCompilingClass(type_index),
-      dex_pc,
-      !can_access);
-  AppendInstruction(cls);
+  HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
 
-  TypeCheckKind check_kind = ComputeTypeCheckKind(resolved_class);
+  ScopedObjectAccess soa(Thread::Current());
+  TypeCheckKind check_kind = ComputeTypeCheckKind(cls->GetClass());
   if (instruction.Opcode() == Instruction::INSTANCE_OF) {
     AppendInstruction(new (arena_) HInstanceOf(object, cls, check_kind, dex_pc));
     UpdateLocal(destination, current_block_->GetLastInstruction());
@@ -2690,21 +2688,7 @@
 
     case Instruction::CONST_CLASS: {
       dex::TypeIndex type_index(instruction.VRegB_21c());
-      // `CanAccessTypeWithoutChecks` will tell whether the method being
-      // built is trying to access its own class, so that the generated
-      // code can optimize for this case. However, the optimization does not
-      // work for inlining, so we use `IsOutermostCompilingClass` instead.
-      ScopedObjectAccess soa(Thread::Current());
-      Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
-      bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
-          dex_compilation_unit_->GetDexMethodIndex(), dex_cache, type_index);
-      AppendInstruction(new (arena_) HLoadClass(
-          graph_->GetCurrentMethod(),
-          type_index,
-          *dex_file_,
-          IsOutermostCompilingClass(type_index),
-          dex_pc,
-          !can_access));
+      BuildLoadClass(type_index, dex_pc, /* check_access */ true);
       UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
       break;
     }
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index aef0b94..5efe950 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -46,9 +46,11 @@
                       CompilerDriver* driver,
                       const uint8_t* interpreter_metadata,
                       OptimizingCompilerStats* compiler_stats,
-                      Handle<mirror::DexCache> dex_cache)
+                      Handle<mirror::DexCache> dex_cache,
+                      VariableSizedHandleScope* handles)
       : arena_(graph->GetArena()),
         graph_(graph),
+        handles_(handles),
         dex_file_(dex_file),
         code_item_(code_item),
         return_type_(return_type),
@@ -223,6 +225,14 @@
   // Builds an instruction sequence for a switch statement.
   void BuildSwitch(const Instruction& instruction, uint32_t dex_pc);
 
+  // Builds an `HLoadClass` loading the given `type_index`. If `outer` is true,
+  // this method will use the outer class's dex file to look up the type at
+  // `type_index`.
+  HLoadClass* BuildLoadClass(dex::TypeIndex type_index,
+                             uint32_t dex_pc,
+                             bool check_access,
+                             bool outer = false);
+
   // Returns the outer-most compiling method's class.
   mirror::Class* GetOutermostCompilingClass() const;
 
@@ -282,6 +292,7 @@
 
   ArenaAllocator* const arena_;
   HGraph* const graph_;
+  VariableSizedHandleScope* handles_;
 
   // The dex file where the method being compiled is, and the bytecode data.
   const DexFile* const dex_file_;
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 4f30b11..2d3c00f 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -979,7 +979,7 @@
     }
     if (ref_info->IsSingletonAndRemovable() &&
         !new_instance->IsFinalizable() &&
-        !new_instance->NeedsAccessCheck()) {
+        !new_instance->NeedsChecks()) {
       singleton_new_instances_.push_back(new_instance);
     }
     ArenaVector<HInstruction*>& heap_values =
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 0af0d19..fa51287 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2440,9 +2440,9 @@
 // mirrors, they are stored in a variable size handle scope which is always
 // visited during a pause. Also, the only caller of this helper
 // only uses the mirror for pointer comparison.
-static inline mirror::Class* AsMirrorInternal(uint64_t address)
+static inline mirror::Class* AsMirrorInternal(Handle<mirror::Class> handle)
     NO_THREAD_SAFETY_ANALYSIS {
-  return reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr();
+  return handle.Get();
 }
 
 bool HLoadClass::InstructionDataEquals(const HInstruction* other) const {
@@ -2455,9 +2455,8 @@
   }
   switch (GetLoadKind()) {
     case LoadKind::kBootImageAddress:
-      return GetAddress() == other_load_class->GetAddress();
     case LoadKind::kJitTableAddress:
-      return AsMirrorInternal(GetAddress()) == AsMirrorInternal(other_load_class->GetAddress());
+      return AsMirrorInternal(GetClass()) == AsMirrorInternal(other_load_class->GetClass());
     default:
       DCHECK(HasTypeReference(GetLoadKind()));
       return IsSameDexFile(GetDexFile(), other_load_class->GetDexFile());
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 3e7914c..d50a71c 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -3784,14 +3784,12 @@
                uint32_t dex_pc,
                dex::TypeIndex type_index,
                const DexFile& dex_file,
-               bool needs_access_check,
                bool finalizable,
                QuickEntrypointEnum entrypoint)
       : HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC(), dex_pc),
         type_index_(type_index),
         dex_file_(dex_file),
         entrypoint_(entrypoint) {
-    SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check);
     SetPackedFlag<kFlagFinalizable>(finalizable);
     SetRawInputAt(0, cls);
   }
@@ -3805,8 +3803,9 @@
   // Can throw errors when out-of-memory or if it's not instantiable/accessible.
   bool CanThrow() const OVERRIDE { return true; }
 
-  // Needs to call into runtime to make sure it's instantiable/accessible.
-  bool NeedsAccessCheck() const { return GetPackedFlag<kFlagNeedsAccessCheck>(); }
+  bool NeedsChecks() const {
+    return entrypoint_ == kQuickAllocObjectWithChecks;
+  }
 
   bool IsFinalizable() const { return GetPackedFlag<kFlagFinalizable>(); }
 
@@ -3823,8 +3822,7 @@
   DECLARE_INSTRUCTION(NewInstance);
 
  private:
-  static constexpr size_t kFlagNeedsAccessCheck = kNumberOfExpressionPackedBits;
-  static constexpr size_t kFlagFinalizable = kFlagNeedsAccessCheck + 1;
+  static constexpr size_t kFlagFinalizable = kNumberOfExpressionPackedBits;
   static constexpr size_t kNumberOfNewInstancePackedBits = kFlagFinalizable + 1;
   static_assert(kNumberOfNewInstancePackedBits <= kMaxNumberOfPackedBits,
                 "Too many packed fields.");
@@ -5567,6 +5565,7 @@
   HLoadClass(HCurrentMethod* current_method,
              dex::TypeIndex type_index,
              const DexFile& dex_file,
+             Handle<mirror::Class> klass,
              bool is_referrers_class,
              uint32_t dex_pc,
              bool needs_access_check)
@@ -5574,7 +5573,7 @@
         special_input_(HUserRecord<HInstruction*>(current_method)),
         type_index_(type_index),
         dex_file_(dex_file),
-        address_(0u),
+        klass_(klass),
         loaded_class_rti_(ReferenceTypeInfo::CreateInvalid()) {
     // Referrers class should not need access check. We never inline unverified
     // methods so we can't possibly end up in this situation.
@@ -5587,10 +5586,7 @@
     SetPackedFlag<kFlagGenerateClInitCheck>(false);
   }
 
-  void SetLoadKindWithAddress(LoadKind load_kind, uint64_t address) {
-    DCHECK(HasAddress(load_kind));
-    DCHECK_NE(address, 0u);
-    address_ = address;
+  void SetLoadKind(LoadKind load_kind) {
     SetLoadKindInternal(load_kind);
   }
 
@@ -5657,11 +5653,6 @@
   dex::TypeIndex GetTypeIndex() const { return type_index_; }
   const DexFile& GetDexFile() const { return dex_file_; }
 
-  uint64_t GetAddress() const {
-    DCHECK(HasAddress(GetLoadKind()));
-    return address_;
-  }
-
   bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
     return GetLoadKind() == LoadKind::kDexCacheViaMethod;
   }
@@ -5691,6 +5682,10 @@
     return Primitive::kPrimNot;
   }
 
+  Handle<mirror::Class> GetClass() const {
+    return klass_;
+  }
+
   DECLARE_INSTRUCTION(LoadClass);
 
  private:
@@ -5714,11 +5709,6 @@
         load_kind == LoadKind::kDexCacheViaMethod;
   }
 
-  static bool HasAddress(LoadKind load_kind) {
-    return load_kind == LoadKind::kBootImageAddress ||
-        load_kind == LoadKind::kJitTableAddress;
-  }
-
   void SetLoadKindInternal(LoadKind load_kind);
 
   // The special input is the HCurrentMethod for kDexCacheViaMethod or kReferrersClass.
@@ -5729,7 +5719,7 @@
   const dex::TypeIndex type_index_;
   const DexFile& dex_file_;
 
-  uint64_t address_;  // Up to 64-bit, needed for kJitTableAddress on 64-bit targets.
+  Handle<mirror::Class> klass_;
 
   ReferenceTypeInfo loaded_class_rti_;
 
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index f8a4469..4b99aac 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -619,14 +619,10 @@
 
 void ReferenceTypePropagation::RTPVisitor::VisitLoadClass(HLoadClass* instr) {
   ScopedObjectAccess soa(Thread::Current());
-  // Get type from dex cache assuming it was populated by the verifier.
-  mirror::Class* resolved_class = GetClassFromDexCache(soa.Self(),
-                                                       instr->GetDexFile(),
-                                                       instr->GetTypeIndex(),
-                                                       hint_dex_cache_);
-  if (IsAdmissible(resolved_class)) {
+  Handle<mirror::Class> resolved_class = instr->GetClass();
+  if (IsAdmissible(resolved_class.Get())) {
     instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(
-        handle_cache_->NewHandle(resolved_class), /* is_exact */ true));
+        resolved_class, /* is_exact */ true));
   }
   instr->SetReferenceTypeInfo(
       ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact */ true));
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index eabda26..c529410 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -133,24 +133,13 @@
 
 void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
   ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<1> hs(soa.Self());
-  Runtime* runtime = Runtime::Current();
-  ClassLinker* class_linker = runtime->GetClassLinker();
-  const DexFile& dex_file = load_class->GetDexFile();
-  dex::TypeIndex type_index = load_class->GetTypeIndex();
-  Handle<mirror::DexCache> dex_cache = IsSameDexFile(dex_file, *compilation_unit_.GetDexFile())
-      ? compilation_unit_.GetDexCache()
-      : hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
-  mirror::Class* cls = dex_cache->GetResolvedType(type_index);
-  SharpenClass(load_class, cls, handles_, codegen_, compiler_driver_);
+  SharpenClass(load_class, codegen_, compiler_driver_);
 }
 
 void HSharpening::SharpenClass(HLoadClass* load_class,
-                               mirror::Class* klass,
-                               VariableSizedHandleScope* handles,
                                CodeGenerator* codegen,
                                CompilerDriver* compiler_driver) {
-  ScopedAssertNoThreadSuspension sants("Sharpening class in compiler");
+  Handle<mirror::Class> klass = load_class->GetClass();
   DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCacheViaMethod ||
          load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass)
       << load_class->GetLoadKind();
@@ -174,7 +163,6 @@
 
   bool is_in_boot_image = false;
   HLoadClass::LoadKind desired_load_kind = static_cast<HLoadClass::LoadKind>(-1);
-  uint64_t address = 0u;  // Class or dex cache element address.
   Runtime* runtime = Runtime::Current();
   if (codegen->GetCompilerOptions().IsBootImage()) {
     // Compiling boot image. Check if the class is a boot image class.
@@ -182,7 +170,7 @@
     if (!compiler_driver->GetSupportBootImageFixup()) {
       // compiler_driver_test. Do not sharpen.
       desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
-    } else if ((klass != nullptr) && compiler_driver->IsImageClass(
+    } else if ((klass.Get() != nullptr) && compiler_driver->IsImageClass(
         dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
       is_in_boot_image = true;
       desired_load_kind = codegen->GetCompilerOptions().GetCompilePic()
@@ -194,20 +182,16 @@
       desired_load_kind = HLoadClass::LoadKind::kBssEntry;
     }
   } else {
-    is_in_boot_image = (klass != nullptr) && runtime->GetHeap()->ObjectIsInBootImageSpace(klass);
+    is_in_boot_image = (klass.Get() != nullptr) &&
+        runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
     if (runtime->UseJitCompilation()) {
       // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
       // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
       if (is_in_boot_image) {
         // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
         desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
-        address = reinterpret_cast64<uint64_t>(klass);
-      } else if (klass != nullptr) {
+      } else if (klass.Get() != nullptr) {
         desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
-        // We store in the address field the location of the stack reference maintained
-        // by the handle. We do this now so that the code generation does not need to figure
-        // out which class loader to use.
-        address = reinterpret_cast<uint64_t>(handles->NewHandle(klass).GetReference());
       } else {
         // Class not loaded yet. This happens when the dex code requesting
         // this `HLoadClass` hasn't been executed in the interpreter.
@@ -218,7 +202,6 @@
     } else if (is_in_boot_image && !codegen->GetCompilerOptions().GetCompilePic()) {
       // AOT app compilation. Check if the class is in the boot image.
       desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
-      address = reinterpret_cast64<uint64_t>(klass);
     } else {
       // Not JIT and either the klass is not in boot image or we are compiling in PIC mode.
       desired_load_kind = HLoadClass::LoadKind::kBssEntry;
@@ -240,8 +223,7 @@
       break;
     case HLoadClass::LoadKind::kBootImageAddress:
     case HLoadClass::LoadKind::kJitTableAddress:
-      DCHECK_NE(address, 0u);
-      load_class->SetLoadKindWithAddress(load_kind, address);
+      load_class->SetLoadKind(load_kind);
       break;
     default:
       LOG(FATAL) << "Unexpected load kind: " << load_kind;
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index ae5ccb3..ae3d83e 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -49,8 +49,6 @@
 
   // Used internally but also by the inliner.
   static void SharpenClass(HLoadClass* load_class,
-                           mirror::Class* klass,
-                           VariableSizedHandleScope* handles,
                            CodeGenerator* codegen,
                            CompilerDriver* compiler_driver)
     REQUIRES_SHARED(Locks::mutator_lock_);