Merge "ART: On shutdown, only warn on mutex contention" into lmp-mr1-dev
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6f6eeec..b99aca1 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -508,7 +508,7 @@
TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
std::unique_ptr<ThreadPool> thread_pool(new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
- VLOG(compiler) << "Before precompile " << GetMemoryUsageString();
+ VLOG(compiler) << "Before precompile " << GetMemoryUsageString(false);
PreCompile(class_loader, dex_files, thread_pool.get(), timings);
Compile(class_loader, dex_files, thread_pool.get(), timings);
if (dump_stats_) {
@@ -605,10 +605,10 @@
void CompilerDriver::PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings) {
LoadImageClasses(timings);
- VLOG(compiler) << "LoadImageClasses: " << GetMemoryUsageString();
+ VLOG(compiler) << "LoadImageClasses: " << GetMemoryUsageString(false);
Resolve(class_loader, dex_files, thread_pool, timings);
- VLOG(compiler) << "Resolve: " << GetMemoryUsageString();
+ VLOG(compiler) << "Resolve: " << GetMemoryUsageString(false);
if (!compiler_options_->IsVerificationEnabled()) {
VLOG(compiler) << "Verify none mode specified, skipping verification.";
@@ -617,13 +617,13 @@
}
Verify(class_loader, dex_files, thread_pool, timings);
- VLOG(compiler) << "Verify: " << GetMemoryUsageString();
+ VLOG(compiler) << "Verify: " << GetMemoryUsageString(false);
InitializeClasses(class_loader, dex_files, thread_pool, timings);
- VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString();
+ VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString(false);
UpdateImageClasses(timings);
- VLOG(compiler) << "UpdateImageClasses: " << GetMemoryUsageString();
+ VLOG(compiler) << "UpdateImageClasses: " << GetMemoryUsageString(false);
}
bool CompilerDriver::IsImageClass(const char* descriptor) const {
@@ -1184,14 +1184,15 @@
// TODO: support patching on all architectures.
use_dex_cache = use_dex_cache || (force_relocations && !support_boot_image_fixup_);
}
- bool method_code_in_boot = (method->GetDeclaringClass()->GetClassLoader() == nullptr);
+ mirror::Class* declaring_class = method->GetDeclaringClass();
+ bool method_code_in_boot = (declaring_class->GetClassLoader() == nullptr);
if (!use_dex_cache) {
if (!method_code_in_boot) {
use_dex_cache = true;
} else {
bool has_clinit_trampoline =
- method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
- if (has_clinit_trampoline && (method->GetDeclaringClass() != referrer_class)) {
+ method->IsStatic() && !declaring_class->IsInitialized();
+ if (has_clinit_trampoline && (declaring_class != referrer_class)) {
// Ensure we run the clinit trampoline unless we are invoking a static method in the same
// class.
use_dex_cache = true;
@@ -1202,7 +1203,15 @@
*stats_flags |= kFlagDirectCallToBoot | kFlagDirectMethodToBoot;
}
if (!use_dex_cache && force_relocations) {
- if (!IsImage() || !IsImageClass(method->GetDeclaringClassDescriptor())) {
+ bool is_in_image;
+ if (IsImage()) {
+ is_in_image = IsImageClass(method->GetDeclaringClassDescriptor());
+ } else {
+ is_in_image = instruction_set_ != kX86 && instruction_set_ != kX86_64 &&
+ Runtime::Current()->GetHeap()->FindSpaceFromObject(declaring_class,
+ false)->IsImageSpace();
+ }
+ if (!is_in_image) {
// We can only branch directly to Methods that are resolved in the DexCache.
// Otherwise we won't invoke the resolution trampoline.
use_dex_cache = true;
@@ -1211,7 +1220,7 @@
// The method is defined not within this dex file. We need a dex cache slot within the current
// dex file or direct pointers.
bool must_use_direct_pointers = false;
- if (target_method->dex_file == method->GetDeclaringClass()->GetDexCache()->GetDexFile()) {
+ if (target_method->dex_file == declaring_class->GetDexCache()->GetDexFile()) {
target_method->dex_method_index = method->GetDexMethodIndex();
} else {
if (no_guarantee_of_dex_cache_entry) {
@@ -1225,7 +1234,7 @@
} else {
if (force_relocations && !use_dex_cache) {
target_method->dex_method_index = method->GetDexMethodIndex();
- target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ target_method->dex_file = declaring_class->GetDexCache()->GetDexFile();
}
must_use_direct_pointers = true;
}
@@ -1248,14 +1257,14 @@
*type = sharp_type;
*direct_method = force_relocations ? -1 : reinterpret_cast<uintptr_t>(method);
*direct_code = force_relocations ? -1 : compiler_->GetEntryPointOf(method);
- target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ target_method->dex_file = declaring_class->GetDexCache()->GetDexFile();
target_method->dex_method_index = method->GetDexMethodIndex();
} else if (!must_use_direct_pointers) {
// Set the code and rely on the dex cache for the method.
*type = sharp_type;
if (force_relocations) {
*direct_code = -1;
- target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ target_method->dex_file = declaring_class->GetDexCache()->GetDexFile();
target_method->dex_method_index = method->GetDexMethodIndex();
} else {
*direct_code = compiler_->GetEntryPointOf(method);
@@ -1976,7 +1985,7 @@
CHECK(dex_file != nullptr);
CompileDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
}
- VLOG(compiler) << "Compile: " << GetMemoryUsageString();
+ VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
}
void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, size_t class_def_index) {
@@ -2289,7 +2298,7 @@
return !compile;
}
-std::string CompilerDriver::GetMemoryUsageString() const {
+std::string CompilerDriver::GetMemoryUsageString(bool extended) const {
std::ostringstream oss;
const ArenaPool* arena_pool = GetArenaPool();
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -2305,11 +2314,13 @@
if (swap_space_.get() != nullptr) {
oss << " swap=" << PrettySize(swap_space_->GetSize());
}
- oss << "\nCode dedupe: " << dedupe_code_.DumpStats();
- oss << "\nMapping table dedupe: " << dedupe_mapping_table_.DumpStats();
- oss << "\nVmap table dedupe: " << dedupe_vmap_table_.DumpStats();
- oss << "\nGC map dedupe: " << dedupe_gc_map_.DumpStats();
- oss << "\nCFI info dedupe: " << dedupe_cfi_info_.DumpStats();
+ if (extended) {
+ oss << "\nCode dedupe: " << dedupe_code_.DumpStats();
+ oss << "\nMapping table dedupe: " << dedupe_mapping_table_.DumpStats();
+ oss << "\nVmap table dedupe: " << dedupe_vmap_table_.DumpStats();
+ oss << "\nGC map dedupe: " << dedupe_gc_map_.DumpStats();
+ oss << "\nCFI info dedupe: " << dedupe_cfi_info_.DumpStats();
+ }
return oss.str();
}
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 58285cc..db07a61 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -665,7 +665,7 @@
bool SkipCompilation(const std::string& method_name);
// Get memory usage during compilation.
- std::string GetMemoryUsageString() const;
+ std::string GetMemoryUsageString(bool extended) const;
private:
// These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index a20592c..1b7ac06 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -592,8 +592,10 @@
for (size_t i = 0; i < total_strings; ++i) {
strings->GetWithoutChecks(i)->SetArray(array);
}
- LOG(INFO) << "Total # image strings=" << total_strings << " combined length="
- << total_length << " prefix saved chars=" << prefix_saved_chars;
+ if (kIsDebugBuild || VLOG_IS_ON(compiler)) {
+ LOG(INFO) << "Total # image strings=" << total_strings << " combined length="
+ << total_length << " prefix saved chars=" << prefix_saved_chars;
+ }
ComputeEagerResolvedStrings();
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 3a06006..c411472 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -274,7 +274,8 @@
void LogCompletionTime(const CompilerDriver* compiler) {
LOG(INFO) << "dex2oat took " << PrettyDuration(NanoTime() - start_ns_)
- << " (threads: " << thread_count_ << ") " << compiler->GetMemoryUsageString();
+ << " (threads: " << thread_count_ << ") "
+ << compiler->GetMemoryUsageString(kIsDebugBuild || VLOG_IS_ON(compiler));
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 0afd174..5911735 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -5998,4 +5998,34 @@
return ComputeModifiedUtf8Hash(descriptor);
}
+bool ClassLinker::MayBeCalledWithDirectCodePointer(mirror::ArtMethod* m) {
+  // Methods not loaded by the bootstrap class loader (non-image methods) don't use a direct code pointer.
+ if (!m->GetDeclaringClass()->IsBootStrapClassLoaded()) {
+ return false;
+ }
+ if (m->IsPrivate()) {
+ // The method can only be called inside its own oat file. Therefore it won't be called using
+ // its direct code if the oat file has been compiled in PIC mode.
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ const DexFile& dex_file = m->GetDeclaringClass()->GetDexFile();
+ const OatFile::OatDexFile* oat_dex_file = class_linker->FindOpenedOatDexFileForDexFile(dex_file);
+ if (oat_dex_file == nullptr) {
+ // No oat file: the method has not been compiled.
+ return false;
+ }
+ const OatFile* oat_file = oat_dex_file->GetOatFile();
+ return oat_file != nullptr && !oat_file->IsPic();
+ } else {
+    // The method can be called from outside its own oat file. It is only guaranteed not to be
+    // called via its direct code pointer if every loaded oat file was compiled in PIC mode.
+ ReaderMutexLock mu(Thread::Current(), dex_lock_);
+ for (const OatFile* oat_file : oat_files_) {
+ if (!oat_file->IsPic()) {
+ return true;
+ }
+ }
+ return false;
+ }
+}
+
} // namespace art
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 3e45249..03dfe8f 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -423,6 +423,10 @@
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Returns true if the method can be called with its direct code pointer, false otherwise.
+ bool MayBeCalledWithDirectCodePointer(mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
private:
bool FindOatMethodFor(mirror::ArtMethod* method, OatFile::OatMethod* oat_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 7ca0927..4d96287 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -1135,4 +1135,24 @@
CheckPreverified(statics.Get(), true);
}
+TEST_F(ClassLinkerTest, IsBootStrapClassLoaded) {
+ ScopedObjectAccess soa(Thread::Current());
+
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Statics"))));
+
+ // java.lang.Object is a bootstrap class.
+ Handle<mirror::Class> jlo_class(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+ ASSERT_TRUE(jlo_class.Get() != nullptr);
+ EXPECT_TRUE(jlo_class.Get()->IsBootStrapClassLoaded());
+
+ // Statics is not a bootstrap class.
+ Handle<mirror::Class> statics(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStatics;", class_loader)));
+ ASSERT_TRUE(statics.Get() != nullptr);
+ EXPECT_FALSE(statics.Get()->IsBootStrapClassLoaded());
+}
+
} // namespace art
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 5db9d41..2fff70e 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -3233,8 +3233,16 @@
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
const bool is_compiled = class_linker->GetOatMethodQuickCodeFor(m) != nullptr;
if (is_compiled) {
- VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
- return DeoptimizationRequest::kSelectiveDeoptimization;
+ // If the method may be called through its direct code pointer (without loading
+ // its updated entrypoint), we need full deoptimization to not miss the breakpoint.
+ if (class_linker->MayBeCalledWithDirectCodePointer(m)) {
+ VLOG(jdwp) << "Need full deoptimization because of possible direct code call "
+ << "into image for compiled method " << PrettyMethod(m);
+ return DeoptimizationRequest::kFullDeoptimization;
+ } else {
+ VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
+ return DeoptimizationRequest::kSelectiveDeoptimization;
+ }
} else {
// Method is not compiled: we don't need to deoptimize.
VLOG(jdwp) << "No need for deoptimization for non-compiled method " << PrettyMethod(m);
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 8282fab..d06a67f 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -1088,15 +1088,14 @@
// back to an upcall.
NthCallerVisitor visitor(self, 1, true);
visitor.WalkStack(true);
- bool deoptimize = (visitor.caller != NULL) &&
+ bool deoptimize = (visitor.caller != nullptr) &&
(interpreter_stubs_installed_ || IsDeoptimized(visitor.caller));
- if (deoptimize && kVerboseInstrumentation) {
- LOG(INFO) << "Deoptimizing into " << PrettyMethod(visitor.caller);
- }
if (deoptimize) {
if (kVerboseInstrumentation) {
- LOG(INFO) << "Deoptimizing from " << PrettyMethod(method)
- << " result is " << std::hex << return_value.GetJ();
+ LOG(INFO) << StringPrintf("Deoptimizing %s by returning from %s with result %#" PRIx64 " in ",
+ PrettyMethod(visitor.caller).c_str(),
+ PrettyMethod(method).c_str(),
+ return_value.GetJ()) << *self;
}
self->SetDeoptimizationReturnValue(return_value);
return GetTwoWordSuccessValue(*return_pc,
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 26c7099..d2a3d09 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -500,7 +500,23 @@
StackHandleScope<1> hs(self);
MethodHelper mh(hs.NewHandle(shadow_frame->GetMethod()));
const DexFile::CodeItem* code_item = mh.GetMethod()->GetCodeItem();
- value = Execute(self, mh, code_item, *shadow_frame, value);
+ const uint32_t dex_pc = shadow_frame->GetDexPC();
+ uint32_t new_dex_pc;
+ if (UNLIKELY(self->IsExceptionPending())) {
+ const instrumentation::Instrumentation* const instrumentation =
+ Runtime::Current()->GetInstrumentation();
+ uint32_t found_dex_pc = FindNextInstructionFollowingException(self, *shadow_frame, dex_pc,
+ instrumentation);
+ new_dex_pc = found_dex_pc; // the dex pc of a matching catch handler
+ // or DexFile::kDexNoIndex if there is none.
+ } else {
+ const Instruction* instr = Instruction::At(&code_item->insns_[dex_pc]);
+ new_dex_pc = dex_pc + instr->SizeInCodeUnits(); // the dex pc of the next instruction.
+ }
+ if (new_dex_pc != DexFile::kDexNoIndex) {
+ shadow_frame->SetDexPC(new_dex_pc);
+ value = Execute(self, mh, code_item, *shadow_frame, value);
+ }
ShadowFrame* old_frame = shadow_frame;
shadow_frame = shadow_frame->GetLink();
delete old_frame;
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index e098ac8..f9c4978 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -147,7 +147,10 @@
const void* const* currentHandlersTable;
bool notified_method_entry_event = false;
UPDATE_HANDLER_TABLE();
- if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing..
+ if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing.
+ if (kIsDebugBuild) {
+ self->AssertNoPendingException();
+ }
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
instrumentation->MethodEnterEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -235,6 +238,7 @@
HANDLE_INSTRUCTION_START(MOVE_EXCEPTION) {
Throwable* exception = self->GetException(nullptr);
+ DCHECK(exception != nullptr) << "No pending exception on MOVE_EXCEPTION instruction";
shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
self->ClearException();
ADVANCE(1);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 5401495..5888180 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -70,7 +70,10 @@
uint32_t dex_pc = shadow_frame.GetDexPC();
bool notified_method_entry_event = false;
const instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation();
- if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing..
+ if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing.
+ if (kIsDebugBuild) {
+ self->AssertNoPendingException();
+ }
if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
instrumentation->MethodEnterEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), 0);
@@ -162,6 +165,7 @@
case Instruction::MOVE_EXCEPTION: {
PREAMBLE();
Throwable* exception = self->GetException(nullptr);
+ DCHECK(exception != nullptr) << "No pending exception on MOVE_EXCEPTION instruction";
shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
self->ClearException();
inst = inst->Next_1xx();
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 7066cf6..2e26d9c 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -25,6 +25,7 @@
#include "object_callbacks.h"
#include "quick/quick_method_frame_info.h"
#include "read_barrier_option.h"
+#include "stack.h"
namespace art {
@@ -407,8 +408,10 @@
return frame_size_in_bytes - kPointerSize;
}
- size_t GetHandleScopeOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return kPointerSize;
+ FrameOffset GetHandleScopeOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ constexpr size_t handle_scope_offset = sizeof(StackReference<mirror::ArtMethod>);
+ DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes());
+ return FrameOffset(handle_scope_offset);
}
void RegisterNative(Thread* self, const void* native_method, bool is_fast)
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index a474166..23df6c0 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -1045,6 +1045,11 @@
DISALLOW_COPY_AND_ASSIGN(InitializeClassVisitor);
};
+  // Returns true if the class loader is null, i.e. the class loader is the bootstrap class loader.
+ bool IsBootStrapClassLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetClassLoader() == nullptr;
+ }
+
private:
void SetVerifyErrorClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index d4713a0..9ad221c 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -203,9 +203,7 @@
CHECK(code_item != nullptr);
uint16_t num_regs = code_item->registers_size_;
uint32_t dex_pc = GetDexPc();
- const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
- uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
- ShadowFrame* new_frame = ShadowFrame::Create(num_regs, nullptr, m, new_dex_pc);
+ ShadowFrame* new_frame = ShadowFrame::Create(num_regs, nullptr, m, dex_pc);
StackHandleScope<2> hs(self_);
mirror::Class* declaring_class = m->GetDeclaringClass();
Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
diff --git a/runtime/stack.cc b/runtime/stack.cc
index f0b6c21..7dd4f08 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -122,7 +122,7 @@
} else if (m->IsNative()) {
if (cur_quick_frame_ != NULL) {
HandleScope* hs = reinterpret_cast<HandleScope*>(
- reinterpret_cast<char*>(cur_quick_frame_) + m->GetHandleScopeOffsetInBytes());
+ reinterpret_cast<char*>(cur_quick_frame_) + m->GetHandleScopeOffsetInBytes().SizeValue());
return hs->GetReference(0);
} else {
return cur_shadow_frame_->GetVRegReference(0);
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index f46cd14..ee7bd14 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1555,6 +1555,12 @@
break;
case Instruction::MOVE_EXCEPTION: {
+    // Disallow MOVE_EXCEPTION as the first instruction of a method: dex pc 0 is also the
+    // normal method entry point, so this handler would be reachable on a non-exception path.
+ if (work_insn_idx_ == 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "move-exception at pc 0x0";
+ break;
+ }
/*
* This statement can only appear as the first instruction in an exception handler. We verify
* that as part of extracting the exception type from the catch block list.