/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "method_verifier-inl.h"
#include <ostream>
#include "android-base/stringprintf.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/aborting.h"
#include "base/enums.h"
#include "base/leb128.h"
#include "base/indenter.h"
#include "base/logging.h" // For VLOG.
#include "base/mutex-inl.h"
#include "base/sdk_version.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "class_linker.h"
#include "class_root.h"
#include "compiler_callbacks.h"
#include "dex/class_accessor-inl.h"
#include "dex/descriptors_names.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_exception_helpers.h"
#include "dex/dex_instruction-inl.h"
#include "dex/dex_instruction_utils.h"
#include "experimental_flags.h"
#include "gc/accounting/card_table-inl.h"
#include "handle_scope-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/class.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/method_handle_impl.h"
#include "mirror/method_type.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/var_handle.h"
#include "reg_type-inl.h"
#include "register_line-inl.h"
#include "runtime.h"
#include "scoped_newline.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "vdex_file.h"
#include "verifier_compiler_binding.h"
#include "verifier_deps.h"
namespace art {
namespace verifier {
using android::base::StringPrintf;
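// Whether to time individual method verification and warn about slow ones (see the
// duration check at the end of VerifyMethod). Enabled only outside debug builds.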
static constexpr bool kTimeVerifyMethod = !kIsDebugBuild;
// On VLOG(verifier), should we dump the whole state when we run into a hard failure?
static constexpr bool kDumpRegLinesOnHardFailureIfVLOG = true;
PcToRegisterLineTable::PcToRegisterLineTable(ScopedArenaAllocator& allocator)
: register_lines_(allocator.Adapter(kArenaAllocVerifier)) {}
void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* flags,
uint32_t insns_size, uint16_t registers_size,
MethodVerifier* verifier) {
DCHECK_GT(insns_size, 0U);
register_lines_.resize(insns_size);
for (uint32_t i = 0; i < insns_size; i++) {
bool interesting = false;
switch (mode) {
case kTrackRegsAll:
interesting = flags[i].IsOpcode();
break;
case kTrackCompilerInterestPoints:
interesting = flags[i].IsCompileTimeInfoPoint() || flags[i].IsBranchTarget();
break;
case kTrackRegsBranches:
interesting = flags[i].IsBranchTarget();
break;
default:
break;
}
if (interesting) {
register_lines_[i].reset(RegisterLine::Create(registers_size, verifier));
}
}
}
PcToRegisterLineTable::~PcToRegisterLineTable() {}
// Note: returns true on failure.
inline bool MethodVerifier::FailOrAbort(bool condition,
const char* error_msg,
uint32_t work_insn_idx) {
if (kIsDebugBuild) {
// In a debug build, abort if the error condition is wrong. Only warn if
// we are already aborting (as this verification is likely run to print
// lock information).
if (LIKELY(gAborting == 0)) {
DCHECK(condition) << error_msg << work_insn_idx << " "
<< dex_file_->PrettyMethod(dex_method_idx_);
} else {
if (!condition) {
LOG(ERROR) << error_msg << work_insn_idx;
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << error_msg << work_insn_idx;
return true;
}
}
} else {
// In a non-debug build, just fail the class.
if (!condition) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << error_msg << work_insn_idx;
return true;
}
}
return false;
}
static void SafelyMarkAllRegistersAsConflicts(MethodVerifier* verifier, RegisterLine* reg_line) {
if (verifier->IsInstanceConstructor()) {
// Before we mark all regs as conflicts, check that we don't have an uninitialized this.
reg_line->CheckConstructorReturn(verifier);
}
reg_line->MarkAllRegistersAsConflicts(verifier);
}
static FailureKind FailureKindMax(FailureKind fk1, FailureKind fk2) {
static_assert(FailureKind::kNoFailure < FailureKind::kSoftFailure
&& FailureKind::kSoftFailure < FailureKind::kHardFailure,
"Unexpected FailureKind order");
return std::max(fk1, fk2);
}
void MethodVerifier::FailureData::Merge(const MethodVerifier::FailureData& fd) {
kind = FailureKindMax(kind, fd.kind);
types |= fd.types;
}
static bool IsLargeMethod(const CodeItemDataAccessor& accessor) {
if (!accessor.HasCodeItem()) {
return false;
}
uint16_t registers_size = accessor.RegistersSize();
uint32_t insns_size = accessor.InsnsSizeInCodeUnits();
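// Heuristic threshold: flag the method as large when the product of register count
// and code size (the per-instruction register-tracking footprint) exceeds 4M.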
return registers_size * insns_size > 4*1024*1024;
}
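// Illustrative call shape for this entry point (a sketch; the argument values are
// assumptions, normally supplied by the class-level verifier):
//
//   std::string hard_failure_msg;
//   MethodVerifier::FailureData data = MethodVerifier::VerifyMethod(
//       Thread::Current(), method_idx, dex_file, dex_cache, class_loader,
//       class_def, code_item, method, access_flags, callbacks,
//       /* allow_soft_failures= */ true, HardFailLogMode::kLogWarning,
//       /* need_precise_constants= */ false, api_level, &hard_failure_msg);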
MethodVerifier::FailureData MethodVerifier::VerifyMethod(Thread* self,
uint32_t method_idx,
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
const dex::ClassDef& class_def,
const dex::CodeItem* code_item,
ArtMethod* method,
uint32_t method_access_flags,
CompilerCallbacks* callbacks,
bool allow_soft_failures,
HardFailLogMode log_level,
bool need_precise_constants,
uint32_t api_level,
std::string* hard_failure_msg) {
MethodVerifier::FailureData result;
uint64_t start_ns = kTimeVerifyMethod ? NanoTime() : 0;
MethodVerifier verifier(self,
dex_file,
dex_cache,
class_loader,
class_def,
code_item,
method_idx,
method,
method_access_flags,
/* can_load_classes= */ true,
allow_soft_failures,
need_precise_constants,
/* verify_to_dump= */ false,
/* allow_thread_suspension= */ true,
api_level);
if (verifier.Verify()) {
// Verification completed, however failures may be pending that didn't cause the verification
// to hard fail.
CHECK(!verifier.have_pending_hard_failure_);
if (code_item != nullptr && callbacks != nullptr) {
// Let the interested party know that the method was verified.
callbacks->MethodVerified(&verifier);
}
if (verifier.failures_.size() != 0) {
if (VLOG_IS_ON(verifier)) {
verifier.DumpFailures(VLOG_STREAM(verifier) << "Soft verification failures in "
<< dex_file->PrettyMethod(method_idx) << "\n");
}
if (VLOG_IS_ON(verifier_debug)) {
LOG(INFO) << verifier.info_messages_.str();
verifier.Dump(LOG_STREAM(INFO));
}
result.kind = FailureKind::kSoftFailure;
if (method != nullptr &&
!CanCompilerHandleVerificationFailure(verifier.encountered_failure_types_)) {
method->SetDontCompile();
}
}
if (method != nullptr) {
if (verifier.HasInstructionThatWillThrow()) {
method->SetDontCompile();
if (Runtime::Current()->IsAotCompiler() &&
(callbacks != nullptr) && !callbacks->IsBootImage()) {
// When compiling apps, make HasInstructionThatWillThrow a soft error to trigger
// re-verification at runtime.
// The dead code after the throw is not verified and might be invalid. This may cause
// the JIT compiler to crash since it assumes that all the code is valid.
//
// There's a strong assumption that the entire boot image is verified and all its dex
// code is valid (even the dead and unverified one). As such this is done only for apps.
// (CompilerDriver DCHECKs in VerifyClassVisitor that methods from boot image are
// fully verified).
result.kind = FailureKind::kSoftFailure;
}
}
if ((verifier.encountered_failure_types_ & VerifyError::VERIFY_ERROR_LOCKING) != 0) {
method->SetMustCountLocks();
}
}
} else {
// Bad method data.
CHECK_NE(verifier.failures_.size(), 0U);
if (UNLIKELY(verifier.have_pending_experimental_failure_)) {
// Failed due to being forced into interpreter. This is ok because
// we just want to skip verification.
result.kind = FailureKind::kSoftFailure;
} else {
CHECK(verifier.have_pending_hard_failure_);
if (VLOG_IS_ON(verifier)) {
log_level = std::max(HardFailLogMode::kLogVerbose, log_level);
}
if (log_level >= HardFailLogMode::kLogVerbose) {
LogSeverity severity;
switch (log_level) {
case HardFailLogMode::kLogVerbose:
severity = LogSeverity::VERBOSE;
break;
case HardFailLogMode::kLogWarning:
severity = LogSeverity::WARNING;
break;
case HardFailLogMode::kLogInternalFatal:
severity = LogSeverity::FATAL_WITHOUT_ABORT;
break;
default:
LOG(FATAL) << "Unsupported log-level " << static_cast<uint32_t>(log_level);
UNREACHABLE();
}
verifier.DumpFailures(LOG_STREAM(severity) << "Verification error in "
<< dex_file->PrettyMethod(method_idx)
<< "\n");
}
if (hard_failure_msg != nullptr) {
CHECK(!verifier.failure_messages_.empty());
*hard_failure_msg =
verifier.failure_messages_[verifier.failure_messages_.size() - 1]->str();
}
result.kind = FailureKind::kHardFailure;
if (callbacks != nullptr) {
// Let the interested party know that we failed the class.
ClassReference ref(dex_file, dex_file->GetIndexForClassDef(class_def));
callbacks->ClassRejected(ref);
}
}
if (VLOG_IS_ON(verifier) || VLOG_IS_ON(verifier_debug)) {
LOG(ERROR) << verifier.info_messages_.str();
verifier.Dump(LOG_STREAM(ERROR));
}
}
if (kTimeVerifyMethod) {
uint64_t duration_ns = NanoTime() - start_ns;
if (duration_ns > MsToNs(Runtime::Current()->GetVerifierLoggingThresholdMs())) {
LOG(WARNING) << "Verification of " << dex_file->PrettyMethod(method_idx)
<< " took " << PrettyDuration(duration_ns)
<< (IsLargeMethod(verifier.CodeItem()) ? " (large method)" : "");
}
}
result.types = verifier.encountered_failure_types_;
return result;
}
MethodVerifier* MethodVerifier::VerifyMethodAndDump(Thread* self,
VariableIndentationOutputStream* vios,
uint32_t dex_method_idx,
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
const dex::ClassDef& class_def,
const dex::CodeItem* code_item,
ArtMethod* method,
uint32_t method_access_flags,
uint32_t api_level) {
MethodVerifier* verifier = new MethodVerifier(self,
dex_file,
dex_cache,
class_loader,
class_def,
code_item,
dex_method_idx,
method,
method_access_flags,
/* can_load_classes= */ true,
/* allow_soft_failures= */ true,
/* need_precise_constants= */ true,
/* verify_to_dump= */ true,
/* allow_thread_suspension= */ true,
api_level);
verifier->Verify();
verifier->DumpFailures(vios->Stream());
vios->Stream() << verifier->info_messages_.str();
// Only dump and return if there are no hard failures. Otherwise the verifier may not be fully
// and querying any info is dangerous/can abort.
if (verifier->have_pending_hard_failure_) {
delete verifier;
return nullptr;
} else {
verifier->Dump(vios);
return verifier;
}
}
MethodVerifier::MethodVerifier(Thread* self,
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
const dex::ClassDef& class_def,
const dex::CodeItem* code_item,
uint32_t dex_method_idx,
ArtMethod* method,
uint32_t method_access_flags,
bool can_load_classes,
bool allow_soft_failures,
bool need_precise_constants,
bool verify_to_dump,
bool allow_thread_suspension,
uint32_t api_level)
: self_(self),
arena_stack_(Runtime::Current()->GetArenaPool()),
allocator_(&arena_stack_),
reg_types_(can_load_classes, allocator_, allow_thread_suspension),
reg_table_(allocator_),
work_insn_idx_(dex::kDexNoIndex),
dex_method_idx_(dex_method_idx),
method_being_verified_(method),
method_access_flags_(method_access_flags),
return_type_(nullptr),
dex_file_(dex_file),
dex_cache_(dex_cache),
class_loader_(class_loader),
class_def_(class_def),
code_item_accessor_(*dex_file, code_item),
declaring_class_(nullptr),
interesting_dex_pc_(-1),
monitor_enter_dex_pcs_(nullptr),
have_pending_hard_failure_(false),
have_pending_runtime_throw_failure_(false),
have_pending_experimental_failure_(false),
have_any_pending_runtime_throw_failure_(false),
new_instance_count_(0),
monitor_enter_count_(0),
encountered_failure_types_(0),
can_load_classes_(can_load_classes),
allow_soft_failures_(allow_soft_failures),
need_precise_constants_(need_precise_constants),
has_check_casts_(false),
has_virtual_or_interface_invokes_(false),
verify_to_dump_(verify_to_dump),
allow_thread_suspension_(allow_thread_suspension),
is_constructor_(false),
link_(nullptr),
api_level_(api_level == 0 ? std::numeric_limits<uint32_t>::max() : api_level) {
self->PushVerifier(this);
}
MethodVerifier::~MethodVerifier() {
Thread::Current()->PopVerifier(this);
STLDeleteElements(&failure_messages_);
}
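// Illustrative use (a sketch; caller-side names are assumptions): gather the dex pcs
// of monitor-enter instructions whose locks are held at `dex_pc`:
//
//   std::vector<MethodVerifier::DexLockInfo> lock_infos;
//   MethodVerifier::FindLocksAtDexPc(method, dex_pc, &lock_infos, api_level);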
void MethodVerifier::FindLocksAtDexPc(
ArtMethod* m,
uint32_t dex_pc,
std::vector<MethodVerifier::DexLockInfo>* monitor_enter_dex_pcs,
uint32_t api_level) {
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
MethodVerifier verifier(hs.Self(),
m->GetDexFile(),
dex_cache,
class_loader,
m->GetClassDef(),
m->GetCodeItem(),
m->GetDexMethodIndex(),
m,
m->GetAccessFlags(),
/* can_load_classes= */ false,
/* allow_soft_failures= */ true,
/* need_precise_constants= */ false,
/* verify_to_dump= */ false,
/* allow_thread_suspension= */ false,
api_level);
verifier.interesting_dex_pc_ = dex_pc;
verifier.monitor_enter_dex_pcs_ = monitor_enter_dex_pcs;
verifier.FindLocksAtDexPc();
}
void MethodVerifier::FindLocksAtDexPc() {
CHECK(monitor_enter_dex_pcs_ != nullptr);
CHECK(code_item_accessor_.HasCodeItem()); // This only makes sense for methods with code.
// Quick check whether there are any monitor_enter instructions before verifying.
for (const DexInstructionPcPair& inst : code_item_accessor_) {
if (inst->Opcode() == Instruction::MONITOR_ENTER) {
// Strictly speaking, we ought to be able to get away with doing a subset of the full method
// verification. In practice, the phase we want relies on data structures set up by all the
// earlier passes, so we just run the full method verification and bail out early when we've
// got what we wanted.
Verify();
return;
}
}
}
bool MethodVerifier::Verify() {
// Some older code doesn't correctly mark constructors as such. Test for this case by looking at
// the name.
const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
const char* method_name = dex_file_->StringDataByIdx(method_id.name_idx_);
bool instance_constructor_by_name = strcmp("<init>", method_name) == 0;
bool static_constructor_by_name = strcmp("<clinit>", method_name) == 0;
bool constructor_by_name = instance_constructor_by_name || static_constructor_by_name;
// Check that only constructors are tagged, and check for bad code that doesn't tag constructors.
if ((method_access_flags_ & kAccConstructor) != 0) {
if (!constructor_by_name) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "method is marked as constructor, but not named accordingly";
return false;
}
is_constructor_ = true;
} else if (constructor_by_name) {
LOG(WARNING) << "Method " << dex_file_->PrettyMethod(dex_method_idx_)
<< " not marked as constructor.";
is_constructor_ = true;
}
// If it's a constructor, check whether IsStatic() matches the name.
// This should have been rejected by the dex file verifier. Only do in debug build.
if (kIsDebugBuild) {
if (IsConstructor()) {
if (IsStatic() ^ static_constructor_by_name) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "constructor name doesn't match static flag";
return false;
}
}
}
// Methods may only have one of public/protected/private.
// This should have been rejected by the dex file verifier. Only do in debug build.
if (kIsDebugBuild) {
size_t access_mod_count =
(((method_access_flags_ & kAccPublic) == 0) ? 0 : 1) +
(((method_access_flags_ & kAccProtected) == 0) ? 0 : 1) +
(((method_access_flags_ & kAccPrivate) == 0) ? 0 : 1);
if (access_mod_count > 1) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "method has more than one of public/protected/private";
return false;
}
}
// If there aren't any instructions, make sure that's expected, then exit successfully.
if (!code_item_accessor_.HasCodeItem()) {
// Only native or abstract methods may not have code.
if ((method_access_flags_ & (kAccNative | kAccAbstract)) == 0) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "zero-length code in concrete non-native method";
return false;
}
// This should have been rejected by the dex file verifier. Only do in debug build.
// Note: the above will also be rejected in the dex file verifier, starting in dex version 37.
if (kIsDebugBuild) {
if ((method_access_flags_ & kAccAbstract) != 0) {
// Abstract methods are not allowed to have the following flags.
static constexpr uint32_t kForbidden =
kAccPrivate |
kAccStatic |
kAccFinal |
kAccNative |
kAccStrict |
kAccSynchronized;
if ((method_access_flags_ & kForbidden) != 0) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "method can't be abstract and private/static/final/native/strict/synchronized";
return false;
}
}
if ((class_def_.GetJavaAccessFlags() & kAccInterface) != 0) {
// Interface methods must be public and abstract (if default methods are disabled).
uint32_t kRequired = kAccPublic;
if ((method_access_flags_ & kRequired) != kRequired) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be public";
return false;
}
// In addition to the above, interface methods must not be protected.
static constexpr uint32_t kForbidden = kAccProtected;
if ((method_access_flags_ & kForbidden) != 0) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods can't be protected";
return false;
}
}
// We also don't allow constructors to be abstract or native.
if (IsConstructor()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "constructors can't be abstract or native";
return false;
}
}
return true;
}
// This should have been rejected by the dex file verifier. Only do in debug build.
if (kIsDebugBuild) {
// When there's code, the method must not be native or abstract.
if ((method_access_flags_ & (kAccNative | kAccAbstract)) != 0) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "non-zero-length code in abstract or native method";
return false;
}
if ((class_def_.GetJavaAccessFlags() & kAccInterface) != 0) {
// Interfaces may always have static initializers for their fields. If we are running
// with default methods enabled, we also allow other public or private, non-final
// methods to have code. Otherwise the static initializer is the only method allowed
// to have code.
if (!(IsConstructor() && IsStatic())) {
if (IsInstanceConstructor()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-static constructor";
return false;
} else if (method_access_flags_ & kAccFinal) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have final methods";
return false;
} else {
uint32_t access_flag_options = kAccPublic;
if (dex_file_->SupportsDefaultMethods()) {
access_flag_options |= kAccPrivate;
}
if (!(method_access_flags_ & access_flag_options)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "interfaces may not have protected or package-private members";
return false;
}
}
}
}
// Instance constructors must not be synchronized.
if (IsInstanceConstructor()) {
static constexpr uint32_t kForbidden = kAccSynchronized;
if ((method_access_flags_ & kForbidden) != 0) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "constructors can't be synchronized";
return false;
}
}
}
// Sanity-check the register counts. ins + locals = registers, so make sure that ins <= registers.
if (code_item_accessor_.InsSize() > code_item_accessor_.RegistersSize()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad register counts (ins="
<< code_item_accessor_.InsSize()
<< " regs=" << code_item_accessor_.RegistersSize();
return false;
}
// Allocate and initialize an array to hold instruction data.
insn_flags_.reset(allocator_.AllocArray<InstructionFlags>(
code_item_accessor_.InsnsSizeInCodeUnits()));
DCHECK(insn_flags_ != nullptr);
std::uninitialized_fill_n(insn_flags_.get(),
code_item_accessor_.InsnsSizeInCodeUnits(),
InstructionFlags());
// Run through the instructions and see if the width checks out.
bool result = ComputeWidthsAndCountOps();
bool allow_runtime_only_instructions = !Runtime::Current()->IsAotCompiler() || verify_to_dump_;
// Flag instructions guarded by a "try" block and check exception handlers.
result = result && ScanTryCatchBlocks();
// Perform static instruction verification.
result = result && (allow_runtime_only_instructions
? VerifyInstructions<true>()
: VerifyInstructions<false>());
// Perform code-flow analysis and return.
result = result && VerifyCodeFlow();
return result;
}
std::ostream& MethodVerifier::Fail(VerifyError error) {
// Mark the error type as encountered.
encountered_failure_types_ |= static_cast<uint32_t>(error);
switch (error) {
case VERIFY_ERROR_NO_CLASS:
case VERIFY_ERROR_NO_FIELD:
case VERIFY_ERROR_NO_METHOD:
case VERIFY_ERROR_ACCESS_CLASS:
case VERIFY_ERROR_ACCESS_FIELD:
case VERIFY_ERROR_ACCESS_METHOD:
case VERIFY_ERROR_INSTANTIATION:
case VERIFY_ERROR_CLASS_CHANGE:
case VERIFY_ERROR_FORCE_INTERPRETER:
case VERIFY_ERROR_LOCKING:
if (Runtime::Current()->IsAotCompiler() || !can_load_classes_) {
// If we're optimistically running verification at compile time, turn NO_xxx, ACCESS_xxx,
// class change and instantiation errors into soft verification errors so that we re-verify
// at runtime. We may fail to find or to agree on access because of not yet available class
// loaders, or class loaders that will differ at runtime. In these cases, we don't want to
// affect the soundness of the code being compiled. Instead, the generated code runs "slow
// paths" that dynamically perform the verification and cause the behavior to be that akin
// to an interpreter.
error = VERIFY_ERROR_BAD_CLASS_SOFT;
} else {
// If we fail again at runtime, mark that this instruction would throw and force this
// method to be executed using the interpreter with checks.
have_pending_runtime_throw_failure_ = true;
// We need to save the work_line if the instruction wasn't throwing before. Otherwise we'll
// try to merge garbage.
// Note: this assumes that Fail is called before we do any work_line modifications.
// Note: Fail can be called before we touch any instruction, e.g. while checking a
// method's signature, so add a check.
if (work_insn_idx_ < dex::kDexNoIndex) {
const Instruction& inst = code_item_accessor_.InstructionAt(work_insn_idx_);
int opcode_flags = Instruction::FlagsOf(inst.Opcode());
if ((opcode_flags & Instruction::kThrow) == 0 && CurrentInsnFlags()->IsInTry()) {
saved_line_->CopyFromLine(work_line_.get());
}
}
}
break;
// Indication that verification should be retried at runtime.
case VERIFY_ERROR_BAD_CLASS_SOFT:
if (!allow_soft_failures_) {
have_pending_hard_failure_ = true;
}
break;
// Hard verification failures at compile time will still fail at runtime, so the class is
// marked as rejected to prevent it from being compiled.
case VERIFY_ERROR_BAD_CLASS_HARD: {
have_pending_hard_failure_ = true;
if (VLOG_IS_ON(verifier) && kDumpRegLinesOnHardFailureIfVLOG) {
ScopedObjectAccess soa(Thread::Current());
std::ostringstream oss;
Dump(oss);
LOG(ERROR) << oss.str();
}
break;
}
}
failures_.push_back(error);
std::string location(StringPrintf("%s: [0x%X] ", dex_file_->PrettyMethod(dex_method_idx_).c_str(),
work_insn_idx_));
std::ostringstream* failure_message = new std::ostringstream(location, std::ostringstream::ate);
failure_messages_.push_back(failure_message);
return *failure_message;
}
ScopedNewLine MethodVerifier::LogVerifyInfo() {
ScopedNewLine ret{info_messages_};
ret << "VFY: " << dex_file_->PrettyMethod(dex_method_idx_)
<< '[' << reinterpret_cast<void*>(work_insn_idx_) << "] : ";
return ret;
}
void MethodVerifier::PrependToLastFailMessage(std::string prepend) {
size_t failure_num = failure_messages_.size();
DCHECK_NE(failure_num, 0U);
std::ostringstream* last_fail_message = failure_messages_[failure_num - 1];
prepend += last_fail_message->str();
failure_messages_[failure_num - 1] = new std::ostringstream(prepend, std::ostringstream::ate);
delete last_fail_message;
}
void MethodVerifier::AppendToLastFailMessage(const std::string& append) {
size_t failure_num = failure_messages_.size();
DCHECK_NE(failure_num, 0U);
std::ostringstream* last_fail_message = failure_messages_[failure_num - 1];
(*last_fail_message) << append;
}
bool MethodVerifier::ComputeWidthsAndCountOps() {
size_t new_instance_count = 0;
size_t monitor_enter_count = 0;
// We can't assume the instruction is well formed; handle the case where calculating the size
// goes past the end of the code item.
SafeDexInstructionIterator it(code_item_accessor_.begin(), code_item_accessor_.end());
for ( ; !it.IsErrorState() && it < code_item_accessor_.end(); ++it) {
// In case the instruction goes past the end of the code item, make sure to not process it.
SafeDexInstructionIterator next = it;
++next;
if (next.IsErrorState()) {
break;
}
Instruction::Code opcode = it->Opcode();
switch (opcode) {
case Instruction::APUT_OBJECT:
case Instruction::CHECK_CAST:
has_check_casts_ = true;
break;
case Instruction::INVOKE_VIRTUAL:
case Instruction::INVOKE_VIRTUAL_RANGE:
case Instruction::INVOKE_INTERFACE:
case Instruction::INVOKE_INTERFACE_RANGE:
has_virtual_or_interface_invokes_ = true;
break;
case Instruction::MONITOR_ENTER:
monitor_enter_count++;
break;
case Instruction::NEW_INSTANCE:
new_instance_count++;
break;
default:
break;
}
GetInstructionFlags(it.DexPc()).SetIsOpcode();
}
if (it != code_item_accessor_.end()) {
const size_t insns_size = code_item_accessor_.InsnsSizeInCodeUnits();
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "code did not end where expected ("
<< it.DexPc() << " vs. " << insns_size << ")";
return false;
}
new_instance_count_ = new_instance_count;
monitor_enter_count_ = monitor_enter_count;
return true;
}
bool MethodVerifier::ScanTryCatchBlocks() {
const uint32_t tries_size = code_item_accessor_.TriesSize();
if (tries_size == 0) {
return true;
}
const uint32_t insns_size = code_item_accessor_.InsnsSizeInCodeUnits();
for (const dex::TryItem& try_item : code_item_accessor_.TryItems()) {
const uint32_t start = try_item.start_addr_;
const uint32_t end = start + try_item.insn_count_;
if ((start >= end) || (start >= insns_size) || (end > insns_size)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad exception entry: startAddr=" << start
<< " endAddr=" << end << " (size=" << insns_size << ")";
return false;
}
if (!GetInstructionFlags(start).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "'try' block starts inside an instruction (" << start << ")";
return false;
}
DexInstructionIterator end_it(code_item_accessor_.Insns(), end);
for (DexInstructionIterator it(code_item_accessor_.Insns(), start); it < end_it; ++it) {
GetInstructionFlags(it.DexPc()).SetInTry();
}
}
// Iterate over each of the handlers to verify target addresses.
const uint8_t* handlers_ptr = code_item_accessor_.GetCatchHandlerData();
const uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
ClassLinker* linker = Runtime::Current()->GetClassLinker();
for (uint32_t idx = 0; idx < handlers_size; idx++) {
CatchHandlerIterator iterator(handlers_ptr);
for (; iterator.HasNext(); iterator.Next()) {
uint32_t dex_pc = iterator.GetHandlerAddress();
if (!GetInstructionFlags(dex_pc).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "exception handler starts at bad address (" << dex_pc << ")";
return false;
}
if (!CheckNotMoveResult(code_item_accessor_.Insns(), dex_pc)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "exception handler begins with move-result* (" << dex_pc << ")";
return false;
}
GetInstructionFlags(dex_pc).SetBranchTarget();
// Ensure exception types are resolved so that they don't need resolution to be delivered;
// unresolved exception types will be ignored by exception delivery.
if (iterator.GetHandlerTypeIndex().IsValid()) {
ObjPtr<mirror::Class> exception_type =
linker->ResolveType(iterator.GetHandlerTypeIndex(), dex_cache_, class_loader_);
if (exception_type == nullptr) {
DCHECK(self_->IsExceptionPending());
self_->ClearException();
}
}
}
handlers_ptr = iterator.EndDataPointer();
}
return true;
}
template <bool kAllowRuntimeOnlyInstructions>
bool MethodVerifier::VerifyInstructions() {
/* Flag the start of the method as a branch target, and a GC point due to stack overflow errors */
GetInstructionFlags(0).SetBranchTarget();
GetInstructionFlags(0).SetCompileTimeInfoPoint();
for (const DexInstructionPcPair& inst : code_item_accessor_) {
const uint32_t dex_pc = inst.DexPc();
if (!VerifyInstruction<kAllowRuntimeOnlyInstructions>(&inst.Inst(), dex_pc)) {
DCHECK_NE(failures_.size(), 0U);
return false;
}
/* Flag instructions that are garbage collection points */
// All invoke points are marked as "Throw" points already.
// We are relying on this to also count all the invokes as interesting.
if (inst->IsBranch()) {
GetInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
// The compiler also needs safepoints for fall-through to loop heads.
// Such a loop head must be a target of a branch.
int32_t offset = 0;
bool cond, self_ok;
bool target_ok = GetBranchOffset(dex_pc, &offset, &cond, &self_ok);
DCHECK(target_ok);
GetInstructionFlags(dex_pc + offset).SetCompileTimeInfoPoint();
} else if (inst->IsSwitch() || inst->IsThrow()) {
GetInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
} else if (inst->IsReturn()) {
GetInstructionFlags(dex_pc).SetCompileTimeInfoPointAndReturn();
}
}
return true;
}
template <bool kAllowRuntimeOnlyInstructions>
bool MethodVerifier::VerifyInstruction(const Instruction* inst, uint32_t code_offset) {
if (Instruction::kHaveExperimentalInstructions && UNLIKELY(inst->IsExperimental())) {
// Experimental instructions don't yet have verifier support implementation.
// While it is possible to use them by themselves, when we try to use stable instructions
// with a virtual register that was created by an experimental instruction,
// the data flow analysis will fail.
Fail(VERIFY_ERROR_FORCE_INTERPRETER)
<< "experimental instruction is not supported by verifier; skipping verification";
have_pending_experimental_failure_ = true;
return false;
}
bool result = true;
switch (inst->GetVerifyTypeArgumentA()) {
case Instruction::kVerifyRegA:
result = result && CheckRegisterIndex(inst->VRegA());
break;
case Instruction::kVerifyRegAWide:
result = result && CheckWideRegisterIndex(inst->VRegA());
break;
}
switch (inst->GetVerifyTypeArgumentB()) {
case Instruction::kVerifyRegB:
result = result && CheckRegisterIndex(inst->VRegB());
break;
case Instruction::kVerifyRegBField:
result = result && CheckFieldIndex(inst->VRegB());
break;
case Instruction::kVerifyRegBMethod:
result = result && CheckMethodIndex(inst->VRegB());
break;
case Instruction::kVerifyRegBNewInstance:
result = result && CheckNewInstance(dex::TypeIndex(inst->VRegB()));
break;
case Instruction::kVerifyRegBString:
result = result && CheckStringIndex(inst->VRegB());
break;
case Instruction::kVerifyRegBType:
result = result && CheckTypeIndex(dex::TypeIndex(inst->VRegB()));
break;
case Instruction::kVerifyRegBWide:
result = result && CheckWideRegisterIndex(inst->VRegB());
break;
case Instruction::kVerifyRegBCallSite:
result = result && CheckCallSiteIndex(inst->VRegB());
break;
case Instruction::kVerifyRegBMethodHandle:
result = result && CheckMethodHandleIndex(inst->VRegB());
break;
case Instruction::kVerifyRegBPrototype:
result = result && CheckPrototypeIndex(inst->VRegB());
break;
}
switch (inst->GetVerifyTypeArgumentC()) {
case Instruction::kVerifyRegC:
result = result && CheckRegisterIndex(inst->VRegC());
break;
case Instruction::kVerifyRegCField:
result = result && CheckFieldIndex(inst->VRegC());
break;
case Instruction::kVerifyRegCNewArray:
result = result && CheckNewArray(dex::TypeIndex(inst->VRegC()));
break;
case Instruction::kVerifyRegCType:
result = result && CheckTypeIndex(dex::TypeIndex(inst->VRegC()));
break;
case Instruction::kVerifyRegCWide:
result = result && CheckWideRegisterIndex(inst->VRegC());
break;
}
switch (inst->GetVerifyTypeArgumentH()) {
case Instruction::kVerifyRegHPrototype:
result = result && CheckPrototypeIndex(inst->VRegH());
break;
}
switch (inst->GetVerifyExtraFlags()) {
case Instruction::kVerifyArrayData:
result = result && CheckArrayData(code_offset);
break;
case Instruction::kVerifyBranchTarget:
result = result && CheckBranchTarget(code_offset);
break;
case Instruction::kVerifySwitchTargets:
result = result && CheckSwitchTargets(code_offset);
break;
case Instruction::kVerifyVarArgNonZero:
// Fall-through.
case Instruction::kVerifyVarArg: {
// Instructions that can actually return a negative value shouldn't have this flag.
uint32_t v_a = dchecked_integral_cast<uint32_t>(inst->VRegA());
if ((inst->GetVerifyExtraFlags() == Instruction::kVerifyVarArgNonZero && v_a == 0) ||
v_a > Instruction::kMaxVarArgRegs) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid arg count (" << v_a << ") in "
"non-range invoke";
return false;
}
uint32_t args[Instruction::kMaxVarArgRegs];
inst->GetVarArgs(args);
result = result && CheckVarArgRegs(v_a, args);
break;
}
case Instruction::kVerifyVarArgRangeNonZero:
// Fall-through.
case Instruction::kVerifyVarArgRange:
if (inst->GetVerifyExtraFlags() == Instruction::kVerifyVarArgRangeNonZero &&
inst->VRegA() <= 0) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid arg count (" << inst->VRegA() << ") in "
"range invoke";
return false;
}
result = result && CheckVarArgRangeRegs(inst->VRegA(), inst->VRegC());
break;
case Instruction::kVerifyError:
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected opcode " << inst->Name();
result = false;
break;
}
if (!kAllowRuntimeOnlyInstructions && inst->GetVerifyIsRuntimeOnly()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "opcode only expected at runtime " << inst->Name();
result = false;
}
return result;
}
inline bool MethodVerifier::CheckRegisterIndex(uint32_t idx) {
if (UNLIKELY(idx >= code_item_accessor_.RegistersSize())) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register index out of range (" << idx << " >= "
<< code_item_accessor_.RegistersSize() << ")";
return false;
}
return true;
}
inline bool MethodVerifier::CheckWideRegisterIndex(uint32_t idx) {
if (UNLIKELY(idx + 1 >= code_item_accessor_.RegistersSize())) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register index out of range (" << idx
<< "+1 >= " << code_item_accessor_.RegistersSize() << ")";
return false;
}
return true;
}
inline bool MethodVerifier::CheckCallSiteIndex(uint32_t idx) {
uint32_t limit = dex_file_->NumCallSiteIds();
if (UNLIKELY(idx >= limit)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad call site index " << idx << " (max "
<< limit << ")";
return false;
}
return true;
}
inline bool MethodVerifier::CheckFieldIndex(uint32_t idx) {
if (UNLIKELY(idx >= dex_file_->GetHeader().field_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad field index " << idx << " (max "
<< dex_file_->GetHeader().field_ids_size_ << ")";
return false;
}
return true;
}
inline bool MethodVerifier::CheckMethodIndex(uint32_t idx) {
if (UNLIKELY(idx >= dex_file_->GetHeader().method_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad method index " << idx << " (max "
<< dex_file_->GetHeader().method_ids_size_ << ")";
return false;
}
return true;
}
inline bool MethodVerifier::CheckMethodHandleIndex(uint32_t idx) {
uint32_t limit = dex_file_->NumMethodHandles();
if (UNLIKELY(idx >= limit)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad method handle index " << idx << " (max "
<< limit << ")";
return false;
}
return true;
}
inline bool MethodVerifier::CheckNewInstance(dex::TypeIndex idx) {
if (UNLIKELY(idx.index_ >= dex_file_->GetHeader().type_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx.index_ << " (max "
<< dex_file_->GetHeader().type_ids_size_ << ")";
return false;
}
// We don't need the actual class, just a pointer to the class name.
const char* descriptor = dex_file_->StringByTypeIdx(idx);
if (UNLIKELY(descriptor[0] != 'L')) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "can't call new-instance on type '" << descriptor << "'";
return false;
} else if (UNLIKELY(strcmp(descriptor, "Ljava/lang/Class;") == 0)) {
// A new-instance of java.lang.Class is not allowed. Fall back to the interpreter to ensure an
// exception is thrown when this statement is executed (compiled code would not do that).
Fail(VERIFY_ERROR_INSTANTIATION);
}
return true;
}
inline bool MethodVerifier::CheckPrototypeIndex(uint32_t idx) {
if (UNLIKELY(idx >= dex_file_->GetHeader().proto_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad prototype index " << idx << " (max "
<< dex_file_->GetHeader().proto_ids_size_ << ")";
return false;
}
return true;
}
inline bool MethodVerifier::CheckStringIndex(uint32_t idx) {
if (UNLIKELY(idx >= dex_file_->GetHeader().string_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad string index " << idx << " (max "
<< dex_file_->GetHeader().string_ids_size_ << ")";
return false;
}
return true;
}
inline bool MethodVerifier::CheckTypeIndex(dex::TypeIndex idx) {
if (UNLIKELY(idx.index_ >= dex_file_->GetHeader().type_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx.index_ << " (max "
<< dex_file_->GetHeader().type_ids_size_ << ")";
return false;
}
return true;
}
bool MethodVerifier::CheckNewArray(dex::TypeIndex idx) {
if (UNLIKELY(idx.index_ >= dex_file_->GetHeader().type_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx.index_ << " (max "
<< dex_file_->GetHeader().type_ids_size_ << ")";
return false;
}
int bracket_count = 0;
const char* descriptor = dex_file_->StringByTypeIdx(idx);
const char* cp = descriptor;
while (*cp++ == '[') {
bracket_count++;
}
if (UNLIKELY(bracket_count == 0)) {
/* The given class must be an array type. */
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "can't new-array class '" << descriptor << "' (not an array)";
return false;
} else if (UNLIKELY(bracket_count > 255)) {
/* It is illegal to create an array of more than 255 dimensions. */
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "can't new-array class '" << descriptor << "' (exceeds limit)";
return false;
}
return true;
}
bool MethodVerifier::CheckArrayData(uint32_t cur_offset) {
const uint32_t insn_count = code_item_accessor_.InsnsSizeInCodeUnits();
const uint16_t* insns = code_item_accessor_.Insns() + cur_offset;
const uint16_t* array_data;
int32_t array_data_offset;
DCHECK_LT(cur_offset, insn_count);
/* make sure the start of the array data table is in range */
array_data_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
if (UNLIKELY(static_cast<int32_t>(cur_offset) + array_data_offset < 0 ||
cur_offset + array_data_offset + 2 >= insn_count)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid array data start: at " << cur_offset
<< ", data offset " << array_data_offset
<< ", count " << insn_count;
return false;
}
/* offset to array data table is a relative branch-style offset */
array_data = insns + array_data_offset;
// Make sure the table is at an even dex pc, that is, 32-bit aligned.
if (UNLIKELY(!IsAligned<4>(array_data))) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unaligned array data table: at " << cur_offset
<< ", data offset " << array_data_offset;
return false;
}
// Make sure the array-data is marked as an opcode. This ensures that it was reached when
// traversing the code item linearly. It is an approximation for a by-spec padding value.
if (UNLIKELY(!GetInstructionFlags(cur_offset + array_data_offset).IsOpcode())) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array data table at " << cur_offset
<< ", data offset " << array_data_offset
<< " not correctly visited, probably bad padding.";
return false;
}
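// fill-array-data payload layout, in 16-bit code units:
//   [0] signature (0x0300), [1] element width in bytes,
//   [2..3] element count as a 32-bit value, then the raw data.
// Hence table_size below is 4 + ceil(width * count / 2) code units.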
uint32_t value_width = array_data[1];
uint32_t value_count = *reinterpret_cast<const uint32_t*>(&array_data[2]);
uint32_t table_size = 4 + (value_width * value_count + 1) / 2;
/* make sure the end of the switch is in range */
if (UNLIKELY(cur_offset + array_data_offset + table_size > insn_count)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid array data end: at " << cur_offset
<< ", data offset " << array_data_offset << ", end "
<< cur_offset + array_data_offset + table_size
<< ", count " << insn_count;
return false;
}
return true;
}
bool MethodVerifier::CheckBranchTarget(uint32_t cur_offset) {
int32_t offset;
bool isConditional, selfOkay;
if (!GetBranchOffset(cur_offset, &offset, &isConditional, &selfOkay)) {
return false;
}
if (UNLIKELY(!selfOkay && offset == 0)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "branch offset of zero not allowed at"
<< reinterpret_cast<void*>(cur_offset);
return false;
}
// Check for 32-bit overflow. This isn't strictly necessary if we can depend on the runtime
// to have identical "wrap-around" behavior, but it's unwise to depend on that.
if (UNLIKELY(((int64_t) cur_offset + (int64_t) offset) != (int64_t) (cur_offset + offset))) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "branch target overflow "
<< reinterpret_cast<void*>(cur_offset) << " +" << offset;
return false;
}
int32_t abs_offset = cur_offset + offset;
if (UNLIKELY(abs_offset < 0 ||
(uint32_t) abs_offset >= code_item_accessor_.InsnsSizeInCodeUnits() ||
!GetInstructionFlags(abs_offset).IsOpcode())) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid branch target " << offset << " (-> "
<< reinterpret_cast<void*>(abs_offset) << ") at "
<< reinterpret_cast<void*>(cur_offset);
return false;
}
GetInstructionFlags(abs_offset).SetBranchTarget();
return true;
}
bool MethodVerifier::GetBranchOffset(uint32_t cur_offset, int32_t* pOffset, bool* pConditional,
bool* selfOkay) {
const uint16_t* insns = code_item_accessor_.Insns() + cur_offset;
*pConditional = false;
*selfOkay = false;
switch (*insns & 0xff) {
case Instruction::GOTO:
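// 10t format: the signed 8-bit offset lives in the high byte of the opcode
// unit; casting to int16_t and arithmetic-shifting right by 8 extracts it
// with sign extension.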
*pOffset = ((int16_t) *insns) >> 8;
break;
case Instruction::GOTO_32:
*pOffset = insns[1] | (((uint32_t) insns[2]) << 16);
*selfOkay = true;
break;
case Instruction::GOTO_16:
*pOffset = (int16_t) insns[1];
break;
case Instruction::IF_EQ:
case Instruction::IF_NE:
case Instruction::IF_LT:
case Instruction::IF_GE:
case Instruction::IF_GT:
case Instruction::IF_LE:
case Instruction::IF_EQZ:
case Instruction::IF_NEZ:
case Instruction::IF_LTZ:
case Instruction::IF_GEZ:
case Instruction::IF_GTZ:
case Instruction::IF_LEZ:
*pOffset = (int16_t) insns[1];
*pConditional = true;
break;
default:
return false;
}
return true;
}
bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) {
const uint32_t insn_count = code_item_accessor_.InsnsSizeInCodeUnits();
DCHECK_LT(cur_offset, insn_count);
const uint16_t* insns = code_item_accessor_.Insns() + cur_offset;
/* make sure the start of the switch is in range */
int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
if (UNLIKELY(static_cast<int32_t>(cur_offset) + switch_offset < 0 ||
cur_offset + switch_offset + 2 > insn_count)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch start: at " << cur_offset
<< ", switch offset " << switch_offset
<< ", count " << insn_count;
return false;
}
/* offset to switch table is a relative branch-style offset */
const uint16_t* switch_insns = insns + switch_offset;
// Make sure the table is at an even dex pc, that is, 32-bit aligned.
if (UNLIKELY(!IsAligned<4>(switch_insns))) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unaligned switch table: at " << cur_offset
<< ", switch offset " << switch_offset;
return false;
}
// Make sure the switch data is marked as an opcode. This ensures that it was reached when
// traversing the code item linearly. It is an approximation for a by-spec padding value.
if (UNLIKELY(!GetInstructionFlags(cur_offset + switch_offset).IsOpcode())) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "switch table at " << cur_offset
<< ", switch offset " << switch_offset
<< " not correctly visited, probably bad padding.";
return false;
}
bool is_packed_switch = (*insns & 0xff) == Instruction::PACKED_SWITCH;
uint32_t switch_count = switch_insns[1];
int32_t targets_offset;
uint16_t expected_signature;
if (is_packed_switch) {
/* 0=sig, 1=count, 2/3=firstKey */
targets_offset = 4;
expected_signature = Instruction::kPackedSwitchSignature;
} else {
/* 0=sig, 1=count, 2..count*2 = keys */
targets_offset = 2 + 2 * switch_count;
expected_signature = Instruction::kSparseSwitchSignature;
}
uint32_t table_size = targets_offset + switch_count * 2;
if (UNLIKELY(switch_insns[0] != expected_signature)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< StringPrintf("wrong signature for switch table (%x, wanted %x)",
switch_insns[0], expected_signature);
return false;
}
/* make sure the end of the switch is in range */
if (UNLIKELY(cur_offset + switch_offset + table_size > (uint32_t) insn_count)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch end: at " << cur_offset
<< ", switch offset " << switch_offset
<< ", end " << (cur_offset + switch_offset + table_size)
<< ", count " << insn_count;
return false;
}
constexpr int32_t keys_offset = 2;
if (switch_count > 1) {
if (is_packed_switch) {
/* for a packed switch, verify that keys do not overflow int32 */
int32_t first_key = switch_insns[keys_offset] | (switch_insns[keys_offset + 1] << 16);
int32_t max_first_key =
std::numeric_limits<int32_t>::max() - (static_cast<int32_t>(switch_count) - 1);
if (UNLIKELY(first_key > max_first_key)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid packed switch: first_key=" << first_key
<< ", switch_count=" << switch_count;
return false;
}
} else {
/* for a sparse switch, verify the keys are in ascending order */
int32_t last_key = switch_insns[keys_offset] | (switch_insns[keys_offset + 1] << 16);
for (uint32_t targ = 1; targ < switch_count; targ++) {
int32_t key =
static_cast<int32_t>(switch_insns[keys_offset + targ * 2]) |
static_cast<int32_t>(switch_insns[keys_offset + targ * 2 + 1] << 16);
if (UNLIKELY(key <= last_key)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid sparse switch: last key=" << last_key
<< ", this=" << key;
return false;
}
last_key = key;
}
}
}
/* verify each switch target */
for (uint32_t targ = 0; targ < switch_count; targ++) {
int32_t offset = static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
int32_t abs_offset = cur_offset + offset;
if (UNLIKELY(abs_offset < 0 ||
abs_offset >= static_cast<int32_t>(insn_count) ||
!GetInstructionFlags(abs_offset).IsOpcode())) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset
<< " (-> " << reinterpret_cast<void*>(abs_offset) << ") at "
<< reinterpret_cast<void*>(cur_offset)
<< "[" << targ << "]";
return false;
}
GetInstructionFlags(abs_offset).SetBranchTarget();
}
return true;
}
bool MethodVerifier::CheckVarArgRegs(uint32_t vA, uint32_t arg[]) {
uint16_t registers_size = code_item_accessor_.RegistersSize();
for (uint32_t idx = 0; idx < vA; idx++) {
if (UNLIKELY(arg[idx] >= registers_size)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid reg index (" << arg[idx]
<< ") in non-range invoke (>= " << registers_size << ")";
return false;
}
}
return true;
}
bool MethodVerifier::CheckVarArgRangeRegs(uint32_t vA, uint32_t vC) {
uint16_t registers_size = code_item_accessor_.RegistersSize();
// vA/vC are unsigned 8-bit/16-bit quantities for /range instructions, so there's no risk of
// integer overflow when adding them here.
if (UNLIKELY(vA + vC > registers_size)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid reg index " << vA << "+" << vC
<< " in range invoke (> " << registers_size << ")";
return false;
}
return true;
}
bool MethodVerifier::VerifyCodeFlow() {
const uint16_t registers_size = code_item_accessor_.RegistersSize();
/* Create and initialize table holding register status */
reg_table_.Init(kTrackCompilerInterestPoints,
insn_flags_.get(),
code_item_accessor_.InsnsSizeInCodeUnits(),
registers_size,
this);
work_line_.reset(RegisterLine::Create(registers_size, this));
saved_line_.reset(RegisterLine::Create(registers_size, this));
/* Initialize register types of method arguments. */
if (!SetTypesFromSignature()) {
DCHECK_NE(failures_.size(), 0U);
std::string prepend("Bad signature in ");
prepend += dex_file_->PrettyMethod(dex_method_idx_);
PrependToLastFailMessage(prepend);
return false;
}
// We may have a pending runtime throw failure at this point; clear it.
have_pending_runtime_throw_failure_ = false;
/* Perform code flow verification. */
if (!CodeFlowVerifyMethod()) {
DCHECK_NE(failures_.size(), 0U);
return false;
}
return true;
}
std::ostream& MethodVerifier::DumpFailures(std::ostream& os) {
DCHECK_EQ(failures_.size(), failure_messages_.size());
for (size_t i = 0; i < failures_.size(); ++i) {
os << failure_messages_[i]->str() << "\n";
}
return os;
}
void MethodVerifier::Dump(std::ostream& os) {
VariableIndentationOutputStream vios(&os);
Dump(&vios);
}
void MethodVerifier::Dump(VariableIndentationOutputStream* vios) {
if (!code_item_accessor_.HasCodeItem()) {
vios->Stream() << "Native method\n";
return;
}
{
vios->Stream() << "Register Types:\n";
ScopedIndentation indent1(vios);
reg_types_.Dump(vios->Stream());
}
vios->Stream() << "Dumping instructions and register lines:\n";
ScopedIndentation indent1(vios);
for (const DexInstructionPcPair& inst : code_item_accessor_) {
const size_t dex_pc = inst.DexPc();
// Might be asked to dump before the table is initialized.
if (reg_table_.IsInitialized()) {
RegisterLine* reg_line = reg_table_.GetLine(dex_pc);
if (reg_line != nullptr) {
vios->Stream() << reg_line->Dump(this) << "\n";
}
}
vios->Stream()
<< StringPrintf("0x%04zx", dex_pc) << ": " << GetInstructionFlags(dex_pc).ToString() << " ";
const bool kDumpHexOfInstruction = false;
if (kDumpHexOfInstruction) {
vios->Stream() << inst->DumpHex(5) << " ";
}
vios->Stream() << inst->DumpString(dex_file_) << "\n";
}
}
static bool IsPrimitiveDescriptor(char descriptor) {
switch (descriptor) {
case 'I':
case 'C':
case 'S':
case 'B':
case 'Z':
case 'F':
case 'D':
case 'J':
return true;
default:
return false;
}
}
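// Arguments occupy the last InsSize() registers of the frame. As an illustrative
// layout (not taken from a real method): a virtual method with signature (JI)V,
// registers_size=6 and ins_size=4 maps v2 = this, v3/v4 = long (lo/hi), v5 = int.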
bool MethodVerifier::SetTypesFromSignature() {
RegisterLine* reg_line = reg_table_.GetLine(0);
// Should have been verified earlier.
DCHECK_GE(code_item_accessor_.RegistersSize(), code_item_accessor_.InsSize());
uint32_t arg_start = code_item_accessor_.RegistersSize() - code_item_accessor_.InsSize();
size_t expected_args = code_item_accessor_.InsSize(); /* long/double count as two */
// Include the "this" pointer.
size_t cur_arg = 0;
if (!IsStatic()) {
if (expected_args == 0) {
// Expect at least a receiver.
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected 0 args, but method is not static";
return false;
}
// If this is a constructor for a class other than java.lang.Object, mark the first ("this")
// argument as uninitialized. This restricts field access until the superclass constructor is
// called.
const RegType& declaring_class = GetDeclaringClass();
if (IsConstructor()) {
if (declaring_class.IsJavaLangObject()) {
// "this" is implicitly initialized.
reg_line->SetThisInitialized();
reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, declaring_class);
} else {
reg_line->SetRegisterType<LockOp::kClear>(
this,
arg_start + cur_arg,
reg_types_.UninitializedThisArgument(declaring_class));
}
} else {
reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, declaring_class);
}
cur_arg++;
}
const dex::ProtoId& proto_id =
dex_file_->GetMethodPrototype(dex_file_->GetMethodId(dex_method_idx_));
DexFileParameterIterator iterator(*dex_file_, proto_id);
for (; iterator.HasNext(); iterator.Next()) {
const char* descriptor = iterator.GetDescriptor();
if (descriptor == nullptr) {
LOG(FATAL) << "Null descriptor";
}
if (cur_arg >= expected_args) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected " << expected_args
<< " args, found more (" << descriptor << ")";
return false;
}
switch (descriptor[0]) {
case 'L':
case '[':
// We assume that reference arguments are initialized. The only way it could be otherwise
// (assuming the caller was verified) is if the current method is <init>, but in that case
// it's effectively considered initialized the instant we reach here (in the sense that we
// can return without doing anything or call virtual methods).
{
// Note: don't check access. No error would be thrown for declaring or passing an
// inaccessible class. Only actual accesses to fields or methods will.
const RegType& reg_type = ResolveClass<CheckAccess::kNo>(iterator.GetTypeIdx());
if (!reg_type.IsNonZeroReferenceTypes()) {
DCHECK(HasFailures());
return false;
}
reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_type);
}
break;
case 'Z':
reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Boolean());
break;
case 'C':
reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Char());
break;
case 'B':
reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Byte());
break;
case 'I':
reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Integer());
break;
case 'S':
reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Short());
break;
case 'F':
reg_line->SetRegisterType<LockOp::kClear>(this, arg_start + cur_arg, reg_types_.Float());
break;
case 'J':
case 'D': {
if (cur_arg + 1 >= expected_args) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected " << expected_args
<< " args, found more (" << descriptor << ")";
return false;
}
const RegType* lo_half;
const RegType* hi_half;
if (descriptor[0] == 'J') {
lo_half = &reg_types_.LongLo();
hi_half = &reg_types_.LongHi();
} else {
lo_half = &reg_types_.DoubleLo();
hi_half = &reg_types_.DoubleHi();
}
reg_line->SetRegisterTypeWide(this, arg_start + cur_arg, *lo_half, *hi_half);
cur_arg++;
break;
}
default:
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected signature type char '"
<< descriptor << "'";
return false;
}
cur_arg++;
}
if (cur_arg != expected_args) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected " << expected_args
<< " arguments, found " << cur_arg;
return false;
}
const char* descriptor = dex_file_->GetReturnTypeDescriptor(proto_id);
// Validate the return type. We don't do the type lookup; we just want to make sure it has
// the right format. The only major difference from the method argument format is that 'V'
// is supported.
bool result;
if (IsPrimitiveDescriptor(descriptor[0]) || descriptor[0] == 'V') {
result = descriptor[1] == '\0';
} else if (descriptor[0] == '[') { // single/multi-dimensional array of object/primitive
size_t i = 0;
do {
i++;
} while (descriptor[i] == '['); // process leading [
if (descriptor[i] == 'L') { // object array
do {
i++; // find closing ;
} while (descriptor[i] != ';' && descriptor[i] != '\0');
result = descriptor[i] == ';';
} else { // primitive array
result = IsPrimitiveDescriptor(descriptor[i]) && descriptor[i + 1] == '\0';
}
} else if (descriptor[0] == 'L') {
// could be more thorough here, but shouldn't be required
size_t i = 0;
do {
i++;
} while (descriptor[i] != ';' && descriptor[i] != '\0');
result = descriptor[i] == ';';
} else {
result = false;
}
if (!result) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected char in return type descriptor '"
<< descriptor << "'";
}
return result;
}
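// Code-flow verification is a fixed-point iteration: an instruction is (re-)simulated
// whenever its "changed" flag is set, and merging register lines into branch targets
// can set the flag again; the loop ends when no instruction remains marked.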
bool MethodVerifier::CodeFlowVerifyMethod() {
const uint16_t* insns = code_item_accessor_.Insns();
const uint32_t insns_size = code_item_accessor_.InsnsSizeInCodeUnits();
/* Begin by marking the first instruction as "changed". */
GetInstructionFlags(0).SetChanged();
uint32_t start_guess = 0;
/* Continue until no instructions are marked "changed". */
while (true) {
if (allow_thread_suspension_) {
self_->AllowThreadSuspension();
}
// Find the first marked one. Use "start_guess" as a way to find one quickly.
uint32_t insn_idx = start_guess;
for (; insn_idx < insns_size; insn_idx++) {
if (GetInstructionFlags(insn_idx).IsChanged())
break;
}
if (insn_idx == insns_size) {
if (start_guess != 0) {
/* try again, starting from the top */
start_guess = 0;
continue;
} else {
/* all flags are clear */
break;
}
}
// We carry the working set of registers from instruction to instruction. If this address can
// be the target of a branch (or throw) instruction, or if we're skipping around chasing
// "changed" flags, we need to load the set of registers from the table.
// Because we always prefer to continue on to the next instruction, we should never have a
// situation where we have a stray "changed" flag set on an instruction that isn't a branch
// target.
work_insn_idx_ = insn_idx;
if (GetInstructionFlags(insn_idx).IsBranchTarget()) {
work_line_->CopyFromLine(reg_table_.GetLine(insn_idx));
} else if (kIsDebugBuild) {
/*
* Sanity check: retrieve the stored register line (assuming
* a full table) and make sure it actually matches.
*/
RegisterLine* register_line = reg_table_.GetLine(insn_idx);
if (register_line != nullptr) {
if (work_line_->CompareLine(register_line) != 0) {
Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
LOG(FATAL_WITHOUT_ABORT) << info_messages_.str();
LOG(FATAL) << "work_line diverged in " << dex_file_->PrettyMethod(dex_method_idx_)
<< "@" << reinterpret_cast<void*>(work_insn_idx_) << "\n"
<< " work_line=" << work_line_->Dump(this) << "\n"
<< " expected=" << register_line->Dump(this);
}
}
}
if (!CodeFlowVerifyInstruction(&start_guess)) {
std::string prepend(dex_file_->PrettyMethod(dex_method_idx_));
prepend += " failed to verify: ";
PrependToLastFailMessage(prepend);
return false;
}
/* Clear "changed" and mark as visited. */
GetInstructionFlags(insn_idx).SetVisited();
GetInstructionFlags(insn_idx).ClearChanged();
}
if (UNLIKELY(VLOG_IS_ON(verifier_debug))) {
/*
* Scan for dead code. There's nothing "evil" about dead code
* (besides the wasted space), but it indicates a flaw somewhere
* down the line, possibly in the verifier.
*
* If we've substituted "always throw" instructions into the stream,
* we are almost certainly going to have some dead code.
*/
int dead_start = -1;
for (const DexInstructionPcPair& inst : code_item_accessor_) {
const uint32_t insn_idx = inst.DexPc();
/*
* Switch-statement data doesn't get "visited" by the scanner. It
* may or may not be preceded by a padding NOP (for alignment).
*/
if (insns[insn_idx] == Instruction::kPackedSwitchSignature ||
insns[insn_idx] == Instruction::kSparseSwitchSignature ||
insns[insn_idx] == Instruction::kArrayDataSignature ||
(insns[insn_idx] == Instruction::NOP && (insn_idx + 1 < insns_size) &&
(insns[insn_idx + 1] == Instruction::kPackedSwitchSignature ||
insns[insn_idx + 1] == Instruction::kSparseSwitchSignature ||
insns[insn_idx + 1] == Instruction::kArrayDataSignature))) {
GetInstructionFlags(insn_idx).SetVisited();
}
if (!GetInstructionFlags(insn_idx).IsVisited()) {
if (dead_start < 0) {
dead_start = insn_idx;
}
} else if (dead_start >= 0) {
LogVerifyInfo() << "dead code " << reinterpret_cast<void*>(dead_start)
<< "-" << reinterpret_cast<void*>(insn_idx - 1);
dead_start = -1;
}
}
if (dead_start >= 0) {
LogVerifyInfo()
<< "dead code " << reinterpret_cast<void*>(dead_start)
<< "-" << reinterpret_cast<void*>(code_item_accessor_.InsnsSizeInCodeUnits() - 1);
}
// To dump the state of the verify after a method, do something like:
// if (dex_file_->PrettyMethod(dex_method_idx_) ==
// "boolean java.lang.String.equals(java.lang.Object)") {
// LOG(INFO) << info_messages_.str();
// }
}
return true;
}
// Returns the index of the first final instance field of the given class, or kDexNoIndex if there
// is no such field.
static uint32_t GetFirstFinalInstanceFieldIndex(const DexFile& dex_file, dex::TypeIndex type_idx) {
const dex::ClassDef* class_def = dex_file.FindClassDef(type_idx);
DCHECK(class_def != nullptr);
ClassAccessor accessor(dex_file, *class_def);
for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) {
if (field.IsFinal()) {
return field.GetIndex();
}
}
return dex::kDexNoIndex;
}
// Set up a register line for the given return instruction.
static void AdjustReturnLine(MethodVerifier* verifier,
const Instruction* ret_inst,
RegisterLine* line) {
Instruction::Code opcode = ret_inst->Opcode();
switch (opcode) {
case Instruction::RETURN_VOID:
case Instruction::RETURN_VOID_NO_BARRIER:
SafelyMarkAllRegistersAsConflicts(verifier, line);
break;
case Instruction::RETURN:
case Instruction::RETURN_OBJECT:
line->MarkAllRegistersAsConflictsExcept(verifier, ret_inst->VRegA_11x());
break;
case Instruction::RETURN_WIDE:
line->MarkAllRegistersAsConflictsExceptWide(verifier, ret_inst->VRegA_11x());
break;
default:
LOG(FATAL) << "Unknown return opcode " << opcode;
UNREACHABLE();
}
}
bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
// If we're doing FindLocksAtDexPc, check whether we're at the dex pc we care about.
// We want the state _before_ the instruction, for the case where the dex pc we're
// interested in is itself a monitor-enter instruction (which is a likely place
// for a thread to be suspended).
if (monitor_enter_dex_pcs_ != nullptr && work_insn_idx_ == interesting_dex_pc_) {
monitor_enter_dex_pcs_->clear(); // The new work line is more accurate than the previous one.
std::map<uint32_t, DexLockInfo> depth_to_lock_info;
auto collector = [&](uint32_t dex_reg, uint32_t depth) {
auto insert_pair = depth_to_lock_info.emplace(depth, DexLockInfo(depth));
auto it = insert_pair.first;
auto set_insert_pair = it->second.dex_registers.insert(dex_reg);
DCHECK(set_insert_pair.second);
};
work_line_->IterateRegToLockDepths(collector);
for (auto& pair : depth_to_lock_info) {
monitor_enter_dex_pcs_->push_back(pair.second);
// Map depth to dex PC.
(*monitor_enter_dex_pcs_)[monitor_enter_dex_pcs_->size() - 1].dex_pc =
work_line_->GetMonitorEnterDexPc(pair.second.dex_pc);
}
}
/*
* Once we finish decoding the instruction, we need to figure out where
* we can go from here. There are three possible ways to transfer
* control to another statement:
*
* (1) Continue to the next instruction. Applies to all but
* unconditional branches, method returns, and exception throws.
* (2) Branch to one or more possible locations. Applies to branches
* and switch statements.
* (3) Exception handlers. Applies to any instruction that can
* throw an exception that is handled by an encompassing "try"
* block.
*
* We can also return, in which case there is no successor instruction
* from this point.
*
* The behavior can be determined from the opcode flags.
*/
const uint16_t* insns = code_item_accessor_.Insns() + work_insn_idx_;
const Instruction* inst = Instruction::At(insns);
int opcode_flags = Instruction::FlagsOf(inst->Opcode());
int32_t branch_target = 0;
bool just_set_result = false;
if (UNLIKELY(VLOG_IS_ON(verifier_debug))) {
// Generate processing back trace to debug verifier
LogVerifyInfo() << "Processing " << inst->DumpString(dex_file_) << std::endl
<< work_line_->Dump(this);
}
/*
* Make a copy of the previous register state. If the instruction
* can throw an exception, we will copy/merge this into the "catch"
* address rather than work_line, because we don't want the result
* from the "successful" code path (e.g. a check-cast that "improves"
* a type) to be visible to the exception handler.
*/
if ((opcode_flags & Instruction::kThrow) != 0 && CurrentInsnFlags()->IsInTry()) {
saved_line_->CopyFromLine(work_line_.get());
} else if (kIsDebugBuild) {
saved_line_->FillWithGarbage();
}
DCHECK(!have_pending_runtime_throw_failure_); // Per-instruction flag, should not be set here.
// We need to ensure the work line is consistent while performing validation. When we spot a
// peephole pattern we compute a new line for either the fallthrough instruction or the
// branch target.
RegisterLineArenaUniquePtr branch_line;
RegisterLineArenaUniquePtr fallthrough_line;
switch (inst->Opcode()) {
case Instruction::NOP:
/*
* A "pure" NOP has no effect on anything. Data tables start with
* a signature that looks like a NOP; if we see one of these in
* the course of executing code then we have a problem.
*/
if (inst->VRegA_10x() != 0) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "encountered data table in instruction stream";
}
break;
case Instruction::MOVE:
work_line_->CopyRegister1(this, inst->VRegA_12x(), inst->VRegB_12x(), kTypeCategory1nr);
break;
case Instruction::MOVE_FROM16:
work_line_->CopyRegister1(this, inst->VRegA_22x(), inst->VRegB_22x(), kTypeCategory1nr);
break;
case Instruction::MOVE_16:
work_line_->CopyRegister1(this, inst->VRegA_32x(), inst->VRegB_32x(), kTypeCategory1nr);
break;
case Instruction::MOVE_WIDE:
work_line_->CopyRegister2(this, inst->VRegA_12x(), inst->VRegB_12x());
break;
case Instruction::MOVE_WIDE_FROM16:
work_line_->CopyRegister2(this, inst->VRegA_22x(), inst->VRegB_22x());
break;
case Instruction::MOVE_WIDE_16:
work_line_->CopyRegister2(this, inst->VRegA_32x(), inst->VRegB_32x());
break;
case Instruction::MOVE_OBJECT:
work_line_->CopyRegister1(this, inst->VRegA_12x(), inst->VRegB_12x(), kTypeCategoryRef);
break;
case Instruction::MOVE_OBJECT_FROM16:
work_line_->CopyRegister1(this, inst->VRegA_22x(), inst->VRegB_22x(), kTypeCategoryRef);
break;
case Instruction::MOVE_OBJECT_16:
work_line_->CopyRegister1(this, inst->VRegA_32x(), inst->VRegB_32x(), kTypeCategoryRef);
break;
/*
* The move-result instructions copy data out of a "pseudo-register"
* with the results from the last method invocation. In practice we
* might want to hold the result in an actual CPU register, so the
* Dalvik spec requires that these only appear immediately after an
* invoke or filled-new-array.
*
* These calls invalidate the "result" register. (This is now
* redundant with the reset done below, but it can make the debug info
* easier to read in some cases.)
*/
case Instruction::MOVE_RESULT:
work_line_->CopyResultRegister1(this, inst->VRegA_11x(), false);
break;
case Instruction::MOVE_RESULT_WIDE:
work_line_->CopyResultRegister2(this, inst->VRegA_11x());
break;
case Instruction::MOVE_RESULT_OBJECT:
work_line_->CopyResultRegister1(this, inst->VRegA_11x(), true);
break;
case Instruction::MOVE_EXCEPTION: {
// We do not allow MOVE_EXCEPTION as the first instruction in a method. This is a simple case
// where one entrypoint to the catch block is not actually an exception path.
if (work_insn_idx_ == 0) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "move-exception at pc 0x0";
break;
}
/*
* This statement can only appear as the first instruction in an exception handler. We verify
* that as part of extracting the exception type from the catch block list.
*/
const RegType& res_type = GetCaughtExceptionType();
work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_11x(), res_type);
break;
}
case Instruction::RETURN_VOID:
if (!IsInstanceConstructor() || work_line_->CheckConstructorReturn(this)) {
if (!GetMethodReturnType().IsConflict()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void not expected";
}
}
break;
case Instruction::RETURN:
if (!IsInstanceConstructor() || work_line_->CheckConstructorReturn(this)) {
/* check the method signature */
const RegType& return_type = GetMethodReturnType();
if (!return_type.IsCategory1Types()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected non-category 1 return type "
<< return_type;
} else {
// Compilers may generate synthetic functions that write byte values into boolean fields.
// They may also use integer values for boolean, byte, short, and character return types.
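// e.g. "return v0" from a method declared to return boolean is accepted below even if
// v0 currently holds an integer value.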
const uint32_t vregA = inst->VRegA_11x();
const RegType& src_type = work_line_->GetRegisterType(this, vregA);
bool use_src = ((return_type.IsBoolean() && src_type.IsByte()) ||
((return_type.IsBoolean() || return_type.IsByte() ||
return_type.IsShort() || return_type.IsChar()) &&
src_type.IsInteger()));
/* check the register contents */
bool success =
work_line_->VerifyRegisterType(this, vregA, use_src ? src_type : return_type);
if (!success) {
AppendToLastFailMessage(StringPrintf(" return-1nr on invalid register v%d", vregA));
}
}
}
break;
case Instruction::RETURN_WIDE:
if (!IsInstanceConstructor() || work_line_->CheckConstructorReturn(this)) {
/* check the method signature */
const RegType& return_type = GetMethodReturnType();
if (!return_type.IsCategory2Types()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-wide not expected";
} else {
/* check the register contents */
const uint32_t vregA = inst->VRegA_11x();
bool success = work_line_->VerifyRegisterType(this, vregA, return_type);
if (!success) {
AppendToLastFailMessage(StringPrintf(" return-wide on invalid register v%d", vregA));
}
}
}
break;
case Instruction::RETURN_OBJECT:
if (!IsInstanceConstructor() || work_line_->CheckConstructorReturn(this)) {
const RegType& return_type = GetMethodReturnType();
if (!return_type.IsReferenceTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-object not expected";
} else {
/* return_type is the *expected* return type, not register value */
DCHECK(!return_type.IsZeroOrNull());
DCHECK(!return_type.IsUninitializedReference());
const uint32_t vregA = inst->VRegA_11x();
const RegType& reg_type = work_line_->GetRegisterType(this, vregA);
// Disallow returning undefined, conflict & uninitialized values and verify that the
// reference in vAA is an instance of the "return_type."
if (reg_type.IsUndefined()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "returning undefined register";
} else if (reg_type.IsConflict()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "returning register with conflict";
} else if (reg_type.IsUninitializedTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "returning uninitialized object '"
<< reg_type << "'";
} else if (!reg_type.IsReferenceTypes()) {
// We really do expect a reference here.
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-object returns a non-reference type "
<< reg_type;
} else if (!return_type.IsAssignableFrom(reg_type, this)) {
if (reg_type.IsUnresolvedTypes() || return_type.IsUnresolvedTypes()) {
Fail(VERIFY_ERROR_NO_CLASS) << " can't resolve returned type '" << return_type
<< "' or '" << reg_type << "'";
} else {
bool soft_error = false;
// Check whether arrays are involved. They will show a valid class status, even
// if their components are erroneous.
if (reg_type.IsArrayTypes() && return_type.IsArrayTypes()) {
return_type.CanAssignArray(reg_type, reg_types_, class_loader_, this, &soft_error);
if (soft_error) {
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "array with erroneous component type: "
<< reg_type << " vs " << return_type;
}
}
if (!soft_error) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "returning '" << reg_type
<< "', but expected from declaration '" << return_type << "'";
}
}
}
}
}
break;
/* could be boolean, int, float, or a null reference */
case Instruction::CONST_4: {
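// Sign-extend the 4-bit immediate: e.g. a nibble of 0xF becomes 0xF0000000 after the
// shift up, and the arithmetic shift back down yields -1.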
int32_t val = static_cast<int32_t>(inst->VRegB_11n() << 28) >> 28;
work_line_->SetRegisterType<LockOp::kClear>(
this, inst->VRegA_11n(), DetermineCat1Constant(val, need_precise_constants_));
break;
}
case Instruction::CONST_16: {
int16_t val = static_cast<int16_t>(inst->VRegB_21s());
work_line_->SetRegisterType<LockOp::kClear>(
this, inst->VRegA_21s(), DetermineCat1Constant(val, need_precise_constants_));
break;
}
case Instruction::CONST: {
int32_t val = inst->VRegB_31i();
work_line_->SetRegisterType<LockOp::kClear>(
this, inst->VRegA_31i(), DetermineCat1Constant(val, need_precise_constants_));
break;
}
case Instruction::CONST_HIGH16: {
int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
work_line_->SetRegisterType<LockOp::kClear>(
this, inst->VRegA_21h(), DetermineCat1Constant(val, need_precise_constants_));
break;
}
/* could be long or double; resolved upon use */
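// Wide constants occupy a register pair and are tracked as a low/high half: e.g.
// "const-wide/16 v0, -1" types v0 with the low 32 bits (0xffffffff) and v1 with the
// high 32 bits (also 0xffffffff).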
case Instruction::CONST_WIDE_16: {
int64_t val = static_cast<int16_t>(inst->VRegB_21s());
const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(this, inst->VRegA_21s(), lo, hi);
break;
}
case Instruction::CONST_WIDE_32: {
int64_t val = static_cast<int32_t>(inst->VRegB_31i());
const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(this, inst->VRegA_31i(), lo, hi);
break;
}
case Instruction::CONST_WIDE: {
int64_t val = inst->VRegB_51l();
const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(this, inst->VRegA_51l(), lo, hi);
break;
}
case Instruction::CONST_WIDE_HIGH16: {
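// Only the top 16 bits of the immediate are significant; compilers use this to encode
// simple double literals, e.g. 0x4010 << 48 is 0x4010000000000000, the bit pattern
// of 4.0.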
int64_t val = static_cast<uint64_t>(inst->VRegB_21h()) << 48;
const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(this, inst->VRegA_21h(), lo, hi);
break;
}
case Instruction::CONST_STRING:
work_line_->SetRegisterType<LockOp::kClear>(
this, inst->VRegA_21c(), reg_types_.JavaLangString());
break;
case Instruction::CONST_STRING_JUMBO:
work_line_->SetRegisterType<LockOp::kClear>(
this, inst->VRegA_31c(), reg_types_.JavaLangString());
break;
case Instruction::CONST_CLASS: {
// Get type from instruction if unresolved then we need an access check
// TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
const RegType& res_type = ResolveClass<CheckAccess::kYes>(dex::TypeIndex(inst->VRegB_21c()));
// The register holds a class, i.e. its type is java.lang.Class; on error it will hold Conflict.
work_line_->SetRegisterType<LockOp::kClear>(
this, inst->VRegA_21c(), res_type.IsConflict() ? res_type
: reg_types_.JavaLangClass());
break;
}
case Instruction::CONST_METHOD_HANDLE:
work_line_->SetRegisterType<LockOp::kClear>(
this, inst->VRegA_21c(), reg_types_.JavaLangInvokeMethodHandle());
break;
case Instruction::CONST_METHOD_TYPE:
work_line_->SetRegisterType<LockOp::kClear>(
this, inst->VRegA_21c(), reg_types_.JavaLangInvokeMethodType());
break;
case Instruction::MONITOR_ENTER:
work_line_->PushMonitor(this, inst->VRegA_11x(), work_insn_idx_);
// Check whether the previous instruction is a move-object with vAA as a source, creating
// untracked lock aliasing.
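// For example:
//   move-object v1, v0
//   monitor-enter v0
// leaves v1 aliasing the locked object; redoing the copy below records the lock on
// v1 as well.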
if (0 != work_insn_idx_ && !GetInstructionFlags(work_insn_idx_).IsBranchTarget()) {
uint32_t prev_idx = work_insn_idx_ - 1;
while (0 != prev_idx && !GetInstructionFlags(prev_idx).IsOpcode()) {
prev_idx--;
}
const Instruction& prev_inst = code_item_accessor_.InstructionAt(prev_idx);
switch (prev_inst.Opcode()) {
case Instruction::MOVE_OBJECT:
case Instruction::MOVE_OBJECT_16:
case Instruction::MOVE_OBJECT_FROM16:
if (prev_inst.VRegB() == inst->VRegA_11x()) {
// Redo the copy. This won't change the register types, but update the lock status
// for the aliased register.
work_line_->CopyRegister1(this,
prev_inst.VRegA(),
prev_inst.VRegB(),
kTypeCategoryRef);
}
break;
// Catch a case of register aliasing when two registers are linked to the same
// java.lang.Class object via two consecutive const-class instructions immediately
// preceding a monitor-enter on one of those registers.
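// For example:
//   const-class v0, LFoo;
//   const-class v1, LFoo;
//   monitor-enter v0
// Here v0 and v1 hold the same java.lang.Class instance, so the lock state must be
// mirrored into the aliasing register.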
case Instruction::CONST_CLASS: {
// Get the second previous instruction.
if (prev_idx == 0 || GetInstructionFlags(prev_idx).IsBranchTarget()) {
break;
}
prev_idx--;
while (0 != prev_idx && !GetInstructionFlags(prev_idx).IsOpcode()) {
prev_idx--;
}
const Instruction& prev2_inst = code_item_accessor_.InstructionAt(prev_idx);
// Match the pattern "const-class; const-class; monitor-enter;"
if (prev2_inst.Opcode() != Instruction::CONST_CLASS) {
break;
}
// Ensure both const-classes are called for the same type_idx.
if (prev_inst.VRegB_21c() != prev2_inst.VRegB_21c()) {
break;
}
// Update the lock status for the aliased register.
if (prev_inst.VRegA() == inst->VRegA_11x()) {
work_line_->CopyRegister1(this,
prev2_inst.VRegA(),
inst->VRegA_11x(),
kTypeCategoryRef);
} else if (prev2_inst.VRegA() == inst->VRegA_11x()) {
work_line_->CopyRegister1(this,
prev_inst.VRegA(),
inst->VRegA_11x(),
kTypeCategoryRef);
}
break;
}
default: // Other instruction types ignored.
break;
}
}
break;
case Instruction::MONITOR_EXIT:
/*
* monitor-exit instructions are odd. They can throw exceptions,
* but when they do they act as if they succeeded and the PC is
* pointing to the following instruction. (This behavior goes back
* to the need to handle asynchronous exceptions, a now-deprecated
* feature that Dalvik doesn't support.)
*
* In practice we don't need to worry about this. The only
* exceptions that can be thrown from monitor-exit are for a
* null reference and -exit without a matching -enter. If the
* structured locking checks are working, the former would have
* failed on the -enter instruction, and the latter is impossible.
*
* This is fortunate, because issue 3221411 prevents us from
* chasing the "can throw" path when monitor verification is
* enabled. If we can fully verify the locking we can ignore
* some catch blocks (which will show up as "dead" code when
* we skip them here); if we can't, then the code path could be
* "live" so we still need to check it.
*/
opcode_flags &= ~Instruction::kThrow;
work_line_->PopMonitor(this, inst->VRegA_11x());
break;
case Instruction::CHECK_CAST:
case Instruction::INSTANCE_OF: {
/*
* If this instruction succeeds, we will "downcast" register vA to the type in vB. (This
* could be an "upcast" -- not expected, so we don't try to address it.)
*
* If it fails, an exception is thrown, which we deal with later by ignoring the update to
* dec_insn.vA when branching to a handler.
*/
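// For example, after "check-cast v0, Ljava/lang/String;" succeeds, the fall-through
// path sees v0 typed as String, while a handler in a surrounding try block still sees
// the pre-cast type via the saved line.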
const bool is_checkcast = (inst->Opcode() == Instruction::CHECK_CAST);
const dex::TypeIndex type_idx((is_checkcast) ? inst->VRegB_21c() : inst->VRegC_22c());
const RegType& res_type = ResolveClass<CheckAccess::kYes>(type_idx);
if (res_type.IsConflict()) {
// If this is a primitive type, fail HARD.
ObjPtr<mirror::Class> klass = Runtime::Current()->GetClassLinker()->LookupResolvedType(
type_idx, dex_cache_.Get(), class_loader_.Get());
if (klass != nullptr && klass->IsPrimitive()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "using primitive type "
<< dex_file_->StringByTypeIdx(type_idx) << " in instanceof in "
<< GetDeclaringClass();
break;
}
DCHECK_NE(failures_.size(), 0U);
if (!is_checkcast) {
work_line_->SetRegisterType<LockOp::kClear>(this,
inst->VRegA_22c(),
reg_types_.Boolean());
}
break; // bad class
}
// TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
uint32_t orig_type_reg = (is_checkcast) ? inst->VRegA_21c() : inst->VRegB_22c();
const RegType& orig_type = work_line_->GetRegisterType(this, orig_type_reg);
if (!res_type.IsNonZeroReferenceTypes()) {
if (is_checkcast) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on unexpected class " << res_type;
} else {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "instance-of on unexpected class " << res_type;
}
} else if (!orig_type.IsReferenceTypes()) {
if (is_checkcast) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on non-reference in v" << orig_type_reg;
} else {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "instance-of on non-reference in v" << orig_type_reg;
}
} else if (orig_type.IsUninitializedTypes()) {
if (is_checkcast) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on uninitialized reference in v"
<< orig_type_reg;
} else {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "instance-of on uninitialized reference in v"
<< orig_type_reg;
}
} else {
if (is_checkcast) {
work_line_->SetRegisterType<LockOp::kKeep>(this, inst->VRegA_21c(), res_type);
} else {
work_line_->SetRegisterType<LockOp::kClear>(this,
inst->VRegA_22c(),
reg_types_.Boolean());
}
}
break;
}
case Instruction::ARRAY_LENGTH: {
const RegType& res_type = work_line_->GetRegisterType(this, inst->VRegB_12x());
if (res_type.IsReferenceTypes()) {
if (!res_type.IsArrayTypes() && !res_type.IsZeroOrNull()) {
// i.e. not an array or null
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
} else {
work_line_->SetRegisterType<LockOp::kClear>(this,
inst->VRegA_12x(),
reg_types_.Integer());
}
} else {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
}
break;
}
case Instruction::NEW_INSTANCE: {
const RegType& res_type = ResolveClass<CheckAccess::kYes>(dex::TypeIndex(inst->VRegB_21c()));
if (res_type.IsConflict()) {
DCHECK_NE(failures_.size(), 0U);
break; // bad class
}
// TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
// can't create an instance of an interface or abstract class
if (!res_type.IsInstantiableTypes()) {
Fail(VERIFY_ERROR_INSTANTIATION)
<< "new-instance on primitive, interface or abstract class" << res_type;
// Soft failure so carry on to set register type.
}
const RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
// Any registers holding previous allocations from this address that have not yet been
// initialized must be marked invalid.
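// e.g. a loop that executes "new-instance v0, LFoo;" twice without calling <init>
// would otherwise allow the stale uninitialized reference from the first iteration
// to be confused with the new one, as both are tagged with the same allocation pc.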
work_line_->MarkUninitRefsAsInvalid(this, uninit_type);
// add the new uninitialized reference to the register state
work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_21c(), uninit_type);
break;
}
case Instruction::NEW_ARRAY:
VerifyNewArray(inst, false, false);
break;
case Instruction::FILLED_NEW_ARRAY:
VerifyNewArray(inst, true, false);
just_set_result = true; // Filled new array sets result register
break;
case Instruction::FILLED_NEW_ARRAY_RANGE:
VerifyNewArray(inst, true, true);
just_set_result = true; // Filled new array range sets result register
break;
case Instruction::CMPL_FLOAT:
case Instruction::CMPG_FLOAT:
if (!work_line_->VerifyRegisterType(this, inst->VRegB_23x(), reg_types_.Float())) {
break;
}
if (!work_line_->VerifyRegisterType(this, inst->VRegC_23x(), reg_types_.Float())) {
break;
}
work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_23x(), reg_types_.Integer());
break;
case Instruction::CMPL_DOUBLE:
case Instruction::CMPG_DOUBLE:
if (!work_line_->VerifyRegisterTypeWide(this, inst->VRegB_23x(), reg_types_.DoubleLo(),
reg_types_.DoubleHi())) {
break;
}
if (!work_line_->VerifyRegisterTypeWide(this, inst->VRegC_23x(), reg_types_.DoubleLo(),
reg_types_.DoubleHi())) {
break;
}
work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_23x(), reg_types_.Integer());
break;
case Instruction::CMP_LONG:
if (!work_line_->VerifyRegisterTypeWide(this, inst->VRegB_23x(), reg_types_.LongLo(),
reg_types_.LongHi())) {
break;
}
if (!work_line_->VerifyRegisterTypeWide(this, inst->VRegC_23x(), reg_types_.LongLo(),
reg_types_.LongHi())) {
break;
}
work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_23x(), reg_types_.Integer());
break;
case Instruction::THROW: {
const RegType& res_type = work_line_->GetRegisterType(this, inst->VRegA_11x());
if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type, this)) {
if (res_type.IsUninitializedTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "thrown exception not initialized";
} else if (!res_type.IsReferenceTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "thrown value of non-reference type " << res_type;
} else {
Fail(res_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS : VERIFY_ERROR_BAD_CLASS_SOFT)
<< "thrown class " << res_type << " not instanceof Throwable";
}
}
break;
}
case Instruction::GOTO:
case Instruction::GOTO_16:
case Instruction::GOTO_32:
/* no effect on or use of registers */
break;
case Instruction::PACKED_SWITCH:
case Instruction::SPARSE_SWITCH:
/* verify that vAA is an integer, or can be converted to one */
work_line_->VerifyRegisterType(this, inst->VRegA_31t(), reg_types_.Integer());
break;
case Instruction::FILL_ARRAY_DATA: {
/* Similar to the verification done for APUT */
const RegType& array_type = work_line_->GetRegisterType(this, inst->VRegA_31t());
/* array_type can be null if the reg type is Zero */
if (!array_type.IsZeroOrNull()) {
if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with array type "
<< array_type;
} else if (array_type.IsUnresolvedTypes()) {
// If it's an unresolved array type, it must be non-primitive.
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data for array of type "
<< array_type;
} else {
const RegType& component_type = reg_types_.GetComponentType(array_type, GetClassLoader());
DCHECK(!component_type.IsConflict());
if (component_type.IsNonZeroReferenceTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with component type "
<< component_type;
} else {
// Now verify that the element width in the table matches the element width declared in
// the array.
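// e.g. a payload built for a [S array must declare 2-byte elements; one declaring
// 4-byte (int-sized) elements is rejected as a size mismatch.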
const uint16_t* array_data =
insns + (insns[1] | (static_cast<int32_t>(insns[2]) << 16));
if (array_data[0] != Instruction::kArrayDataSignature) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid magic for array-data";
} else {
size_t elem_width = Primitive::ComponentSize(component_type.GetPrimitiveType());
// Since we don't compress the data in Dex, expect to see equal width of data stored
// in the table and expected from the array class.
if (array_data[1] != elem_width) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-data size mismatch (" << array_data[1]
<< " vs " << elem_width << ")";
}
}
}
}
}
break;
}
case Instruction::IF_EQ:
case Instruction::IF_NE: {
const RegType& reg_type1 = work_line_->GetRegisterType(this, inst->VRegA_22t());
const RegType& reg_type2 = work_line_->GetRegisterType(this, inst->VRegB_22t());
bool mismatch = false;
if (reg_type1.IsZeroOrNull()) { // zero then integral or reference expected
mismatch = !reg_type2.IsReferenceTypes() && !reg_type2.IsIntegralTypes();
} else if (reg_type1.IsReferenceTypes()) { // both references?
mismatch = !reg_type2.IsReferenceTypes();
} else { // both integral?
mismatch = !reg_type1.IsIntegralTypes() || !reg_type2.IsIntegralTypes();
}